/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helper.h"
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
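
/* The mask/xor/sub sequence above sign-extends the extracted field:
   for s = 1 << (width - 1), ((x & ((1u << width) - 1)) ^ s) - s
   equals x sign-extended from 'width' bits.  */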

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
          shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};
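
/* A shift immediate of zero selects the special ARM forms handled above:
   LSR #32 and ASR #32 (both encoded as shift 0), and RRX for the rotate
   case, which shifts the old carry flag into bit 31.  */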

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, cpu_env);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, cpu_env);
    }
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv statusptr = tcg_temp_new_i32(); \
    int offset; \
    if (neon) { \
        offset = offsetof(CPUState, vfp.standard_fp_status); \
    } else { \
        offset = offsetof(CPUState, vfp.fp_status); \
    } \
    tcg_gen_addi_i32(statusptr, cpu_env, offset); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_i32(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv statusptr = tcg_temp_new_i32(); \
    int offset; \
    if (neon) { \
        offset = offsetof(CPUState, vfp.standard_fp_status); \
    } else { \
        offset = offsetof(CPUState, vfp.fp_status); \
    } \
    tcg_gen_addi_i32(statusptr, cpu_env, offset); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_i32(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv statusptr = tcg_temp_new_i32(); \
    int offset; \
    if (neon) { \
        offset = offsetof(CPUState, vfp.standard_fp_status); \
    } else { \
        offset = offsetof(CPUState, vfp.fp_status); \
    } \
    tcg_gen_addi_i32(statusptr, cpu_env, offset); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_i32(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
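
/* A single-precision register aliases one half of the corresponding
   double-precision register; CPU_DoubleU's l.lower/l.upper pick the
   correct word regardless of host endianness.  */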

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_SIZE(name) \
IWMMXT_OP(name##b) \
IWMMXT_OP(name##w) \
IWMMXT_OP(name##l)

#define IWMMXT_OP_1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_SIZE(unpackl)
IWMMXT_OP_SIZE(unpackh)

IWMMXT_OP_1(unpacklub)
IWMMXT_OP_1(unpackluw)
IWMMXT_OP_1(unpacklul)
IWMMXT_OP_1(unpackhub)
IWMMXT_OP_1(unpackhuw)
IWMMXT_OP_1(unpackhul)
IWMMXT_OP_1(unpacklsb)
IWMMXT_OP_1(unpacklsw)
IWMMXT_OP_1(unpacklsl)
IWMMXT_OP_1(unpackhsb)
IWMMXT_OP_1(unpackhsw)
IWMMXT_OP_1(unpackhsl)

IWMMXT_OP_SIZE(cmpeq)
IWMMXT_OP_SIZE(cmpgtu)
IWMMXT_OP_SIZE(cmpgts)

IWMMXT_OP_SIZE(mins)
IWMMXT_OP_SIZE(minu)
IWMMXT_OP_SIZE(maxs)
IWMMXT_OP_SIZE(maxu)

IWMMXT_OP_SIZE(subn)
IWMMXT_OP_SIZE(addn)
IWMMXT_OP_SIZE(subu)
IWMMXT_OP_SIZE(addu)
IWMMXT_OP_SIZE(subs)
IWMMXT_OP_SIZE(adds)

IWMMXT_OP(avgb0)
IWMMXT_OP(avgb1)
IWMMXT_OP(avgw0)
IWMMXT_OP(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP(packuw)
IWMMXT_OP(packul)
IWMMXT_OP(packuq)
IWMMXT_OP(packsw)
IWMMXT_OP(packsl)
IWMMXT_OP(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
1834 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1835 wrd = (insn >> 12) & 0xf;
1836 rd0 = (insn >> 16) & 0xf;
1837 gen_op_iwmmxt_movq_M0_wRn(rd0);
1838 switch ((insn >> 22) & 3) {
1839 case 0:
e677137d 1840 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1841 break;
1842 case 1:
e677137d 1843 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1844 break;
1845 case 2:
e677137d 1846 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1847 break;
1848 case 3:
1849 return 1;
1850 }
1851 gen_op_iwmmxt_movq_wRn_M0(wrd);
1852 gen_op_iwmmxt_set_mup();
1853 break;
1854 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1855 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1856 return 1;
da6b5335 1857 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1858 tmp2 = tcg_temp_new_i32();
da6b5335 1859 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1860 switch ((insn >> 22) & 3) {
1861 case 0:
1862 for (i = 0; i < 7; i ++) {
da6b5335
FN
1863 tcg_gen_shli_i32(tmp2, tmp2, 4);
1864 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1865 }
1866 break;
1867 case 1:
1868 for (i = 0; i < 3; i ++) {
da6b5335
FN
1869 tcg_gen_shli_i32(tmp2, tmp2, 8);
1870 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1871 }
1872 break;
1873 case 2:
da6b5335
FN
1874 tcg_gen_shli_i32(tmp2, tmp2, 16);
1875 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1876 break;
18c9b560 1877 }
da6b5335 1878 gen_set_nzcv(tmp);
7d1b0095
PM
1879 tcg_temp_free_i32(tmp2);
1880 tcg_temp_free_i32(tmp);
18c9b560
AZ
1881 break;
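/* Illustrative sketch (not part of the original source): what the TANDC and
 * TORC cases above compute.  The packed wCASF flag register is reduced by
 * AND-ing (TANDC) or OR-ing (TORC) every per-element flag field together;
 * the combined field ends up in bits [31:28], which is where the NZCV flags
 * are then taken from.  field_bits is 4, 8 or 16 for the B, H and W forms.
 * The function name is hypothetical. */
static uint32_t model_tandc_torc(uint32_t wcasf, int field_bits, int use_or)
{
    uint32_t acc = wcasf;
    int shift;

    for (shift = field_bits; shift < 32; shift += field_bits) {
        if (use_or) {
            acc |= wcasf << shift;
        } else {
            acc &= wcasf << shift;
        }
    }
    return acc & 0xf0000000u;   /* only the top N, Z, C, V bits are consumed */
}
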
1882 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1883 rd = (insn >> 12) & 0xf;
1884 rd0 = (insn >> 16) & 0xf;
da6b5335 1885 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1886 return 1;
1887 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1888 tmp = tcg_temp_new_i32();
18c9b560
AZ
1889 switch ((insn >> 22) & 3) {
1890 case 0:
da6b5335 1891 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1892 break;
1893 case 1:
da6b5335 1894 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1895 break;
1896 case 2:
da6b5335 1897 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1898 break;
18c9b560 1899 }
da6b5335 1900 store_reg(s, rd, tmp);
18c9b560
AZ
1901 break;
1902 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1903 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1904 wrd = (insn >> 12) & 0xf;
1905 rd0 = (insn >> 16) & 0xf;
1906 rd1 = (insn >> 0) & 0xf;
1907 gen_op_iwmmxt_movq_M0_wRn(rd0);
1908 switch ((insn >> 22) & 3) {
1909 case 0:
1910 if (insn & (1 << 21))
1911 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1912 else
1913 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1914 break;
1915 case 1:
1916 if (insn & (1 << 21))
1917 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1918 else
1919 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1920 break;
1921 case 2:
1922 if (insn & (1 << 21))
1923 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1924 else
1925 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1926 break;
1927 case 3:
1928 return 1;
1929 }
1930 gen_op_iwmmxt_movq_wRn_M0(wrd);
1931 gen_op_iwmmxt_set_mup();
1932 gen_op_iwmmxt_set_cup();
1933 break;
1934 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1935 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1936 wrd = (insn >> 12) & 0xf;
1937 rd0 = (insn >> 16) & 0xf;
1938 gen_op_iwmmxt_movq_M0_wRn(rd0);
1939 switch ((insn >> 22) & 3) {
1940 case 0:
1941 if (insn & (1 << 21))
1942 gen_op_iwmmxt_unpacklsb_M0();
1943 else
1944 gen_op_iwmmxt_unpacklub_M0();
1945 break;
1946 case 1:
1947 if (insn & (1 << 21))
1948 gen_op_iwmmxt_unpacklsw_M0();
1949 else
1950 gen_op_iwmmxt_unpackluw_M0();
1951 break;
1952 case 2:
1953 if (insn & (1 << 21))
1954 gen_op_iwmmxt_unpacklsl_M0();
1955 else
1956 gen_op_iwmmxt_unpacklul_M0();
1957 break;
1958 case 3:
1959 return 1;
1960 }
1961 gen_op_iwmmxt_movq_wRn_M0(wrd);
1962 gen_op_iwmmxt_set_mup();
1963 gen_op_iwmmxt_set_cup();
1964 break;
1965 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1966 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1967 wrd = (insn >> 12) & 0xf;
1968 rd0 = (insn >> 16) & 0xf;
1969 gen_op_iwmmxt_movq_M0_wRn(rd0);
1970 switch ((insn >> 22) & 3) {
1971 case 0:
1972 if (insn & (1 << 21))
1973 gen_op_iwmmxt_unpackhsb_M0();
1974 else
1975 gen_op_iwmmxt_unpackhub_M0();
1976 break;
1977 case 1:
1978 if (insn & (1 << 21))
1979 gen_op_iwmmxt_unpackhsw_M0();
1980 else
1981 gen_op_iwmmxt_unpackhuw_M0();
1982 break;
1983 case 2:
1984 if (insn & (1 << 21))
1985 gen_op_iwmmxt_unpackhsl_M0();
1986 else
1987 gen_op_iwmmxt_unpackhul_M0();
1988 break;
1989 case 3:
1990 return 1;
1991 }
1992 gen_op_iwmmxt_movq_wRn_M0(wrd);
1993 gen_op_iwmmxt_set_mup();
1994 gen_op_iwmmxt_set_cup();
1995 break;
1996 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1997 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1998 if (((insn >> 22) & 3) == 0)
1999 return 1;
18c9b560
AZ
2000 wrd = (insn >> 12) & 0xf;
2001 rd0 = (insn >> 16) & 0xf;
2002 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2003 tmp = tcg_temp_new_i32();
da6b5335 2004 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2005 tcg_temp_free_i32(tmp);
18c9b560 2006 return 1;
da6b5335 2007 }
18c9b560 2008 switch ((insn >> 22) & 3) {
18c9b560 2009 case 1:
947a2fa2 2010 gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2011 break;
2012 case 2:
947a2fa2 2013 gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2014 break;
2015 case 3:
947a2fa2 2016 gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2017 break;
2018 }
7d1b0095 2019 tcg_temp_free_i32(tmp);
18c9b560
AZ
2020 gen_op_iwmmxt_movq_wRn_M0(wrd);
2021 gen_op_iwmmxt_set_mup();
2022 gen_op_iwmmxt_set_cup();
2023 break;
2024 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2025 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2026 if (((insn >> 22) & 3) == 0)
2027 return 1;
18c9b560
AZ
2028 wrd = (insn >> 12) & 0xf;
2029 rd0 = (insn >> 16) & 0xf;
2030 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2031 tmp = tcg_temp_new_i32();
da6b5335 2032 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2033 tcg_temp_free_i32(tmp);
18c9b560 2034 return 1;
da6b5335 2035 }
18c9b560 2036 switch ((insn >> 22) & 3) {
18c9b560 2037 case 1:
947a2fa2 2038 gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2039 break;
2040 case 2:
947a2fa2 2041 gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2042 break;
2043 case 3:
947a2fa2 2044 gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2045 break;
2046 }
7d1b0095 2047 tcg_temp_free_i32(tmp);
18c9b560
AZ
2048 gen_op_iwmmxt_movq_wRn_M0(wrd);
2049 gen_op_iwmmxt_set_mup();
2050 gen_op_iwmmxt_set_cup();
2051 break;
2052 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2053 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2054 if (((insn >> 22) & 3) == 0)
2055 return 1;
18c9b560
AZ
2056 wrd = (insn >> 12) & 0xf;
2057 rd0 = (insn >> 16) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2059 tmp = tcg_temp_new_i32();
da6b5335 2060 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2061 tcg_temp_free_i32(tmp);
18c9b560 2062 return 1;
da6b5335 2063 }
18c9b560 2064 switch ((insn >> 22) & 3) {
18c9b560 2065 case 1:
947a2fa2 2066 gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2067 break;
2068 case 2:
947a2fa2 2069 gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2070 break;
2071 case 3:
947a2fa2 2072 gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2073 break;
2074 }
7d1b0095 2075 tcg_temp_free_i32(tmp);
18c9b560
AZ
2076 gen_op_iwmmxt_movq_wRn_M0(wrd);
2077 gen_op_iwmmxt_set_mup();
2078 gen_op_iwmmxt_set_cup();
2079 break;
2080 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2081 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2082 if (((insn >> 22) & 3) == 0)
2083 return 1;
18c9b560
AZ
2084 wrd = (insn >> 12) & 0xf;
2085 rd0 = (insn >> 16) & 0xf;
2086 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2087 tmp = tcg_temp_new_i32();
18c9b560 2088 switch ((insn >> 22) & 3) {
18c9b560 2089 case 1:
da6b5335 2090 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2091 tcg_temp_free_i32(tmp);
18c9b560 2092 return 1;
da6b5335 2093 }
947a2fa2 2094 gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2095 break;
2096 case 2:
da6b5335 2097 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2098 tcg_temp_free_i32(tmp);
18c9b560 2099 return 1;
da6b5335 2100 }
947a2fa2 2101 gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2102 break;
2103 case 3:
da6b5335 2104 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2105 tcg_temp_free_i32(tmp);
18c9b560 2106 return 1;
da6b5335 2107 }
947a2fa2 2108 gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2109 break;
2110 }
7d1b0095 2111 tcg_temp_free_i32(tmp);
18c9b560
AZ
2112 gen_op_iwmmxt_movq_wRn_M0(wrd);
2113 gen_op_iwmmxt_set_mup();
2114 gen_op_iwmmxt_set_cup();
2115 break;
2116 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2117 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2118 wrd = (insn >> 12) & 0xf;
2119 rd0 = (insn >> 16) & 0xf;
2120 rd1 = (insn >> 0) & 0xf;
2121 gen_op_iwmmxt_movq_M0_wRn(rd0);
2122 switch ((insn >> 22) & 3) {
2123 case 0:
2124 if (insn & (1 << 21))
2125 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2126 else
2127 gen_op_iwmmxt_minub_M0_wRn(rd1);
2128 break;
2129 case 1:
2130 if (insn & (1 << 21))
2131 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2132 else
2133 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2134 break;
2135 case 2:
2136 if (insn & (1 << 21))
2137 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2138 else
2139 gen_op_iwmmxt_minul_M0_wRn(rd1);
2140 break;
2141 case 3:
2142 return 1;
2143 }
2144 gen_op_iwmmxt_movq_wRn_M0(wrd);
2145 gen_op_iwmmxt_set_mup();
2146 break;
2147 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2148 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2149 wrd = (insn >> 12) & 0xf;
2150 rd0 = (insn >> 16) & 0xf;
2151 rd1 = (insn >> 0) & 0xf;
2152 gen_op_iwmmxt_movq_M0_wRn(rd0);
2153 switch ((insn >> 22) & 3) {
2154 case 0:
2155 if (insn & (1 << 21))
2156 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2157 else
2158 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2159 break;
2160 case 1:
2161 if (insn & (1 << 21))
2162 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2163 else
2164 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2165 break;
2166 case 2:
2167 if (insn & (1 << 21))
2168 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2169 else
2170 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2171 break;
2172 case 3:
2173 return 1;
2174 }
2175 gen_op_iwmmxt_movq_wRn_M0(wrd);
2176 gen_op_iwmmxt_set_mup();
2177 break;
2178 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2179 case 0x402: case 0x502: case 0x602: case 0x702:
2180 wrd = (insn >> 12) & 0xf;
2181 rd0 = (insn >> 16) & 0xf;
2182 rd1 = (insn >> 0) & 0xf;
2183 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2184 tmp = tcg_const_i32((insn >> 20) & 3);
2185 iwmmxt_load_reg(cpu_V1, rd1);
2186 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2187 tcg_temp_free(tmp);
18c9b560
AZ
2188 gen_op_iwmmxt_movq_wRn_M0(wrd);
2189 gen_op_iwmmxt_set_mup();
2190 break;
2191 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2192 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2193 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2194 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2195 wrd = (insn >> 12) & 0xf;
2196 rd0 = (insn >> 16) & 0xf;
2197 rd1 = (insn >> 0) & 0xf;
2198 gen_op_iwmmxt_movq_M0_wRn(rd0);
2199 switch ((insn >> 20) & 0xf) {
2200 case 0x0:
2201 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2202 break;
2203 case 0x1:
2204 gen_op_iwmmxt_subub_M0_wRn(rd1);
2205 break;
2206 case 0x3:
2207 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2208 break;
2209 case 0x4:
2210 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2211 break;
2212 case 0x5:
2213 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2214 break;
2215 case 0x7:
2216 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2217 break;
2218 case 0x8:
2219 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2220 break;
2221 case 0x9:
2222 gen_op_iwmmxt_subul_M0_wRn(rd1);
2223 break;
2224 case 0xb:
2225 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2226 break;
2227 default:
2228 return 1;
2229 }
2230 gen_op_iwmmxt_movq_wRn_M0(wrd);
2231 gen_op_iwmmxt_set_mup();
2232 gen_op_iwmmxt_set_cup();
2233 break;
2234 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2235 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2236 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2237 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2238 wrd = (insn >> 12) & 0xf;
2239 rd0 = (insn >> 16) & 0xf;
2240 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2241 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
947a2fa2 2242 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
da6b5335 2243 tcg_temp_free(tmp);
18c9b560
AZ
2244 gen_op_iwmmxt_movq_wRn_M0(wrd);
2245 gen_op_iwmmxt_set_mup();
2246 gen_op_iwmmxt_set_cup();
2247 break;
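/* Illustrative sketch (not part of the original source): the WSHUFH case
 * above builds an 8-bit control value from insn[23:20] and insn[3:0] and
 * hands it to the shufh helper, which is defined elsewhere.  Assuming the
 * architectural definition of WSHUFH (a halfword shuffle analogous to x86
 * PSHUFW), the operation looks roughly like this in plain C.  The function
 * name is hypothetical. */
static uint64_t model_wshufh(uint64_t src, uint32_t ctrl)
{
    uint64_t dst = 0;
    int i;

    for (i = 0; i < 4; i++) {
        int sel = (ctrl >> (2 * i)) & 3;            /* 2-bit selector per lane */
        uint64_t h = (src >> (16 * sel)) & 0xffff;  /* pick the source halfword */
        dst |= h << (16 * i);
    }
    return dst;
}
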
2248 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2249 case 0x418: case 0x518: case 0x618: case 0x718:
2250 case 0x818: case 0x918: case 0xa18: case 0xb18:
2251 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2252 wrd = (insn >> 12) & 0xf;
2253 rd0 = (insn >> 16) & 0xf;
2254 rd1 = (insn >> 0) & 0xf;
2255 gen_op_iwmmxt_movq_M0_wRn(rd0);
2256 switch ((insn >> 20) & 0xf) {
2257 case 0x0:
2258 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2259 break;
2260 case 0x1:
2261 gen_op_iwmmxt_addub_M0_wRn(rd1);
2262 break;
2263 case 0x3:
2264 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2265 break;
2266 case 0x4:
2267 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2268 break;
2269 case 0x5:
2270 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2271 break;
2272 case 0x7:
2273 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2274 break;
2275 case 0x8:
2276 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2277 break;
2278 case 0x9:
2279 gen_op_iwmmxt_addul_M0_wRn(rd1);
2280 break;
2281 case 0xb:
2282 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2283 break;
2284 default:
2285 return 1;
2286 }
2287 gen_op_iwmmxt_movq_wRn_M0(wrd);
2288 gen_op_iwmmxt_set_mup();
2289 gen_op_iwmmxt_set_cup();
2290 break;
2291 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2292 case 0x408: case 0x508: case 0x608: case 0x708:
2293 case 0x808: case 0x908: case 0xa08: case 0xb08:
2294 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2295 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2296 return 1;
18c9b560
AZ
2297 wrd = (insn >> 12) & 0xf;
2298 rd0 = (insn >> 16) & 0xf;
2299 rd1 = (insn >> 0) & 0xf;
2300 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2301 switch ((insn >> 22) & 3) {
18c9b560
AZ
2302 case 1:
2303 if (insn & (1 << 21))
2304 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2305 else
2306 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2307 break;
2308 case 2:
2309 if (insn & (1 << 21))
2310 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2311 else
2312 gen_op_iwmmxt_packul_M0_wRn(rd1);
2313 break;
2314 case 3:
2315 if (insn & (1 << 21))
2316 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2317 else
2318 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2319 break;
2320 }
2321 gen_op_iwmmxt_movq_wRn_M0(wrd);
2322 gen_op_iwmmxt_set_mup();
2323 gen_op_iwmmxt_set_cup();
2324 break;
2325 case 0x201: case 0x203: case 0x205: case 0x207:
2326 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2327 case 0x211: case 0x213: case 0x215: case 0x217:
2328 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2329 wrd = (insn >> 5) & 0xf;
2330 rd0 = (insn >> 12) & 0xf;
2331 rd1 = (insn >> 0) & 0xf;
2332 if (rd0 == 0xf || rd1 == 0xf)
2333 return 1;
2334 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2335 tmp = load_reg(s, rd0);
2336 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2337 switch ((insn >> 16) & 0xf) {
2338 case 0x0: /* TMIA */
da6b5335 2339 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2340 break;
2341 case 0x8: /* TMIAPH */
da6b5335 2342 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2343 break;
2344 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2345 if (insn & (1 << 16))
da6b5335 2346 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2347 if (insn & (1 << 17))
da6b5335
FN
2348 tcg_gen_shri_i32(tmp2, tmp2, 16);
2349 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2350 break;
2351 default:
7d1b0095
PM
2352 tcg_temp_free_i32(tmp2);
2353 tcg_temp_free_i32(tmp);
18c9b560
AZ
2354 return 1;
2355 }
7d1b0095
PM
2356 tcg_temp_free_i32(tmp2);
2357 tcg_temp_free_i32(tmp);
18c9b560
AZ
2358 gen_op_iwmmxt_movq_wRn_M0(wrd);
2359 gen_op_iwmmxt_set_mup();
2360 break;
2361 default:
2362 return 1;
2363 }
2364
2365 return 0;
2366}
2367
a1c7273b 2368/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560
AZ
 2369 (i.e. an undefined instruction). */
2370static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2371{
2372 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2373 TCGv tmp, tmp2;
18c9b560
AZ
2374
2375 if ((insn & 0x0ff00f10) == 0x0e200010) {
2376 /* Multiply with Internal Accumulate Format */
2377 rd0 = (insn >> 12) & 0xf;
2378 rd1 = insn & 0xf;
2379 acc = (insn >> 5) & 7;
2380
2381 if (acc != 0)
2382 return 1;
2383
3a554c0f
FN
2384 tmp = load_reg(s, rd0);
2385 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2386 switch ((insn >> 16) & 0xf) {
2387 case 0x0: /* MIA */
3a554c0f 2388 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2389 break;
2390 case 0x8: /* MIAPH */
3a554c0f 2391 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2392 break;
2393 case 0xc: /* MIABB */
2394 case 0xd: /* MIABT */
2395 case 0xe: /* MIATB */
2396 case 0xf: /* MIATT */
18c9b560 2397 if (insn & (1 << 16))
3a554c0f 2398 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2399 if (insn & (1 << 17))
3a554c0f
FN
2400 tcg_gen_shri_i32(tmp2, tmp2, 16);
2401 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2402 break;
2403 default:
2404 return 1;
2405 }
7d1b0095
PM
2406 tcg_temp_free_i32(tmp2);
2407 tcg_temp_free_i32(tmp);
18c9b560
AZ
2408
2409 gen_op_iwmmxt_movq_wRn_M0(acc);
2410 return 0;
2411 }
2412
2413 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2414 /* Internal Accumulator Access Format */
2415 rdhi = (insn >> 16) & 0xf;
2416 rdlo = (insn >> 12) & 0xf;
2417 acc = insn & 7;
2418
2419 if (acc != 0)
2420 return 1;
2421
2422 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2423 iwmmxt_load_reg(cpu_V0, acc);
2424 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2425 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2426 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2427 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2428 } else { /* MAR */
3a554c0f
FN
2429 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2430 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2431 }
2432 return 0;
2433 }
2434
2435 return 1;
2436}
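/* Illustrative sketch (not part of the original source): the MRA/MAR paths
 * above move the 40-bit XScale accumulator to and from an ARM register pair.
 * Expressed as plain C (hypothetical helper names): */
static void model_mra(uint64_t acc, uint32_t *rdlo, uint32_t *rdhi)
{
    *rdlo = (uint32_t)acc;                                    /* bits [31:0]  */
    *rdhi = (uint32_t)(acc >> 32) & ((1u << (40 - 32)) - 1);  /* bits [39:32] */
}

static uint64_t model_mar(uint32_t rdlo, uint32_t rdhi)
{
    return ((uint64_t)rdhi << 32) | rdlo;    /* low word goes into bits [31:0] */
}
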
2437
c1713132
AZ
 2438/* Disassemble a system coprocessor instruction.  Return nonzero if the
2439 instruction is not defined. */
2440static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2441{
b75263d6 2442 TCGv tmp, tmp2;
c1713132
AZ
2443 uint32_t rd = (insn >> 12) & 0xf;
2444 uint32_t cp = (insn >> 8) & 0xf;
2445 if (IS_USER(s)) {
2446 return 1;
2447 }
2448
18c9b560 2449 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2450 if (!env->cp[cp].cp_read)
2451 return 1;
8984bd2e 2452 gen_set_pc_im(s->pc);
7d1b0095 2453 tmp = tcg_temp_new_i32();
b75263d6
JR
2454 tmp2 = tcg_const_i32(insn);
2455 gen_helper_get_cp(tmp, cpu_env, tmp2);
2456 tcg_temp_free(tmp2);
8984bd2e 2457 store_reg(s, rd, tmp);
c1713132
AZ
2458 } else {
2459 if (!env->cp[cp].cp_write)
2460 return 1;
8984bd2e
PB
2461 gen_set_pc_im(s->pc);
2462 tmp = load_reg(s, rd);
b75263d6
JR
2463 tmp2 = tcg_const_i32(insn);
2464 gen_helper_set_cp(cpu_env, tmp2, tmp);
2465 tcg_temp_free(tmp2);
7d1b0095 2466 tcg_temp_free_i32(tmp);
c1713132
AZ
2467 }
2468 return 0;
2469}
2470
9ee6e8bb
PB
2471static int cp15_user_ok(uint32_t insn)
2472{
2473 int cpn = (insn >> 16) & 0xf;
2474 int cpm = insn & 0xf;
2475 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2476
2477 if (cpn == 13 && cpm == 0) {
2478 /* TLS register. */
2479 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2480 return 1;
2481 }
2482 if (cpn == 7) {
2483 /* ISB, DSB, DMB. */
2484 if ((cpm == 5 && op == 4)
2485 || (cpm == 10 && (op == 4 || op == 5)))
2486 return 1;
2487 }
2488 return 0;
2489}
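/* Illustrative sketch (not part of the original source): how the fields
 * tested in cp15_user_ok() map onto the MRC/MCR instruction word.  The 'op'
 * value above packs opc2 in bits [2:0] and opc1 in bits [5:3].  The struct
 * and function names here are hypothetical. */
struct model_cp15_fields {
    int crn, crm, opc1, opc2;
};

static struct model_cp15_fields model_decode_cp15(uint32_t insn)
{
    struct model_cp15_fields f;

    f.crn  = (insn >> 16) & 0xf;   /* 'cpn' above */
    f.crm  = insn & 0xf;           /* 'cpm' above */
    f.opc1 = (insn >> 21) & 7;     /* high part of 'op' */
    f.opc2 = (insn >> 5) & 7;      /* low part of 'op' */
    return f;
}
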
2490
3f26c122
RV
2491static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2492{
2493 TCGv tmp;
2494 int cpn = (insn >> 16) & 0xf;
2495 int cpm = insn & 0xf;
2496 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2497
2498 if (!arm_feature(env, ARM_FEATURE_V6K))
2499 return 0;
2500
2501 if (!(cpn == 13 && cpm == 0))
2502 return 0;
2503
2504 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2505 switch (op) {
2506 case 2:
c5883be2 2507 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2508 break;
2509 case 3:
c5883be2 2510 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2511 break;
2512 case 4:
c5883be2 2513 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2514 break;
2515 default:
3f26c122
RV
2516 return 0;
2517 }
2518 store_reg(s, rd, tmp);
2519
2520 } else {
2521 tmp = load_reg(s, rd);
2522 switch (op) {
2523 case 2:
c5883be2 2524 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2525 break;
2526 case 3:
c5883be2 2527 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2528 break;
2529 case 4:
c5883be2 2530 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2531 break;
2532 default:
7d1b0095 2533 tcg_temp_free_i32(tmp);
3f26c122
RV
2534 return 0;
2535 }
3f26c122
RV
2536 }
2537 return 1;
2538}
2539
b5ff1b31
FB
 2540/* Disassemble a system coprocessor (cp15) instruction.  Return nonzero if the
2541 instruction is not defined. */
a90b7318 2542static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2543{
2544 uint32_t rd;
b75263d6 2545 TCGv tmp, tmp2;
b5ff1b31 2546
9ee6e8bb
PB
2547 /* M profile cores use memory mapped registers instead of cp15. */
2548 if (arm_feature(env, ARM_FEATURE_M))
2549 return 1;
2550
2551 if ((insn & (1 << 25)) == 0) {
2552 if (insn & (1 << 20)) {
2553 /* mrrc */
2554 return 1;
2555 }
2556 /* mcrr. Used for block cache operations, so implement as no-op. */
2557 return 0;
2558 }
2559 if ((insn & (1 << 4)) == 0) {
2560 /* cdp */
2561 return 1;
2562 }
2563 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2564 return 1;
2565 }
cc688901
PM
2566
2567 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2568 * instructions rather than a separate instruction.
2569 */
2570 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2571 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2572 * In v7, this must NOP.
2573 */
2574 if (!arm_feature(env, ARM_FEATURE_V7)) {
2575 /* Wait for interrupt. */
2576 gen_set_pc_im(s->pc);
2577 s->is_jmp = DISAS_WFI;
2578 }
9332f9da
FB
2579 return 0;
2580 }
cc688901
PM
2581
2582 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2583 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2584 * so this is slightly over-broad.
2585 */
2586 if (!arm_feature(env, ARM_FEATURE_V6)) {
2587 /* Wait for interrupt. */
2588 gen_set_pc_im(s->pc);
2589 s->is_jmp = DISAS_WFI;
2590 return 0;
2591 }
2592 /* Otherwise fall through to handle via helper function.
2593 * In particular, on v7 and some v6 cores this is one of
2594 * the VA-PA registers.
2595 */
2596 }
2597
b5ff1b31 2598 rd = (insn >> 12) & 0xf;
3f26c122
RV
2599
2600 if (cp15_tls_load_store(env, s, insn, rd))
2601 return 0;
2602
b75263d6 2603 tmp2 = tcg_const_i32(insn);
18c9b560 2604 if (insn & ARM_CP_RW_BIT) {
7d1b0095 2605 tmp = tcg_temp_new_i32();
b75263d6 2606 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
 2607 /* If the destination register is r15 then set the condition codes. */
2608 if (rd != 15)
8984bd2e
PB
2609 store_reg(s, rd, tmp);
2610 else
7d1b0095 2611 tcg_temp_free_i32(tmp);
b5ff1b31 2612 } else {
8984bd2e 2613 tmp = load_reg(s, rd);
b75263d6 2614 gen_helper_set_cp15(cpu_env, tmp2, tmp);
7d1b0095 2615 tcg_temp_free_i32(tmp);
a90b7318
AZ
2616 /* Normally we would always end the TB here, but Linux
2617 * arch/arm/mach-pxa/sleep.S expects two instructions following
2618 * an MMU enable to execute from cache. Imitate this behaviour. */
2619 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2620 (insn & 0x0fff0fff) != 0x0e010f10)
2621 gen_lookup_tb(s);
b5ff1b31 2622 }
b75263d6 2623 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2624 return 0;
2625}
2626
9ee6e8bb
PB
2627#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2628#define VFP_SREG(insn, bigbit, smallbit) \
2629 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2630#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2631 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2632 reg = (((insn) >> (bigbit)) & 0x0f) \
2633 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2634 } else { \
2635 if (insn & (1 << (smallbit))) \
2636 return 1; \
2637 reg = ((insn) >> (bigbit)) & 0x0f; \
2638 }} while (0)
2639
2640#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2641#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2642#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2643#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2644#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2645#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2646
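/* Illustrative sketch (not part of the original source): the register-number
 * packing performed by the VFP_SREG/VFP_DREG macros above, written out as
 * plain C.  A single-precision register number is the 4-bit field followed by
 * the extra low bit; a double-precision number (VFP3 and later) puts the
 * extra bit on top.  Function names are hypothetical. */
static int model_vfp_sreg(uint32_t insn, int bigbit, int smallbit)
{
    return (((insn >> bigbit) & 0xf) << 1) | ((insn >> smallbit) & 1);
}

static int model_vfp_dreg_vfp3(uint32_t insn, int bigbit, int smallbit)
{
    return ((insn >> bigbit) & 0xf) | (((insn >> smallbit) & 1) << 4);
}
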
4373f3ce
PB
2647/* Move between integer and VFP cores. */
2648static TCGv gen_vfp_mrs(void)
2649{
7d1b0095 2650 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2651 tcg_gen_mov_i32(tmp, cpu_F0s);
2652 return tmp;
2653}
2654
2655static void gen_vfp_msr(TCGv tmp)
2656{
2657 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2658 tcg_temp_free_i32(tmp);
4373f3ce
PB
2659}
2660
ad69471c
PB
2661static void gen_neon_dup_u8(TCGv var, int shift)
2662{
7d1b0095 2663 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2664 if (shift)
2665 tcg_gen_shri_i32(var, var, shift);
86831435 2666 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2667 tcg_gen_shli_i32(tmp, var, 8);
2668 tcg_gen_or_i32(var, var, tmp);
2669 tcg_gen_shli_i32(tmp, var, 16);
2670 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2671 tcg_temp_free_i32(tmp);
ad69471c
PB
2672}
2673
2674static void gen_neon_dup_low16(TCGv var)
2675{
7d1b0095 2676 TCGv tmp = tcg_temp_new_i32();
86831435 2677 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2678 tcg_gen_shli_i32(tmp, var, 16);
2679 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2680 tcg_temp_free_i32(tmp);
ad69471c
PB
2681}
2682
2683static void gen_neon_dup_high16(TCGv var)
2684{
7d1b0095 2685 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2686 tcg_gen_andi_i32(var, var, 0xffff0000);
2687 tcg_gen_shri_i32(tmp, var, 16);
2688 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2689 tcg_temp_free_i32(tmp);
ad69471c
PB
2690}
2691
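/* Illustrative sketch (not part of the original source): the duplication done
 * by the three helpers above, expressed on a plain 32-bit value.  Function
 * names are hypothetical. */
static uint32_t model_dup_u8(uint32_t v, int shift)
{
    v = (v >> shift) & 0xff;
    return v * 0x01010101u;        /* replicate the byte into all four lanes */
}

static uint32_t model_dup_low16(uint32_t v)
{
    v &= 0xffff;
    return v | (v << 16);
}

static uint32_t model_dup_high16(uint32_t v)
{
    v &= 0xffff0000u;
    return v | (v >> 16);
}
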
8e18cde3
PM
2692static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2693{
2694 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2695 TCGv tmp;
2696 switch (size) {
2697 case 0:
2698 tmp = gen_ld8u(addr, IS_USER(s));
2699 gen_neon_dup_u8(tmp, 0);
2700 break;
2701 case 1:
2702 tmp = gen_ld16u(addr, IS_USER(s));
2703 gen_neon_dup_low16(tmp);
2704 break;
2705 case 2:
2706 tmp = gen_ld32(addr, IS_USER(s));
2707 break;
2708 default: /* Avoid compiler warnings. */
2709 abort();
2710 }
2711 return tmp;
2712}
2713
a1c7273b 2714/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95
FB
 2715 (i.e. an undefined instruction). */
2716static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2717{
2718 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2719 int dp, veclen;
312eea9f 2720 TCGv addr;
4373f3ce 2721 TCGv tmp;
ad69471c 2722 TCGv tmp2;
b7bcbe95 2723
40f137e1
PB
2724 if (!arm_feature(env, ARM_FEATURE_VFP))
2725 return 1;
2726
5df8bac1 2727 if (!s->vfp_enabled) {
9ee6e8bb 2728 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2729 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2730 return 1;
2731 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2732 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2733 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2734 return 1;
2735 }
b7bcbe95
FB
2736 dp = ((insn & 0xf00) == 0xb00);
2737 switch ((insn >> 24) & 0xf) {
2738 case 0xe:
2739 if (insn & (1 << 4)) {
2740 /* single register transfer */
b7bcbe95
FB
2741 rd = (insn >> 12) & 0xf;
2742 if (dp) {
9ee6e8bb
PB
2743 int size;
2744 int pass;
2745
2746 VFP_DREG_N(rn, insn);
2747 if (insn & 0xf)
b7bcbe95 2748 return 1;
9ee6e8bb
PB
2749 if (insn & 0x00c00060
2750 && !arm_feature(env, ARM_FEATURE_NEON))
2751 return 1;
2752
2753 pass = (insn >> 21) & 1;
2754 if (insn & (1 << 22)) {
2755 size = 0;
2756 offset = ((insn >> 5) & 3) * 8;
2757 } else if (insn & (1 << 5)) {
2758 size = 1;
2759 offset = (insn & (1 << 6)) ? 16 : 0;
2760 } else {
2761 size = 2;
2762 offset = 0;
2763 }
18c9b560 2764 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2765 /* vfp->arm */
ad69471c 2766 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2767 switch (size) {
2768 case 0:
9ee6e8bb 2769 if (offset)
ad69471c 2770 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2771 if (insn & (1 << 23))
ad69471c 2772 gen_uxtb(tmp);
9ee6e8bb 2773 else
ad69471c 2774 gen_sxtb(tmp);
9ee6e8bb
PB
2775 break;
2776 case 1:
9ee6e8bb
PB
2777 if (insn & (1 << 23)) {
2778 if (offset) {
ad69471c 2779 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2780 } else {
ad69471c 2781 gen_uxth(tmp);
9ee6e8bb
PB
2782 }
2783 } else {
2784 if (offset) {
ad69471c 2785 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2786 } else {
ad69471c 2787 gen_sxth(tmp);
9ee6e8bb
PB
2788 }
2789 }
2790 break;
2791 case 2:
9ee6e8bb
PB
2792 break;
2793 }
ad69471c 2794 store_reg(s, rd, tmp);
b7bcbe95
FB
2795 } else {
2796 /* arm->vfp */
ad69471c 2797 tmp = load_reg(s, rd);
9ee6e8bb
PB
2798 if (insn & (1 << 23)) {
2799 /* VDUP */
2800 if (size == 0) {
ad69471c 2801 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2802 } else if (size == 1) {
ad69471c 2803 gen_neon_dup_low16(tmp);
9ee6e8bb 2804 }
cbbccffc 2805 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2806 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2807 tcg_gen_mov_i32(tmp2, tmp);
2808 neon_store_reg(rn, n, tmp2);
2809 }
2810 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2811 } else {
2812 /* VMOV */
2813 switch (size) {
2814 case 0:
ad69471c
PB
2815 tmp2 = neon_load_reg(rn, pass);
2816 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2817 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2818 break;
2819 case 1:
ad69471c
PB
2820 tmp2 = neon_load_reg(rn, pass);
2821 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2822 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2823 break;
2824 case 2:
9ee6e8bb
PB
2825 break;
2826 }
ad69471c 2827 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2828 }
b7bcbe95 2829 }
9ee6e8bb
PB
2830 } else { /* !dp */
2831 if ((insn & 0x6f) != 0x00)
2832 return 1;
2833 rn = VFP_SREG_N(insn);
18c9b560 2834 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2835 /* vfp->arm */
2836 if (insn & (1 << 21)) {
2837 /* system register */
40f137e1 2838 rn >>= 1;
9ee6e8bb 2839
b7bcbe95 2840 switch (rn) {
40f137e1 2841 case ARM_VFP_FPSID:
4373f3ce 2842 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2843 VFP3 restricts all id registers to privileged
2844 accesses. */
2845 if (IS_USER(s)
2846 && arm_feature(env, ARM_FEATURE_VFP3))
2847 return 1;
4373f3ce 2848 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2849 break;
40f137e1 2850 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2851 if (IS_USER(s))
2852 return 1;
4373f3ce 2853 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2854 break;
40f137e1
PB
2855 case ARM_VFP_FPINST:
2856 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2857 /* Not present in VFP3. */
2858 if (IS_USER(s)
2859 || arm_feature(env, ARM_FEATURE_VFP3))
2860 return 1;
4373f3ce 2861 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2862 break;
40f137e1 2863 case ARM_VFP_FPSCR:
601d70b9 2864 if (rd == 15) {
4373f3ce
PB
2865 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2866 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2867 } else {
7d1b0095 2868 tmp = tcg_temp_new_i32();
4373f3ce
PB
2869 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2870 }
b7bcbe95 2871 break;
9ee6e8bb
PB
2872 case ARM_VFP_MVFR0:
2873 case ARM_VFP_MVFR1:
2874 if (IS_USER(s)
2875 || !arm_feature(env, ARM_FEATURE_VFP3))
2876 return 1;
4373f3ce 2877 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2878 break;
b7bcbe95
FB
2879 default:
2880 return 1;
2881 }
2882 } else {
2883 gen_mov_F0_vreg(0, rn);
4373f3ce 2884 tmp = gen_vfp_mrs();
b7bcbe95
FB
2885 }
2886 if (rd == 15) {
b5ff1b31 2887 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2888 gen_set_nzcv(tmp);
7d1b0095 2889 tcg_temp_free_i32(tmp);
4373f3ce
PB
2890 } else {
2891 store_reg(s, rd, tmp);
2892 }
b7bcbe95
FB
2893 } else {
2894 /* arm->vfp */
4373f3ce 2895 tmp = load_reg(s, rd);
b7bcbe95 2896 if (insn & (1 << 21)) {
40f137e1 2897 rn >>= 1;
b7bcbe95
FB
2898 /* system register */
2899 switch (rn) {
40f137e1 2900 case ARM_VFP_FPSID:
9ee6e8bb
PB
2901 case ARM_VFP_MVFR0:
2902 case ARM_VFP_MVFR1:
b7bcbe95
FB
2903 /* Writes are ignored. */
2904 break;
40f137e1 2905 case ARM_VFP_FPSCR:
4373f3ce 2906 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2907 tcg_temp_free_i32(tmp);
b5ff1b31 2908 gen_lookup_tb(s);
b7bcbe95 2909 break;
40f137e1 2910 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2911 if (IS_USER(s))
2912 return 1;
71b3c3de
JR
2913 /* TODO: VFP subarchitecture support.
2914 * For now, keep the EN bit only */
2915 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2916 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2917 gen_lookup_tb(s);
2918 break;
2919 case ARM_VFP_FPINST:
2920 case ARM_VFP_FPINST2:
4373f3ce 2921 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2922 break;
b7bcbe95
FB
2923 default:
2924 return 1;
2925 }
2926 } else {
4373f3ce 2927 gen_vfp_msr(tmp);
b7bcbe95
FB
2928 gen_mov_vreg_F0(0, rn);
2929 }
2930 }
2931 }
2932 } else {
2933 /* data processing */
2934 /* The opcode is in bits 23, 21, 20 and 6. */
2935 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2936 if (dp) {
2937 if (op == 15) {
2938 /* rn is opcode */
2939 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2940 } else {
2941 /* rn is register number */
9ee6e8bb 2942 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2943 }
2944
04595bf6 2945 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2946 /* Integer or single precision destination. */
9ee6e8bb 2947 rd = VFP_SREG_D(insn);
b7bcbe95 2948 } else {
9ee6e8bb 2949 VFP_DREG_D(rd, insn);
b7bcbe95 2950 }
04595bf6
PM
2951 if (op == 15 &&
2952 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2953 /* VCVT from int is always from S reg regardless of dp bit.
2954 * VCVT with immediate frac_bits has same format as SREG_M
2955 */
2956 rm = VFP_SREG_M(insn);
b7bcbe95 2957 } else {
9ee6e8bb 2958 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2959 }
2960 } else {
9ee6e8bb 2961 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2962 if (op == 15 && rn == 15) {
2963 /* Double precision destination. */
9ee6e8bb
PB
2964 VFP_DREG_D(rd, insn);
2965 } else {
2966 rd = VFP_SREG_D(insn);
2967 }
04595bf6
PM
2968 /* NB that we implicitly rely on the encoding for the frac_bits
2969 * in VCVT of fixed to float being the same as that of an SREG_M
2970 */
9ee6e8bb 2971 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2972 }
2973
69d1fc22 2974 veclen = s->vec_len;
b7bcbe95
FB
2975 if (op == 15 && rn > 3)
2976 veclen = 0;
2977
2978 /* Shut up compiler warnings. */
2979 delta_m = 0;
2980 delta_d = 0;
2981 bank_mask = 0;
3b46e624 2982
b7bcbe95
FB
2983 if (veclen > 0) {
2984 if (dp)
2985 bank_mask = 0xc;
2986 else
2987 bank_mask = 0x18;
2988
2989 /* Figure out what type of vector operation this is. */
2990 if ((rd & bank_mask) == 0) {
2991 /* scalar */
2992 veclen = 0;
2993 } else {
2994 if (dp)
69d1fc22 2995 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2996 else
69d1fc22 2997 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2998
2999 if ((rm & bank_mask) == 0) {
3000 /* mixed scalar/vector */
3001 delta_m = 0;
3002 } else {
3003 /* vector */
3004 delta_m = delta_d;
3005 }
3006 }
3007 }
3008
3009 /* Load the initial operands. */
3010 if (op == 15) {
3011 switch (rn) {
3012 case 16:
3013 case 17:
3014 /* Integer source */
3015 gen_mov_F0_vreg(0, rm);
3016 break;
3017 case 8:
3018 case 9:
3019 /* Compare */
3020 gen_mov_F0_vreg(dp, rd);
3021 gen_mov_F1_vreg(dp, rm);
3022 break;
3023 case 10:
3024 case 11:
3025 /* Compare with zero */
3026 gen_mov_F0_vreg(dp, rd);
3027 gen_vfp_F1_ld0(dp);
3028 break;
9ee6e8bb
PB
3029 case 20:
3030 case 21:
3031 case 22:
3032 case 23:
644ad806
PB
3033 case 28:
3034 case 29:
3035 case 30:
3036 case 31:
9ee6e8bb
PB
3037 /* Source and destination the same. */
3038 gen_mov_F0_vreg(dp, rd);
3039 break;
b7bcbe95
FB
3040 default:
3041 /* One source operand. */
3042 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3043 break;
b7bcbe95
FB
3044 }
3045 } else {
3046 /* Two source operands. */
3047 gen_mov_F0_vreg(dp, rn);
3048 gen_mov_F1_vreg(dp, rm);
3049 }
3050
3051 for (;;) {
3052 /* Perform the calculation. */
3053 switch (op) {
605a6aed
PM
3054 case 0: /* VMLA: fd + (fn * fm) */
3055 /* Note that order of inputs to the add matters for NaNs */
3056 gen_vfp_F1_mul(dp);
3057 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3058 gen_vfp_add(dp);
3059 break;
605a6aed 3060 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3061 gen_vfp_mul(dp);
605a6aed
PM
3062 gen_vfp_F1_neg(dp);
3063 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3064 gen_vfp_add(dp);
3065 break;
605a6aed
PM
3066 case 2: /* VNMLS: -fd + (fn * fm) */
3067 /* Note that it isn't valid to replace (-A + B) with (B - A)
3068 * or similar plausible looking simplifications
3069 * because this will give wrong results for NaNs.
3070 */
3071 gen_vfp_F1_mul(dp);
3072 gen_mov_F0_vreg(dp, rd);
3073 gen_vfp_neg(dp);
3074 gen_vfp_add(dp);
b7bcbe95 3075 break;
605a6aed 3076 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3077 gen_vfp_mul(dp);
605a6aed
PM
3078 gen_vfp_F1_neg(dp);
3079 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3080 gen_vfp_neg(dp);
605a6aed 3081 gen_vfp_add(dp);
b7bcbe95
FB
3082 break;
3083 case 4: /* mul: fn * fm */
3084 gen_vfp_mul(dp);
3085 break;
3086 case 5: /* nmul: -(fn * fm) */
3087 gen_vfp_mul(dp);
3088 gen_vfp_neg(dp);
3089 break;
3090 case 6: /* add: fn + fm */
3091 gen_vfp_add(dp);
3092 break;
3093 case 7: /* sub: fn - fm */
3094 gen_vfp_sub(dp);
3095 break;
3096 case 8: /* div: fn / fm */
3097 gen_vfp_div(dp);
3098 break;
9ee6e8bb
PB
3099 case 14: /* fconst */
3100 if (!arm_feature(env, ARM_FEATURE_VFP3))
3101 return 1;
3102
3103 n = (insn << 12) & 0x80000000;
3104 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3105 if (dp) {
3106 if (i & 0x40)
3107 i |= 0x3f80;
3108 else
3109 i |= 0x4000;
3110 n |= i << 16;
4373f3ce 3111 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3112 } else {
3113 if (i & 0x40)
3114 i |= 0x780;
3115 else
3116 i |= 0x800;
3117 n |= i << 19;
5b340b51 3118 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3119 }
9ee6e8bb 3120 break;
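/* Illustrative sketch (not part of the original source): the VFP3 fconst
 * immediate expansion performed just above, pulled out of line as plain C.
 * The 8-bit immediate is split across insn[19:16] and insn[3:0]; the sign bit
 * comes from insn[19] and the remaining seven bits are widened into a full
 * IEEE exponent and fraction.  Function names are hypothetical. */
static uint32_t model_fconst_sp(uint32_t insn)
{
    uint32_t n = (insn << 12) & 0x80000000u;              /* sign bit */
    uint32_t i = ((insn >> 12) & 0x70) | (insn & 0xf);    /* 7 value bits */

    if (i & 0x40) {
        i |= 0x780;
    } else {
        i |= 0x800;
    }
    return n | (i << 19);
}

static uint64_t model_fconst_dp(uint32_t insn)
{
    uint32_t n = (insn << 12) & 0x80000000u;
    uint32_t i = ((insn >> 12) & 0x70) | (insn & 0xf);

    if (i & 0x40) {
        i |= 0x3f80;
    } else {
        i |= 0x4000;
    }
    return ((uint64_t)(n | (i << 16))) << 32;
}
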
b7bcbe95
FB
3121 case 15: /* extension space */
3122 switch (rn) {
3123 case 0: /* cpy */
3124 /* no-op */
3125 break;
3126 case 1: /* abs */
3127 gen_vfp_abs(dp);
3128 break;
3129 case 2: /* neg */
3130 gen_vfp_neg(dp);
3131 break;
3132 case 3: /* sqrt */
3133 gen_vfp_sqrt(dp);
3134 break;
60011498
PB
3135 case 4: /* vcvtb.f32.f16 */
3136 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3137 return 1;
3138 tmp = gen_vfp_mrs();
3139 tcg_gen_ext16u_i32(tmp, tmp);
3140 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3141 tcg_temp_free_i32(tmp);
60011498
PB
3142 break;
3143 case 5: /* vcvtt.f32.f16 */
3144 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3145 return 1;
3146 tmp = gen_vfp_mrs();
3147 tcg_gen_shri_i32(tmp, tmp, 16);
3148 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3149 tcg_temp_free_i32(tmp);
60011498
PB
3150 break;
3151 case 6: /* vcvtb.f16.f32 */
3152 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3153 return 1;
7d1b0095 3154 tmp = tcg_temp_new_i32();
60011498
PB
3155 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3156 gen_mov_F0_vreg(0, rd);
3157 tmp2 = gen_vfp_mrs();
3158 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3159 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3160 tcg_temp_free_i32(tmp2);
60011498
PB
3161 gen_vfp_msr(tmp);
3162 break;
3163 case 7: /* vcvtt.f16.f32 */
3164 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3165 return 1;
7d1b0095 3166 tmp = tcg_temp_new_i32();
60011498
PB
3167 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3168 tcg_gen_shli_i32(tmp, tmp, 16);
3169 gen_mov_F0_vreg(0, rd);
3170 tmp2 = gen_vfp_mrs();
3171 tcg_gen_ext16u_i32(tmp2, tmp2);
3172 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3173 tcg_temp_free_i32(tmp2);
60011498
PB
3174 gen_vfp_msr(tmp);
3175 break;
b7bcbe95
FB
3176 case 8: /* cmp */
3177 gen_vfp_cmp(dp);
3178 break;
3179 case 9: /* cmpe */
3180 gen_vfp_cmpe(dp);
3181 break;
3182 case 10: /* cmpz */
3183 gen_vfp_cmp(dp);
3184 break;
3185 case 11: /* cmpez */
3186 gen_vfp_F1_ld0(dp);
3187 gen_vfp_cmpe(dp);
3188 break;
3189 case 15: /* single<->double conversion */
3190 if (dp)
4373f3ce 3191 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3192 else
4373f3ce 3193 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3194 break;
3195 case 16: /* fuito */
5500b06c 3196 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3197 break;
3198 case 17: /* fsito */
5500b06c 3199 gen_vfp_sito(dp, 0);
b7bcbe95 3200 break;
9ee6e8bb
PB
3201 case 20: /* fshto */
3202 if (!arm_feature(env, ARM_FEATURE_VFP3))
3203 return 1;
5500b06c 3204 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3205 break;
3206 case 21: /* fslto */
3207 if (!arm_feature(env, ARM_FEATURE_VFP3))
3208 return 1;
5500b06c 3209 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3210 break;
3211 case 22: /* fuhto */
3212 if (!arm_feature(env, ARM_FEATURE_VFP3))
3213 return 1;
5500b06c 3214 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3215 break;
3216 case 23: /* fulto */
3217 if (!arm_feature(env, ARM_FEATURE_VFP3))
3218 return 1;
5500b06c 3219 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3220 break;
b7bcbe95 3221 case 24: /* ftoui */
5500b06c 3222 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3223 break;
3224 case 25: /* ftouiz */
5500b06c 3225 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3226 break;
3227 case 26: /* ftosi */
5500b06c 3228 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3229 break;
3230 case 27: /* ftosiz */
5500b06c 3231 gen_vfp_tosiz(dp, 0);
b7bcbe95 3232 break;
9ee6e8bb
PB
3233 case 28: /* ftosh */
3234 if (!arm_feature(env, ARM_FEATURE_VFP3))
3235 return 1;
5500b06c 3236 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3237 break;
3238 case 29: /* ftosl */
3239 if (!arm_feature(env, ARM_FEATURE_VFP3))
3240 return 1;
5500b06c 3241 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3242 break;
3243 case 30: /* ftouh */
3244 if (!arm_feature(env, ARM_FEATURE_VFP3))
3245 return 1;
5500b06c 3246 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3247 break;
3248 case 31: /* ftoul */
3249 if (!arm_feature(env, ARM_FEATURE_VFP3))
3250 return 1;
5500b06c 3251 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3252 break;
b7bcbe95
FB
3253 default: /* undefined */
3254 printf ("rn:%d\n", rn);
3255 return 1;
3256 }
3257 break;
3258 default: /* undefined */
3259 printf ("op:%d\n", op);
3260 return 1;
3261 }
3262
3263 /* Write back the result. */
3264 if (op == 15 && (rn >= 8 && rn <= 11))
3265 ; /* Comparison, do nothing. */
04595bf6
PM
3266 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3267 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3268 gen_mov_vreg_F0(0, rd);
3269 else if (op == 15 && rn == 15)
3270 /* conversion */
3271 gen_mov_vreg_F0(!dp, rd);
3272 else
3273 gen_mov_vreg_F0(dp, rd);
3274
3275 /* break out of the loop if we have finished */
3276 if (veclen == 0)
3277 break;
3278
3279 if (op == 15 && delta_m == 0) {
3280 /* single source one-many */
3281 while (veclen--) {
3282 rd = ((rd + delta_d) & (bank_mask - 1))
3283 | (rd & bank_mask);
3284 gen_mov_vreg_F0(dp, rd);
3285 }
3286 break;
3287 }
3288 /* Setup the next operands. */
3289 veclen--;
3290 rd = ((rd + delta_d) & (bank_mask - 1))
3291 | (rd & bank_mask);
3292
3293 if (op == 15) {
3294 /* One source operand. */
3295 rm = ((rm + delta_m) & (bank_mask - 1))
3296 | (rm & bank_mask);
3297 gen_mov_F0_vreg(dp, rm);
3298 } else {
3299 /* Two source operands. */
3300 rn = ((rn + delta_d) & (bank_mask - 1))
3301 | (rn & bank_mask);
3302 gen_mov_F0_vreg(dp, rn);
3303 if (delta_m) {
3304 rm = ((rm + delta_m) & (bank_mask - 1))
3305 | (rm & bank_mask);
3306 gen_mov_F1_vreg(dp, rm);
3307 }
3308 }
3309 }
3310 }
3311 break;
3312 case 0xc:
3313 case 0xd:
8387da81 3314 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3315 /* two-register transfer */
3316 rn = (insn >> 16) & 0xf;
3317 rd = (insn >> 12) & 0xf;
3318 if (dp) {
9ee6e8bb
PB
3319 VFP_DREG_M(rm, insn);
3320 } else {
3321 rm = VFP_SREG_M(insn);
3322 }
b7bcbe95 3323
18c9b560 3324 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3325 /* vfp->arm */
3326 if (dp) {
4373f3ce
PB
3327 gen_mov_F0_vreg(0, rm * 2);
3328 tmp = gen_vfp_mrs();
3329 store_reg(s, rd, tmp);
3330 gen_mov_F0_vreg(0, rm * 2 + 1);
3331 tmp = gen_vfp_mrs();
3332 store_reg(s, rn, tmp);
b7bcbe95
FB
3333 } else {
3334 gen_mov_F0_vreg(0, rm);
4373f3ce 3335 tmp = gen_vfp_mrs();
8387da81 3336 store_reg(s, rd, tmp);
b7bcbe95 3337 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3338 tmp = gen_vfp_mrs();
8387da81 3339 store_reg(s, rn, tmp);
b7bcbe95
FB
3340 }
3341 } else {
3342 /* arm->vfp */
3343 if (dp) {
4373f3ce
PB
3344 tmp = load_reg(s, rd);
3345 gen_vfp_msr(tmp);
3346 gen_mov_vreg_F0(0, rm * 2);
3347 tmp = load_reg(s, rn);
3348 gen_vfp_msr(tmp);
3349 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3350 } else {
8387da81 3351 tmp = load_reg(s, rd);
4373f3ce 3352 gen_vfp_msr(tmp);
b7bcbe95 3353 gen_mov_vreg_F0(0, rm);
8387da81 3354 tmp = load_reg(s, rn);
4373f3ce 3355 gen_vfp_msr(tmp);
b7bcbe95
FB
3356 gen_mov_vreg_F0(0, rm + 1);
3357 }
3358 }
3359 } else {
3360 /* Load/store */
3361 rn = (insn >> 16) & 0xf;
3362 if (dp)
9ee6e8bb 3363 VFP_DREG_D(rd, insn);
b7bcbe95 3364 else
9ee6e8bb
PB
3365 rd = VFP_SREG_D(insn);
3366 if (s->thumb && rn == 15) {
7d1b0095 3367 addr = tcg_temp_new_i32();
312eea9f 3368 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3369 } else {
312eea9f 3370 addr = load_reg(s, rn);
9ee6e8bb 3371 }
b7bcbe95
FB
3372 if ((insn & 0x01200000) == 0x01000000) {
3373 /* Single load/store */
3374 offset = (insn & 0xff) << 2;
3375 if ((insn & (1 << 23)) == 0)
3376 offset = -offset;
312eea9f 3377 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3378 if (insn & (1 << 20)) {
312eea9f 3379 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3380 gen_mov_vreg_F0(dp, rd);
3381 } else {
3382 gen_mov_F0_vreg(dp, rd);
312eea9f 3383 gen_vfp_st(s, dp, addr);
b7bcbe95 3384 }
7d1b0095 3385 tcg_temp_free_i32(addr);
b7bcbe95
FB
3386 } else {
3387 /* load/store multiple */
3388 if (dp)
3389 n = (insn >> 1) & 0x7f;
3390 else
3391 n = insn & 0xff;
3392
3393 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3394 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3395
3396 if (dp)
3397 offset = 8;
3398 else
3399 offset = 4;
3400 for (i = 0; i < n; i++) {
18c9b560 3401 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3402 /* load */
312eea9f 3403 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3404 gen_mov_vreg_F0(dp, rd + i);
3405 } else {
3406 /* store */
3407 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3408 gen_vfp_st(s, dp, addr);
b7bcbe95 3409 }
312eea9f 3410 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3411 }
3412 if (insn & (1 << 21)) {
3413 /* writeback */
3414 if (insn & (1 << 24))
3415 offset = -offset * n;
3416 else if (dp && (insn & 1))
3417 offset = 4;
3418 else
3419 offset = 0;
3420
3421 if (offset != 0)
312eea9f
FN
3422 tcg_gen_addi_i32(addr, addr, offset);
3423 store_reg(s, rn, addr);
3424 } else {
7d1b0095 3425 tcg_temp_free_i32(addr);
b7bcbe95
FB
3426 }
3427 }
3428 }
3429 break;
3430 default:
3431 /* Should never happen. */
3432 return 1;
3433 }
3434 return 0;
3435}
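/* Illustrative sketch (not part of the original source): the register
 * stepping used by the short-vector loop in disas_vfp_insn() above.  The
 * register index advances by the vector stride but wraps inside its bank
 * (bank_mask is 0xc for doubles, 0x18 for singles), so a vector operation
 * never walks out of the bank it started in.  Function name hypothetical. */
static int model_vfp_vec_step(int reg, int delta, int bank_mask)
{
    return ((reg + delta) & (bank_mask - 1)) | (reg & bank_mask);
}
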
3436
6e256c93 3437static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3438{
6e256c93
FB
3439 TranslationBlock *tb;
3440
3441 tb = s->tb;
3442 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3443 tcg_gen_goto_tb(n);
8984bd2e 3444 gen_set_pc_im(dest);
4b4a72e5 3445 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3446 } else {
8984bd2e 3447 gen_set_pc_im(dest);
57fec1fe 3448 tcg_gen_exit_tb(0);
6e256c93 3449 }
c53be334
FB
3450}
3451
8aaca4c0
FB
3452static inline void gen_jmp (DisasContext *s, uint32_t dest)
3453{
551bd27f 3454 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3455 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3456 if (s->thumb)
d9ba4830
PB
3457 dest |= 1;
3458 gen_bx_im(s, dest);
8aaca4c0 3459 } else {
6e256c93 3460 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3461 s->is_jmp = DISAS_TB_JUMP;
3462 }
3463}
3464
d9ba4830 3465static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3466{
ee097184 3467 if (x)
d9ba4830 3468 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3469 else
d9ba4830 3470 gen_sxth(t0);
ee097184 3471 if (y)
d9ba4830 3472 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3473 else
d9ba4830
PB
3474 gen_sxth(t1);
3475 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3476}
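/* Illustrative sketch (not part of the original source): gen_mulxy() above
 * as plain C.  x and y select the top or bottom signed halfword of each
 * operand, as in the ARM SMULxy family.  Function name hypothetical. */
static int32_t model_mulxy(uint32_t t0, uint32_t t1, int x, int y)
{
    int32_t a = (int16_t)(x ? (t0 >> 16) : t0);
    int32_t b = (int16_t)(y ? (t1 >> 16) : t1);

    return a * b;
}
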
3477
3478/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3479static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3480 uint32_t mask;
3481
3482 mask = 0;
3483 if (flags & (1 << 0))
3484 mask |= 0xff;
3485 if (flags & (1 << 1))
3486 mask |= 0xff00;
3487 if (flags & (1 << 2))
3488 mask |= 0xff0000;
3489 if (flags & (1 << 3))
3490 mask |= 0xff000000;
9ee6e8bb 3491
2ae23e75 3492 /* Mask out undefined bits. */
9ee6e8bb 3493 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3494 if (!arm_feature(env, ARM_FEATURE_V4T))
3495 mask &= ~CPSR_T;
3496 if (!arm_feature(env, ARM_FEATURE_V5))
 3497 mask &= ~CPSR_Q; /* V5TE in reality */
9ee6e8bb 3498 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3499 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3500 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3501 mask &= ~CPSR_IT;
9ee6e8bb 3502 /* Mask out execution state bits. */
2ae23e75 3503 if (!spsr)
e160c51c 3504 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3505 /* Mask out privileged bits. */
3506 if (IS_USER(s))
9ee6e8bb 3507 mask &= CPSR_USER;
b5ff1b31
FB
3508 return mask;
3509}
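/* Illustrative sketch (not part of the original source): the first step of
 * msr_mask() above, expanding the four MSR field-mask bits into byte lanes of
 * the PSR before the feature-dependent and privileged bits are masked off.
 * Function name hypothetical. */
static uint32_t model_msr_field_mask(int flags)
{
    uint32_t mask = 0;
    int i;

    for (i = 0; i < 4; i++) {
        if (flags & (1 << i)) {
            mask |= 0xffu << (8 * i);
        }
    }
    return mask;
}
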
3510
2fbac54b
FN
3511/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3512static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3513{
d9ba4830 3514 TCGv tmp;
b5ff1b31
FB
3515 if (spsr) {
3516 /* ??? This is also undefined in system mode. */
3517 if (IS_USER(s))
3518 return 1;
d9ba4830
PB
3519
3520 tmp = load_cpu_field(spsr);
3521 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3522 tcg_gen_andi_i32(t0, t0, mask);
3523 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3524 store_cpu_field(tmp, spsr);
b5ff1b31 3525 } else {
2fbac54b 3526 gen_set_cpsr(t0, mask);
b5ff1b31 3527 }
7d1b0095 3528 tcg_temp_free_i32(t0);
b5ff1b31
FB
3529 gen_lookup_tb(s);
3530 return 0;
3531}
3532
2fbac54b
FN
3533/* Returns nonzero if access to the PSR is not permitted. */
3534static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3535{
3536 TCGv tmp;
7d1b0095 3537 tmp = tcg_temp_new_i32();
2fbac54b
FN
3538 tcg_gen_movi_i32(tmp, val);
3539 return gen_set_psr(s, mask, spsr, tmp);
3540}
3541
e9bb4aa9
JR
3542/* Generate an old-style exception return. Marks pc as dead. */
3543static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3544{
d9ba4830 3545 TCGv tmp;
e9bb4aa9 3546 store_reg(s, 15, pc);
d9ba4830
PB
3547 tmp = load_cpu_field(spsr);
3548 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3549 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3550 s->is_jmp = DISAS_UPDATE;
3551}
3552
b0109805
PB
3553/* Generate a v6 exception return. Marks both values as dead. */
3554static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3555{
b0109805 3556 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3557 tcg_temp_free_i32(cpsr);
b0109805 3558 store_reg(s, 15, pc);
9ee6e8bb
PB
3559 s->is_jmp = DISAS_UPDATE;
3560}
3b46e624 3561
9ee6e8bb
PB
3562static inline void
3563gen_set_condexec (DisasContext *s)
3564{
3565 if (s->condexec_mask) {
8f01245e 3566 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3567 TCGv tmp = tcg_temp_new_i32();
8f01245e 3568 tcg_gen_movi_i32(tmp, val);
d9ba4830 3569 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3570 }
3571}
3b46e624 3572
bc4a0de0
PM
3573static void gen_exception_insn(DisasContext *s, int offset, int excp)
3574{
3575 gen_set_condexec(s);
3576 gen_set_pc_im(s->pc - offset);
3577 gen_exception(excp);
3578 s->is_jmp = DISAS_JUMP;
3579}
3580
9ee6e8bb
PB
3581static void gen_nop_hint(DisasContext *s, int val)
3582{
3583 switch (val) {
3584 case 3: /* wfi */
8984bd2e 3585 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3586 s->is_jmp = DISAS_WFI;
3587 break;
3588 case 2: /* wfe */
3589 case 4: /* sev */
3590 /* TODO: Implement SEV and WFE. May help SMP performance. */
3591 default: /* nop */
3592 break;
3593 }
3594}
99c475ab 3595
ad69471c 3596#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3597
62698be3 3598static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3599{
3600 switch (size) {
dd8fbd78
FN
3601 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3602 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3603 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3604 default: abort();
9ee6e8bb 3605 }
9ee6e8bb
PB
3606}
3607
dd8fbd78 3608static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3609{
3610 switch (size) {
dd8fbd78
FN
3611 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3612 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3613 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3614 default: return;
3615 }
3616}
3617
3618/* 32-bit pairwise ops end up the same as the elementwise versions. */
3619#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3620#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3621#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3622#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3623
ad69471c
PB
3624#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3625 switch ((size << 1) | u) { \
3626 case 0: \
dd8fbd78 3627 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3628 break; \
3629 case 1: \
dd8fbd78 3630 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3631 break; \
3632 case 2: \
dd8fbd78 3633 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3634 break; \
3635 case 3: \
dd8fbd78 3636 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3637 break; \
3638 case 4: \
dd8fbd78 3639 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3640 break; \
3641 case 5: \
dd8fbd78 3642 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3643 break; \
3644 default: return 1; \
3645 }} while (0)
9ee6e8bb
PB
3646
3647#define GEN_NEON_INTEGER_OP(name) do { \
3648 switch ((size << 1) | u) { \
ad69471c 3649 case 0: \
dd8fbd78 3650 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3651 break; \
3652 case 1: \
dd8fbd78 3653 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3654 break; \
3655 case 2: \
dd8fbd78 3656 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3657 break; \
3658 case 3: \
dd8fbd78 3659 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3660 break; \
3661 case 4: \
dd8fbd78 3662 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3663 break; \
3664 case 5: \
dd8fbd78 3665 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3666 break; \
9ee6e8bb
PB
3667 default: return 1; \
3668 }} while (0)
3669
dd8fbd78 3670static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3671{
7d1b0095 3672 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3673 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3674 return tmp;
9ee6e8bb
PB
3675}
3676
dd8fbd78 3677static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3678{
dd8fbd78 3679 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3680 tcg_temp_free_i32(var);
9ee6e8bb
PB
3681}
3682
dd8fbd78 3683static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3684{
dd8fbd78 3685 TCGv tmp;
9ee6e8bb 3686 if (size == 1) {
0fad6efc
PM
3687 tmp = neon_load_reg(reg & 7, reg >> 4);
3688 if (reg & 8) {
dd8fbd78 3689 gen_neon_dup_high16(tmp);
0fad6efc
PM
3690 } else {
3691 gen_neon_dup_low16(tmp);
dd8fbd78 3692 }
0fad6efc
PM
3693 } else {
3694 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3695 }
dd8fbd78 3696 return tmp;
9ee6e8bb
PB
3697}
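/* Illustrative sketch (not part of the original source): how
 * neon_get_scalar() above splits its 'reg' argument into a D register and an
 * element index.  For 16-bit scalars the register is reg[2:0] and the element
 * is {reg[4], reg[3]}; for 32-bit scalars the register is reg[3:0] and the
 * element is reg[4].  Function name hypothetical. */
static void model_neon_scalar_index(int reg, int size, int *dreg, int *element)
{
    if (size == 1) {                        /* 16-bit scalar */
        *dreg    = reg & 7;
        *element = ((reg >> 4) << 1) | ((reg >> 3) & 1);
    } else {                                /* 32-bit scalar */
        *dreg    = reg & 15;
        *element = reg >> 4;
    }
}
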
3698
02acedf9 3699static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3700{
02acedf9 3701 TCGv tmp, tmp2;
600b828c 3702 if (!q && size == 2) {
02acedf9
PM
3703 return 1;
3704 }
3705 tmp = tcg_const_i32(rd);
3706 tmp2 = tcg_const_i32(rm);
3707 if (q) {
3708 switch (size) {
3709 case 0:
2a3f75b4 3710 gen_helper_neon_qunzip8(tmp, tmp2);
02acedf9
PM
3711 break;
3712 case 1:
2a3f75b4 3713 gen_helper_neon_qunzip16(tmp, tmp2);
02acedf9
PM
3714 break;
3715 case 2:
2a3f75b4 3716 gen_helper_neon_qunzip32(tmp, tmp2);
02acedf9
PM
3717 break;
3718 default:
3719 abort();
3720 }
3721 } else {
3722 switch (size) {
3723 case 0:
2a3f75b4 3724 gen_helper_neon_unzip8(tmp, tmp2);
02acedf9
PM
3725 break;
3726 case 1:
2a3f75b4 3727 gen_helper_neon_unzip16(tmp, tmp2);
02acedf9
PM
3728 break;
3729 default:
3730 abort();
3731 }
3732 }
3733 tcg_temp_free_i32(tmp);
3734 tcg_temp_free_i32(tmp2);
3735 return 0;
19457615
FN
3736}
3737
d68a6f3a 3738static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3739{
3740 TCGv tmp, tmp2;
600b828c 3741 if (!q && size == 2) {
d68a6f3a
PM
3742 return 1;
3743 }
3744 tmp = tcg_const_i32(rd);
3745 tmp2 = tcg_const_i32(rm);
3746 if (q) {
3747 switch (size) {
3748 case 0:
2a3f75b4 3749 gen_helper_neon_qzip8(tmp, tmp2);
d68a6f3a
PM
3750 break;
3751 case 1:
2a3f75b4 3752 gen_helper_neon_qzip16(tmp, tmp2);
d68a6f3a
PM
3753 break;
3754 case 2:
2a3f75b4 3755 gen_helper_neon_qzip32(tmp, tmp2);
d68a6f3a
PM
3756 break;
3757 default:
3758 abort();
3759 }
3760 } else {
3761 switch (size) {
3762 case 0:
2a3f75b4 3763 gen_helper_neon_zip8(tmp, tmp2);
d68a6f3a
PM
3764 break;
3765 case 1:
2a3f75b4 3766 gen_helper_neon_zip16(tmp, tmp2);
d68a6f3a
PM
3767 break;
3768 default:
3769 abort();
3770 }
3771 }
3772 tcg_temp_free_i32(tmp);
3773 tcg_temp_free_i32(tmp2);
3774 return 0;
19457615
FN
3775}
3776
19457615
FN
3777static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3778{
3779 TCGv rd, tmp;
3780
7d1b0095
PM
3781 rd = tcg_temp_new_i32();
3782 tmp = tcg_temp_new_i32();
19457615
FN
3783
3784 tcg_gen_shli_i32(rd, t0, 8);
3785 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3786 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3787 tcg_gen_or_i32(rd, rd, tmp);
3788
3789 tcg_gen_shri_i32(t1, t1, 8);
3790 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3791 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3792 tcg_gen_or_i32(t1, t1, tmp);
3793 tcg_gen_mov_i32(t0, rd);
3794
7d1b0095
PM
3795 tcg_temp_free_i32(tmp);
3796 tcg_temp_free_i32(rd);
19457615
FN
3797}
3798
3799static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3800{
3801 TCGv rd, tmp;
3802
7d1b0095
PM
3803 rd = tcg_temp_new_i32();
3804 tmp = tcg_temp_new_i32();
19457615
FN
3805
3806 tcg_gen_shli_i32(rd, t0, 16);
3807 tcg_gen_andi_i32(tmp, t1, 0xffff);
3808 tcg_gen_or_i32(rd, rd, tmp);
3809 tcg_gen_shri_i32(t1, t1, 16);
3810 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3811 tcg_gen_or_i32(t1, t1, tmp);
3812 tcg_gen_mov_i32(t0, rd);
3813
7d1b0095
PM
3814 tcg_temp_free_i32(tmp);
3815 tcg_temp_free_i32(rd);
19457615
FN
3816}
3817
3818
9ee6e8bb
PB
3819static struct {
3820 int nregs;
3821 int interleave;
3822 int spacing;
3823} neon_ls_element_type[11] = {
3824 {4, 4, 1},
3825 {4, 4, 2},
3826 {4, 1, 1},
3827 {4, 2, 1},
3828 {3, 3, 1},
3829 {3, 3, 2},
3830 {3, 1, 1},
3831 {1, 1, 1},
3832 {2, 2, 1},
3833 {2, 2, 2},
3834 {2, 1, 1}
3835};
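/* Reading the table: the index is the op field, e.g. op == 7 ({1, 1, 1})
 * moves a single D register of consecutive elements, while op == 0
 * ({4, 4, 1}) moves four registers with 4-way interleaving. In the code
 * below stride = (1 << size) * interleave, so 16-bit elements (size == 1)
 * with interleave == 4 advance the address by 8 bytes between successive
 * elements of the same register. */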
3836
3837/* Translate a NEON load/store element instruction. Return nonzero if the
3838 instruction is invalid. */
3839static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3840{
3841 int rd, rn, rm;
3842 int op;
3843 int nregs;
3844 int interleave;
84496233 3845 int spacing;
9ee6e8bb
PB
3846 int stride;
3847 int size;
3848 int reg;
3849 int pass;
3850 int load;
3851 int shift;
9ee6e8bb 3852 int n;
1b2b1e54 3853 TCGv addr;
b0109805 3854 TCGv tmp;
8f8e3aa4 3855 TCGv tmp2;
84496233 3856 TCGv_i64 tmp64;
9ee6e8bb 3857
5df8bac1 3858 if (!s->vfp_enabled)
9ee6e8bb
PB
3859 return 1;
3860 VFP_DREG_D(rd, insn);
3861 rn = (insn >> 16) & 0xf;
3862 rm = insn & 0xf;
3863 load = (insn & (1 << 21)) != 0;
3864 if ((insn & (1 << 23)) == 0) {
3865 /* Load store all elements. */
3866 op = (insn >> 8) & 0xf;
3867 size = (insn >> 6) & 3;
84496233 3868 if (op > 10)
9ee6e8bb 3869 return 1;
f2dd89d0
PM
3870 /* Catch UNDEF cases for bad values of align field */
3871 switch (op & 0xc) {
3872 case 4:
3873 if (((insn >> 5) & 1) == 1) {
3874 return 1;
3875 }
3876 break;
3877 case 8:
3878 if (((insn >> 4) & 3) == 3) {
3879 return 1;
3880 }
3881 break;
3882 default:
3883 break;
3884 }
9ee6e8bb
PB
3885 nregs = neon_ls_element_type[op].nregs;
3886 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3887 spacing = neon_ls_element_type[op].spacing;
3888 if (size == 3 && (interleave | spacing) != 1)
3889 return 1;
e318a60b 3890 addr = tcg_temp_new_i32();
dcc65026 3891 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3892 stride = (1 << size) * interleave;
3893 for (reg = 0; reg < nregs; reg++) {
3894 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3895 load_reg_var(s, addr, rn);
3896 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3897 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3898 load_reg_var(s, addr, rn);
3899 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3900 }
84496233
JR
3901 if (size == 3) {
3902 if (load) {
3903 tmp64 = gen_ld64(addr, IS_USER(s));
3904 neon_store_reg64(tmp64, rd);
3905 tcg_temp_free_i64(tmp64);
3906 } else {
3907 tmp64 = tcg_temp_new_i64();
3908 neon_load_reg64(tmp64, rd);
3909 gen_st64(tmp64, addr, IS_USER(s));
3910 }
3911 tcg_gen_addi_i32(addr, addr, stride);
3912 } else {
3913 for (pass = 0; pass < 2; pass++) {
3914 if (size == 2) {
3915 if (load) {
3916 tmp = gen_ld32(addr, IS_USER(s));
3917 neon_store_reg(rd, pass, tmp);
3918 } else {
3919 tmp = neon_load_reg(rd, pass);
3920 gen_st32(tmp, addr, IS_USER(s));
3921 }
1b2b1e54 3922 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3923 } else if (size == 1) {
3924 if (load) {
3925 tmp = gen_ld16u(addr, IS_USER(s));
3926 tcg_gen_addi_i32(addr, addr, stride);
3927 tmp2 = gen_ld16u(addr, IS_USER(s));
3928 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3929 tcg_gen_shli_i32(tmp2, tmp2, 16);
3930 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3931 tcg_temp_free_i32(tmp2);
84496233
JR
3932 neon_store_reg(rd, pass, tmp);
3933 } else {
3934 tmp = neon_load_reg(rd, pass);
7d1b0095 3935 tmp2 = tcg_temp_new_i32();
84496233
JR
3936 tcg_gen_shri_i32(tmp2, tmp, 16);
3937 gen_st16(tmp, addr, IS_USER(s));
3938 tcg_gen_addi_i32(addr, addr, stride);
3939 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3940 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3941 }
84496233
JR
3942 } else /* size == 0 */ {
3943 if (load) {
3944 TCGV_UNUSED(tmp2);
3945 for (n = 0; n < 4; n++) {
3946 tmp = gen_ld8u(addr, IS_USER(s));
3947 tcg_gen_addi_i32(addr, addr, stride);
3948 if (n == 0) {
3949 tmp2 = tmp;
3950 } else {
41ba8341
PB
3951 tcg_gen_shli_i32(tmp, tmp, n * 8);
3952 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3953 tcg_temp_free_i32(tmp);
84496233 3954 }
9ee6e8bb 3955 }
84496233
JR
3956 neon_store_reg(rd, pass, tmp2);
3957 } else {
3958 tmp2 = neon_load_reg(rd, pass);
3959 for (n = 0; n < 4; n++) {
7d1b0095 3960 tmp = tcg_temp_new_i32();
84496233
JR
3961 if (n == 0) {
3962 tcg_gen_mov_i32(tmp, tmp2);
3963 } else {
3964 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3965 }
3966 gen_st8(tmp, addr, IS_USER(s));
3967 tcg_gen_addi_i32(addr, addr, stride);
3968 }
7d1b0095 3969 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3970 }
3971 }
3972 }
3973 }
84496233 3974 rd += spacing;
9ee6e8bb 3975 }
e318a60b 3976 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3977 stride = nregs * 8;
3978 } else {
3979 size = (insn >> 10) & 3;
3980 if (size == 3) {
3981 /* Load single element to all lanes. */
8e18cde3
PM
3982 int a = (insn >> 4) & 1;
3983 if (!load) {
9ee6e8bb 3984 return 1;
8e18cde3 3985 }
9ee6e8bb
PB
3986 size = (insn >> 6) & 3;
3987 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3988
3989 if (size == 3) {
3990 if (nregs != 4 || a == 0) {
9ee6e8bb 3991 return 1;
99c475ab 3992 }
8e18cde3
PM
3993 /* For VLD4, size == 3 with a == 1 means 32-bit elements at 16-byte alignment */
3994 size = 2;
3995 }
3996 if (nregs == 1 && a == 1 && size == 0) {
3997 return 1;
3998 }
3999 if (nregs == 3 && a == 1) {
4000 return 1;
4001 }
e318a60b 4002 addr = tcg_temp_new_i32();
8e18cde3
PM
4003 load_reg_var(s, addr, rn);
4004 if (nregs == 1) {
4005 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4006 tmp = gen_load_and_replicate(s, addr, size);
4007 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4008 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4009 if (insn & (1 << 5)) {
4010 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4011 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4012 }
4013 tcg_temp_free_i32(tmp);
4014 } else {
4015 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4016 stride = (insn & (1 << 5)) ? 2 : 1;
4017 for (reg = 0; reg < nregs; reg++) {
4018 tmp = gen_load_and_replicate(s, addr, size);
4019 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4020 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4021 tcg_temp_free_i32(tmp);
4022 tcg_gen_addi_i32(addr, addr, 1 << size);
4023 rd += stride;
4024 }
9ee6e8bb 4025 }
e318a60b 4026 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4027 stride = (1 << size) * nregs;
4028 } else {
4029 /* Single element. */
93262b16 4030 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
4031 pass = (insn >> 7) & 1;
4032 switch (size) {
4033 case 0:
4034 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
4035 stride = 1;
4036 break;
4037 case 1:
4038 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4039 stride = (insn & (1 << 5)) ? 2 : 1;
4040 break;
4041 case 2:
4042 shift = 0;
9ee6e8bb
PB
4043 stride = (insn & (1 << 6)) ? 2 : 1;
4044 break;
4045 default:
4046 abort();
4047 }
4048 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4049 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4050 switch (nregs) {
4051 case 1:
4052 if (((idx & (1 << size)) != 0) ||
4053 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4054 return 1;
4055 }
4056 break;
4057 case 3:
4058 if ((idx & 1) != 0) {
4059 return 1;
4060 }
4061 /* fall through */
4062 case 2:
4063 if (size == 2 && (idx & 2) != 0) {
4064 return 1;
4065 }
4066 break;
4067 case 4:
4068 if ((size == 2) && ((idx & 3) == 3)) {
4069 return 1;
4070 }
4071 break;
4072 default:
4073 abort();
4074 }
4075 if ((rd + stride * (nregs - 1)) > 31) {
4076 /* Attempts to write off the end of the register file
4077 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4078 * the neon_load_reg() would write off the end of the array.
4079 */
4080 return 1;
4081 }
e318a60b 4082 addr = tcg_temp_new_i32();
dcc65026 4083 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4084 for (reg = 0; reg < nregs; reg++) {
4085 if (load) {
9ee6e8bb
PB
4086 switch (size) {
4087 case 0:
1b2b1e54 4088 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
4089 break;
4090 case 1:
1b2b1e54 4091 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
4092 break;
4093 case 2:
1b2b1e54 4094 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 4095 break;
a50f5b91
PB
4096 default: /* Avoid compiler warnings. */
4097 abort();
9ee6e8bb
PB
4098 }
4099 if (size != 2) {
8f8e3aa4
PB
4100 tmp2 = neon_load_reg(rd, pass);
4101 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
7d1b0095 4102 tcg_temp_free_i32(tmp2);
9ee6e8bb 4103 }
8f8e3aa4 4104 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4105 } else { /* Store */
8f8e3aa4
PB
4106 tmp = neon_load_reg(rd, pass);
4107 if (shift)
4108 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4109 switch (size) {
4110 case 0:
1b2b1e54 4111 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4112 break;
4113 case 1:
1b2b1e54 4114 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4115 break;
4116 case 2:
1b2b1e54 4117 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4118 break;
99c475ab 4119 }
99c475ab 4120 }
9ee6e8bb 4121 rd += stride;
1b2b1e54 4122 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4123 }
e318a60b 4124 tcg_temp_free_i32(addr);
9ee6e8bb 4125 stride = nregs * (1 << size);
99c475ab 4126 }
9ee6e8bb
PB
4127 }
4128 if (rm != 15) {
b26eefb6
PB
4129 TCGv base;
4130
4131 base = load_reg(s, rn);
9ee6e8bb 4132 if (rm == 13) {
b26eefb6 4133 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4134 } else {
b26eefb6
PB
4135 TCGv index;
4136 index = load_reg(s, rm);
4137 tcg_gen_add_i32(base, base, index);
7d1b0095 4138 tcg_temp_free_i32(index);
9ee6e8bb 4139 }
b26eefb6 4140 store_reg(s, rn, base);
9ee6e8bb
PB
4141 }
4142 return 0;
4143}
3b46e624 4144
8f8e3aa4
PB
4145/* Bitwise select. dest = c ? t : f. Clobbers t and f. */
4146static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4147{
4148 tcg_gen_and_i32(t, t, c);
f669df27 4149 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4150 tcg_gen_or_i32(dest, t, f);
4151}
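/* Bit-level example: with c = 0xff00ff00, t = 0x11223344, f = 0xaabbccdd the
 * result is (t & c) | (f & ~c) = 0x11bb33dd. The VBSL/VBIT/VBIF cases below
 * differ only in how the three source values are permuted across t, f and c. */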
4152
a7812ae4 4153static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4154{
4155 switch (size) {
4156 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4157 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4158 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4159 default: abort();
4160 }
4161}
4162
a7812ae4 4163static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4164{
4165 switch (size) {
2a3f75b4
PM
4166 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4167 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4168 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
ad69471c
PB
4169 default: abort();
4170 }
4171}
4172
a7812ae4 4173static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4174{
4175 switch (size) {
2a3f75b4
PM
4176 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4177 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4178 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
ad69471c
PB
4179 default: abort();
4180 }
4181}
4182
af1bbf30
JR
4183static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4184{
4185 switch (size) {
2a3f75b4
PM
4186 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4187 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4188 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
af1bbf30
JR
4189 default: abort();
4190 }
4191}
4192
ad69471c
PB
4193static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4194 int q, int u)
4195{
4196 if (q) {
4197 if (u) {
4198 switch (size) {
4199 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4200 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4201 default: abort();
4202 }
4203 } else {
4204 switch (size) {
4205 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4206 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4207 default: abort();
4208 }
4209 }
4210 } else {
4211 if (u) {
4212 switch (size) {
b408a9b0
CL
4213 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4214 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4215 default: abort();
4216 }
4217 } else {
4218 switch (size) {
4219 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4220 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4221 default: abort();
4222 }
4223 }
4224 }
4225}
4226
a7812ae4 4227static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4228{
4229 if (u) {
4230 switch (size) {
4231 case 0: gen_helper_neon_widen_u8(dest, src); break;
4232 case 1: gen_helper_neon_widen_u16(dest, src); break;
4233 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4234 default: abort();
4235 }
4236 } else {
4237 switch (size) {
4238 case 0: gen_helper_neon_widen_s8(dest, src); break;
4239 case 1: gen_helper_neon_widen_s16(dest, src); break;
4240 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4241 default: abort();
4242 }
4243 }
7d1b0095 4244 tcg_temp_free_i32(src);
ad69471c
PB
4245}
4246
4247static inline void gen_neon_addl(int size)
4248{
4249 switch (size) {
4250 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4251 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4252 case 2: tcg_gen_add_i64(CPU_V001); break;
4253 default: abort();
4254 }
4255}
4256
4257static inline void gen_neon_subl(int size)
4258{
4259 switch (size) {
4260 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4261 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4262 case 2: tcg_gen_sub_i64(CPU_V001); break;
4263 default: abort();
4264 }
4265}
4266
a7812ae4 4267static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4268{
4269 switch (size) {
4270 case 0: gen_helper_neon_negl_u16(var, var); break;
4271 case 1: gen_helper_neon_negl_u32(var, var); break;
4272 case 2: gen_helper_neon_negl_u64(var, var); break;
4273 default: abort();
4274 }
4275}
4276
a7812ae4 4277static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4278{
4279 switch (size) {
2a3f75b4
PM
4280 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4281 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
ad69471c
PB
4282 default: abort();
4283 }
4284}
4285
a7812ae4 4286static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4287{
a7812ae4 4288 TCGv_i64 tmp;
ad69471c
PB
4289
4290 switch ((size << 1) | u) {
4291 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4292 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4293 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4294 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4295 case 4:
4296 tmp = gen_muls_i64_i32(a, b);
4297 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4298 tcg_temp_free_i64(tmp);
ad69471c
PB
4299 break;
4300 case 5:
4301 tmp = gen_mulu_i64_i32(a, b);
4302 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4303 tcg_temp_free_i64(tmp);
ad69471c
PB
4304 break;
4305 default: abort();
4306 }
c6067f04
CL
4307
4308 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4309 Don't forget to clean them now. */
4310 if (size < 2) {
7d1b0095
PM
4311 tcg_temp_free_i32(a);
4312 tcg_temp_free_i32(b);
c6067f04 4313 }
ad69471c
PB
4314}
4315
c33171c7
PM
4316static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4317{
4318 if (op) {
4319 if (u) {
4320 gen_neon_unarrow_sats(size, dest, src);
4321 } else {
4322 gen_neon_narrow(size, dest, src);
4323 }
4324 } else {
4325 if (u) {
4326 gen_neon_narrow_satu(size, dest, src);
4327 } else {
4328 gen_neon_narrow_sats(size, dest, src);
4329 }
4330 }
4331}
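/* As used by the shift-and-narrow path below, this is called as
 * gen_neon_narrow_op(op == 8, u, size - 1, ...), so op 8 with u == 0
 * (VSHRN/VRSHRN) takes the plain truncating narrow, op 8 with u == 1
 * (VQSHRUN/VQRSHRUN) the signed-to-unsigned saturating narrow, and op 9
 * (VQSHRN/VQRSHRN) a signed or unsigned saturating narrow according to u. */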
4332
62698be3
PM
4333/* Symbolic constants for op fields for Neon 3-register same-length.
4334 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4335 * table A7-9.
4336 */
4337#define NEON_3R_VHADD 0
4338#define NEON_3R_VQADD 1
4339#define NEON_3R_VRHADD 2
4340#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4341#define NEON_3R_VHSUB 4
4342#define NEON_3R_VQSUB 5
4343#define NEON_3R_VCGT 6
4344#define NEON_3R_VCGE 7
4345#define NEON_3R_VSHL 8
4346#define NEON_3R_VQSHL 9
4347#define NEON_3R_VRSHL 10
4348#define NEON_3R_VQRSHL 11
4349#define NEON_3R_VMAX 12
4350#define NEON_3R_VMIN 13
4351#define NEON_3R_VABD 14
4352#define NEON_3R_VABA 15
4353#define NEON_3R_VADD_VSUB 16
4354#define NEON_3R_VTST_VCEQ 17
4355#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4356#define NEON_3R_VMUL 19
4357#define NEON_3R_VPMAX 20
4358#define NEON_3R_VPMIN 21
4359#define NEON_3R_VQDMULH_VQRDMULH 22
4360#define NEON_3R_VPADD 23
4361#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4362#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4363#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4364#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4365#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4366#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4367
4368static const uint8_t neon_3r_sizes[] = {
4369 [NEON_3R_VHADD] = 0x7,
4370 [NEON_3R_VQADD] = 0xf,
4371 [NEON_3R_VRHADD] = 0x7,
4372 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4373 [NEON_3R_VHSUB] = 0x7,
4374 [NEON_3R_VQSUB] = 0xf,
4375 [NEON_3R_VCGT] = 0x7,
4376 [NEON_3R_VCGE] = 0x7,
4377 [NEON_3R_VSHL] = 0xf,
4378 [NEON_3R_VQSHL] = 0xf,
4379 [NEON_3R_VRSHL] = 0xf,
4380 [NEON_3R_VQRSHL] = 0xf,
4381 [NEON_3R_VMAX] = 0x7,
4382 [NEON_3R_VMIN] = 0x7,
4383 [NEON_3R_VABD] = 0x7,
4384 [NEON_3R_VABA] = 0x7,
4385 [NEON_3R_VADD_VSUB] = 0xf,
4386 [NEON_3R_VTST_VCEQ] = 0x7,
4387 [NEON_3R_VML] = 0x7,
4388 [NEON_3R_VMUL] = 0x7,
4389 [NEON_3R_VPMAX] = 0x7,
4390 [NEON_3R_VPMIN] = 0x7,
4391 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4392 [NEON_3R_VPADD] = 0x7,
4393 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4394 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4395 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4396 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4397 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4398 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4399};
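/* As with neon_2rm_sizes below, bit n of each entry is set if the insn
 * accepts size value n: e.g. [NEON_3R_VHADD] = 0x7 allows the 8/16/32-bit
 * element sizes and UNDEFs for size == 3, while the 0xf entries (VQADD,
 * VSHL, ...) also accept the 64-bit case handled by the size == 3 path in
 * disas_neon_data_insn(). */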
4400
600b828c
PM
4401/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4402 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4403 * table A7-13.
4404 */
4405#define NEON_2RM_VREV64 0
4406#define NEON_2RM_VREV32 1
4407#define NEON_2RM_VREV16 2
4408#define NEON_2RM_VPADDL 4
4409#define NEON_2RM_VPADDL_U 5
4410#define NEON_2RM_VCLS 8
4411#define NEON_2RM_VCLZ 9
4412#define NEON_2RM_VCNT 10
4413#define NEON_2RM_VMVN 11
4414#define NEON_2RM_VPADAL 12
4415#define NEON_2RM_VPADAL_U 13
4416#define NEON_2RM_VQABS 14
4417#define NEON_2RM_VQNEG 15
4418#define NEON_2RM_VCGT0 16
4419#define NEON_2RM_VCGE0 17
4420#define NEON_2RM_VCEQ0 18
4421#define NEON_2RM_VCLE0 19
4422#define NEON_2RM_VCLT0 20
4423#define NEON_2RM_VABS 22
4424#define NEON_2RM_VNEG 23
4425#define NEON_2RM_VCGT0_F 24
4426#define NEON_2RM_VCGE0_F 25
4427#define NEON_2RM_VCEQ0_F 26
4428#define NEON_2RM_VCLE0_F 27
4429#define NEON_2RM_VCLT0_F 28
4430#define NEON_2RM_VABS_F 30
4431#define NEON_2RM_VNEG_F 31
4432#define NEON_2RM_VSWP 32
4433#define NEON_2RM_VTRN 33
4434#define NEON_2RM_VUZP 34
4435#define NEON_2RM_VZIP 35
4436#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4437#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4438#define NEON_2RM_VSHLL 38
4439#define NEON_2RM_VCVT_F16_F32 44
4440#define NEON_2RM_VCVT_F32_F16 46
4441#define NEON_2RM_VRECPE 56
4442#define NEON_2RM_VRSQRTE 57
4443#define NEON_2RM_VRECPE_F 58
4444#define NEON_2RM_VRSQRTE_F 59
4445#define NEON_2RM_VCVT_FS 60
4446#define NEON_2RM_VCVT_FU 61
4447#define NEON_2RM_VCVT_SF 62
4448#define NEON_2RM_VCVT_UF 63
4449
4450static int neon_2rm_is_float_op(int op)
4451{
4452 /* Return true if this neon 2reg-misc op is float-to-float */
4453 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4454 op >= NEON_2RM_VRECPE_F);
4455}
4456
4457/* Each entry in this array has bit n set if the insn allows
4458 * size value n (otherwise it will UNDEF). Since unallocated
4459 * op values will have no bits set they always UNDEF.
4460 */
4461static const uint8_t neon_2rm_sizes[] = {
4462 [NEON_2RM_VREV64] = 0x7,
4463 [NEON_2RM_VREV32] = 0x3,
4464 [NEON_2RM_VREV16] = 0x1,
4465 [NEON_2RM_VPADDL] = 0x7,
4466 [NEON_2RM_VPADDL_U] = 0x7,
4467 [NEON_2RM_VCLS] = 0x7,
4468 [NEON_2RM_VCLZ] = 0x7,
4469 [NEON_2RM_VCNT] = 0x1,
4470 [NEON_2RM_VMVN] = 0x1,
4471 [NEON_2RM_VPADAL] = 0x7,
4472 [NEON_2RM_VPADAL_U] = 0x7,
4473 [NEON_2RM_VQABS] = 0x7,
4474 [NEON_2RM_VQNEG] = 0x7,
4475 [NEON_2RM_VCGT0] = 0x7,
4476 [NEON_2RM_VCGE0] = 0x7,
4477 [NEON_2RM_VCEQ0] = 0x7,
4478 [NEON_2RM_VCLE0] = 0x7,
4479 [NEON_2RM_VCLT0] = 0x7,
4480 [NEON_2RM_VABS] = 0x7,
4481 [NEON_2RM_VNEG] = 0x7,
4482 [NEON_2RM_VCGT0_F] = 0x4,
4483 [NEON_2RM_VCGE0_F] = 0x4,
4484 [NEON_2RM_VCEQ0_F] = 0x4,
4485 [NEON_2RM_VCLE0_F] = 0x4,
4486 [NEON_2RM_VCLT0_F] = 0x4,
4487 [NEON_2RM_VABS_F] = 0x4,
4488 [NEON_2RM_VNEG_F] = 0x4,
4489 [NEON_2RM_VSWP] = 0x1,
4490 [NEON_2RM_VTRN] = 0x7,
4491 [NEON_2RM_VUZP] = 0x7,
4492 [NEON_2RM_VZIP] = 0x7,
4493 [NEON_2RM_VMOVN] = 0x7,
4494 [NEON_2RM_VQMOVN] = 0x7,
4495 [NEON_2RM_VSHLL] = 0x7,
4496 [NEON_2RM_VCVT_F16_F32] = 0x2,
4497 [NEON_2RM_VCVT_F32_F16] = 0x2,
4498 [NEON_2RM_VRECPE] = 0x4,
4499 [NEON_2RM_VRSQRTE] = 0x4,
4500 [NEON_2RM_VRECPE_F] = 0x4,
4501 [NEON_2RM_VRSQRTE_F] = 0x4,
4502 [NEON_2RM_VCVT_FS] = 0x4,
4503 [NEON_2RM_VCVT_FU] = 0x4,
4504 [NEON_2RM_VCVT_SF] = 0x4,
4505 [NEON_2RM_VCVT_UF] = 0x4,
4506};
4507
9ee6e8bb
PB
4508/* Translate a NEON data processing instruction. Return nonzero if the
4509 instruction is invalid.
ad69471c
PB
4510 We process data in a mixture of 32-bit and 64-bit chunks.
4511 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4512
9ee6e8bb
PB
4513static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4514{
4515 int op;
4516 int q;
4517 int rd, rn, rm;
4518 int size;
4519 int shift;
4520 int pass;
4521 int count;
4522 int pairwise;
4523 int u;
ca9a32e4 4524 uint32_t imm, mask;
b75263d6 4525 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4526 TCGv_i64 tmp64;
9ee6e8bb 4527
5df8bac1 4528 if (!s->vfp_enabled)
9ee6e8bb
PB
4529 return 1;
4530 q = (insn & (1 << 6)) != 0;
4531 u = (insn >> 24) & 1;
4532 VFP_DREG_D(rd, insn);
4533 VFP_DREG_N(rn, insn);
4534 VFP_DREG_M(rm, insn);
4535 size = (insn >> 20) & 3;
4536 if ((insn & (1 << 23)) == 0) {
4537 /* Three register same length. */
4538 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4539 /* Catch invalid op and bad size combinations: UNDEF */
4540 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4541 return 1;
4542 }
25f84f79
PM
4543 /* All insns of this form UNDEF for either this condition or the
4544 * superset of cases "Q==1"; we catch the latter later.
4545 */
4546 if (q && ((rd | rn | rm) & 1)) {
4547 return 1;
4548 }
62698be3
PM
4549 if (size == 3 && op != NEON_3R_LOGIC) {
4550 /* 64-bit element instructions. */
9ee6e8bb 4551 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4552 neon_load_reg64(cpu_V0, rn + pass);
4553 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4554 switch (op) {
62698be3 4555 case NEON_3R_VQADD:
9ee6e8bb 4556 if (u) {
2a3f75b4 4557 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4558 } else {
2a3f75b4 4559 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4560 }
9ee6e8bb 4561 break;
62698be3 4562 case NEON_3R_VQSUB:
9ee6e8bb 4563 if (u) {
2a3f75b4 4564 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 4565 } else {
2a3f75b4 4566 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4567 }
4568 break;
62698be3 4569 case NEON_3R_VSHL:
ad69471c
PB
4570 if (u) {
4571 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4572 } else {
4573 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4574 }
4575 break;
62698be3 4576 case NEON_3R_VQSHL:
ad69471c 4577 if (u) {
2a3f75b4 4578 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4579 } else {
2a3f75b4 4580 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
ad69471c
PB
4581 }
4582 break;
62698be3 4583 case NEON_3R_VRSHL:
ad69471c
PB
4584 if (u) {
4585 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4586 } else {
ad69471c
PB
4587 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4588 }
4589 break;
62698be3 4590 case NEON_3R_VQRSHL:
ad69471c 4591 if (u) {
2a3f75b4 4592 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4593 } else {
2a3f75b4 4594 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4595 }
9ee6e8bb 4596 break;
62698be3 4597 case NEON_3R_VADD_VSUB:
9ee6e8bb 4598 if (u) {
ad69471c 4599 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4600 } else {
ad69471c 4601 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4602 }
4603 break;
4604 default:
4605 abort();
2c0262af 4606 }
ad69471c 4607 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4608 }
9ee6e8bb 4609 return 0;
2c0262af 4610 }
25f84f79 4611 pairwise = 0;
9ee6e8bb 4612 switch (op) {
62698be3
PM
4613 case NEON_3R_VSHL:
4614 case NEON_3R_VQSHL:
4615 case NEON_3R_VRSHL:
4616 case NEON_3R_VQRSHL:
9ee6e8bb 4617 {
ad69471c
PB
4618 int rtmp;
4619 /* Shift instruction operands are reversed. */
4620 rtmp = rn;
9ee6e8bb 4621 rn = rm;
ad69471c 4622 rm = rtmp;
9ee6e8bb 4623 }
2c0262af 4624 break;
25f84f79
PM
4625 case NEON_3R_VPADD:
4626 if (u) {
4627 return 1;
4628 }
4629 /* Fall through */
62698be3
PM
4630 case NEON_3R_VPMAX:
4631 case NEON_3R_VPMIN:
9ee6e8bb 4632 pairwise = 1;
2c0262af 4633 break;
25f84f79
PM
4634 case NEON_3R_FLOAT_ARITH:
4635 pairwise = (u && size < 2); /* if VPADD (float) */
4636 break;
4637 case NEON_3R_FLOAT_MINMAX:
4638 pairwise = u; /* if VPMIN/VPMAX (float) */
4639 break;
4640 case NEON_3R_FLOAT_CMP:
4641 if (!u && size) {
4642 /* no encoding for U=0 C=1x */
4643 return 1;
4644 }
4645 break;
4646 case NEON_3R_FLOAT_ACMP:
4647 if (!u) {
4648 return 1;
4649 }
4650 break;
4651 case NEON_3R_VRECPS_VRSQRTS:
4652 if (u) {
4653 return 1;
4654 }
2c0262af 4655 break;
25f84f79
PM
4656 case NEON_3R_VMUL:
4657 if (u && (size != 0)) {
4658 /* UNDEF on invalid size for polynomial subcase */
4659 return 1;
4660 }
2c0262af 4661 break;
9ee6e8bb 4662 default:
2c0262af 4663 break;
9ee6e8bb 4664 }
dd8fbd78 4665
25f84f79
PM
4666 if (pairwise && q) {
4667 /* All the pairwise insns UNDEF if Q is set */
4668 return 1;
4669 }
4670
9ee6e8bb
PB
4671 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4672
4673 if (pairwise) {
4674 /* Pairwise. */
a5a14945
JR
4675 if (pass < 1) {
4676 tmp = neon_load_reg(rn, 0);
4677 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4678 } else {
a5a14945
JR
4679 tmp = neon_load_reg(rm, 0);
4680 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4681 }
4682 } else {
4683 /* Elementwise. */
dd8fbd78
FN
4684 tmp = neon_load_reg(rn, pass);
4685 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4686 }
4687 switch (op) {
62698be3 4688 case NEON_3R_VHADD:
9ee6e8bb
PB
4689 GEN_NEON_INTEGER_OP(hadd);
4690 break;
62698be3 4691 case NEON_3R_VQADD:
2a3f75b4 4692 GEN_NEON_INTEGER_OP(qadd);
2c0262af 4693 break;
62698be3 4694 case NEON_3R_VRHADD:
9ee6e8bb 4695 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4696 break;
62698be3 4697 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4698 switch ((u << 2) | size) {
4699 case 0: /* VAND */
dd8fbd78 4700 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4701 break;
4702 case 1: /* BIC */
f669df27 4703 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4704 break;
4705 case 2: /* VORR */
dd8fbd78 4706 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4707 break;
4708 case 3: /* VORN */
f669df27 4709 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4710 break;
4711 case 4: /* VEOR */
dd8fbd78 4712 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4713 break;
4714 case 5: /* VBSL */
dd8fbd78
FN
4715 tmp3 = neon_load_reg(rd, pass);
4716 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4717 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4718 break;
4719 case 6: /* VBIT */
dd8fbd78
FN
4720 tmp3 = neon_load_reg(rd, pass);
4721 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4722 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4723 break;
4724 case 7: /* VBIF */
dd8fbd78
FN
4725 tmp3 = neon_load_reg(rd, pass);
4726 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4727 tcg_temp_free_i32(tmp3);
9ee6e8bb 4728 break;
2c0262af
FB
4729 }
4730 break;
62698be3 4731 case NEON_3R_VHSUB:
9ee6e8bb
PB
4732 GEN_NEON_INTEGER_OP(hsub);
4733 break;
62698be3 4734 case NEON_3R_VQSUB:
2a3f75b4 4735 GEN_NEON_INTEGER_OP(qsub);
2c0262af 4736 break;
62698be3 4737 case NEON_3R_VCGT:
9ee6e8bb
PB
4738 GEN_NEON_INTEGER_OP(cgt);
4739 break;
62698be3 4740 case NEON_3R_VCGE:
9ee6e8bb
PB
4741 GEN_NEON_INTEGER_OP(cge);
4742 break;
62698be3 4743 case NEON_3R_VSHL:
ad69471c 4744 GEN_NEON_INTEGER_OP(shl);
2c0262af 4745 break;
62698be3 4746 case NEON_3R_VQSHL:
2a3f75b4 4747 GEN_NEON_INTEGER_OP(qshl);
2c0262af 4748 break;
62698be3 4749 case NEON_3R_VRSHL:
ad69471c 4750 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4751 break;
62698be3 4752 case NEON_3R_VQRSHL:
2a3f75b4 4753 GEN_NEON_INTEGER_OP(qrshl);
9ee6e8bb 4754 break;
62698be3 4755 case NEON_3R_VMAX:
9ee6e8bb
PB
4756 GEN_NEON_INTEGER_OP(max);
4757 break;
62698be3 4758 case NEON_3R_VMIN:
9ee6e8bb
PB
4759 GEN_NEON_INTEGER_OP(min);
4760 break;
62698be3 4761 case NEON_3R_VABD:
9ee6e8bb
PB
4762 GEN_NEON_INTEGER_OP(abd);
4763 break;
62698be3 4764 case NEON_3R_VABA:
9ee6e8bb 4765 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4766 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4767 tmp2 = neon_load_reg(rd, pass);
4768 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4769 break;
62698be3 4770 case NEON_3R_VADD_VSUB:
9ee6e8bb 4771 if (!u) { /* VADD */
62698be3 4772 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4773 } else { /* VSUB */
4774 switch (size) {
dd8fbd78
FN
4775 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4776 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4777 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4778 default: abort();
9ee6e8bb
PB
4779 }
4780 }
4781 break;
62698be3 4782 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4783 if (!u) { /* VTST */
4784 switch (size) {
dd8fbd78
FN
4785 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4786 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4787 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4788 default: abort();
9ee6e8bb
PB
4789 }
4790 } else { /* VCEQ */
4791 switch (size) {
dd8fbd78
FN
4792 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4793 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4794 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4795 default: abort();
9ee6e8bb
PB
4796 }
4797 }
4798 break;
62698be3 4799 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 4800 switch (size) {
dd8fbd78
FN
4801 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4802 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4803 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4804 default: abort();
9ee6e8bb 4805 }
7d1b0095 4806 tcg_temp_free_i32(tmp2);
dd8fbd78 4807 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4808 if (u) { /* VMLS */
dd8fbd78 4809 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4810 } else { /* VMLA */
dd8fbd78 4811 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4812 }
4813 break;
62698be3 4814 case NEON_3R_VMUL:
9ee6e8bb 4815 if (u) { /* polynomial */
dd8fbd78 4816 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4817 } else { /* Integer */
4818 switch (size) {
dd8fbd78
FN
4819 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4820 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4821 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4822 default: abort();
9ee6e8bb
PB
4823 }
4824 }
4825 break;
62698be3 4826 case NEON_3R_VPMAX:
9ee6e8bb
PB
4827 GEN_NEON_INTEGER_OP(pmax);
4828 break;
62698be3 4829 case NEON_3R_VPMIN:
9ee6e8bb
PB
4830 GEN_NEON_INTEGER_OP(pmin);
4831 break;
62698be3 4832 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4833 if (!u) { /* VQDMULH */
4834 switch (size) {
2a3f75b4
PM
4835 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4836 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
62698be3 4837 default: abort();
9ee6e8bb 4838 }
62698be3 4839 } else { /* VQRDMULH */
9ee6e8bb 4840 switch (size) {
2a3f75b4
PM
4841 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4842 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
62698be3 4843 default: abort();
9ee6e8bb
PB
4844 }
4845 }
4846 break;
62698be3 4847 case NEON_3R_VPADD:
9ee6e8bb 4848 switch (size) {
dd8fbd78
FN
4849 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4850 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4851 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4852 default: abort();
9ee6e8bb
PB
4853 }
4854 break;
62698be3 4855 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
9ee6e8bb
PB
4856 switch ((u << 2) | size) {
4857 case 0: /* VADD */
dd8fbd78 4858 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4859 break;
4860 case 2: /* VSUB */
dd8fbd78 4861 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4862 break;
4863 case 4: /* VPADD */
dd8fbd78 4864 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4865 break;
4866 case 6: /* VABD */
dd8fbd78 4867 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4868 break;
4869 default:
62698be3 4870 abort();
9ee6e8bb
PB
4871 }
4872 break;
62698be3 4873 case NEON_3R_FLOAT_MULTIPLY:
dd8fbd78 4874 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4875 if (!u) {
7d1b0095 4876 tcg_temp_free_i32(tmp2);
dd8fbd78 4877 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4878 if (size == 0) {
dd8fbd78 4879 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4880 } else {
dd8fbd78 4881 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4882 }
4883 }
4884 break;
62698be3 4885 case NEON_3R_FLOAT_CMP:
9ee6e8bb 4886 if (!u) {
dd8fbd78 4887 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4888 } else {
9ee6e8bb 4889 if (size == 0)
dd8fbd78 4890 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4891 else
dd8fbd78 4892 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4893 }
2c0262af 4894 break;
62698be3 4895 case NEON_3R_FLOAT_ACMP:
9ee6e8bb 4896 if (size == 0)
dd8fbd78 4897 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4898 else
dd8fbd78 4899 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4900 break;
62698be3 4901 case NEON_3R_FLOAT_MINMAX:
9ee6e8bb 4902 if (size == 0)
dd8fbd78 4903 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4904 else
dd8fbd78 4905 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb 4906 break;
62698be3 4907 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4908 if (size == 0)
dd8fbd78 4909 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4910 else
dd8fbd78 4911 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4912 break;
9ee6e8bb
PB
4913 default:
4914 abort();
2c0262af 4915 }
7d1b0095 4916 tcg_temp_free_i32(tmp2);
dd8fbd78 4917
9ee6e8bb
PB
4918 /* Save the result. For elementwise operations we can put it
4919 straight into the destination register. For pairwise operations
4920 we have to be careful to avoid clobbering the source operands. */
4921 if (pairwise && rd == rm) {
dd8fbd78 4922 neon_store_scratch(pass, tmp);
9ee6e8bb 4923 } else {
dd8fbd78 4924 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4925 }
4926
4927 } /* for pass */
4928 if (pairwise && rd == rm) {
4929 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4930 tmp = neon_load_scratch(pass);
4931 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4932 }
4933 }
ad69471c 4934 /* End of 3 register same size operations. */
9ee6e8bb
PB
4935 } else if (insn & (1 << 4)) {
4936 if ((insn & 0x00380080) != 0) {
4937 /* Two registers and shift. */
4938 op = (insn >> 8) & 0xf;
4939 if (insn & (1 << 7)) {
cc13115b
PM
4940 /* 64-bit shift. */
4941 if (op > 7) {
4942 return 1;
4943 }
9ee6e8bb
PB
4944 size = 3;
4945 } else {
4946 size = 2;
4947 while ((insn & (1 << (size + 19))) == 0)
4948 size--;
4949 }
4950 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4951 /* To avoid excessive duplication of ops we implement shift
4952 by immediate using the variable shift operations. */
4953 if (op < 8) {
4954 /* Shift by immediate:
4955 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4956 if (q && ((rd | rm) & 1)) {
4957 return 1;
4958 }
4959 if (!u && (op == 4 || op == 6)) {
4960 return 1;
4961 }
9ee6e8bb
PB
4962 /* Right shifts are encoded as N - shift, where N is the
4963 element size in bits. */
4964 if (op <= 4)
4965 shift = shift - (1 << (size + 3));
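/* For instance, with 8-bit elements (size == 0) an encoded value of 3 gives
 * shift = 3 - 8 = -5; the variable-shift helpers used below treat a negative
 * count as a right shift, so this implements a right shift by 5, consistent
 * with the "N - shift" encoding described above. */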
9ee6e8bb
PB
4966 if (size == 3) {
4967 count = q + 1;
4968 } else {
4969 count = q ? 4 : 2;
4970 }
4971 switch (size) {
4972 case 0:
4973 imm = (uint8_t) shift;
4974 imm |= imm << 8;
4975 imm |= imm << 16;
4976 break;
4977 case 1:
4978 imm = (uint16_t) shift;
4979 imm |= imm << 16;
4980 break;
4981 case 2:
4982 case 3:
4983 imm = shift;
4984 break;
4985 default:
4986 abort();
4987 }
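/* The (negative) shift count is replicated into every lane of imm so that
 * the per-lane helpers below all see the same count, e.g. size == 1 with
 * shift == -12 gives imm = 0xfff4fff4. */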
4988
4989 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4990 if (size == 3) {
4991 neon_load_reg64(cpu_V0, rm + pass);
4992 tcg_gen_movi_i64(cpu_V1, imm);
4993 switch (op) {
4994 case 0: /* VSHR */
4995 case 1: /* VSRA */
4996 if (u)
4997 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4998 else
ad69471c 4999 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5000 break;
ad69471c
PB
5001 case 2: /* VRSHR */
5002 case 3: /* VRSRA */
5003 if (u)
5004 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5005 else
ad69471c 5006 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5007 break;
ad69471c 5008 case 4: /* VSRI */
ad69471c
PB
5009 case 5: /* VSHL, VSLI */
5010 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5011 break;
0322b26e 5012 case 6: /* VQSHLU */
cc13115b 5013 gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 5014 break;
0322b26e
PM
5015 case 7: /* VQSHL */
5016 if (u) {
2a3f75b4 5017 gen_helper_neon_qshl_u64(cpu_V0,
0322b26e
PM
5018 cpu_V0, cpu_V1);
5019 } else {
2a3f75b4 5020 gen_helper_neon_qshl_s64(cpu_V0,
0322b26e
PM
5021 cpu_V0, cpu_V1);
5022 }
9ee6e8bb 5023 break;
9ee6e8bb 5024 }
ad69471c
PB
5025 if (op == 1 || op == 3) {
5026 /* Accumulate. */
5371cb81 5027 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5028 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5029 } else if (op == 4 || (op == 5 && u)) {
5030 /* Insert */
923e6509
CL
5031 neon_load_reg64(cpu_V1, rd + pass);
5032 uint64_t mask;
5033 if (shift < -63 || shift > 63) {
5034 mask = 0;
5035 } else {
5036 if (op == 4) {
5037 mask = 0xffffffffffffffffull >> -shift;
5038 } else {
5039 mask = 0xffffffffffffffffull << shift;
5040 }
5041 }
5042 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5043 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5044 }
5045 neon_store_reg64(cpu_V0, rd + pass);
5046 } else { /* size < 3 */
5047 /* Operands in T0 and T1. */
dd8fbd78 5048 tmp = neon_load_reg(rm, pass);
7d1b0095 5049 tmp2 = tcg_temp_new_i32();
dd8fbd78 5050 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5051 switch (op) {
5052 case 0: /* VSHR */
5053 case 1: /* VSRA */
5054 GEN_NEON_INTEGER_OP(shl);
5055 break;
5056 case 2: /* VRSHR */
5057 case 3: /* VRSRA */
5058 GEN_NEON_INTEGER_OP(rshl);
5059 break;
5060 case 4: /* VSRI */
ad69471c
PB
5061 case 5: /* VSHL, VSLI */
5062 switch (size) {
dd8fbd78
FN
5063 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5064 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5065 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5066 default: abort();
ad69471c
PB
5067 }
5068 break;
0322b26e 5069 case 6: /* VQSHLU */
ad69471c 5070 switch (size) {
0322b26e 5071 case 0:
2a3f75b4 5072 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
0322b26e
PM
5073 break;
5074 case 1:
2a3f75b4 5075 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
0322b26e
PM
5076 break;
5077 case 2:
2a3f75b4 5078 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
0322b26e
PM
5079 break;
5080 default:
cc13115b 5081 abort();
ad69471c
PB
5082 }
5083 break;
0322b26e 5084 case 7: /* VQSHL */
2a3f75b4 5085 GEN_NEON_INTEGER_OP(qshl);
0322b26e 5086 break;
ad69471c 5087 }
7d1b0095 5088 tcg_temp_free_i32(tmp2);
ad69471c
PB
5089
5090 if (op == 1 || op == 3) {
5091 /* Accumulate. */
dd8fbd78 5092 tmp2 = neon_load_reg(rd, pass);
5371cb81 5093 gen_neon_add(size, tmp, tmp2);
7d1b0095 5094 tcg_temp_free_i32(tmp2);
ad69471c
PB
5095 } else if (op == 4 || (op == 5 && u)) {
5096 /* Insert */
5097 switch (size) {
5098 case 0:
5099 if (op == 4)
ca9a32e4 5100 mask = 0xff >> -shift;
ad69471c 5101 else
ca9a32e4
JR
5102 mask = (uint8_t)(0xff << shift);
5103 mask |= mask << 8;
5104 mask |= mask << 16;
ad69471c
PB
5105 break;
5106 case 1:
5107 if (op == 4)
ca9a32e4 5108 mask = 0xffff >> -shift;
ad69471c 5109 else
ca9a32e4
JR
5110 mask = (uint16_t)(0xffff << shift);
5111 mask |= mask << 16;
ad69471c
PB
5112 break;
5113 case 2:
ca9a32e4
JR
5114 if (shift < -31 || shift > 31) {
5115 mask = 0;
5116 } else {
5117 if (op == 4)
5118 mask = 0xffffffffu >> -shift;
5119 else
5120 mask = 0xffffffffu << shift;
5121 }
ad69471c
PB
5122 break;
5123 default:
5124 abort();
5125 }
dd8fbd78 5126 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5127 tcg_gen_andi_i32(tmp, tmp, mask);
5128 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5129 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5130 tcg_temp_free_i32(tmp2);
ad69471c 5131 }
dd8fbd78 5132 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5133 }
5134 } /* for pass */
5135 } else if (op < 10) {
ad69471c 5136 /* Shift by immediate and narrow:
9ee6e8bb 5137 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5138 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5139 if (rm & 1) {
5140 return 1;
5141 }
9ee6e8bb
PB
5142 shift = shift - (1 << (size + 3));
5143 size++;
92cdfaeb 5144 if (size == 3) {
a7812ae4 5145 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5146 neon_load_reg64(cpu_V0, rm);
5147 neon_load_reg64(cpu_V1, rm + 1);
5148 for (pass = 0; pass < 2; pass++) {
5149 TCGv_i64 in;
5150 if (pass == 0) {
5151 in = cpu_V0;
5152 } else {
5153 in = cpu_V1;
5154 }
ad69471c 5155 if (q) {
0b36f4cd 5156 if (input_unsigned) {
92cdfaeb 5157 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5158 } else {
92cdfaeb 5159 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5160 }
ad69471c 5161 } else {
0b36f4cd 5162 if (input_unsigned) {
92cdfaeb 5163 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5164 } else {
92cdfaeb 5165 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5166 }
ad69471c 5167 }
7d1b0095 5168 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5169 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5170 neon_store_reg(rd, pass, tmp);
5171 } /* for pass */
5172 tcg_temp_free_i64(tmp64);
5173 } else {
5174 if (size == 1) {
5175 imm = (uint16_t)shift;
5176 imm |= imm << 16;
2c0262af 5177 } else {
92cdfaeb
PM
5178 /* size == 2 */
5179 imm = (uint32_t)shift;
5180 }
5181 tmp2 = tcg_const_i32(imm);
5182 tmp4 = neon_load_reg(rm + 1, 0);
5183 tmp5 = neon_load_reg(rm + 1, 1);
5184 for (pass = 0; pass < 2; pass++) {
5185 if (pass == 0) {
5186 tmp = neon_load_reg(rm, 0);
5187 } else {
5188 tmp = tmp4;
5189 }
0b36f4cd
CL
5190 gen_neon_shift_narrow(size, tmp, tmp2, q,
5191 input_unsigned);
92cdfaeb
PM
5192 if (pass == 0) {
5193 tmp3 = neon_load_reg(rm, 1);
5194 } else {
5195 tmp3 = tmp5;
5196 }
0b36f4cd
CL
5197 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5198 input_unsigned);
36aa55dc 5199 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5200 tcg_temp_free_i32(tmp);
5201 tcg_temp_free_i32(tmp3);
5202 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5203 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5204 neon_store_reg(rd, pass, tmp);
5205 } /* for pass */
c6067f04 5206 tcg_temp_free_i32(tmp2);
b75263d6 5207 }
9ee6e8bb 5208 } else if (op == 10) {
cc13115b
PM
5209 /* VSHLL, VMOVL */
5210 if (q || (rd & 1)) {
9ee6e8bb 5211 return 1;
cc13115b 5212 }
ad69471c
PB
5213 tmp = neon_load_reg(rm, 0);
5214 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5215 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5216 if (pass == 1)
5217 tmp = tmp2;
5218
5219 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5220
9ee6e8bb
PB
5221 if (shift != 0) {
5222 /* The shift is less than the width of the source
ad69471c
PB
5223 type, so we can just shift the whole register. */
5224 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5225 /* Widen the result of the shift: we need to clear
5226 * the potential overflow bits resulting from
5227 * left bits of the narrow input appearing as
5228 * right bits of the left neighbour narrow
5229 * input. */
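/* For instance, size == 0 with shift == 3 gives imm = 0xff >> 5 = 0x07,
 * which is replicated below into imm64 = 0x0007000700070007; the andi with
 * ~imm64 then zeroes the low three bits of every widened 16-bit lane. */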
ad69471c
PB
5230 if (size < 2 || !u) {
5231 uint64_t imm64;
5232 if (size == 0) {
5233 imm = (0xffu >> (8 - shift));
5234 imm |= imm << 16;
acdf01ef 5235 } else if (size == 1) {
ad69471c 5236 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5237 } else {
5238 /* size == 2 */
5239 imm = 0xffffffff >> (32 - shift);
5240 }
5241 if (size < 2) {
5242 imm64 = imm | (((uint64_t)imm) << 32);
5243 } else {
5244 imm64 = imm;
9ee6e8bb 5245 }
acdf01ef 5246 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5247 }
5248 }
ad69471c 5249 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5250 }
f73534a5 5251 } else if (op >= 14) {
9ee6e8bb 5252 /* VCVT fixed-point. */
cc13115b
PM
5253 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5254 return 1;
5255 }
f73534a5
PM
5256 /* We have already masked out the must-be-1 top bit of imm6,
5257 * hence this 32-shift where the ARM ARM has 64-imm6.
5258 */
5259 shift = 32 - shift;
9ee6e8bb 5260 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5261 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5262 if (!(op & 1)) {
9ee6e8bb 5263 if (u)
5500b06c 5264 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5265 else
5500b06c 5266 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5267 } else {
5268 if (u)
5500b06c 5269 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5270 else
5500b06c 5271 gen_vfp_tosl(0, shift, 1);
2c0262af 5272 }
4373f3ce 5273 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5274 }
5275 } else {
9ee6e8bb
PB
5276 return 1;
5277 }
5278 } else { /* (insn & 0x00380080) == 0 */
5279 int invert;
7d80fee5
PM
5280 if (q && (rd & 1)) {
5281 return 1;
5282 }
9ee6e8bb
PB
5283
5284 op = (insn >> 8) & 0xf;
5285 /* One register and immediate. */
5286 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5287 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5288 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5289 * We choose to not special-case this and will behave as if a
5290 * valid constant encoding of 0 had been given.
5291 */
9ee6e8bb
PB
5292 switch (op) {
5293 case 0: case 1:
5294 /* no-op */
5295 break;
5296 case 2: case 3:
5297 imm <<= 8;
5298 break;
5299 case 4: case 5:
5300 imm <<= 16;
5301 break;
5302 case 6: case 7:
5303 imm <<= 24;
5304 break;
5305 case 8: case 9:
5306 imm |= imm << 16;
5307 break;
5308 case 10: case 11:
5309 imm = (imm << 8) | (imm << 24);
5310 break;
5311 case 12:
8e31209e 5312 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5313 break;
5314 case 13:
5315 imm = (imm << 16) | 0xffff;
5316 break;
5317 case 14:
5318 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5319 if (invert)
5320 imm = ~imm;
5321 break;
5322 case 15:
7d80fee5
PM
5323 if (invert) {
5324 return 1;
5325 }
9ee6e8bb
PB
5326 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5327 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5328 break;
5329 }
5330 if (invert)
5331 imm = ~imm;
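/* Example of the expansion above: op == 12 with imm8 == 0xab produces
 * imm = 0x0000abff (the "8-bit value, ones below" form) before the optional
 * inversion, and the loop below writes it to every 32-bit lane; op == 15
 * expands the 8-bit value into the ARM 32-bit floating-point modified
 * immediate. */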
5332
9ee6e8bb
PB
5333 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5334 if (op & 1 && op < 12) {
ad69471c 5335 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5336 if (invert) {
5337 /* The immediate value has already been inverted, so
5338 BIC becomes AND. */
ad69471c 5339 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5340 } else {
ad69471c 5341 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5342 }
9ee6e8bb 5343 } else {
ad69471c 5344 /* VMOV, VMVN. */
7d1b0095 5345 tmp = tcg_temp_new_i32();
9ee6e8bb 5346 if (op == 14 && invert) {
a5a14945 5347 int n;
ad69471c
PB
5348 uint32_t val;
5349 val = 0;
9ee6e8bb
PB
5350 for (n = 0; n < 4; n++) {
5351 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5352 val |= 0xff << (n * 8);
9ee6e8bb 5353 }
ad69471c
PB
5354 tcg_gen_movi_i32(tmp, val);
5355 } else {
5356 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5357 }
9ee6e8bb 5358 }
ad69471c 5359 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5360 }
5361 }
e4b3861d 5362 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5363 if (size != 3) {
5364 op = (insn >> 8) & 0xf;
5365 if ((insn & (1 << 6)) == 0) {
5366 /* Three registers of different lengths. */
5367 int src1_wide;
5368 int src2_wide;
5369 int prewiden;
695272dc
PM
5370 /* undefreq: bit 0 : UNDEF if size != 0
5371 * bit 1 : UNDEF if size == 0
5372 * bit 2 : UNDEF if U == 1
5373 * Note that [1:0] set implies 'always UNDEF'
5374 */
5375 int undefreq;
5376 /* prewiden, src1_wide, src2_wide, undefreq */
5377 static const int neon_3reg_wide[16][4] = {
5378 {1, 0, 0, 0}, /* VADDL */
5379 {1, 1, 0, 0}, /* VADDW */
5380 {1, 0, 0, 0}, /* VSUBL */
5381 {1, 1, 0, 0}, /* VSUBW */
5382 {0, 1, 1, 0}, /* VADDHN */
5383 {0, 0, 0, 0}, /* VABAL */
5384 {0, 1, 1, 0}, /* VSUBHN */
5385 {0, 0, 0, 0}, /* VABDL */
5386 {0, 0, 0, 0}, /* VMLAL */
5387 {0, 0, 0, 6}, /* VQDMLAL */
5388 {0, 0, 0, 0}, /* VMLSL */
5389 {0, 0, 0, 6}, /* VQDMLSL */
5390 {0, 0, 0, 0}, /* Integer VMULL */
5391 {0, 0, 0, 2}, /* VQDMULL */
5392 {0, 0, 0, 5}, /* Polynomial VMULL */
5393 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5394 };
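/* Decoding undefreq: e.g. VQDMULL's entry {0, 0, 0, 2} UNDEFs when
 * size == 0, and Polynomial VMULL's {0, 0, 0, 5} (bits 0 and 2 set) is
 * only valid for size == 0 with U == 0. */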
5395
5396 prewiden = neon_3reg_wide[op][0];
5397 src1_wide = neon_3reg_wide[op][1];
5398 src2_wide = neon_3reg_wide[op][2];
695272dc 5399 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5400
695272dc
PM
5401 if (((undefreq & 1) && (size != 0)) ||
5402 ((undefreq & 2) && (size == 0)) ||
5403 ((undefreq & 4) && u)) {
5404 return 1;
5405 }
5406 if ((src1_wide && (rn & 1)) ||
5407 (src2_wide && (rm & 1)) ||
5408 (!src2_wide && (rd & 1))) {
ad69471c 5409 return 1;
695272dc 5410 }
ad69471c 5411
9ee6e8bb
PB
5412 /* Avoid overlapping operands. Wide source operands are
5413 always aligned so will never overlap with wide
5414 destinations in problematic ways. */
8f8e3aa4 5415 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5416 tmp = neon_load_reg(rm, 1);
5417 neon_store_scratch(2, tmp);
8f8e3aa4 5418 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5419 tmp = neon_load_reg(rn, 1);
5420 neon_store_scratch(2, tmp);
9ee6e8bb 5421 }
a50f5b91 5422 TCGV_UNUSED(tmp3);
9ee6e8bb 5423 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5424 if (src1_wide) {
5425 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5426 TCGV_UNUSED(tmp);
9ee6e8bb 5427 } else {
ad69471c 5428 if (pass == 1 && rd == rn) {
dd8fbd78 5429 tmp = neon_load_scratch(2);
9ee6e8bb 5430 } else {
ad69471c
PB
5431 tmp = neon_load_reg(rn, pass);
5432 }
5433 if (prewiden) {
5434 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5435 }
5436 }
ad69471c
PB
5437 if (src2_wide) {
5438 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5439 TCGV_UNUSED(tmp2);
9ee6e8bb 5440 } else {
ad69471c 5441 if (pass == 1 && rd == rm) {
dd8fbd78 5442 tmp2 = neon_load_scratch(2);
9ee6e8bb 5443 } else {
ad69471c
PB
5444 tmp2 = neon_load_reg(rm, pass);
5445 }
5446 if (prewiden) {
5447 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5448 }
9ee6e8bb
PB
5449 }
5450 switch (op) {
5451 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5452 gen_neon_addl(size);
9ee6e8bb 5453 break;
79b0e534 5454 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5455 gen_neon_subl(size);
9ee6e8bb
PB
5456 break;
5457 case 5: case 7: /* VABAL, VABDL */
5458 switch ((size << 1) | u) {
ad69471c
PB
5459 case 0:
5460 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5461 break;
5462 case 1:
5463 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5464 break;
5465 case 2:
5466 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5467 break;
5468 case 3:
5469 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5470 break;
5471 case 4:
5472 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5473 break;
5474 case 5:
5475 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5476 break;
9ee6e8bb
PB
5477 default: abort();
5478 }
7d1b0095
PM
5479 tcg_temp_free_i32(tmp2);
5480 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5481 break;
5482 case 8: case 9: case 10: case 11: case 12: case 13:
5483 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5484 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5485 break;
5486 case 14: /* Polynomial VMULL */
e5ca24cb 5487 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5488 tcg_temp_free_i32(tmp2);
5489 tcg_temp_free_i32(tmp);
e5ca24cb 5490 break;
695272dc
PM
5491 default: /* 15 is RESERVED: caught earlier */
5492 abort();
9ee6e8bb 5493 }
ebcd88ce
PM
5494 if (op == 13) {
5495 /* VQDMULL */
5496 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5497 neon_store_reg64(cpu_V0, rd + pass);
5498 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5499 /* Accumulate. */
ebcd88ce 5500 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5501 switch (op) {
4dc064e6
PM
5502 case 10: /* VMLSL */
5503 gen_neon_negl(cpu_V0, size);
5504 /* Fall through */
5505 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5506 gen_neon_addl(size);
9ee6e8bb
PB
5507 break;
5508 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5509 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5510 if (op == 11) {
5511 gen_neon_negl(cpu_V0, size);
5512 }
ad69471c
PB
5513 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5514 break;
9ee6e8bb
PB
5515 default:
5516 abort();
5517 }
ad69471c 5518 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5519 } else if (op == 4 || op == 6) {
5520 /* Narrowing operation. */
7d1b0095 5521 tmp = tcg_temp_new_i32();
79b0e534 5522 if (!u) {
9ee6e8bb 5523 switch (size) {
ad69471c
PB
5524 case 0:
5525 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5526 break;
5527 case 1:
5528 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5529 break;
5530 case 2:
5531 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5532 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5533 break;
9ee6e8bb
PB
5534 default: abort();
5535 }
5536 } else {
5537 switch (size) {
ad69471c
PB
5538 case 0:
5539 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5540 break;
5541 case 1:
5542 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5543 break;
5544 case 2:
5545 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5546 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5547 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5548 break;
9ee6e8bb
PB
5549 default: abort();
5550 }
5551 }
ad69471c
PB
5552 if (pass == 0) {
5553 tmp3 = tmp;
5554 } else {
5555 neon_store_reg(rd, 0, tmp3);
5556 neon_store_reg(rd, 1, tmp);
5557 }
9ee6e8bb
PB
5558 } else {
5559 /* Write back the result. */
ad69471c 5560 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5561 }
5562 }
5563 } else {
3e3326df
PM
5564 /* Two registers and a scalar. NB that for ops of this form
5565 * the ARM ARM labels bit 24 as Q, but it is in our variable
5566 * 'u', not 'q'.
5567 */
5568 if (size == 0) {
5569 return 1;
5570 }
9ee6e8bb 5571 switch (op) {
9ee6e8bb 5572 case 1: /* Float VMLA scalar */
9ee6e8bb 5573 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5574 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5575 if (size == 1) {
5576 return 1;
5577 }
5578 /* fall through */
5579 case 0: /* Integer VMLA scalar */
5580 case 4: /* Integer VMLS scalar */
5581 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5582 case 12: /* VQDMULH scalar */
5583 case 13: /* VQRDMULH scalar */
3e3326df
PM
5584 if (u && ((rd | rn) & 1)) {
5585 return 1;
5586 }
dd8fbd78
FN
5587 tmp = neon_get_scalar(size, rm);
5588 neon_store_scratch(0, tmp);
9ee6e8bb 5589 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5590 tmp = neon_load_scratch(0);
5591 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5592 if (op == 12) {
5593 if (size == 1) {
2a3f75b4 5594 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5595 } else {
2a3f75b4 5596 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5597 }
5598 } else if (op == 13) {
5599 if (size == 1) {
2a3f75b4 5600 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5601 } else {
2a3f75b4 5602 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5603 }
5604 } else if (op & 1) {
dd8fbd78 5605 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5606 } else {
5607 switch (size) {
dd8fbd78
FN
5608 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5609 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5610 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5611 default: abort();
9ee6e8bb
PB
5612 }
5613 }
7d1b0095 5614 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5615 if (op < 8) {
5616 /* Accumulate. */
dd8fbd78 5617 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5618 switch (op) {
5619 case 0:
dd8fbd78 5620 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5621 break;
5622 case 1:
dd8fbd78 5623 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5624 break;
5625 case 4:
dd8fbd78 5626 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5627 break;
5628 case 5:
dd8fbd78 5629 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5630 break;
5631 default:
5632 abort();
5633 }
7d1b0095 5634 tcg_temp_free_i32(tmp2);
9ee6e8bb 5635 }
dd8fbd78 5636 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5637 }
5638 break;
9ee6e8bb 5639 case 3: /* VQDMLAL scalar */
9ee6e8bb 5640 case 7: /* VQDMLSL scalar */
9ee6e8bb 5641 case 11: /* VQDMULL scalar */
3e3326df 5642 if (u == 1) {
ad69471c 5643 return 1;
3e3326df
PM
5644 }
5645 /* fall through */
5646 case 2: /* VMLAL scalar */
5647 case 6: /* VMLSL scalar */
5648 case 10: /* VMULL scalar */
5649 if (rd & 1) {
5650 return 1;
5651 }
dd8fbd78 5652 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5653 /* We need a copy of tmp2 because gen_neon_mull
5654 * deletes it during pass 0. */
7d1b0095 5655 tmp4 = tcg_temp_new_i32();
c6067f04 5656 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5657 tmp3 = neon_load_reg(rn, 1);
ad69471c 5658
9ee6e8bb 5659 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5660 if (pass == 0) {
5661 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5662 } else {
dd8fbd78 5663 tmp = tmp3;
c6067f04 5664 tmp2 = tmp4;
9ee6e8bb 5665 }
ad69471c 5666 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5667 if (op != 11) {
5668 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5669 }
9ee6e8bb 5670 switch (op) {
4dc064e6
PM
5671 case 6:
5672 gen_neon_negl(cpu_V0, size);
5673 /* Fall through */
5674 case 2:
ad69471c 5675 gen_neon_addl(size);
9ee6e8bb
PB
5676 break;
5677 case 3: case 7:
ad69471c 5678 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5679 if (op == 7) {
5680 gen_neon_negl(cpu_V0, size);
5681 }
ad69471c 5682 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5683 break;
5684 case 10:
5685 /* no-op */
5686 break;
5687 case 11:
ad69471c 5688 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5689 break;
5690 default:
5691 abort();
5692 }
ad69471c 5693 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5694 }
dd8fbd78 5695
dd8fbd78 5696
9ee6e8bb
PB
5697 break;
5698 default: /* 14 and 15 are RESERVED */
5699 return 1;
5700 }
5701 }
5702 } else { /* size == 3 */
5703 if (!u) {
5704 /* Extract. */
9ee6e8bb 5705 imm = (insn >> 8) & 0xf;
ad69471c
PB
5706
5707 if (imm > 7 && !q)
5708 return 1;
5709
52579ea1
PM
5710 if (q && ((rd | rn | rm) & 1)) {
5711 return 1;
5712 }
5713
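 /* e.g. for the doubleword form, VEXT.8 d0, d1, d2, #3 yields
  * (d2:d1) >> (3 * 8): the top five bytes of d1 followed by the low
  * three bytes of d2.
  */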
ad69471c
PB
5714 if (imm == 0) {
5715 neon_load_reg64(cpu_V0, rn);
5716 if (q) {
5717 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5718 }
ad69471c
PB
5719 } else if (imm == 8) {
5720 neon_load_reg64(cpu_V0, rn + 1);
5721 if (q) {
5722 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5723 }
ad69471c 5724 } else if (q) {
a7812ae4 5725 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5726 if (imm < 8) {
5727 neon_load_reg64(cpu_V0, rn);
a7812ae4 5728 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5729 } else {
5730 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5731 neon_load_reg64(tmp64, rm);
ad69471c
PB
5732 }
5733 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5734 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5735 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5736 if (imm < 8) {
5737 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5738 } else {
ad69471c
PB
5739 neon_load_reg64(cpu_V1, rm + 1);
5740 imm -= 8;
9ee6e8bb 5741 }
ad69471c 5742 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5743 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5744 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5745 tcg_temp_free_i64(tmp64);
ad69471c 5746 } else {
a7812ae4 5747 /* BUGFIX */
ad69471c 5748 neon_load_reg64(cpu_V0, rn);
a7812ae4 5749 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5750 neon_load_reg64(cpu_V1, rm);
a7812ae4 5751 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5752 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5753 }
5754 neon_store_reg64(cpu_V0, rd);
5755 if (q) {
5756 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5757 }
5758 } else if ((insn & (1 << 11)) == 0) {
5759 /* Two register misc. */
5760 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5761 size = (insn >> 18) & 3;
600b828c
PM
5762 /* UNDEF for unknown op values and bad op-size combinations */
5763 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5764 return 1;
5765 }
fc2a9b37
PM
5766 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5767 q && ((rm | rd) & 1)) {
5768 return 1;
5769 }
9ee6e8bb 5770 switch (op) {
600b828c 5771 case NEON_2RM_VREV64:
9ee6e8bb 5772 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5773 tmp = neon_load_reg(rm, pass * 2);
5774 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5775 switch (size) {
dd8fbd78
FN
5776 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5777 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5778 case 2: /* no-op */ break;
5779 default: abort();
5780 }
dd8fbd78 5781 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5782 if (size == 2) {
dd8fbd78 5783 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5784 } else {
9ee6e8bb 5785 switch (size) {
dd8fbd78
FN
5786 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5787 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5788 default: abort();
5789 }
dd8fbd78 5790 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5791 }
5792 }
5793 break;
600b828c
PM
5794 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5795 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5796 for (pass = 0; pass < q + 1; pass++) {
5797 tmp = neon_load_reg(rm, pass * 2);
5798 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5799 tmp = neon_load_reg(rm, pass * 2 + 1);
5800 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5801 switch (size) {
5802 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5803 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5804 case 2: tcg_gen_add_i64(CPU_V001); break;
5805 default: abort();
5806 }
600b828c 5807 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5808 /* Accumulate. */
ad69471c
PB
5809 neon_load_reg64(cpu_V1, rd + pass);
5810 gen_neon_addl(size);
9ee6e8bb 5811 }
ad69471c 5812 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5813 }
5814 break;
600b828c 5815 case NEON_2RM_VTRN:
9ee6e8bb 5816 if (size == 2) {
a5a14945 5817 int n;
9ee6e8bb 5818 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5819 tmp = neon_load_reg(rm, n);
5820 tmp2 = neon_load_reg(rd, n + 1);
5821 neon_store_reg(rm, n, tmp2);
5822 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5823 }
5824 } else {
5825 goto elementwise;
5826 }
5827 break;
600b828c 5828 case NEON_2RM_VUZP:
02acedf9 5829 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5830 return 1;
9ee6e8bb
PB
5831 }
5832 break;
600b828c 5833 case NEON_2RM_VZIP:
d68a6f3a 5834 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5835 return 1;
9ee6e8bb
PB
5836 }
5837 break;
600b828c
PM
5838 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5839 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5840 if (rm & 1) {
5841 return 1;
5842 }
a50f5b91 5843 TCGV_UNUSED(tmp2);
9ee6e8bb 5844 for (pass = 0; pass < 2; pass++) {
ad69471c 5845 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5846 tmp = tcg_temp_new_i32();
600b828c
PM
5847 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5848 tmp, cpu_V0);
ad69471c
PB
5849 if (pass == 0) {
5850 tmp2 = tmp;
5851 } else {
5852 neon_store_reg(rd, 0, tmp2);
5853 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5854 }
9ee6e8bb
PB
5855 }
5856 break;
600b828c 5857 case NEON_2RM_VSHLL:
fc2a9b37 5858 if (q || (rd & 1)) {
9ee6e8bb 5859 return 1;
600b828c 5860 }
ad69471c
PB
5861 tmp = neon_load_reg(rm, 0);
5862 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5863 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5864 if (pass == 1)
5865 tmp = tmp2;
5866 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5867 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5868 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5869 }
5870 break;
600b828c 5871 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5872 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5873 q || (rm & 1)) {
5874 return 1;
5875 }
7d1b0095
PM
5876 tmp = tcg_temp_new_i32();
5877 tmp2 = tcg_temp_new_i32();
60011498 5878 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5879 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5880 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5881 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5882 tcg_gen_shli_i32(tmp2, tmp2, 16);
5883 tcg_gen_or_i32(tmp2, tmp2, tmp);
5884 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5885 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5886 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5887 neon_store_reg(rd, 0, tmp2);
7d1b0095 5888 tmp2 = tcg_temp_new_i32();
2d981da7 5889 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5890 tcg_gen_shli_i32(tmp2, tmp2, 16);
5891 tcg_gen_or_i32(tmp2, tmp2, tmp);
5892 neon_store_reg(rd, 1, tmp2);
7d1b0095 5893 tcg_temp_free_i32(tmp);
60011498 5894 break;
600b828c 5895 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5896 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5897 q || (rd & 1)) {
5898 return 1;
5899 }
7d1b0095 5900 tmp3 = tcg_temp_new_i32();
60011498
PB
5901 tmp = neon_load_reg(rm, 0);
5902 tmp2 = neon_load_reg(rm, 1);
5903 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5904 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5905 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5906 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5907 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5908 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5909 tcg_temp_free_i32(tmp);
60011498 5910 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5911 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5912 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5913 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5914 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5915 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5916 tcg_temp_free_i32(tmp2);
5917 tcg_temp_free_i32(tmp3);
60011498 5918 break;
9ee6e8bb
PB
5919 default:
5920 elementwise:
5921 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5922 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5923 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5924 neon_reg_offset(rm, pass));
dd8fbd78 5925 TCGV_UNUSED(tmp);
9ee6e8bb 5926 } else {
dd8fbd78 5927 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5928 }
5929 switch (op) {
600b828c 5930 case NEON_2RM_VREV32:
9ee6e8bb 5931 switch (size) {
dd8fbd78
FN
5932 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5933 case 1: gen_swap_half(tmp); break;
600b828c 5934 default: abort();
9ee6e8bb
PB
5935 }
5936 break;
600b828c 5937 case NEON_2RM_VREV16:
dd8fbd78 5938 gen_rev16(tmp);
9ee6e8bb 5939 break;
600b828c 5940 case NEON_2RM_VCLS:
9ee6e8bb 5941 switch (size) {
dd8fbd78
FN
5942 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5943 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5944 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5945 default: abort();
9ee6e8bb
PB
5946 }
5947 break;
600b828c 5948 case NEON_2RM_VCLZ:
9ee6e8bb 5949 switch (size) {
dd8fbd78
FN
5950 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5951 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5952 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5953 default: abort();
9ee6e8bb
PB
5954 }
5955 break;
600b828c 5956 case NEON_2RM_VCNT:
dd8fbd78 5957 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5958 break;
600b828c 5959 case NEON_2RM_VMVN:
dd8fbd78 5960 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 5961 break;
600b828c 5962 case NEON_2RM_VQABS:
9ee6e8bb 5963 switch (size) {
2a3f75b4
PM
5964 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5965 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5966 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
600b828c 5967 default: abort();
9ee6e8bb
PB
5968 }
5969 break;
600b828c 5970 case NEON_2RM_VQNEG:
9ee6e8bb 5971 switch (size) {
2a3f75b4
PM
5972 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5973 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5974 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
600b828c 5975 default: abort();
9ee6e8bb
PB
5976 }
5977 break;
600b828c 5978 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 5979 tmp2 = tcg_const_i32(0);
9ee6e8bb 5980 switch(size) {
dd8fbd78
FN
5981 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5982 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5983 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 5984 default: abort();
9ee6e8bb 5985 }
dd8fbd78 5986 tcg_temp_free(tmp2);
600b828c 5987 if (op == NEON_2RM_VCLE0) {
dd8fbd78 5988 tcg_gen_not_i32(tmp, tmp);
600b828c 5989 }
9ee6e8bb 5990 break;
600b828c 5991 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 5992 tmp2 = tcg_const_i32(0);
9ee6e8bb 5993 switch(size) {
dd8fbd78
FN
5994 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5995 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5996 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 5997 default: abort();
9ee6e8bb 5998 }
dd8fbd78 5999 tcg_temp_free(tmp2);
600b828c 6000 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6001 tcg_gen_not_i32(tmp, tmp);
600b828c 6002 }
9ee6e8bb 6003 break;
600b828c 6004 case NEON_2RM_VCEQ0:
dd8fbd78 6005 tmp2 = tcg_const_i32(0);
9ee6e8bb 6006 switch(size) {
dd8fbd78
FN
6007 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6008 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6009 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6010 default: abort();
9ee6e8bb 6011 }
dd8fbd78 6012 tcg_temp_free(tmp2);
9ee6e8bb 6013 break;
600b828c 6014 case NEON_2RM_VABS:
9ee6e8bb 6015 switch(size) {
dd8fbd78
FN
6016 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6017 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6018 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6019 default: abort();
9ee6e8bb
PB
6020 }
6021 break;
600b828c 6022 case NEON_2RM_VNEG:
dd8fbd78
FN
6023 tmp2 = tcg_const_i32(0);
6024 gen_neon_rsb(size, tmp, tmp2);
6025 tcg_temp_free(tmp2);
9ee6e8bb 6026 break;
600b828c 6027 case NEON_2RM_VCGT0_F:
dd8fbd78
FN
6028 tmp2 = tcg_const_i32(0);
6029 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
6030 tcg_temp_free(tmp2);
9ee6e8bb 6031 break;
600b828c 6032 case NEON_2RM_VCGE0_F:
dd8fbd78
FN
6033 tmp2 = tcg_const_i32(0);
6034 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
6035 tcg_temp_free(tmp2);
9ee6e8bb 6036 break;
600b828c 6037 case NEON_2RM_VCEQ0_F:
dd8fbd78
FN
6038 tmp2 = tcg_const_i32(0);
6039 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
6040 tcg_temp_free(tmp2);
9ee6e8bb 6041 break;
600b828c 6042 case NEON_2RM_VCLE0_F:
0e326109
PM
6043 tmp2 = tcg_const_i32(0);
6044 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
6045 tcg_temp_free(tmp2);
6046 break;
600b828c 6047 case NEON_2RM_VCLT0_F:
0e326109
PM
6048 tmp2 = tcg_const_i32(0);
6049 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
6050 tcg_temp_free(tmp2);
6051 break;
600b828c 6052 case NEON_2RM_VABS_F:
4373f3ce 6053 gen_vfp_abs(0);
9ee6e8bb 6054 break;
600b828c 6055 case NEON_2RM_VNEG_F:
4373f3ce 6056 gen_vfp_neg(0);
9ee6e8bb 6057 break;
600b828c 6058 case NEON_2RM_VSWP:
dd8fbd78
FN
6059 tmp2 = neon_load_reg(rd, pass);
6060 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6061 break;
600b828c 6062 case NEON_2RM_VTRN:
dd8fbd78 6063 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6064 switch (size) {
dd8fbd78
FN
6065 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6066 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6067 default: abort();
9ee6e8bb 6068 }
dd8fbd78 6069 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6070 break;
600b828c 6071 case NEON_2RM_VRECPE:
dd8fbd78 6072 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6073 break;
600b828c 6074 case NEON_2RM_VRSQRTE:
dd8fbd78 6075 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6076 break;
600b828c 6077 case NEON_2RM_VRECPE_F:
4373f3ce 6078 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6079 break;
600b828c 6080 case NEON_2RM_VRSQRTE_F:
4373f3ce 6081 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6082 break;
600b828c 6083 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6084 gen_vfp_sito(0, 1);
9ee6e8bb 6085 break;
600b828c 6086 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6087 gen_vfp_uito(0, 1);
9ee6e8bb 6088 break;
600b828c 6089 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6090 gen_vfp_tosiz(0, 1);
9ee6e8bb 6091 break;
600b828c 6092 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6093 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6094 break;
6095 default:
600b828c
PM
6096 /* Reserved op values were caught by the
6097 * neon_2rm_sizes[] check earlier.
6098 */
6099 abort();
9ee6e8bb 6100 }
600b828c 6101 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6102 tcg_gen_st_f32(cpu_F0s, cpu_env,
6103 neon_reg_offset(rd, pass));
9ee6e8bb 6104 } else {
dd8fbd78 6105 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6106 }
6107 }
6108 break;
6109 }
6110 } else if ((insn & (1 << 10)) == 0) {
6111 /* VTBL, VTBX. */
56907d77
PM
6112 int n = ((insn >> 8) & 3) + 1;
6113 if ((rn + n) > 32) {
6114 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6115 * helper function running off the end of the register file.
6116 */
6117 return 1;
6118 }
6119 n <<= 3;
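 /* n is now the table length in bytes, e.g. VTBL.8 d0, {d2,d3}, d4 uses a
  * 16-byte table starting at d2. For VTBX (bit 6 set) the current
  * destination bytes are passed in as the default for out-of-range
  * indices; VTBL uses zero instead.
  */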
9ee6e8bb 6120 if (insn & (1 << 6)) {
8f8e3aa4 6121 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6122 } else {
7d1b0095 6123 tmp = tcg_temp_new_i32();
8f8e3aa4 6124 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6125 }
8f8e3aa4 6126 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6127 tmp4 = tcg_const_i32(rn);
6128 tmp5 = tcg_const_i32(n);
6129 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 6130 tcg_temp_free_i32(tmp);
9ee6e8bb 6131 if (insn & (1 << 6)) {
8f8e3aa4 6132 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6133 } else {
7d1b0095 6134 tmp = tcg_temp_new_i32();
8f8e3aa4 6135 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6136 }
8f8e3aa4 6137 tmp3 = neon_load_reg(rm, 1);
b75263d6 6138 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6139 tcg_temp_free_i32(tmp5);
6140 tcg_temp_free_i32(tmp4);
8f8e3aa4 6141 neon_store_reg(rd, 0, tmp2);
3018f259 6142 neon_store_reg(rd, 1, tmp3);
7d1b0095 6143 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6144 } else if ((insn & 0x380) == 0) {
6145 /* VDUP */
133da6aa
JR
6146 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6147 return 1;
6148 }
9ee6e8bb 6149 if (insn & (1 << 19)) {
dd8fbd78 6150 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6151 } else {
dd8fbd78 6152 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6153 }
6154 if (insn & (1 << 16)) {
dd8fbd78 6155 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6156 } else if (insn & (1 << 17)) {
6157 if ((insn >> 18) & 1)
dd8fbd78 6158 gen_neon_dup_high16(tmp);
9ee6e8bb 6159 else
dd8fbd78 6160 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6161 }
6162 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6163 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6164 tcg_gen_mov_i32(tmp2, tmp);
6165 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6166 }
7d1b0095 6167 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6168 } else {
6169 return 1;
6170 }
6171 }
6172 }
6173 return 0;
6174}
6175
fe1479c3
PB
6176static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6177{
6178 int crn = (insn >> 16) & 0xf;
6179 int crm = insn & 0xf;
6180 int op1 = (insn >> 21) & 7;
6181 int op2 = (insn >> 5) & 7;
6182 int rt = (insn >> 12) & 0xf;
6183 TCGv tmp;
6184
ca27c052
PM
6185 /* Minimal set of debug registers, since we don't support debug */
6186 if (op1 == 0 && crn == 0 && op2 == 0) {
6187 switch (crm) {
6188 case 0:
6189 /* DBGDIDR: just RAZ. In particular this means the
6190 * "debug architecture version" bits will read as
6191 * a reserved value, which should cause Linux to
6192 * not try to use the debug hardware.
6193 */
6194 tmp = tcg_const_i32(0);
6195 store_reg(s, rt, tmp);
6196 return 0;
6197 case 1:
6198 case 2:
6199 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6200 * don't implement memory-mapped debug components
6201 */
6202 if (ENABLE_ARCH_7) {
6203 tmp = tcg_const_i32(0);
6204 store_reg(s, rt, tmp);
6205 return 0;
6206 }
6207 break;
6208 default:
6209 break;
6210 }
6211 }
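 /* e.g. a guest probing the debug hardware with
  * "mrc p14, 0, r0, c0, c0, 0" (DBGDIDR) simply gets zero back here.
  */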
6212
fe1479c3
PB
6213 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6214 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6215 /* TEECR */
6216 if (IS_USER(s))
6217 return 1;
6218 tmp = load_cpu_field(teecr);
6219 store_reg(s, rt, tmp);
6220 return 0;
6221 }
6222 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6223 /* TEEHBR */
6224 if (IS_USER(s) && (env->teecr & 1))
6225 return 1;
6226 tmp = load_cpu_field(teehbr);
6227 store_reg(s, rt, tmp);
6228 return 0;
6229 }
6230 }
6231 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6232 op1, crn, crm, op2);
6233 return 1;
6234}
6235
6236static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6237{
6238 int crn = (insn >> 16) & 0xf;
6239 int crm = insn & 0xf;
6240 int op1 = (insn >> 21) & 7;
6241 int op2 = (insn >> 5) & 7;
6242 int rt = (insn >> 12) & 0xf;
6243 TCGv tmp;
6244
6245 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6246 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6247 /* TEECR */
6248 if (IS_USER(s))
6249 return 1;
6250 tmp = load_reg(s, rt);
6251 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 6252 tcg_temp_free_i32(tmp);
fe1479c3
PB
6253 return 0;
6254 }
6255 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6256 /* TEEHBR */
6257 if (IS_USER(s) && (env->teecr & 1))
6258 return 1;
6259 tmp = load_reg(s, rt);
6260 store_cpu_field(tmp, teehbr);
6261 return 0;
6262 }
6263 }
6264 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6265 op1, crn, crm, op2);
6266 return 1;
6267}
6268
9ee6e8bb
PB
6269static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6270{
6271 int cpnum;
6272
6273 cpnum = (insn >> 8) & 0xf;
6274 if (arm_feature(env, ARM_FEATURE_XSCALE)
6275 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6276 return 1;
6277
6278 switch (cpnum) {
6279 case 0:
6280 case 1:
6281 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6282 return disas_iwmmxt_insn(env, s, insn);
6283 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6284 return disas_dsp_insn(env, s, insn);
6285 }
6286 return 1;
6287 case 10:
6288 case 11:
6289 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
6290 case 14:
6291 /* Coprocessors 7-15 are architecturally reserved by ARM.
6292 Unfortunately Intel decided to ignore this. */
6293 if (arm_feature(env, ARM_FEATURE_XSCALE))
6294 goto board;
6295 if (insn & (1 << 20))
6296 return disas_cp14_read(env, s, insn);
6297 else
6298 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
6299 case 15:
6300 return disas_cp15_insn (env, s, insn);
6301 default:
fe1479c3 6302 board:
9ee6e8bb
PB
6303 /* Unknown coprocessor. See if the board has hooked it. */
6304 return disas_cp_insn (env, s, insn);
6305 }
6306}
6307
5e3f878a
PB
6308
6309/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6310static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6311{
6312 TCGv tmp;
7d1b0095 6313 tmp = tcg_temp_new_i32();
5e3f878a
PB
6314 tcg_gen_trunc_i64_i32(tmp, val);
6315 store_reg(s, rlow, tmp);
7d1b0095 6316 tmp = tcg_temp_new_i32();
5e3f878a
PB
6317 tcg_gen_shri_i64(val, val, 32);
6318 tcg_gen_trunc_i64_i32(tmp, val);
6319 store_reg(s, rhigh, tmp);
6320}
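/* e.g. the long multiply cases in disas_arm_insn() compute their result
 * in a TCGv_i64 and then call gen_storeq_reg() to split it into the
 * RdLo/RdHi register pair.
 */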
6321
6322/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6323static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6324{
a7812ae4 6325 TCGv_i64 tmp;
5e3f878a
PB
6326 TCGv tmp2;
6327
36aa55dc 6328 /* Load value and extend to 64 bits. */
a7812ae4 6329 tmp = tcg_temp_new_i64();
5e3f878a
PB
6330 tmp2 = load_reg(s, rlow);
6331 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6332 tcg_temp_free_i32(tmp2);
5e3f878a 6333 tcg_gen_add_i64(val, val, tmp);
b75263d6 6334 tcg_temp_free_i64(tmp);
5e3f878a
PB
6335}
6336
6337/* load and add a 64-bit value from a register pair. */
a7812ae4 6338static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6339{
a7812ae4 6340 TCGv_i64 tmp;
36aa55dc
PB
6341 TCGv tmpl;
6342 TCGv tmph;
5e3f878a
PB
6343
6344 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6345 tmpl = load_reg(s, rlow);
6346 tmph = load_reg(s, rhigh);
a7812ae4 6347 tmp = tcg_temp_new_i64();
36aa55dc 6348 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6349 tcg_temp_free_i32(tmpl);
6350 tcg_temp_free_i32(tmph);
5e3f878a 6351 tcg_gen_add_i64(val, val, tmp);
b75263d6 6352 tcg_temp_free_i64(tmp);
5e3f878a
PB
6353}
6354
6355/* Set N and Z flags from a 64-bit value. */
a7812ae4 6356static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6357{
7d1b0095 6358 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6359 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6360 gen_logic_CC(tmp);
7d1b0095 6361 tcg_temp_free_i32(tmp);
5e3f878a
PB
6362}
6363
426f5abc
PB
6364/* Load/Store exclusive instructions are implemented by remembering
6365 the value/address loaded, and seeing if these are the same
6366 when the store is performed. This should be sufficient to implement
6367 the architecturally mandated semantics, and avoids having to monitor
6368 regular stores.
6369
6370 In system emulation mode only one CPU will be running at once, so
6371 this sequence is effectively atomic. In user emulation mode we
6372 throw an exception and handle the atomic operation elsewhere. */
6373static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6374 TCGv addr, int size)
6375{
6376 TCGv tmp;
6377
6378 switch (size) {
6379 case 0:
6380 tmp = gen_ld8u(addr, IS_USER(s));
6381 break;
6382 case 1:
6383 tmp = gen_ld16u(addr, IS_USER(s));
6384 break;
6385 case 2:
6386 case 3:
6387 tmp = gen_ld32(addr, IS_USER(s));
6388 break;
6389 default:
6390 abort();
6391 }
6392 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6393 store_reg(s, rt, tmp);
6394 if (size == 3) {
7d1b0095 6395 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6396 tcg_gen_addi_i32(tmp2, addr, 4);
6397 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6398 tcg_temp_free_i32(tmp2);
426f5abc
PB
6399 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6400 store_reg(s, rt2, tmp);
6401 }
6402 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6403}
6404
6405static void gen_clrex(DisasContext *s)
6406{
6407 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6408}
6409
6410#ifdef CONFIG_USER_ONLY
6411static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6412 TCGv addr, int size)
6413{
6414 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6415 tcg_gen_movi_i32(cpu_exclusive_info,
6416 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6417 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6418}
6419#else
6420static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6421 TCGv addr, int size)
6422{
6423 TCGv tmp;
6424 int done_label;
6425 int fail_label;
6426
6427 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6428 [addr] = {Rt};
6429 {Rd} = 0;
6430 } else {
6431 {Rd} = 1;
6432 } */
6433 fail_label = gen_new_label();
6434 done_label = gen_new_label();
6435 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6436 switch (size) {
6437 case 0:
6438 tmp = gen_ld8u(addr, IS_USER(s));
6439 break;
6440 case 1:
6441 tmp = gen_ld16u(addr, IS_USER(s));
6442 break;
6443 case 2:
6444 case 3:
6445 tmp = gen_ld32(addr, IS_USER(s));
6446 break;
6447 default:
6448 abort();
6449 }
6450 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6451 tcg_temp_free_i32(tmp);
426f5abc 6452 if (size == 3) {
7d1b0095 6453 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6454 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6455 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6456 tcg_temp_free_i32(tmp2);
426f5abc 6457 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6458 tcg_temp_free_i32(tmp);
426f5abc
PB
6459 }
6460 tmp = load_reg(s, rt);
6461 switch (size) {
6462 case 0:
6463 gen_st8(tmp, addr, IS_USER(s));
6464 break;
6465 case 1:
6466 gen_st16(tmp, addr, IS_USER(s));
6467 break;
6468 case 2:
6469 case 3:
6470 gen_st32(tmp, addr, IS_USER(s));
6471 break;
6472 default:
6473 abort();
6474 }
6475 if (size == 3) {
6476 tcg_gen_addi_i32(addr, addr, 4);
6477 tmp = load_reg(s, rt2);
6478 gen_st32(tmp, addr, IS_USER(s));
6479 }
6480 tcg_gen_movi_i32(cpu_R[rd], 0);
6481 tcg_gen_br(done_label);
6482 gen_set_label(fail_label);
6483 tcg_gen_movi_i32(cpu_R[rd], 1);
6484 gen_set_label(done_label);
6485 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6486}
6487#endif
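/* A typical guest sequence implemented by the pair above is an atomic
 * increment retry loop, roughly:
 *     1:  ldrex   r1, [r0]
 *         add     r1, r1, #1
 *         strex   r2, r1, [r0]
 *         cmp     r2, #0
 *         bne     1b
 * The STREX writes 0 to r2 only if the remembered address and value
 * still match; otherwise it writes 1 and the loop retries.
 */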
6488
9ee6e8bb
PB
6489static void disas_arm_insn(CPUState * env, DisasContext *s)
6490{
6491 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6492 TCGv tmp;
3670669c 6493 TCGv tmp2;
6ddbc6e4 6494 TCGv tmp3;
b0109805 6495 TCGv addr;
a7812ae4 6496 TCGv_i64 tmp64;
9ee6e8bb
PB
6497
6498 insn = ldl_code(s->pc);
6499 s->pc += 4;
6500
6501 /* M variants do not implement ARM mode. */
6502 if (IS_M(env))
6503 goto illegal_op;
6504 cond = insn >> 28;
6505 if (cond == 0xf){
be5e7a76
DES
6506 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6507 * choose to UNDEF. In ARMv5 and above the space is used
6508 * for miscellaneous unconditional instructions.
6509 */
6510 ARCH(5);
6511
9ee6e8bb
PB
6512 /* Unconditional instructions. */
6513 if (((insn >> 25) & 7) == 1) {
6514 /* NEON Data processing. */
6515 if (!arm_feature(env, ARM_FEATURE_NEON))
6516 goto illegal_op;
6517
6518 if (disas_neon_data_insn(env, s, insn))
6519 goto illegal_op;
6520 return;
6521 }
6522 if ((insn & 0x0f100000) == 0x04000000) {
6523 /* NEON load/store. */
6524 if (!arm_feature(env, ARM_FEATURE_NEON))
6525 goto illegal_op;
6526
6527 if (disas_neon_ls_insn(env, s, insn))
6528 goto illegal_op;
6529 return;
6530 }
3d185e5d
PM
6531 if (((insn & 0x0f30f000) == 0x0510f000) ||
6532 ((insn & 0x0f30f010) == 0x0710f000)) {
6533 if ((insn & (1 << 22)) == 0) {
6534 /* PLDW; v7MP */
6535 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6536 goto illegal_op;
6537 }
6538 }
6539 /* Otherwise PLD; v5TE+ */
be5e7a76 6540 ARCH(5TE);
3d185e5d
PM
6541 return;
6542 }
6543 if (((insn & 0x0f70f000) == 0x0450f000) ||
6544 ((insn & 0x0f70f010) == 0x0650f000)) {
6545 ARCH(7);
6546 return; /* PLI; V7 */
6547 }
6548 if (((insn & 0x0f700000) == 0x04100000) ||
6549 ((insn & 0x0f700010) == 0x06100000)) {
6550 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6551 goto illegal_op;
6552 }
6553 return; /* v7MP: Unallocated memory hint: must NOP */
6554 }
6555
6556 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6557 ARCH(6);
6558 /* setend */
6559 if (insn & (1 << 9)) {
6560 /* BE8 mode not implemented. */
6561 goto illegal_op;
6562 }
6563 return;
6564 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6565 switch ((insn >> 4) & 0xf) {
6566 case 1: /* clrex */
6567 ARCH(6K);
426f5abc 6568 gen_clrex(s);
9ee6e8bb
PB
6569 return;
6570 case 4: /* dsb */
6571 case 5: /* dmb */
6572 case 6: /* isb */
6573 ARCH(7);
6574 /* We don't emulate caches so these are a no-op. */
6575 return;
6576 default:
6577 goto illegal_op;
6578 }
6579 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6580 /* srs */
c67b6b71 6581 int32_t offset;
9ee6e8bb
PB
6582 if (IS_USER(s))
6583 goto illegal_op;
6584 ARCH(6);
6585 op1 = (insn & 0x1f);
7d1b0095 6586 addr = tcg_temp_new_i32();
39ea3d4e
PM
6587 tmp = tcg_const_i32(op1);
6588 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6589 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6590 i = (insn >> 23) & 3;
6591 switch (i) {
6592 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6593 case 1: offset = 0; break; /* IA */
6594 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6595 case 3: offset = 4; break; /* IB */
6596 default: abort();
6597 }
6598 if (offset)
b0109805
PB
6599 tcg_gen_addi_i32(addr, addr, offset);
6600 tmp = load_reg(s, 14);
6601 gen_st32(tmp, addr, 0);
c67b6b71 6602 tmp = load_cpu_field(spsr);
b0109805
PB
6603 tcg_gen_addi_i32(addr, addr, 4);
6604 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6605 if (insn & (1 << 21)) {
6606 /* Base writeback. */
6607 switch (i) {
6608 case 0: offset = -8; break;
c67b6b71
FN
6609 case 1: offset = 4; break;
6610 case 2: offset = -4; break;
9ee6e8bb
PB
6611 case 3: offset = 0; break;
6612 default: abort();
6613 }
6614 if (offset)
c67b6b71 6615 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6616 tmp = tcg_const_i32(op1);
6617 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6618 tcg_temp_free_i32(tmp);
7d1b0095 6619 tcg_temp_free_i32(addr);
b0109805 6620 } else {
7d1b0095 6621 tcg_temp_free_i32(addr);
9ee6e8bb 6622 }
a990f58f 6623 return;
ea825eee 6624 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6625 /* rfe */
c67b6b71 6626 int32_t offset;
9ee6e8bb
PB
6627 if (IS_USER(s))
6628 goto illegal_op;
6629 ARCH(6);
6630 rn = (insn >> 16) & 0xf;
b0109805 6631 addr = load_reg(s, rn);
9ee6e8bb
PB
6632 i = (insn >> 23) & 3;
6633 switch (i) {
b0109805 6634 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6635 case 1: offset = 0; break; /* IA */
6636 case 2: offset = -8; break; /* DB */
b0109805 6637 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6638 default: abort();
6639 }
6640 if (offset)
b0109805
PB
6641 tcg_gen_addi_i32(addr, addr, offset);
6642 /* Load PC into tmp and CPSR into tmp2. */
6643 tmp = gen_ld32(addr, 0);
6644 tcg_gen_addi_i32(addr, addr, 4);
6645 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6646 if (insn & (1 << 21)) {
6647 /* Base writeback. */
6648 switch (i) {
b0109805 6649 case 0: offset = -8; break;
c67b6b71
FN
6650 case 1: offset = 4; break;
6651 case 2: offset = -4; break;
b0109805 6652 case 3: offset = 0; break;
9ee6e8bb
PB
6653 default: abort();
6654 }
6655 if (offset)
b0109805
PB
6656 tcg_gen_addi_i32(addr, addr, offset);
6657 store_reg(s, rn, addr);
6658 } else {
7d1b0095 6659 tcg_temp_free_i32(addr);
9ee6e8bb 6660 }
b0109805 6661 gen_rfe(s, tmp, tmp2);
c67b6b71 6662 return;
9ee6e8bb
PB
6663 } else if ((insn & 0x0e000000) == 0x0a000000) {
6664 /* branch link and change to thumb (blx <offset>) */
6665 int32_t offset;
6666
6667 val = (uint32_t)s->pc;
7d1b0095 6668 tmp = tcg_temp_new_i32();
d9ba4830
PB
6669 tcg_gen_movi_i32(tmp, val);
6670 store_reg(s, 14, tmp);
9ee6e8bb
PB
6671 /* Sign-extend the 24-bit offset */
6672 offset = (((int32_t)insn) << 8) >> 8;
6673 /* offset * 4 + bit24 * 2 + (thumb bit) */
6674 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6675 /* pipeline offset */
6676 val += 4;
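 /* e.g. a BLX <imm> at address A with a zero immediate and H (bit 24)
  * set ends up at A + 8 + 2 in Thumb state, with r14 holding A + 4.
  */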
be5e7a76 6677 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6678 gen_bx_im(s, val);
9ee6e8bb
PB
6679 return;
6680 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6681 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6682 /* iWMMXt register transfer. */
6683 if (env->cp15.c15_cpar & (1 << 1))
6684 if (!disas_iwmmxt_insn(env, s, insn))
6685 return;
6686 }
6687 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6688 /* Coprocessor double register transfer. */
be5e7a76 6689 ARCH(5TE);
9ee6e8bb
PB
6690 } else if ((insn & 0x0f000010) == 0x0e000010) {
6691 /* Additional coprocessor register transfer. */
7997d92f 6692 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6693 uint32_t mask;
6694 uint32_t val;
6695 /* cps (privileged) */
6696 if (IS_USER(s))
6697 return;
6698 mask = val = 0;
6699 if (insn & (1 << 19)) {
6700 if (insn & (1 << 8))
6701 mask |= CPSR_A;
6702 if (insn & (1 << 7))
6703 mask |= CPSR_I;
6704 if (insn & (1 << 6))
6705 mask |= CPSR_F;
6706 if (insn & (1 << 18))
6707 val |= mask;
6708 }
7997d92f 6709 if (insn & (1 << 17)) {
9ee6e8bb
PB
6710 mask |= CPSR_M;
6711 val |= (insn & 0x1f);
6712 }
6713 if (mask) {
2fbac54b 6714 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6715 }
6716 return;
6717 }
6718 goto illegal_op;
6719 }
6720 if (cond != 0xe) {
6721 /* if not always execute, we generate a conditional jump to
6722 next instruction */
6723 s->condlabel = gen_new_label();
d9ba4830 6724 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6725 s->condjmp = 1;
6726 }
6727 if ((insn & 0x0f900000) == 0x03000000) {
6728 if ((insn & (1 << 21)) == 0) {
6729 ARCH(6T2);
6730 rd = (insn >> 12) & 0xf;
6731 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6732 if ((insn & (1 << 22)) == 0) {
6733 /* MOVW */
7d1b0095 6734 tmp = tcg_temp_new_i32();
5e3f878a 6735 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6736 } else {
6737 /* MOVT */
5e3f878a 6738 tmp = load_reg(s, rd);
86831435 6739 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6740 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6741 }
5e3f878a 6742 store_reg(s, rd, tmp);
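 /* e.g. "movw r0, #0x1234" followed by "movt r0, #0x5678" leaves
  * r0 = 0x56781234: MOVW clears the top halfword, while MOVT preserves
  * the low halfword written by MOVW.
  */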
9ee6e8bb
PB
6743 } else {
6744 if (((insn >> 12) & 0xf) != 0xf)
6745 goto illegal_op;
6746 if (((insn >> 16) & 0xf) == 0) {
6747 gen_nop_hint(s, insn & 0xff);
6748 } else {
6749 /* CPSR = immediate */
6750 val = insn & 0xff;
6751 shift = ((insn >> 8) & 0xf) * 2;
6752 if (shift)
6753 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6754 i = ((insn & (1 << 22)) != 0);
2fbac54b 6755 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6756 goto illegal_op;
6757 }
6758 }
6759 } else if ((insn & 0x0f900000) == 0x01000000
6760 && (insn & 0x00000090) != 0x00000090) {
6761 /* miscellaneous instructions */
6762 op1 = (insn >> 21) & 3;
6763 sh = (insn >> 4) & 0xf;
6764 rm = insn & 0xf;
6765 switch (sh) {
6766 case 0x0: /* move program status register */
6767 if (op1 & 1) {
6768 /* PSR = reg */
2fbac54b 6769 tmp = load_reg(s, rm);
9ee6e8bb 6770 i = ((op1 & 2) != 0);
2fbac54b 6771 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6772 goto illegal_op;
6773 } else {
6774 /* reg = PSR */
6775 rd = (insn >> 12) & 0xf;
6776 if (op1 & 2) {
6777 if (IS_USER(s))
6778 goto illegal_op;
d9ba4830 6779 tmp = load_cpu_field(spsr);
9ee6e8bb 6780 } else {
7d1b0095 6781 tmp = tcg_temp_new_i32();
d9ba4830 6782 gen_helper_cpsr_read(tmp);
9ee6e8bb 6783 }
d9ba4830 6784 store_reg(s, rd, tmp);
9ee6e8bb
PB
6785 }
6786 break;
6787 case 0x1:
6788 if (op1 == 1) {
6789 /* branch/exchange thumb (bx). */
be5e7a76 6790 ARCH(4T);
d9ba4830
PB
6791 tmp = load_reg(s, rm);
6792 gen_bx(s, tmp);
9ee6e8bb
PB
6793 } else if (op1 == 3) {
6794 /* clz */
be5e7a76 6795 ARCH(5);
9ee6e8bb 6796 rd = (insn >> 12) & 0xf;
1497c961
PB
6797 tmp = load_reg(s, rm);
6798 gen_helper_clz(tmp, tmp);
6799 store_reg(s, rd, tmp);
9ee6e8bb
PB
6800 } else {
6801 goto illegal_op;
6802 }
6803 break;
6804 case 0x2:
6805 if (op1 == 1) {
6806 ARCH(5J); /* bxj */
6807 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6808 tmp = load_reg(s, rm);
6809 gen_bx(s, tmp);
9ee6e8bb
PB
6810 } else {
6811 goto illegal_op;
6812 }
6813 break;
6814 case 0x3:
6815 if (op1 != 1)
6816 goto illegal_op;
6817
be5e7a76 6818 ARCH(5);
9ee6e8bb 6819 /* branch link/exchange thumb (blx) */
d9ba4830 6820 tmp = load_reg(s, rm);
7d1b0095 6821 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6822 tcg_gen_movi_i32(tmp2, s->pc);
6823 store_reg(s, 14, tmp2);
6824 gen_bx(s, tmp);
9ee6e8bb
PB
6825 break;
6826 case 0x5: /* saturating add/subtract */
be5e7a76 6827 ARCH(5TE);
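 /* op1 bit 1 selects the doubling forms (QDADD/QDSUB), bit 0 selects
  * subtract; e.g. QDADD r0, r1, r2 computes sat(r1 + sat(2 * r2)).
  */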
9ee6e8bb
PB
6828 rd = (insn >> 12) & 0xf;
6829 rn = (insn >> 16) & 0xf;
b40d0353 6830 tmp = load_reg(s, rm);
5e3f878a 6831 tmp2 = load_reg(s, rn);
9ee6e8bb 6832 if (op1 & 2)
5e3f878a 6833 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6834 if (op1 & 1)
5e3f878a 6835 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6836 else
5e3f878a 6837 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6838 tcg_temp_free_i32(tmp2);
5e3f878a 6839 store_reg(s, rd, tmp);
9ee6e8bb 6840 break;
49e14940
AL
6841 case 7:
6842 /* SMC instruction (op1 == 3)
6843 and undefined instructions (op1 == 0 || op1 == 2)
6844 will trap */
6845 if (op1 != 1) {
6846 goto illegal_op;
6847 }
6848 /* bkpt */
be5e7a76 6849 ARCH(5);
bc4a0de0 6850 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6851 break;
6852 case 0x8: /* signed multiply */
6853 case 0xa:
6854 case 0xc:
6855 case 0xe:
be5e7a76 6856 ARCH(5TE);
9ee6e8bb
PB
6857 rs = (insn >> 8) & 0xf;
6858 rn = (insn >> 12) & 0xf;
6859 rd = (insn >> 16) & 0xf;
6860 if (op1 == 1) {
6861 /* (32 * 16) >> 16 */
5e3f878a
PB
6862 tmp = load_reg(s, rm);
6863 tmp2 = load_reg(s, rs);
9ee6e8bb 6864 if (sh & 4)
5e3f878a 6865 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6866 else
5e3f878a 6867 gen_sxth(tmp2);
a7812ae4
PB
6868 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6869 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6870 tmp = tcg_temp_new_i32();
a7812ae4 6871 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6872 tcg_temp_free_i64(tmp64);
9ee6e8bb 6873 if ((sh & 2) == 0) {
5e3f878a
PB
6874 tmp2 = load_reg(s, rn);
6875 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6876 tcg_temp_free_i32(tmp2);
9ee6e8bb 6877 }
5e3f878a 6878 store_reg(s, rd, tmp);
9ee6e8bb
PB
6879 } else {
6880 /* 16 * 16 */
5e3f878a
PB
6881 tmp = load_reg(s, rm);
6882 tmp2 = load_reg(s, rs);
6883 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6884 tcg_temp_free_i32(tmp2);
9ee6e8bb 6885 if (op1 == 2) {
a7812ae4
PB
6886 tmp64 = tcg_temp_new_i64();
6887 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6888 tcg_temp_free_i32(tmp);
a7812ae4
PB
6889 gen_addq(s, tmp64, rn, rd);
6890 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6891 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6892 } else {
6893 if (op1 == 0) {
5e3f878a
PB
6894 tmp2 = load_reg(s, rn);
6895 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6896 tcg_temp_free_i32(tmp2);
9ee6e8bb 6897 }
5e3f878a 6898 store_reg(s, rd, tmp);
9ee6e8bb
PB
6899 }
6900 }
6901 break;
6902 default:
6903 goto illegal_op;
6904 }
6905 } else if (((insn & 0x0e000000) == 0 &&
6906 (insn & 0x00000090) != 0x90) ||
6907 ((insn & 0x0e000000) == (1 << 25))) {
6908 int set_cc, logic_cc, shiftop;
6909
6910 op1 = (insn >> 21) & 0xf;
6911 set_cc = (insn >> 20) & 1;
6912 logic_cc = table_logic_cc[op1] & set_cc;
6913
6914 /* data processing instruction */
6915 if (insn & (1 << 25)) {
6916 /* immediate operand */
6917 val = insn & 0xff;
6918 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6919 if (shift) {
9ee6e8bb 6920 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6921 }
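 /* The 8-bit immediate is rotated right by twice the rotate field;
  * e.g. imm8 = 0xff with rotate = 6 encodes the constant 0x0ff00000.
  */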
7d1b0095 6922 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6923 tcg_gen_movi_i32(tmp2, val);
6924 if (logic_cc && shift) {
6925 gen_set_CF_bit31(tmp2);
6926 }
9ee6e8bb
PB
6927 } else {
6928 /* register */
6929 rm = (insn) & 0xf;
e9bb4aa9 6930 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6931 shiftop = (insn >> 5) & 3;
6932 if (!(insn & (1 << 4))) {
6933 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6934 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6935 } else {
6936 rs = (insn >> 8) & 0xf;
8984bd2e 6937 tmp = load_reg(s, rs);
e9bb4aa9 6938 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6939 }
6940 }
6941 if (op1 != 0x0f && op1 != 0x0d) {
6942 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6943 tmp = load_reg(s, rn);
6944 } else {
6945 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6946 }
6947 rd = (insn >> 12) & 0xf;
6948 switch(op1) {
6949 case 0x00:
e9bb4aa9
JR
6950 tcg_gen_and_i32(tmp, tmp, tmp2);
6951 if (logic_cc) {
6952 gen_logic_CC(tmp);
6953 }
21aeb343 6954 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6955 break;
6956 case 0x01:
e9bb4aa9
JR
6957 tcg_gen_xor_i32(tmp, tmp, tmp2);
6958 if (logic_cc) {
6959 gen_logic_CC(tmp);
6960 }
21aeb343 6961 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6962 break;
6963 case 0x02:
6964 if (set_cc && rd == 15) {
6965 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6966 if (IS_USER(s)) {
9ee6e8bb 6967 goto illegal_op;
e9bb4aa9
JR
6968 }
6969 gen_helper_sub_cc(tmp, tmp, tmp2);
6970 gen_exception_return(s, tmp);
9ee6e8bb 6971 } else {
e9bb4aa9
JR
6972 if (set_cc) {
6973 gen_helper_sub_cc(tmp, tmp, tmp2);
6974 } else {
6975 tcg_gen_sub_i32(tmp, tmp, tmp2);
6976 }
21aeb343 6977 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6978 }
6979 break;
6980 case 0x03:
e9bb4aa9
JR
6981 if (set_cc) {
6982 gen_helper_sub_cc(tmp, tmp2, tmp);
6983 } else {
6984 tcg_gen_sub_i32(tmp, tmp2, tmp);
6985 }
21aeb343 6986 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6987 break;
6988 case 0x04:
e9bb4aa9
JR
6989 if (set_cc) {
6990 gen_helper_add_cc(tmp, tmp, tmp2);
6991 } else {
6992 tcg_gen_add_i32(tmp, tmp, tmp2);
6993 }
21aeb343 6994 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6995 break;
6996 case 0x05:
e9bb4aa9
JR
6997 if (set_cc) {
6998 gen_helper_adc_cc(tmp, tmp, tmp2);
6999 } else {
7000 gen_add_carry(tmp, tmp, tmp2);
7001 }
21aeb343 7002 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7003 break;
7004 case 0x06:
e9bb4aa9
JR
7005 if (set_cc) {
7006 gen_helper_sbc_cc(tmp, tmp, tmp2);
7007 } else {
7008 gen_sub_carry(tmp, tmp, tmp2);
7009 }
21aeb343 7010 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7011 break;
7012 case 0x07:
e9bb4aa9
JR
7013 if (set_cc) {
7014 gen_helper_sbc_cc(tmp, tmp2, tmp);
7015 } else {
7016 gen_sub_carry(tmp, tmp2, tmp);
7017 }
21aeb343 7018 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7019 break;
7020 case 0x08:
7021 if (set_cc) {
e9bb4aa9
JR
7022 tcg_gen_and_i32(tmp, tmp, tmp2);
7023 gen_logic_CC(tmp);
9ee6e8bb 7024 }
7d1b0095 7025 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7026 break;
7027 case 0x09:
7028 if (set_cc) {
e9bb4aa9
JR
7029 tcg_gen_xor_i32(tmp, tmp, tmp2);
7030 gen_logic_CC(tmp);
9ee6e8bb 7031 }
7d1b0095 7032 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7033 break;
7034 case 0x0a:
7035 if (set_cc) {
e9bb4aa9 7036 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 7037 }
7d1b0095 7038 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7039 break;
7040 case 0x0b:
7041 if (set_cc) {
e9bb4aa9 7042 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 7043 }
7d1b0095 7044 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7045 break;
7046 case 0x0c:
e9bb4aa9
JR
7047 tcg_gen_or_i32(tmp, tmp, tmp2);
7048 if (logic_cc) {
7049 gen_logic_CC(tmp);
7050 }
21aeb343 7051 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7052 break;
7053 case 0x0d:
7054 if (logic_cc && rd == 15) {
7055 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7056 if (IS_USER(s)) {
9ee6e8bb 7057 goto illegal_op;
e9bb4aa9
JR
7058 }
7059 gen_exception_return(s, tmp2);
9ee6e8bb 7060 } else {
e9bb4aa9
JR
7061 if (logic_cc) {
7062 gen_logic_CC(tmp2);
7063 }
21aeb343 7064 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7065 }
7066 break;
7067 case 0x0e:
f669df27 7068 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7069 if (logic_cc) {
7070 gen_logic_CC(tmp);
7071 }
21aeb343 7072 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7073 break;
7074 default:
7075 case 0x0f:
e9bb4aa9
JR
7076 tcg_gen_not_i32(tmp2, tmp2);
7077 if (logic_cc) {
7078 gen_logic_CC(tmp2);
7079 }
21aeb343 7080 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7081 break;
7082 }
e9bb4aa9 7083 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7084 tcg_temp_free_i32(tmp2);
e9bb4aa9 7085 }
9ee6e8bb
PB
7086 } else {
7087 /* other instructions */
7088 op1 = (insn >> 24) & 0xf;
7089 switch(op1) {
7090 case 0x0:
7091 case 0x1:
7092 /* multiplies, extra load/stores */
7093 sh = (insn >> 5) & 3;
7094 if (sh == 0) {
7095 if (op1 == 0x0) {
7096 rd = (insn >> 16) & 0xf;
7097 rn = (insn >> 12) & 0xf;
7098 rs = (insn >> 8) & 0xf;
7099 rm = (insn) & 0xf;
7100 op1 = (insn >> 20) & 0xf;
7101 switch (op1) {
7102 case 0: case 1: case 2: case 3: case 6:
7103 /* 32 bit mul */
5e3f878a
PB
7104 tmp = load_reg(s, rs);
7105 tmp2 = load_reg(s, rm);
7106 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7107 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7108 if (insn & (1 << 22)) {
7109 /* Subtract (mls) */
7110 ARCH(6T2);
5e3f878a
PB
7111 tmp2 = load_reg(s, rn);
7112 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7113 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7114 } else if (insn & (1 << 21)) {
7115 /* Add */
5e3f878a
PB
7116 tmp2 = load_reg(s, rn);
7117 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7118 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7119 }
7120 if (insn & (1 << 20))
5e3f878a
PB
7121 gen_logic_CC(tmp);
7122 store_reg(s, rd, tmp);
9ee6e8bb 7123 break;
8aac08b1
AJ
7124 case 4:
7125 /* 64 bit mul double accumulate (UMAAL) */
7126 ARCH(6);
7127 tmp = load_reg(s, rs);
7128 tmp2 = load_reg(s, rm);
7129 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7130 gen_addq_lo(s, tmp64, rn);
7131 gen_addq_lo(s, tmp64, rd);
7132 gen_storeq_reg(s, rn, rd, tmp64);
7133 tcg_temp_free_i64(tmp64);
7134 break;
7135 case 8: case 9: case 10: case 11:
7136 case 12: case 13: case 14: case 15:
7137 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
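 /* e.g. UMULL r0, r1, r2, r3 puts the low word of r2 * r3 in r0 and the
  * high word in r1; the accumulating forms add the product to the
  * existing r1:r0 pair.
  */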
5e3f878a
PB
7138 tmp = load_reg(s, rs);
7139 tmp2 = load_reg(s, rm);
8aac08b1 7140 if (insn & (1 << 22)) {
a7812ae4 7141 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7142 } else {
a7812ae4 7143 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7144 }
7145 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7146 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7147 }
8aac08b1 7148 if (insn & (1 << 20)) {
a7812ae4 7149 gen_logicq_cc(tmp64);
8aac08b1 7150 }
a7812ae4 7151 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7152 tcg_temp_free_i64(tmp64);
9ee6e8bb 7153 break;
8aac08b1
AJ
7154 default:
7155 goto illegal_op;
9ee6e8bb
PB
7156 }
7157 } else {
7158 rn = (insn >> 16) & 0xf;
7159 rd = (insn >> 12) & 0xf;
7160 if (insn & (1 << 23)) {
7161 /* load/store exclusive */
86753403
PB
7162 op1 = (insn >> 21) & 0x3;
7163 if (op1)
a47f43d2 7164 ARCH(6K);
86753403
PB
7165 else
7166 ARCH(6);
3174f8e9 7167 addr = tcg_temp_local_new_i32();
98a46317 7168 load_reg_var(s, addr, rn);
9ee6e8bb 7169 if (insn & (1 << 20)) {
86753403
PB
7170 switch (op1) {
7171 case 0: /* ldrex */
426f5abc 7172 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7173 break;
7174 case 1: /* ldrexd */
426f5abc 7175 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7176 break;
7177 case 2: /* ldrexb */
426f5abc 7178 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7179 break;
7180 case 3: /* ldrexh */
426f5abc 7181 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7182 break;
7183 default:
7184 abort();
7185 }
9ee6e8bb
PB
7186 } else {
7187 rm = insn & 0xf;
86753403
PB
7188 switch (op1) {
7189 case 0: /* strex */
426f5abc 7190 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7191 break;
7192 case 1: /* strexd */
502e64fe 7193 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7194 break;
7195 case 2: /* strexb */
426f5abc 7196 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7197 break;
7198 case 3: /* strexh */
426f5abc 7199 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7200 break;
7201 default:
7202 abort();
7203 }
9ee6e8bb 7204 }
3174f8e9 7205 tcg_temp_free(addr);
9ee6e8bb
PB
7206 } else {
7207 /* SWP instruction */
7208 rm = (insn) & 0xf;
7209
8984bd2e
PB
7210 /* ??? This is not really atomic. However we know
7211 we never have multiple CPUs running in parallel,
7212 so it is good enough. */
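 /* e.g. "swp r0, r1, [r2]" returns the old memory word in r0 and stores
  * r1 to [r2]; here that read-modify-write is just a load followed by a
  * store, which is fine for a single emulated CPU.
  */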
7213 addr = load_reg(s, rn);
7214 tmp = load_reg(s, rm);
9ee6e8bb 7215 if (insn & (1 << 22)) {
8984bd2e
PB
7216 tmp2 = gen_ld8u(addr, IS_USER(s));
7217 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7218 } else {
8984bd2e
PB
7219 tmp2 = gen_ld32(addr, IS_USER(s));
7220 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7221 }
7d1b0095 7222 tcg_temp_free_i32(addr);
8984bd2e 7223 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7224 }
7225 }
7226 } else {
7227 int address_offset;
7228 int load;
7229 /* Misc load/store */
7230 rn = (insn >> 16) & 0xf;
7231 rd = (insn >> 12) & 0xf;
b0109805 7232 addr = load_reg(s, rn);
9ee6e8bb 7233 if (insn & (1 << 24))
b0109805 7234 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7235 address_offset = 0;
7236 if (insn & (1 << 20)) {
7237 /* load */
7238 switch(sh) {
7239 case 1:
b0109805 7240 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7241 break;
7242 case 2:
b0109805 7243 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7244 break;
7245 default:
7246 case 3:
b0109805 7247 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7248 break;
7249 }
7250 load = 1;
7251 } else if (sh & 2) {
be5e7a76 7252 ARCH(5TE);
9ee6e8bb
PB
7253 /* doubleword */
7254 if (sh & 1) {
7255 /* store */
b0109805
PB
7256 tmp = load_reg(s, rd);
7257 gen_st32(tmp, addr, IS_USER(s));
7258 tcg_gen_addi_i32(addr, addr, 4);
7259 tmp = load_reg(s, rd + 1);
7260 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7261 load = 0;
7262 } else {
7263 /* load */
b0109805
PB
7264 tmp = gen_ld32(addr, IS_USER(s));
7265 store_reg(s, rd, tmp);
7266 tcg_gen_addi_i32(addr, addr, 4);
7267 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7268 rd++;
7269 load = 1;
7270 }
7271 address_offset = -4;
7272 } else {
7273 /* store */
b0109805
PB
7274 tmp = load_reg(s, rd);
7275 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7276 load = 0;
7277 }
7278 /* Perform base writeback before the loaded value to
7279 ensure correct behavior with overlapping index registers.
7280 ldrd with base writeback is undefined if the
7281 destination and index registers overlap. */
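 /* e.g. for a post-indexed "ldrh r1, [r1], r2" (architecturally
  * UNPREDICTABLE) the base update is generated first, so the loaded
  * halfword stored below is what finally lands in r1.
  */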
7282 if (!(insn & (1 << 24))) {
b0109805
PB
7283 gen_add_datah_offset(s, insn, address_offset, addr);
7284 store_reg(s, rn, addr);
9ee6e8bb
PB
7285 } else if (insn & (1 << 21)) {
7286 if (address_offset)
b0109805
PB
7287 tcg_gen_addi_i32(addr, addr, address_offset);
7288 store_reg(s, rn, addr);
7289 } else {
7d1b0095 7290 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7291 }
7292 if (load) {
7293 /* Complete the load. */
b0109805 7294 store_reg(s, rd, tmp);
9ee6e8bb
PB
7295 }
7296 }
7297 break;
7298 case 0x4:
7299 case 0x5:
7300 goto do_ldst;
7301 case 0x6:
7302 case 0x7:
7303 if (insn & (1 << 4)) {
7304 ARCH(6);
7305 /* ARMv6 media instructions. */
7306 rm = insn & 0xf;
7307 rn = (insn >> 16) & 0xf;
2c0262af 7308 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7309 rs = (insn >> 8) & 0xf;
7310 switch ((insn >> 23) & 3) {
7311 case 0: /* Parallel add/subtract. */
7312 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7313 tmp = load_reg(s, rn);
7314 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7315 sh = (insn >> 5) & 7;
7316 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7317 goto illegal_op;
6ddbc6e4 7318 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7319 tcg_temp_free_i32(tmp2);
6ddbc6e4 7320 store_reg(s, rd, tmp);
9ee6e8bb
PB
7321 break;
7322 case 1:
7323 if ((insn & 0x00700020) == 0) {
6c95676b 7324 /* Halfword pack. */
3670669c
PB
7325 tmp = load_reg(s, rn);
7326 tmp2 = load_reg(s, rm);
9ee6e8bb 7327 shift = (insn >> 7) & 0x1f;
3670669c
PB
7328 if (insn & (1 << 6)) {
7329 /* pkhtb */
22478e79
AZ
7330 if (shift == 0)
7331 shift = 31;
7332 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7333 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7334 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7335 } else {
7336 /* pkhbt */
22478e79
AZ
7337 if (shift)
7338 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7339 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7340 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7341 }
7342 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7343 tcg_temp_free_i32(tmp2);
3670669c 7344 store_reg(s, rd, tmp);
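 /* e.g. PKHBT r0, r1, r2, LSL #8 takes the bottom halfword from r1 and
  * the top halfword from r2 << 8; PKHTB shifts arithmetically right and
  * swaps which operand supplies which half.
  */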
9ee6e8bb
PB
7345 } else if ((insn & 0x00200020) == 0x00200000) {
7346 /* [us]sat */
6ddbc6e4 7347 tmp = load_reg(s, rm);
9ee6e8bb
PB
7348 shift = (insn >> 7) & 0x1f;
7349 if (insn & (1 << 6)) {
7350 if (shift == 0)
7351 shift = 31;
6ddbc6e4 7352 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7353 } else {
6ddbc6e4 7354 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7355 }
7356 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7357 tmp2 = tcg_const_i32(sh);
7358 if (insn & (1 << 22))
7359 gen_helper_usat(tmp, tmp, tmp2);
7360 else
7361 gen_helper_ssat(tmp, tmp, tmp2);
7362 tcg_temp_free_i32(tmp2);
6ddbc6e4 7363 store_reg(s, rd, tmp);
9ee6e8bb
PB
7364 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7365 /* [us]sat16 */
6ddbc6e4 7366 tmp = load_reg(s, rm);
9ee6e8bb 7367 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7368 tmp2 = tcg_const_i32(sh);
7369 if (insn & (1 << 22))
7370 gen_helper_usat16(tmp, tmp, tmp2);
7371 else
7372 gen_helper_ssat16(tmp, tmp, tmp2);
7373 tcg_temp_free_i32(tmp2);
6ddbc6e4 7374 store_reg(s, rd, tmp);
9ee6e8bb
PB
7375 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7376 /* Select bytes. */
6ddbc6e4
PB
7377 tmp = load_reg(s, rn);
7378 tmp2 = load_reg(s, rm);
7d1b0095 7379 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
7380 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7381 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7382 tcg_temp_free_i32(tmp3);
7383 tcg_temp_free_i32(tmp2);
6ddbc6e4 7384 store_reg(s, rd, tmp);
9ee6e8bb 7385 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7386 tmp = load_reg(s, rm);
9ee6e8bb 7387 shift = (insn >> 10) & 3;
1301f322 7388                     /* ??? In many cases it's not necessary to do a
9ee6e8bb 7389                        rotate; a shift is sufficient.  */
7390 if (shift != 0)
f669df27 7391 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7392 op1 = (insn >> 20) & 7;
7393 switch (op1) {
5e3f878a
PB
7394 case 0: gen_sxtb16(tmp); break;
7395 case 2: gen_sxtb(tmp); break;
7396 case 3: gen_sxth(tmp); break;
7397 case 4: gen_uxtb16(tmp); break;
7398 case 6: gen_uxtb(tmp); break;
7399 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7400 default: goto illegal_op;
7401 }
7402 if (rn != 15) {
5e3f878a 7403 tmp2 = load_reg(s, rn);
9ee6e8bb 7404 if ((op1 & 3) == 0) {
5e3f878a 7405 gen_add16(tmp, tmp2);
9ee6e8bb 7406 } else {
5e3f878a 7407 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7408 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7409 }
7410 }
6c95676b 7411 store_reg(s, rd, tmp);
9ee6e8bb
PB
7412 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7413 /* rev */
b0109805 7414 tmp = load_reg(s, rm);
9ee6e8bb
PB
7415 if (insn & (1 << 22)) {
7416 if (insn & (1 << 7)) {
b0109805 7417 gen_revsh(tmp);
9ee6e8bb
PB
7418 } else {
7419 ARCH(6T2);
b0109805 7420 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7421 }
7422 } else {
7423 if (insn & (1 << 7))
b0109805 7424 gen_rev16(tmp);
9ee6e8bb 7425 else
66896cb8 7426 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7427 }
b0109805 7428 store_reg(s, rd, tmp);
9ee6e8bb
PB
7429 } else {
7430 goto illegal_op;
7431 }
7432 break;
7433 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7434 tmp = load_reg(s, rm);
7435 tmp2 = load_reg(s, rs);
9ee6e8bb 7436 if (insn & (1 << 20)) {
838fa72d
AJ
7437 /* Signed multiply most significant [accumulate].
7438 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7439 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7440
955a7dd5 7441 if (rd != 15) {
838fa72d 7442 tmp = load_reg(s, rd);
9ee6e8bb 7443 if (insn & (1 << 6)) {
838fa72d 7444 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7445 } else {
838fa72d 7446 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7447 }
7448 }
838fa72d
AJ
7449 if (insn & (1 << 5)) {
7450 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7451 }
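                    /* Adding 0x80000000 before taking the high word rounds the
                       result to nearest, i.e. the SMMULR/SMMLAR/SMMLSR forms
                       selected by bit 5. */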
7452 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7453 tmp = tcg_temp_new_i32();
838fa72d
AJ
7454 tcg_gen_trunc_i64_i32(tmp, tmp64);
7455 tcg_temp_free_i64(tmp64);
955a7dd5 7456 store_reg(s, rn, tmp);
9ee6e8bb
PB
7457 } else {
7458 if (insn & (1 << 5))
5e3f878a
PB
7459 gen_swap_half(tmp2);
7460 gen_smul_dual(tmp, tmp2);
5e3f878a 7461 if (insn & (1 << 6)) {
e1d177b9 7462 /* This subtraction cannot overflow. */
5e3f878a
PB
7463 tcg_gen_sub_i32(tmp, tmp, tmp2);
7464 } else {
e1d177b9
PM
7465 /* This addition cannot overflow 32 bits;
7466 * however it may overflow considered as a signed
7467 * operation, in which case we must set the Q flag.
7468 */
7469 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7470 }
7d1b0095 7471 tcg_temp_free_i32(tmp2);
9ee6e8bb 7472 if (insn & (1 << 22)) {
5e3f878a 7473 /* smlald, smlsld */
a7812ae4
PB
7474 tmp64 = tcg_temp_new_i64();
7475 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7476 tcg_temp_free_i32(tmp);
a7812ae4
PB
7477 gen_addq(s, tmp64, rd, rn);
7478 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7479 tcg_temp_free_i64(tmp64);
9ee6e8bb 7480 } else {
5e3f878a 7481 /* smuad, smusd, smlad, smlsd */
22478e79 7482 if (rd != 15)
9ee6e8bb 7483 {
22478e79 7484 tmp2 = load_reg(s, rd);
5e3f878a 7485 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7486 tcg_temp_free_i32(tmp2);
9ee6e8bb 7487 }
22478e79 7488 store_reg(s, rn, tmp);
9ee6e8bb
PB
7489 }
7490 }
7491 break;
7492 case 3:
7493 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7494 switch (op1) {
7495 case 0: /* Unsigned sum of absolute differences. */
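                /* USAD8 when the accumulator field (rd) is 15, otherwise
                   USADA8: the sum of absolute byte differences is added to rd
                   and the result is written to rn. */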
6ddbc6e4
PB
7496 ARCH(6);
7497 tmp = load_reg(s, rm);
7498 tmp2 = load_reg(s, rs);
7499 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7500 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7501 if (rd != 15) {
7502 tmp2 = load_reg(s, rd);
6ddbc6e4 7503 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7504 tcg_temp_free_i32(tmp2);
9ee6e8bb 7505 }
ded9d295 7506 store_reg(s, rn, tmp);
9ee6e8bb
PB
7507 break;
7508 case 0x20: case 0x24: case 0x28: case 0x2c:
7509 /* Bitfield insert/clear. */
7510 ARCH(6T2);
7511 shift = (insn >> 7) & 0x1f;
7512 i = (insn >> 16) & 0x1f;
7513 i = i + 1 - shift;
7514 if (rm == 15) {
7d1b0095 7515 tmp = tcg_temp_new_i32();
5e3f878a 7516 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7517 } else {
5e3f878a 7518 tmp = load_reg(s, rm);
9ee6e8bb
PB
7519 }
7520 if (i != 32) {
5e3f878a 7521 tmp2 = load_reg(s, rd);
8f8e3aa4 7522 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7523 tcg_temp_free_i32(tmp2);
9ee6e8bb 7524 }
5e3f878a 7525 store_reg(s, rd, tmp);
9ee6e8bb
PB
7526 break;
7527 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7528 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7529 ARCH(6T2);
5e3f878a 7530 tmp = load_reg(s, rm);
9ee6e8bb
PB
7531 shift = (insn >> 7) & 0x1f;
7532 i = ((insn >> 16) & 0x1f) + 1;
7533 if (shift + i > 32)
7534 goto illegal_op;
7535 if (i < 32) {
7536 if (op1 & 0x20) {
5e3f878a 7537 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7538 } else {
5e3f878a 7539 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7540 }
7541 }
5e3f878a 7542 store_reg(s, rd, tmp);
9ee6e8bb
PB
7543 break;
7544 default:
7545 goto illegal_op;
7546 }
7547 break;
7548 }
7549 break;
7550 }
7551 do_ldst:
7552 /* Check for undefined extension instructions
7553         * per the ARM Bible, i.e.:
7554 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7555 */
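        /* The mask below covers insn[23:20] and insn[7:4]; together with
           op1 == 0x7 this matches the pattern quoted above. */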
7556 sh = (0xf << 20) | (0xf << 4);
7557 if (op1 == 0x7 && ((insn & sh) == sh))
7558 {
7559 goto illegal_op;
7560 }
7561 /* load/store byte/word */
7562 rn = (insn >> 16) & 0xf;
7563 rd = (insn >> 12) & 0xf;
b0109805 7564 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7565 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7566 if (insn & (1 << 24))
b0109805 7567 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7568 if (insn & (1 << 20)) {
7569 /* load */
9ee6e8bb 7570 if (insn & (1 << 22)) {
b0109805 7571 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7572 } else {
b0109805 7573 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7574 }
9ee6e8bb
PB
7575 } else {
7576 /* store */
b0109805 7577 tmp = load_reg(s, rd);
9ee6e8bb 7578 if (insn & (1 << 22))
b0109805 7579 gen_st8(tmp, tmp2, i);
9ee6e8bb 7580 else
b0109805 7581 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7582 }
7583 if (!(insn & (1 << 24))) {
b0109805
PB
7584 gen_add_data_offset(s, insn, tmp2);
7585 store_reg(s, rn, tmp2);
7586 } else if (insn & (1 << 21)) {
7587 store_reg(s, rn, tmp2);
7588 } else {
7d1b0095 7589 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7590 }
7591 if (insn & (1 << 20)) {
7592 /* Complete the load. */
be5e7a76 7593 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7594 }
7595 break;
7596 case 0x08:
7597 case 0x09:
7598 {
7599 int j, n, user, loaded_base;
b0109805 7600 TCGv loaded_var;
9ee6e8bb
PB
7601 /* load/store multiple words */
7602 /* XXX: store correct base if write back */
7603 user = 0;
7604 if (insn & (1 << 22)) {
7605 if (IS_USER(s))
7606 goto illegal_op; /* only usable in supervisor mode */
7607
7608 if ((insn & (1 << 15)) == 0)
7609 user = 1;
7610 }
7611 rn = (insn >> 16) & 0xf;
b0109805 7612 addr = load_reg(s, rn);
9ee6e8bb
PB
7613
7614 /* compute total size */
7615 loaded_base = 0;
a50f5b91 7616 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7617 n = 0;
7618 for(i=0;i<16;i++) {
7619 if (insn & (1 << i))
7620 n++;
7621 }
7622 /* XXX: test invalid n == 0 case ? */
7623 if (insn & (1 << 23)) {
7624 if (insn & (1 << 24)) {
7625 /* pre increment */
b0109805 7626 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7627 } else {
7628 /* post increment */
7629 }
7630 } else {
7631 if (insn & (1 << 24)) {
7632 /* pre decrement */
b0109805 7633 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7634 } else {
7635 /* post decrement */
7636 if (n != 1)
b0109805 7637 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7638 }
7639 }
7640 j = 0;
7641 for(i=0;i<16;i++) {
7642 if (insn & (1 << i)) {
7643 if (insn & (1 << 20)) {
7644 /* load */
b0109805 7645 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7646 if (user) {
b75263d6
JR
7647 tmp2 = tcg_const_i32(i);
7648 gen_helper_set_user_reg(tmp2, tmp);
7649 tcg_temp_free_i32(tmp2);
7d1b0095 7650 tcg_temp_free_i32(tmp);
9ee6e8bb 7651 } else if (i == rn) {
b0109805 7652 loaded_var = tmp;
9ee6e8bb
PB
7653 loaded_base = 1;
7654 } else {
be5e7a76 7655 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7656 }
7657 } else {
7658 /* store */
7659 if (i == 15) {
7660 /* special case: r15 = PC + 8 */
7661 val = (long)s->pc + 4;
7d1b0095 7662 tmp = tcg_temp_new_i32();
b0109805 7663 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7664 } else if (user) {
7d1b0095 7665 tmp = tcg_temp_new_i32();
b75263d6
JR
7666 tmp2 = tcg_const_i32(i);
7667 gen_helper_get_user_reg(tmp, tmp2);
7668 tcg_temp_free_i32(tmp2);
9ee6e8bb 7669 } else {
b0109805 7670 tmp = load_reg(s, i);
9ee6e8bb 7671 }
b0109805 7672 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7673 }
7674 j++;
7675 /* no need to add after the last transfer */
7676 if (j != n)
b0109805 7677 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7678 }
7679 }
7680 if (insn & (1 << 21)) {
7681 /* write back */
7682 if (insn & (1 << 23)) {
7683 if (insn & (1 << 24)) {
7684 /* pre increment */
7685 } else {
7686 /* post increment */
b0109805 7687 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7688 }
7689 } else {
7690 if (insn & (1 << 24)) {
7691 /* pre decrement */
7692 if (n != 1)
b0109805 7693 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7694 } else {
7695 /* post decrement */
b0109805 7696 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7697 }
7698 }
b0109805
PB
7699 store_reg(s, rn, addr);
7700 } else {
7d1b0095 7701 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7702 }
7703 if (loaded_base) {
b0109805 7704 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7705 }
7706 if ((insn & (1 << 22)) && !user) {
7707 /* Restore CPSR from SPSR. */
d9ba4830
PB
7708 tmp = load_cpu_field(spsr);
7709 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7710 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7711 s->is_jmp = DISAS_UPDATE;
7712 }
7713 }
7714 break;
7715 case 0xa:
7716 case 0xb:
7717 {
7718 int32_t offset;
7719
7720 /* branch (and link) */
7721 val = (int32_t)s->pc;
7722 if (insn & (1 << 24)) {
7d1b0095 7723 tmp = tcg_temp_new_i32();
5e3f878a
PB
7724 tcg_gen_movi_i32(tmp, val);
7725 store_reg(s, 14, tmp);
9ee6e8bb
PB
7726 }
7727 offset = (((int32_t)insn << 8) >> 8);
7728 val += (offset << 2) + 4;
7729 gen_jmp(s, val);
7730 }
7731 break;
7732 case 0xc:
7733 case 0xd:
7734 case 0xe:
7735 /* Coprocessor. */
7736 if (disas_coproc_insn(env, s, insn))
7737 goto illegal_op;
7738 break;
7739 case 0xf:
7740 /* swi */
5e3f878a 7741 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7742 s->is_jmp = DISAS_SWI;
7743 break;
7744 default:
7745 illegal_op:
bc4a0de0 7746 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7747 break;
7748 }
7749 }
7750}
7751
7752/* Return true if this is a Thumb-2 logical op. */
7753static int
7754thumb2_logic_op(int op)
7755{
7756 return (op < 8);
7757}
7758
7759/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7760 then set condition code flags based on the result of the operation.
7761 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7762 to the high bit of T1.
7763 Returns zero if the opcode is valid. */
7764
7765static int
396e467c 7766gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7767{
7768 int logic_cc;
7769
7770 logic_cc = 0;
7771 switch (op) {
7772 case 0: /* and */
396e467c 7773 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7774 logic_cc = conds;
7775 break;
7776 case 1: /* bic */
f669df27 7777 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7778 logic_cc = conds;
7779 break;
7780 case 2: /* orr */
396e467c 7781 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7782 logic_cc = conds;
7783 break;
7784 case 3: /* orn */
29501f1b 7785 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7786 logic_cc = conds;
7787 break;
7788 case 4: /* eor */
396e467c 7789 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7790 logic_cc = conds;
7791 break;
7792 case 8: /* add */
7793 if (conds)
396e467c 7794 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7795 else
396e467c 7796 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7797 break;
7798 case 10: /* adc */
7799 if (conds)
396e467c 7800 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7801 else
396e467c 7802 gen_adc(t0, t1);
9ee6e8bb
PB
7803 break;
7804 case 11: /* sbc */
7805 if (conds)
396e467c 7806 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7807 else
396e467c 7808 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7809 break;
7810 case 13: /* sub */
7811 if (conds)
396e467c 7812 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7813 else
396e467c 7814 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7815 break;
7816 case 14: /* rsb */
7817 if (conds)
396e467c 7818 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7819 else
396e467c 7820 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7821 break;
7822 default: /* 5, 6, 7, 9, 12, 15. */
7823 return 1;
7824 }
7825 if (logic_cc) {
396e467c 7826 gen_logic_CC(t0);
9ee6e8bb 7827 if (shifter_out)
396e467c 7828 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7829 }
7830 return 0;
7831}
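
/* A minimal usage sketch, mirroring the call sites later in this file: the
 * caller decodes the operands, frees the second temporary itself and treats a
 * nonzero return as an illegal opcode, e.g.
 *
 *     logic_cc = conds && thumb2_logic_op(op);
 *     gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
 *     if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
 *         goto illegal_op;
 *     tcg_temp_free_i32(tmp2);
 */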
7832
7833/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7834 is not legal. */
7835static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7836{
b0109805 7837 uint32_t insn, imm, shift, offset;
9ee6e8bb 7838 uint32_t rd, rn, rm, rs;
b26eefb6 7839 TCGv tmp;
6ddbc6e4
PB
7840 TCGv tmp2;
7841 TCGv tmp3;
b0109805 7842 TCGv addr;
a7812ae4 7843 TCGv_i64 tmp64;
9ee6e8bb
PB
7844 int op;
7845 int shiftop;
7846 int conds;
7847 int logic_cc;
7848
7849 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7850 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7851         /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb 7852            16-bit instructions to get correct prefetch abort behavior.  */
7853 insn = insn_hw1;
7854 if ((insn & (1 << 12)) == 0) {
be5e7a76 7855 ARCH(5);
9ee6e8bb
PB
7856 /* Second half of blx. */
7857 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7858 tmp = load_reg(s, 14);
7859 tcg_gen_addi_i32(tmp, tmp, offset);
7860 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7861
7d1b0095 7862 tmp2 = tcg_temp_new_i32();
b0109805 7863 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7864 store_reg(s, 14, tmp2);
7865 gen_bx(s, tmp);
9ee6e8bb
PB
7866 return 0;
7867 }
7868 if (insn & (1 << 11)) {
7869 /* Second half of bl. */
7870 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7871 tmp = load_reg(s, 14);
6a0d8a1d 7872 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7873
7d1b0095 7874 tmp2 = tcg_temp_new_i32();
b0109805 7875 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7876 store_reg(s, 14, tmp2);
7877 gen_bx(s, tmp);
9ee6e8bb
PB
7878 return 0;
7879 }
7880 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7881 /* Instruction spans a page boundary. Implement it as two
7882                16-bit instructions in case the second half causes a
7883 prefetch abort. */
7884 offset = ((int32_t)insn << 21) >> 9;
396e467c 7885 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
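            /* Only the prefix is executed here: LR is loaded with PC plus the
               sign-extended high part of the offset, and the "second half of
               bl/blx" cases above finish the branch when the next halfword is
               decoded. */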
9ee6e8bb
PB
7886 return 0;
7887 }
7888 /* Fall through to 32-bit decode. */
7889 }
7890
7891 insn = lduw_code(s->pc);
7892 s->pc += 2;
7893 insn |= (uint32_t)insn_hw1 << 16;
7894
7895 if ((insn & 0xf800e800) != 0xf000e800) {
7896 ARCH(6T2);
7897 }
7898
7899 rn = (insn >> 16) & 0xf;
7900 rs = (insn >> 12) & 0xf;
7901 rd = (insn >> 8) & 0xf;
7902 rm = insn & 0xf;
7903 switch ((insn >> 25) & 0xf) {
7904 case 0: case 1: case 2: case 3:
7905 /* 16-bit instructions. Should never happen. */
7906 abort();
7907 case 4:
7908 if (insn & (1 << 22)) {
7909 /* Other load/store, table branch. */
7910 if (insn & 0x01200000) {
7911 /* Load/store doubleword. */
7912 if (rn == 15) {
7d1b0095 7913 addr = tcg_temp_new_i32();
b0109805 7914 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7915 } else {
b0109805 7916 addr = load_reg(s, rn);
9ee6e8bb
PB
7917 }
7918 offset = (insn & 0xff) * 4;
7919 if ((insn & (1 << 23)) == 0)
7920 offset = -offset;
7921 if (insn & (1 << 24)) {
b0109805 7922 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7923 offset = 0;
7924 }
7925 if (insn & (1 << 20)) {
7926 /* ldrd */
b0109805
PB
7927 tmp = gen_ld32(addr, IS_USER(s));
7928 store_reg(s, rs, tmp);
7929 tcg_gen_addi_i32(addr, addr, 4);
7930 tmp = gen_ld32(addr, IS_USER(s));
7931 store_reg(s, rd, tmp);
9ee6e8bb
PB
7932 } else {
7933 /* strd */
b0109805
PB
7934 tmp = load_reg(s, rs);
7935 gen_st32(tmp, addr, IS_USER(s));
7936 tcg_gen_addi_i32(addr, addr, 4);
7937 tmp = load_reg(s, rd);
7938 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7939 }
7940 if (insn & (1 << 21)) {
7941 /* Base writeback. */
7942 if (rn == 15)
7943 goto illegal_op;
b0109805
PB
7944 tcg_gen_addi_i32(addr, addr, offset - 4);
7945 store_reg(s, rn, addr);
7946 } else {
7d1b0095 7947 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7948 }
7949 } else if ((insn & (1 << 23)) == 0) {
7950 /* Load/store exclusive word. */
3174f8e9 7951 addr = tcg_temp_local_new();
98a46317 7952 load_reg_var(s, addr, rn);
426f5abc 7953 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7954 if (insn & (1 << 20)) {
426f5abc 7955 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7956 } else {
426f5abc 7957 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7958 }
3174f8e9 7959 tcg_temp_free(addr);
9ee6e8bb
PB
7960 } else if ((insn & (1 << 6)) == 0) {
7961 /* Table Branch. */
7962 if (rn == 15) {
7d1b0095 7963 addr = tcg_temp_new_i32();
b0109805 7964 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7965 } else {
b0109805 7966 addr = load_reg(s, rn);
9ee6e8bb 7967 }
b26eefb6 7968 tmp = load_reg(s, rm);
b0109805 7969 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7970 if (insn & (1 << 4)) {
7971 /* tbh */
b0109805 7972 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 7973 tcg_temp_free_i32(tmp);
b0109805 7974 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7975 } else { /* tbb */
7d1b0095 7976 tcg_temp_free_i32(tmp);
b0109805 7977 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7978 }
7d1b0095 7979 tcg_temp_free_i32(addr);
b0109805
PB
7980 tcg_gen_shli_i32(tmp, tmp, 1);
7981 tcg_gen_addi_i32(tmp, tmp, s->pc);
7982 store_reg(s, 15, tmp);
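                /* TBB/TBH: the byte or halfword loaded from the table at
                   rn + rm (rm scaled by 2 for tbh) is doubled and added to the
                   PC to form the branch target. */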
9ee6e8bb
PB
7983 } else {
7984 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7985 ARCH(7);
9ee6e8bb 7986 op = (insn >> 4) & 0x3;
426f5abc
PB
7987 if (op == 2) {
7988 goto illegal_op;
7989 }
3174f8e9 7990 addr = tcg_temp_local_new();
98a46317 7991 load_reg_var(s, addr, rn);
9ee6e8bb 7992 if (insn & (1 << 20)) {
426f5abc 7993 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7994 } else {
426f5abc 7995 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7996 }
3174f8e9 7997 tcg_temp_free(addr);
9ee6e8bb
PB
7998 }
7999 } else {
8000 /* Load/store multiple, RFE, SRS. */
8001 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8002 /* Not available in user mode. */
b0109805 8003 if (IS_USER(s))
9ee6e8bb
PB
8004 goto illegal_op;
8005 if (insn & (1 << 20)) {
8006 /* rfe */
b0109805
PB
8007 addr = load_reg(s, rn);
8008 if ((insn & (1 << 24)) == 0)
8009 tcg_gen_addi_i32(addr, addr, -8);
8010 /* Load PC into tmp and CPSR into tmp2. */
8011 tmp = gen_ld32(addr, 0);
8012 tcg_gen_addi_i32(addr, addr, 4);
8013 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8014 if (insn & (1 << 21)) {
8015 /* Base writeback. */
b0109805
PB
8016 if (insn & (1 << 24)) {
8017 tcg_gen_addi_i32(addr, addr, 4);
8018 } else {
8019 tcg_gen_addi_i32(addr, addr, -4);
8020 }
8021 store_reg(s, rn, addr);
8022 } else {
7d1b0095 8023 tcg_temp_free_i32(addr);
9ee6e8bb 8024 }
b0109805 8025 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8026 } else {
8027 /* srs */
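                    /* SRS pushes LR and the CPSR onto the stack of the mode
                       given by the low five bits of the instruction, using
                       that mode's banked r13 as the base address. */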
8028 op = (insn & 0x1f);
7d1b0095 8029 addr = tcg_temp_new_i32();
39ea3d4e
PM
8030 tmp = tcg_const_i32(op);
8031 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8032 tcg_temp_free_i32(tmp);
9ee6e8bb 8033 if ((insn & (1 << 24)) == 0) {
b0109805 8034 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8035 }
b0109805
PB
8036 tmp = load_reg(s, 14);
8037 gen_st32(tmp, addr, 0);
8038 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8039 tmp = tcg_temp_new_i32();
b0109805
PB
8040 gen_helper_cpsr_read(tmp);
8041 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8042 if (insn & (1 << 21)) {
8043 if ((insn & (1 << 24)) == 0) {
b0109805 8044 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8045 } else {
b0109805 8046 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8047 }
39ea3d4e
PM
8048 tmp = tcg_const_i32(op);
8049 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8050 tcg_temp_free_i32(tmp);
b0109805 8051 } else {
7d1b0095 8052 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8053 }
8054 }
8055 } else {
5856d44e
YO
8056 int i, loaded_base = 0;
8057 TCGv loaded_var;
9ee6e8bb 8058 /* Load/store multiple. */
b0109805 8059 addr = load_reg(s, rn);
9ee6e8bb
PB
8060 offset = 0;
8061 for (i = 0; i < 16; i++) {
8062 if (insn & (1 << i))
8063 offset += 4;
8064 }
8065 if (insn & (1 << 24)) {
b0109805 8066 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8067 }
8068
5856d44e 8069 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8070 for (i = 0; i < 16; i++) {
8071 if ((insn & (1 << i)) == 0)
8072 continue;
8073 if (insn & (1 << 20)) {
8074 /* Load. */
b0109805 8075 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8076 if (i == 15) {
b0109805 8077 gen_bx(s, tmp);
5856d44e
YO
8078 } else if (i == rn) {
8079 loaded_var = tmp;
8080 loaded_base = 1;
9ee6e8bb 8081 } else {
b0109805 8082 store_reg(s, i, tmp);
9ee6e8bb
PB
8083 }
8084 } else {
8085 /* Store. */
b0109805
PB
8086 tmp = load_reg(s, i);
8087 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8088 }
b0109805 8089 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8090 }
5856d44e
YO
8091 if (loaded_base) {
8092 store_reg(s, rn, loaded_var);
8093 }
9ee6e8bb
PB
8094 if (insn & (1 << 21)) {
8095 /* Base register writeback. */
8096 if (insn & (1 << 24)) {
b0109805 8097 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8098 }
8099 /* Fault if writeback register is in register list. */
8100 if (insn & (1 << rn))
8101 goto illegal_op;
b0109805
PB
8102 store_reg(s, rn, addr);
8103 } else {
7d1b0095 8104 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8105 }
8106 }
8107 }
8108 break;
2af9ab77
JB
8109 case 5:
8110
9ee6e8bb 8111 op = (insn >> 21) & 0xf;
2af9ab77
JB
8112 if (op == 6) {
8113 /* Halfword pack. */
8114 tmp = load_reg(s, rn);
8115 tmp2 = load_reg(s, rm);
8116 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8117 if (insn & (1 << 5)) {
8118 /* pkhtb */
8119 if (shift == 0)
8120 shift = 31;
8121 tcg_gen_sari_i32(tmp2, tmp2, shift);
8122 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8123 tcg_gen_ext16u_i32(tmp2, tmp2);
8124 } else {
8125 /* pkhbt */
8126 if (shift)
8127 tcg_gen_shli_i32(tmp2, tmp2, shift);
8128 tcg_gen_ext16u_i32(tmp, tmp);
8129 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8130 }
8131 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8132 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8133 store_reg(s, rd, tmp);
8134 } else {
2af9ab77
JB
8135 /* Data processing register constant shift. */
8136 if (rn == 15) {
7d1b0095 8137 tmp = tcg_temp_new_i32();
2af9ab77
JB
8138 tcg_gen_movi_i32(tmp, 0);
8139 } else {
8140 tmp = load_reg(s, rn);
8141 }
8142 tmp2 = load_reg(s, rm);
8143
8144 shiftop = (insn >> 4) & 3;
8145 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8146 conds = (insn & (1 << 20)) != 0;
8147 logic_cc = (conds && thumb2_logic_op(op));
8148 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8149 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8150 goto illegal_op;
7d1b0095 8151 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8152 if (rd != 15) {
8153 store_reg(s, rd, tmp);
8154 } else {
7d1b0095 8155 tcg_temp_free_i32(tmp);
2af9ab77 8156 }
3174f8e9 8157 }
9ee6e8bb
PB
8158 break;
8159 case 13: /* Misc data processing. */
8160 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8161 if (op < 4 && (insn & 0xf000) != 0xf000)
8162 goto illegal_op;
8163 switch (op) {
8164 case 0: /* Register controlled shift. */
8984bd2e
PB
8165 tmp = load_reg(s, rn);
8166 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8167 if ((insn & 0x70) != 0)
8168 goto illegal_op;
8169 op = (insn >> 21) & 3;
8984bd2e
PB
8170 logic_cc = (insn & (1 << 20)) != 0;
8171 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8172 if (logic_cc)
8173 gen_logic_CC(tmp);
21aeb343 8174 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8175 break;
8176 case 1: /* Sign/zero extend. */
5e3f878a 8177 tmp = load_reg(s, rm);
9ee6e8bb 8178 shift = (insn >> 4) & 3;
1301f322 8179             /* ??? In many cases it's not necessary to do a
9ee6e8bb 8180                rotate; a shift is sufficient.  */
8181 if (shift != 0)
f669df27 8182 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8183 op = (insn >> 20) & 7;
8184 switch (op) {
5e3f878a
PB
8185 case 0: gen_sxth(tmp); break;
8186 case 1: gen_uxth(tmp); break;
8187 case 2: gen_sxtb16(tmp); break;
8188 case 3: gen_uxtb16(tmp); break;
8189 case 4: gen_sxtb(tmp); break;
8190 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8191 default: goto illegal_op;
8192 }
8193 if (rn != 15) {
5e3f878a 8194 tmp2 = load_reg(s, rn);
9ee6e8bb 8195 if ((op >> 1) == 1) {
5e3f878a 8196 gen_add16(tmp, tmp2);
9ee6e8bb 8197 } else {
5e3f878a 8198 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8199 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8200 }
8201 }
5e3f878a 8202 store_reg(s, rd, tmp);
9ee6e8bb
PB
8203 break;
8204 case 2: /* SIMD add/subtract. */
8205 op = (insn >> 20) & 7;
8206 shift = (insn >> 4) & 7;
8207 if ((op & 3) == 3 || (shift & 3) == 3)
8208 goto illegal_op;
6ddbc6e4
PB
8209 tmp = load_reg(s, rn);
8210 tmp2 = load_reg(s, rm);
8211 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8212 tcg_temp_free_i32(tmp2);
6ddbc6e4 8213 store_reg(s, rd, tmp);
9ee6e8bb
PB
8214 break;
8215 case 3: /* Other data processing. */
8216 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8217 if (op < 4) {
8218 /* Saturating add/subtract. */
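                /* QADD/QDADD/QSUB/QDSUB: bit 0 of op doubles rn (with
                   saturation) before the operation, bit 1 selects subtraction,
                   and the other operand is rm. */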
d9ba4830
PB
8219 tmp = load_reg(s, rn);
8220 tmp2 = load_reg(s, rm);
9ee6e8bb 8221 if (op & 1)
4809c612
JB
8222 gen_helper_double_saturate(tmp, tmp);
8223 if (op & 2)
d9ba4830 8224 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8225 else
d9ba4830 8226 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8227 tcg_temp_free_i32(tmp2);
9ee6e8bb 8228 } else {
d9ba4830 8229 tmp = load_reg(s, rn);
9ee6e8bb
PB
8230 switch (op) {
8231 case 0x0a: /* rbit */
d9ba4830 8232 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8233 break;
8234 case 0x08: /* rev */
66896cb8 8235 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8236 break;
8237 case 0x09: /* rev16 */
d9ba4830 8238 gen_rev16(tmp);
9ee6e8bb
PB
8239 break;
8240 case 0x0b: /* revsh */
d9ba4830 8241 gen_revsh(tmp);
9ee6e8bb
PB
8242 break;
8243 case 0x10: /* sel */
d9ba4830 8244 tmp2 = load_reg(s, rm);
7d1b0095 8245 tmp3 = tcg_temp_new_i32();
6ddbc6e4 8246 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 8247 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8248 tcg_temp_free_i32(tmp3);
8249 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8250 break;
8251 case 0x18: /* clz */
d9ba4830 8252 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8253 break;
8254 default:
8255 goto illegal_op;
8256 }
8257 }
d9ba4830 8258 store_reg(s, rd, tmp);
9ee6e8bb
PB
8259 break;
8260 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8261 op = (insn >> 4) & 0xf;
d9ba4830
PB
8262 tmp = load_reg(s, rn);
8263 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8264 switch ((insn >> 20) & 7) {
8265 case 0: /* 32 x 32 -> 32 */
d9ba4830 8266 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8267 tcg_temp_free_i32(tmp2);
9ee6e8bb 8268 if (rs != 15) {
d9ba4830 8269 tmp2 = load_reg(s, rs);
9ee6e8bb 8270 if (op)
d9ba4830 8271 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8272 else
d9ba4830 8273 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8274 tcg_temp_free_i32(tmp2);
9ee6e8bb 8275 }
9ee6e8bb
PB
8276 break;
8277 case 1: /* 16 x 16 -> 32 */
d9ba4830 8278 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8279 tcg_temp_free_i32(tmp2);
9ee6e8bb 8280 if (rs != 15) {
d9ba4830
PB
8281 tmp2 = load_reg(s, rs);
8282 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8283 tcg_temp_free_i32(tmp2);
9ee6e8bb 8284 }
9ee6e8bb
PB
8285 break;
8286 case 2: /* Dual multiply add. */
8287 case 4: /* Dual multiply subtract. */
8288 if (op)
d9ba4830
PB
8289 gen_swap_half(tmp2);
8290 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8291 if (insn & (1 << 22)) {
e1d177b9 8292 /* This subtraction cannot overflow. */
d9ba4830 8293 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8294 } else {
e1d177b9
PM
8295 /* This addition cannot overflow 32 bits;
8296 * however it may overflow considered as a signed
8297 * operation, in which case we must set the Q flag.
8298 */
8299 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8300 }
7d1b0095 8301 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8302 if (rs != 15)
8303 {
d9ba4830
PB
8304 tmp2 = load_reg(s, rs);
8305 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8306 tcg_temp_free_i32(tmp2);
9ee6e8bb 8307 }
9ee6e8bb
PB
8308 break;
8309 case 3: /* 32 * 16 -> 32msb */
8310 if (op)
d9ba4830 8311 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8312 else
d9ba4830 8313 gen_sxth(tmp2);
a7812ae4
PB
8314 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8315 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8316 tmp = tcg_temp_new_i32();
a7812ae4 8317 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8318 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8319 if (rs != 15)
8320 {
d9ba4830
PB
8321 tmp2 = load_reg(s, rs);
8322 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8323 tcg_temp_free_i32(tmp2);
9ee6e8bb 8324 }
9ee6e8bb 8325 break;
838fa72d
AJ
8326 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8327 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8328 if (rs != 15) {
838fa72d
AJ
8329 tmp = load_reg(s, rs);
8330 if (insn & (1 << 20)) {
8331 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8332 } else {
838fa72d 8333 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8334 }
2c0262af 8335 }
838fa72d
AJ
8336 if (insn & (1 << 4)) {
8337 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8338 }
8339 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8340 tmp = tcg_temp_new_i32();
838fa72d
AJ
8341 tcg_gen_trunc_i64_i32(tmp, tmp64);
8342 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8343 break;
8344 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8345 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8346 tcg_temp_free_i32(tmp2);
9ee6e8bb 8347 if (rs != 15) {
d9ba4830
PB
8348 tmp2 = load_reg(s, rs);
8349 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8350 tcg_temp_free_i32(tmp2);
5fd46862 8351 }
9ee6e8bb 8352 break;
2c0262af 8353 }
d9ba4830 8354 store_reg(s, rd, tmp);
2c0262af 8355 break;
9ee6e8bb
PB
8356 case 6: case 7: /* 64-bit multiply, Divide. */
8357 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8358 tmp = load_reg(s, rn);
8359 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8360 if ((op & 0x50) == 0x10) {
8361 /* sdiv, udiv */
8362 if (!arm_feature(env, ARM_FEATURE_DIV))
8363 goto illegal_op;
8364 if (op & 0x20)
5e3f878a 8365 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8366 else
5e3f878a 8367 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8368 tcg_temp_free_i32(tmp2);
5e3f878a 8369 store_reg(s, rd, tmp);
9ee6e8bb
PB
8370 } else if ((op & 0xe) == 0xc) {
8371 /* Dual multiply accumulate long. */
8372 if (op & 1)
5e3f878a
PB
8373 gen_swap_half(tmp2);
8374 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8375 if (op & 0x10) {
5e3f878a 8376 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8377 } else {
5e3f878a 8378 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8379 }
7d1b0095 8380 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8381 /* BUGFIX */
8382 tmp64 = tcg_temp_new_i64();
8383 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8384 tcg_temp_free_i32(tmp);
a7812ae4
PB
8385 gen_addq(s, tmp64, rs, rd);
8386 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8387 tcg_temp_free_i64(tmp64);
2c0262af 8388 } else {
9ee6e8bb
PB
8389 if (op & 0x20) {
8390 /* Unsigned 64-bit multiply */
a7812ae4 8391 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8392 } else {
9ee6e8bb
PB
8393 if (op & 8) {
8394 /* smlalxy */
5e3f878a 8395 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8396 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8397 tmp64 = tcg_temp_new_i64();
8398 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8399 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8400 } else {
8401 /* Signed 64-bit multiply */
a7812ae4 8402 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8403 }
b5ff1b31 8404 }
9ee6e8bb
PB
8405 if (op & 4) {
8406 /* umaal */
a7812ae4
PB
8407 gen_addq_lo(s, tmp64, rs);
8408 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8409 } else if (op & 0x40) {
8410 /* 64-bit accumulate. */
a7812ae4 8411 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8412 }
a7812ae4 8413 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8414 tcg_temp_free_i64(tmp64);
5fd46862 8415 }
2c0262af 8416 break;
9ee6e8bb
PB
8417 }
8418 break;
8419 case 6: case 7: case 14: case 15:
8420 /* Coprocessor. */
8421 if (((insn >> 24) & 3) == 3) {
8422 /* Translate into the equivalent ARM encoding. */
f06053e3 8423 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8424 if (disas_neon_data_insn(env, s, insn))
8425 goto illegal_op;
8426 } else {
8427 if (insn & (1 << 28))
8428 goto illegal_op;
8429 if (disas_coproc_insn (env, s, insn))
8430 goto illegal_op;
8431 }
8432 break;
8433 case 8: case 9: case 10: case 11:
8434 if (insn & (1 << 15)) {
8435 /* Branches, misc control. */
8436 if (insn & 0x5000) {
8437 /* Unconditional branch. */
8438 /* signextend(hw1[10:0]) -> offset[:12]. */
8439 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8440 /* hw1[10:0] -> offset[11:1]. */
8441 offset |= (insn & 0x7ff) << 1;
8442 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8443 offset[24:22] already have the same value because of the
8444 sign extension above. */
8445 offset ^= ((~insn) & (1 << 13)) << 10;
8446 offset ^= ((~insn) & (1 << 11)) << 11;
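                /* hw2[13] and hw2[11] are the J1/J2 bits; XORing their
                   complement with the sign bit recovers offset bits 23 and 22
                   (I1/I2) as described above. */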
8447
9ee6e8bb
PB
8448 if (insn & (1 << 14)) {
8449 /* Branch and link. */
3174f8e9 8450 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8451 }
3b46e624 8452
b0109805 8453 offset += s->pc;
9ee6e8bb
PB
8454 if (insn & (1 << 12)) {
8455 /* b/bl */
b0109805 8456 gen_jmp(s, offset);
9ee6e8bb
PB
8457 } else {
8458 /* blx */
b0109805 8459 offset &= ~(uint32_t)2;
be5e7a76 8460 /* thumb2 bx, no need to check */
b0109805 8461 gen_bx_im(s, offset);
2c0262af 8462 }
9ee6e8bb
PB
8463 } else if (((insn >> 23) & 7) == 7) {
8464 /* Misc control */
8465 if (insn & (1 << 13))
8466 goto illegal_op;
8467
8468 if (insn & (1 << 26)) {
8469 /* Secure monitor call (v6Z) */
8470 goto illegal_op; /* not implemented. */
2c0262af 8471 } else {
9ee6e8bb
PB
8472 op = (insn >> 20) & 7;
8473 switch (op) {
8474 case 0: /* msr cpsr. */
8475 if (IS_M(env)) {
8984bd2e
PB
8476 tmp = load_reg(s, rn);
8477 addr = tcg_const_i32(insn & 0xff);
8478 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8479 tcg_temp_free_i32(addr);
7d1b0095 8480 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8481 gen_lookup_tb(s);
8482 break;
8483 }
8484 /* fall through */
8485 case 1: /* msr spsr. */
8486 if (IS_M(env))
8487 goto illegal_op;
2fbac54b
FN
8488 tmp = load_reg(s, rn);
8489 if (gen_set_psr(s,
9ee6e8bb 8490 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8491 op == 1, tmp))
9ee6e8bb
PB
8492 goto illegal_op;
8493 break;
8494 case 2: /* cps, nop-hint. */
8495 if (((insn >> 8) & 7) == 0) {
8496 gen_nop_hint(s, insn & 0xff);
8497 }
8498 /* Implemented as NOP in user mode. */
8499 if (IS_USER(s))
8500 break;
8501 offset = 0;
8502 imm = 0;
8503 if (insn & (1 << 10)) {
8504 if (insn & (1 << 7))
8505 offset |= CPSR_A;
8506 if (insn & (1 << 6))
8507 offset |= CPSR_I;
8508 if (insn & (1 << 5))
8509 offset |= CPSR_F;
8510 if (insn & (1 << 9))
8511 imm = CPSR_A | CPSR_I | CPSR_F;
8512 }
8513 if (insn & (1 << 8)) {
8514 offset |= 0x1f;
8515 imm |= (insn & 0x1f);
8516 }
8517 if (offset) {
2fbac54b 8518 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8519 }
8520 break;
8521 case 3: /* Special control operations. */
426f5abc 8522 ARCH(7);
9ee6e8bb
PB
8523 op = (insn >> 4) & 0xf;
8524 switch (op) {
8525 case 2: /* clrex */
426f5abc 8526 gen_clrex(s);
9ee6e8bb
PB
8527 break;
8528 case 4: /* dsb */
8529 case 5: /* dmb */
8530 case 6: /* isb */
8531 /* These execute as NOPs. */
9ee6e8bb
PB
8532 break;
8533 default:
8534 goto illegal_op;
8535 }
8536 break;
8537 case 4: /* bxj */
8538 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8539 tmp = load_reg(s, rn);
8540 gen_bx(s, tmp);
9ee6e8bb
PB
8541 break;
8542 case 5: /* Exception return. */
b8b45b68
RV
8543 if (IS_USER(s)) {
8544 goto illegal_op;
8545 }
8546 if (rn != 14 || rd != 15) {
8547 goto illegal_op;
8548 }
8549 tmp = load_reg(s, rn);
8550 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8551 gen_exception_return(s, tmp);
8552 break;
9ee6e8bb 8553 case 6: /* mrs cpsr. */
7d1b0095 8554 tmp = tcg_temp_new_i32();
9ee6e8bb 8555 if (IS_M(env)) {
8984bd2e
PB
8556 addr = tcg_const_i32(insn & 0xff);
8557 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8558 tcg_temp_free_i32(addr);
9ee6e8bb 8559 } else {
8984bd2e 8560 gen_helper_cpsr_read(tmp);
9ee6e8bb 8561 }
8984bd2e 8562 store_reg(s, rd, tmp);
9ee6e8bb
PB
8563 break;
8564 case 7: /* mrs spsr. */
8565 /* Not accessible in user mode. */
8566 if (IS_USER(s) || IS_M(env))
8567 goto illegal_op;
d9ba4830
PB
8568 tmp = load_cpu_field(spsr);
8569 store_reg(s, rd, tmp);
9ee6e8bb 8570 break;
2c0262af
FB
8571 }
8572 }
9ee6e8bb
PB
8573 } else {
8574 /* Conditional branch. */
8575 op = (insn >> 22) & 0xf;
8576 /* Generate a conditional jump to next instruction. */
8577 s->condlabel = gen_new_label();
d9ba4830 8578 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8579 s->condjmp = 1;
8580
8581 /* offset[11:1] = insn[10:0] */
8582 offset = (insn & 0x7ff) << 1;
8583 /* offset[17:12] = insn[21:16]. */
8584 offset |= (insn & 0x003f0000) >> 4;
8585 /* offset[31:20] = insn[26]. */
8586 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8587 /* offset[18] = insn[13]. */
8588 offset |= (insn & (1 << 13)) << 5;
8589 /* offset[19] = insn[11]. */
8590 offset |= (insn & (1 << 11)) << 8;
8591
8592 /* jump to the offset */
b0109805 8593 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8594 }
8595 } else {
8596 /* Data processing immediate. */
8597 if (insn & (1 << 25)) {
8598 if (insn & (1 << 24)) {
8599 if (insn & (1 << 20))
8600 goto illegal_op;
8601 /* Bitfield/Saturate. */
8602 op = (insn >> 21) & 7;
8603 imm = insn & 0x1f;
8604 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8605 if (rn == 15) {
7d1b0095 8606 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8607 tcg_gen_movi_i32(tmp, 0);
8608 } else {
8609 tmp = load_reg(s, rn);
8610 }
9ee6e8bb
PB
8611 switch (op) {
8612 case 2: /* Signed bitfield extract. */
8613 imm++;
8614 if (shift + imm > 32)
8615 goto illegal_op;
8616 if (imm < 32)
6ddbc6e4 8617 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8618 break;
8619 case 6: /* Unsigned bitfield extract. */
8620 imm++;
8621 if (shift + imm > 32)
8622 goto illegal_op;
8623 if (imm < 32)
6ddbc6e4 8624 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8625 break;
8626 case 3: /* Bitfield insert/clear. */
8627 if (imm < shift)
8628 goto illegal_op;
8629 imm = imm + 1 - shift;
8630 if (imm != 32) {
6ddbc6e4 8631 tmp2 = load_reg(s, rd);
8f8e3aa4 8632 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8633 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8634 }
8635 break;
8636 case 7:
8637 goto illegal_op;
8638 default: /* Saturate. */
9ee6e8bb
PB
8639 if (shift) {
8640 if (op & 1)
6ddbc6e4 8641 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8642 else
6ddbc6e4 8643 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8644 }
6ddbc6e4 8645 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8646 if (op & 4) {
8647 /* Unsigned. */
9ee6e8bb 8648 if ((op & 1) && shift == 0)
6ddbc6e4 8649 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8650 else
6ddbc6e4 8651 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8652 } else {
9ee6e8bb 8653 /* Signed. */
9ee6e8bb 8654 if ((op & 1) && shift == 0)
6ddbc6e4 8655 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8656 else
6ddbc6e4 8657 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8658 }
b75263d6 8659 tcg_temp_free_i32(tmp2);
9ee6e8bb 8660 break;
2c0262af 8661 }
6ddbc6e4 8662 store_reg(s, rd, tmp);
9ee6e8bb
PB
8663 } else {
8664 imm = ((insn & 0x04000000) >> 15)
8665 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8666 if (insn & (1 << 22)) {
8667 /* 16-bit immediate. */
8668 imm |= (insn >> 4) & 0xf000;
8669 if (insn & (1 << 23)) {
8670 /* movt */
5e3f878a 8671 tmp = load_reg(s, rd);
86831435 8672 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8673 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8674 } else {
9ee6e8bb 8675 /* movw */
7d1b0095 8676 tmp = tcg_temp_new_i32();
5e3f878a 8677 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8678 }
8679 } else {
9ee6e8bb
PB
8680 /* Add/sub 12-bit immediate. */
8681 if (rn == 15) {
b0109805 8682 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8683 if (insn & (1 << 23))
b0109805 8684 offset -= imm;
9ee6e8bb 8685 else
b0109805 8686 offset += imm;
7d1b0095 8687 tmp = tcg_temp_new_i32();
5e3f878a 8688 tcg_gen_movi_i32(tmp, offset);
2c0262af 8689 } else {
5e3f878a 8690 tmp = load_reg(s, rn);
9ee6e8bb 8691 if (insn & (1 << 23))
5e3f878a 8692 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8693 else
5e3f878a 8694 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8695 }
9ee6e8bb 8696 }
5e3f878a 8697 store_reg(s, rd, tmp);
191abaa2 8698 }
9ee6e8bb
PB
8699 } else {
8700 int shifter_out = 0;
8701 /* modified 12-bit immediate. */
8702 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8703 imm = (insn & 0xff);
8704 switch (shift) {
8705 case 0: /* XY */
8706 /* Nothing to do. */
8707 break;
8708 case 1: /* 00XY00XY */
8709 imm |= imm << 16;
8710 break;
8711 case 2: /* XY00XY00 */
8712 imm |= imm << 16;
8713 imm <<= 8;
8714 break;
8715 case 3: /* XYXYXYXY */
8716 imm |= imm << 16;
8717 imm |= imm << 8;
8718 break;
8719 default: /* Rotated constant. */
8720 shift = (shift << 1) | (imm >> 7);
8721 imm |= 0x80;
8722 imm = imm << (32 - shift);
8723 shifter_out = 1;
8724 break;
b5ff1b31 8725 }
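                /* Worked examples of the expansion above, for imm8 == 0xab:
                   shift 1 -> 0x00ab00ab, shift 2 -> 0xab00ab00,
                   shift 3 -> 0xabababab; the rotated-constant case forces
                   bit 7 of the byte and rotates it into position, e.g. a
                   rotation of 8 places 0x80 | imm[6:0] in the top byte. */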
7d1b0095 8726 tmp2 = tcg_temp_new_i32();
3174f8e9 8727 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8728 rn = (insn >> 16) & 0xf;
3174f8e9 8729 if (rn == 15) {
7d1b0095 8730 tmp = tcg_temp_new_i32();
3174f8e9
FN
8731 tcg_gen_movi_i32(tmp, 0);
8732 } else {
8733 tmp = load_reg(s, rn);
8734 }
9ee6e8bb
PB
8735 op = (insn >> 21) & 0xf;
8736 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8737 shifter_out, tmp, tmp2))
9ee6e8bb 8738 goto illegal_op;
7d1b0095 8739 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8740 rd = (insn >> 8) & 0xf;
8741 if (rd != 15) {
3174f8e9
FN
8742 store_reg(s, rd, tmp);
8743 } else {
7d1b0095 8744 tcg_temp_free_i32(tmp);
2c0262af 8745 }
2c0262af 8746 }
9ee6e8bb
PB
8747 }
8748 break;
8749 case 12: /* Load/store single data item. */
8750 {
8751 int postinc = 0;
8752 int writeback = 0;
b0109805 8753 int user;
9ee6e8bb
PB
8754 if ((insn & 0x01100000) == 0x01000000) {
8755 if (disas_neon_ls_insn(env, s, insn))
c1713132 8756 goto illegal_op;
9ee6e8bb
PB
8757 break;
8758 }
a2fdc890
PM
8759 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8760 if (rs == 15) {
8761 if (!(insn & (1 << 20))) {
8762 goto illegal_op;
8763 }
8764 if (op != 2) {
8765                /* Byte or halfword load space with dest == r15: memory hints.
8766 * Catch them early so we don't emit pointless addressing code.
8767 * This space is a mix of:
8768 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8769 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8770 * cores)
8771 * unallocated hints, which must be treated as NOPs
8772 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8773 * which is easiest for the decoding logic
8774 * Some space which must UNDEF
8775 */
8776 int op1 = (insn >> 23) & 3;
8777 int op2 = (insn >> 6) & 0x3f;
8778 if (op & 2) {
8779 goto illegal_op;
8780 }
8781 if (rn == 15) {
8782 /* UNPREDICTABLE or unallocated hint */
8783 return 0;
8784 }
8785 if (op1 & 1) {
8786 return 0; /* PLD* or unallocated hint */
8787 }
8788 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8789 return 0; /* PLD* or unallocated hint */
8790 }
8791 /* UNDEF space, or an UNPREDICTABLE */
8792 return 1;
8793 }
8794 }
b0109805 8795 user = IS_USER(s);
9ee6e8bb 8796 if (rn == 15) {
7d1b0095 8797 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8798 /* PC relative. */
8799 /* s->pc has already been incremented by 4. */
8800 imm = s->pc & 0xfffffffc;
8801 if (insn & (1 << 23))
8802 imm += insn & 0xfff;
8803 else
8804 imm -= insn & 0xfff;
b0109805 8805 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8806 } else {
b0109805 8807 addr = load_reg(s, rn);
9ee6e8bb
PB
8808 if (insn & (1 << 23)) {
8809 /* Positive offset. */
8810 imm = insn & 0xfff;
b0109805 8811 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8812 } else {
9ee6e8bb 8813 imm = insn & 0xff;
2a0308c5
PM
8814 switch ((insn >> 8) & 0xf) {
8815 case 0x0: /* Shifted Register. */
9ee6e8bb 8816 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8817 if (shift > 3) {
8818 tcg_temp_free_i32(addr);
18c9b560 8819 goto illegal_op;
2a0308c5 8820 }
b26eefb6 8821 tmp = load_reg(s, rm);
9ee6e8bb 8822 if (shift)
b26eefb6 8823 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8824 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8825 tcg_temp_free_i32(tmp);
9ee6e8bb 8826 break;
2a0308c5 8827 case 0xc: /* Negative offset. */
b0109805 8828 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8829 break;
2a0308c5 8830 case 0xe: /* User privilege. */
b0109805
PB
8831 tcg_gen_addi_i32(addr, addr, imm);
8832 user = 1;
9ee6e8bb 8833 break;
2a0308c5 8834 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8835 imm = -imm;
8836 /* Fall through. */
2a0308c5 8837 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8838 postinc = 1;
8839 writeback = 1;
8840 break;
2a0308c5 8841 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8842 imm = -imm;
8843 /* Fall through. */
2a0308c5 8844 case 0xf: /* Pre-increment. */
b0109805 8845 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8846 writeback = 1;
8847 break;
8848 default:
2a0308c5 8849 tcg_temp_free_i32(addr);
b7bcbe95 8850 goto illegal_op;
9ee6e8bb
PB
8851 }
8852 }
8853 }
9ee6e8bb
PB
8854 if (insn & (1 << 20)) {
8855 /* Load. */
a2fdc890
PM
8856 switch (op) {
8857 case 0: tmp = gen_ld8u(addr, user); break;
8858 case 4: tmp = gen_ld8s(addr, user); break;
8859 case 1: tmp = gen_ld16u(addr, user); break;
8860 case 5: tmp = gen_ld16s(addr, user); break;
8861 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8862 default:
8863 tcg_temp_free_i32(addr);
8864 goto illegal_op;
a2fdc890
PM
8865 }
8866 if (rs == 15) {
8867 gen_bx(s, tmp);
9ee6e8bb 8868 } else {
a2fdc890 8869 store_reg(s, rs, tmp);
9ee6e8bb
PB
8870 }
8871 } else {
8872 /* Store. */
b0109805 8873 tmp = load_reg(s, rs);
9ee6e8bb 8874 switch (op) {
b0109805
PB
8875 case 0: gen_st8(tmp, addr, user); break;
8876 case 1: gen_st16(tmp, addr, user); break;
8877 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8878 default:
8879 tcg_temp_free_i32(addr);
8880 goto illegal_op;
b7bcbe95 8881 }
2c0262af 8882 }
9ee6e8bb 8883 if (postinc)
b0109805
PB
8884 tcg_gen_addi_i32(addr, addr, imm);
8885 if (writeback) {
8886 store_reg(s, rn, addr);
8887 } else {
7d1b0095 8888 tcg_temp_free_i32(addr);
b0109805 8889 }
9ee6e8bb
PB
8890 }
8891 break;
8892 default:
8893 goto illegal_op;
2c0262af 8894 }
9ee6e8bb
PB
8895 return 0;
8896illegal_op:
8897 return 1;
2c0262af
FB
8898}
8899
9ee6e8bb 8900static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8901{
8902 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8903 int32_t offset;
8904 int i;
b26eefb6 8905 TCGv tmp;
d9ba4830 8906 TCGv tmp2;
b0109805 8907 TCGv addr;
99c475ab 8908
9ee6e8bb
PB
8909 if (s->condexec_mask) {
8910 cond = s->condexec_cond;
bedd2912
JB
8911 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8912 s->condlabel = gen_new_label();
8913 gen_test_cc(cond ^ 1, s->condlabel);
8914 s->condjmp = 1;
8915 }
9ee6e8bb
PB
8916 }
8917
b5ff1b31 8918 insn = lduw_code(s->pc);
99c475ab 8919 s->pc += 2;
b5ff1b31 8920
99c475ab
FB
8921 switch (insn >> 12) {
8922 case 0: case 1:
396e467c 8923
99c475ab
FB
8924 rd = insn & 7;
8925 op = (insn >> 11) & 3;
8926 if (op == 3) {
8927 /* add/subtract */
8928 rn = (insn >> 3) & 7;
396e467c 8929 tmp = load_reg(s, rn);
99c475ab
FB
8930 if (insn & (1 << 10)) {
8931 /* immediate */
7d1b0095 8932 tmp2 = tcg_temp_new_i32();
396e467c 8933 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8934 } else {
8935 /* reg */
8936 rm = (insn >> 6) & 7;
396e467c 8937 tmp2 = load_reg(s, rm);
99c475ab 8938 }
9ee6e8bb
PB
8939 if (insn & (1 << 9)) {
8940 if (s->condexec_mask)
396e467c 8941 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8942 else
396e467c 8943 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8944 } else {
8945 if (s->condexec_mask)
396e467c 8946 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8947 else
396e467c 8948 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8949 }
7d1b0095 8950 tcg_temp_free_i32(tmp2);
396e467c 8951 store_reg(s, rd, tmp);
99c475ab
FB
8952 } else {
8953 /* shift immediate */
8954 rm = (insn >> 3) & 7;
8955 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8956 tmp = load_reg(s, rm);
8957 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8958 if (!s->condexec_mask)
8959 gen_logic_CC(tmp);
8960 store_reg(s, rd, tmp);
99c475ab
FB
8961 }
8962 break;
8963 case 2: case 3:
8964 /* arithmetic large immediate */
8965 op = (insn >> 11) & 3;
8966 rd = (insn >> 8) & 0x7;
396e467c 8967 if (op == 0) { /* mov */
7d1b0095 8968 tmp = tcg_temp_new_i32();
396e467c 8969 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8970 if (!s->condexec_mask)
396e467c
FN
8971 gen_logic_CC(tmp);
8972 store_reg(s, rd, tmp);
8973 } else {
8974 tmp = load_reg(s, rd);
7d1b0095 8975 tmp2 = tcg_temp_new_i32();
396e467c
FN
8976 tcg_gen_movi_i32(tmp2, insn & 0xff);
8977 switch (op) {
8978 case 1: /* cmp */
8979 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8980 tcg_temp_free_i32(tmp);
8981 tcg_temp_free_i32(tmp2);
396e467c
FN
8982 break;
8983 case 2: /* add */
8984 if (s->condexec_mask)
8985 tcg_gen_add_i32(tmp, tmp, tmp2);
8986 else
8987 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 8988 tcg_temp_free_i32(tmp2);
396e467c
FN
8989 store_reg(s, rd, tmp);
8990 break;
8991 case 3: /* sub */
8992 if (s->condexec_mask)
8993 tcg_gen_sub_i32(tmp, tmp, tmp2);
8994 else
8995 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 8996 tcg_temp_free_i32(tmp2);
396e467c
FN
8997 store_reg(s, rd, tmp);
8998 break;
8999 }
99c475ab 9000 }
99c475ab
FB
9001 break;
9002 case 4:
9003 if (insn & (1 << 11)) {
9004 rd = (insn >> 8) & 7;
5899f386
FB
9005 /* load pc-relative. Bit 1 of PC is ignored. */
9006 val = s->pc + 2 + ((insn & 0xff) * 4);
9007 val &= ~(uint32_t)2;
7d1b0095 9008 addr = tcg_temp_new_i32();
b0109805
PB
9009 tcg_gen_movi_i32(addr, val);
9010 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9011 tcg_temp_free_i32(addr);
b0109805 9012 store_reg(s, rd, tmp);
99c475ab
FB
9013 break;
9014 }
9015 if (insn & (1 << 10)) {
9016 /* data processing extended or blx */
9017 rd = (insn & 7) | ((insn >> 4) & 8);
9018 rm = (insn >> 3) & 0xf;
9019 op = (insn >> 8) & 3;
9020 switch (op) {
9021 case 0: /* add */
396e467c
FN
9022 tmp = load_reg(s, rd);
9023 tmp2 = load_reg(s, rm);
9024 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9025 tcg_temp_free_i32(tmp2);
396e467c 9026 store_reg(s, rd, tmp);
99c475ab
FB
9027 break;
9028 case 1: /* cmp */
396e467c
FN
9029 tmp = load_reg(s, rd);
9030 tmp2 = load_reg(s, rm);
9031 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9032 tcg_temp_free_i32(tmp2);
9033 tcg_temp_free_i32(tmp);
99c475ab
FB
9034 break;
9035 case 2: /* mov/cpy */
396e467c
FN
9036 tmp = load_reg(s, rm);
9037 store_reg(s, rd, tmp);
99c475ab
FB
9038 break;
9039 case 3:/* branch [and link] exchange thumb register */
b0109805 9040 tmp = load_reg(s, rm);
99c475ab 9041 if (insn & (1 << 7)) {
be5e7a76 9042 ARCH(5);
99c475ab 9043 val = (uint32_t)s->pc | 1;
7d1b0095 9044 tmp2 = tcg_temp_new_i32();
b0109805
PB
9045 tcg_gen_movi_i32(tmp2, val);
9046 store_reg(s, 14, tmp2);
99c475ab 9047 }
be5e7a76 9048 /* already thumb, no need to check */
d9ba4830 9049 gen_bx(s, tmp);
99c475ab
FB
9050 break;
9051 }
9052 break;
9053 }
9054
9055 /* data processing register */
9056 rd = insn & 7;
9057 rm = (insn >> 3) & 7;
9058 op = (insn >> 6) & 0xf;
9059 if (op == 2 || op == 3 || op == 4 || op == 7) {
9060 /* the shift/rotate ops want the operands backwards */
9061 val = rm;
9062 rm = rd;
9063 rd = val;
9064 val = 1;
9065 } else {
9066 val = 0;
9067 }
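        /* val marks that the result will be produced in tmp2; after the swap
           above, rm names the destination register, which the store at the
           end of this case relies on. */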
9068
396e467c 9069 if (op == 9) { /* neg */
7d1b0095 9070 tmp = tcg_temp_new_i32();
396e467c
FN
9071 tcg_gen_movi_i32(tmp, 0);
9072 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9073 tmp = load_reg(s, rd);
9074 } else {
9075 TCGV_UNUSED(tmp);
9076 }
99c475ab 9077
396e467c 9078 tmp2 = load_reg(s, rm);
5899f386 9079 switch (op) {
99c475ab 9080 case 0x0: /* and */
396e467c 9081 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9082 if (!s->condexec_mask)
396e467c 9083 gen_logic_CC(tmp);
99c475ab
FB
9084 break;
9085 case 0x1: /* eor */
396e467c 9086 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9087 if (!s->condexec_mask)
396e467c 9088 gen_logic_CC(tmp);
99c475ab
FB
9089 break;
9090 case 0x2: /* lsl */
9ee6e8bb 9091 if (s->condexec_mask) {
396e467c 9092 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 9093 } else {
396e467c
FN
9094 gen_helper_shl_cc(tmp2, tmp2, tmp);
9095 gen_logic_CC(tmp2);
9ee6e8bb 9096 }
99c475ab
FB
9097 break;
9098 case 0x3: /* lsr */
9ee6e8bb 9099 if (s->condexec_mask) {
396e467c 9100 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 9101 } else {
396e467c
FN
9102 gen_helper_shr_cc(tmp2, tmp2, tmp);
9103 gen_logic_CC(tmp2);
9ee6e8bb 9104 }
99c475ab
FB
9105 break;
9106 case 0x4: /* asr */
9ee6e8bb 9107 if (s->condexec_mask) {
396e467c 9108 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 9109 } else {
396e467c
FN
9110 gen_helper_sar_cc(tmp2, tmp2, tmp);
9111 gen_logic_CC(tmp2);
9ee6e8bb 9112 }
99c475ab
FB
9113 break;
9114 case 0x5: /* adc */
9ee6e8bb 9115 if (s->condexec_mask)
396e467c 9116 gen_adc(tmp, tmp2);
9ee6e8bb 9117 else
396e467c 9118 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
9119 break;
9120 case 0x6: /* sbc */
9ee6e8bb 9121 if (s->condexec_mask)
396e467c 9122 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9123 else
396e467c 9124 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
9125 break;
9126 case 0x7: /* ror */
9ee6e8bb 9127 if (s->condexec_mask) {
f669df27
AJ
9128 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9129 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9130 } else {
396e467c
FN
9131 gen_helper_ror_cc(tmp2, tmp2, tmp);
9132 gen_logic_CC(tmp2);
9ee6e8bb 9133 }
99c475ab
FB
9134 break;
9135 case 0x8: /* tst */
396e467c
FN
9136 tcg_gen_and_i32(tmp, tmp, tmp2);
9137 gen_logic_CC(tmp);
99c475ab 9138 rd = 16;
5899f386 9139 break;
99c475ab 9140 case 0x9: /* neg */
9ee6e8bb 9141 if (s->condexec_mask)
396e467c 9142 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9143 else
396e467c 9144 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9145 break;
9146 case 0xa: /* cmp */
396e467c 9147 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9148 rd = 16;
9149 break;
9150 case 0xb: /* cmn */
396e467c 9151 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
9152 rd = 16;
9153 break;
9154 case 0xc: /* orr */
396e467c 9155 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9156 if (!s->condexec_mask)
396e467c 9157 gen_logic_CC(tmp);
99c475ab
FB
9158 break;
9159 case 0xd: /* mul */
7b2919a0 9160 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9161 if (!s->condexec_mask)
396e467c 9162 gen_logic_CC(tmp);
99c475ab
FB
9163 break;
9164 case 0xe: /* bic */
f669df27 9165 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9166 if (!s->condexec_mask)
396e467c 9167 gen_logic_CC(tmp);
99c475ab
FB
9168 break;
9169 case 0xf: /* mvn */
396e467c 9170 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9171 if (!s->condexec_mask)
396e467c 9172 gen_logic_CC(tmp2);
99c475ab 9173 val = 1;
5899f386 9174 rm = rd;
99c475ab
FB
9175 break;
9176 }
9177 if (rd != 16) {
396e467c
FN
9178 if (val) {
9179 store_reg(s, rm, tmp2);
9180 if (op != 0xf)
7d1b0095 9181 tcg_temp_free_i32(tmp);
396e467c
FN
9182 } else {
9183 store_reg(s, rd, tmp);
7d1b0095 9184 tcg_temp_free_i32(tmp2);
396e467c
FN
9185 }
9186 } else {
7d1b0095
PM
9187 tcg_temp_free_i32(tmp);
9188 tcg_temp_free_i32(tmp2);
99c475ab
FB
9189 }
9190 break;
9191
9192 case 5:
9193 /* load/store register offset. */
9194 rd = insn & 7;
9195 rn = (insn >> 3) & 7;
9196 rm = (insn >> 6) & 7;
9197 op = (insn >> 9) & 7;
b0109805 9198 addr = load_reg(s, rn);
b26eefb6 9199 tmp = load_reg(s, rm);
b0109805 9200 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9201 tcg_temp_free_i32(tmp);
99c475ab
FB
9202
9203 if (op < 3) /* store */
b0109805 9204 tmp = load_reg(s, rd);
99c475ab
FB
9205
9206 switch (op) {
9207 case 0: /* str */
b0109805 9208 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9209 break;
9210 case 1: /* strh */
b0109805 9211 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9212 break;
9213 case 2: /* strb */
b0109805 9214 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9215 break;
9216 case 3: /* ldrsb */
b0109805 9217 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9218 break;
9219 case 4: /* ldr */
b0109805 9220 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9221 break;
9222 case 5: /* ldrh */
b0109805 9223 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9224 break;
9225 case 6: /* ldrb */
b0109805 9226 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9227 break;
9228 case 7: /* ldrsh */
b0109805 9229 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9230 break;
9231 }
9232 if (op >= 3) /* load */
b0109805 9233 store_reg(s, rd, tmp);
7d1b0095 9234 tcg_temp_free_i32(addr);
99c475ab
FB
9235 break;
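 /* Decode sketch for the register-offset form above, assuming the usual
 Thumb layout (rd = insn[2:0], rn = insn[5:3], rm = insn[8:6],
 op = insn[11:9]): e.g. insn 0x5888 gives rd = 0, rn = 1, rm = 2,
 op = 4, i.e. roughly "ldr r0, [r1, r2]". */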
9236
9237 case 6:
9238 /* load/store word immediate offset */
9239 rd = insn & 7;
9240 rn = (insn >> 3) & 7;
b0109805 9241 addr = load_reg(s, rn);
99c475ab 9242 val = (insn >> 4) & 0x7c;
b0109805 9243 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9244
9245 if (insn & (1 << 11)) {
9246 /* load */
b0109805
PB
9247 tmp = gen_ld32(addr, IS_USER(s));
9248 store_reg(s, rd, tmp);
99c475ab
FB
9249 } else {
9250 /* store */
b0109805
PB
9251 tmp = load_reg(s, rd);
9252 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9253 }
7d1b0095 9254 tcg_temp_free_i32(addr);
99c475ab
FB
9255 break;
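 /* Note on the immediate above: (insn >> 4) & 0x7c extracts the 5-bit
 word offset from insn[10:6] and scales it by 4 in one step (shift by
 4 instead of 6, mask keeps bits 6:2). For example an encoded imm5 of
 4 yields val = 16, i.e. four words. */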
9256
9257 case 7:
9258 /* load/store byte immediate offset */
9259 rd = insn & 7;
9260 rn = (insn >> 3) & 7;
b0109805 9261 addr = load_reg(s, rn);
99c475ab 9262 val = (insn >> 6) & 0x1f;
b0109805 9263 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9264
9265 if (insn & (1 << 11)) {
9266 /* load */
b0109805
PB
9267 tmp = gen_ld8u(addr, IS_USER(s));
9268 store_reg(s, rd, tmp);
99c475ab
FB
9269 } else {
9270 /* store */
b0109805
PB
9271 tmp = load_reg(s, rd);
9272 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9273 }
7d1b0095 9274 tcg_temp_free_i32(addr);
99c475ab
FB
9275 break;
9276
9277 case 8:
9278 /* load/store halfword immediate offset */
9279 rd = insn & 7;
9280 rn = (insn >> 3) & 7;
b0109805 9281 addr = load_reg(s, rn);
99c475ab 9282 val = (insn >> 5) & 0x3e;
b0109805 9283 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9284
9285 if (insn & (1 << 11)) {
9286 /* load */
b0109805
PB
9287 tmp = gen_ld16u(addr, IS_USER(s));
9288 store_reg(s, rd, tmp);
99c475ab
FB
9289 } else {
9290 /* store */
b0109805
PB
9291 tmp = load_reg(s, rd);
9292 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9293 }
7d1b0095 9294 tcg_temp_free_i32(addr);
99c475ab
FB
9295 break;
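 /* Corresponding immediates for the byte and halfword forms above:
 case 7 uses (insn >> 6) & 0x1f, the raw 5-bit offset (byte accesses
 are unscaled), while case 8 uses (insn >> 5) & 0x3e, which extracts
 insn[10:6] already scaled by 2 for halfword accesses. */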
9296
9297 case 9:
9298 /* load/store from stack */
9299 rd = (insn >> 8) & 7;
b0109805 9300 addr = load_reg(s, 13);
99c475ab 9301 val = (insn & 0xff) * 4;
b0109805 9302 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9303
9304 if (insn & (1 << 11)) {
9305 /* load */
b0109805
PB
9306 tmp = gen_ld32(addr, IS_USER(s));
9307 store_reg(s, rd, tmp);
99c475ab
FB
9308 } else {
9309 /* store */
b0109805
PB
9310 tmp = load_reg(s, rd);
9311 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9312 }
7d1b0095 9313 tcg_temp_free_i32(addr);
99c475ab
FB
9314 break;
9315
9316 case 10:
9317 /* add to high reg */
9318 rd = (insn >> 8) & 7;
5899f386
FB
9319 if (insn & (1 << 11)) {
9320 /* SP */
5e3f878a 9321 tmp = load_reg(s, 13);
5899f386
FB
9322 } else {
9323 /* PC. bit 1 is ignored. */
7d1b0095 9324 tmp = tcg_temp_new_i32();
5e3f878a 9325 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9326 }
99c475ab 9327 val = (insn & 0xff) * 4;
5e3f878a
PB
9328 tcg_gen_addi_i32(tmp, tmp, val);
9329 store_reg(s, rd, tmp);
99c475ab
FB
9330 break;
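 /* PC-relative note for the case above: (s->pc + 2) is the Thumb-visible
 PC (instruction address + 4), and clearing bit 1 word-aligns it, so
 the result matches the architectural Align(PC, 4) + imm8 * 4 used by
 adr / add rd, pc, #imm. */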
9331
9332 case 11:
9333 /* misc */
9334 op = (insn >> 8) & 0xf;
9335 switch (op) {
9336 case 0:
9337 /* adjust stack pointer */
b26eefb6 9338 tmp = load_reg(s, 13);
99c475ab
FB
9339 val = (insn & 0x7f) * 4;
9340 if (insn & (1 << 7))
6a0d8a1d 9341 val = -(int32_t)val;
b26eefb6
PB
9342 tcg_gen_addi_i32(tmp, tmp, val);
9343 store_reg(s, 13, tmp);
99c475ab
FB
9344 break;
9345
9ee6e8bb
PB
9346 case 2: /* sign/zero extend. */
9347 ARCH(6);
9348 rd = insn & 7;
9349 rm = (insn >> 3) & 7;
b0109805 9350 tmp = load_reg(s, rm);
9ee6e8bb 9351 switch ((insn >> 6) & 3) {
b0109805
PB
9352 case 0: gen_sxth(tmp); break;
9353 case 1: gen_sxtb(tmp); break;
9354 case 2: gen_uxth(tmp); break;
9355 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9356 }
b0109805 9357 store_reg(s, rd, tmp);
9ee6e8bb 9358 break;
99c475ab
FB
9359 case 4: case 5: case 0xc: case 0xd:
9360 /* push/pop */
b0109805 9361 addr = load_reg(s, 13);
5899f386
FB
9362 if (insn & (1 << 8))
9363 offset = 4;
99c475ab 9364 else
5899f386
FB
9365 offset = 0;
9366 for (i = 0; i < 8; i++) {
9367 if (insn & (1 << i))
9368 offset += 4;
9369 }
9370 if ((insn & (1 << 11)) == 0) {
b0109805 9371 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9372 }
99c475ab
FB
9373 for (i = 0; i < 8; i++) {
9374 if (insn & (1 << i)) {
9375 if (insn & (1 << 11)) {
9376 /* pop */
b0109805
PB
9377 tmp = gen_ld32(addr, IS_USER(s));
9378 store_reg(s, i, tmp);
99c475ab
FB
9379 } else {
9380 /* push */
b0109805
PB
9381 tmp = load_reg(s, i);
9382 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9383 }
5899f386 9384 /* advance to the next address. */
b0109805 9385 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9386 }
9387 }
a50f5b91 9388 TCGV_UNUSED(tmp);
99c475ab
FB
9389 if (insn & (1 << 8)) {
9390 if (insn & (1 << 11)) {
9391 /* pop pc */
b0109805 9392 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9393 /* don't set the pc until the rest of the instruction
9394 has completed */
9395 } else {
9396 /* push lr */
b0109805
PB
9397 tmp = load_reg(s, 14);
9398 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9399 }
b0109805 9400 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9401 }
5899f386 9402 if ((insn & (1 << 11)) == 0) {
b0109805 9403 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9404 }
99c475ab 9405 /* write back the new stack pointer */
b0109805 9406 store_reg(s, 13, addr);
99c475ab 9407 /* set the new PC value */
be5e7a76
DES
9408 if ((insn & 0x0900) == 0x0900) {
9409 store_reg_from_load(env, s, 15, tmp);
9410 }
99c475ab
FB
9411 break;
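 /* Flow of the push/pop case above, summarised: offset counts 4 bytes
 per listed register (plus 4 when bit 8 adds lr on push / pc on pop).
 For push the base is lowered by offset before the transfers; each
 transfer then advances the address by 4, and push lowers it by offset
 once more, so the value written back to r13 ends up as SP - offset
 for push and SP + offset for pop. */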
9412
9ee6e8bb
PB
9413 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9414 rm = insn & 7;
d9ba4830 9415 tmp = load_reg(s, rm);
9ee6e8bb
PB
9416 s->condlabel = gen_new_label();
9417 s->condjmp = 1;
9418 if (insn & (1 << 11))
cb63669a 9419 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9420 else
cb63669a 9421 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9422 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9423 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9424 val = (uint32_t)s->pc + 2;
9425 val += offset;
9426 gen_jmp(s, val);
9427 break;
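 /* Offset assembly for cbz/cbnz above: (insn & 0xf8) >> 2 places imm5
 (insn[7:3]) at bits 5:1 and (insn & 0x200) >> 3 places the i bit
 (insn[9]) at bit 6, i.e. offset = (i:imm5) << 1. With i = 0 and
 imm5 = 6, the target is the instruction address + 4 + 12, s->pc + 2
 being the Thumb-visible PC. */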
9428
9429 case 15: /* IT, nop-hint. */
9430 if ((insn & 0xf) == 0) {
9431 gen_nop_hint(s, (insn >> 4) & 0xf);
9432 break;
9433 }
9434 /* If Then. */
9435 s->condexec_cond = (insn >> 4) & 0xe;
9436 s->condexec_mask = insn & 0x1f;
9437 /* No actual code generated for this insn, just setup state. */
9438 break;
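 /* IT state layout used above (informal): (insn >> 4) & 0xe keeps
 firstcond (insn[7:4]) with its low bit cleared, while condexec_mask
 holds the five bits insn[4:0] = firstcond[0]:mask -- the same folded
 form that the per-instruction advance in the translation loop further
 down shifts one bit at a time. */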
9439
06c949e6 9440 case 0xe: /* bkpt */
be5e7a76 9441 ARCH(5);
bc4a0de0 9442 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9443 break;
9444
9ee6e8bb
PB
9445 case 0xa: /* rev */
9446 ARCH(6);
9447 rn = (insn >> 3) & 0x7;
9448 rd = insn & 0x7;
b0109805 9449 tmp = load_reg(s, rn);
9ee6e8bb 9450 switch ((insn >> 6) & 3) {
66896cb8 9451 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9452 case 1: gen_rev16(tmp); break;
9453 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9454 default: goto illegal_op;
9455 }
b0109805 9456 store_reg(s, rd, tmp);
9ee6e8bb
PB
9457 break;
9458
9459 case 6: /* cps */
9460 ARCH(6);
9461 if (IS_USER(s))
9462 break;
9463 if (IS_M(env)) {
8984bd2e 9464 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9465 /* PRIMASK */
8984bd2e
PB
9466 if (insn & 1) {
9467 addr = tcg_const_i32(16);
9468 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9469 tcg_temp_free_i32(addr);
8984bd2e 9470 }
9ee6e8bb 9471 /* FAULTMASK */
8984bd2e
PB
9472 if (insn & 2) {
9473 addr = tcg_const_i32(17);
9474 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9475 tcg_temp_free_i32(addr);
8984bd2e 9476 }
b75263d6 9477 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9478 gen_lookup_tb(s);
9479 } else {
9480 if (insn & (1 << 4))
9481 shift = CPSR_A | CPSR_I | CPSR_F;
9482 else
9483 shift = 0;
fa26df03 9484 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9485 }
9486 break;
9487
99c475ab
FB
9488 default:
9489 goto undef;
9490 }
9491 break;
9492
9493 case 12:
a7d3970d 9494 {
99c475ab 9495 /* load/store multiple */
a7d3970d
PM
9496 TCGv loaded_var;
9497 TCGV_UNUSED(loaded_var);
99c475ab 9498 rn = (insn >> 8) & 0x7;
b0109805 9499 addr = load_reg(s, rn);
99c475ab
FB
9500 for (i = 0; i < 8; i++) {
9501 if (insn & (1 << i)) {
99c475ab
FB
9502 if (insn & (1 << 11)) {
9503 /* load */
b0109805 9504 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9505 if (i == rn) {
9506 loaded_var = tmp;
9507 } else {
9508 store_reg(s, i, tmp);
9509 }
99c475ab
FB
9510 } else {
9511 /* store */
b0109805
PB
9512 tmp = load_reg(s, i);
9513 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9514 }
5899f386 9515 /* advance to the next address */
b0109805 9516 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9517 }
9518 }
b0109805 9519 if ((insn & (1 << rn)) == 0) {
a7d3970d 9520 /* base reg not in list: base register writeback */
b0109805
PB
9521 store_reg(s, rn, addr);
9522 } else {
a7d3970d
PM
9523 /* base reg in list: if load, complete it now */
9524 if (insn & (1 << 11)) {
9525 store_reg(s, rn, loaded_var);
9526 }
7d1b0095 9527 tcg_temp_free_i32(addr);
b0109805 9528 }
99c475ab 9529 break;
a7d3970d 9530 }
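 /* Behaviour of the multiple-transfer case above, summarised: the base
 register is written back with the advanced address only when it is
 not in the register list; when it is in the list, a load of the base
 is deferred in loaded_var and stored last, so the final base value is
 the loaded one rather than the incremented address. */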
99c475ab
FB
9531 case 13:
9532 /* conditional branch or swi */
9533 cond = (insn >> 8) & 0xf;
9534 if (cond == 0xe)
9535 goto undef;
9536
9537 if (cond == 0xf) {
9538 /* swi */
422ebf69 9539 gen_set_pc_im(s->pc);
9ee6e8bb 9540 s->is_jmp = DISAS_SWI;
99c475ab
FB
9541 break;
9542 }
9543 /* generate a conditional jump to next instruction */
e50e6a20 9544 s->condlabel = gen_new_label();
d9ba4830 9545 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9546 s->condjmp = 1;
99c475ab
FB
9547
9548 /* jump to the offset */
5899f386 9549 val = (uint32_t)s->pc + 2;
99c475ab 9550 offset = ((int32_t)insn << 24) >> 24;
5899f386 9551 val += offset << 1;
8aaca4c0 9552 gen_jmp(s, val);
99c475ab
FB
9553 break;
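 /* Branch offset arithmetic above, spelled out: ((int32_t)insn << 24)
 >> 24 sign-extends the low 8 bits, and the target is (s->pc + 2) +
 offset * 2, i.e. the Thumb PC (instruction address + 4) plus a
 halfword-scaled immediate. For example imm8 = 0xfe (-2) branches back
 to the instruction itself. Case 14 below uses the same trick with an
 11-bit immediate. */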
9554
9555 case 14:
358bf29e 9556 if (insn & (1 << 11)) {
9ee6e8bb
PB
9557 if (disas_thumb2_insn(env, s, insn))
9558 goto undef32;
358bf29e
PB
9559 break;
9560 }
9ee6e8bb 9561 /* unconditional branch */
99c475ab
FB
9562 val = (uint32_t)s->pc;
9563 offset = ((int32_t)insn << 21) >> 21;
9564 val += (offset << 1) + 2;
8aaca4c0 9565 gen_jmp(s, val);
99c475ab
FB
9566 break;
9567
9568 case 15:
9ee6e8bb 9569 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9570 goto undef32;
9ee6e8bb 9571 break;
99c475ab
FB
9572 }
9573 return;
9ee6e8bb 9574undef32:
bc4a0de0 9575 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9576 return;
9577illegal_op:
99c475ab 9578undef:
bc4a0de0 9579 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9580}
9581
2c0262af
FB
9582/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9583 basic block 'tb'. If search_pc is TRUE, also generate PC
9584 information for each intermediate instruction. */
2cfc5f17
TS
9585static inline void gen_intermediate_code_internal(CPUState *env,
9586 TranslationBlock *tb,
9587 int search_pc)
2c0262af
FB
9588{
9589 DisasContext dc1, *dc = &dc1;
a1d1bb31 9590 CPUBreakpoint *bp;
2c0262af
FB
9591 uint16_t *gen_opc_end;
9592 int j, lj;
0fa85d43 9593 target_ulong pc_start;
b5ff1b31 9594 uint32_t next_page_start;
2e70f6ef
PB
9595 int num_insns;
9596 int max_insns;
3b46e624 9597
2c0262af 9598 /* generate intermediate code */
0fa85d43 9599 pc_start = tb->pc;
3b46e624 9600
2c0262af
FB
9601 dc->tb = tb;
9602
2c0262af 9603 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9604
9605 dc->is_jmp = DISAS_NEXT;
9606 dc->pc = pc_start;
8aaca4c0 9607 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9608 dc->condjmp = 0;
7204ab88 9609 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9610 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9611 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9612#if !defined(CONFIG_USER_ONLY)
61f74d6a 9613 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9614#endif
5df8bac1 9615 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9616 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9617 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9618 cpu_F0s = tcg_temp_new_i32();
9619 cpu_F1s = tcg_temp_new_i32();
9620 cpu_F0d = tcg_temp_new_i64();
9621 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9622 cpu_V0 = cpu_F0d;
9623 cpu_V1 = cpu_F1d;
e677137d 9624 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9625 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9626 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9627 lj = -1;
2e70f6ef
PB
9628 num_insns = 0;
9629 max_insns = tb->cflags & CF_COUNT_MASK;
9630 if (max_insns == 0)
9631 max_insns = CF_COUNT_MASK;
9632
9633 gen_icount_start();
e12ce78d 9634
3849902c
PM
9635 tcg_clear_temp_count();
9636
e12ce78d
PM
9637 /* A note on handling of the condexec (IT) bits:
9638 *
9639 * We want to avoid the overhead of having to write the updated condexec
9640 * bits back to the CPUState for every instruction in an IT block. So:
9641 * (1) if the condexec bits are not already zero then we write
9642 * zero back into the CPUState now. This avoids complications trying
9643 * to do it at the end of the block. (For example if we don't do this
9644 * it's hard to identify whether we can safely skip writing condexec
9645 * at the end of the TB, which we definitely want to do for the case
9646 * where a TB doesn't do anything with the IT state at all.)
9647 * (2) if we are going to leave the TB then we call gen_set_condexec()
9648 * which will write the correct value into CPUState if zero is wrong.
9649 * This is done both for leaving the TB at the end, and for leaving
9650 * it because of an exception we know will happen, which is done in
9651 * gen_exception_insn(). The latter is necessary because we need to
9652 * leave the TB with the PC/IT state just prior to execution of the
9653 * instruction which caused the exception.
9654 * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
9655 * then the CPUState will be wrong and we need to reset it.
9656 * This is handled in the same way as restoration of the
9657 * PC in these situations: we will be called again with search_pc=1
9658 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9659 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9660 * this to restore the condexec bits.
e12ce78d
PM
9661 *
9662 * Note that there are no instructions which can read the condexec
9663 * bits, and none which can write non-static values to them, so
9664 * we don't need to care about whether CPUState is correct in the
9665 * middle of a TB.
9666 */
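 /* For reference: when search_pc is set, the loop below records this
 state packed as (condexec_cond << 4) | (condexec_mask >> 1) in
 gen_opc_condexec_bits[], and restore_state_to_opc() at the end of
 this file copies that packed value straight into env->condexec_bits. */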
9667
9ee6e8bb
PB
9668 /* Reset the conditional execution bits immediately. This avoids
9669 complications trying to do it at the end of the block. */
98eac7ca 9670 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9671 {
7d1b0095 9672 TCGv tmp = tcg_temp_new_i32();
8f01245e 9673 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9674 store_cpu_field(tmp, condexec_bits);
8f01245e 9675 }
2c0262af 9676 do {
fbb4a2e3
PB
9677#ifdef CONFIG_USER_ONLY
9678 /* Intercept jump to the magic kernel page. */
9679 if (dc->pc >= 0xffff0000) {
9680 /* We always get here via a jump, so we know we are not in a
9681 conditional execution block. */
9682 gen_exception(EXCP_KERNEL_TRAP);
9683 dc->is_jmp = DISAS_UPDATE;
9684 break;
9685 }
9686#else
9ee6e8bb
PB
9687 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9688 /* We always get here via a jump, so we know we are not in a
9689 conditional execution block. */
d9ba4830 9690 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9691 dc->is_jmp = DISAS_UPDATE;
9692 break;
9ee6e8bb
PB
9693 }
9694#endif
9695
72cf2d4f
BS
9696 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9697 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9698 if (bp->pc == dc->pc) {
bc4a0de0 9699 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9700 /* Advance PC so that clearing the breakpoint will
9701 invalidate this TB. */
9702 dc->pc += 2;
9703 goto done_generating;
1fddef4b
FB
9704 break;
9705 }
9706 }
9707 }
2c0262af
FB
9708 if (search_pc) {
9709 j = gen_opc_ptr - gen_opc_buf;
9710 if (lj < j) {
9711 lj++;
9712 while (lj < j)
9713 gen_opc_instr_start[lj++] = 0;
9714 }
0fa85d43 9715 gen_opc_pc[lj] = dc->pc;
e12ce78d 9716 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9717 gen_opc_instr_start[lj] = 1;
2e70f6ef 9718 gen_opc_icount[lj] = num_insns;
2c0262af 9719 }
e50e6a20 9720
2e70f6ef
PB
9721 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9722 gen_io_start();
9723
5642463a
PM
9724 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9725 tcg_gen_debug_insn_start(dc->pc);
9726 }
9727
7204ab88 9728 if (dc->thumb) {
9ee6e8bb
PB
9729 disas_thumb_insn(env, dc);
9730 if (dc->condexec_mask) {
9731 dc->condexec_cond = (dc->condexec_cond & 0xe)
9732 | ((dc->condexec_mask >> 4) & 1);
9733 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9734 if (dc->condexec_mask == 0) {
9735 dc->condexec_cond = 0;
9736 }
9737 }
9738 } else {
9739 disas_arm_insn(env, dc);
9740 }
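 /* Worked trace of the IT-state advance above, for a single-instruction
 IT block (condexec_mask = firstcond[0]:1000 after decoding IT): the
 advance that runs for the IT insn itself folds firstcond[0] back into
 condexec_cond and shifts the mask to 10000; after the one conditional
 instruction the next shift clears the mask, which in turn clears
 condexec_cond, ending the block. */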
e50e6a20
FB
9741
9742 if (dc->condjmp && !dc->is_jmp) {
9743 gen_set_label(dc->condlabel);
9744 dc->condjmp = 0;
9745 }
3849902c
PM
9746
9747 if (tcg_check_temp_count()) {
9748 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9749 }
9750
aaf2d97d 9751 /* Translation stops when a conditional branch is encountered.
e50e6a20 9752 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9753 * Also stop translation when a page boundary is reached. This
bf20dc07 9754 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9755 num_insns ++;
1fddef4b
FB
9756 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9757 !env->singlestep_enabled &&
1b530a6d 9758 !singlestep &&
2e70f6ef
PB
9759 dc->pc < next_page_start &&
9760 num_insns < max_insns);
9761
9762 if (tb->cflags & CF_LAST_IO) {
9763 if (dc->condjmp) {
9764 /* FIXME: This can theoretically happen with self-modifying
9765 code. */
9766 cpu_abort(env, "IO on conditional branch instruction");
9767 }
9768 gen_io_end();
9769 }
9ee6e8bb 9770
b5ff1b31 9771 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9772 instruction was a conditional branch or trap, and the PC has
9773 already been written. */
551bd27f 9774 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9775 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9776 if (dc->condjmp) {
9ee6e8bb
PB
9777 gen_set_condexec(dc);
9778 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9779 gen_exception(EXCP_SWI);
9ee6e8bb 9780 } else {
d9ba4830 9781 gen_exception(EXCP_DEBUG);
9ee6e8bb 9782 }
e50e6a20
FB
9783 gen_set_label(dc->condlabel);
9784 }
9785 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9786 gen_set_pc_im(dc->pc);
e50e6a20 9787 dc->condjmp = 0;
8aaca4c0 9788 }
9ee6e8bb
PB
9789 gen_set_condexec(dc);
9790 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9791 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9792 } else {
9793 /* FIXME: Single stepping a WFI insn will not halt
9794 the CPU. */
d9ba4830 9795 gen_exception(EXCP_DEBUG);
9ee6e8bb 9796 }
8aaca4c0 9797 } else {
9ee6e8bb
PB
9798 /* While branches must always occur at the end of an IT block,
9799 there are a few other things that can cause us to terminate
9800 the TB in the middle of an IT block:
9801 - Exception generating instructions (bkpt, swi, undefined).
9802 - Page boundaries.
9803 - Hardware watchpoints.
9804 Hardware breakpoints have already been handled and skip this code.
9805 */
9806 gen_set_condexec(dc);
8aaca4c0 9807 switch(dc->is_jmp) {
8aaca4c0 9808 case DISAS_NEXT:
6e256c93 9809 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9810 break;
9811 default:
9812 case DISAS_JUMP:
9813 case DISAS_UPDATE:
9814 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9815 tcg_gen_exit_tb(0);
8aaca4c0
FB
9816 break;
9817 case DISAS_TB_JUMP:
9818 /* nothing more to generate */
9819 break;
9ee6e8bb 9820 case DISAS_WFI:
d9ba4830 9821 gen_helper_wfi();
9ee6e8bb
PB
9822 break;
9823 case DISAS_SWI:
d9ba4830 9824 gen_exception(EXCP_SWI);
9ee6e8bb 9825 break;
8aaca4c0 9826 }
e50e6a20
FB
9827 if (dc->condjmp) {
9828 gen_set_label(dc->condlabel);
9ee6e8bb 9829 gen_set_condexec(dc);
6e256c93 9830 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9831 dc->condjmp = 0;
9832 }
2c0262af 9833 }
2e70f6ef 9834
9ee6e8bb 9835done_generating:
2e70f6ef 9836 gen_icount_end(tb, num_insns);
2c0262af
FB
9837 *gen_opc_ptr = INDEX_op_end;
9838
9839#ifdef DEBUG_DISAS
8fec2b8c 9840 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9841 qemu_log("----------------\n");
9842 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9843 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9844 qemu_log("\n");
2c0262af
FB
9845 }
9846#endif
b5ff1b31
FB
9847 if (search_pc) {
9848 j = gen_opc_ptr - gen_opc_buf;
9849 lj++;
9850 while (lj <= j)
9851 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9852 } else {
2c0262af 9853 tb->size = dc->pc - pc_start;
2e70f6ef 9854 tb->icount = num_insns;
b5ff1b31 9855 }
2c0262af
FB
9856}
9857
2cfc5f17 9858void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9859{
2cfc5f17 9860 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9861}
9862
2cfc5f17 9863void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9864{
2cfc5f17 9865 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9866}
9867
b5ff1b31
FB
9868static const char *cpu_mode_names[16] = {
9869 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9870 "???", "???", "???", "und", "???", "???", "???", "sys"
9871};
9ee6e8bb 9872
9a78eead 9873void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9874 int flags)
2c0262af
FB
9875{
9876 int i;
06e80fc9 9877#if 0
bc380d17 9878 union {
b7bcbe95
FB
9879 uint32_t i;
9880 float s;
9881 } s0, s1;
9882 CPU_DoubleU d;
a94a6abf
PB
9883 /* ??? This assumes float64 and double have the same layout.
9884 Oh well, it's only debug dumps. */
9885 union {
9886 float64 f64;
9887 double d;
9888 } d0;
06e80fc9 9889#endif
b5ff1b31 9890 uint32_t psr;
2c0262af
FB
9891
9892 for(i=0;i<16;i++) {
7fe48483 9893 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9894 if ((i % 4) == 3)
7fe48483 9895 cpu_fprintf(f, "\n");
2c0262af 9896 else
7fe48483 9897 cpu_fprintf(f, " ");
2c0262af 9898 }
b5ff1b31 9899 psr = cpsr_read(env);
687fa640
TS
9900 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9901 psr,
b5ff1b31
FB
9902 psr & (1 << 31) ? 'N' : '-',
9903 psr & (1 << 30) ? 'Z' : '-',
9904 psr & (1 << 29) ? 'C' : '-',
9905 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9906 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9907 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9908
5e3f878a 9909#if 0
b7bcbe95 9910 for (i = 0; i < 16; i++) {
8e96005d
FB
9911 d.d = env->vfp.regs[i];
9912 s0.i = d.l.lower;
9913 s1.i = d.l.upper;
a94a6abf
PB
9914 d0.f64 = d.d;
9915 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9916 i * 2, (int)s0.i, s0.s,
a94a6abf 9917 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9918 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9919 d0.d);
b7bcbe95 9920 }
40f137e1 9921 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9922#endif
2c0262af 9923}
a6b025d3 9924
e87b7cb0 9925void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
9926{
9927 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 9928 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 9929}
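/* Note: gen_opc_pc[] and gen_opc_condexec_bits[] are the per-instruction
   tables filled in by gen_intermediate_code_internal() when search_pc is
   set, so this restores both the PC and the packed IT state. */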