target-arm/translate.c
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
28#include "exec-all.h"
29#include "disas.h"
57fec1fe 30#include "tcg-op.h"
79383c9c 31#include "qemu-log.h"
1497c961 32
7b59220e 33#include "helper.h"
1497c961 34#define GEN_HELPER 1
7b59220e 35#include "helper.h"
2c0262af 36
be5e7a76
DES
37#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39/* currently all emulated v5 cores are also v5TE, so don't bother */
40#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
41#define ENABLE_ARCH_5J 0
42#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 46
86753403 47#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 48
2c0262af
FB
49/* internal defines */
50typedef struct DisasContext {
0fa85d43 51 target_ulong pc;
2c0262af 52 int is_jmp;
e50e6a20
FB
53 /* Nonzero if this instruction has been conditionally skipped. */
54 int condjmp;
55 /* The label that will be jumped to when the instruction is skipped. */
56 int condlabel;
9ee6e8bb
PB
57 /* Thumb-2 conditional execution bits. */
58 int condexec_mask;
59 int condexec_cond;
2c0262af 60 struct TranslationBlock *tb;
8aaca4c0 61 int singlestep_enabled;
5899f386 62 int thumb;
b5ff1b31
FB
63#if !defined(CONFIG_USER_ONLY)
64 int user;
65#endif
5df8bac1 66 int vfp_enabled;
69d1fc22
PM
67 int vec_len;
68 int vec_stride;
2c0262af
FB
69} DisasContext;
70
e12ce78d
PM
71static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
b5ff1b31
FB
73#if defined(CONFIG_USER_ONLY)
74#define IS_USER(s) 1
75#else
76#define IS_USER(s) (s->user)
77#endif
78
9ee6e8bb
PB
79/* These instructions trap after executing, so defer them until after the
80 conditional execution state has been updated. */
81#define DISAS_WFI 4
82#define DISAS_SWI 5
2c0262af 83
a7812ae4 84static TCGv_ptr cpu_env;
ad69471c 85/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 86static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 87static TCGv_i32 cpu_R[16];
426f5abc
PB
88static TCGv_i32 cpu_exclusive_addr;
89static TCGv_i32 cpu_exclusive_val;
90static TCGv_i32 cpu_exclusive_high;
91#ifdef CONFIG_USER_ONLY
92static TCGv_i32 cpu_exclusive_test;
93static TCGv_i32 cpu_exclusive_info;
94#endif
ad69471c 95
b26eefb6 96/* FIXME: These should be removed. */
a7812ae4
PB
97static TCGv cpu_F0s, cpu_F1s;
98static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 99
2e70f6ef
PB
100#include "gen-icount.h"
101
155c3eac
FN
102static const char *regnames[] =
103 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
105
b26eefb6
PB
106/* initialize TCG globals. */
107void arm_translate_init(void)
108{
155c3eac
FN
109 int i;
110
a7812ae4
PB
111 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
112
155c3eac
FN
113 for (i = 0; i < 16; i++) {
114 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, regs[i]),
116 regnames[i]);
117 }
426f5abc
PB
118 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_addr), "exclusive_addr");
120 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUState, exclusive_val), "exclusive_val");
122 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUState, exclusive_high), "exclusive_high");
124#ifdef CONFIG_USER_ONLY
125 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUState, exclusive_test), "exclusive_test");
127 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUState, exclusive_info), "exclusive_info");
129#endif
155c3eac 130
a7812ae4 131#define GEN_HELPER 2
7b59220e 132#include "helper.h"
b26eefb6
PB
133}
134
d9ba4830
PB
135static inline TCGv load_cpu_offset(int offset)
136{
7d1b0095 137 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
139 return tmp;
140}
141
142#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
143
144static inline void store_cpu_offset(TCGv var, int offset)
145{
146 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 147 tcg_temp_free_i32(var);
d9ba4830
PB
148}
149
150#define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUState, name))
152
b26eefb6
PB
153/* Set a variable to the value of a CPU register. */
154static void load_reg_var(DisasContext *s, TCGv var, int reg)
155{
156 if (reg == 15) {
157 uint32_t addr;
158 /* normally, since we updated PC, we need only add one insn */
159 if (s->thumb)
160 addr = (long)s->pc + 2;
161 else
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
164 } else {
155c3eac 165 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
166 }
167}
168
169/* Create a new temporary and set it to the value of a CPU register. */
170static inline TCGv load_reg(DisasContext *s, int reg)
171{
7d1b0095 172 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
173 load_reg_var(s, tmp, reg);
174 return tmp;
175}
176
177/* Set a CPU register. The source must be a temporary and will be
178 marked as dead. */
179static void store_reg(DisasContext *s, int reg, TCGv var)
180{
181 if (reg == 15) {
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
184 }
155c3eac 185 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 186 tcg_temp_free_i32(var);
b26eefb6
PB
187}
188
b26eefb6 189/* Value extensions. */
86831435
PB
190#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
192#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
194
1497c961
PB
195#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 197
b26eefb6 198
b75263d6
JR
199static inline void gen_set_cpsr(TCGv var, uint32_t mask)
200{
201 TCGv tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
204}
d9ba4830
PB
205/* Set NZCV flags from the high 4 bits of var. */
206#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
207
208static void gen_exception(int excp)
209{
7d1b0095 210 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
211 tcg_gen_movi_i32(tmp, excp);
212 gen_helper_exception(tmp);
7d1b0095 213 tcg_temp_free_i32(tmp);
d9ba4830
PB
214}
215
3670669c
PB
216static void gen_smul_dual(TCGv a, TCGv b)
217{
7d1b0095
PM
218 TCGv tmp1 = tcg_temp_new_i32();
219 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
220 tcg_gen_ext16s_i32(tmp1, a);
221 tcg_gen_ext16s_i32(tmp2, b);
3670669c 222 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 223 tcg_temp_free_i32(tmp2);
3670669c
PB
224 tcg_gen_sari_i32(a, a, 16);
225 tcg_gen_sari_i32(b, b, 16);
226 tcg_gen_mul_i32(b, b, a);
227 tcg_gen_mov_i32(a, tmp1);
7d1b0095 228 tcg_temp_free_i32(tmp1);
3670669c
PB
229}
230
231/* Byteswap each halfword. */
232static void gen_rev16(TCGv var)
233{
7d1b0095 234 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
235 tcg_gen_shri_i32(tmp, var, 8);
236 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
237 tcg_gen_shli_i32(var, var, 8);
238 tcg_gen_andi_i32(var, var, 0xff00ff00);
239 tcg_gen_or_i32(var, var, tmp);
7d1b0095 240 tcg_temp_free_i32(tmp);
3670669c
PB
241}
242
243/* Byteswap low halfword and sign extend. */
244static void gen_revsh(TCGv var)
245{
1a855029
AJ
246 tcg_gen_ext16u_i32(var, var);
247 tcg_gen_bswap16_i32(var, var);
248 tcg_gen_ext16s_i32(var, var);
3670669c
PB
249}
250
251/* Unsigned bitfield extract. */
252static void gen_ubfx(TCGv var, int shift, uint32_t mask)
253{
254 if (shift)
255 tcg_gen_shri_i32(var, var, shift);
256 tcg_gen_andi_i32(var, var, mask);
257}
258
259/* Signed bitfield extract. */
260static void gen_sbfx(TCGv var, int shift, int width)
261{
262 uint32_t signbit;
263
264 if (shift)
265 tcg_gen_sari_i32(var, var, shift);
266 if (shift + width < 32) {
267 signbit = 1u << (width - 1);
268 tcg_gen_andi_i32(var, var, (1u << width) - 1);
269 tcg_gen_xori_i32(var, var, signbit);
270 tcg_gen_subi_i32(var, var, signbit);
271 }
272}
273
274/* Bitfield insertion. Insert val into base. Clobbers base and val. */
275static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
276{
3670669c 277 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
278 tcg_gen_shli_i32(val, val, shift);
279 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
280 tcg_gen_or_i32(dest, base, val);
281}
282
838fa72d
AJ
283/* Return (b << 32) + a. Mark inputs as dead. */
284static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 285{
838fa72d
AJ
286 TCGv_i64 tmp64 = tcg_temp_new_i64();
287
288 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 289 tcg_temp_free_i32(b);
838fa72d
AJ
290 tcg_gen_shli_i64(tmp64, tmp64, 32);
291 tcg_gen_add_i64(a, tmp64, a);
292
293 tcg_temp_free_i64(tmp64);
294 return a;
295}
296
297/* Return (b << 32) - a. Mark inputs as dead. */
298static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
299{
300 TCGv_i64 tmp64 = tcg_temp_new_i64();
301
302 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 303 tcg_temp_free_i32(b);
838fa72d
AJ
304 tcg_gen_shli_i64(tmp64, tmp64, 32);
305 tcg_gen_sub_i64(a, tmp64, a);
306
307 tcg_temp_free_i64(tmp64);
308 return a;
3670669c
PB
309}
310
8f01245e
PB
311/* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
5e3f878a 313/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 314static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 315{
a7812ae4
PB
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
318
319 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 320 tcg_temp_free_i32(a);
5e3f878a 321 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 322 tcg_temp_free_i32(b);
5e3f878a 323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 324 tcg_temp_free_i64(tmp2);
5e3f878a
PB
325 return tmp1;
326}
327
a7812ae4 328static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 329{
a7812ae4
PB
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
332
333 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 334 tcg_temp_free_i32(a);
5e3f878a 335 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 336 tcg_temp_free_i32(b);
5e3f878a 337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 338 tcg_temp_free_i64(tmp2);
5e3f878a
PB
339 return tmp1;
340}
341
8f01245e
PB
342/* Swap low and high halfwords. */
343static void gen_swap_half(TCGv var)
344{
7d1b0095 345 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
346 tcg_gen_shri_i32(tmp, var, 16);
347 tcg_gen_shli_i32(var, var, 16);
348 tcg_gen_or_i32(var, var, tmp);
7d1b0095 349 tcg_temp_free_i32(tmp);
8f01245e
PB
350}
351
b26eefb6
PB
352/* Dual 16-bit add. Result placed in t0, and t1 is marked as dead.
353 tmp = (t0 ^ t1) & 0x8000;
354 t0 &= ~0x8000;
355 t1 &= ~0x8000;
356 t0 = (t0 + t1) ^ tmp;
357 */
358
359static void gen_add16(TCGv t0, TCGv t1)
360{
7d1b0095 361 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
362 tcg_gen_xor_i32(tmp, t0, t1);
363 tcg_gen_andi_i32(tmp, tmp, 0x8000);
364 tcg_gen_andi_i32(t0, t0, ~0x8000);
365 tcg_gen_andi_i32(t1, t1, ~0x8000);
366 tcg_gen_add_i32(t0, t0, t1);
367 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
368 tcg_temp_free_i32(tmp);
369 tcg_temp_free_i32(t1);
b26eefb6
PB
370}
371
9a119ff6
PB
372#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
373
b26eefb6
PB
374/* Set CF to the top bit of var. */
375static void gen_set_CF_bit31(TCGv var)
376{
7d1b0095 377 TCGv tmp = tcg_temp_new_i32();
b26eefb6 378 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 379 gen_set_CF(tmp);
7d1b0095 380 tcg_temp_free_i32(tmp);
b26eefb6
PB
381}
382
383/* Set N and Z flags from var. */
384static inline void gen_logic_CC(TCGv var)
385{
6fbe23d5
PB
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
388}
389
390/* T0 += T1 + CF. */
396e467c 391static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 392{
d9ba4830 393 TCGv tmp;
396e467c 394 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 395 tmp = load_cpu_field(CF);
396e467c 396 tcg_gen_add_i32(t0, t0, tmp);
7d1b0095 397 tcg_temp_free_i32(tmp);
b26eefb6
PB
398}
399
e9bb4aa9
JR
400/* dest = T0 + T1 + CF. */
401static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
402{
403 TCGv tmp;
404 tcg_gen_add_i32(dest, t0, t1);
405 tmp = load_cpu_field(CF);
406 tcg_gen_add_i32(dest, dest, tmp);
7d1b0095 407 tcg_temp_free_i32(tmp);
e9bb4aa9
JR
408}
409
3670669c
PB
410/* dest = T0 - T1 + CF - 1. */
411static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
412{
d9ba4830 413 TCGv tmp;
3670669c 414 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 415 tmp = load_cpu_field(CF);
3670669c
PB
416 tcg_gen_add_i32(dest, dest, tmp);
417 tcg_gen_subi_i32(dest, dest, 1);
7d1b0095 418 tcg_temp_free_i32(tmp);
3670669c
PB
419}
420
ad69471c
PB
421/* FIXME: Implement this natively. */
422#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
423
9a119ff6 424static void shifter_out_im(TCGv var, int shift)
b26eefb6 425{
7d1b0095 426 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
427 if (shift == 0) {
428 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 429 } else {
9a119ff6 430 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 431 if (shift != 31)
9a119ff6
PB
432 tcg_gen_andi_i32(tmp, tmp, 1);
433 }
434 gen_set_CF(tmp);
7d1b0095 435 tcg_temp_free_i32(tmp);
9a119ff6 436}
b26eefb6 437
9a119ff6
PB
438/* Shift by immediate. Includes special handling for shift == 0. */
439static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
440{
441 switch (shiftop) {
442 case 0: /* LSL */
443 if (shift != 0) {
444 if (flags)
445 shifter_out_im(var, 32 - shift);
446 tcg_gen_shli_i32(var, var, shift);
447 }
448 break;
449 case 1: /* LSR */
450 if (shift == 0) {
451 if (flags) {
452 tcg_gen_shri_i32(var, var, 31);
453 gen_set_CF(var);
454 }
455 tcg_gen_movi_i32(var, 0);
456 } else {
457 if (flags)
458 shifter_out_im(var, shift - 1);
459 tcg_gen_shri_i32(var, var, shift);
460 }
461 break;
462 case 2: /* ASR */
463 if (shift == 0)
464 shift = 32;
465 if (flags)
466 shifter_out_im(var, shift - 1);
467 if (shift == 32)
468 shift = 31;
469 tcg_gen_sari_i32(var, var, shift);
470 break;
471 case 3: /* ROR/RRX */
472 if (shift != 0) {
473 if (flags)
474 shifter_out_im(var, shift - 1);
f669df27 475 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 476 } else {
d9ba4830 477 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
478 if (flags)
479 shifter_out_im(var, 0);
480 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
481 tcg_gen_shli_i32(tmp, tmp, 31);
482 tcg_gen_or_i32(var, var, tmp);
7d1b0095 483 tcg_temp_free_i32(tmp);
b26eefb6
PB
484 }
485 }
486}
487
8984bd2e
PB
488static inline void gen_arm_shift_reg(TCGv var, int shiftop,
489 TCGv shift, int flags)
490{
491 if (flags) {
492 switch (shiftop) {
493 case 0: gen_helper_shl_cc(var, var, shift); break;
494 case 1: gen_helper_shr_cc(var, var, shift); break;
495 case 2: gen_helper_sar_cc(var, var, shift); break;
496 case 3: gen_helper_ror_cc(var, var, shift); break;
497 }
498 } else {
499 switch (shiftop) {
500 case 0: gen_helper_shl(var, var, shift); break;
501 case 1: gen_helper_shr(var, var, shift); break;
502 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
503 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
504 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
505 }
506 }
7d1b0095 507 tcg_temp_free_i32(shift);
8984bd2e
PB
508}
509
6ddbc6e4
PB
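/* ARMv6 parallel add/subtract (SIMD) instructions.  PAS_OP picks the
   per-width helper from op2; the dispatch below selects the signed,
   unsigned, saturating or halving variant from op1, passing a pointer
   to the GE flags where the helper needs one. */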
510#define PAS_OP(pfx) \
511 switch (op2) { \
512 case 0: gen_pas_helper(glue(pfx,add16)); break; \
513 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
514 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
515 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
516 case 4: gen_pas_helper(glue(pfx,add8)); break; \
517 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
518 }
d9ba4830 519static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 520{
a7812ae4 521 TCGv_ptr tmp;
6ddbc6e4
PB
522
523 switch (op1) {
524#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
525 case 1:
a7812ae4 526 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
527 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
528 PAS_OP(s)
b75263d6 529 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
530 break;
531 case 5:
a7812ae4 532 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
534 PAS_OP(u)
b75263d6 535 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
536 break;
537#undef gen_pas_helper
538#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
539 case 2:
540 PAS_OP(q);
541 break;
542 case 3:
543 PAS_OP(sh);
544 break;
545 case 6:
546 PAS_OP(uq);
547 break;
548 case 7:
549 PAS_OP(uh);
550 break;
551#undef gen_pas_helper
552 }
553}
9ee6e8bb
PB
554#undef PAS_OP
555
6ddbc6e4
PB
556/* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings. */
557#define PAS_OP(pfx) \
ed89a2f1 558 switch (op1) { \
6ddbc6e4
PB
559 case 0: gen_pas_helper(glue(pfx,add8)); break; \
560 case 1: gen_pas_helper(glue(pfx,add16)); break; \
561 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
562 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
563 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
564 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
565 }
d9ba4830 566static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 567{
a7812ae4 568 TCGv_ptr tmp;
6ddbc6e4 569
ed89a2f1 570 switch (op2) {
6ddbc6e4
PB
571#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
572 case 0:
a7812ae4 573 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
575 PAS_OP(s)
b75263d6 576 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
577 break;
578 case 4:
a7812ae4 579 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
581 PAS_OP(u)
b75263d6 582 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
583 break;
584#undef gen_pas_helper
585#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
586 case 1:
587 PAS_OP(q);
588 break;
589 case 2:
590 PAS_OP(sh);
591 break;
592 case 5:
593 PAS_OP(uq);
594 break;
595 case 6:
596 PAS_OP(uh);
597 break;
598#undef gen_pas_helper
599 }
600}
9ee6e8bb
PB
601#undef PAS_OP
602
d9ba4830
PB
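/* Generate a branch to 'label' that is taken when ARM condition code
   'cc' holds, testing the NZCV flag fields cached in the CPU state. */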
603static void gen_test_cc(int cc, int label)
604{
605 TCGv tmp;
606 TCGv tmp2;
d9ba4830
PB
607 int inv;
608
d9ba4830
PB
609 switch (cc) {
610 case 0: /* eq: Z */
6fbe23d5 611 tmp = load_cpu_field(ZF);
cb63669a 612 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
613 break;
614 case 1: /* ne: !Z */
6fbe23d5 615 tmp = load_cpu_field(ZF);
cb63669a 616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
617 break;
618 case 2: /* cs: C */
619 tmp = load_cpu_field(CF);
cb63669a 620 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
621 break;
622 case 3: /* cc: !C */
623 tmp = load_cpu_field(CF);
cb63669a 624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
625 break;
626 case 4: /* mi: N */
6fbe23d5 627 tmp = load_cpu_field(NF);
cb63669a 628 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
629 break;
630 case 5: /* pl: !N */
6fbe23d5 631 tmp = load_cpu_field(NF);
cb63669a 632 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
633 break;
634 case 6: /* vs: V */
635 tmp = load_cpu_field(VF);
cb63669a 636 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
637 break;
638 case 7: /* vc: !V */
639 tmp = load_cpu_field(VF);
cb63669a 640 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
641 break;
642 case 8: /* hi: C && !Z */
643 inv = gen_new_label();
644 tmp = load_cpu_field(CF);
cb63669a 645 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 646 tcg_temp_free_i32(tmp);
6fbe23d5 647 tmp = load_cpu_field(ZF);
cb63669a 648 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
649 gen_set_label(inv);
650 break;
651 case 9: /* ls: !C || Z */
652 tmp = load_cpu_field(CF);
cb63669a 653 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 654 tcg_temp_free_i32(tmp);
6fbe23d5 655 tmp = load_cpu_field(ZF);
cb63669a 656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
657 break;
658 case 10: /* ge: N == V -> N ^ V == 0 */
659 tmp = load_cpu_field(VF);
6fbe23d5 660 tmp2 = load_cpu_field(NF);
d9ba4830 661 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 662 tcg_temp_free_i32(tmp2);
cb63669a 663 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
664 break;
665 case 11: /* lt: N != V -> N ^ V != 0 */
666 tmp = load_cpu_field(VF);
6fbe23d5 667 tmp2 = load_cpu_field(NF);
d9ba4830 668 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 669 tcg_temp_free_i32(tmp2);
cb63669a 670 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
671 break;
672 case 12: /* gt: !Z && N == V */
673 inv = gen_new_label();
6fbe23d5 674 tmp = load_cpu_field(ZF);
cb63669a 675 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 676 tcg_temp_free_i32(tmp);
d9ba4830 677 tmp = load_cpu_field(VF);
6fbe23d5 678 tmp2 = load_cpu_field(NF);
d9ba4830 679 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 680 tcg_temp_free_i32(tmp2);
cb63669a 681 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
682 gen_set_label(inv);
683 break;
684 case 13: /* le: Z || N != V */
6fbe23d5 685 tmp = load_cpu_field(ZF);
cb63669a 686 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 687 tcg_temp_free_i32(tmp);
d9ba4830 688 tmp = load_cpu_field(VF);
6fbe23d5 689 tmp2 = load_cpu_field(NF);
d9ba4830 690 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 691 tcg_temp_free_i32(tmp2);
cb63669a 692 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
693 break;
694 default:
695 fprintf(stderr, "Bad condition code 0x%x\n", cc);
696 abort();
697 }
7d1b0095 698 tcg_temp_free_i32(tmp);
d9ba4830 699}
2c0262af 700
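/* Nonzero for the data-processing opcodes (indexed by bits [24:21] of
   the instruction) whose flag-setting variants update the flags from a
   logical result rather than from an add/subtract. */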
b1d8e52e 701static const uint8_t table_logic_cc[16] = {
2c0262af
FB
702 1, /* and */
703 1, /* xor */
704 0, /* sub */
705 0, /* rsb */
706 0, /* add */
707 0, /* adc */
708 0, /* sbc */
709 0, /* rsc */
710 1, /* andl */
711 1, /* xorl */
712 0, /* cmp */
713 0, /* cmn */
714 1, /* orr */
715 1, /* mov */
716 1, /* bic */
717 1, /* mvn */
718};
3b46e624 719
d9ba4830
PB
720/* Set PC and Thumb state from an immediate address. */
721static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 722{
b26eefb6 723 TCGv tmp;
99c475ab 724
b26eefb6 725 s->is_jmp = DISAS_UPDATE;
d9ba4830 726 if (s->thumb != (addr & 1)) {
7d1b0095 727 tmp = tcg_temp_new_i32();
d9ba4830
PB
728 tcg_gen_movi_i32(tmp, addr & 1);
729 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
7d1b0095 730 tcg_temp_free_i32(tmp);
d9ba4830 731 }
155c3eac 732 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
733}
734
735/* Set PC and Thumb state from var. var is marked as dead. */
736static inline void gen_bx(DisasContext *s, TCGv var)
737{
d9ba4830 738 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
739 tcg_gen_andi_i32(cpu_R[15], var, ~1);
740 tcg_gen_andi_i32(var, var, 1);
741 store_cpu_field(var, thumb);
d9ba4830
PB
742}
743
21aeb343
JR
744/* Variant of store_reg which uses branch&exchange logic when storing
745 to r15 in ARM architecture v7 and above. The source must be a temporary
746 and will be marked as dead. */
747static inline void store_reg_bx(CPUState *env, DisasContext *s,
748 int reg, TCGv var)
749{
750 if (reg == 15 && ENABLE_ARCH_7) {
751 gen_bx(s, var);
752 } else {
753 store_reg(s, reg, var);
754 }
755}
756
be5e7a76
DES
757/* Variant of store_reg which uses branch&exchange logic when storing
758 * to r15 in ARM architecture v5T and above. This is used for storing
759 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
760 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
761static inline void store_reg_from_load(CPUState *env, DisasContext *s,
762 int reg, TCGv var)
763{
764 if (reg == 15 && ENABLE_ARCH_5) {
765 gen_bx(s, var);
766 } else {
767 store_reg(s, reg, var);
768 }
769}
770
b0109805
PB
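/* Memory access helpers: the loads return a fresh temporary holding the
   (sign- or zero-extended) value, the stores consume and free 'val'. */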
771static inline TCGv gen_ld8s(TCGv addr, int index)
772{
7d1b0095 773 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
774 tcg_gen_qemu_ld8s(tmp, addr, index);
775 return tmp;
776}
777static inline TCGv gen_ld8u(TCGv addr, int index)
778{
7d1b0095 779 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
780 tcg_gen_qemu_ld8u(tmp, addr, index);
781 return tmp;
782}
783static inline TCGv gen_ld16s(TCGv addr, int index)
784{
7d1b0095 785 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
786 tcg_gen_qemu_ld16s(tmp, addr, index);
787 return tmp;
788}
789static inline TCGv gen_ld16u(TCGv addr, int index)
790{
7d1b0095 791 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
792 tcg_gen_qemu_ld16u(tmp, addr, index);
793 return tmp;
794}
795static inline TCGv gen_ld32(TCGv addr, int index)
796{
7d1b0095 797 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
798 tcg_gen_qemu_ld32u(tmp, addr, index);
799 return tmp;
800}
84496233
JR
801static inline TCGv_i64 gen_ld64(TCGv addr, int index)
802{
803 TCGv_i64 tmp = tcg_temp_new_i64();
804 tcg_gen_qemu_ld64(tmp, addr, index);
805 return tmp;
806}
b0109805
PB
807static inline void gen_st8(TCGv val, TCGv addr, int index)
808{
809 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 810 tcg_temp_free_i32(val);
b0109805
PB
811}
812static inline void gen_st16(TCGv val, TCGv addr, int index)
813{
814 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 815 tcg_temp_free_i32(val);
b0109805
PB
816}
817static inline void gen_st32(TCGv val, TCGv addr, int index)
818{
819 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 820 tcg_temp_free_i32(val);
b0109805 821}
84496233
JR
822static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
823{
824 tcg_gen_qemu_st64(val, addr, index);
825 tcg_temp_free_i64(val);
826}
b5ff1b31 827
5e3f878a
PB
828static inline void gen_set_pc_im(uint32_t val)
829{
155c3eac 830 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
831}
832
b5ff1b31
FB
833/* Force a TB lookup after an instruction that changes the CPU state. */
834static inline void gen_lookup_tb(DisasContext *s)
835{
a6445c52 836 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
837 s->is_jmp = DISAS_UPDATE;
838}
839
b0109805
PB
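/* Add the addressing-mode offset of a word/byte load/store to 'var':
   a 12-bit immediate or a shifted register, added or subtracted
   according to bit 23 of the instruction. */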
840static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
841 TCGv var)
2c0262af 842{
1e8d4eec 843 int val, rm, shift, shiftop;
b26eefb6 844 TCGv offset;
2c0262af
FB
845
846 if (!(insn & (1 << 25))) {
847 /* immediate */
848 val = insn & 0xfff;
849 if (!(insn & (1 << 23)))
850 val = -val;
537730b9 851 if (val != 0)
b0109805 852 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
853 } else {
854 /* shift/register */
855 rm = (insn) & 0xf;
856 shift = (insn >> 7) & 0x1f;
1e8d4eec 857 shiftop = (insn >> 5) & 3;
b26eefb6 858 offset = load_reg(s, rm);
9a119ff6 859 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 860 if (!(insn & (1 << 23)))
b0109805 861 tcg_gen_sub_i32(var, var, offset);
2c0262af 862 else
b0109805 863 tcg_gen_add_i32(var, var, offset);
7d1b0095 864 tcg_temp_free_i32(offset);
2c0262af
FB
865 }
866}
867
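/* As above, but for the halfword/doubleword addressing modes: an 8-bit
   split immediate or a plain register offset, plus the caller-supplied
   'extra' adjustment. */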
191f9a93 868static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 869 int extra, TCGv var)
2c0262af
FB
870{
871 int val, rm;
b26eefb6 872 TCGv offset;
3b46e624 873
2c0262af
FB
874 if (insn & (1 << 22)) {
875 /* immediate */
876 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
877 if (!(insn & (1 << 23)))
878 val = -val;
18acad92 879 val += extra;
537730b9 880 if (val != 0)
b0109805 881 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
882 } else {
883 /* register */
191f9a93 884 if (extra)
b0109805 885 tcg_gen_addi_i32(var, var, extra);
2c0262af 886 rm = (insn) & 0xf;
b26eefb6 887 offset = load_reg(s, rm);
2c0262af 888 if (!(insn & (1 << 23)))
b0109805 889 tcg_gen_sub_i32(var, var, offset);
2c0262af 890 else
b0109805 891 tcg_gen_add_i32(var, var, offset);
7d1b0095 892 tcg_temp_free_i32(offset);
2c0262af
FB
893 }
894}
895
4373f3ce
PB
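/* Two-operand VFP arithmetic on the F0/F1 working registers, single or
   double precision depending on 'dp'. */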
896#define VFP_OP2(name) \
897static inline void gen_vfp_##name(int dp) \
898{ \
899 if (dp) \
900 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
901 else \
902 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
903}
904
4373f3ce
PB
905VFP_OP2(add)
906VFP_OP2(sub)
907VFP_OP2(mul)
908VFP_OP2(div)
909
910#undef VFP_OP2
911
912static inline void gen_vfp_abs(int dp)
913{
914 if (dp)
915 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
916 else
917 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
918}
919
920static inline void gen_vfp_neg(int dp)
921{
922 if (dp)
923 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
924 else
925 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
926}
927
928static inline void gen_vfp_sqrt(int dp)
929{
930 if (dp)
931 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
932 else
933 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
934}
935
936static inline void gen_vfp_cmp(int dp)
937{
938 if (dp)
939 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
940 else
941 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
942}
943
944static inline void gen_vfp_cmpe(int dp)
945{
946 if (dp)
947 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
948 else
949 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
950}
951
952static inline void gen_vfp_F1_ld0(int dp)
953{
954 if (dp)
5b340b51 955 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 956 else
5b340b51 957 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
958}
959
960static inline void gen_vfp_uito(int dp)
961{
962 if (dp)
963 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
964 else
965 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
966}
967
968static inline void gen_vfp_sito(int dp)
969{
970 if (dp)
66230e0d 971 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 972 else
66230e0d 973 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
974}
975
976static inline void gen_vfp_toui(int dp)
977{
978 if (dp)
979 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
980 else
981 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
982}
983
984static inline void gen_vfp_touiz(int dp)
985{
986 if (dp)
987 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
988 else
989 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
990}
991
992static inline void gen_vfp_tosi(int dp)
993{
994 if (dp)
995 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
996 else
997 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
998}
999
1000static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1001{
1002 if (dp)
4373f3ce 1003 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1004 else
4373f3ce
PB
1005 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1006}
1007
1008#define VFP_GEN_FIX(name) \
1009static inline void gen_vfp_##name(int dp, int shift) \
1010{ \
b75263d6 1011 TCGv tmp_shift = tcg_const_i32(shift); \
4373f3ce 1012 if (dp) \
b75263d6 1013 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
4373f3ce 1014 else \
b75263d6
JR
1015 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1016 tcg_temp_free_i32(tmp_shift); \
9ee6e8bb 1017}
4373f3ce
PB
1018VFP_GEN_FIX(tosh)
1019VFP_GEN_FIX(tosl)
1020VFP_GEN_FIX(touh)
1021VFP_GEN_FIX(toul)
1022VFP_GEN_FIX(shto)
1023VFP_GEN_FIX(slto)
1024VFP_GEN_FIX(uhto)
1025VFP_GEN_FIX(ulto)
1026#undef VFP_GEN_FIX
9ee6e8bb 1027
312eea9f 1028static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1029{
1030 if (dp)
312eea9f 1031 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1032 else
312eea9f 1033 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1034}
1035
312eea9f 1036static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1037{
1038 if (dp)
312eea9f 1039 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1040 else
312eea9f 1041 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1042}
1043
8e96005d
FB
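/* Byte offset within CPUARMState of VFP register 'reg': a double-precision
   register when 'dp', otherwise a single-precision register mapped onto
   one half of a double. */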
1044static inline long
1045vfp_reg_offset (int dp, int reg)
1046{
1047 if (dp)
1048 return offsetof(CPUARMState, vfp.regs[reg]);
1049 else if (reg & 1) {
1050 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1051 + offsetof(CPU_DoubleU, l.upper);
1052 } else {
1053 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1054 + offsetof(CPU_DoubleU, l.lower);
1055 }
1056}
9ee6e8bb
PB
1057
1058/* Return the offset of a 32-bit piece of a NEON register.
1059 zero is the least significant end of the register. */
1060static inline long
1061neon_reg_offset (int reg, int n)
1062{
1063 int sreg;
1064 sreg = reg * 2 + n;
1065 return vfp_reg_offset(0, sreg);
1066}
1067
8f8e3aa4
PB
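/* Load or store one 32-bit pass of a NEON register; the load returns a
   fresh temporary and the store consumes 'var'. */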
1068static TCGv neon_load_reg(int reg, int pass)
1069{
7d1b0095 1070 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1071 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1072 return tmp;
1073}
1074
1075static void neon_store_reg(int reg, int pass, TCGv var)
1076{
1077 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1078 tcg_temp_free_i32(var);
8f8e3aa4
PB
1079}
1080
a7812ae4 1081static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1082{
1083 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1084}
1085
a7812ae4 1086static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1087{
1088 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1089}
1090
4373f3ce
PB
1091#define tcg_gen_ld_f32 tcg_gen_ld_i32
1092#define tcg_gen_ld_f64 tcg_gen_ld_i64
1093#define tcg_gen_st_f32 tcg_gen_st_i32
1094#define tcg_gen_st_f64 tcg_gen_st_i64
1095
b7bcbe95
FB
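/* Move values between the F0/F1 working registers and the architectural
   VFP register file. */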
1096static inline void gen_mov_F0_vreg(int dp, int reg)
1097{
1098 if (dp)
4373f3ce 1099 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1100 else
4373f3ce 1101 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1102}
1103
1104static inline void gen_mov_F1_vreg(int dp, int reg)
1105{
1106 if (dp)
4373f3ce 1107 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1108 else
4373f3ce 1109 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1110}
1111
1112static inline void gen_mov_vreg_F0(int dp, int reg)
1113{
1114 if (dp)
4373f3ce 1115 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1116 else
4373f3ce 1117 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1118}
1119
18c9b560
AZ
1120#define ARM_CP_RW_BIT (1 << 20)
1121
a7812ae4 1122static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1123{
1124 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1125}
1126
a7812ae4 1127static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1128{
1129 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1130}
1131
da6b5335 1132static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1133{
7d1b0095 1134 TCGv var = tcg_temp_new_i32();
da6b5335
FN
1135 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1136 return var;
e677137d
PB
1137}
1138
da6b5335 1139static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1140{
da6b5335 1141 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
7d1b0095 1142 tcg_temp_free_i32(var);
e677137d
PB
1143}
1144
1145static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1146{
1147 iwmmxt_store_reg(cpu_M0, rn);
1148}
1149
1150static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1151{
1152 iwmmxt_load_reg(cpu_M0, rn);
1153}
1154
1155static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1156{
1157 iwmmxt_load_reg(cpu_V1, rn);
1158 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1159}
1160
1161static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1162{
1163 iwmmxt_load_reg(cpu_V1, rn);
1164 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1165}
1166
1167static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1168{
1169 iwmmxt_load_reg(cpu_V1, rn);
1170 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1171}
1172
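/* Wrappers around the iwMMXt helpers: the two-operand forms operate on
   M0 and wRn, the one-operand forms on M0 alone. */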
1173#define IWMMXT_OP(name) \
1174static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1175{ \
1176 iwmmxt_load_reg(cpu_V1, rn); \
1177 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1178}
1179
947a2fa2
PM
1180#define IWMMXT_OP_SIZE(name) \
1181IWMMXT_OP(name##b) \
1182IWMMXT_OP(name##w) \
1183IWMMXT_OP(name##l)
e677137d 1184
947a2fa2 1185#define IWMMXT_OP_1(name) \
e677137d
PB
1186static inline void gen_op_iwmmxt_##name##_M0(void) \
1187{ \
947a2fa2 1188 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
e677137d
PB
1189}
1190
1191IWMMXT_OP(maddsq)
1192IWMMXT_OP(madduq)
1193IWMMXT_OP(sadb)
1194IWMMXT_OP(sadw)
1195IWMMXT_OP(mulslw)
1196IWMMXT_OP(mulshw)
1197IWMMXT_OP(mululw)
1198IWMMXT_OP(muluhw)
1199IWMMXT_OP(macsw)
1200IWMMXT_OP(macuw)
1201
947a2fa2
PM
1202IWMMXT_OP_SIZE(unpackl)
1203IWMMXT_OP_SIZE(unpackh)
1204
1205IWMMXT_OP_1(unpacklub)
1206IWMMXT_OP_1(unpackluw)
1207IWMMXT_OP_1(unpacklul)
1208IWMMXT_OP_1(unpackhub)
1209IWMMXT_OP_1(unpackhuw)
1210IWMMXT_OP_1(unpackhul)
1211IWMMXT_OP_1(unpacklsb)
1212IWMMXT_OP_1(unpacklsw)
1213IWMMXT_OP_1(unpacklsl)
1214IWMMXT_OP_1(unpackhsb)
1215IWMMXT_OP_1(unpackhsw)
1216IWMMXT_OP_1(unpackhsl)
1217
1218IWMMXT_OP_SIZE(cmpeq)
1219IWMMXT_OP_SIZE(cmpgtu)
1220IWMMXT_OP_SIZE(cmpgts)
1221
1222IWMMXT_OP_SIZE(mins)
1223IWMMXT_OP_SIZE(minu)
1224IWMMXT_OP_SIZE(maxs)
1225IWMMXT_OP_SIZE(maxu)
1226
1227IWMMXT_OP_SIZE(subn)
1228IWMMXT_OP_SIZE(addn)
1229IWMMXT_OP_SIZE(subu)
1230IWMMXT_OP_SIZE(addu)
1231IWMMXT_OP_SIZE(subs)
1232IWMMXT_OP_SIZE(adds)
1233
1234IWMMXT_OP(avgb0)
1235IWMMXT_OP(avgb1)
1236IWMMXT_OP(avgw0)
1237IWMMXT_OP(avgw1)
e677137d
PB
1238
1239IWMMXT_OP(msadb)
1240
947a2fa2
PM
1241IWMMXT_OP(packuw)
1242IWMMXT_OP(packul)
1243IWMMXT_OP(packuq)
1244IWMMXT_OP(packsw)
1245IWMMXT_OP(packsl)
1246IWMMXT_OP(packsq)
e677137d 1247
e677137d
PB
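/* Record in wCon that the wR (MUP) or control (CUP) registers have been
   updated. */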
1248static void gen_op_iwmmxt_set_mup(void)
1249{
1250 TCGv tmp;
1251 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1252 tcg_gen_ori_i32(tmp, tmp, 2);
1253 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1254}
1255
1256static void gen_op_iwmmxt_set_cup(void)
1257{
1258 TCGv tmp;
1259 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1260 tcg_gen_ori_i32(tmp, tmp, 1);
1261 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1262}
1263
1264static void gen_op_iwmmxt_setpsr_nz(void)
1265{
7d1b0095 1266 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1267 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1268 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1269}
1270
1271static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1272{
1273 iwmmxt_load_reg(cpu_V1, rn);
86831435 1274 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1275 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1276}
1277
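/* Compute the transfer address of an iwMMXt load/store into 'dest',
   handling the pre-indexed, post-indexed and writeback forms.  Returns
   nonzero for an invalid addressing mode. */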
da6b5335 1278static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1279{
1280 int rd;
1281 uint32_t offset;
da6b5335 1282 TCGv tmp;
18c9b560
AZ
1283
1284 rd = (insn >> 16) & 0xf;
da6b5335 1285 tmp = load_reg(s, rd);
18c9b560
AZ
1286
1287 offset = (insn & 0xff) << ((insn >> 7) & 2);
1288 if (insn & (1 << 24)) {
1289 /* Pre indexed */
1290 if (insn & (1 << 23))
da6b5335 1291 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1292 else
da6b5335
FN
1293 tcg_gen_addi_i32(tmp, tmp, -offset);
1294 tcg_gen_mov_i32(dest, tmp);
18c9b560 1295 if (insn & (1 << 21))
da6b5335
FN
1296 store_reg(s, rd, tmp);
1297 else
7d1b0095 1298 tcg_temp_free_i32(tmp);
18c9b560
AZ
1299 } else if (insn & (1 << 21)) {
1300 /* Post indexed */
da6b5335 1301 tcg_gen_mov_i32(dest, tmp);
18c9b560 1302 if (insn & (1 << 23))
da6b5335 1303 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1304 else
da6b5335
FN
1305 tcg_gen_addi_i32(tmp, tmp, -offset);
1306 store_reg(s, rd, tmp);
18c9b560
AZ
1307 } else if (!(insn & (1 << 23)))
1308 return 1;
1309 return 0;
1310}
1311
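/* Fetch an iwMMXt shift amount into 'dest': from control register wCGRn
   when bit 8 of the instruction is set, otherwise from the low word of
   wRd, masked by 'mask'.  Returns nonzero for an invalid register. */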
da6b5335 1312static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1313{
1314 int rd = (insn >> 0) & 0xf;
da6b5335 1315 TCGv tmp;
18c9b560 1316
da6b5335
FN
1317 if (insn & (1 << 8)) {
1318 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1319 return 1;
da6b5335
FN
1320 } else {
1321 tmp = iwmmxt_load_creg(rd);
1322 }
1323 } else {
7d1b0095 1324 tmp = tcg_temp_new_i32();
da6b5335
FN
1325 iwmmxt_load_reg(cpu_V0, rd);
1326 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1327 }
1328 tcg_gen_andi_i32(tmp, tmp, mask);
1329 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1330 tcg_temp_free_i32(tmp);
18c9b560
AZ
1331 return 0;
1332}
1333
1334/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1335 (i.e. an undefined instruction). */
1336static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1337{
1338 int rd, wrd;
1339 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1340 TCGv addr;
1341 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1342
1343 if ((insn & 0x0e000e00) == 0x0c000000) {
1344 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1345 wrd = insn & 0xf;
1346 rdlo = (insn >> 12) & 0xf;
1347 rdhi = (insn >> 16) & 0xf;
1348 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1349 iwmmxt_load_reg(cpu_V0, wrd);
1350 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1351 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1352 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1353 } else { /* TMCRR */
da6b5335
FN
1354 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1355 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1356 gen_op_iwmmxt_set_mup();
1357 }
1358 return 0;
1359 }
1360
1361 wrd = (insn >> 12) & 0xf;
7d1b0095 1362 addr = tcg_temp_new_i32();
da6b5335 1363 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1364 tcg_temp_free_i32(addr);
18c9b560 1365 return 1;
da6b5335 1366 }
18c9b560
AZ
1367 if (insn & ARM_CP_RW_BIT) {
1368 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1369 tmp = tcg_temp_new_i32();
da6b5335
FN
1370 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1371 iwmmxt_store_creg(wrd, tmp);
18c9b560 1372 } else {
e677137d
PB
1373 i = 1;
1374 if (insn & (1 << 8)) {
1375 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1376 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1377 i = 0;
1378 } else { /* WLDRW wRd */
da6b5335 1379 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1380 }
1381 } else {
1382 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1383 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1384 } else { /* WLDRB */
da6b5335 1385 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1386 }
1387 }
1388 if (i) {
1389 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1390 tcg_temp_free_i32(tmp);
e677137d 1391 }
18c9b560
AZ
1392 gen_op_iwmmxt_movq_wRn_M0(wrd);
1393 }
1394 } else {
1395 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1396 tmp = iwmmxt_load_creg(wrd);
1397 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1398 } else {
1399 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1400 tmp = tcg_temp_new_i32();
e677137d
PB
1401 if (insn & (1 << 8)) {
1402 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1403 tcg_temp_free_i32(tmp);
da6b5335 1404 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1405 } else { /* WSTRW wRd */
1406 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1407 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1408 }
1409 } else {
1410 if (insn & (1 << 22)) { /* WSTRH */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1412 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1413 } else { /* WSTRB */
1414 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1415 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1416 }
1417 }
18c9b560
AZ
1418 }
1419 }
7d1b0095 1420 tcg_temp_free_i32(addr);
18c9b560
AZ
1421 return 0;
1422 }
1423
1424 if ((insn & 0x0f000000) != 0x0e000000)
1425 return 1;
1426
1427 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1428 case 0x000: /* WOR */
1429 wrd = (insn >> 12) & 0xf;
1430 rd0 = (insn >> 0) & 0xf;
1431 rd1 = (insn >> 16) & 0xf;
1432 gen_op_iwmmxt_movq_M0_wRn(rd0);
1433 gen_op_iwmmxt_orq_M0_wRn(rd1);
1434 gen_op_iwmmxt_setpsr_nz();
1435 gen_op_iwmmxt_movq_wRn_M0(wrd);
1436 gen_op_iwmmxt_set_mup();
1437 gen_op_iwmmxt_set_cup();
1438 break;
1439 case 0x011: /* TMCR */
1440 if (insn & 0xf)
1441 return 1;
1442 rd = (insn >> 12) & 0xf;
1443 wrd = (insn >> 16) & 0xf;
1444 switch (wrd) {
1445 case ARM_IWMMXT_wCID:
1446 case ARM_IWMMXT_wCASF:
1447 break;
1448 case ARM_IWMMXT_wCon:
1449 gen_op_iwmmxt_set_cup();
1450 /* Fall through. */
1451 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1452 tmp = iwmmxt_load_creg(wrd);
1453 tmp2 = load_reg(s, rd);
f669df27 1454 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1455 tcg_temp_free_i32(tmp2);
da6b5335 1456 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1457 break;
1458 case ARM_IWMMXT_wCGR0:
1459 case ARM_IWMMXT_wCGR1:
1460 case ARM_IWMMXT_wCGR2:
1461 case ARM_IWMMXT_wCGR3:
1462 gen_op_iwmmxt_set_cup();
da6b5335
FN
1463 tmp = load_reg(s, rd);
1464 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1465 break;
1466 default:
1467 return 1;
1468 }
1469 break;
1470 case 0x100: /* WXOR */
1471 wrd = (insn >> 12) & 0xf;
1472 rd0 = (insn >> 0) & 0xf;
1473 rd1 = (insn >> 16) & 0xf;
1474 gen_op_iwmmxt_movq_M0_wRn(rd0);
1475 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1476 gen_op_iwmmxt_setpsr_nz();
1477 gen_op_iwmmxt_movq_wRn_M0(wrd);
1478 gen_op_iwmmxt_set_mup();
1479 gen_op_iwmmxt_set_cup();
1480 break;
1481 case 0x111: /* TMRC */
1482 if (insn & 0xf)
1483 return 1;
1484 rd = (insn >> 12) & 0xf;
1485 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1486 tmp = iwmmxt_load_creg(wrd);
1487 store_reg(s, rd, tmp);
18c9b560
AZ
1488 break;
1489 case 0x300: /* WANDN */
1490 wrd = (insn >> 12) & 0xf;
1491 rd0 = (insn >> 0) & 0xf;
1492 rd1 = (insn >> 16) & 0xf;
1493 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1494 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1495 gen_op_iwmmxt_andq_M0_wRn(rd1);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1500 break;
1501 case 0x200: /* WAND */
1502 wrd = (insn >> 12) & 0xf;
1503 rd0 = (insn >> 0) & 0xf;
1504 rd1 = (insn >> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0);
1506 gen_op_iwmmxt_andq_M0_wRn(rd1);
1507 gen_op_iwmmxt_setpsr_nz();
1508 gen_op_iwmmxt_movq_wRn_M0(wrd);
1509 gen_op_iwmmxt_set_mup();
1510 gen_op_iwmmxt_set_cup();
1511 break;
1512 case 0x810: case 0xa10: /* WMADD */
1513 wrd = (insn >> 12) & 0xf;
1514 rd0 = (insn >> 0) & 0xf;
1515 rd1 = (insn >> 16) & 0xf;
1516 gen_op_iwmmxt_movq_M0_wRn(rd0);
1517 if (insn & (1 << 21))
1518 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1519 else
1520 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1521 gen_op_iwmmxt_movq_wRn_M0(wrd);
1522 gen_op_iwmmxt_set_mup();
1523 break;
1524 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1525 wrd = (insn >> 12) & 0xf;
1526 rd0 = (insn >> 16) & 0xf;
1527 rd1 = (insn >> 0) & 0xf;
1528 gen_op_iwmmxt_movq_M0_wRn(rd0);
1529 switch ((insn >> 22) & 3) {
1530 case 0:
1531 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1532 break;
1533 case 1:
1534 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1535 break;
1536 case 2:
1537 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1538 break;
1539 case 3:
1540 return 1;
1541 }
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1545 break;
1546 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 16) & 0xf;
1549 rd1 = (insn >> 0) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 switch ((insn >> 22) & 3) {
1552 case 0:
1553 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1554 break;
1555 case 1:
1556 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1557 break;
1558 case 2:
1559 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1560 break;
1561 case 3:
1562 return 1;
1563 }
1564 gen_op_iwmmxt_movq_wRn_M0(wrd);
1565 gen_op_iwmmxt_set_mup();
1566 gen_op_iwmmxt_set_cup();
1567 break;
1568 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1569 wrd = (insn >> 12) & 0xf;
1570 rd0 = (insn >> 16) & 0xf;
1571 rd1 = (insn >> 0) & 0xf;
1572 gen_op_iwmmxt_movq_M0_wRn(rd0);
1573 if (insn & (1 << 22))
1574 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1575 else
1576 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1577 if (!(insn & (1 << 20)))
1578 gen_op_iwmmxt_addl_M0_wRn(wrd);
1579 gen_op_iwmmxt_movq_wRn_M0(wrd);
1580 gen_op_iwmmxt_set_mup();
1581 break;
1582 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1583 wrd = (insn >> 12) & 0xf;
1584 rd0 = (insn >> 16) & 0xf;
1585 rd1 = (insn >> 0) & 0xf;
1586 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1587 if (insn & (1 << 21)) {
1588 if (insn & (1 << 20))
1589 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1590 else
1591 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1592 } else {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1597 }
18c9b560
AZ
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 break;
1601 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 21))
1607 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1610 if (!(insn & (1 << 20))) {
e677137d
PB
1611 iwmmxt_load_reg(cpu_V1, wrd);
1612 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1613 }
1614 gen_op_iwmmxt_movq_wRn_M0(wrd);
1615 gen_op_iwmmxt_set_mup();
1616 break;
1617 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1618 wrd = (insn >> 12) & 0xf;
1619 rd0 = (insn >> 16) & 0xf;
1620 rd1 = (insn >> 0) & 0xf;
1621 gen_op_iwmmxt_movq_M0_wRn(rd0);
1622 switch ((insn >> 22) & 3) {
1623 case 0:
1624 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1625 break;
1626 case 1:
1627 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1628 break;
1629 case 2:
1630 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1631 break;
1632 case 3:
1633 return 1;
1634 }
1635 gen_op_iwmmxt_movq_wRn_M0(wrd);
1636 gen_op_iwmmxt_set_mup();
1637 gen_op_iwmmxt_set_cup();
1638 break;
1639 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1640 wrd = (insn >> 12) & 0xf;
1641 rd0 = (insn >> 16) & 0xf;
1642 rd1 = (insn >> 0) & 0xf;
1643 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1644 if (insn & (1 << 22)) {
1645 if (insn & (1 << 20))
1646 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1647 else
1648 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1649 } else {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1654 }
18c9b560
AZ
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 gen_op_iwmmxt_set_cup();
1658 break;
1659 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 16) & 0xf;
1662 rd1 = (insn >> 0) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1664 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1665 tcg_gen_andi_i32(tmp, tmp, 7);
1666 iwmmxt_load_reg(cpu_V1, rd1);
1667 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1668 tcg_temp_free_i32(tmp);
18c9b560
AZ
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 break;
1672 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1673 if (((insn >> 6) & 3) == 3)
1674 return 1;
18c9b560
AZ
1675 rd = (insn >> 12) & 0xf;
1676 wrd = (insn >> 16) & 0xf;
da6b5335 1677 tmp = load_reg(s, rd);
18c9b560
AZ
1678 gen_op_iwmmxt_movq_M0_wRn(wrd);
1679 switch ((insn >> 6) & 3) {
1680 case 0:
da6b5335
FN
1681 tmp2 = tcg_const_i32(0xff);
1682 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1683 break;
1684 case 1:
da6b5335
FN
1685 tmp2 = tcg_const_i32(0xffff);
1686 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1687 break;
1688 case 2:
da6b5335
FN
1689 tmp2 = tcg_const_i32(0xffffffff);
1690 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1691 break;
da6b5335
FN
1692 default:
1693 TCGV_UNUSED(tmp2);
1694 TCGV_UNUSED(tmp3);
18c9b560 1695 }
da6b5335
FN
1696 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1697 tcg_temp_free(tmp3);
1698 tcg_temp_free(tmp2);
7d1b0095 1699 tcg_temp_free_i32(tmp);
18c9b560
AZ
1700 gen_op_iwmmxt_movq_wRn_M0(wrd);
1701 gen_op_iwmmxt_set_mup();
1702 break;
1703 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1704 rd = (insn >> 12) & 0xf;
1705 wrd = (insn >> 16) & 0xf;
da6b5335 1706 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1707 return 1;
1708 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1709 tmp = tcg_temp_new_i32();
18c9b560
AZ
1710 switch ((insn >> 22) & 3) {
1711 case 0:
da6b5335
FN
1712 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1713 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1714 if (insn & 8) {
1715 tcg_gen_ext8s_i32(tmp, tmp);
1716 } else {
1717 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1718 }
1719 break;
1720 case 1:
da6b5335
FN
1721 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1722 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1723 if (insn & 8) {
1724 tcg_gen_ext16s_i32(tmp, tmp);
1725 } else {
1726 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1727 }
1728 break;
1729 case 2:
da6b5335
FN
1730 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1731 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1732 break;
18c9b560 1733 }
da6b5335 1734 store_reg(s, rd, tmp);
18c9b560
AZ
1735 break;
1736 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1737 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1738 return 1;
da6b5335 1739 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1740 switch ((insn >> 22) & 3) {
1741 case 0:
da6b5335 1742 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1743 break;
1744 case 1:
da6b5335 1745 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1746 break;
1747 case 2:
da6b5335 1748 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1749 break;
18c9b560 1750 }
da6b5335
FN
1751 tcg_gen_shli_i32(tmp, tmp, 28);
1752 gen_set_nzcv(tmp);
7d1b0095 1753 tcg_temp_free_i32(tmp);
18c9b560
AZ
1754 break;
1755 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1756 if (((insn >> 6) & 3) == 3)
1757 return 1;
18c9b560
AZ
1758 rd = (insn >> 12) & 0xf;
1759 wrd = (insn >> 16) & 0xf;
da6b5335 1760 tmp = load_reg(s, rd);
18c9b560
AZ
1761 switch ((insn >> 6) & 3) {
1762 case 0:
da6b5335 1763 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1764 break;
1765 case 1:
da6b5335 1766 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1767 break;
1768 case 2:
da6b5335 1769 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1770 break;
18c9b560 1771 }
7d1b0095 1772 tcg_temp_free_i32(tmp);
18c9b560
AZ
1773 gen_op_iwmmxt_movq_wRn_M0(wrd);
1774 gen_op_iwmmxt_set_mup();
1775 break;
1776 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1777 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1778 return 1;
da6b5335 1779 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1780 tmp2 = tcg_temp_new_i32();
da6b5335 1781 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1782 switch ((insn >> 22) & 3) {
1783 case 0:
1784 for (i = 0; i < 7; i ++) {
da6b5335
FN
1785 tcg_gen_shli_i32(tmp2, tmp2, 4);
1786 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1787 }
1788 break;
1789 case 1:
1790 for (i = 0; i < 3; i ++) {
da6b5335
FN
1791 tcg_gen_shli_i32(tmp2, tmp2, 8);
1792 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1793 }
1794 break;
1795 case 2:
da6b5335
FN
1796 tcg_gen_shli_i32(tmp2, tmp2, 16);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1798 break;
18c9b560 1799 }
da6b5335 1800 gen_set_nzcv(tmp);
7d1b0095
PM
1801 tcg_temp_free_i32(tmp2);
1802 tcg_temp_free_i32(tmp);
18c9b560
AZ
1803 break;
1804 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1805 wrd = (insn >> 12) & 0xf;
1806 rd0 = (insn >> 16) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
1808 switch ((insn >> 22) & 3) {
1809 case 0:
e677137d 1810 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1811 break;
1812 case 1:
e677137d 1813 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1814 break;
1815 case 2:
e677137d 1816 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1817 break;
1818 case 3:
1819 return 1;
1820 }
1821 gen_op_iwmmxt_movq_wRn_M0(wrd);
1822 gen_op_iwmmxt_set_mup();
1823 break;
1824 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1825 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1826 return 1;
da6b5335 1827 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1828 tmp2 = tcg_temp_new_i32();
da6b5335 1829 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1830 switch ((insn >> 22) & 3) {
1831 case 0:
1832 for (i = 0; i < 7; i ++) {
da6b5335
FN
1833 tcg_gen_shli_i32(tmp2, tmp2, 4);
1834 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1835 }
1836 break;
1837 case 1:
1838 for (i = 0; i < 3; i ++) {
da6b5335
FN
1839 tcg_gen_shli_i32(tmp2, tmp2, 8);
1840 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1841 }
1842 break;
1843 case 2:
da6b5335
FN
1844 tcg_gen_shli_i32(tmp2, tmp2, 16);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1846 break;
18c9b560 1847 }
da6b5335 1848 gen_set_nzcv(tmp);
7d1b0095
PM
1849 tcg_temp_free_i32(tmp2);
1850 tcg_temp_free_i32(tmp);
18c9b560
AZ
1851 break;
1852 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1853 rd = (insn >> 12) & 0xf;
1854 rd0 = (insn >> 16) & 0xf;
da6b5335 1855 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1856 return 1;
1857 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1858 tmp = tcg_temp_new_i32();
18c9b560
AZ
1859 switch ((insn >> 22) & 3) {
1860 case 0:
da6b5335 1861 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1862 break;
1863 case 1:
da6b5335 1864 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1865 break;
1866 case 2:
da6b5335 1867 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1868 break;
18c9b560 1869 }
da6b5335 1870 store_reg(s, rd, tmp);
18c9b560
AZ
1871 break;
1872 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1873 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1874 wrd = (insn >> 12) & 0xf;
1875 rd0 = (insn >> 16) & 0xf;
1876 rd1 = (insn >> 0) & 0xf;
1877 gen_op_iwmmxt_movq_M0_wRn(rd0);
1878 switch ((insn >> 22) & 3) {
1879 case 0:
1880 if (insn & (1 << 21))
1881 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1882 else
1883 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1884 break;
1885 case 1:
1886 if (insn & (1 << 21))
1887 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1888 else
1889 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1890 break;
1891 case 2:
1892 if (insn & (1 << 21))
1893 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1894 else
1895 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1896 break;
1897 case 3:
1898 return 1;
1899 }
1900 gen_op_iwmmxt_movq_wRn_M0(wrd);
1901 gen_op_iwmmxt_set_mup();
1902 gen_op_iwmmxt_set_cup();
1903 break;
1904 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1905 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1906 wrd = (insn >> 12) & 0xf;
1907 rd0 = (insn >> 16) & 0xf;
1908 gen_op_iwmmxt_movq_M0_wRn(rd0);
1909 switch ((insn >> 22) & 3) {
1910 case 0:
1911 if (insn & (1 << 21))
1912 gen_op_iwmmxt_unpacklsb_M0();
1913 else
1914 gen_op_iwmmxt_unpacklub_M0();
1915 break;
1916 case 1:
1917 if (insn & (1 << 21))
1918 gen_op_iwmmxt_unpacklsw_M0();
1919 else
1920 gen_op_iwmmxt_unpackluw_M0();
1921 break;
1922 case 2:
1923 if (insn & (1 << 21))
1924 gen_op_iwmmxt_unpacklsl_M0();
1925 else
1926 gen_op_iwmmxt_unpacklul_M0();
1927 break;
1928 case 3:
1929 return 1;
1930 }
1931 gen_op_iwmmxt_movq_wRn_M0(wrd);
1932 gen_op_iwmmxt_set_mup();
1933 gen_op_iwmmxt_set_cup();
1934 break;
1935 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1936 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1937 wrd = (insn >> 12) & 0xf;
1938 rd0 = (insn >> 16) & 0xf;
1939 gen_op_iwmmxt_movq_M0_wRn(rd0);
1940 switch ((insn >> 22) & 3) {
1941 case 0:
1942 if (insn & (1 << 21))
1943 gen_op_iwmmxt_unpackhsb_M0();
1944 else
1945 gen_op_iwmmxt_unpackhub_M0();
1946 break;
1947 case 1:
1948 if (insn & (1 << 21))
1949 gen_op_iwmmxt_unpackhsw_M0();
1950 else
1951 gen_op_iwmmxt_unpackhuw_M0();
1952 break;
1953 case 2:
1954 if (insn & (1 << 21))
1955 gen_op_iwmmxt_unpackhsl_M0();
1956 else
1957 gen_op_iwmmxt_unpackhul_M0();
1958 break;
1959 case 3:
1960 return 1;
1961 }
1962 gen_op_iwmmxt_movq_wRn_M0(wrd);
1963 gen_op_iwmmxt_set_mup();
1964 gen_op_iwmmxt_set_cup();
1965 break;
1966 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1967 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1968 if (((insn >> 22) & 3) == 0)
1969 return 1;
18c9b560
AZ
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1973 tmp = tcg_temp_new_i32();
da6b5335 1974 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 1975 tcg_temp_free_i32(tmp);
18c9b560 1976 return 1;
da6b5335 1977 }
18c9b560 1978 switch ((insn >> 22) & 3) {
18c9b560 1979 case 1:
947a2fa2 1980 gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1981 break;
1982 case 2:
947a2fa2 1983 gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1984 break;
1985 case 3:
947a2fa2 1986 gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1987 break;
1988 }
7d1b0095 1989 tcg_temp_free_i32(tmp);
18c9b560
AZ
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 gen_op_iwmmxt_set_cup();
1993 break;
1994 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1995 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1996 if (((insn >> 22) & 3) == 0)
1997 return 1;
18c9b560
AZ
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2001 tmp = tcg_temp_new_i32();
da6b5335 2002 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2003 tcg_temp_free_i32(tmp);
18c9b560 2004 return 1;
da6b5335 2005 }
18c9b560 2006 switch ((insn >> 22) & 3) {
18c9b560 2007 case 1:
947a2fa2 2008 gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2009 break;
2010 case 2:
947a2fa2 2011 gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2012 break;
2013 case 3:
947a2fa2 2014 gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2015 break;
2016 }
7d1b0095 2017 tcg_temp_free_i32(tmp);
18c9b560
AZ
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 gen_op_iwmmxt_set_mup();
2020 gen_op_iwmmxt_set_cup();
2021 break;
2022 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2023 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2024 if (((insn >> 22) & 3) == 0)
2025 return 1;
18c9b560
AZ
2026 wrd = (insn >> 12) & 0xf;
2027 rd0 = (insn >> 16) & 0xf;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2029 tmp = tcg_temp_new_i32();
da6b5335 2030 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2031 tcg_temp_free_i32(tmp);
18c9b560 2032 return 1;
da6b5335 2033 }
18c9b560 2034 switch ((insn >> 22) & 3) {
18c9b560 2035 case 1:
947a2fa2 2036 gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2037 break;
2038 case 2:
947a2fa2 2039 gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2040 break;
2041 case 3:
947a2fa2 2042 gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2043 break;
2044 }
7d1b0095 2045 tcg_temp_free_i32(tmp);
18c9b560
AZ
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 gen_op_iwmmxt_set_cup();
2049 break;
2050 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2051 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2052 if (((insn >> 22) & 3) == 0)
2053 return 1;
18c9b560
AZ
2054 wrd = (insn >> 12) & 0xf;
2055 rd0 = (insn >> 16) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2057 tmp = tcg_temp_new_i32();
18c9b560 2058 switch ((insn >> 22) & 3) {
18c9b560 2059 case 1:
da6b5335 2060 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2061 tcg_temp_free_i32(tmp);
18c9b560 2062 return 1;
da6b5335 2063 }
947a2fa2 2064 gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2065 break;
2066 case 2:
da6b5335 2067 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2068 tcg_temp_free_i32(tmp);
18c9b560 2069 return 1;
da6b5335 2070 }
947a2fa2 2071 gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2072 break;
2073 case 3:
da6b5335 2074 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2075 tcg_temp_free_i32(tmp);
18c9b560 2076 return 1;
da6b5335 2077 }
947a2fa2 2078 gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2079 break;
2080 }
7d1b0095 2081 tcg_temp_free_i32(tmp);
18c9b560
AZ
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2087 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 rd1 = (insn >> 0) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 switch ((insn >> 22) & 3) {
2093 case 0:
2094 if (insn & (1 << 21))
2095 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2096 else
2097 gen_op_iwmmxt_minub_M0_wRn(rd1);
2098 break;
2099 case 1:
2100 if (insn & (1 << 21))
2101 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2102 else
2103 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2104 break;
2105 case 2:
2106 if (insn & (1 << 21))
2107 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_minul_M0_wRn(rd1);
2110 break;
2111 case 3:
2112 return 1;
2113 }
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
2117 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2118 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 rd1 = (insn >> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
2123 switch ((insn >> 22) & 3) {
2124 case 0:
2125 if (insn & (1 << 21))
2126 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2127 else
2128 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2129 break;
2130 case 1:
2131 if (insn & (1 << 21))
2132 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2133 else
2134 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2135 break;
2136 case 2:
2137 if (insn & (1 << 21))
2138 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2139 else
2140 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2141 break;
2142 case 3:
2143 return 1;
2144 }
2145 gen_op_iwmmxt_movq_wRn_M0(wrd);
2146 gen_op_iwmmxt_set_mup();
2147 break;
2148 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2149 case 0x402: case 0x502: case 0x602: case 0x702:
2150 wrd = (insn >> 12) & 0xf;
2151 rd0 = (insn >> 16) & 0xf;
2152 rd1 = (insn >> 0) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2154 tmp = tcg_const_i32((insn >> 20) & 3);
2155 iwmmxt_load_reg(cpu_V1, rd1);
2156 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2157 tcg_temp_free(tmp);
18c9b560
AZ
2158 gen_op_iwmmxt_movq_wRn_M0(wrd);
2159 gen_op_iwmmxt_set_mup();
2160 break;
2161 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2162 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2163 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2164 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2165 wrd = (insn >> 12) & 0xf;
2166 rd0 = (insn >> 16) & 0xf;
2167 rd1 = (insn >> 0) & 0xf;
2168 gen_op_iwmmxt_movq_M0_wRn(rd0);
2169 switch ((insn >> 20) & 0xf) {
2170 case 0x0:
2171 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2172 break;
2173 case 0x1:
2174 gen_op_iwmmxt_subub_M0_wRn(rd1);
2175 break;
2176 case 0x3:
2177 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2178 break;
2179 case 0x4:
2180 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2181 break;
2182 case 0x5:
2183 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2184 break;
2185 case 0x7:
2186 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2187 break;
2188 case 0x8:
2189 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2190 break;
2191 case 0x9:
2192 gen_op_iwmmxt_subul_M0_wRn(rd1);
2193 break;
2194 case 0xb:
2195 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2196 break;
2197 default:
2198 return 1;
2199 }
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2205 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2206 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2207 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2211 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
947a2fa2 2212 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
da6b5335 2213 tcg_temp_free(tmp);
18c9b560
AZ
2214 gen_op_iwmmxt_movq_wRn_M0(wrd);
2215 gen_op_iwmmxt_set_mup();
2216 gen_op_iwmmxt_set_cup();
2217 break;
2218 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2219 case 0x418: case 0x518: case 0x618: case 0x718:
2220 case 0x818: case 0x918: case 0xa18: case 0xb18:
2221 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2222 wrd = (insn >> 12) & 0xf;
2223 rd0 = (insn >> 16) & 0xf;
2224 rd1 = (insn >> 0) & 0xf;
2225 gen_op_iwmmxt_movq_M0_wRn(rd0);
2226 switch ((insn >> 20) & 0xf) {
2227 case 0x0:
2228 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2229 break;
2230 case 0x1:
2231 gen_op_iwmmxt_addub_M0_wRn(rd1);
2232 break;
2233 case 0x3:
2234 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2235 break;
2236 case 0x4:
2237 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2238 break;
2239 case 0x5:
2240 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2241 break;
2242 case 0x7:
2243 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2244 break;
2245 case 0x8:
2246 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2247 break;
2248 case 0x9:
2249 gen_op_iwmmxt_addul_M0_wRn(rd1);
2250 break;
2251 case 0xb:
2252 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2253 break;
2254 default:
2255 return 1;
2256 }
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2262 case 0x408: case 0x508: case 0x608: case 0x708:
2263 case 0x808: case 0x908: case 0xa08: case 0xb08:
2264 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2265 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2266 return 1;
18c9b560
AZ
2267 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf;
2269 rd1 = (insn >> 0) & 0xf;
2270 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2271 switch ((insn >> 22) & 3) {
18c9b560
AZ
2272 case 1:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2277 break;
2278 case 2:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_packul_M0_wRn(rd1);
2283 break;
2284 case 3:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2289 break;
2290 }
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x201: case 0x203: case 0x205: case 0x207:
2296 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2297 case 0x211: case 0x213: case 0x215: case 0x217:
2298 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2299 wrd = (insn >> 5) & 0xf;
2300 rd0 = (insn >> 12) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 if (rd0 == 0xf || rd1 == 0xf)
2303 return 1;
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2305 tmp = load_reg(s, rd0);
2306 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2307 switch ((insn >> 16) & 0xf) {
2308 case 0x0: /* TMIA */
da6b5335 2309 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2310 break;
2311 case 0x8: /* TMIAPH */
da6b5335 2312 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2313 break;
2314 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2315 if (insn & (1 << 16))
da6b5335 2316 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2317 if (insn & (1 << 17))
da6b5335
FN
2318 tcg_gen_shri_i32(tmp2, tmp2, 16);
2319 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2320 break;
2321 default:
7d1b0095
PM
2322 tcg_temp_free_i32(tmp2);
2323 tcg_temp_free_i32(tmp);
18c9b560
AZ
2324 return 1;
2325 }
7d1b0095
PM
2326 tcg_temp_free_i32(tmp2);
2327 tcg_temp_free_i32(tmp);
18c9b560
AZ
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 break;
2331 default:
2332 return 1;
2333 }
2334
2335 return 0;
2336}
2337
2338/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2339 (i.e. an undefined instruction). */
2340static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2341{
2342 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2343 TCGv tmp, tmp2;
18c9b560
AZ
2344
2345 if ((insn & 0x0ff00f10) == 0x0e200010) {
2346 /* Multiply with Internal Accumulate Format */
2347 rd0 = (insn >> 12) & 0xf;
2348 rd1 = insn & 0xf;
2349 acc = (insn >> 5) & 7;
2350
2351 if (acc != 0)
2352 return 1;
2353
3a554c0f
FN
2354 tmp = load_reg(s, rd0);
2355 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2356 switch ((insn >> 16) & 0xf) {
2357 case 0x0: /* MIA */
3a554c0f 2358 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2359 break;
2360 case 0x8: /* MIAPH */
3a554c0f 2361 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2362 break;
2363 case 0xc: /* MIABB */
2364 case 0xd: /* MIABT */
2365 case 0xe: /* MIATB */
2366 case 0xf: /* MIATT */
18c9b560 2367 if (insn & (1 << 16))
3a554c0f 2368 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2369 if (insn & (1 << 17))
3a554c0f
FN
2370 tcg_gen_shri_i32(tmp2, tmp2, 16);
2371 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2372 break;
2373 default:
2374 return 1;
2375 }
7d1b0095
PM
2376 tcg_temp_free_i32(tmp2);
2377 tcg_temp_free_i32(tmp);
18c9b560
AZ
2378
2379 gen_op_iwmmxt_movq_wRn_M0(acc);
2380 return 0;
2381 }
2382
2383 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2384 /* Internal Accumulator Access Format */
2385 rdhi = (insn >> 16) & 0xf;
2386 rdlo = (insn >> 12) & 0xf;
2387 acc = insn & 7;
2388
2389 if (acc != 0)
2390 return 1;
2391
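        /* MRA/MAR move the 40-bit XScale accumulator acc0 to/from a pair of
           core registers; only bits [39:32] are significant in rdhi. */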
2392 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2393 iwmmxt_load_reg(cpu_V0, acc);
2394 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2395 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2396 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2397 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2398 } else { /* MAR */
3a554c0f
FN
2399 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2400 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2401 }
2402 return 0;
2403 }
2404
2405 return 1;
2406}
2407
c1713132
AZ
2408/* Disassemble a system coprocessor instruction. Return nonzero if the
2409 instruction is not defined. */
2410static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2411{
b75263d6 2412 TCGv tmp, tmp2;
c1713132
AZ
2413 uint32_t rd = (insn >> 12) & 0xf;
2414 uint32_t cp = (insn >> 8) & 0xf;
2415 if (IS_USER(s)) {
2416 return 1;
2417 }
2418
18c9b560 2419 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2420 if (!env->cp[cp].cp_read)
2421 return 1;
8984bd2e 2422 gen_set_pc_im(s->pc);
7d1b0095 2423 tmp = tcg_temp_new_i32();
b75263d6
JR
2424 tmp2 = tcg_const_i32(insn);
2425 gen_helper_get_cp(tmp, cpu_env, tmp2);
2426 tcg_temp_free(tmp2);
8984bd2e 2427 store_reg(s, rd, tmp);
c1713132
AZ
2428 } else {
2429 if (!env->cp[cp].cp_write)
2430 return 1;
8984bd2e
PB
2431 gen_set_pc_im(s->pc);
2432 tmp = load_reg(s, rd);
b75263d6
JR
2433 tmp2 = tcg_const_i32(insn);
2434 gen_helper_set_cp(cpu_env, tmp2, tmp);
2435 tcg_temp_free(tmp2);
7d1b0095 2436 tcg_temp_free_i32(tmp);
c1713132
AZ
2437 }
2438 return 0;
2439}
2440
9ee6e8bb
PB
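/* Check whether a cp15 access from user mode is permitted: only the TLS
   registers (c13, c0) and the ISB/DSB/DMB encodings in c7 are allowed. */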
2441static int cp15_user_ok(uint32_t insn)
2442{
2443 int cpn = (insn >> 16) & 0xf;
2444 int cpm = insn & 0xf;
2445 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2446
2447 if (cpn == 13 && cpm == 0) {
2448 /* TLS register. */
2449 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2450 return 1;
2451 }
2452 if (cpn == 7) {
2453 /* ISB, DSB, DMB. */
2454 if ((cpm == 5 && op == 4)
2455 || (cpm == 10 && (op == 4 || op == 5)))
2456 return 1;
2457 }
2458 return 0;
2459}
2460
3f26c122
RV
2461static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2462{
2463 TCGv tmp;
2464 int cpn = (insn >> 16) & 0xf;
2465 int cpm = insn & 0xf;
2466 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2467
2468 if (!arm_feature(env, ARM_FEATURE_V6K))
2469 return 0;
2470
2471 if (!(cpn == 13 && cpm == 0))
2472 return 0;
2473
2474 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2475 switch (op) {
2476 case 2:
c5883be2 2477 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2478 break;
2479 case 3:
c5883be2 2480 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2481 break;
2482 case 4:
c5883be2 2483 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2484 break;
2485 default:
3f26c122
RV
2486 return 0;
2487 }
2488 store_reg(s, rd, tmp);
2489
2490 } else {
2491 tmp = load_reg(s, rd);
2492 switch (op) {
2493 case 2:
c5883be2 2494 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2495 break;
2496 case 3:
c5883be2 2497 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2498 break;
2499 case 4:
c5883be2 2500 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2501 break;
2502 default:
7d1b0095 2503 tcg_temp_free_i32(tmp);
3f26c122
RV
2504 return 0;
2505 }
3f26c122
RV
2506 }
2507 return 1;
2508}
2509
b5ff1b31
FB
2510/* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2511 instruction is not defined. */
a90b7318 2512static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2513{
2514 uint32_t rd;
b75263d6 2515 TCGv tmp, tmp2;
b5ff1b31 2516
9ee6e8bb
PB
2517 /* M profile cores use memory mapped registers instead of cp15. */
2518 if (arm_feature(env, ARM_FEATURE_M))
2519 return 1;
2520
2521 if ((insn & (1 << 25)) == 0) {
2522 if (insn & (1 << 20)) {
2523 /* mrrc */
2524 return 1;
2525 }
2526 /* mcrr. Used for block cache operations, so implement as no-op. */
2527 return 0;
2528 }
2529 if ((insn & (1 << 4)) == 0) {
2530 /* cdp */
2531 return 1;
2532 }
2533 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2534 return 1;
2535 }
cc688901
PM
2536
2537 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2538 * instructions rather than a separate instruction.
2539 */
2540 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2541 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2542 * In v7, this must NOP.
2543 */
2544 if (!arm_feature(env, ARM_FEATURE_V7)) {
2545 /* Wait for interrupt. */
2546 gen_set_pc_im(s->pc);
2547 s->is_jmp = DISAS_WFI;
2548 }
9332f9da
FB
2549 return 0;
2550 }
cc688901
PM
2551
2552 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2553 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2554 * so this is slightly over-broad.
2555 */
2556 if (!arm_feature(env, ARM_FEATURE_V6)) {
2557 /* Wait for interrupt. */
2558 gen_set_pc_im(s->pc);
2559 s->is_jmp = DISAS_WFI;
2560 return 0;
2561 }
2562 /* Otherwise fall through to handle via helper function.
2563 * In particular, on v7 and some v6 cores this is one of
2564 * the VA-PA registers.
2565 */
2566 }
2567
b5ff1b31 2568 rd = (insn >> 12) & 0xf;
3f26c122
RV
2569
2570 if (cp15_tls_load_store(env, s, insn, rd))
2571 return 0;
2572
b75263d6 2573 tmp2 = tcg_const_i32(insn);
18c9b560 2574 if (insn & ARM_CP_RW_BIT) {
7d1b0095 2575 tmp = tcg_temp_new_i32();
b75263d6 2576 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2577 /* If the destination register is r15 then the condition codes are set instead. */
2578 if (rd != 15)
8984bd2e
PB
2579 store_reg(s, rd, tmp);
2580 else
7d1b0095 2581 tcg_temp_free_i32(tmp);
b5ff1b31 2582 } else {
8984bd2e 2583 tmp = load_reg(s, rd);
b75263d6 2584 gen_helper_set_cp15(cpu_env, tmp2, tmp);
7d1b0095 2585 tcg_temp_free_i32(tmp);
a90b7318
AZ
2586 /* Normally we would always end the TB here, but Linux
2587 * arch/arm/mach-pxa/sleep.S expects two instructions following
2588 * an MMU enable to execute from cache. Imitate this behaviour. */
2589 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2590 (insn & 0x0fff0fff) != 0x0e010f10)
2591 gen_lookup_tb(s);
b5ff1b31 2592 }
b75263d6 2593 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2594 return 0;
2595}
2596
9ee6e8bb
PB
2597#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2598#define VFP_SREG(insn, bigbit, smallbit) \
2599 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2600#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2601 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2602 reg = (((insn) >> (bigbit)) & 0x0f) \
2603 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2604 } else { \
2605 if (insn & (1 << (smallbit))) \
2606 return 1; \
2607 reg = ((insn) >> (bigbit)) & 0x0f; \
2608 }} while (0)
2609
2610#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2611#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2612#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2613#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2614#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2615#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
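/* For example, VFP_SREG_D() builds the single-precision register number
   Vd:D from insn bits [15:12] and bit 22, while on VFP3 cores VFP_DREG_D()
   builds the double-precision register number D:Vd, with D as the top bit
   of the 5-bit index. */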
2616
4373f3ce
PB
2617/* Move between integer and VFP cores. */
2618static TCGv gen_vfp_mrs(void)
2619{
7d1b0095 2620 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2621 tcg_gen_mov_i32(tmp, cpu_F0s);
2622 return tmp;
2623}
2624
2625static void gen_vfp_msr(TCGv tmp)
2626{
2627 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2628 tcg_temp_free_i32(tmp);
4373f3ce
PB
2629}
2630
ad69471c
PB
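/* Duplicate the byte at bit offset 'shift' of var across all four byte
   lanes of the 32-bit value. */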
2631static void gen_neon_dup_u8(TCGv var, int shift)
2632{
7d1b0095 2633 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2634 if (shift)
2635 tcg_gen_shri_i32(var, var, shift);
86831435 2636 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2637 tcg_gen_shli_i32(tmp, var, 8);
2638 tcg_gen_or_i32(var, var, tmp);
2639 tcg_gen_shli_i32(tmp, var, 16);
2640 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2641 tcg_temp_free_i32(tmp);
ad69471c
PB
2642}
2643
2644static void gen_neon_dup_low16(TCGv var)
2645{
7d1b0095 2646 TCGv tmp = tcg_temp_new_i32();
86831435 2647 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2648 tcg_gen_shli_i32(tmp, var, 16);
2649 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2650 tcg_temp_free_i32(tmp);
ad69471c
PB
2651}
2652
2653static void gen_neon_dup_high16(TCGv var)
2654{
7d1b0095 2655 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2656 tcg_gen_andi_i32(var, var, 0xffff0000);
2657 tcg_gen_shri_i32(tmp, var, 16);
2658 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2659 tcg_temp_free_i32(tmp);
ad69471c
PB
2660}
2661
8e18cde3
PM
2662static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2663{
2664 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2665 TCGv tmp;
2666 switch (size) {
2667 case 0:
2668 tmp = gen_ld8u(addr, IS_USER(s));
2669 gen_neon_dup_u8(tmp, 0);
2670 break;
2671 case 1:
2672 tmp = gen_ld16u(addr, IS_USER(s));
2673 gen_neon_dup_low16(tmp);
2674 break;
2675 case 2:
2676 tmp = gen_ld32(addr, IS_USER(s));
2677 break;
2678 default: /* Avoid compiler warnings. */
2679 abort();
2680 }
2681 return tmp;
2682}
2683
b7bcbe95
FB
2684/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2685 (i.e. an undefined instruction). */
2686static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2687{
2688 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2689 int dp, veclen;
312eea9f 2690 TCGv addr;
4373f3ce 2691 TCGv tmp;
ad69471c 2692 TCGv tmp2;
b7bcbe95 2693
40f137e1
PB
2694 if (!arm_feature(env, ARM_FEATURE_VFP))
2695 return 1;
2696
5df8bac1 2697 if (!s->vfp_enabled) {
9ee6e8bb 2698 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2699 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2700 return 1;
2701 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2702 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2703 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2704 return 1;
2705 }
b7bcbe95
FB
2706 dp = ((insn & 0xf00) == 0xb00);
2707 switch ((insn >> 24) & 0xf) {
2708 case 0xe:
2709 if (insn & (1 << 4)) {
2710 /* single register transfer */
b7bcbe95
FB
2711 rd = (insn >> 12) & 0xf;
2712 if (dp) {
9ee6e8bb
PB
2713 int size;
2714 int pass;
2715
2716 VFP_DREG_N(rn, insn);
2717 if (insn & 0xf)
b7bcbe95 2718 return 1;
9ee6e8bb
PB
2719 if (insn & 0x00c00060
2720 && !arm_feature(env, ARM_FEATURE_NEON))
2721 return 1;
2722
2723 pass = (insn >> 21) & 1;
2724 if (insn & (1 << 22)) {
2725 size = 0;
2726 offset = ((insn >> 5) & 3) * 8;
2727 } else if (insn & (1 << 5)) {
2728 size = 1;
2729 offset = (insn & (1 << 6)) ? 16 : 0;
2730 } else {
2731 size = 2;
2732 offset = 0;
2733 }
18c9b560 2734 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2735 /* vfp->arm */
ad69471c 2736 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2737 switch (size) {
2738 case 0:
9ee6e8bb 2739 if (offset)
ad69471c 2740 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2741 if (insn & (1 << 23))
ad69471c 2742 gen_uxtb(tmp);
9ee6e8bb 2743 else
ad69471c 2744 gen_sxtb(tmp);
9ee6e8bb
PB
2745 break;
2746 case 1:
9ee6e8bb
PB
2747 if (insn & (1 << 23)) {
2748 if (offset) {
ad69471c 2749 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2750 } else {
ad69471c 2751 gen_uxth(tmp);
9ee6e8bb
PB
2752 }
2753 } else {
2754 if (offset) {
ad69471c 2755 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2756 } else {
ad69471c 2757 gen_sxth(tmp);
9ee6e8bb
PB
2758 }
2759 }
2760 break;
2761 case 2:
9ee6e8bb
PB
2762 break;
2763 }
ad69471c 2764 store_reg(s, rd, tmp);
b7bcbe95
FB
2765 } else {
2766 /* arm->vfp */
ad69471c 2767 tmp = load_reg(s, rd);
9ee6e8bb
PB
2768 if (insn & (1 << 23)) {
2769 /* VDUP */
2770 if (size == 0) {
ad69471c 2771 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2772 } else if (size == 1) {
ad69471c 2773 gen_neon_dup_low16(tmp);
9ee6e8bb 2774 }
cbbccffc 2775 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2776 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2777 tcg_gen_mov_i32(tmp2, tmp);
2778 neon_store_reg(rn, n, tmp2);
2779 }
2780 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2781 } else {
2782 /* VMOV */
2783 switch (size) {
2784 case 0:
ad69471c
PB
2785 tmp2 = neon_load_reg(rn, pass);
2786 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2787 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2788 break;
2789 case 1:
ad69471c
PB
2790 tmp2 = neon_load_reg(rn, pass);
2791 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2792 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2793 break;
2794 case 2:
9ee6e8bb
PB
2795 break;
2796 }
ad69471c 2797 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2798 }
b7bcbe95 2799 }
9ee6e8bb
PB
2800 } else { /* !dp */
2801 if ((insn & 0x6f) != 0x00)
2802 return 1;
2803 rn = VFP_SREG_N(insn);
18c9b560 2804 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2805 /* vfp->arm */
2806 if (insn & (1 << 21)) {
2807 /* system register */
40f137e1 2808 rn >>= 1;
9ee6e8bb 2809
b7bcbe95 2810 switch (rn) {
40f137e1 2811 case ARM_VFP_FPSID:
4373f3ce 2812 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2813 VFP3 restricts all id registers to privileged
2814 accesses. */
2815 if (IS_USER(s)
2816 && arm_feature(env, ARM_FEATURE_VFP3))
2817 return 1;
4373f3ce 2818 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2819 break;
40f137e1 2820 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2821 if (IS_USER(s))
2822 return 1;
4373f3ce 2823 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2824 break;
40f137e1
PB
2825 case ARM_VFP_FPINST:
2826 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2827 /* Not present in VFP3. */
2828 if (IS_USER(s)
2829 || arm_feature(env, ARM_FEATURE_VFP3))
2830 return 1;
4373f3ce 2831 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2832 break;
40f137e1 2833 case ARM_VFP_FPSCR:
601d70b9 2834 if (rd == 15) {
4373f3ce
PB
2835 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2836 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2837 } else {
7d1b0095 2838 tmp = tcg_temp_new_i32();
4373f3ce
PB
2839 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2840 }
b7bcbe95 2841 break;
9ee6e8bb
PB
2842 case ARM_VFP_MVFR0:
2843 case ARM_VFP_MVFR1:
2844 if (IS_USER(s)
2845 || !arm_feature(env, ARM_FEATURE_VFP3))
2846 return 1;
4373f3ce 2847 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2848 break;
b7bcbe95
FB
2849 default:
2850 return 1;
2851 }
2852 } else {
2853 gen_mov_F0_vreg(0, rn);
4373f3ce 2854 tmp = gen_vfp_mrs();
b7bcbe95
FB
2855 }
2856 if (rd == 15) {
b5ff1b31 2857 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2858 gen_set_nzcv(tmp);
7d1b0095 2859 tcg_temp_free_i32(tmp);
4373f3ce
PB
2860 } else {
2861 store_reg(s, rd, tmp);
2862 }
b7bcbe95
FB
2863 } else {
2864 /* arm->vfp */
4373f3ce 2865 tmp = load_reg(s, rd);
b7bcbe95 2866 if (insn & (1 << 21)) {
40f137e1 2867 rn >>= 1;
b7bcbe95
FB
2868 /* system register */
2869 switch (rn) {
40f137e1 2870 case ARM_VFP_FPSID:
9ee6e8bb
PB
2871 case ARM_VFP_MVFR0:
2872 case ARM_VFP_MVFR1:
b7bcbe95
FB
2873 /* Writes are ignored. */
2874 break;
40f137e1 2875 case ARM_VFP_FPSCR:
4373f3ce 2876 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2877 tcg_temp_free_i32(tmp);
b5ff1b31 2878 gen_lookup_tb(s);
b7bcbe95 2879 break;
40f137e1 2880 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2881 if (IS_USER(s))
2882 return 1;
71b3c3de
JR
2883 /* TODO: VFP subarchitecture support.
2884 * For now, keep the EN bit only */
2885 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2886 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2887 gen_lookup_tb(s);
2888 break;
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
4373f3ce 2891 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2892 break;
b7bcbe95
FB
2893 default:
2894 return 1;
2895 }
2896 } else {
4373f3ce 2897 gen_vfp_msr(tmp);
b7bcbe95
FB
2898 gen_mov_vreg_F0(0, rn);
2899 }
2900 }
2901 }
2902 } else {
2903 /* data processing */
2904 /* The opcode is in bits 23, 21, 20 and 6. */
2905 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2906 if (dp) {
2907 if (op == 15) {
2908 /* rn is opcode */
2909 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2910 } else {
2911 /* rn is register number */
9ee6e8bb 2912 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2913 }
2914
04595bf6 2915 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2916 /* Integer or single precision destination. */
9ee6e8bb 2917 rd = VFP_SREG_D(insn);
b7bcbe95 2918 } else {
9ee6e8bb 2919 VFP_DREG_D(rd, insn);
b7bcbe95 2920 }
04595bf6
PM
2921 if (op == 15 &&
2922 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2923 /* VCVT from int is always from S reg regardless of dp bit.
2924 * VCVT with immediate frac_bits has same format as SREG_M
2925 */
2926 rm = VFP_SREG_M(insn);
b7bcbe95 2927 } else {
9ee6e8bb 2928 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2929 }
2930 } else {
9ee6e8bb 2931 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2932 if (op == 15 && rn == 15) {
2933 /* Double precision destination. */
9ee6e8bb
PB
2934 VFP_DREG_D(rd, insn);
2935 } else {
2936 rd = VFP_SREG_D(insn);
2937 }
04595bf6
PM
2938 /* NB that we implicitly rely on the encoding for the frac_bits
2939 * in VCVT of fixed to float being the same as that of an SREG_M
2940 */
9ee6e8bb 2941 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2942 }
2943
69d1fc22 2944 veclen = s->vec_len;
b7bcbe95
FB
2945 if (op == 15 && rn > 3)
2946 veclen = 0;
2947
2948 /* Shut up compiler warnings. */
2949 delta_m = 0;
2950 delta_d = 0;
2951 bank_mask = 0;
3b46e624 2952
b7bcbe95
FB
2953 if (veclen > 0) {
2954 if (dp)
2955 bank_mask = 0xc;
2956 else
2957 bank_mask = 0x18;
2958
2959 /* Figure out what type of vector operation this is. */
2960 if ((rd & bank_mask) == 0) {
2961 /* scalar */
2962 veclen = 0;
2963 } else {
2964 if (dp)
69d1fc22 2965 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2966 else
69d1fc22 2967 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2968
2969 if ((rm & bank_mask) == 0) {
2970 /* mixed scalar/vector */
2971 delta_m = 0;
2972 } else {
2973 /* vector */
2974 delta_m = delta_d;
2975 }
2976 }
2977 }
2978
2979 /* Load the initial operands. */
2980 if (op == 15) {
2981 switch (rn) {
2982 case 16:
2983 case 17:
2984 /* Integer source */
2985 gen_mov_F0_vreg(0, rm);
2986 break;
2987 case 8:
2988 case 9:
2989 /* Compare */
2990 gen_mov_F0_vreg(dp, rd);
2991 gen_mov_F1_vreg(dp, rm);
2992 break;
2993 case 10:
2994 case 11:
2995 /* Compare with zero */
2996 gen_mov_F0_vreg(dp, rd);
2997 gen_vfp_F1_ld0(dp);
2998 break;
9ee6e8bb
PB
2999 case 20:
3000 case 21:
3001 case 22:
3002 case 23:
644ad806
PB
3003 case 28:
3004 case 29:
3005 case 30:
3006 case 31:
9ee6e8bb
PB
3007 /* Source and destination the same. */
3008 gen_mov_F0_vreg(dp, rd);
3009 break;
b7bcbe95
FB
3010 default:
3011 /* One source operand. */
3012 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3013 break;
b7bcbe95
FB
3014 }
3015 } else {
3016 /* Two source operands. */
3017 gen_mov_F0_vreg(dp, rn);
3018 gen_mov_F1_vreg(dp, rm);
3019 }
3020
3021 for (;;) {
3022 /* Perform the calculation. */
3023 switch (op) {
3024 case 0: /* mac: fd + (fn * fm) */
3025 gen_vfp_mul(dp);
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_add(dp);
3028 break;
3029 case 1: /* nmac: fd - (fn * fm) */
3030 gen_vfp_mul(dp);
3031 gen_vfp_neg(dp);
3032 gen_mov_F1_vreg(dp, rd);
3033 gen_vfp_add(dp);
3034 break;
3035 case 2: /* msc: -fd + (fn * fm) */
3036 gen_vfp_mul(dp);
3037 gen_mov_F1_vreg(dp, rd);
3038 gen_vfp_sub(dp);
3039 break;
3040 case 3: /* nmsc: -fd - (fn * fm) */
3041 gen_vfp_mul(dp);
b7bcbe95 3042 gen_vfp_neg(dp);
c9fb531a
PB
3043 gen_mov_F1_vreg(dp, rd);
3044 gen_vfp_sub(dp);
b7bcbe95
FB
3045 break;
3046 case 4: /* mul: fn * fm */
3047 gen_vfp_mul(dp);
3048 break;
3049 case 5: /* nmul: -(fn * fm) */
3050 gen_vfp_mul(dp);
3051 gen_vfp_neg(dp);
3052 break;
3053 case 6: /* add: fn + fm */
3054 gen_vfp_add(dp);
3055 break;
3056 case 7: /* sub: fn - fm */
3057 gen_vfp_sub(dp);
3058 break;
3059 case 8: /* div: fn / fm */
3060 gen_vfp_div(dp);
3061 break;
9ee6e8bb
PB
3062 case 14: /* fconst */
3063 if (!arm_feature(env, ARM_FEATURE_VFP3))
3064 return 1;
3065
3066 n = (insn << 12) & 0x80000000;
3067 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3068 if (dp) {
3069 if (i & 0x40)
3070 i |= 0x3f80;
3071 else
3072 i |= 0x4000;
3073 n |= i << 16;
4373f3ce 3074 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3075 } else {
3076 if (i & 0x40)
3077 i |= 0x780;
3078 else
3079 i |= 0x800;
3080 n |= i << 19;
5b340b51 3081 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3082 }
9ee6e8bb 3083 break;
b7bcbe95
FB
3084 case 15: /* extension space */
3085 switch (rn) {
3086 case 0: /* cpy */
3087 /* no-op */
3088 break;
3089 case 1: /* abs */
3090 gen_vfp_abs(dp);
3091 break;
3092 case 2: /* neg */
3093 gen_vfp_neg(dp);
3094 break;
3095 case 3: /* sqrt */
3096 gen_vfp_sqrt(dp);
3097 break;
60011498
PB
3098 case 4: /* vcvtb.f32.f16 */
3099 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3100 return 1;
3101 tmp = gen_vfp_mrs();
3102 tcg_gen_ext16u_i32(tmp, tmp);
3103 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3104 tcg_temp_free_i32(tmp);
60011498
PB
3105 break;
3106 case 5: /* vcvtt.f32.f16 */
3107 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3108 return 1;
3109 tmp = gen_vfp_mrs();
3110 tcg_gen_shri_i32(tmp, tmp, 16);
3111 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3112 tcg_temp_free_i32(tmp);
60011498
PB
3113 break;
3114 case 6: /* vcvtb.f16.f32 */
3115 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3116 return 1;
7d1b0095 3117 tmp = tcg_temp_new_i32();
60011498
PB
3118 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3119 gen_mov_F0_vreg(0, rd);
3120 tmp2 = gen_vfp_mrs();
3121 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3122 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3123 tcg_temp_free_i32(tmp2);
60011498
PB
3124 gen_vfp_msr(tmp);
3125 break;
3126 case 7: /* vcvtt.f16.f32 */
3127 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3128 return 1;
7d1b0095 3129 tmp = tcg_temp_new_i32();
60011498
PB
3130 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3131 tcg_gen_shli_i32(tmp, tmp, 16);
3132 gen_mov_F0_vreg(0, rd);
3133 tmp2 = gen_vfp_mrs();
3134 tcg_gen_ext16u_i32(tmp2, tmp2);
3135 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3136 tcg_temp_free_i32(tmp2);
60011498
PB
3137 gen_vfp_msr(tmp);
3138 break;
b7bcbe95
FB
3139 case 8: /* cmp */
3140 gen_vfp_cmp(dp);
3141 break;
3142 case 9: /* cmpe */
3143 gen_vfp_cmpe(dp);
3144 break;
3145 case 10: /* cmpz */
3146 gen_vfp_cmp(dp);
3147 break;
3148 case 11: /* cmpez */
3149 gen_vfp_F1_ld0(dp);
3150 gen_vfp_cmpe(dp);
3151 break;
3152 case 15: /* single<->double conversion */
3153 if (dp)
4373f3ce 3154 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3155 else
4373f3ce 3156 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3157 break;
3158 case 16: /* fuito */
3159 gen_vfp_uito(dp);
3160 break;
3161 case 17: /* fsito */
3162 gen_vfp_sito(dp);
3163 break;
9ee6e8bb
PB
3164 case 20: /* fshto */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
644ad806 3167 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3168 break;
3169 case 21: /* fslto */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
644ad806 3172 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3173 break;
3174 case 22: /* fuhto */
3175 if (!arm_feature(env, ARM_FEATURE_VFP3))
3176 return 1;
644ad806 3177 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3178 break;
3179 case 23: /* fulto */
3180 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 return 1;
644ad806 3182 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3183 break;
b7bcbe95
FB
3184 case 24: /* ftoui */
3185 gen_vfp_toui(dp);
3186 break;
3187 case 25: /* ftouiz */
3188 gen_vfp_touiz(dp);
3189 break;
3190 case 26: /* ftosi */
3191 gen_vfp_tosi(dp);
3192 break;
3193 case 27: /* ftosiz */
3194 gen_vfp_tosiz(dp);
3195 break;
9ee6e8bb
PB
3196 case 28: /* ftosh */
3197 if (!arm_feature(env, ARM_FEATURE_VFP3))
3198 return 1;
644ad806 3199 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3200 break;
3201 case 29: /* ftosl */
3202 if (!arm_feature(env, ARM_FEATURE_VFP3))
3203 return 1;
644ad806 3204 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3205 break;
3206 case 30: /* ftouh */
3207 if (!arm_feature(env, ARM_FEATURE_VFP3))
3208 return 1;
644ad806 3209 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3210 break;
3211 case 31: /* ftoul */
3212 if (!arm_feature(env, ARM_FEATURE_VFP3))
3213 return 1;
644ad806 3214 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3215 break;
b7bcbe95
FB
3216 default: /* undefined */
3217 printf ("rn:%d\n", rn);
3218 return 1;
3219 }
3220 break;
3221 default: /* undefined */
3222 printf ("op:%d\n", op);
3223 return 1;
3224 }
3225
3226 /* Write back the result. */
3227 if (op == 15 && (rn >= 8 && rn <= 11))
3228 ; /* Comparison, do nothing. */
04595bf6
PM
3229 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3230 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3231 gen_mov_vreg_F0(0, rd);
3232 else if (op == 15 && rn == 15)
3233 /* conversion */
3234 gen_mov_vreg_F0(!dp, rd);
3235 else
3236 gen_mov_vreg_F0(dp, rd);
3237
3238 /* break out of the loop if we have finished */
3239 if (veclen == 0)
3240 break;
3241
3242 if (op == 15 && delta_m == 0) {
3243 /* single source one-many */
3244 while (veclen--) {
3245 rd = ((rd + delta_d) & (bank_mask - 1))
3246 | (rd & bank_mask);
3247 gen_mov_vreg_F0(dp, rd);
3248 }
3249 break;
3250 }
3251 /* Set up the next operands. */
3252 veclen--;
3253 rd = ((rd + delta_d) & (bank_mask - 1))
3254 | (rd & bank_mask);
3255
3256 if (op == 15) {
3257 /* One source operand. */
3258 rm = ((rm + delta_m) & (bank_mask - 1))
3259 | (rm & bank_mask);
3260 gen_mov_F0_vreg(dp, rm);
3261 } else {
3262 /* Two source operands. */
3263 rn = ((rn + delta_d) & (bank_mask - 1))
3264 | (rn & bank_mask);
3265 gen_mov_F0_vreg(dp, rn);
3266 if (delta_m) {
3267 rm = ((rm + delta_m) & (bank_mask - 1))
3268 | (rm & bank_mask);
3269 gen_mov_F1_vreg(dp, rm);
3270 }
3271 }
3272 }
3273 }
3274 break;
3275 case 0xc:
3276 case 0xd:
8387da81 3277 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3278 /* two-register transfer */
3279 rn = (insn >> 16) & 0xf;
3280 rd = (insn >> 12) & 0xf;
3281 if (dp) {
9ee6e8bb
PB
3282 VFP_DREG_M(rm, insn);
3283 } else {
3284 rm = VFP_SREG_M(insn);
3285 }
b7bcbe95 3286
18c9b560 3287 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3288 /* vfp->arm */
3289 if (dp) {
4373f3ce
PB
3290 gen_mov_F0_vreg(0, rm * 2);
3291 tmp = gen_vfp_mrs();
3292 store_reg(s, rd, tmp);
3293 gen_mov_F0_vreg(0, rm * 2 + 1);
3294 tmp = gen_vfp_mrs();
3295 store_reg(s, rn, tmp);
b7bcbe95
FB
3296 } else {
3297 gen_mov_F0_vreg(0, rm);
4373f3ce 3298 tmp = gen_vfp_mrs();
8387da81 3299 store_reg(s, rd, tmp);
b7bcbe95 3300 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3301 tmp = gen_vfp_mrs();
8387da81 3302 store_reg(s, rn, tmp);
b7bcbe95
FB
3303 }
3304 } else {
3305 /* arm->vfp */
3306 if (dp) {
4373f3ce
PB
3307 tmp = load_reg(s, rd);
3308 gen_vfp_msr(tmp);
3309 gen_mov_vreg_F0(0, rm * 2);
3310 tmp = load_reg(s, rn);
3311 gen_vfp_msr(tmp);
3312 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3313 } else {
8387da81 3314 tmp = load_reg(s, rd);
4373f3ce 3315 gen_vfp_msr(tmp);
b7bcbe95 3316 gen_mov_vreg_F0(0, rm);
8387da81 3317 tmp = load_reg(s, rn);
4373f3ce 3318 gen_vfp_msr(tmp);
b7bcbe95
FB
3319 gen_mov_vreg_F0(0, rm + 1);
3320 }
3321 }
3322 } else {
3323 /* Load/store */
3324 rn = (insn >> 16) & 0xf;
3325 if (dp)
9ee6e8bb 3326 VFP_DREG_D(rd, insn);
b7bcbe95 3327 else
9ee6e8bb
PB
3328 rd = VFP_SREG_D(insn);
3329 if (s->thumb && rn == 15) {
7d1b0095 3330 addr = tcg_temp_new_i32();
312eea9f 3331 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3332 } else {
312eea9f 3333 addr = load_reg(s, rn);
9ee6e8bb 3334 }
b7bcbe95
FB
3335 if ((insn & 0x01200000) == 0x01000000) {
3336 /* Single load/store */
3337 offset = (insn & 0xff) << 2;
3338 if ((insn & (1 << 23)) == 0)
3339 offset = -offset;
312eea9f 3340 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3341 if (insn & (1 << 20)) {
312eea9f 3342 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3343 gen_mov_vreg_F0(dp, rd);
3344 } else {
3345 gen_mov_F0_vreg(dp, rd);
312eea9f 3346 gen_vfp_st(s, dp, addr);
b7bcbe95 3347 }
7d1b0095 3348 tcg_temp_free_i32(addr);
b7bcbe95
FB
3349 } else {
3350 /* load/store multiple */
3351 if (dp)
3352 n = (insn >> 1) & 0x7f;
3353 else
3354 n = insn & 0xff;
3355
3356 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3357 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3358
3359 if (dp)
3360 offset = 8;
3361 else
3362 offset = 4;
3363 for (i = 0; i < n; i++) {
18c9b560 3364 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3365 /* load */
312eea9f 3366 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3367 gen_mov_vreg_F0(dp, rd + i);
3368 } else {
3369 /* store */
3370 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3371 gen_vfp_st(s, dp, addr);
b7bcbe95 3372 }
312eea9f 3373 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3374 }
3375 if (insn & (1 << 21)) {
3376 /* writeback */
3377 if (insn & (1 << 24))
3378 offset = -offset * n;
3379 else if (dp && (insn & 1))
3380 offset = 4;
3381 else
3382 offset = 0;
3383
3384 if (offset != 0)
312eea9f
FN
3385 tcg_gen_addi_i32(addr, addr, offset);
3386 store_reg(s, rn, addr);
3387 } else {
7d1b0095 3388 tcg_temp_free_i32(addr);
b7bcbe95
FB
3389 }
3390 }
3391 }
3392 break;
3393 default:
3394 /* Should never happen. */
3395 return 1;
3396 }
3397 return 0;
3398}
3399
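/* Jump to a known destination: use direct TB chaining when the target is
   in the same guest page as this TB, otherwise write the PC and exit. */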
6e256c93 3400static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3401{
6e256c93
FB
3402 TranslationBlock *tb;
3403
3404 tb = s->tb;
3405 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3406 tcg_gen_goto_tb(n);
8984bd2e 3407 gen_set_pc_im(dest);
4b4a72e5 3408 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3409 } else {
8984bd2e 3410 gen_set_pc_im(dest);
57fec1fe 3411 tcg_gen_exit_tb(0);
6e256c93 3412 }
c53be334
FB
3413}
3414
8aaca4c0
FB
3415static inline void gen_jmp (DisasContext *s, uint32_t dest)
3416{
551bd27f 3417 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3418 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3419 if (s->thumb)
d9ba4830
PB
3420 dest |= 1;
3421 gen_bx_im(s, dest);
8aaca4c0 3422 } else {
6e256c93 3423 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3424 s->is_jmp = DISAS_TB_JUMP;
3425 }
3426}
3427
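/* 16 x 16 -> 32 signed multiply used by the SMULxy/SMLAxy family: x and y
   select the top (1) or bottom (0) halfword of t0 and t1 respectively. */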
d9ba4830 3428static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3429{
ee097184 3430 if (x)
d9ba4830 3431 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3432 else
d9ba4830 3433 gen_sxth(t0);
ee097184 3434 if (y)
d9ba4830 3435 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3436 else
d9ba4830
PB
3437 gen_sxth(t1);
3438 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3439}
3440
3441/* Return the mask of PSR bits set by a MSR instruction. */
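/* 'flags' holds the instruction's four field-mask bits: bit 0 selects
   PSR[7:0] (c), bit 1 PSR[15:8] (x), bit 2 PSR[23:16] (s) and bit 3
   PSR[31:24] (f). */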
9ee6e8bb 3442static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3443 uint32_t mask;
3444
3445 mask = 0;
3446 if (flags & (1 << 0))
3447 mask |= 0xff;
3448 if (flags & (1 << 1))
3449 mask |= 0xff00;
3450 if (flags & (1 << 2))
3451 mask |= 0xff0000;
3452 if (flags & (1 << 3))
3453 mask |= 0xff000000;
9ee6e8bb 3454
2ae23e75 3455 /* Mask out undefined bits. */
9ee6e8bb 3456 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3457 if (!arm_feature(env, ARM_FEATURE_V4T))
3458 mask &= ~CPSR_T;
3459 if (!arm_feature(env, ARM_FEATURE_V5))
3460 mask &= ~CPSR_Q; /* V5TE in reality */
9ee6e8bb 3461 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3462 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3463 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3464 mask &= ~CPSR_IT;
9ee6e8bb 3465 /* Mask out execution state bits. */
2ae23e75 3466 if (!spsr)
e160c51c 3467 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3468 /* Mask out privileged bits. */
3469 if (IS_USER(s))
9ee6e8bb 3470 mask &= CPSR_USER;
b5ff1b31
FB
3471 return mask;
3472}
3473
2fbac54b
FN
3474/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3475static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3476{
d9ba4830 3477 TCGv tmp;
b5ff1b31
FB
3478 if (spsr) {
3479 /* ??? This is also undefined in system mode. */
3480 if (IS_USER(s))
3481 return 1;
d9ba4830
PB
3482
3483 tmp = load_cpu_field(spsr);
3484 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3485 tcg_gen_andi_i32(t0, t0, mask);
3486 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3487 store_cpu_field(tmp, spsr);
b5ff1b31 3488 } else {
2fbac54b 3489 gen_set_cpsr(t0, mask);
b5ff1b31 3490 }
7d1b0095 3491 tcg_temp_free_i32(t0);
b5ff1b31
FB
3492 gen_lookup_tb(s);
3493 return 0;
3494}
3495
2fbac54b
FN
3496/* Returns nonzero if access to the PSR is not permitted. */
3497static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3498{
3499 TCGv tmp;
7d1b0095 3500 tmp = tcg_temp_new_i32();
2fbac54b
FN
3501 tcg_gen_movi_i32(tmp, val);
3502 return gen_set_psr(s, mask, spsr, tmp);
3503}
3504
e9bb4aa9
JR
3505/* Generate an old-style exception return. Marks pc as dead. */
3506static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3507{
d9ba4830 3508 TCGv tmp;
e9bb4aa9 3509 store_reg(s, 15, pc);
d9ba4830
PB
3510 tmp = load_cpu_field(spsr);
3511 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3512 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3513 s->is_jmp = DISAS_UPDATE;
3514}
3515
b0109805
PB
3516/* Generate a v6 exception return. Marks both values as dead. */
3517static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3518{
b0109805 3519 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3520 tcg_temp_free_i32(cpsr);
b0109805 3521 store_reg(s, 15, pc);
9ee6e8bb
PB
3522 s->is_jmp = DISAS_UPDATE;
3523}
3b46e624 3524
9ee6e8bb
PB
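/* Write the current Thumb-2 IT (if-then) execution state back to
   env->condexec_bits so it is visible if this instruction traps. */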
3525static inline void
3526gen_set_condexec (DisasContext *s)
3527{
3528 if (s->condexec_mask) {
8f01245e 3529 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3530 TCGv tmp = tcg_temp_new_i32();
8f01245e 3531 tcg_gen_movi_i32(tmp, val);
d9ba4830 3532 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3533 }
3534}
3b46e624 3535
bc4a0de0
PM
3536static void gen_exception_insn(DisasContext *s, int offset, int excp)
3537{
3538 gen_set_condexec(s);
3539 gen_set_pc_im(s->pc - offset);
3540 gen_exception(excp);
3541 s->is_jmp = DISAS_JUMP;
3542}
3543
9ee6e8bb
PB
3544static void gen_nop_hint(DisasContext *s, int val)
3545{
3546 switch (val) {
3547 case 3: /* wfi */
8984bd2e 3548 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3549 s->is_jmp = DISAS_WFI;
3550 break;
3551 case 2: /* wfe */
3552 case 4: /* sev */
3553 /* TODO: Implement SEV and WFE. May help SMP performance. */
3554 default: /* nop */
3555 break;
3556 }
3557}
99c475ab 3558
ad69471c 3559#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3560
62698be3 3561static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3562{
3563 switch (size) {
dd8fbd78
FN
3564 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3565 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3566 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3567 default: abort();
9ee6e8bb 3568 }
9ee6e8bb
PB
3569}
3570
dd8fbd78 3571static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3572{
3573 switch (size) {
dd8fbd78
FN
3574 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3575 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3576 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3577 default: return;
3578 }
3579}
3580
3581/* 32-bit pairwise ops end up the same as the elementwise versions. */
3582#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3583#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3584#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3585#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3586
ad69471c
PB
3587#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3588 switch ((size << 1) | u) { \
3589 case 0: \
dd8fbd78 3590 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3591 break; \
3592 case 1: \
dd8fbd78 3593 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3594 break; \
3595 case 2: \
dd8fbd78 3596 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3597 break; \
3598 case 3: \
dd8fbd78 3599 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3600 break; \
3601 case 4: \
dd8fbd78 3602 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3603 break; \
3604 case 5: \
dd8fbd78 3605 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3606 break; \
3607 default: return 1; \
3608 }} while (0)
9ee6e8bb
PB
3609
3610#define GEN_NEON_INTEGER_OP(name) do { \
3611 switch ((size << 1) | u) { \
ad69471c 3612 case 0: \
dd8fbd78 3613 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3614 break; \
3615 case 1: \
dd8fbd78 3616 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3617 break; \
3618 case 2: \
dd8fbd78 3619 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3620 break; \
3621 case 3: \
dd8fbd78 3622 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3623 break; \
3624 case 4: \
dd8fbd78 3625 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3626 break; \
3627 case 5: \
dd8fbd78 3628 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3629 break; \
9ee6e8bb
PB
3630 default: return 1; \
3631 }} while (0)
3632
dd8fbd78 3633static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3634{
7d1b0095 3635 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3636 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3637 return tmp;
9ee6e8bb
PB
3638}
3639
dd8fbd78 3640static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3641{
dd8fbd78 3642 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3643 tcg_temp_free_i32(var);
9ee6e8bb
PB
3644}
3645
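/* Load the scalar operand of a Neon "by scalar" operation, replicating a
   16-bit scalar across both halves of the returned 32-bit value. */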
dd8fbd78 3646static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3647{
dd8fbd78 3648 TCGv tmp;
9ee6e8bb 3649 if (size == 1) {
0fad6efc
PM
3650 tmp = neon_load_reg(reg & 7, reg >> 4);
3651 if (reg & 8) {
dd8fbd78 3652 gen_neon_dup_high16(tmp);
0fad6efc
PM
3653 } else {
3654 gen_neon_dup_low16(tmp);
dd8fbd78 3655 }
0fad6efc
PM
3656 } else {
3657 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3658 }
dd8fbd78 3659 return tmp;
9ee6e8bb
PB
3660}
3661
02acedf9 3662static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3663{
02acedf9 3664 TCGv tmp, tmp2;
600b828c 3665 if (!q && size == 2) {
02acedf9
PM
3666 return 1;
3667 }
3668 tmp = tcg_const_i32(rd);
3669 tmp2 = tcg_const_i32(rm);
3670 if (q) {
3671 switch (size) {
3672 case 0:
2a3f75b4 3673 gen_helper_neon_qunzip8(tmp, tmp2);
02acedf9
PM
3674 break;
3675 case 1:
2a3f75b4 3676 gen_helper_neon_qunzip16(tmp, tmp2);
02acedf9
PM
3677 break;
3678 case 2:
2a3f75b4 3679 gen_helper_neon_qunzip32(tmp, tmp2);
02acedf9
PM
3680 break;
3681 default:
3682 abort();
3683 }
3684 } else {
3685 switch (size) {
3686 case 0:
2a3f75b4 3687 gen_helper_neon_unzip8(tmp, tmp2);
02acedf9
PM
3688 break;
3689 case 1:
2a3f75b4 3690 gen_helper_neon_unzip16(tmp, tmp2);
02acedf9
PM
3691 break;
3692 default:
3693 abort();
3694 }
3695 }
3696 tcg_temp_free_i32(tmp);
3697 tcg_temp_free_i32(tmp2);
3698 return 0;
19457615
FN
3699}
3700
d68a6f3a 3701static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3702{
3703 TCGv tmp, tmp2;
600b828c 3704 if (!q && size == 2) {
d68a6f3a
PM
3705 return 1;
3706 }
3707 tmp = tcg_const_i32(rd);
3708 tmp2 = tcg_const_i32(rm);
3709 if (q) {
3710 switch (size) {
3711 case 0:
2a3f75b4 3712 gen_helper_neon_qzip8(tmp, tmp2);
d68a6f3a
PM
3713 break;
3714 case 1:
2a3f75b4 3715 gen_helper_neon_qzip16(tmp, tmp2);
d68a6f3a
PM
3716 break;
3717 case 2:
2a3f75b4 3718 gen_helper_neon_qzip32(tmp, tmp2);
d68a6f3a
PM
3719 break;
3720 default:
3721 abort();
3722 }
3723 } else {
3724 switch (size) {
3725 case 0:
2a3f75b4 3726 gen_helper_neon_zip8(tmp, tmp2);
d68a6f3a
PM
3727 break;
3728 case 1:
2a3f75b4 3729 gen_helper_neon_zip16(tmp, tmp2);
d68a6f3a
PM
3730 break;
3731 default:
3732 abort();
3733 }
3734 }
3735 tcg_temp_free_i32(tmp);
3736 tcg_temp_free_i32(tmp2);
3737 return 0;
19457615
FN
3738}
3739
19457615
FN
3740static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3741{
3742 TCGv rd, tmp;
3743
7d1b0095
PM
3744 rd = tcg_temp_new_i32();
3745 tmp = tcg_temp_new_i32();
19457615
FN
3746
3747 tcg_gen_shli_i32(rd, t0, 8);
3748 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3749 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3750 tcg_gen_or_i32(rd, rd, tmp);
3751
3752 tcg_gen_shri_i32(t1, t1, 8);
3753 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3754 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3755 tcg_gen_or_i32(t1, t1, tmp);
3756 tcg_gen_mov_i32(t0, rd);
3757
7d1b0095
PM
3758 tcg_temp_free_i32(tmp);
3759 tcg_temp_free_i32(rd);
19457615
FN
3760}
3761
3762static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3763{
3764 TCGv rd, tmp;
3765
7d1b0095
PM
3766 rd = tcg_temp_new_i32();
3767 tmp = tcg_temp_new_i32();
19457615
FN
3768
3769 tcg_gen_shli_i32(rd, t0, 16);
3770 tcg_gen_andi_i32(tmp, t1, 0xffff);
3771 tcg_gen_or_i32(rd, rd, tmp);
3772 tcg_gen_shri_i32(t1, t1, 16);
3773 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3774 tcg_gen_or_i32(t1, t1, tmp);
3775 tcg_gen_mov_i32(t0, rd);
3776
7d1b0095
PM
3777 tcg_temp_free_i32(tmp);
3778 tcg_temp_free_i32(rd);
19457615
FN
3779}
3780
3781
9ee6e8bb
PB
3782static struct {
3783 int nregs;
3784 int interleave;
3785 int spacing;
3786} neon_ls_element_type[11] = {
3787 {4, 4, 1},
3788 {4, 4, 2},
3789 {4, 1, 1},
3790 {4, 2, 1},
3791 {3, 3, 1},
3792 {3, 3, 2},
3793 {3, 1, 1},
3794 {1, 1, 1},
3795 {2, 2, 1},
3796 {2, 2, 2},
3797 {2, 1, 1}
3798};
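/* This table is indexed below by the op field of the "load/store
 * multiple structures" forms: nregs is the number of D registers
 * transferred, interleave describes how elements from different
 * registers are interleaved in memory, and spacing is the step between
 * successive destination registers (1 = consecutive, 2 = double-spaced).
 */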
3799
3800/* Translate a NEON load/store element instruction. Return nonzero if the
3801 instruction is invalid. */
3802static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3803{
3804 int rd, rn, rm;
3805 int op;
3806 int nregs;
3807 int interleave;
84496233 3808 int spacing;
9ee6e8bb
PB
3809 int stride;
3810 int size;
3811 int reg;
3812 int pass;
3813 int load;
3814 int shift;
9ee6e8bb 3815 int n;
1b2b1e54 3816 TCGv addr;
b0109805 3817 TCGv tmp;
8f8e3aa4 3818 TCGv tmp2;
84496233 3819 TCGv_i64 tmp64;
9ee6e8bb 3820
5df8bac1 3821 if (!s->vfp_enabled)
9ee6e8bb
PB
3822 return 1;
3823 VFP_DREG_D(rd, insn);
3824 rn = (insn >> 16) & 0xf;
3825 rm = insn & 0xf;
3826 load = (insn & (1 << 21)) != 0;
3827 if ((insn & (1 << 23)) == 0) {
3828        /* Load/store all elements. */
3829 op = (insn >> 8) & 0xf;
3830 size = (insn >> 6) & 3;
84496233 3831 if (op > 10)
9ee6e8bb 3832 return 1;
f2dd89d0
PM
3833 /* Catch UNDEF cases for bad values of align field */
3834 switch (op & 0xc) {
3835 case 4:
3836 if (((insn >> 5) & 1) == 1) {
3837 return 1;
3838 }
3839 break;
3840 case 8:
3841 if (((insn >> 4) & 3) == 3) {
3842 return 1;
3843 }
3844 break;
3845 default:
3846 break;
3847 }
9ee6e8bb
PB
3848 nregs = neon_ls_element_type[op].nregs;
3849 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3850 spacing = neon_ls_element_type[op].spacing;
3851 if (size == 3 && (interleave | spacing) != 1)
3852 return 1;
e318a60b 3853 addr = tcg_temp_new_i32();
dcc65026 3854 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3855 stride = (1 << size) * interleave;
3856 for (reg = 0; reg < nregs; reg++) {
3857 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3858 load_reg_var(s, addr, rn);
3859 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3860 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3861 load_reg_var(s, addr, rn);
3862 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3863 }
84496233
JR
3864 if (size == 3) {
3865 if (load) {
3866 tmp64 = gen_ld64(addr, IS_USER(s));
3867 neon_store_reg64(tmp64, rd);
3868 tcg_temp_free_i64(tmp64);
3869 } else {
3870 tmp64 = tcg_temp_new_i64();
3871 neon_load_reg64(tmp64, rd);
3872 gen_st64(tmp64, addr, IS_USER(s));
3873 }
3874 tcg_gen_addi_i32(addr, addr, stride);
3875 } else {
3876 for (pass = 0; pass < 2; pass++) {
3877 if (size == 2) {
3878 if (load) {
3879 tmp = gen_ld32(addr, IS_USER(s));
3880 neon_store_reg(rd, pass, tmp);
3881 } else {
3882 tmp = neon_load_reg(rd, pass);
3883 gen_st32(tmp, addr, IS_USER(s));
3884 }
1b2b1e54 3885 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3886 } else if (size == 1) {
3887 if (load) {
3888 tmp = gen_ld16u(addr, IS_USER(s));
3889 tcg_gen_addi_i32(addr, addr, stride);
3890 tmp2 = gen_ld16u(addr, IS_USER(s));
3891 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3892 tcg_gen_shli_i32(tmp2, tmp2, 16);
3893 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3894 tcg_temp_free_i32(tmp2);
84496233
JR
3895 neon_store_reg(rd, pass, tmp);
3896 } else {
3897 tmp = neon_load_reg(rd, pass);
7d1b0095 3898 tmp2 = tcg_temp_new_i32();
84496233
JR
3899 tcg_gen_shri_i32(tmp2, tmp, 16);
3900 gen_st16(tmp, addr, IS_USER(s));
3901 tcg_gen_addi_i32(addr, addr, stride);
3902 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3903 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3904 }
84496233
JR
3905 } else /* size == 0 */ {
3906 if (load) {
3907 TCGV_UNUSED(tmp2);
3908 for (n = 0; n < 4; n++) {
3909 tmp = gen_ld8u(addr, IS_USER(s));
3910 tcg_gen_addi_i32(addr, addr, stride);
3911 if (n == 0) {
3912 tmp2 = tmp;
3913 } else {
41ba8341
PB
3914 tcg_gen_shli_i32(tmp, tmp, n * 8);
3915 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3916 tcg_temp_free_i32(tmp);
84496233 3917 }
9ee6e8bb 3918 }
84496233
JR
3919 neon_store_reg(rd, pass, tmp2);
3920 } else {
3921 tmp2 = neon_load_reg(rd, pass);
3922 for (n = 0; n < 4; n++) {
7d1b0095 3923 tmp = tcg_temp_new_i32();
84496233
JR
3924 if (n == 0) {
3925 tcg_gen_mov_i32(tmp, tmp2);
3926 } else {
3927 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3928 }
3929 gen_st8(tmp, addr, IS_USER(s));
3930 tcg_gen_addi_i32(addr, addr, stride);
3931 }
7d1b0095 3932 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3933 }
3934 }
3935 }
3936 }
84496233 3937 rd += spacing;
9ee6e8bb 3938 }
e318a60b 3939 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3940 stride = nregs * 8;
3941 } else {
3942 size = (insn >> 10) & 3;
3943 if (size == 3) {
3944 /* Load single element to all lanes. */
8e18cde3
PM
3945 int a = (insn >> 4) & 1;
3946 if (!load) {
9ee6e8bb 3947 return 1;
8e18cde3 3948 }
9ee6e8bb
PB
3949 size = (insn >> 6) & 3;
3950 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3951
3952 if (size == 3) {
3953 if (nregs != 4 || a == 0) {
9ee6e8bb 3954 return 1;
99c475ab 3955 }
8e18cde3
PM
3956 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3957 size = 2;
3958 }
3959 if (nregs == 1 && a == 1 && size == 0) {
3960 return 1;
3961 }
3962 if (nregs == 3 && a == 1) {
3963 return 1;
3964 }
e318a60b 3965 addr = tcg_temp_new_i32();
8e18cde3
PM
3966 load_reg_var(s, addr, rn);
3967 if (nregs == 1) {
3968 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3969 tmp = gen_load_and_replicate(s, addr, size);
3970 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3971 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3972 if (insn & (1 << 5)) {
3973 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3974 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3975 }
3976 tcg_temp_free_i32(tmp);
3977 } else {
3978 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3979 stride = (insn & (1 << 5)) ? 2 : 1;
3980 for (reg = 0; reg < nregs; reg++) {
3981 tmp = gen_load_and_replicate(s, addr, size);
3982 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3983 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3984 tcg_temp_free_i32(tmp);
3985 tcg_gen_addi_i32(addr, addr, 1 << size);
3986 rd += stride;
3987 }
9ee6e8bb 3988 }
e318a60b 3989 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3990 stride = (1 << size) * nregs;
3991 } else {
3992 /* Single element. */
93262b16 3993 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
3994 pass = (insn >> 7) & 1;
3995 switch (size) {
3996 case 0:
3997 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3998 stride = 1;
3999 break;
4000 case 1:
4001 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4002 stride = (insn & (1 << 5)) ? 2 : 1;
4003 break;
4004 case 2:
4005 shift = 0;
9ee6e8bb
PB
4006 stride = (insn & (1 << 6)) ? 2 : 1;
4007 break;
4008 default:
4009 abort();
4010 }
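            /* Here shift is the bit offset of the selected lane within
             * the 32-bit word handled per pass, and stride is the step
             * between the D registers holding successive structure
             * elements.
             */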
4011 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4012 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4013 switch (nregs) {
4014 case 1:
4015 if (((idx & (1 << size)) != 0) ||
4016 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4017 return 1;
4018 }
4019 break;
4020 case 3:
4021 if ((idx & 1) != 0) {
4022 return 1;
4023 }
4024 /* fall through */
4025 case 2:
4026 if (size == 2 && (idx & 2) != 0) {
4027 return 1;
4028 }
4029 break;
4030 case 4:
4031 if ((size == 2) && ((idx & 3) == 3)) {
4032 return 1;
4033 }
4034 break;
4035 default:
4036 abort();
4037 }
4038 if ((rd + stride * (nregs - 1)) > 31) {
4039 /* Attempts to write off the end of the register file
4040 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4041                 * the neon_load_reg()/neon_store_reg() would access off the end of the array.
4042 */
4043 return 1;
4044 }
e318a60b 4045 addr = tcg_temp_new_i32();
dcc65026 4046 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4047 for (reg = 0; reg < nregs; reg++) {
4048 if (load) {
9ee6e8bb
PB
4049 switch (size) {
4050 case 0:
1b2b1e54 4051 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
4052 break;
4053 case 1:
1b2b1e54 4054 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
4055 break;
4056 case 2:
1b2b1e54 4057 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 4058 break;
a50f5b91
PB
4059 default: /* Avoid compiler warnings. */
4060 abort();
9ee6e8bb
PB
4061 }
4062 if (size != 2) {
8f8e3aa4
PB
4063 tmp2 = neon_load_reg(rd, pass);
4064 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
7d1b0095 4065 tcg_temp_free_i32(tmp2);
9ee6e8bb 4066 }
8f8e3aa4 4067 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4068 } else { /* Store */
8f8e3aa4
PB
4069 tmp = neon_load_reg(rd, pass);
4070 if (shift)
4071 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4072 switch (size) {
4073 case 0:
1b2b1e54 4074 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4075 break;
4076 case 1:
1b2b1e54 4077 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4078 break;
4079 case 2:
1b2b1e54 4080 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4081 break;
99c475ab 4082 }
99c475ab 4083 }
9ee6e8bb 4084 rd += stride;
1b2b1e54 4085 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4086 }
e318a60b 4087 tcg_temp_free_i32(addr);
9ee6e8bb 4088 stride = nregs * (1 << size);
99c475ab 4089 }
9ee6e8bb
PB
4090 }
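    /* Post-indexed writeback: rm == 15 means no writeback, rm == 13
     * means advance the base by the amount transferred, and any other
     * rm adds that register to the base.
     */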
4091 if (rm != 15) {
b26eefb6
PB
4092 TCGv base;
4093
4094 base = load_reg(s, rn);
9ee6e8bb 4095 if (rm == 13) {
b26eefb6 4096 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4097 } else {
b26eefb6
PB
4098 TCGv index;
4099 index = load_reg(s, rm);
4100 tcg_gen_add_i32(base, base, index);
7d1b0095 4101 tcg_temp_free_i32(index);
9ee6e8bb 4102 }
b26eefb6 4103 store_reg(s, rn, base);
9ee6e8bb
PB
4104 }
4105 return 0;
4106}
3b46e624 4107
8f8e3aa4
PB
4108/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4109static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4110{
4111 tcg_gen_and_i32(t, t, c);
f669df27 4112 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4113 tcg_gen_or_i32(dest, t, f);
4114}
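/* i.e. dest = (t & c) | (f & ~c): each result bit comes from t where
 * the corresponding bit of c is set, and from f where it is clear.
 */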
4115
a7812ae4 4116static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4117{
4118 switch (size) {
4119 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4120 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4121 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4122 default: abort();
4123 }
4124}
4125
a7812ae4 4126static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4127{
4128 switch (size) {
2a3f75b4
PM
4129 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4130 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4131 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
ad69471c
PB
4132 default: abort();
4133 }
4134}
4135
a7812ae4 4136static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4137{
4138 switch (size) {
2a3f75b4
PM
4139 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4140 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4141 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
ad69471c
PB
4142 default: abort();
4143 }
4144}
4145
af1bbf30
JR
4146static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4147{
4148 switch (size) {
2a3f75b4
PM
4149 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4150 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4151 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
af1bbf30
JR
4152 default: abort();
4153 }
4154}
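/* The narrowing helpers above come in four flavours: plain truncation,
 * signed saturating, unsigned saturating, and
 * signed-input/unsigned-result saturating ("unarrow", used for the
 * VQMOVUN-style cases).
 */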
4155
ad69471c
PB
4156static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4157 int q, int u)
4158{
4159 if (q) {
4160 if (u) {
4161 switch (size) {
4162 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4163 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4164 default: abort();
4165 }
4166 } else {
4167 switch (size) {
4168 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4169 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4170 default: abort();
4171 }
4172 }
4173 } else {
4174 if (u) {
4175 switch (size) {
b408a9b0
CL
4176 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4177 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4178 default: abort();
4179 }
4180 } else {
4181 switch (size) {
4182 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4183 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4184 default: abort();
4185 }
4186 }
4187 }
4188}
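/* Applies a variable shift to a 32-bit chunk before it is narrowed:
 * q selects the rounding shift helpers and u the unsigned ones; the
 * narrowing itself is done separately by the caller.
 */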
4189
a7812ae4 4190static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4191{
4192 if (u) {
4193 switch (size) {
4194 case 0: gen_helper_neon_widen_u8(dest, src); break;
4195 case 1: gen_helper_neon_widen_u16(dest, src); break;
4196 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4197 default: abort();
4198 }
4199 } else {
4200 switch (size) {
4201 case 0: gen_helper_neon_widen_s8(dest, src); break;
4202 case 1: gen_helper_neon_widen_s16(dest, src); break;
4203 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4204 default: abort();
4205 }
4206 }
7d1b0095 4207 tcg_temp_free_i32(src);
ad69471c
PB
4208}
4209
4210static inline void gen_neon_addl(int size)
4211{
4212 switch (size) {
4213 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4214 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4215 case 2: tcg_gen_add_i64(CPU_V001); break;
4216 default: abort();
4217 }
4218}
4219
4220static inline void gen_neon_subl(int size)
4221{
4222 switch (size) {
4223 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4224 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4225 case 2: tcg_gen_sub_i64(CPU_V001); break;
4226 default: abort();
4227 }
4228}
4229
a7812ae4 4230static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4231{
4232 switch (size) {
4233 case 0: gen_helper_neon_negl_u16(var, var); break;
4234 case 1: gen_helper_neon_negl_u32(var, var); break;
4235 case 2: gen_helper_neon_negl_u64(var, var); break;
4236 default: abort();
4237 }
4238}
4239
a7812ae4 4240static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4241{
4242 switch (size) {
2a3f75b4
PM
4243 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4244 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
ad69471c
PB
4245 default: abort();
4246 }
4247}
4248
a7812ae4 4249static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4250{
a7812ae4 4251 TCGv_i64 tmp;
ad69471c
PB
4252
4253 switch ((size << 1) | u) {
4254 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4255 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4256 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4257 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4258 case 4:
4259 tmp = gen_muls_i64_i32(a, b);
4260 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4261 tcg_temp_free_i64(tmp);
ad69471c
PB
4262 break;
4263 case 5:
4264 tmp = gen_mulu_i64_i32(a, b);
4265 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4266 tcg_temp_free_i64(tmp);
ad69471c
PB
4267 break;
4268 default: abort();
4269 }
c6067f04
CL
4270
4271 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters,
4272    so they must be freed here. */
4273 if (size < 2) {
7d1b0095
PM
4274 tcg_temp_free_i32(a);
4275 tcg_temp_free_i32(b);
c6067f04 4276 }
ad69471c
PB
4277}
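/* Widening multiply: 8- and 16-bit inputs use helpers that produce the
 * double-width product directly, while 32-bit inputs go through
 * gen_mul[su]_i64_i32 and the result is copied into dest.
 */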
4278
c33171c7
PM
4279static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4280{
4281 if (op) {
4282 if (u) {
4283 gen_neon_unarrow_sats(size, dest, src);
4284 } else {
4285 gen_neon_narrow(size, dest, src);
4286 }
4287 } else {
4288 if (u) {
4289 gen_neon_narrow_satu(size, dest, src);
4290 } else {
4291 gen_neon_narrow_sats(size, dest, src);
4292 }
4293 }
4294}
4295
62698be3
PM
4296/* Symbolic constants for op fields for Neon 3-register same-length.
4297 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4298 * table A7-9.
4299 */
4300#define NEON_3R_VHADD 0
4301#define NEON_3R_VQADD 1
4302#define NEON_3R_VRHADD 2
4303#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4304#define NEON_3R_VHSUB 4
4305#define NEON_3R_VQSUB 5
4306#define NEON_3R_VCGT 6
4307#define NEON_3R_VCGE 7
4308#define NEON_3R_VSHL 8
4309#define NEON_3R_VQSHL 9
4310#define NEON_3R_VRSHL 10
4311#define NEON_3R_VQRSHL 11
4312#define NEON_3R_VMAX 12
4313#define NEON_3R_VMIN 13
4314#define NEON_3R_VABD 14
4315#define NEON_3R_VABA 15
4316#define NEON_3R_VADD_VSUB 16
4317#define NEON_3R_VTST_VCEQ 17
4318#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4319#define NEON_3R_VMUL 19
4320#define NEON_3R_VPMAX 20
4321#define NEON_3R_VPMIN 21
4322#define NEON_3R_VQDMULH_VQRDMULH 22
4323#define NEON_3R_VPADD 23
4324#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4325#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4326#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4327#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4328#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4329#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4330
4331static const uint8_t neon_3r_sizes[] = {
4332 [NEON_3R_VHADD] = 0x7,
4333 [NEON_3R_VQADD] = 0xf,
4334 [NEON_3R_VRHADD] = 0x7,
4335 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4336 [NEON_3R_VHSUB] = 0x7,
4337 [NEON_3R_VQSUB] = 0xf,
4338 [NEON_3R_VCGT] = 0x7,
4339 [NEON_3R_VCGE] = 0x7,
4340 [NEON_3R_VSHL] = 0xf,
4341 [NEON_3R_VQSHL] = 0xf,
4342 [NEON_3R_VRSHL] = 0xf,
4343 [NEON_3R_VQRSHL] = 0xf,
4344 [NEON_3R_VMAX] = 0x7,
4345 [NEON_3R_VMIN] = 0x7,
4346 [NEON_3R_VABD] = 0x7,
4347 [NEON_3R_VABA] = 0x7,
4348 [NEON_3R_VADD_VSUB] = 0xf,
4349 [NEON_3R_VTST_VCEQ] = 0x7,
4350 [NEON_3R_VML] = 0x7,
4351 [NEON_3R_VMUL] = 0x7,
4352 [NEON_3R_VPMAX] = 0x7,
4353 [NEON_3R_VPMIN] = 0x7,
4354 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4355 [NEON_3R_VPADD] = 0x7,
4356 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4357 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4358 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4359 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4360 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4361 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4362};
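/* As with neon_2rm_sizes below, each entry has bit n set if size value
 * n is allowed for that op, so unallocated ops (no bits set) always
 * UNDEF.  The 0x5 entries are the float ops, where size bit 1 encodes
 * the operation and size bit 0 must be zero.
 */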
4363
600b828c
PM
4364/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4365 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4366 * table A7-13.
4367 */
4368#define NEON_2RM_VREV64 0
4369#define NEON_2RM_VREV32 1
4370#define NEON_2RM_VREV16 2
4371#define NEON_2RM_VPADDL 4
4372#define NEON_2RM_VPADDL_U 5
4373#define NEON_2RM_VCLS 8
4374#define NEON_2RM_VCLZ 9
4375#define NEON_2RM_VCNT 10
4376#define NEON_2RM_VMVN 11
4377#define NEON_2RM_VPADAL 12
4378#define NEON_2RM_VPADAL_U 13
4379#define NEON_2RM_VQABS 14
4380#define NEON_2RM_VQNEG 15
4381#define NEON_2RM_VCGT0 16
4382#define NEON_2RM_VCGE0 17
4383#define NEON_2RM_VCEQ0 18
4384#define NEON_2RM_VCLE0 19
4385#define NEON_2RM_VCLT0 20
4386#define NEON_2RM_VABS 22
4387#define NEON_2RM_VNEG 23
4388#define NEON_2RM_VCGT0_F 24
4389#define NEON_2RM_VCGE0_F 25
4390#define NEON_2RM_VCEQ0_F 26
4391#define NEON_2RM_VCLE0_F 27
4392#define NEON_2RM_VCLT0_F 28
4393#define NEON_2RM_VABS_F 30
4394#define NEON_2RM_VNEG_F 31
4395#define NEON_2RM_VSWP 32
4396#define NEON_2RM_VTRN 33
4397#define NEON_2RM_VUZP 34
4398#define NEON_2RM_VZIP 35
4399#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4400#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4401#define NEON_2RM_VSHLL 38
4402#define NEON_2RM_VCVT_F16_F32 44
4403#define NEON_2RM_VCVT_F32_F16 46
4404#define NEON_2RM_VRECPE 56
4405#define NEON_2RM_VRSQRTE 57
4406#define NEON_2RM_VRECPE_F 58
4407#define NEON_2RM_VRSQRTE_F 59
4408#define NEON_2RM_VCVT_FS 60
4409#define NEON_2RM_VCVT_FU 61
4410#define NEON_2RM_VCVT_SF 62
4411#define NEON_2RM_VCVT_UF 63
4412
4413static int neon_2rm_is_float_op(int op)
4414{
4415 /* Return true if this neon 2reg-misc op is float-to-float */
4416 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4417 op >= NEON_2RM_VRECPE_F);
4418}
4419
4420/* Each entry in this array has bit n set if the insn allows
4421 * size value n (otherwise it will UNDEF). Since unallocated
4422 * op values will have no bits set they always UNDEF.
4423 */
4424static const uint8_t neon_2rm_sizes[] = {
4425 [NEON_2RM_VREV64] = 0x7,
4426 [NEON_2RM_VREV32] = 0x3,
4427 [NEON_2RM_VREV16] = 0x1,
4428 [NEON_2RM_VPADDL] = 0x7,
4429 [NEON_2RM_VPADDL_U] = 0x7,
4430 [NEON_2RM_VCLS] = 0x7,
4431 [NEON_2RM_VCLZ] = 0x7,
4432 [NEON_2RM_VCNT] = 0x1,
4433 [NEON_2RM_VMVN] = 0x1,
4434 [NEON_2RM_VPADAL] = 0x7,
4435 [NEON_2RM_VPADAL_U] = 0x7,
4436 [NEON_2RM_VQABS] = 0x7,
4437 [NEON_2RM_VQNEG] = 0x7,
4438 [NEON_2RM_VCGT0] = 0x7,
4439 [NEON_2RM_VCGE0] = 0x7,
4440 [NEON_2RM_VCEQ0] = 0x7,
4441 [NEON_2RM_VCLE0] = 0x7,
4442 [NEON_2RM_VCLT0] = 0x7,
4443 [NEON_2RM_VABS] = 0x7,
4444 [NEON_2RM_VNEG] = 0x7,
4445 [NEON_2RM_VCGT0_F] = 0x4,
4446 [NEON_2RM_VCGE0_F] = 0x4,
4447 [NEON_2RM_VCEQ0_F] = 0x4,
4448 [NEON_2RM_VCLE0_F] = 0x4,
4449 [NEON_2RM_VCLT0_F] = 0x4,
4450 [NEON_2RM_VABS_F] = 0x4,
4451 [NEON_2RM_VNEG_F] = 0x4,
4452 [NEON_2RM_VSWP] = 0x1,
4453 [NEON_2RM_VTRN] = 0x7,
4454 [NEON_2RM_VUZP] = 0x7,
4455 [NEON_2RM_VZIP] = 0x7,
4456 [NEON_2RM_VMOVN] = 0x7,
4457 [NEON_2RM_VQMOVN] = 0x7,
4458 [NEON_2RM_VSHLL] = 0x7,
4459 [NEON_2RM_VCVT_F16_F32] = 0x2,
4460 [NEON_2RM_VCVT_F32_F16] = 0x2,
4461 [NEON_2RM_VRECPE] = 0x4,
4462 [NEON_2RM_VRSQRTE] = 0x4,
4463 [NEON_2RM_VRECPE_F] = 0x4,
4464 [NEON_2RM_VRSQRTE_F] = 0x4,
4465 [NEON_2RM_VCVT_FS] = 0x4,
4466 [NEON_2RM_VCVT_FU] = 0x4,
4467 [NEON_2RM_VCVT_SF] = 0x4,
4468 [NEON_2RM_VCVT_UF] = 0x4,
4469};
4470
9ee6e8bb
PB
4471/* Translate a NEON data processing instruction. Return nonzero if the
4472 instruction is invalid.
ad69471c
PB
4473 We process data in a mixture of 32-bit and 64-bit chunks.
4474 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4475
9ee6e8bb
PB
4476static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4477{
4478 int op;
4479 int q;
4480 int rd, rn, rm;
4481 int size;
4482 int shift;
4483 int pass;
4484 int count;
4485 int pairwise;
4486 int u;
ca9a32e4 4487 uint32_t imm, mask;
b75263d6 4488 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4489 TCGv_i64 tmp64;
9ee6e8bb 4490
5df8bac1 4491 if (!s->vfp_enabled)
9ee6e8bb
PB
4492 return 1;
4493 q = (insn & (1 << 6)) != 0;
4494 u = (insn >> 24) & 1;
4495 VFP_DREG_D(rd, insn);
4496 VFP_DREG_N(rn, insn);
4497 VFP_DREG_M(rm, insn);
4498 size = (insn >> 20) & 3;
4499 if ((insn & (1 << 23)) == 0) {
4500 /* Three register same length. */
4501 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4502 /* Catch invalid op and bad size combinations: UNDEF */
4503 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4504 return 1;
4505 }
25f84f79
PM
4506 /* All insns of this form UNDEF for either this condition or the
4507 * superset of cases "Q==1"; we catch the latter later.
4508 */
4509 if (q && ((rd | rn | rm) & 1)) {
4510 return 1;
4511 }
62698be3
PM
4512 if (size == 3 && op != NEON_3R_LOGIC) {
4513 /* 64-bit element instructions. */
9ee6e8bb 4514 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4515 neon_load_reg64(cpu_V0, rn + pass);
4516 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4517 switch (op) {
62698be3 4518 case NEON_3R_VQADD:
9ee6e8bb 4519 if (u) {
2a3f75b4 4520 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4521 } else {
2a3f75b4 4522 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4523 }
9ee6e8bb 4524 break;
62698be3 4525 case NEON_3R_VQSUB:
9ee6e8bb 4526 if (u) {
2a3f75b4 4527 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 4528 } else {
2a3f75b4 4529 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4530 }
4531 break;
62698be3 4532 case NEON_3R_VSHL:
ad69471c
PB
4533 if (u) {
4534 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4535 } else {
4536 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4537 }
4538 break;
62698be3 4539 case NEON_3R_VQSHL:
ad69471c 4540 if (u) {
2a3f75b4 4541 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4542 } else {
2a3f75b4 4543 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
ad69471c
PB
4544 }
4545 break;
62698be3 4546 case NEON_3R_VRSHL:
ad69471c
PB
4547 if (u) {
4548 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4549 } else {
ad69471c
PB
4550 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4551 }
4552 break;
62698be3 4553 case NEON_3R_VQRSHL:
ad69471c 4554 if (u) {
2a3f75b4 4555 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4556 } else {
2a3f75b4 4557 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4558 }
9ee6e8bb 4559 break;
62698be3 4560 case NEON_3R_VADD_VSUB:
9ee6e8bb 4561 if (u) {
ad69471c 4562 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4563 } else {
ad69471c 4564 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4565 }
4566 break;
4567 default:
4568 abort();
2c0262af 4569 }
ad69471c 4570 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4571 }
9ee6e8bb 4572 return 0;
2c0262af 4573 }
25f84f79 4574 pairwise = 0;
9ee6e8bb 4575 switch (op) {
62698be3
PM
4576 case NEON_3R_VSHL:
4577 case NEON_3R_VQSHL:
4578 case NEON_3R_VRSHL:
4579 case NEON_3R_VQRSHL:
9ee6e8bb 4580 {
ad69471c
PB
4581 int rtmp;
4582 /* Shift instruction operands are reversed. */
4583 rtmp = rn;
9ee6e8bb 4584 rn = rm;
ad69471c 4585 rm = rtmp;
9ee6e8bb 4586 }
2c0262af 4587 break;
25f84f79
PM
4588 case NEON_3R_VPADD:
4589 if (u) {
4590 return 1;
4591 }
4592 /* Fall through */
62698be3
PM
4593 case NEON_3R_VPMAX:
4594 case NEON_3R_VPMIN:
9ee6e8bb 4595 pairwise = 1;
2c0262af 4596 break;
25f84f79
PM
4597 case NEON_3R_FLOAT_ARITH:
4598 pairwise = (u && size < 2); /* if VPADD (float) */
4599 break;
4600 case NEON_3R_FLOAT_MINMAX:
4601 pairwise = u; /* if VPMIN/VPMAX (float) */
4602 break;
4603 case NEON_3R_FLOAT_CMP:
4604 if (!u && size) {
4605 /* no encoding for U=0 C=1x */
4606 return 1;
4607 }
4608 break;
4609 case NEON_3R_FLOAT_ACMP:
4610 if (!u) {
4611 return 1;
4612 }
4613 break;
4614 case NEON_3R_VRECPS_VRSQRTS:
4615 if (u) {
4616 return 1;
4617 }
2c0262af 4618 break;
25f84f79
PM
4619 case NEON_3R_VMUL:
4620 if (u && (size != 0)) {
4621 /* UNDEF on invalid size for polynomial subcase */
4622 return 1;
4623 }
2c0262af 4624 break;
9ee6e8bb 4625 default:
2c0262af 4626 break;
9ee6e8bb 4627 }
dd8fbd78 4628
25f84f79
PM
4629 if (pairwise && q) {
4630 /* All the pairwise insns UNDEF if Q is set */
4631 return 1;
4632 }
4633
9ee6e8bb
PB
4634 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4635
4636 if (pairwise) {
4637 /* Pairwise. */
a5a14945
JR
4638 if (pass < 1) {
4639 tmp = neon_load_reg(rn, 0);
4640 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4641 } else {
a5a14945
JR
4642 tmp = neon_load_reg(rm, 0);
4643 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4644 }
4645 } else {
4646 /* Elementwise. */
dd8fbd78
FN
4647 tmp = neon_load_reg(rn, pass);
4648 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4649 }
4650 switch (op) {
62698be3 4651 case NEON_3R_VHADD:
9ee6e8bb
PB
4652 GEN_NEON_INTEGER_OP(hadd);
4653 break;
62698be3 4654 case NEON_3R_VQADD:
2a3f75b4 4655 GEN_NEON_INTEGER_OP(qadd);
2c0262af 4656 break;
62698be3 4657 case NEON_3R_VRHADD:
9ee6e8bb 4658 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4659 break;
62698be3 4660 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4661 switch ((u << 2) | size) {
4662 case 0: /* VAND */
dd8fbd78 4663 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4664 break;
4665 case 1: /* BIC */
f669df27 4666 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4667 break;
4668 case 2: /* VORR */
dd8fbd78 4669 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4670 break;
4671 case 3: /* VORN */
f669df27 4672 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4673 break;
4674 case 4: /* VEOR */
dd8fbd78 4675 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4676 break;
4677 case 5: /* VBSL */
dd8fbd78
FN
4678 tmp3 = neon_load_reg(rd, pass);
4679 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4680 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4681 break;
4682 case 6: /* VBIT */
dd8fbd78
FN
4683 tmp3 = neon_load_reg(rd, pass);
4684 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4685 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4686 break;
4687 case 7: /* VBIF */
dd8fbd78
FN
4688 tmp3 = neon_load_reg(rd, pass);
4689 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4690 tcg_temp_free_i32(tmp3);
9ee6e8bb 4691 break;
2c0262af
FB
4692 }
4693 break;
62698be3 4694 case NEON_3R_VHSUB:
9ee6e8bb
PB
4695 GEN_NEON_INTEGER_OP(hsub);
4696 break;
62698be3 4697 case NEON_3R_VQSUB:
2a3f75b4 4698 GEN_NEON_INTEGER_OP(qsub);
2c0262af 4699 break;
62698be3 4700 case NEON_3R_VCGT:
9ee6e8bb
PB
4701 GEN_NEON_INTEGER_OP(cgt);
4702 break;
62698be3 4703 case NEON_3R_VCGE:
9ee6e8bb
PB
4704 GEN_NEON_INTEGER_OP(cge);
4705 break;
62698be3 4706 case NEON_3R_VSHL:
ad69471c 4707 GEN_NEON_INTEGER_OP(shl);
2c0262af 4708 break;
62698be3 4709 case NEON_3R_VQSHL:
2a3f75b4 4710 GEN_NEON_INTEGER_OP(qshl);
2c0262af 4711 break;
62698be3 4712 case NEON_3R_VRSHL:
ad69471c 4713 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4714 break;
62698be3 4715 case NEON_3R_VQRSHL:
2a3f75b4 4716 GEN_NEON_INTEGER_OP(qrshl);
9ee6e8bb 4717 break;
62698be3 4718 case NEON_3R_VMAX:
9ee6e8bb
PB
4719 GEN_NEON_INTEGER_OP(max);
4720 break;
62698be3 4721 case NEON_3R_VMIN:
9ee6e8bb
PB
4722 GEN_NEON_INTEGER_OP(min);
4723 break;
62698be3 4724 case NEON_3R_VABD:
9ee6e8bb
PB
4725 GEN_NEON_INTEGER_OP(abd);
4726 break;
62698be3 4727 case NEON_3R_VABA:
9ee6e8bb 4728 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4729 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4730 tmp2 = neon_load_reg(rd, pass);
4731 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4732 break;
62698be3 4733 case NEON_3R_VADD_VSUB:
9ee6e8bb 4734 if (!u) { /* VADD */
62698be3 4735 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4736 } else { /* VSUB */
4737 switch (size) {
dd8fbd78
FN
4738 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4739 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4740 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4741 default: abort();
9ee6e8bb
PB
4742 }
4743 }
4744 break;
62698be3 4745 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4746 if (!u) { /* VTST */
4747 switch (size) {
dd8fbd78
FN
4748 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4749 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4750 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4751 default: abort();
9ee6e8bb
PB
4752 }
4753 } else { /* VCEQ */
4754 switch (size) {
dd8fbd78
FN
4755 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4756 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4757 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4758 default: abort();
9ee6e8bb
PB
4759 }
4760 }
4761 break;
62698be3 4762            case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 4763 switch (size) {
dd8fbd78
FN
4764 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4765 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4766 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4767 default: abort();
9ee6e8bb 4768 }
7d1b0095 4769 tcg_temp_free_i32(tmp2);
dd8fbd78 4770 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4771 if (u) { /* VMLS */
dd8fbd78 4772 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4773 } else { /* VMLA */
dd8fbd78 4774 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4775 }
4776 break;
62698be3 4777 case NEON_3R_VMUL:
9ee6e8bb 4778 if (u) { /* polynomial */
dd8fbd78 4779 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4780 } else { /* Integer */
4781 switch (size) {
dd8fbd78
FN
4782 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4783 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4784 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4785 default: abort();
9ee6e8bb
PB
4786 }
4787 }
4788 break;
62698be3 4789 case NEON_3R_VPMAX:
9ee6e8bb
PB
4790 GEN_NEON_INTEGER_OP(pmax);
4791 break;
62698be3 4792 case NEON_3R_VPMIN:
9ee6e8bb
PB
4793 GEN_NEON_INTEGER_OP(pmin);
4794 break;
62698be3 4795 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4796 if (!u) { /* VQDMULH */
4797 switch (size) {
2a3f75b4
PM
4798 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4799 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
62698be3 4800 default: abort();
9ee6e8bb 4801 }
62698be3 4802 } else { /* VQRDMULH */
9ee6e8bb 4803 switch (size) {
2a3f75b4
PM
4804 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4805 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
62698be3 4806 default: abort();
9ee6e8bb
PB
4807 }
4808 }
4809 break;
62698be3 4810 case NEON_3R_VPADD:
9ee6e8bb 4811 switch (size) {
dd8fbd78
FN
4812 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4813 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4814 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4815 default: abort();
9ee6e8bb
PB
4816 }
4817 break;
62698be3 4818 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
9ee6e8bb
PB
4819 switch ((u << 2) | size) {
4820 case 0: /* VADD */
dd8fbd78 4821 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4822 break;
4823 case 2: /* VSUB */
dd8fbd78 4824 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4825 break;
4826 case 4: /* VPADD */
dd8fbd78 4827 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4828 break;
4829 case 6: /* VABD */
dd8fbd78 4830 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4831 break;
4832 default:
62698be3 4833 abort();
9ee6e8bb
PB
4834 }
4835 break;
62698be3 4836 case NEON_3R_FLOAT_MULTIPLY:
dd8fbd78 4837 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4838 if (!u) {
7d1b0095 4839 tcg_temp_free_i32(tmp2);
dd8fbd78 4840 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4841 if (size == 0) {
dd8fbd78 4842 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4843 } else {
dd8fbd78 4844 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4845 }
4846 }
4847 break;
62698be3 4848 case NEON_3R_FLOAT_CMP:
9ee6e8bb 4849 if (!u) {
dd8fbd78 4850 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4851 } else {
9ee6e8bb 4852 if (size == 0)
dd8fbd78 4853 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4854 else
dd8fbd78 4855 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4856 }
2c0262af 4857 break;
62698be3 4858 case NEON_3R_FLOAT_ACMP:
9ee6e8bb 4859 if (size == 0)
dd8fbd78 4860 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4861 else
dd8fbd78 4862 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4863 break;
62698be3 4864 case NEON_3R_FLOAT_MINMAX:
9ee6e8bb 4865 if (size == 0)
dd8fbd78 4866 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4867 else
dd8fbd78 4868 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb 4869 break;
62698be3 4870 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4871 if (size == 0)
dd8fbd78 4872 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4873 else
dd8fbd78 4874 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4875 break;
9ee6e8bb
PB
4876 default:
4877 abort();
2c0262af 4878 }
7d1b0095 4879 tcg_temp_free_i32(tmp2);
dd8fbd78 4880
9ee6e8bb
PB
4881 /* Save the result. For elementwise operations we can put it
4882 straight into the destination register. For pairwise operations
4883 we have to be careful to avoid clobbering the source operands. */
4884 if (pairwise && rd == rm) {
dd8fbd78 4885 neon_store_scratch(pass, tmp);
9ee6e8bb 4886 } else {
dd8fbd78 4887 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4888 }
4889
4890 } /* for pass */
4891 if (pairwise && rd == rm) {
4892 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4893 tmp = neon_load_scratch(pass);
4894 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4895 }
4896 }
ad69471c 4897 /* End of 3 register same size operations. */
9ee6e8bb
PB
4898 } else if (insn & (1 << 4)) {
4899 if ((insn & 0x00380080) != 0) {
4900 /* Two registers and shift. */
4901 op = (insn >> 8) & 0xf;
4902 if (insn & (1 << 7)) {
cc13115b
PM
4903 /* 64-bit shift. */
4904 if (op > 7) {
4905 return 1;
4906 }
9ee6e8bb
PB
4907 size = 3;
4908 } else {
4909 size = 2;
4910 while ((insn & (1 << (size + 19))) == 0)
4911 size--;
4912 }
4913 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4914             /* To avoid excessive duplication of ops we implement shift
4915 by immediate using the variable shift operations. */
4916 if (op < 8) {
4917 /* Shift by immediate:
4918 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4919 if (q && ((rd | rm) & 1)) {
4920 return 1;
4921 }
4922 if (!u && (op == 4 || op == 6)) {
4923 return 1;
4924 }
9ee6e8bb
PB
4925 /* Right shifts are encoded as N - shift, where N is the
4926 element size in bits. */
4927 if (op <= 4)
4928 shift = shift - (1 << (size + 3));
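                /* After this adjustment a right shift by n is held as
                 * shift == -n; the variable shift helpers used below
                 * treat a negative count as a right shift.
                 */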
9ee6e8bb
PB
4929 if (size == 3) {
4930 count = q + 1;
4931 } else {
4932 count = q ? 4: 2;
4933 }
4934 switch (size) {
4935 case 0:
4936 imm = (uint8_t) shift;
4937 imm |= imm << 8;
4938 imm |= imm << 16;
4939 break;
4940 case 1:
4941 imm = (uint16_t) shift;
4942 imm |= imm << 16;
4943 break;
4944 case 2:
4945 case 3:
4946 imm = shift;
4947 break;
4948 default:
4949 abort();
4950 }
4951
4952 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4953 if (size == 3) {
4954 neon_load_reg64(cpu_V0, rm + pass);
4955 tcg_gen_movi_i64(cpu_V1, imm);
4956 switch (op) {
4957 case 0: /* VSHR */
4958 case 1: /* VSRA */
4959 if (u)
4960 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4961 else
ad69471c 4962 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4963 break;
ad69471c
PB
4964 case 2: /* VRSHR */
4965 case 3: /* VRSRA */
4966 if (u)
4967 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4968 else
ad69471c 4969 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4970 break;
ad69471c 4971 case 4: /* VSRI */
ad69471c
PB
4972 case 5: /* VSHL, VSLI */
4973 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4974 break;
0322b26e 4975 case 6: /* VQSHLU */
cc13115b 4976 gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 4977 break;
0322b26e
PM
4978 case 7: /* VQSHL */
4979 if (u) {
2a3f75b4 4980 gen_helper_neon_qshl_u64(cpu_V0,
0322b26e
PM
4981 cpu_V0, cpu_V1);
4982 } else {
2a3f75b4 4983 gen_helper_neon_qshl_s64(cpu_V0,
0322b26e
PM
4984 cpu_V0, cpu_V1);
4985 }
9ee6e8bb 4986 break;
9ee6e8bb 4987 }
ad69471c
PB
4988 if (op == 1 || op == 3) {
4989 /* Accumulate. */
5371cb81 4990 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
4991 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4992 } else if (op == 4 || (op == 5 && u)) {
4993 /* Insert */
923e6509
CL
4994 neon_load_reg64(cpu_V1, rd + pass);
4995 uint64_t mask;
4996 if (shift < -63 || shift > 63) {
4997 mask = 0;
4998 } else {
4999 if (op == 4) {
5000 mask = 0xffffffffffffffffull >> -shift;
5001 } else {
5002 mask = 0xffffffffffffffffull << shift;
5003 }
5004 }
5005 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5006 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5007 }
5008 neon_store_reg64(cpu_V0, rd + pass);
5009 } else { /* size < 3 */
5010 /* Operands in T0 and T1. */
dd8fbd78 5011 tmp = neon_load_reg(rm, pass);
7d1b0095 5012 tmp2 = tcg_temp_new_i32();
dd8fbd78 5013 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5014 switch (op) {
5015 case 0: /* VSHR */
5016 case 1: /* VSRA */
5017 GEN_NEON_INTEGER_OP(shl);
5018 break;
5019 case 2: /* VRSHR */
5020 case 3: /* VRSRA */
5021 GEN_NEON_INTEGER_OP(rshl);
5022 break;
5023 case 4: /* VSRI */
ad69471c
PB
5024 case 5: /* VSHL, VSLI */
5025 switch (size) {
dd8fbd78
FN
5026 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5027 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5028 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5029 default: abort();
ad69471c
PB
5030 }
5031 break;
0322b26e 5032 case 6: /* VQSHLU */
ad69471c 5033 switch (size) {
0322b26e 5034 case 0:
2a3f75b4 5035 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
0322b26e
PM
5036 break;
5037 case 1:
2a3f75b4 5038 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
0322b26e
PM
5039 break;
5040 case 2:
2a3f75b4 5041 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
0322b26e
PM
5042 break;
5043 default:
cc13115b 5044 abort();
ad69471c
PB
5045 }
5046 break;
0322b26e 5047 case 7: /* VQSHL */
2a3f75b4 5048 GEN_NEON_INTEGER_OP(qshl);
0322b26e 5049 break;
ad69471c 5050 }
7d1b0095 5051 tcg_temp_free_i32(tmp2);
ad69471c
PB
5052
5053 if (op == 1 || op == 3) {
5054 /* Accumulate. */
dd8fbd78 5055 tmp2 = neon_load_reg(rd, pass);
5371cb81 5056 gen_neon_add(size, tmp, tmp2);
7d1b0095 5057 tcg_temp_free_i32(tmp2);
ad69471c
PB
5058 } else if (op == 4 || (op == 5 && u)) {
5059 /* Insert */
5060 switch (size) {
5061 case 0:
5062 if (op == 4)
ca9a32e4 5063 mask = 0xff >> -shift;
ad69471c 5064 else
ca9a32e4
JR
5065 mask = (uint8_t)(0xff << shift);
5066 mask |= mask << 8;
5067 mask |= mask << 16;
ad69471c
PB
5068 break;
5069 case 1:
5070 if (op == 4)
ca9a32e4 5071 mask = 0xffff >> -shift;
ad69471c 5072 else
ca9a32e4
JR
5073 mask = (uint16_t)(0xffff << shift);
5074 mask |= mask << 16;
ad69471c
PB
5075 break;
5076 case 2:
ca9a32e4
JR
5077 if (shift < -31 || shift > 31) {
5078 mask = 0;
5079 } else {
5080 if (op == 4)
5081 mask = 0xffffffffu >> -shift;
5082 else
5083 mask = 0xffffffffu << shift;
5084 }
ad69471c
PB
5085 break;
5086 default:
5087 abort();
5088 }
dd8fbd78 5089 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5090 tcg_gen_andi_i32(tmp, tmp, mask);
5091 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5092 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5093 tcg_temp_free_i32(tmp2);
ad69471c 5094 }
dd8fbd78 5095 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5096 }
5097 } /* for pass */
5098 } else if (op < 10) {
ad69471c 5099 /* Shift by immediate and narrow:
9ee6e8bb 5100 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5101 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5102 if (rm & 1) {
5103 return 1;
5104 }
9ee6e8bb
PB
5105 shift = shift - (1 << (size + 3));
5106 size++;
92cdfaeb 5107 if (size == 3) {
a7812ae4 5108 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5109 neon_load_reg64(cpu_V0, rm);
5110 neon_load_reg64(cpu_V1, rm + 1);
5111 for (pass = 0; pass < 2; pass++) {
5112 TCGv_i64 in;
5113 if (pass == 0) {
5114 in = cpu_V0;
5115 } else {
5116 in = cpu_V1;
5117 }
ad69471c 5118 if (q) {
0b36f4cd 5119 if (input_unsigned) {
92cdfaeb 5120 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5121 } else {
92cdfaeb 5122 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5123 }
ad69471c 5124 } else {
0b36f4cd 5125 if (input_unsigned) {
92cdfaeb 5126 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5127 } else {
92cdfaeb 5128 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5129 }
ad69471c 5130 }
7d1b0095 5131 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5132 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5133 neon_store_reg(rd, pass, tmp);
5134 } /* for pass */
5135 tcg_temp_free_i64(tmp64);
5136 } else {
5137 if (size == 1) {
5138 imm = (uint16_t)shift;
5139 imm |= imm << 16;
2c0262af 5140 } else {
92cdfaeb
PM
5141 /* size == 2 */
5142 imm = (uint32_t)shift;
5143 }
5144 tmp2 = tcg_const_i32(imm);
5145 tmp4 = neon_load_reg(rm + 1, 0);
5146 tmp5 = neon_load_reg(rm + 1, 1);
5147 for (pass = 0; pass < 2; pass++) {
5148 if (pass == 0) {
5149 tmp = neon_load_reg(rm, 0);
5150 } else {
5151 tmp = tmp4;
5152 }
0b36f4cd
CL
5153 gen_neon_shift_narrow(size, tmp, tmp2, q,
5154 input_unsigned);
92cdfaeb
PM
5155 if (pass == 0) {
5156 tmp3 = neon_load_reg(rm, 1);
5157 } else {
5158 tmp3 = tmp5;
5159 }
0b36f4cd
CL
5160 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5161 input_unsigned);
36aa55dc 5162 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5163 tcg_temp_free_i32(tmp);
5164 tcg_temp_free_i32(tmp3);
5165 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5166 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5167 neon_store_reg(rd, pass, tmp);
5168 } /* for pass */
c6067f04 5169 tcg_temp_free_i32(tmp2);
b75263d6 5170 }
9ee6e8bb 5171 } else if (op == 10) {
cc13115b
PM
5172 /* VSHLL, VMOVL */
5173 if (q || (rd & 1)) {
9ee6e8bb 5174 return 1;
cc13115b 5175 }
ad69471c
PB
5176 tmp = neon_load_reg(rm, 0);
5177 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5178 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5179 if (pass == 1)
5180 tmp = tmp2;
5181
5182 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5183
9ee6e8bb
PB
5184 if (shift != 0) {
5185 /* The shift is less than the width of the source
ad69471c
PB
5186 type, so we can just shift the whole register. */
5187 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5188 /* Widen the result of shift: we need to clear
5189 * the potential overflow bits resulting from
5190 * left bits of the narrow input appearing as
5191                          * right bits of the left neighbour narrow
5192 * input. */
ad69471c
PB
5193 if (size < 2 || !u) {
5194 uint64_t imm64;
5195 if (size == 0) {
5196 imm = (0xffu >> (8 - shift));
5197 imm |= imm << 16;
acdf01ef 5198 } else if (size == 1) {
ad69471c 5199 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5200 } else {
5201 /* size == 2 */
5202 imm = 0xffffffff >> (32 - shift);
5203 }
5204 if (size < 2) {
5205 imm64 = imm | (((uint64_t)imm) << 32);
5206 } else {
5207 imm64 = imm;
9ee6e8bb 5208 }
acdf01ef 5209 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5210 }
5211 }
ad69471c 5212 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5213 }
f73534a5 5214 } else if (op >= 14) {
9ee6e8bb 5215 /* VCVT fixed-point. */
cc13115b
PM
5216 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5217 return 1;
5218 }
f73534a5
PM
5219 /* We have already masked out the must-be-1 top bit of imm6,
5220 * hence this 32-shift where the ARM ARM has 64-imm6.
5221 */
5222 shift = 32 - shift;
9ee6e8bb 5223 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5224 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5225 if (!(op & 1)) {
9ee6e8bb 5226 if (u)
4373f3ce 5227 gen_vfp_ulto(0, shift);
9ee6e8bb 5228 else
4373f3ce 5229 gen_vfp_slto(0, shift);
9ee6e8bb
PB
5230 } else {
5231 if (u)
4373f3ce 5232 gen_vfp_toul(0, shift);
9ee6e8bb 5233 else
4373f3ce 5234 gen_vfp_tosl(0, shift);
2c0262af 5235 }
4373f3ce 5236 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5237 }
5238 } else {
9ee6e8bb
PB
5239 return 1;
5240 }
5241 } else { /* (insn & 0x00380080) == 0 */
5242 int invert;
7d80fee5
PM
5243 if (q && (rd & 1)) {
5244 return 1;
5245 }
9ee6e8bb
PB
5246
5247 op = (insn >> 8) & 0xf;
5248 /* One register and immediate. */
5249 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5250 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5251 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5252 * We choose to not special-case this and will behave as if a
5253 * valid constant encoding of 0 had been given.
5254 */
9ee6e8bb
PB
5255 switch (op) {
5256 case 0: case 1:
5257 /* no-op */
5258 break;
5259 case 2: case 3:
5260 imm <<= 8;
5261 break;
5262 case 4: case 5:
5263 imm <<= 16;
5264 break;
5265 case 6: case 7:
5266 imm <<= 24;
5267 break;
5268 case 8: case 9:
5269 imm |= imm << 16;
5270 break;
5271 case 10: case 11:
5272 imm = (imm << 8) | (imm << 24);
5273 break;
5274 case 12:
8e31209e 5275 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5276 break;
5277 case 13:
5278 imm = (imm << 16) | 0xffff;
5279 break;
5280 case 14:
5281 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5282 if (invert)
5283 imm = ~imm;
5284 break;
5285 case 15:
7d80fee5
PM
5286 if (invert) {
5287 return 1;
5288 }
9ee6e8bb
PB
5289 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5290 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5291 break;
5292 }
5293 if (invert)
5294 imm = ~imm;
5295
9ee6e8bb
PB
5296 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5297 if (op & 1 && op < 12) {
ad69471c 5298 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5299 if (invert) {
5300 /* The immediate value has already been inverted, so
5301 BIC becomes AND. */
ad69471c 5302 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5303 } else {
ad69471c 5304 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5305 }
9ee6e8bb 5306 } else {
ad69471c 5307 /* VMOV, VMVN. */
7d1b0095 5308 tmp = tcg_temp_new_i32();
9ee6e8bb 5309 if (op == 14 && invert) {
a5a14945 5310 int n;
ad69471c
PB
5311 uint32_t val;
5312 val = 0;
9ee6e8bb
PB
5313 for (n = 0; n < 4; n++) {
5314 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5315 val |= 0xff << (n * 8);
9ee6e8bb 5316 }
ad69471c
PB
5317 tcg_gen_movi_i32(tmp, val);
5318 } else {
5319 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5320 }
9ee6e8bb 5321 }
ad69471c 5322 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5323 }
5324 }
e4b3861d 5325 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5326 if (size != 3) {
5327 op = (insn >> 8) & 0xf;
5328 if ((insn & (1 << 6)) == 0) {
5329 /* Three registers of different lengths. */
5330 int src1_wide;
5331 int src2_wide;
5332 int prewiden;
695272dc
PM
5333 /* undefreq: bit 0 : UNDEF if size != 0
5334 * bit 1 : UNDEF if size == 0
5335 * bit 2 : UNDEF if U == 1
5336 * Note that [1:0] set implies 'always UNDEF'
5337 */
5338 int undefreq;
5339 /* prewiden, src1_wide, src2_wide, undefreq */
5340 static const int neon_3reg_wide[16][4] = {
5341 {1, 0, 0, 0}, /* VADDL */
5342 {1, 1, 0, 0}, /* VADDW */
5343 {1, 0, 0, 0}, /* VSUBL */
5344 {1, 1, 0, 0}, /* VSUBW */
5345 {0, 1, 1, 0}, /* VADDHN */
5346 {0, 0, 0, 0}, /* VABAL */
5347 {0, 1, 1, 0}, /* VSUBHN */
5348 {0, 0, 0, 0}, /* VABDL */
5349 {0, 0, 0, 0}, /* VMLAL */
5350 {0, 0, 0, 6}, /* VQDMLAL */
5351 {0, 0, 0, 0}, /* VMLSL */
5352 {0, 0, 0, 6}, /* VQDMLSL */
5353 {0, 0, 0, 0}, /* Integer VMULL */
5354 {0, 0, 0, 2}, /* VQDMULL */
5355 {0, 0, 0, 5}, /* Polynomial VMULL */
5356 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5357 };
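                /* prewiden: narrow source elements are widened to
                 * 64 bits (gen_neon_widen) before the operation;
                 * src1_wide/src2_wide mark operands that are already
                 * wide and are read 64 bits at a time with
                 * neon_load_reg64.
                 */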
5358
5359 prewiden = neon_3reg_wide[op][0];
5360 src1_wide = neon_3reg_wide[op][1];
5361 src2_wide = neon_3reg_wide[op][2];
695272dc 5362 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5363
695272dc
PM
5364 if (((undefreq & 1) && (size != 0)) ||
5365 ((undefreq & 2) && (size == 0)) ||
5366 ((undefreq & 4) && u)) {
5367 return 1;
5368 }
5369 if ((src1_wide && (rn & 1)) ||
5370 (src2_wide && (rm & 1)) ||
5371 (!src2_wide && (rd & 1))) {
ad69471c 5372 return 1;
695272dc 5373 }
ad69471c 5374
9ee6e8bb
PB
5375 /* Avoid overlapping operands. Wide source operands are
5376 always aligned so will never overlap with wide
5377 destinations in problematic ways. */
8f8e3aa4 5378 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5379 tmp = neon_load_reg(rm, 1);
5380 neon_store_scratch(2, tmp);
8f8e3aa4 5381 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5382 tmp = neon_load_reg(rn, 1);
5383 neon_store_scratch(2, tmp);
9ee6e8bb 5384 }
a50f5b91 5385 TCGV_UNUSED(tmp3);
9ee6e8bb 5386 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5387 if (src1_wide) {
5388 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5389 TCGV_UNUSED(tmp);
9ee6e8bb 5390 } else {
ad69471c 5391 if (pass == 1 && rd == rn) {
dd8fbd78 5392 tmp = neon_load_scratch(2);
9ee6e8bb 5393 } else {
ad69471c
PB
5394 tmp = neon_load_reg(rn, pass);
5395 }
5396 if (prewiden) {
5397 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5398 }
5399 }
ad69471c
PB
5400 if (src2_wide) {
5401 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5402 TCGV_UNUSED(tmp2);
9ee6e8bb 5403 } else {
ad69471c 5404 if (pass == 1 && rd == rm) {
dd8fbd78 5405 tmp2 = neon_load_scratch(2);
9ee6e8bb 5406 } else {
ad69471c
PB
5407 tmp2 = neon_load_reg(rm, pass);
5408 }
5409 if (prewiden) {
5410 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5411 }
9ee6e8bb
PB
5412 }
5413 switch (op) {
5414 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5415 gen_neon_addl(size);
9ee6e8bb 5416 break;
79b0e534 5417 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5418 gen_neon_subl(size);
9ee6e8bb
PB
5419 break;
5420 case 5: case 7: /* VABAL, VABDL */
5421 switch ((size << 1) | u) {
ad69471c
PB
5422 case 0:
5423 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5424 break;
5425 case 1:
5426 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5427 break;
5428 case 2:
5429 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5430 break;
5431 case 3:
5432 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5433 break;
5434 case 4:
5435 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5436 break;
5437 case 5:
5438 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5439 break;
9ee6e8bb
PB
5440 default: abort();
5441 }
7d1b0095
PM
5442 tcg_temp_free_i32(tmp2);
5443 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5444 break;
5445 case 8: case 9: case 10: case 11: case 12: case 13:
5446 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5447 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5448 break;
5449 case 14: /* Polynomial VMULL */
e5ca24cb 5450 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5451 tcg_temp_free_i32(tmp2);
5452 tcg_temp_free_i32(tmp);
e5ca24cb 5453 break;
695272dc
PM
5454 default: /* 15 is RESERVED: caught earlier */
5455 abort();
9ee6e8bb 5456 }
ebcd88ce
PM
5457 if (op == 13) {
5458 /* VQDMULL */
5459 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5460 neon_store_reg64(cpu_V0, rd + pass);
5461 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5462 /* Accumulate. */
ebcd88ce 5463 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5464 switch (op) {
4dc064e6
PM
5465 case 10: /* VMLSL */
5466 gen_neon_negl(cpu_V0, size);
5467 /* Fall through */
5468 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5469 gen_neon_addl(size);
9ee6e8bb
PB
5470 break;
5471 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5472 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5473 if (op == 11) {
5474 gen_neon_negl(cpu_V0, size);
5475 }
ad69471c
PB
5476 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5477 break;
9ee6e8bb
PB
5478 default:
5479 abort();
5480 }
ad69471c 5481 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5482 } else if (op == 4 || op == 6) {
5483 /* Narrowing operation. */
7d1b0095 5484 tmp = tcg_temp_new_i32();
79b0e534 5485 if (!u) {
9ee6e8bb 5486 switch (size) {
ad69471c
PB
5487 case 0:
5488 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5489 break;
5490 case 1:
5491 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5492 break;
5493 case 2:
5494 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5495 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5496 break;
9ee6e8bb
PB
5497 default: abort();
5498 }
5499 } else {
5500 switch (size) {
ad69471c
PB
5501 case 0:
5502 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5503 break;
5504 case 1:
5505 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5506 break;
5507 case 2:
5508 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5509 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5510 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5511 break;
9ee6e8bb
PB
5512 default: abort();
5513 }
5514 }
ad69471c
PB
5515 if (pass == 0) {
5516 tmp3 = tmp;
5517 } else {
5518 neon_store_reg(rd, 0, tmp3);
5519 neon_store_reg(rd, 1, tmp);
5520 }
9ee6e8bb
PB
5521 } else {
5522 /* Write back the result. */
ad69471c 5523 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5524 }
5525 }
5526 } else {
3e3326df
PM
5527 /* Two registers and a scalar. NB that for ops of this form
5528 * the ARM ARM labels bit 24 as Q, but it is in our variable
5529 * 'u', not 'q'.
5530 */
5531 if (size == 0) {
5532 return 1;
5533 }
9ee6e8bb 5534 switch (op) {
9ee6e8bb 5535 case 1: /* Float VMLA scalar */
9ee6e8bb 5536 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5537 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5538 if (size == 1) {
5539 return 1;
5540 }
5541 /* fall through */
5542 case 0: /* Integer VMLA scalar */
5543 case 4: /* Integer VMLS scalar */
5544 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5545 case 12: /* VQDMULH scalar */
5546 case 13: /* VQRDMULH scalar */
3e3326df
PM
5547 if (u && ((rd | rn) & 1)) {
5548 return 1;
5549 }
dd8fbd78
FN
5550 tmp = neon_get_scalar(size, rm);
5551 neon_store_scratch(0, tmp);
9ee6e8bb 5552 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5553 tmp = neon_load_scratch(0);
5554 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5555 if (op == 12) {
5556 if (size == 1) {
2a3f75b4 5557 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5558 } else {
2a3f75b4 5559 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5560 }
5561 } else if (op == 13) {
5562 if (size == 1) {
2a3f75b4 5563 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5564 } else {
2a3f75b4 5565 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5566 }
5567 } else if (op & 1) {
dd8fbd78 5568 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5569 } else {
5570 switch (size) {
dd8fbd78
FN
5571 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5572 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5573 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5574 default: abort();
9ee6e8bb
PB
5575 }
5576 }
7d1b0095 5577 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5578 if (op < 8) {
5579 /* Accumulate. */
dd8fbd78 5580 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5581 switch (op) {
5582 case 0:
dd8fbd78 5583 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5584 break;
5585 case 1:
dd8fbd78 5586 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5587 break;
5588 case 4:
dd8fbd78 5589 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5590 break;
5591 case 5:
dd8fbd78 5592 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5593 break;
5594 default:
5595 abort();
5596 }
7d1b0095 5597 tcg_temp_free_i32(tmp2);
9ee6e8bb 5598 }
dd8fbd78 5599 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5600 }
5601 break;
9ee6e8bb 5602 case 3: /* VQDMLAL scalar */
9ee6e8bb 5603 case 7: /* VQDMLSL scalar */
9ee6e8bb 5604 case 11: /* VQDMULL scalar */
3e3326df 5605 if (u == 1) {
ad69471c 5606 return 1;
3e3326df
PM
5607 }
5608 /* fall through */
 5609 case 2: /* VMLAL scalar */
5610 case 6: /* VMLSL scalar */
5611 case 10: /* VMULL scalar */
5612 if (rd & 1) {
5613 return 1;
5614 }
dd8fbd78 5615 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5616 /* We need a copy of tmp2 because gen_neon_mull
5617 * deletes it during pass 0. */
7d1b0095 5618 tmp4 = tcg_temp_new_i32();
c6067f04 5619 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5620 tmp3 = neon_load_reg(rn, 1);
ad69471c 5621
9ee6e8bb 5622 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5623 if (pass == 0) {
5624 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5625 } else {
dd8fbd78 5626 tmp = tmp3;
c6067f04 5627 tmp2 = tmp4;
9ee6e8bb 5628 }
ad69471c 5629 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5630 if (op != 11) {
5631 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5632 }
9ee6e8bb 5633 switch (op) {
4dc064e6
PM
5634 case 6:
5635 gen_neon_negl(cpu_V0, size);
5636 /* Fall through */
5637 case 2:
ad69471c 5638 gen_neon_addl(size);
9ee6e8bb
PB
5639 break;
5640 case 3: case 7:
ad69471c 5641 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5642 if (op == 7) {
5643 gen_neon_negl(cpu_V0, size);
5644 }
ad69471c 5645 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5646 break;
5647 case 10:
5648 /* no-op */
5649 break;
5650 case 11:
ad69471c 5651 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5652 break;
5653 default:
5654 abort();
5655 }
ad69471c 5656 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5657 }
dd8fbd78 5658
dd8fbd78 5659
9ee6e8bb
PB
5660 break;
5661 default: /* 14 and 15 are RESERVED */
5662 return 1;
5663 }
5664 }
5665 } else { /* size == 3 */
5666 if (!u) {
5667 /* Extract. */
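                /* VEXT treats rm:rn as one bit string (rn in the low bytes)
                   and extracts a register-sized slice starting at byte 'imm'.
                   E.g. with D registers and imm == 3 the result is
                   (rn >> 24) | (rm << 40). */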
9ee6e8bb 5668 imm = (insn >> 8) & 0xf;
ad69471c
PB
5669
5670 if (imm > 7 && !q)
5671 return 1;
5672
52579ea1
PM
5673 if (q && ((rd | rn | rm) & 1)) {
5674 return 1;
5675 }
5676
ad69471c
PB
5677 if (imm == 0) {
5678 neon_load_reg64(cpu_V0, rn);
5679 if (q) {
5680 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5681 }
ad69471c
PB
5682 } else if (imm == 8) {
5683 neon_load_reg64(cpu_V0, rn + 1);
5684 if (q) {
5685 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5686 }
ad69471c 5687 } else if (q) {
a7812ae4 5688 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5689 if (imm < 8) {
5690 neon_load_reg64(cpu_V0, rn);
a7812ae4 5691 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5692 } else {
5693 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5694 neon_load_reg64(tmp64, rm);
ad69471c
PB
5695 }
5696 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5697 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5698 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5699 if (imm < 8) {
5700 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5701 } else {
ad69471c
PB
5702 neon_load_reg64(cpu_V1, rm + 1);
5703 imm -= 8;
9ee6e8bb 5704 }
ad69471c 5705 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5706 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5707 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5708 tcg_temp_free_i64(tmp64);
ad69471c 5709 } else {
a7812ae4 5710 /* BUGFIX */
ad69471c 5711 neon_load_reg64(cpu_V0, rn);
a7812ae4 5712 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5713 neon_load_reg64(cpu_V1, rm);
a7812ae4 5714 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5715 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5716 }
5717 neon_store_reg64(cpu_V0, rd);
5718 if (q) {
5719 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5720 }
5721 } else if ((insn & (1 << 11)) == 0) {
5722 /* Two register misc. */
5723 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5724 size = (insn >> 18) & 3;
600b828c
PM
5725 /* UNDEF for unknown op values and bad op-size combinations */
5726 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5727 return 1;
5728 }
fc2a9b37
PM
5729 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5730 q && ((rm | rd) & 1)) {
5731 return 1;
5732 }
9ee6e8bb 5733 switch (op) {
600b828c 5734 case NEON_2RM_VREV64:
9ee6e8bb 5735 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5736 tmp = neon_load_reg(rm, pass * 2);
5737 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5738 switch (size) {
dd8fbd78
FN
5739 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5740 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5741 case 2: /* no-op */ break;
5742 default: abort();
5743 }
dd8fbd78 5744 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5745 if (size == 2) {
dd8fbd78 5746 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5747 } else {
9ee6e8bb 5748 switch (size) {
dd8fbd78
FN
5749 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5750 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5751 default: abort();
5752 }
dd8fbd78 5753 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5754 }
5755 }
5756 break;
600b828c
PM
5757 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5758 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
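                /* Pairwise widening add: each pass widens one D register of
                   the source and sums adjacent element pairs into
                   double-width results; VPADAL also accumulates into the
                   existing destination. */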
ad69471c
PB
5759 for (pass = 0; pass < q + 1; pass++) {
5760 tmp = neon_load_reg(rm, pass * 2);
5761 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5762 tmp = neon_load_reg(rm, pass * 2 + 1);
5763 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5764 switch (size) {
5765 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5766 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5767 case 2: tcg_gen_add_i64(CPU_V001); break;
5768 default: abort();
5769 }
600b828c 5770 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5771 /* Accumulate. */
ad69471c
PB
5772 neon_load_reg64(cpu_V1, rd + pass);
5773 gen_neon_addl(size);
9ee6e8bb 5774 }
ad69471c 5775 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5776 }
5777 break;
600b828c 5778 case NEON_2RM_VTRN:
9ee6e8bb 5779 if (size == 2) {
a5a14945 5780 int n;
9ee6e8bb 5781 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5782 tmp = neon_load_reg(rm, n);
5783 tmp2 = neon_load_reg(rd, n + 1);
5784 neon_store_reg(rm, n, tmp2);
5785 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5786 }
5787 } else {
5788 goto elementwise;
5789 }
5790 break;
600b828c 5791 case NEON_2RM_VUZP:
02acedf9 5792 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5793 return 1;
9ee6e8bb
PB
5794 }
5795 break;
600b828c 5796 case NEON_2RM_VZIP:
d68a6f3a 5797 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5798 return 1;
9ee6e8bb
PB
5799 }
5800 break;
600b828c
PM
5801 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5802 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5803 if (rm & 1) {
5804 return 1;
5805 }
a50f5b91 5806 TCGV_UNUSED(tmp2);
9ee6e8bb 5807 for (pass = 0; pass < 2; pass++) {
ad69471c 5808 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5809 tmp = tcg_temp_new_i32();
600b828c
PM
5810 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5811 tmp, cpu_V0);
ad69471c
PB
5812 if (pass == 0) {
5813 tmp2 = tmp;
5814 } else {
5815 neon_store_reg(rd, 0, tmp2);
5816 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5817 }
9ee6e8bb
PB
5818 }
5819 break;
600b828c 5820 case NEON_2RM_VSHLL:
fc2a9b37 5821 if (q || (rd & 1)) {
9ee6e8bb 5822 return 1;
600b828c 5823 }
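                /* This is the VSHLL form whose shift amount equals the
                   element size: widen each element of the source D register
                   and shift left by 8 << size bits. */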
ad69471c
PB
5824 tmp = neon_load_reg(rm, 0);
5825 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5826 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5827 if (pass == 1)
5828 tmp = tmp2;
5829 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5830 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5831 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5832 }
5833 break;
600b828c 5834 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5835 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5836 q || (rm & 1)) {
5837 return 1;
5838 }
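                /* Convert the four f32 elements of rm to f16 and pack them
                   pairwise (even element in the low halfword) into the two
                   32-bit elements of rd. */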
7d1b0095
PM
5839 tmp = tcg_temp_new_i32();
5840 tmp2 = tcg_temp_new_i32();
60011498 5841 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5842 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5843 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5844 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5845 tcg_gen_shli_i32(tmp2, tmp2, 16);
5846 tcg_gen_or_i32(tmp2, tmp2, tmp);
5847 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5848 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5849 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5850 neon_store_reg(rd, 0, tmp2);
7d1b0095 5851 tmp2 = tcg_temp_new_i32();
2d981da7 5852 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5853 tcg_gen_shli_i32(tmp2, tmp2, 16);
5854 tcg_gen_or_i32(tmp2, tmp2, tmp);
5855 neon_store_reg(rd, 1, tmp2);
7d1b0095 5856 tcg_temp_free_i32(tmp);
60011498 5857 break;
600b828c 5858 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5859 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5860 q || (rd & 1)) {
5861 return 1;
5862 }
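                /* The widening direction: unpack the four f16 values held in
                   rm and write four f32 elements to the Q register rd. */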
7d1b0095 5863 tmp3 = tcg_temp_new_i32();
60011498
PB
5864 tmp = neon_load_reg(rm, 0);
5865 tmp2 = neon_load_reg(rm, 1);
5866 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5867 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5868 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5869 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5870 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5871 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5872 tcg_temp_free_i32(tmp);
60011498 5873 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5874 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5875 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5876 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5877 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5878 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5879 tcg_temp_free_i32(tmp2);
5880 tcg_temp_free_i32(tmp3);
60011498 5881 break;
9ee6e8bb
PB
5882 default:
5883 elementwise:
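                /* The remaining two-reg-misc ops are done element by element:
                   float ops work on cpu_F0s, integer ops on a 32-bit TCG
                   temporary; the result is written back after the switch. */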
5884 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5885 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5886 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5887 neon_reg_offset(rm, pass));
dd8fbd78 5888 TCGV_UNUSED(tmp);
9ee6e8bb 5889 } else {
dd8fbd78 5890 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5891 }
5892 switch (op) {
600b828c 5893 case NEON_2RM_VREV32:
9ee6e8bb 5894 switch (size) {
dd8fbd78
FN
5895 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5896 case 1: gen_swap_half(tmp); break;
600b828c 5897 default: abort();
9ee6e8bb
PB
5898 }
5899 break;
600b828c 5900 case NEON_2RM_VREV16:
dd8fbd78 5901 gen_rev16(tmp);
9ee6e8bb 5902 break;
600b828c 5903 case NEON_2RM_VCLS:
9ee6e8bb 5904 switch (size) {
dd8fbd78
FN
5905 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5906 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5907 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5908 default: abort();
9ee6e8bb
PB
5909 }
5910 break;
600b828c 5911 case NEON_2RM_VCLZ:
9ee6e8bb 5912 switch (size) {
dd8fbd78
FN
5913 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5914 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5915 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5916 default: abort();
9ee6e8bb
PB
5917 }
5918 break;
600b828c 5919 case NEON_2RM_VCNT:
dd8fbd78 5920 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5921 break;
600b828c 5922 case NEON_2RM_VMVN:
dd8fbd78 5923 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 5924 break;
600b828c 5925 case NEON_2RM_VQABS:
9ee6e8bb 5926 switch (size) {
2a3f75b4
PM
5927 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5928 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5929 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
600b828c 5930 default: abort();
9ee6e8bb
PB
5931 }
5932 break;
600b828c 5933 case NEON_2RM_VQNEG:
9ee6e8bb 5934 switch (size) {
2a3f75b4
PM
5935 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5936 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5937 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
600b828c 5938 default: abort();
9ee6e8bb
PB
5939 }
5940 break;
600b828c 5941 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 5942 tmp2 = tcg_const_i32(0);
9ee6e8bb 5943 switch(size) {
dd8fbd78
FN
5944 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5945 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5946 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 5947 default: abort();
9ee6e8bb 5948 }
dd8fbd78 5949 tcg_temp_free(tmp2);
600b828c 5950 if (op == NEON_2RM_VCLE0) {
dd8fbd78 5951 tcg_gen_not_i32(tmp, tmp);
600b828c 5952 }
9ee6e8bb 5953 break;
600b828c 5954 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 5955 tmp2 = tcg_const_i32(0);
9ee6e8bb 5956 switch(size) {
dd8fbd78
FN
5957 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5958 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5959 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 5960 default: abort();
9ee6e8bb 5961 }
dd8fbd78 5962 tcg_temp_free(tmp2);
600b828c 5963 if (op == NEON_2RM_VCLT0) {
dd8fbd78 5964 tcg_gen_not_i32(tmp, tmp);
600b828c 5965 }
9ee6e8bb 5966 break;
600b828c 5967 case NEON_2RM_VCEQ0:
dd8fbd78 5968 tmp2 = tcg_const_i32(0);
9ee6e8bb 5969 switch(size) {
dd8fbd78
FN
5970 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5971 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5972 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 5973 default: abort();
9ee6e8bb 5974 }
dd8fbd78 5975 tcg_temp_free(tmp2);
9ee6e8bb 5976 break;
600b828c 5977 case NEON_2RM_VABS:
9ee6e8bb 5978 switch(size) {
dd8fbd78
FN
5979 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5980 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5981 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 5982 default: abort();
9ee6e8bb
PB
5983 }
5984 break;
600b828c 5985 case NEON_2RM_VNEG:
dd8fbd78
FN
5986 tmp2 = tcg_const_i32(0);
5987 gen_neon_rsb(size, tmp, tmp2);
5988 tcg_temp_free(tmp2);
9ee6e8bb 5989 break;
600b828c 5990 case NEON_2RM_VCGT0_F:
dd8fbd78
FN
5991 tmp2 = tcg_const_i32(0);
5992 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5993 tcg_temp_free(tmp2);
9ee6e8bb 5994 break;
600b828c 5995 case NEON_2RM_VCGE0_F:
dd8fbd78
FN
5996 tmp2 = tcg_const_i32(0);
5997 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5998 tcg_temp_free(tmp2);
9ee6e8bb 5999 break;
600b828c 6000 case NEON_2RM_VCEQ0_F:
dd8fbd78
FN
6001 tmp2 = tcg_const_i32(0);
6002 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
6003 tcg_temp_free(tmp2);
9ee6e8bb 6004 break;
600b828c 6005 case NEON_2RM_VCLE0_F:
0e326109
PM
6006 tmp2 = tcg_const_i32(0);
6007 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
6008 tcg_temp_free(tmp2);
6009 break;
600b828c 6010 case NEON_2RM_VCLT0_F:
0e326109
PM
6011 tmp2 = tcg_const_i32(0);
6012 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
6013 tcg_temp_free(tmp2);
6014 break;
600b828c 6015 case NEON_2RM_VABS_F:
4373f3ce 6016 gen_vfp_abs(0);
9ee6e8bb 6017 break;
600b828c 6018 case NEON_2RM_VNEG_F:
4373f3ce 6019 gen_vfp_neg(0);
9ee6e8bb 6020 break;
600b828c 6021 case NEON_2RM_VSWP:
dd8fbd78
FN
6022 tmp2 = neon_load_reg(rd, pass);
6023 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6024 break;
600b828c 6025 case NEON_2RM_VTRN:
dd8fbd78 6026 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6027 switch (size) {
dd8fbd78
FN
6028 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6029 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6030 default: abort();
9ee6e8bb 6031 }
dd8fbd78 6032 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6033 break;
600b828c 6034 case NEON_2RM_VRECPE:
dd8fbd78 6035 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6036 break;
600b828c 6037 case NEON_2RM_VRSQRTE:
dd8fbd78 6038 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6039 break;
600b828c 6040 case NEON_2RM_VRECPE_F:
4373f3ce 6041 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6042 break;
600b828c 6043 case NEON_2RM_VRSQRTE_F:
4373f3ce 6044 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6045 break;
600b828c 6046 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
d3587ef8 6047 gen_vfp_sito(0);
9ee6e8bb 6048 break;
600b828c 6049 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
d3587ef8 6050 gen_vfp_uito(0);
9ee6e8bb 6051 break;
600b828c 6052 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
d3587ef8 6053 gen_vfp_tosiz(0);
9ee6e8bb 6054 break;
600b828c 6055 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
d3587ef8 6056 gen_vfp_touiz(0);
9ee6e8bb
PB
6057 break;
6058 default:
600b828c
PM
6059 /* Reserved op values were caught by the
6060 * neon_2rm_sizes[] check earlier.
6061 */
6062 abort();
9ee6e8bb 6063 }
600b828c 6064 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6065 tcg_gen_st_f32(cpu_F0s, cpu_env,
6066 neon_reg_offset(rd, pass));
9ee6e8bb 6067 } else {
dd8fbd78 6068 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6069 }
6070 }
6071 break;
6072 }
6073 } else if ((insn & (1 << 10)) == 0) {
6074 /* VTBL, VTBX. */
56907d77
PM
6075 int n = ((insn >> 8) & 3) + 1;
6076 if ((rn + n) > 32) {
6077 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6078 * helper function running off the end of the register file.
6079 */
6080 return 1;
6081 }
6082 n <<= 3;
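            /* n is now the table length in bytes (8 per register).  Bit 6
               selects VTBX, which keeps the old destination bytes for
               out-of-range indices, so the current value of rd is passed to
               the helper; for VTBL zeroes are passed instead. */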
9ee6e8bb 6083 if (insn & (1 << 6)) {
8f8e3aa4 6084 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6085 } else {
7d1b0095 6086 tmp = tcg_temp_new_i32();
8f8e3aa4 6087 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6088 }
8f8e3aa4 6089 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6090 tmp4 = tcg_const_i32(rn);
6091 tmp5 = tcg_const_i32(n);
6092 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 6093 tcg_temp_free_i32(tmp);
9ee6e8bb 6094 if (insn & (1 << 6)) {
8f8e3aa4 6095 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6096 } else {
7d1b0095 6097 tmp = tcg_temp_new_i32();
8f8e3aa4 6098 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6099 }
8f8e3aa4 6100 tmp3 = neon_load_reg(rm, 1);
b75263d6 6101 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6102 tcg_temp_free_i32(tmp5);
6103 tcg_temp_free_i32(tmp4);
8f8e3aa4 6104 neon_store_reg(rd, 0, tmp2);
3018f259 6105 neon_store_reg(rd, 1, tmp3);
7d1b0095 6106 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6107 } else if ((insn & 0x380) == 0) {
6108 /* VDUP */
133da6aa
JR
6109 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6110 return 1;
6111 }
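            /* VDUP (scalar): bit 19 selects which 32-bit half of the source
               D register holds the scalar; bits [18:16] encode the element
               size and its index within that word. */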
9ee6e8bb 6112 if (insn & (1 << 19)) {
dd8fbd78 6113 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6114 } else {
dd8fbd78 6115 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6116 }
6117 if (insn & (1 << 16)) {
dd8fbd78 6118 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6119 } else if (insn & (1 << 17)) {
6120 if ((insn >> 18) & 1)
dd8fbd78 6121 gen_neon_dup_high16(tmp);
9ee6e8bb 6122 else
dd8fbd78 6123 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6124 }
6125 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6126 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6127 tcg_gen_mov_i32(tmp2, tmp);
6128 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6129 }
7d1b0095 6130 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6131 } else {
6132 return 1;
6133 }
6134 }
6135 }
6136 return 0;
6137}
6138
fe1479c3
PB
6139static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6140{
6141 int crn = (insn >> 16) & 0xf;
6142 int crm = insn & 0xf;
6143 int op1 = (insn >> 21) & 7;
6144 int op2 = (insn >> 5) & 7;
6145 int rt = (insn >> 12) & 0xf;
6146 TCGv tmp;
6147
ca27c052
PM
6148 /* Minimal set of debug registers, since we don't support debug */
6149 if (op1 == 0 && crn == 0 && op2 == 0) {
6150 switch (crm) {
6151 case 0:
6152 /* DBGDIDR: just RAZ. In particular this means the
6153 * "debug architecture version" bits will read as
6154 * a reserved value, which should cause Linux to
6155 * not try to use the debug hardware.
6156 */
6157 tmp = tcg_const_i32(0);
6158 store_reg(s, rt, tmp);
6159 return 0;
6160 case 1:
6161 case 2:
6162 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6163 * don't implement memory mapped debug components
6164 */
6165 if (ENABLE_ARCH_7) {
6166 tmp = tcg_const_i32(0);
6167 store_reg(s, rt, tmp);
6168 return 0;
6169 }
6170 break;
6171 default:
6172 break;
6173 }
6174 }
6175
fe1479c3
PB
6176 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6177 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6178 /* TEECR */
6179 if (IS_USER(s))
6180 return 1;
6181 tmp = load_cpu_field(teecr);
6182 store_reg(s, rt, tmp);
6183 return 0;
6184 }
6185 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6186 /* TEEHBR */
6187 if (IS_USER(s) && (env->teecr & 1))
6188 return 1;
6189 tmp = load_cpu_field(teehbr);
6190 store_reg(s, rt, tmp);
6191 return 0;
6192 }
6193 }
6194 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6195 op1, crn, crm, op2);
6196 return 1;
6197}
6198
6199static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6200{
6201 int crn = (insn >> 16) & 0xf;
6202 int crm = insn & 0xf;
6203 int op1 = (insn >> 21) & 7;
6204 int op2 = (insn >> 5) & 7;
6205 int rt = (insn >> 12) & 0xf;
6206 TCGv tmp;
6207
6208 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6209 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6210 /* TEECR */
6211 if (IS_USER(s))
6212 return 1;
6213 tmp = load_reg(s, rt);
6214 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 6215 tcg_temp_free_i32(tmp);
fe1479c3
PB
6216 return 0;
6217 }
6218 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6219 /* TEEHBR */
6220 if (IS_USER(s) && (env->teecr & 1))
6221 return 1;
6222 tmp = load_reg(s, rt);
6223 store_cpu_field(tmp, teehbr);
6224 return 0;
6225 }
6226 }
6227 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6228 op1, crn, crm, op2);
6229 return 1;
6230}
6231
9ee6e8bb
PB
6232static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6233{
6234 int cpnum;
6235
6236 cpnum = (insn >> 8) & 0xf;
6237 if (arm_feature(env, ARM_FEATURE_XSCALE)
6238 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6239 return 1;
6240
6241 switch (cpnum) {
6242 case 0:
6243 case 1:
6244 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6245 return disas_iwmmxt_insn(env, s, insn);
6246 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6247 return disas_dsp_insn(env, s, insn);
6248 }
6249 return 1;
6250 case 10:
6251 case 11:
6252 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
6253 case 14:
6254 /* Coprocessors 7-15 are architecturally reserved by ARM.
6255 Unfortunately Intel decided to ignore this. */
6256 if (arm_feature(env, ARM_FEATURE_XSCALE))
6257 goto board;
6258 if (insn & (1 << 20))
6259 return disas_cp14_read(env, s, insn);
6260 else
6261 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
6262 case 15:
6263 return disas_cp15_insn (env, s, insn);
6264 default:
fe1479c3 6265 board:
9ee6e8bb
PB
6266 /* Unknown coprocessor. See if the board has hooked it. */
6267 return disas_cp_insn (env, s, insn);
6268 }
6269}
6270
5e3f878a
PB
6271
6272/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6273static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6274{
6275 TCGv tmp;
7d1b0095 6276 tmp = tcg_temp_new_i32();
5e3f878a
PB
6277 tcg_gen_trunc_i64_i32(tmp, val);
6278 store_reg(s, rlow, tmp);
7d1b0095 6279 tmp = tcg_temp_new_i32();
5e3f878a
PB
6280 tcg_gen_shri_i64(val, val, 32);
6281 tcg_gen_trunc_i64_i32(tmp, val);
6282 store_reg(s, rhigh, tmp);
6283}
6284
6285/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6286static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6287{
a7812ae4 6288 TCGv_i64 tmp;
5e3f878a
PB
6289 TCGv tmp2;
6290
36aa55dc 6291 /* Load value and extend to 64 bits. */
a7812ae4 6292 tmp = tcg_temp_new_i64();
5e3f878a
PB
6293 tmp2 = load_reg(s, rlow);
6294 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6295 tcg_temp_free_i32(tmp2);
5e3f878a 6296 tcg_gen_add_i64(val, val, tmp);
b75263d6 6297 tcg_temp_free_i64(tmp);
5e3f878a
PB
6298}
6299
6300/* load and add a 64-bit value from a register pair. */
a7812ae4 6301static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6302{
a7812ae4 6303 TCGv_i64 tmp;
36aa55dc
PB
6304 TCGv tmpl;
6305 TCGv tmph;
5e3f878a
PB
6306
6307 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6308 tmpl = load_reg(s, rlow);
6309 tmph = load_reg(s, rhigh);
a7812ae4 6310 tmp = tcg_temp_new_i64();
36aa55dc 6311 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6312 tcg_temp_free_i32(tmpl);
6313 tcg_temp_free_i32(tmph);
5e3f878a 6314 tcg_gen_add_i64(val, val, tmp);
b75263d6 6315 tcg_temp_free_i64(tmp);
5e3f878a
PB
6316}
6317
6318/* Set N and Z flags from a 64-bit value. */
a7812ae4 6319static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6320{
7d1b0095 6321 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6322 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6323 gen_logic_CC(tmp);
7d1b0095 6324 tcg_temp_free_i32(tmp);
5e3f878a
PB
6325}
6326
426f5abc
PB
6327/* Load/Store exclusive instructions are implemented by remembering
6328 the value/address loaded, and seeing if these are the same
 6329 when the store is performed. This should be sufficient to implement
6330 the architecturally mandated semantics, and avoids having to monitor
6331 regular stores.
6332
6333 In system emulation mode only one CPU will be running at once, so
6334 this sequence is effectively atomic. In user emulation mode we
6335 throw an exception and handle the atomic operation elsewhere. */
6336static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6337 TCGv addr, int size)
6338{
6339 TCGv tmp;
6340
6341 switch (size) {
6342 case 0:
6343 tmp = gen_ld8u(addr, IS_USER(s));
6344 break;
6345 case 1:
6346 tmp = gen_ld16u(addr, IS_USER(s));
6347 break;
6348 case 2:
6349 case 3:
6350 tmp = gen_ld32(addr, IS_USER(s));
6351 break;
6352 default:
6353 abort();
6354 }
6355 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6356 store_reg(s, rt, tmp);
6357 if (size == 3) {
7d1b0095 6358 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6359 tcg_gen_addi_i32(tmp2, addr, 4);
6360 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6361 tcg_temp_free_i32(tmp2);
426f5abc
PB
6362 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6363 store_reg(s, rt2, tmp);
6364 }
6365 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6366}
6367
6368static void gen_clrex(DisasContext *s)
6369{
6370 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6371}
6372
6373#ifdef CONFIG_USER_ONLY
6374static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6375 TCGv addr, int size)
6376{
6377 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6378 tcg_gen_movi_i32(cpu_exclusive_info,
6379 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6380 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6381}
6382#else
6383static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6384 TCGv addr, int size)
6385{
6386 TCGv tmp;
6387 int done_label;
6388 int fail_label;
6389
6390 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6391 [addr] = {Rt};
6392 {Rd} = 0;
6393 } else {
6394 {Rd} = 1;
6395 } */
6396 fail_label = gen_new_label();
6397 done_label = gen_new_label();
6398 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6399 switch (size) {
6400 case 0:
6401 tmp = gen_ld8u(addr, IS_USER(s));
6402 break;
6403 case 1:
6404 tmp = gen_ld16u(addr, IS_USER(s));
6405 break;
6406 case 2:
6407 case 3:
6408 tmp = gen_ld32(addr, IS_USER(s));
6409 break;
6410 default:
6411 abort();
6412 }
6413 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6414 tcg_temp_free_i32(tmp);
426f5abc 6415 if (size == 3) {
7d1b0095 6416 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6417 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6418 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6419 tcg_temp_free_i32(tmp2);
426f5abc 6420 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6421 tcg_temp_free_i32(tmp);
426f5abc
PB
6422 }
6423 tmp = load_reg(s, rt);
6424 switch (size) {
6425 case 0:
6426 gen_st8(tmp, addr, IS_USER(s));
6427 break;
6428 case 1:
6429 gen_st16(tmp, addr, IS_USER(s));
6430 break;
6431 case 2:
6432 case 3:
6433 gen_st32(tmp, addr, IS_USER(s));
6434 break;
6435 default:
6436 abort();
6437 }
6438 if (size == 3) {
6439 tcg_gen_addi_i32(addr, addr, 4);
6440 tmp = load_reg(s, rt2);
6441 gen_st32(tmp, addr, IS_USER(s));
6442 }
6443 tcg_gen_movi_i32(cpu_R[rd], 0);
6444 tcg_gen_br(done_label);
6445 gen_set_label(fail_label);
6446 tcg_gen_movi_i32(cpu_R[rd], 1);
6447 gen_set_label(done_label);
6448 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6449}
6450#endif
6451
9ee6e8bb
PB
6452static void disas_arm_insn(CPUState * env, DisasContext *s)
6453{
6454 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6455 TCGv tmp;
3670669c 6456 TCGv tmp2;
6ddbc6e4 6457 TCGv tmp3;
b0109805 6458 TCGv addr;
a7812ae4 6459 TCGv_i64 tmp64;
9ee6e8bb
PB
6460
6461 insn = ldl_code(s->pc);
6462 s->pc += 4;
6463
6464 /* M variants do not implement ARM mode. */
6465 if (IS_M(env))
6466 goto illegal_op;
6467 cond = insn >> 28;
6468 if (cond == 0xf){
be5e7a76
DES
6469 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6470 * choose to UNDEF. In ARMv5 and above the space is used
6471 * for miscellaneous unconditional instructions.
6472 */
6473 ARCH(5);
6474
9ee6e8bb
PB
6475 /* Unconditional instructions. */
6476 if (((insn >> 25) & 7) == 1) {
6477 /* NEON Data processing. */
6478 if (!arm_feature(env, ARM_FEATURE_NEON))
6479 goto illegal_op;
6480
6481 if (disas_neon_data_insn(env, s, insn))
6482 goto illegal_op;
6483 return;
6484 }
6485 if ((insn & 0x0f100000) == 0x04000000) {
6486 /* NEON load/store. */
6487 if (!arm_feature(env, ARM_FEATURE_NEON))
6488 goto illegal_op;
6489
6490 if (disas_neon_ls_insn(env, s, insn))
6491 goto illegal_op;
6492 return;
6493 }
3d185e5d
PM
6494 if (((insn & 0x0f30f000) == 0x0510f000) ||
6495 ((insn & 0x0f30f010) == 0x0710f000)) {
6496 if ((insn & (1 << 22)) == 0) {
6497 /* PLDW; v7MP */
6498 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6499 goto illegal_op;
6500 }
6501 }
6502 /* Otherwise PLD; v5TE+ */
be5e7a76 6503 ARCH(5TE);
3d185e5d
PM
6504 return;
6505 }
6506 if (((insn & 0x0f70f000) == 0x0450f000) ||
6507 ((insn & 0x0f70f010) == 0x0650f000)) {
6508 ARCH(7);
6509 return; /* PLI; V7 */
6510 }
6511 if (((insn & 0x0f700000) == 0x04100000) ||
6512 ((insn & 0x0f700010) == 0x06100000)) {
6513 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6514 goto illegal_op;
6515 }
6516 return; /* v7MP: Unallocated memory hint: must NOP */
6517 }
6518
6519 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6520 ARCH(6);
6521 /* setend */
6522 if (insn & (1 << 9)) {
6523 /* BE8 mode not implemented. */
6524 goto illegal_op;
6525 }
6526 return;
6527 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6528 switch ((insn >> 4) & 0xf) {
6529 case 1: /* clrex */
6530 ARCH(6K);
426f5abc 6531 gen_clrex(s);
9ee6e8bb
PB
6532 return;
6533 case 4: /* dsb */
6534 case 5: /* dmb */
6535 case 6: /* isb */
6536 ARCH(7);
6537 /* We don't emulate caches so these are a no-op. */
6538 return;
6539 default:
6540 goto illegal_op;
6541 }
6542 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6543 /* srs */
c67b6b71 6544 int32_t offset;
9ee6e8bb
PB
6545 if (IS_USER(s))
6546 goto illegal_op;
6547 ARCH(6);
6548 op1 = (insn & 0x1f);
7d1b0095 6549 addr = tcg_temp_new_i32();
39ea3d4e
PM
6550 tmp = tcg_const_i32(op1);
6551 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6552 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6553 i = (insn >> 23) & 3;
6554 switch (i) {
6555 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6556 case 1: offset = 0; break; /* IA */
6557 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6558 case 3: offset = 4; break; /* IB */
6559 default: abort();
6560 }
6561 if (offset)
b0109805
PB
6562 tcg_gen_addi_i32(addr, addr, offset);
6563 tmp = load_reg(s, 14);
6564 gen_st32(tmp, addr, 0);
c67b6b71 6565 tmp = load_cpu_field(spsr);
b0109805
PB
6566 tcg_gen_addi_i32(addr, addr, 4);
6567 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6568 if (insn & (1 << 21)) {
6569 /* Base writeback. */
6570 switch (i) {
6571 case 0: offset = -8; break;
c67b6b71
FN
6572 case 1: offset = 4; break;
6573 case 2: offset = -4; break;
9ee6e8bb
PB
6574 case 3: offset = 0; break;
6575 default: abort();
6576 }
6577 if (offset)
c67b6b71 6578 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6579 tmp = tcg_const_i32(op1);
6580 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6581 tcg_temp_free_i32(tmp);
7d1b0095 6582 tcg_temp_free_i32(addr);
b0109805 6583 } else {
7d1b0095 6584 tcg_temp_free_i32(addr);
9ee6e8bb 6585 }
a990f58f 6586 return;
ea825eee 6587 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6588 /* rfe */
c67b6b71 6589 int32_t offset;
9ee6e8bb
PB
6590 if (IS_USER(s))
6591 goto illegal_op;
6592 ARCH(6);
6593 rn = (insn >> 16) & 0xf;
b0109805 6594 addr = load_reg(s, rn);
9ee6e8bb
PB
6595 i = (insn >> 23) & 3;
6596 switch (i) {
b0109805 6597 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6598 case 1: offset = 0; break; /* IA */
6599 case 2: offset = -8; break; /* DB */
b0109805 6600 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6601 default: abort();
6602 }
6603 if (offset)
b0109805
PB
6604 tcg_gen_addi_i32(addr, addr, offset);
6605 /* Load PC into tmp and CPSR into tmp2. */
6606 tmp = gen_ld32(addr, 0);
6607 tcg_gen_addi_i32(addr, addr, 4);
6608 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6609 if (insn & (1 << 21)) {
6610 /* Base writeback. */
6611 switch (i) {
b0109805 6612 case 0: offset = -8; break;
c67b6b71
FN
6613 case 1: offset = 4; break;
6614 case 2: offset = -4; break;
b0109805 6615 case 3: offset = 0; break;
9ee6e8bb
PB
6616 default: abort();
6617 }
6618 if (offset)
b0109805
PB
6619 tcg_gen_addi_i32(addr, addr, offset);
6620 store_reg(s, rn, addr);
6621 } else {
7d1b0095 6622 tcg_temp_free_i32(addr);
9ee6e8bb 6623 }
b0109805 6624 gen_rfe(s, tmp, tmp2);
c67b6b71 6625 return;
9ee6e8bb
PB
6626 } else if ((insn & 0x0e000000) == 0x0a000000) {
6627 /* branch link and change to thumb (blx <offset>) */
6628 int32_t offset;
6629
6630 val = (uint32_t)s->pc;
7d1b0095 6631 tmp = tcg_temp_new_i32();
d9ba4830
PB
6632 tcg_gen_movi_i32(tmp, val);
6633 store_reg(s, 14, tmp);
9ee6e8bb
PB
6634 /* Sign-extend the 24-bit offset */
6635 offset = (((int32_t)insn) << 8) >> 8;
6636 /* offset * 4 + bit24 * 2 + (thumb bit) */
6637 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6638 /* pipeline offset */
6639 val += 4;
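            /* The target is therefore this insn's address + 8 + imm24 * 4
               + (H bit * 2), with bit 0 set so gen_bx_im switches to Thumb
               state. */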
be5e7a76 6640 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6641 gen_bx_im(s, val);
9ee6e8bb
PB
6642 return;
6643 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6644 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6645 /* iWMMXt register transfer. */
6646 if (env->cp15.c15_cpar & (1 << 1))
6647 if (!disas_iwmmxt_insn(env, s, insn))
6648 return;
6649 }
6650 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6651 /* Coprocessor double register transfer. */
be5e7a76 6652 ARCH(5TE);
9ee6e8bb
PB
6653 } else if ((insn & 0x0f000010) == 0x0e000010) {
6654 /* Additional coprocessor register transfer. */
7997d92f 6655 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6656 uint32_t mask;
6657 uint32_t val;
6658 /* cps (privileged) */
6659 if (IS_USER(s))
6660 return;
6661 mask = val = 0;
6662 if (insn & (1 << 19)) {
6663 if (insn & (1 << 8))
6664 mask |= CPSR_A;
6665 if (insn & (1 << 7))
6666 mask |= CPSR_I;
6667 if (insn & (1 << 6))
6668 mask |= CPSR_F;
6669 if (insn & (1 << 18))
6670 val |= mask;
6671 }
7997d92f 6672 if (insn & (1 << 17)) {
9ee6e8bb
PB
6673 mask |= CPSR_M;
6674 val |= (insn & 0x1f);
6675 }
6676 if (mask) {
2fbac54b 6677 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6678 }
6679 return;
6680 }
6681 goto illegal_op;
6682 }
6683 if (cond != 0xe) {
6684 /* if not always execute, we generate a conditional jump to
6685 next instruction */
6686 s->condlabel = gen_new_label();
d9ba4830 6687 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6688 s->condjmp = 1;
6689 }
6690 if ((insn & 0x0f900000) == 0x03000000) {
6691 if ((insn & (1 << 21)) == 0) {
6692 ARCH(6T2);
6693 rd = (insn >> 12) & 0xf;
6694 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6695 if ((insn & (1 << 22)) == 0) {
6696 /* MOVW */
7d1b0095 6697 tmp = tcg_temp_new_i32();
5e3f878a 6698 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6699 } else {
6700 /* MOVT */
5e3f878a 6701 tmp = load_reg(s, rd);
86831435 6702 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6703 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6704 }
5e3f878a 6705 store_reg(s, rd, tmp);
9ee6e8bb
PB
6706 } else {
6707 if (((insn >> 12) & 0xf) != 0xf)
6708 goto illegal_op;
6709 if (((insn >> 16) & 0xf) == 0) {
6710 gen_nop_hint(s, insn & 0xff);
6711 } else {
6712 /* CPSR = immediate */
6713 val = insn & 0xff;
6714 shift = ((insn >> 8) & 0xf) * 2;
6715 if (shift)
6716 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6717 i = ((insn & (1 << 22)) != 0);
2fbac54b 6718 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6719 goto illegal_op;
6720 }
6721 }
6722 } else if ((insn & 0x0f900000) == 0x01000000
6723 && (insn & 0x00000090) != 0x00000090) {
6724 /* miscellaneous instructions */
6725 op1 = (insn >> 21) & 3;
6726 sh = (insn >> 4) & 0xf;
6727 rm = insn & 0xf;
6728 switch (sh) {
6729 case 0x0: /* move program status register */
6730 if (op1 & 1) {
6731 /* PSR = reg */
2fbac54b 6732 tmp = load_reg(s, rm);
9ee6e8bb 6733 i = ((op1 & 2) != 0);
2fbac54b 6734 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6735 goto illegal_op;
6736 } else {
6737 /* reg = PSR */
6738 rd = (insn >> 12) & 0xf;
6739 if (op1 & 2) {
6740 if (IS_USER(s))
6741 goto illegal_op;
d9ba4830 6742 tmp = load_cpu_field(spsr);
9ee6e8bb 6743 } else {
7d1b0095 6744 tmp = tcg_temp_new_i32();
d9ba4830 6745 gen_helper_cpsr_read(tmp);
9ee6e8bb 6746 }
d9ba4830 6747 store_reg(s, rd, tmp);
9ee6e8bb
PB
6748 }
6749 break;
6750 case 0x1:
6751 if (op1 == 1) {
6752 /* branch/exchange thumb (bx). */
be5e7a76 6753 ARCH(4T);
d9ba4830
PB
6754 tmp = load_reg(s, rm);
6755 gen_bx(s, tmp);
9ee6e8bb
PB
6756 } else if (op1 == 3) {
6757 /* clz */
be5e7a76 6758 ARCH(5);
9ee6e8bb 6759 rd = (insn >> 12) & 0xf;
1497c961
PB
6760 tmp = load_reg(s, rm);
6761 gen_helper_clz(tmp, tmp);
6762 store_reg(s, rd, tmp);
9ee6e8bb
PB
6763 } else {
6764 goto illegal_op;
6765 }
6766 break;
6767 case 0x2:
6768 if (op1 == 1) {
6769 ARCH(5J); /* bxj */
6770 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6771 tmp = load_reg(s, rm);
6772 gen_bx(s, tmp);
9ee6e8bb
PB
6773 } else {
6774 goto illegal_op;
6775 }
6776 break;
6777 case 0x3:
6778 if (op1 != 1)
6779 goto illegal_op;
6780
be5e7a76 6781 ARCH(5);
9ee6e8bb 6782 /* branch link/exchange thumb (blx) */
d9ba4830 6783 tmp = load_reg(s, rm);
7d1b0095 6784 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6785 tcg_gen_movi_i32(tmp2, s->pc);
6786 store_reg(s, 14, tmp2);
6787 gen_bx(s, tmp);
9ee6e8bb
PB
6788 break;
6789 case 0x5: /* saturating add/subtract */
be5e7a76 6790 ARCH(5TE);
9ee6e8bb
PB
6791 rd = (insn >> 12) & 0xf;
6792 rn = (insn >> 16) & 0xf;
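            /* QADD/QSUB/QDADD/QDSUB: bit 1 of op1 selects the doubling
               forms, which saturate-double rn first; bit 0 selects subtract
               rather than add. */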
b40d0353 6793 tmp = load_reg(s, rm);
5e3f878a 6794 tmp2 = load_reg(s, rn);
9ee6e8bb 6795 if (op1 & 2)
5e3f878a 6796 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6797 if (op1 & 1)
5e3f878a 6798 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6799 else
5e3f878a 6800 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6801 tcg_temp_free_i32(tmp2);
5e3f878a 6802 store_reg(s, rd, tmp);
9ee6e8bb 6803 break;
49e14940
AL
6804 case 7:
6805 /* SMC instruction (op1 == 3)
6806 and undefined instructions (op1 == 0 || op1 == 2)
6807 will trap */
6808 if (op1 != 1) {
6809 goto illegal_op;
6810 }
6811 /* bkpt */
be5e7a76 6812 ARCH(5);
bc4a0de0 6813 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6814 break;
6815 case 0x8: /* signed multiply */
6816 case 0xa:
6817 case 0xc:
6818 case 0xe:
be5e7a76 6819 ARCH(5TE);
9ee6e8bb
PB
6820 rs = (insn >> 8) & 0xf;
6821 rn = (insn >> 12) & 0xf;
6822 rd = (insn >> 16) & 0xf;
6823 if (op1 == 1) {
6824 /* (32 * 16) >> 16 */
5e3f878a
PB
6825 tmp = load_reg(s, rm);
6826 tmp2 = load_reg(s, rs);
9ee6e8bb 6827 if (sh & 4)
5e3f878a 6828 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6829 else
5e3f878a 6830 gen_sxth(tmp2);
a7812ae4
PB
6831 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6832 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6833 tmp = tcg_temp_new_i32();
a7812ae4 6834 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6835 tcg_temp_free_i64(tmp64);
9ee6e8bb 6836 if ((sh & 2) == 0) {
5e3f878a
PB
6837 tmp2 = load_reg(s, rn);
6838 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6839 tcg_temp_free_i32(tmp2);
9ee6e8bb 6840 }
5e3f878a 6841 store_reg(s, rd, tmp);
9ee6e8bb
PB
6842 } else {
6843 /* 16 * 16 */
5e3f878a
PB
6844 tmp = load_reg(s, rm);
6845 tmp2 = load_reg(s, rs);
6846 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6847 tcg_temp_free_i32(tmp2);
9ee6e8bb 6848 if (op1 == 2) {
a7812ae4
PB
6849 tmp64 = tcg_temp_new_i64();
6850 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6851 tcg_temp_free_i32(tmp);
a7812ae4
PB
6852 gen_addq(s, tmp64, rn, rd);
6853 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6854 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6855 } else {
6856 if (op1 == 0) {
5e3f878a
PB
6857 tmp2 = load_reg(s, rn);
6858 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6859 tcg_temp_free_i32(tmp2);
9ee6e8bb 6860 }
5e3f878a 6861 store_reg(s, rd, tmp);
9ee6e8bb
PB
6862 }
6863 }
6864 break;
6865 default:
6866 goto illegal_op;
6867 }
6868 } else if (((insn & 0x0e000000) == 0 &&
6869 (insn & 0x00000090) != 0x90) ||
6870 ((insn & 0x0e000000) == (1 << 25))) {
6871 int set_cc, logic_cc, shiftop;
6872
6873 op1 = (insn >> 21) & 0xf;
6874 set_cc = (insn >> 20) & 1;
6875 logic_cc = table_logic_cc[op1] & set_cc;
6876
6877 /* data processing instruction */
6878 if (insn & (1 << 25)) {
6879 /* immediate operand */
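            /* ARM "modified immediate": an 8-bit value rotated right by
               twice the 4-bit rotate field.  E.g. 0xff with rotate 0xf
               gives 0x000003fc. */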
6880 val = insn & 0xff;
6881 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6882 if (shift) {
9ee6e8bb 6883 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6884 }
7d1b0095 6885 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6886 tcg_gen_movi_i32(tmp2, val);
6887 if (logic_cc && shift) {
6888 gen_set_CF_bit31(tmp2);
6889 }
9ee6e8bb
PB
6890 } else {
6891 /* register */
6892 rm = (insn) & 0xf;
e9bb4aa9 6893 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6894 shiftop = (insn >> 5) & 3;
6895 if (!(insn & (1 << 4))) {
6896 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6897 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6898 } else {
6899 rs = (insn >> 8) & 0xf;
8984bd2e 6900 tmp = load_reg(s, rs);
e9bb4aa9 6901 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6902 }
6903 }
6904 if (op1 != 0x0f && op1 != 0x0d) {
6905 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6906 tmp = load_reg(s, rn);
6907 } else {
6908 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6909 }
6910 rd = (insn >> 12) & 0xf;
6911 switch(op1) {
6912 case 0x00:
e9bb4aa9
JR
6913 tcg_gen_and_i32(tmp, tmp, tmp2);
6914 if (logic_cc) {
6915 gen_logic_CC(tmp);
6916 }
21aeb343 6917 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6918 break;
6919 case 0x01:
e9bb4aa9
JR
6920 tcg_gen_xor_i32(tmp, tmp, tmp2);
6921 if (logic_cc) {
6922 gen_logic_CC(tmp);
6923 }
21aeb343 6924 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6925 break;
6926 case 0x02:
6927 if (set_cc && rd == 15) {
6928 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6929 if (IS_USER(s)) {
9ee6e8bb 6930 goto illegal_op;
e9bb4aa9
JR
6931 }
6932 gen_helper_sub_cc(tmp, tmp, tmp2);
6933 gen_exception_return(s, tmp);
9ee6e8bb 6934 } else {
e9bb4aa9
JR
6935 if (set_cc) {
6936 gen_helper_sub_cc(tmp, tmp, tmp2);
6937 } else {
6938 tcg_gen_sub_i32(tmp, tmp, tmp2);
6939 }
21aeb343 6940 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6941 }
6942 break;
6943 case 0x03:
e9bb4aa9
JR
6944 if (set_cc) {
6945 gen_helper_sub_cc(tmp, tmp2, tmp);
6946 } else {
6947 tcg_gen_sub_i32(tmp, tmp2, tmp);
6948 }
21aeb343 6949 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6950 break;
6951 case 0x04:
e9bb4aa9
JR
6952 if (set_cc) {
6953 gen_helper_add_cc(tmp, tmp, tmp2);
6954 } else {
6955 tcg_gen_add_i32(tmp, tmp, tmp2);
6956 }
21aeb343 6957 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6958 break;
6959 case 0x05:
e9bb4aa9
JR
6960 if (set_cc) {
6961 gen_helper_adc_cc(tmp, tmp, tmp2);
6962 } else {
6963 gen_add_carry(tmp, tmp, tmp2);
6964 }
21aeb343 6965 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6966 break;
6967 case 0x06:
e9bb4aa9
JR
6968 if (set_cc) {
6969 gen_helper_sbc_cc(tmp, tmp, tmp2);
6970 } else {
6971 gen_sub_carry(tmp, tmp, tmp2);
6972 }
21aeb343 6973 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6974 break;
6975 case 0x07:
e9bb4aa9
JR
6976 if (set_cc) {
6977 gen_helper_sbc_cc(tmp, tmp2, tmp);
6978 } else {
6979 gen_sub_carry(tmp, tmp2, tmp);
6980 }
21aeb343 6981 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6982 break;
6983 case 0x08:
6984 if (set_cc) {
e9bb4aa9
JR
6985 tcg_gen_and_i32(tmp, tmp, tmp2);
6986 gen_logic_CC(tmp);
9ee6e8bb 6987 }
7d1b0095 6988 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6989 break;
6990 case 0x09:
6991 if (set_cc) {
e9bb4aa9
JR
6992 tcg_gen_xor_i32(tmp, tmp, tmp2);
6993 gen_logic_CC(tmp);
9ee6e8bb 6994 }
7d1b0095 6995 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6996 break;
6997 case 0x0a:
6998 if (set_cc) {
e9bb4aa9 6999 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 7000 }
7d1b0095 7001 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7002 break;
7003 case 0x0b:
7004 if (set_cc) {
e9bb4aa9 7005 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 7006 }
7d1b0095 7007 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7008 break;
7009 case 0x0c:
e9bb4aa9
JR
7010 tcg_gen_or_i32(tmp, tmp, tmp2);
7011 if (logic_cc) {
7012 gen_logic_CC(tmp);
7013 }
21aeb343 7014 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7015 break;
7016 case 0x0d:
7017 if (logic_cc && rd == 15) {
7018 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7019 if (IS_USER(s)) {
9ee6e8bb 7020 goto illegal_op;
e9bb4aa9
JR
7021 }
7022 gen_exception_return(s, tmp2);
9ee6e8bb 7023 } else {
e9bb4aa9
JR
7024 if (logic_cc) {
7025 gen_logic_CC(tmp2);
7026 }
21aeb343 7027 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7028 }
7029 break;
7030 case 0x0e:
f669df27 7031 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7032 if (logic_cc) {
7033 gen_logic_CC(tmp);
7034 }
21aeb343 7035 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7036 break;
7037 default:
7038 case 0x0f:
e9bb4aa9
JR
7039 tcg_gen_not_i32(tmp2, tmp2);
7040 if (logic_cc) {
7041 gen_logic_CC(tmp2);
7042 }
21aeb343 7043 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7044 break;
7045 }
e9bb4aa9 7046 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7047 tcg_temp_free_i32(tmp2);
e9bb4aa9 7048 }
9ee6e8bb
PB
7049 } else {
7050 /* other instructions */
7051 op1 = (insn >> 24) & 0xf;
7052 switch(op1) {
7053 case 0x0:
7054 case 0x1:
7055 /* multiplies, extra load/stores */
7056 sh = (insn >> 5) & 3;
7057 if (sh == 0) {
7058 if (op1 == 0x0) {
7059 rd = (insn >> 16) & 0xf;
7060 rn = (insn >> 12) & 0xf;
7061 rs = (insn >> 8) & 0xf;
7062 rm = (insn) & 0xf;
7063 op1 = (insn >> 20) & 0xf;
7064 switch (op1) {
7065 case 0: case 1: case 2: case 3: case 6:
7066 /* 32 bit mul */
5e3f878a
PB
7067 tmp = load_reg(s, rs);
7068 tmp2 = load_reg(s, rm);
7069 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7070 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7071 if (insn & (1 << 22)) {
7072 /* Subtract (mls) */
7073 ARCH(6T2);
5e3f878a
PB
7074 tmp2 = load_reg(s, rn);
7075 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7076 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7077 } else if (insn & (1 << 21)) {
7078 /* Add */
5e3f878a
PB
7079 tmp2 = load_reg(s, rn);
7080 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7081 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7082 }
7083 if (insn & (1 << 20))
5e3f878a
PB
7084 gen_logic_CC(tmp);
7085 store_reg(s, rd, tmp);
9ee6e8bb 7086 break;
8aac08b1
AJ
7087 case 4:
7088 /* 64 bit mul double accumulate (UMAAL) */
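                        /* UMAAL: {rd:rn} = rm * rs + rn + rd.  Adding the
                           two 32-bit values cannot overflow the 64-bit
                           result. */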
7089 ARCH(6);
7090 tmp = load_reg(s, rs);
7091 tmp2 = load_reg(s, rm);
7092 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7093 gen_addq_lo(s, tmp64, rn);
7094 gen_addq_lo(s, tmp64, rd);
7095 gen_storeq_reg(s, rn, rd, tmp64);
7096 tcg_temp_free_i64(tmp64);
7097 break;
7098 case 8: case 9: case 10: case 11:
7099 case 12: case 13: case 14: case 15:
7100 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7101 tmp = load_reg(s, rs);
7102 tmp2 = load_reg(s, rm);
8aac08b1 7103 if (insn & (1 << 22)) {
a7812ae4 7104 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7105 } else {
a7812ae4 7106 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7107 }
7108 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7109 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7110 }
8aac08b1 7111 if (insn & (1 << 20)) {
a7812ae4 7112 gen_logicq_cc(tmp64);
8aac08b1 7113 }
a7812ae4 7114 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7115 tcg_temp_free_i64(tmp64);
9ee6e8bb 7116 break;
8aac08b1
AJ
7117 default:
7118 goto illegal_op;
9ee6e8bb
PB
7119 }
7120 } else {
7121 rn = (insn >> 16) & 0xf;
7122 rd = (insn >> 12) & 0xf;
7123 if (insn & (1 << 23)) {
7124 /* load/store exclusive */
86753403
PB
7125 op1 = (insn >> 21) & 0x3;
7126 if (op1)
a47f43d2 7127 ARCH(6K);
86753403
PB
7128 else
7129 ARCH(6);
3174f8e9 7130 addr = tcg_temp_local_new_i32();
98a46317 7131 load_reg_var(s, addr, rn);
9ee6e8bb 7132 if (insn & (1 << 20)) {
86753403
PB
7133 switch (op1) {
7134 case 0: /* ldrex */
426f5abc 7135 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7136 break;
7137 case 1: /* ldrexd */
426f5abc 7138 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7139 break;
7140 case 2: /* ldrexb */
426f5abc 7141 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7142 break;
7143 case 3: /* ldrexh */
426f5abc 7144 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7145 break;
7146 default:
7147 abort();
7148 }
9ee6e8bb
PB
7149 } else {
7150 rm = insn & 0xf;
86753403
PB
7151 switch (op1) {
7152 case 0: /* strex */
426f5abc 7153 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7154 break;
7155 case 1: /* strexd */
502e64fe 7156 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7157 break;
7158 case 2: /* strexb */
426f5abc 7159 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7160 break;
7161 case 3: /* strexh */
426f5abc 7162 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7163 break;
7164 default:
7165 abort();
7166 }
9ee6e8bb 7167 }
3174f8e9 7168 tcg_temp_free(addr);
9ee6e8bb
PB
7169 } else {
7170 /* SWP instruction */
7171 rm = (insn) & 0xf;
7172
8984bd2e
PB
7173 /* ??? This is not really atomic. However we know
7174 we never have multiple CPUs running in parallel,
7175 so it is good enough. */
7176 addr = load_reg(s, rn);
7177 tmp = load_reg(s, rm);
9ee6e8bb 7178 if (insn & (1 << 22)) {
8984bd2e
PB
7179 tmp2 = gen_ld8u(addr, IS_USER(s));
7180 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7181 } else {
8984bd2e
PB
7182 tmp2 = gen_ld32(addr, IS_USER(s));
7183 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7184 }
7d1b0095 7185 tcg_temp_free_i32(addr);
8984bd2e 7186 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7187 }
7188 }
7189 } else {
7190 int address_offset;
7191 int load;
7192 /* Misc load/store */
7193 rn = (insn >> 16) & 0xf;
7194 rd = (insn >> 12) & 0xf;
b0109805 7195 addr = load_reg(s, rn);
9ee6e8bb 7196 if (insn & (1 << 24))
b0109805 7197 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7198 address_offset = 0;
7199 if (insn & (1 << 20)) {
7200 /* load */
7201 switch(sh) {
7202 case 1:
b0109805 7203 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7204 break;
7205 case 2:
b0109805 7206 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7207 break;
7208 default:
7209 case 3:
b0109805 7210 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7211 break;
7212 }
7213 load = 1;
7214 } else if (sh & 2) {
be5e7a76 7215 ARCH(5TE);
9ee6e8bb
PB
7216 /* doubleword */
7217 if (sh & 1) {
7218 /* store */
b0109805
PB
7219 tmp = load_reg(s, rd);
7220 gen_st32(tmp, addr, IS_USER(s));
7221 tcg_gen_addi_i32(addr, addr, 4);
7222 tmp = load_reg(s, rd + 1);
7223 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7224 load = 0;
7225 } else {
7226 /* load */
b0109805
PB
7227 tmp = gen_ld32(addr, IS_USER(s));
7228 store_reg(s, rd, tmp);
7229 tcg_gen_addi_i32(addr, addr, 4);
7230 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7231 rd++;
7232 load = 1;
7233 }
7234 address_offset = -4;
7235 } else {
7236 /* store */
b0109805
PB
7237 tmp = load_reg(s, rd);
7238 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7239 load = 0;
7240 }
7241 /* Perform base writeback before the loaded value to
7242 ensure correct behavior with overlapping index registers.
 7243 ldrd with base writeback is undefined if the
7244 destination and index registers overlap. */
7245 if (!(insn & (1 << 24))) {
b0109805
PB
7246 gen_add_datah_offset(s, insn, address_offset, addr);
7247 store_reg(s, rn, addr);
9ee6e8bb
PB
7248 } else if (insn & (1 << 21)) {
7249 if (address_offset)
b0109805
PB
7250 tcg_gen_addi_i32(addr, addr, address_offset);
7251 store_reg(s, rn, addr);
7252 } else {
7d1b0095 7253 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7254 }
7255 if (load) {
7256 /* Complete the load. */
b0109805 7257 store_reg(s, rd, tmp);
9ee6e8bb
PB
7258 }
7259 }
7260 break;
7261 case 0x4:
7262 case 0x5:
7263 goto do_ldst;
7264 case 0x6:
7265 case 0x7:
7266 if (insn & (1 << 4)) {
7267 ARCH(6);
7268 /* Armv6 Media instructions. */
7269 rm = insn & 0xf;
7270 rn = (insn >> 16) & 0xf;
2c0262af 7271 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7272 rs = (insn >> 8) & 0xf;
7273 switch ((insn >> 23) & 3) {
7274 case 0: /* Parallel add/subtract. */
7275 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7276 tmp = load_reg(s, rn);
7277 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7278 sh = (insn >> 5) & 7;
7279 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7280 goto illegal_op;
6ddbc6e4 7281 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7282 tcg_temp_free_i32(tmp2);
6ddbc6e4 7283 store_reg(s, rd, tmp);
9ee6e8bb
PB
7284 break;
7285 case 1:
7286 if ((insn & 0x00700020) == 0) {
6c95676b 7287 /* Halfword pack. */
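                    /* PKHBT: bottom halfword of rn with the top halfword of
                       rm LSL #shift; PKHTB: top halfword of rn with the
                       bottom halfword of rm ASR #shift. */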
3670669c
PB
7288 tmp = load_reg(s, rn);
7289 tmp2 = load_reg(s, rm);
9ee6e8bb 7290 shift = (insn >> 7) & 0x1f;
3670669c
PB
7291 if (insn & (1 << 6)) {
7292 /* pkhtb */
22478e79
AZ
7293 if (shift == 0)
7294 shift = 31;
7295 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7296 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7297 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7298 } else {
7299 /* pkhbt */
22478e79
AZ
7300 if (shift)
7301 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7302 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7303 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7304 }
7305 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7306 tcg_temp_free_i32(tmp2);
3670669c 7307 store_reg(s, rd, tmp);
9ee6e8bb
PB
7308 } else if ((insn & 0x00200020) == 0x00200000) {
7309 /* [us]sat */
6ddbc6e4 7310 tmp = load_reg(s, rm);
9ee6e8bb
PB
7311 shift = (insn >> 7) & 0x1f;
7312 if (insn & (1 << 6)) {
7313 if (shift == 0)
7314 shift = 31;
6ddbc6e4 7315 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7316 } else {
6ddbc6e4 7317 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7318 }
7319 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7320 tmp2 = tcg_const_i32(sh);
7321 if (insn & (1 << 22))
7322 gen_helper_usat(tmp, tmp, tmp2);
7323 else
7324 gen_helper_ssat(tmp, tmp, tmp2);
7325 tcg_temp_free_i32(tmp2);
6ddbc6e4 7326 store_reg(s, rd, tmp);
9ee6e8bb
PB
7327 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7328 /* [us]sat16 */
6ddbc6e4 7329 tmp = load_reg(s, rm);
9ee6e8bb 7330 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7331 tmp2 = tcg_const_i32(sh);
7332 if (insn & (1 << 22))
7333 gen_helper_usat16(tmp, tmp, tmp2);
7334 else
7335 gen_helper_ssat16(tmp, tmp, tmp2);
7336 tcg_temp_free_i32(tmp2);
6ddbc6e4 7337 store_reg(s, rd, tmp);
9ee6e8bb
PB
7338 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7339 /* Select bytes. */
6ddbc6e4
PB
7340 tmp = load_reg(s, rn);
7341 tmp2 = load_reg(s, rm);
7d1b0095 7342 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
7343 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7344 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7345 tcg_temp_free_i32(tmp3);
7346 tcg_temp_free_i32(tmp2);
6ddbc6e4 7347 store_reg(s, rd, tmp);
9ee6e8bb 7348 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7349 tmp = load_reg(s, rm);
9ee6e8bb 7350 shift = (insn >> 10) & 3;
1301f322 7351 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7352 rotate, a shift is sufficient. */
7353 if (shift != 0)
f669df27 7354 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7355 op1 = (insn >> 20) & 7;
7356 switch (op1) {
5e3f878a
PB
7357 case 0: gen_sxtb16(tmp); break;
7358 case 2: gen_sxtb(tmp); break;
7359 case 3: gen_sxth(tmp); break;
7360 case 4: gen_uxtb16(tmp); break;
7361 case 6: gen_uxtb(tmp); break;
7362 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7363 default: goto illegal_op;
7364 }
7365 if (rn != 15) {
5e3f878a 7366 tmp2 = load_reg(s, rn);
9ee6e8bb 7367 if ((op1 & 3) == 0) {
5e3f878a 7368 gen_add16(tmp, tmp2);
9ee6e8bb 7369 } else {
5e3f878a 7370 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7371 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7372 }
7373 }
6c95676b 7374 store_reg(s, rd, tmp);
9ee6e8bb
PB
7375 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7376 /* rev */
b0109805 7377 tmp = load_reg(s, rm);
9ee6e8bb
PB
7378 if (insn & (1 << 22)) {
7379 if (insn & (1 << 7)) {
b0109805 7380 gen_revsh(tmp);
9ee6e8bb
PB
7381 } else {
7382 ARCH(6T2);
b0109805 7383 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7384 }
7385 } else {
7386 if (insn & (1 << 7))
b0109805 7387 gen_rev16(tmp);
9ee6e8bb 7388 else
66896cb8 7389 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7390 }
b0109805 7391 store_reg(s, rd, tmp);
9ee6e8bb
PB
7392 } else {
7393 goto illegal_op;
7394 }
7395 break;
7396 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7397 tmp = load_reg(s, rm);
7398 tmp2 = load_reg(s, rs);
9ee6e8bb 7399 if (insn & (1 << 20)) {
838fa72d
AJ
7400 /* Signed multiply most significant [accumulate].
7401 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7402 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7403
955a7dd5 7404 if (rd != 15) {
838fa72d 7405 tmp = load_reg(s, rd);
9ee6e8bb 7406 if (insn & (1 << 6)) {
838fa72d 7407 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7408 } else {
838fa72d 7409 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7410 }
7411 }
838fa72d
AJ
7412 if (insn & (1 << 5)) {
7413 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7414 }
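                    /* Bit 5 selects the rounding variants (SMMULR, SMMLAR,
                       SMMLSR): adding 0x80000000 before the 32-bit right
                       shift below rounds the high word to nearest instead of
                       truncating. */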
7415 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7416 tmp = tcg_temp_new_i32();
838fa72d
AJ
7417 tcg_gen_trunc_i64_i32(tmp, tmp64);
7418 tcg_temp_free_i64(tmp64);
955a7dd5 7419 store_reg(s, rn, tmp);
9ee6e8bb
PB
7420 } else {
7421 if (insn & (1 << 5))
5e3f878a
PB
7422 gen_swap_half(tmp2);
7423 gen_smul_dual(tmp, tmp2);
5e3f878a 7424 if (insn & (1 << 6)) {
e1d177b9 7425 /* This subtraction cannot overflow. */
5e3f878a
PB
7426 tcg_gen_sub_i32(tmp, tmp, tmp2);
7427 } else {
e1d177b9
PM
7428 /* This addition cannot overflow 32 bits;
7429 * however it may overflow considered as a signed
7430 * operation, in which case we must set the Q flag.
7431 */
7432 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7433 }
7d1b0095 7434 tcg_temp_free_i32(tmp2);
9ee6e8bb 7435 if (insn & (1 << 22)) {
5e3f878a 7436 /* smlald, smlsld */
a7812ae4
PB
7437 tmp64 = tcg_temp_new_i64();
7438 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7439 tcg_temp_free_i32(tmp);
a7812ae4
PB
7440 gen_addq(s, tmp64, rd, rn);
7441 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7442 tcg_temp_free_i64(tmp64);
9ee6e8bb 7443 } else {
5e3f878a 7444 /* smuad, smusd, smlad, smlsd */
22478e79 7445 if (rd != 15)
9ee6e8bb 7446 {
22478e79 7447 tmp2 = load_reg(s, rd);
5e3f878a 7448 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7449 tcg_temp_free_i32(tmp2);
9ee6e8bb 7450 }
22478e79 7451 store_reg(s, rn, tmp);
9ee6e8bb
PB
7452 }
7453 }
7454 break;
7455 case 3:
7456 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7457 switch (op1) {
7458 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7459 ARCH(6);
7460 tmp = load_reg(s, rm);
7461 tmp2 = load_reg(s, rs);
7462 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7463 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7464 if (rd != 15) {
7465 tmp2 = load_reg(s, rd);
6ddbc6e4 7466 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7467 tcg_temp_free_i32(tmp2);
9ee6e8bb 7468 }
ded9d295 7469 store_reg(s, rn, tmp);
9ee6e8bb
PB
7470 break;
7471 case 0x20: case 0x24: case 0x28: case 0x2c:
7472 /* Bitfield insert/clear. */
7473 ARCH(6T2);
7474 shift = (insn >> 7) & 0x1f;
7475 i = (insn >> 16) & 0x1f;
7476 i = i + 1 - shift;
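                /* shift is the LSB position and i is now the field width
                   (msb - lsb + 1); rm == 15 below is the BFC form, which
                   inserts zeroes instead of a register. */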
7477 if (rm == 15) {
7d1b0095 7478 tmp = tcg_temp_new_i32();
5e3f878a 7479 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7480 } else {
5e3f878a 7481 tmp = load_reg(s, rm);
9ee6e8bb
PB
7482 }
7483 if (i != 32) {
5e3f878a 7484 tmp2 = load_reg(s, rd);
8f8e3aa4 7485 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7486 tcg_temp_free_i32(tmp2);
9ee6e8bb 7487 }
5e3f878a 7488 store_reg(s, rd, tmp);
9ee6e8bb
PB
7489 break;
7490 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7491 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7492 ARCH(6T2);
5e3f878a 7493 tmp = load_reg(s, rm);
9ee6e8bb
PB
7494 shift = (insn >> 7) & 0x1f;
7495 i = ((insn >> 16) & 0x1f) + 1;
7496 if (shift + i > 32)
7497 goto illegal_op;
7498 if (i < 32) {
7499 if (op1 & 0x20) {
5e3f878a 7500 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7501 } else {
5e3f878a 7502 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7503 }
7504 }
5e3f878a 7505 store_reg(s, rd, tmp);
9ee6e8bb
PB
7506 break;
7507 default:
7508 goto illegal_op;
7509 }
7510 break;
7511 }
7512 break;
7513 }
7514 do_ldst:
7515 /* Check for undefined extension instructions
7516          * per the ARM Bible, i.e.:
7517 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7518 */
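        /* Worked example (illustrative): the mask below is
           (0xf << 20) | (0xf << 4) = 0x00f000f0, so with op1 == 0x7 an
           encoding such as 0xe7f000f0 (bits [23:20] and [7:4] all set)
           falls in this space and is treated as UNDEFINED. */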
7519 sh = (0xf << 20) | (0xf << 4);
7520 if (op1 == 0x7 && ((insn & sh) == sh))
7521 {
7522 goto illegal_op;
7523 }
7524 /* load/store byte/word */
7525 rn = (insn >> 16) & 0xf;
7526 rd = (insn >> 12) & 0xf;
b0109805 7527 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7528 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7529 if (insn & (1 << 24))
b0109805 7530 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7531 if (insn & (1 << 20)) {
7532 /* load */
9ee6e8bb 7533 if (insn & (1 << 22)) {
b0109805 7534 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7535 } else {
b0109805 7536 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7537 }
9ee6e8bb
PB
7538 } else {
7539 /* store */
b0109805 7540 tmp = load_reg(s, rd);
9ee6e8bb 7541 if (insn & (1 << 22))
b0109805 7542 gen_st8(tmp, tmp2, i);
9ee6e8bb 7543 else
b0109805 7544 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7545 }
7546 if (!(insn & (1 << 24))) {
b0109805
PB
7547 gen_add_data_offset(s, insn, tmp2);
7548 store_reg(s, rn, tmp2);
7549 } else if (insn & (1 << 21)) {
7550 store_reg(s, rn, tmp2);
7551 } else {
7d1b0095 7552 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7553 }
7554 if (insn & (1 << 20)) {
7555 /* Complete the load. */
be5e7a76 7556 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7557 }
7558 break;
7559 case 0x08:
7560 case 0x09:
7561 {
7562 int j, n, user, loaded_base;
b0109805 7563 TCGv loaded_var;
9ee6e8bb
PB
7564 /* load/store multiple words */
7565 /* XXX: store correct base if write back */
7566 user = 0;
7567 if (insn & (1 << 22)) {
7568 if (IS_USER(s))
7569 goto illegal_op; /* only usable in supervisor mode */
7570
7571 if ((insn & (1 << 15)) == 0)
7572 user = 1;
7573 }
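            /* Bit 22 is the S bit: when set and r15 is not in the register
               list, the user-mode (banked) registers are transferred; when
               set with r15 in an LDM list, CPSR is also restored from SPSR
               after the transfer (handled at the end of this block). */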
7574 rn = (insn >> 16) & 0xf;
b0109805 7575 addr = load_reg(s, rn);
9ee6e8bb
PB
7576
7577 /* compute total size */
7578 loaded_base = 0;
a50f5b91 7579 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7580 n = 0;
7581 for(i=0;i<16;i++) {
7582 if (insn & (1 << i))
7583 n++;
7584 }
7585 /* XXX: test invalid n == 0 case ? */
7586 if (insn & (1 << 23)) {
7587 if (insn & (1 << 24)) {
7588 /* pre increment */
b0109805 7589 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7590 } else {
7591 /* post increment */
7592 }
7593 } else {
7594 if (insn & (1 << 24)) {
7595 /* pre decrement */
b0109805 7596 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7597 } else {
7598 /* post decrement */
7599 if (n != 1)
b0109805 7600 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7601 }
7602 }
7603 j = 0;
7604 for(i=0;i<16;i++) {
7605 if (insn & (1 << i)) {
7606 if (insn & (1 << 20)) {
7607 /* load */
b0109805 7608 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7609 if (user) {
b75263d6
JR
7610 tmp2 = tcg_const_i32(i);
7611 gen_helper_set_user_reg(tmp2, tmp);
7612 tcg_temp_free_i32(tmp2);
7d1b0095 7613 tcg_temp_free_i32(tmp);
9ee6e8bb 7614 } else if (i == rn) {
b0109805 7615 loaded_var = tmp;
9ee6e8bb
PB
7616 loaded_base = 1;
7617 } else {
be5e7a76 7618 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7619 }
7620 } else {
7621 /* store */
7622 if (i == 15) {
7623 /* special case: r15 = PC + 8 */
7624 val = (long)s->pc + 4;
7d1b0095 7625 tmp = tcg_temp_new_i32();
b0109805 7626 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7627 } else if (user) {
7d1b0095 7628 tmp = tcg_temp_new_i32();
b75263d6
JR
7629 tmp2 = tcg_const_i32(i);
7630 gen_helper_get_user_reg(tmp, tmp2);
7631 tcg_temp_free_i32(tmp2);
9ee6e8bb 7632 } else {
b0109805 7633 tmp = load_reg(s, i);
9ee6e8bb 7634 }
b0109805 7635 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7636 }
7637 j++;
7638 /* no need to add after the last transfer */
7639 if (j != n)
b0109805 7640 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7641 }
7642 }
7643 if (insn & (1 << 21)) {
7644 /* write back */
7645 if (insn & (1 << 23)) {
7646 if (insn & (1 << 24)) {
7647 /* pre increment */
7648 } else {
7649 /* post increment */
b0109805 7650 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7651 }
7652 } else {
7653 if (insn & (1 << 24)) {
7654 /* pre decrement */
7655 if (n != 1)
b0109805 7656 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7657 } else {
7658 /* post decrement */
b0109805 7659 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7660 }
7661 }
b0109805
PB
7662 store_reg(s, rn, addr);
7663 } else {
7d1b0095 7664 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7665 }
7666 if (loaded_base) {
b0109805 7667 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7668 }
7669 if ((insn & (1 << 22)) && !user) {
7670 /* Restore CPSR from SPSR. */
d9ba4830
PB
7671 tmp = load_cpu_field(spsr);
7672 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7673 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7674 s->is_jmp = DISAS_UPDATE;
7675 }
7676 }
7677 break;
7678 case 0xa:
7679 case 0xb:
7680 {
7681 int32_t offset;
7682
7683 /* branch (and link) */
7684 val = (int32_t)s->pc;
7685 if (insn & (1 << 24)) {
7d1b0095 7686 tmp = tcg_temp_new_i32();
5e3f878a
PB
7687 tcg_gen_movi_i32(tmp, val);
7688 store_reg(s, 14, tmp);
9ee6e8bb
PB
7689 }
7690 offset = (((int32_t)insn << 8) >> 8);
7691 val += (offset << 2) + 4;
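            /* offset is the 24-bit branch immediate, sign-extended and scaled
               to bytes; s->pc already points 4 bytes past this instruction,
               so the extra +4 yields the architectural PC+8 base. */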
7692 gen_jmp(s, val);
7693 }
7694 break;
7695 case 0xc:
7696 case 0xd:
7697 case 0xe:
7698 /* Coprocessor. */
7699 if (disas_coproc_insn(env, s, insn))
7700 goto illegal_op;
7701 break;
7702 case 0xf:
7703 /* swi */
5e3f878a 7704 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7705 s->is_jmp = DISAS_SWI;
7706 break;
7707 default:
7708 illegal_op:
bc4a0de0 7709 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7710 break;
7711 }
7712 }
7713}
7714
7715/* Return true if this is a Thumb-2 logical op. */
7716static int
7717thumb2_logic_op(int op)
7718{
7719 return (op < 8);
7720}
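/* Ops 0-4 (AND, BIC, ORR, ORN, EOR) are the logical operations handled by
   gen_thumb2_data_op() below; ops 5-7 also fall in the "< 8" range but are
   unallocated and rejected by its default case. */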
7721
7722/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7723 then set condition code flags based on the result of the operation.
7724 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7725 to the high bit of T1.
7726 Returns zero if the opcode is valid. */
7727
7728static int
396e467c 7729gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7730{
7731 int logic_cc;
7732
7733 logic_cc = 0;
7734 switch (op) {
7735 case 0: /* and */
396e467c 7736 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7737 logic_cc = conds;
7738 break;
7739 case 1: /* bic */
f669df27 7740 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7741 logic_cc = conds;
7742 break;
7743 case 2: /* orr */
396e467c 7744 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7745 logic_cc = conds;
7746 break;
7747 case 3: /* orn */
29501f1b 7748 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7749 logic_cc = conds;
7750 break;
7751 case 4: /* eor */
396e467c 7752 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7753 logic_cc = conds;
7754 break;
7755 case 8: /* add */
7756 if (conds)
396e467c 7757 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7758 else
396e467c 7759 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7760 break;
7761 case 10: /* adc */
7762 if (conds)
396e467c 7763 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7764 else
396e467c 7765 gen_adc(t0, t1);
9ee6e8bb
PB
7766 break;
7767 case 11: /* sbc */
7768 if (conds)
396e467c 7769 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7770 else
396e467c 7771 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7772 break;
7773 case 13: /* sub */
7774 if (conds)
396e467c 7775 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7776 else
396e467c 7777 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7778 break;
7779 case 14: /* rsb */
7780 if (conds)
396e467c 7781 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7782 else
396e467c 7783 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7784 break;
7785 default: /* 5, 6, 7, 9, 12, 15. */
7786 return 1;
7787 }
7788 if (logic_cc) {
396e467c 7789 gen_logic_CC(t0);
9ee6e8bb 7790 if (shifter_out)
396e467c 7791 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7792 }
7793 return 0;
7794}
7795
7796/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7797 is not legal. */
7798static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7799{
b0109805 7800 uint32_t insn, imm, shift, offset;
9ee6e8bb 7801 uint32_t rd, rn, rm, rs;
b26eefb6 7802 TCGv tmp;
6ddbc6e4
PB
7803 TCGv tmp2;
7804 TCGv tmp3;
b0109805 7805 TCGv addr;
a7812ae4 7806 TCGv_i64 tmp64;
9ee6e8bb
PB
7807 int op;
7808 int shiftop;
7809 int conds;
7810 int logic_cc;
7811
7812 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7813 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7814 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7815 16-bit instructions to get correct prefetch abort behavior. */
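            /* Descriptive note: the first half (hw1) has already loaded LR
               with the upper part of the target; the code below adds in hw2's
               11-bit offset and, for BLX, clears the low two bits to force an
               ARM-aligned target. */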
7816 insn = insn_hw1;
7817 if ((insn & (1 << 12)) == 0) {
be5e7a76 7818 ARCH(5);
9ee6e8bb
PB
7819 /* Second half of blx. */
7820 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7821 tmp = load_reg(s, 14);
7822 tcg_gen_addi_i32(tmp, tmp, offset);
7823 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7824
7d1b0095 7825 tmp2 = tcg_temp_new_i32();
b0109805 7826 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7827 store_reg(s, 14, tmp2);
7828 gen_bx(s, tmp);
9ee6e8bb
PB
7829 return 0;
7830 }
7831 if (insn & (1 << 11)) {
7832 /* Second half of bl. */
7833 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7834 tmp = load_reg(s, 14);
6a0d8a1d 7835 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7836
7d1b0095 7837 tmp2 = tcg_temp_new_i32();
b0109805 7838 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7839 store_reg(s, 14, tmp2);
7840 gen_bx(s, tmp);
9ee6e8bb
PB
7841 return 0;
7842 }
7843 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7844 /* Instruction spans a page boundary. Implement it as two
7845               16-bit instructions in case the second half causes a
7846 prefetch abort. */
7847 offset = ((int32_t)insn << 21) >> 9;
396e467c 7848 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7849 return 0;
7850 }
7851 /* Fall through to 32-bit decode. */
7852 }
7853
7854 insn = lduw_code(s->pc);
7855 s->pc += 2;
7856 insn |= (uint32_t)insn_hw1 << 16;
7857
7858 if ((insn & 0xf800e800) != 0xf000e800) {
7859 ARCH(6T2);
7860 }
7861
7862 rn = (insn >> 16) & 0xf;
7863 rs = (insn >> 12) & 0xf;
7864 rd = (insn >> 8) & 0xf;
7865 rm = insn & 0xf;
7866 switch ((insn >> 25) & 0xf) {
7867 case 0: case 1: case 2: case 3:
7868 /* 16-bit instructions. Should never happen. */
7869 abort();
7870 case 4:
7871 if (insn & (1 << 22)) {
7872 /* Other load/store, table branch. */
7873 if (insn & 0x01200000) {
7874 /* Load/store doubleword. */
7875 if (rn == 15) {
7d1b0095 7876 addr = tcg_temp_new_i32();
b0109805 7877 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7878 } else {
b0109805 7879 addr = load_reg(s, rn);
9ee6e8bb
PB
7880 }
7881 offset = (insn & 0xff) * 4;
7882 if ((insn & (1 << 23)) == 0)
7883 offset = -offset;
7884 if (insn & (1 << 24)) {
b0109805 7885 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7886 offset = 0;
7887 }
7888 if (insn & (1 << 20)) {
7889 /* ldrd */
b0109805
PB
7890 tmp = gen_ld32(addr, IS_USER(s));
7891 store_reg(s, rs, tmp);
7892 tcg_gen_addi_i32(addr, addr, 4);
7893 tmp = gen_ld32(addr, IS_USER(s));
7894 store_reg(s, rd, tmp);
9ee6e8bb
PB
7895 } else {
7896 /* strd */
b0109805
PB
7897 tmp = load_reg(s, rs);
7898 gen_st32(tmp, addr, IS_USER(s));
7899 tcg_gen_addi_i32(addr, addr, 4);
7900 tmp = load_reg(s, rd);
7901 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7902 }
7903 if (insn & (1 << 21)) {
7904 /* Base writeback. */
7905 if (rn == 15)
7906 goto illegal_op;
b0109805
PB
7907 tcg_gen_addi_i32(addr, addr, offset - 4);
7908 store_reg(s, rn, addr);
7909 } else {
7d1b0095 7910 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7911 }
7912 } else if ((insn & (1 << 23)) == 0) {
7913 /* Load/store exclusive word. */
3174f8e9 7914 addr = tcg_temp_local_new();
98a46317 7915 load_reg_var(s, addr, rn);
426f5abc 7916 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7917 if (insn & (1 << 20)) {
426f5abc 7918 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7919 } else {
426f5abc 7920 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7921 }
3174f8e9 7922 tcg_temp_free(addr);
9ee6e8bb
PB
7923 } else if ((insn & (1 << 6)) == 0) {
7924 /* Table Branch. */
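                /* TBB/TBH: load a byte (TBB) or halfword (TBH) from
                   Rn + Rm (Rm doubled for TBH), double it, and add it to
                   the PC to form a forward branch offset. */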
7925 if (rn == 15) {
7d1b0095 7926 addr = tcg_temp_new_i32();
b0109805 7927 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7928 } else {
b0109805 7929 addr = load_reg(s, rn);
9ee6e8bb 7930 }
b26eefb6 7931 tmp = load_reg(s, rm);
b0109805 7932 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7933 if (insn & (1 << 4)) {
7934 /* tbh */
b0109805 7935 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 7936 tcg_temp_free_i32(tmp);
b0109805 7937 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7938 } else { /* tbb */
7d1b0095 7939 tcg_temp_free_i32(tmp);
b0109805 7940 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7941 }
7d1b0095 7942 tcg_temp_free_i32(addr);
b0109805
PB
7943 tcg_gen_shli_i32(tmp, tmp, 1);
7944 tcg_gen_addi_i32(tmp, tmp, s->pc);
7945 store_reg(s, 15, tmp);
9ee6e8bb
PB
7946 } else {
7947 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7948 ARCH(7);
9ee6e8bb 7949 op = (insn >> 4) & 0x3;
426f5abc
PB
7950 if (op == 2) {
7951 goto illegal_op;
7952 }
3174f8e9 7953 addr = tcg_temp_local_new();
98a46317 7954 load_reg_var(s, addr, rn);
9ee6e8bb 7955 if (insn & (1 << 20)) {
426f5abc 7956 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7957 } else {
426f5abc 7958 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7959 }
3174f8e9 7960 tcg_temp_free(addr);
9ee6e8bb
PB
7961 }
7962 } else {
7963 /* Load/store multiple, RFE, SRS. */
7964 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7965 /* Not available in user mode. */
b0109805 7966 if (IS_USER(s))
9ee6e8bb
PB
7967 goto illegal_op;
7968 if (insn & (1 << 20)) {
7969 /* rfe */
b0109805
PB
7970 addr = load_reg(s, rn);
7971 if ((insn & (1 << 24)) == 0)
7972 tcg_gen_addi_i32(addr, addr, -8);
7973 /* Load PC into tmp and CPSR into tmp2. */
7974 tmp = gen_ld32(addr, 0);
7975 tcg_gen_addi_i32(addr, addr, 4);
7976 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7977 if (insn & (1 << 21)) {
7978 /* Base writeback. */
b0109805
PB
7979 if (insn & (1 << 24)) {
7980 tcg_gen_addi_i32(addr, addr, 4);
7981 } else {
7982 tcg_gen_addi_i32(addr, addr, -4);
7983 }
7984 store_reg(s, rn, addr);
7985 } else {
7d1b0095 7986 tcg_temp_free_i32(addr);
9ee6e8bb 7987 }
b0109805 7988 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7989 } else {
7990 /* srs */
7991 op = (insn & 0x1f);
7d1b0095 7992 addr = tcg_temp_new_i32();
39ea3d4e
PM
7993 tmp = tcg_const_i32(op);
7994 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7995 tcg_temp_free_i32(tmp);
9ee6e8bb 7996 if ((insn & (1 << 24)) == 0) {
b0109805 7997 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7998 }
b0109805
PB
7999 tmp = load_reg(s, 14);
8000 gen_st32(tmp, addr, 0);
8001 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8002 tmp = tcg_temp_new_i32();
b0109805
PB
8003 gen_helper_cpsr_read(tmp);
8004 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8005 if (insn & (1 << 21)) {
8006 if ((insn & (1 << 24)) == 0) {
b0109805 8007 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8008 } else {
b0109805 8009 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8010 }
39ea3d4e
PM
8011 tmp = tcg_const_i32(op);
8012 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8013 tcg_temp_free_i32(tmp);
b0109805 8014 } else {
7d1b0095 8015 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8016 }
8017 }
8018 } else {
5856d44e
YO
8019 int i, loaded_base = 0;
8020 TCGv loaded_var;
9ee6e8bb 8021 /* Load/store multiple. */
b0109805 8022 addr = load_reg(s, rn);
9ee6e8bb
PB
8023 offset = 0;
8024 for (i = 0; i < 16; i++) {
8025 if (insn & (1 << i))
8026 offset += 4;
8027 }
8028 if (insn & (1 << 24)) {
b0109805 8029 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8030 }
8031
5856d44e 8032 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8033 for (i = 0; i < 16; i++) {
8034 if ((insn & (1 << i)) == 0)
8035 continue;
8036 if (insn & (1 << 20)) {
8037 /* Load. */
b0109805 8038 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8039 if (i == 15) {
b0109805 8040 gen_bx(s, tmp);
5856d44e
YO
8041 } else if (i == rn) {
8042 loaded_var = tmp;
8043 loaded_base = 1;
9ee6e8bb 8044 } else {
b0109805 8045 store_reg(s, i, tmp);
9ee6e8bb
PB
8046 }
8047 } else {
8048 /* Store. */
b0109805
PB
8049 tmp = load_reg(s, i);
8050 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8051 }
b0109805 8052 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8053 }
5856d44e
YO
8054 if (loaded_base) {
8055 store_reg(s, rn, loaded_var);
8056 }
9ee6e8bb
PB
8057 if (insn & (1 << 21)) {
8058 /* Base register writeback. */
8059 if (insn & (1 << 24)) {
b0109805 8060 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8061 }
8062 /* Fault if writeback register is in register list. */
8063 if (insn & (1 << rn))
8064 goto illegal_op;
b0109805
PB
8065 store_reg(s, rn, addr);
8066 } else {
7d1b0095 8067 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8068 }
8069 }
8070 }
8071 break;
2af9ab77
JB
8072 case 5:
8073
9ee6e8bb 8074 op = (insn >> 21) & 0xf;
2af9ab77
JB
8075 if (op == 6) {
8076 /* Halfword pack. */
8077 tmp = load_reg(s, rn);
8078 tmp2 = load_reg(s, rm);
8079 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8080 if (insn & (1 << 5)) {
8081 /* pkhtb */
8082 if (shift == 0)
8083 shift = 31;
8084 tcg_gen_sari_i32(tmp2, tmp2, shift);
8085 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8086 tcg_gen_ext16u_i32(tmp2, tmp2);
8087 } else {
8088 /* pkhbt */
8089 if (shift)
8090 tcg_gen_shli_i32(tmp2, tmp2, shift);
8091 tcg_gen_ext16u_i32(tmp, tmp);
8092 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8093 }
8094 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8095 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8096 store_reg(s, rd, tmp);
8097 } else {
2af9ab77
JB
8098 /* Data processing register constant shift. */
8099 if (rn == 15) {
7d1b0095 8100 tmp = tcg_temp_new_i32();
2af9ab77
JB
8101 tcg_gen_movi_i32(tmp, 0);
8102 } else {
8103 tmp = load_reg(s, rn);
8104 }
8105 tmp2 = load_reg(s, rm);
8106
8107 shiftop = (insn >> 4) & 3;
8108 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8109 conds = (insn & (1 << 20)) != 0;
8110 logic_cc = (conds && thumb2_logic_op(op));
8111 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8112 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8113 goto illegal_op;
7d1b0095 8114 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8115 if (rd != 15) {
8116 store_reg(s, rd, tmp);
8117 } else {
7d1b0095 8118 tcg_temp_free_i32(tmp);
2af9ab77 8119 }
3174f8e9 8120 }
9ee6e8bb
PB
8121 break;
8122 case 13: /* Misc data processing. */
8123 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8124 if (op < 4 && (insn & 0xf000) != 0xf000)
8125 goto illegal_op;
8126 switch (op) {
8127 case 0: /* Register controlled shift. */
8984bd2e
PB
8128 tmp = load_reg(s, rn);
8129 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8130 if ((insn & 0x70) != 0)
8131 goto illegal_op;
8132 op = (insn >> 21) & 3;
8984bd2e
PB
8133 logic_cc = (insn & (1 << 20)) != 0;
8134 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8135 if (logic_cc)
8136 gen_logic_CC(tmp);
21aeb343 8137 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8138 break;
8139 case 1: /* Sign/zero extend. */
5e3f878a 8140 tmp = load_reg(s, rm);
9ee6e8bb 8141 shift = (insn >> 4) & 3;
1301f322 8142 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8143 rotate, a shift is sufficient. */
8144 if (shift != 0)
f669df27 8145 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8146 op = (insn >> 20) & 7;
8147 switch (op) {
5e3f878a
PB
8148 case 0: gen_sxth(tmp); break;
8149 case 1: gen_uxth(tmp); break;
8150 case 2: gen_sxtb16(tmp); break;
8151 case 3: gen_uxtb16(tmp); break;
8152 case 4: gen_sxtb(tmp); break;
8153 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8154 default: goto illegal_op;
8155 }
8156 if (rn != 15) {
5e3f878a 8157 tmp2 = load_reg(s, rn);
9ee6e8bb 8158 if ((op >> 1) == 1) {
5e3f878a 8159 gen_add16(tmp, tmp2);
9ee6e8bb 8160 } else {
5e3f878a 8161 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8162 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8163 }
8164 }
5e3f878a 8165 store_reg(s, rd, tmp);
9ee6e8bb
PB
8166 break;
8167 case 2: /* SIMD add/subtract. */
8168 op = (insn >> 20) & 7;
8169 shift = (insn >> 4) & 7;
8170 if ((op & 3) == 3 || (shift & 3) == 3)
8171 goto illegal_op;
6ddbc6e4
PB
8172 tmp = load_reg(s, rn);
8173 tmp2 = load_reg(s, rm);
8174 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8175 tcg_temp_free_i32(tmp2);
6ddbc6e4 8176 store_reg(s, rd, tmp);
9ee6e8bb
PB
8177 break;
8178 case 3: /* Other data processing. */
8179 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8180 if (op < 4) {
8181 /* Saturating add/subtract. */
d9ba4830
PB
8182 tmp = load_reg(s, rn);
8183 tmp2 = load_reg(s, rm);
9ee6e8bb 8184 if (op & 1)
4809c612
JB
8185 gen_helper_double_saturate(tmp, tmp);
8186 if (op & 2)
d9ba4830 8187 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8188 else
d9ba4830 8189 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8190 tcg_temp_free_i32(tmp2);
9ee6e8bb 8191 } else {
d9ba4830 8192 tmp = load_reg(s, rn);
9ee6e8bb
PB
8193 switch (op) {
8194 case 0x0a: /* rbit */
d9ba4830 8195 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8196 break;
8197 case 0x08: /* rev */
66896cb8 8198 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8199 break;
8200 case 0x09: /* rev16 */
d9ba4830 8201 gen_rev16(tmp);
9ee6e8bb
PB
8202 break;
8203 case 0x0b: /* revsh */
d9ba4830 8204 gen_revsh(tmp);
9ee6e8bb
PB
8205 break;
8206 case 0x10: /* sel */
d9ba4830 8207 tmp2 = load_reg(s, rm);
7d1b0095 8208 tmp3 = tcg_temp_new_i32();
6ddbc6e4 8209 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 8210 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8211 tcg_temp_free_i32(tmp3);
8212 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8213 break;
8214 case 0x18: /* clz */
d9ba4830 8215 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8216 break;
8217 default:
8218 goto illegal_op;
8219 }
8220 }
d9ba4830 8221 store_reg(s, rd, tmp);
9ee6e8bb
PB
8222 break;
8223 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8224 op = (insn >> 4) & 0xf;
d9ba4830
PB
8225 tmp = load_reg(s, rn);
8226 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8227 switch ((insn >> 20) & 7) {
8228 case 0: /* 32 x 32 -> 32 */
d9ba4830 8229 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8230 tcg_temp_free_i32(tmp2);
9ee6e8bb 8231 if (rs != 15) {
d9ba4830 8232 tmp2 = load_reg(s, rs);
9ee6e8bb 8233 if (op)
d9ba4830 8234 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8235 else
d9ba4830 8236 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8237 tcg_temp_free_i32(tmp2);
9ee6e8bb 8238 }
9ee6e8bb
PB
8239 break;
8240 case 1: /* 16 x 16 -> 32 */
d9ba4830 8241 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8242 tcg_temp_free_i32(tmp2);
9ee6e8bb 8243 if (rs != 15) {
d9ba4830
PB
8244 tmp2 = load_reg(s, rs);
8245 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8246 tcg_temp_free_i32(tmp2);
9ee6e8bb 8247 }
9ee6e8bb
PB
8248 break;
8249 case 2: /* Dual multiply add. */
8250 case 4: /* Dual multiply subtract. */
8251 if (op)
d9ba4830
PB
8252 gen_swap_half(tmp2);
8253 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8254 if (insn & (1 << 22)) {
e1d177b9 8255 /* This subtraction cannot overflow. */
d9ba4830 8256 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8257 } else {
e1d177b9
PM
8258 /* This addition cannot overflow 32 bits;
8259 * however it may overflow considered as a signed
8260 * operation, in which case we must set the Q flag.
8261 */
8262 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8263 }
7d1b0095 8264 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8265 if (rs != 15)
8266 {
d9ba4830
PB
8267 tmp2 = load_reg(s, rs);
8268 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8269 tcg_temp_free_i32(tmp2);
9ee6e8bb 8270 }
9ee6e8bb
PB
8271 break;
8272 case 3: /* 32 * 16 -> 32msb */
8273 if (op)
d9ba4830 8274 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8275 else
d9ba4830 8276 gen_sxth(tmp2);
a7812ae4
PB
8277 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8278 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8279 tmp = tcg_temp_new_i32();
a7812ae4 8280 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8281 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8282 if (rs != 15)
8283 {
d9ba4830
PB
8284 tmp2 = load_reg(s, rs);
8285 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8286 tcg_temp_free_i32(tmp2);
9ee6e8bb 8287 }
9ee6e8bb 8288 break;
838fa72d
AJ
8289 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8290 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8291 if (rs != 15) {
838fa72d
AJ
8292 tmp = load_reg(s, rs);
8293 if (insn & (1 << 20)) {
8294 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8295 } else {
838fa72d 8296 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8297 }
2c0262af 8298 }
838fa72d
AJ
8299 if (insn & (1 << 4)) {
8300 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8301 }
8302 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8303 tmp = tcg_temp_new_i32();
838fa72d
AJ
8304 tcg_gen_trunc_i64_i32(tmp, tmp64);
8305 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8306 break;
8307 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8308 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8309 tcg_temp_free_i32(tmp2);
9ee6e8bb 8310 if (rs != 15) {
d9ba4830
PB
8311 tmp2 = load_reg(s, rs);
8312 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8313 tcg_temp_free_i32(tmp2);
5fd46862 8314 }
9ee6e8bb 8315 break;
2c0262af 8316 }
d9ba4830 8317 store_reg(s, rd, tmp);
2c0262af 8318 break;
9ee6e8bb
PB
8319 case 6: case 7: /* 64-bit multiply, Divide. */
8320 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8321 tmp = load_reg(s, rn);
8322 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8323 if ((op & 0x50) == 0x10) {
8324 /* sdiv, udiv */
8325 if (!arm_feature(env, ARM_FEATURE_DIV))
8326 goto illegal_op;
8327 if (op & 0x20)
5e3f878a 8328 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8329 else
5e3f878a 8330 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8331 tcg_temp_free_i32(tmp2);
5e3f878a 8332 store_reg(s, rd, tmp);
9ee6e8bb
PB
8333 } else if ((op & 0xe) == 0xc) {
8334 /* Dual multiply accumulate long. */
8335 if (op & 1)
5e3f878a
PB
8336 gen_swap_half(tmp2);
8337 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8338 if (op & 0x10) {
5e3f878a 8339 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8340 } else {
5e3f878a 8341 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8342 }
7d1b0095 8343 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8344 /* BUGFIX */
8345 tmp64 = tcg_temp_new_i64();
8346 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8347 tcg_temp_free_i32(tmp);
a7812ae4
PB
8348 gen_addq(s, tmp64, rs, rd);
8349 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8350 tcg_temp_free_i64(tmp64);
2c0262af 8351 } else {
9ee6e8bb
PB
8352 if (op & 0x20) {
8353 /* Unsigned 64-bit multiply */
a7812ae4 8354 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8355 } else {
9ee6e8bb
PB
8356 if (op & 8) {
8357 /* smlalxy */
5e3f878a 8358 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8359 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8360 tmp64 = tcg_temp_new_i64();
8361 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8362 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8363 } else {
8364 /* Signed 64-bit multiply */
a7812ae4 8365 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8366 }
b5ff1b31 8367 }
9ee6e8bb
PB
8368 if (op & 4) {
8369 /* umaal */
a7812ae4
PB
8370 gen_addq_lo(s, tmp64, rs);
8371 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8372 } else if (op & 0x40) {
8373 /* 64-bit accumulate. */
a7812ae4 8374 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8375 }
a7812ae4 8376 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8377 tcg_temp_free_i64(tmp64);
5fd46862 8378 }
2c0262af 8379 break;
9ee6e8bb
PB
8380 }
8381 break;
8382 case 6: case 7: case 14: case 15:
8383 /* Coprocessor. */
8384 if (((insn >> 24) & 3) == 3) {
8385 /* Translate into the equivalent ARM encoding. */
f06053e3 8386 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8387 if (disas_neon_data_insn(env, s, insn))
8388 goto illegal_op;
8389 } else {
8390 if (insn & (1 << 28))
8391 goto illegal_op;
8392 if (disas_coproc_insn (env, s, insn))
8393 goto illegal_op;
8394 }
8395 break;
8396 case 8: case 9: case 10: case 11:
8397 if (insn & (1 << 15)) {
8398 /* Branches, misc control. */
8399 if (insn & 0x5000) {
8400 /* Unconditional branch. */
8401 /* signextend(hw1[10:0]) -> offset[:12]. */
8402 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8403                 /* hw2[10:0] -> offset[11:1]. */
8404 offset |= (insn & 0x7ff) << 1;
8405 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8406 offset[24:22] already have the same value because of the
8407 sign extension above. */
8408 offset ^= ((~insn) & (1 << 13)) << 10;
8409 offset ^= ((~insn) & (1 << 11)) << 11;
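                /* The two XORs fold in hw2's J1/J2 bits: offset bits 23 and
                   22 become ~(J1 ^ S) and ~(J2 ^ S), the I1/I2 bits of the
                   Thumb-2 BL/B.W encoding. */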
8410
9ee6e8bb
PB
8411 if (insn & (1 << 14)) {
8412 /* Branch and link. */
3174f8e9 8413 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8414 }
3b46e624 8415
b0109805 8416 offset += s->pc;
9ee6e8bb
PB
8417 if (insn & (1 << 12)) {
8418 /* b/bl */
b0109805 8419 gen_jmp(s, offset);
9ee6e8bb
PB
8420 } else {
8421 /* blx */
b0109805 8422 offset &= ~(uint32_t)2;
be5e7a76 8423 /* thumb2 bx, no need to check */
b0109805 8424 gen_bx_im(s, offset);
2c0262af 8425 }
9ee6e8bb
PB
8426 } else if (((insn >> 23) & 7) == 7) {
8427 /* Misc control */
8428 if (insn & (1 << 13))
8429 goto illegal_op;
8430
8431 if (insn & (1 << 26)) {
8432 /* Secure monitor call (v6Z) */
8433 goto illegal_op; /* not implemented. */
2c0262af 8434 } else {
9ee6e8bb
PB
8435 op = (insn >> 20) & 7;
8436 switch (op) {
8437 case 0: /* msr cpsr. */
8438 if (IS_M(env)) {
8984bd2e
PB
8439 tmp = load_reg(s, rn);
8440 addr = tcg_const_i32(insn & 0xff);
8441 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8442 tcg_temp_free_i32(addr);
7d1b0095 8443 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8444 gen_lookup_tb(s);
8445 break;
8446 }
8447 /* fall through */
8448 case 1: /* msr spsr. */
8449 if (IS_M(env))
8450 goto illegal_op;
2fbac54b
FN
8451 tmp = load_reg(s, rn);
8452 if (gen_set_psr(s,
9ee6e8bb 8453 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8454 op == 1, tmp))
9ee6e8bb
PB
8455 goto illegal_op;
8456 break;
8457 case 2: /* cps, nop-hint. */
8458 if (((insn >> 8) & 7) == 0) {
8459 gen_nop_hint(s, insn & 0xff);
8460 }
8461 /* Implemented as NOP in user mode. */
8462 if (IS_USER(s))
8463 break;
8464 offset = 0;
8465 imm = 0;
8466 if (insn & (1 << 10)) {
8467 if (insn & (1 << 7))
8468 offset |= CPSR_A;
8469 if (insn & (1 << 6))
8470 offset |= CPSR_I;
8471 if (insn & (1 << 5))
8472 offset |= CPSR_F;
8473 if (insn & (1 << 9))
8474 imm = CPSR_A | CPSR_I | CPSR_F;
8475 }
8476 if (insn & (1 << 8)) {
8477 offset |= 0x1f;
8478 imm |= (insn & 0x1f);
8479 }
8480 if (offset) {
2fbac54b 8481 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8482 }
8483 break;
8484 case 3: /* Special control operations. */
426f5abc 8485 ARCH(7);
9ee6e8bb
PB
8486 op = (insn >> 4) & 0xf;
8487 switch (op) {
8488 case 2: /* clrex */
426f5abc 8489 gen_clrex(s);
9ee6e8bb
PB
8490 break;
8491 case 4: /* dsb */
8492 case 5: /* dmb */
8493 case 6: /* isb */
8494 /* These execute as NOPs. */
9ee6e8bb
PB
8495 break;
8496 default:
8497 goto illegal_op;
8498 }
8499 break;
8500 case 4: /* bxj */
8501 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8502 tmp = load_reg(s, rn);
8503 gen_bx(s, tmp);
9ee6e8bb
PB
8504 break;
8505 case 5: /* Exception return. */
b8b45b68
RV
8506 if (IS_USER(s)) {
8507 goto illegal_op;
8508 }
8509 if (rn != 14 || rd != 15) {
8510 goto illegal_op;
8511 }
8512 tmp = load_reg(s, rn);
8513 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8514 gen_exception_return(s, tmp);
8515 break;
9ee6e8bb 8516 case 6: /* mrs cpsr. */
7d1b0095 8517 tmp = tcg_temp_new_i32();
9ee6e8bb 8518 if (IS_M(env)) {
8984bd2e
PB
8519 addr = tcg_const_i32(insn & 0xff);
8520 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8521 tcg_temp_free_i32(addr);
9ee6e8bb 8522 } else {
8984bd2e 8523 gen_helper_cpsr_read(tmp);
9ee6e8bb 8524 }
8984bd2e 8525 store_reg(s, rd, tmp);
9ee6e8bb
PB
8526 break;
8527 case 7: /* mrs spsr. */
8528 /* Not accessible in user mode. */
8529 if (IS_USER(s) || IS_M(env))
8530 goto illegal_op;
d9ba4830
PB
8531 tmp = load_cpu_field(spsr);
8532 store_reg(s, rd, tmp);
9ee6e8bb 8533 break;
2c0262af
FB
8534 }
8535 }
9ee6e8bb
PB
8536 } else {
8537 /* Conditional branch. */
8538 op = (insn >> 22) & 0xf;
8539 /* Generate a conditional jump to next instruction. */
8540 s->condlabel = gen_new_label();
d9ba4830 8541 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8542 s->condjmp = 1;
8543
8544 /* offset[11:1] = insn[10:0] */
8545 offset = (insn & 0x7ff) << 1;
8546 /* offset[17:12] = insn[21:16]. */
8547 offset |= (insn & 0x003f0000) >> 4;
8548 /* offset[31:20] = insn[26]. */
8549 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8550 /* offset[18] = insn[13]. */
8551 offset |= (insn & (1 << 13)) << 5;
8552 /* offset[19] = insn[11]. */
8553 offset |= (insn & (1 << 11)) << 8;
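                /* Together these form a signed 21-bit byte offset
                   (S:J2:J1:imm6:imm11:0) as in the Thumb-2 conditional branch
                   (T3) encoding; the shift trick on insn[26] also
                   sign-extends it into the upper bits. */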
8554
8555 /* jump to the offset */
b0109805 8556 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8557 }
8558 } else {
8559 /* Data processing immediate. */
8560 if (insn & (1 << 25)) {
8561 if (insn & (1 << 24)) {
8562 if (insn & (1 << 20))
8563 goto illegal_op;
8564 /* Bitfield/Saturate. */
8565 op = (insn >> 21) & 7;
8566 imm = insn & 0x1f;
8567 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8568 if (rn == 15) {
7d1b0095 8569 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8570 tcg_gen_movi_i32(tmp, 0);
8571 } else {
8572 tmp = load_reg(s, rn);
8573 }
9ee6e8bb
PB
8574 switch (op) {
8575 case 2: /* Signed bitfield extract. */
8576 imm++;
8577 if (shift + imm > 32)
8578 goto illegal_op;
8579 if (imm < 32)
6ddbc6e4 8580 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8581 break;
8582 case 6: /* Unsigned bitfield extract. */
8583 imm++;
8584 if (shift + imm > 32)
8585 goto illegal_op;
8586 if (imm < 32)
6ddbc6e4 8587 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8588 break;
8589 case 3: /* Bitfield insert/clear. */
8590 if (imm < shift)
8591 goto illegal_op;
8592 imm = imm + 1 - shift;
8593 if (imm != 32) {
6ddbc6e4 8594 tmp2 = load_reg(s, rd);
8f8e3aa4 8595 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8596 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8597 }
8598 break;
8599 case 7:
8600 goto illegal_op;
8601 default: /* Saturate. */
9ee6e8bb
PB
8602 if (shift) {
8603 if (op & 1)
6ddbc6e4 8604 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8605 else
6ddbc6e4 8606 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8607 }
6ddbc6e4 8608 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8609 if (op & 4) {
8610 /* Unsigned. */
9ee6e8bb 8611 if ((op & 1) && shift == 0)
6ddbc6e4 8612 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8613 else
6ddbc6e4 8614 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8615 } else {
9ee6e8bb 8616 /* Signed. */
9ee6e8bb 8617 if ((op & 1) && shift == 0)
6ddbc6e4 8618 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8619 else
6ddbc6e4 8620 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8621 }
b75263d6 8622 tcg_temp_free_i32(tmp2);
9ee6e8bb 8623 break;
2c0262af 8624 }
6ddbc6e4 8625 store_reg(s, rd, tmp);
9ee6e8bb
PB
8626 } else {
8627 imm = ((insn & 0x04000000) >> 15)
8628 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8629 if (insn & (1 << 22)) {
8630 /* 16-bit immediate. */
8631 imm |= (insn >> 4) & 0xf000;
8632 if (insn & (1 << 23)) {
8633 /* movt */
5e3f878a 8634 tmp = load_reg(s, rd);
86831435 8635 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8636 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8637 } else {
9ee6e8bb 8638 /* movw */
7d1b0095 8639 tmp = tcg_temp_new_i32();
5e3f878a 8640 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8641 }
8642 } else {
9ee6e8bb
PB
8643 /* Add/sub 12-bit immediate. */
8644 if (rn == 15) {
b0109805 8645 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8646 if (insn & (1 << 23))
b0109805 8647 offset -= imm;
9ee6e8bb 8648 else
b0109805 8649 offset += imm;
7d1b0095 8650 tmp = tcg_temp_new_i32();
5e3f878a 8651 tcg_gen_movi_i32(tmp, offset);
2c0262af 8652 } else {
5e3f878a 8653 tmp = load_reg(s, rn);
9ee6e8bb 8654 if (insn & (1 << 23))
5e3f878a 8655 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8656 else
5e3f878a 8657 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8658 }
9ee6e8bb 8659 }
5e3f878a 8660 store_reg(s, rd, tmp);
191abaa2 8661 }
9ee6e8bb
PB
8662 } else {
8663 int shifter_out = 0;
8664 /* modified 12-bit immediate. */
8665 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8666 imm = (insn & 0xff);
8667 switch (shift) {
8668 case 0: /* XY */
8669 /* Nothing to do. */
8670 break;
8671 case 1: /* 00XY00XY */
8672 imm |= imm << 16;
8673 break;
8674 case 2: /* XY00XY00 */
8675 imm |= imm << 16;
8676 imm <<= 8;
8677 break;
8678 case 3: /* XYXYXYXY */
8679 imm |= imm << 16;
8680 imm |= imm << 8;
8681 break;
8682 default: /* Rotated constant. */
8683 shift = (shift << 1) | (imm >> 7);
8684 imm |= 0x80;
8685 imm = imm << (32 - shift);
8686 shifter_out = 1;
8687 break;
b5ff1b31 8688 }
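            /* Worked example (illustrative): imm8 = 0xab with shift == 3
               expands to 0xabababab; larger shift values place an 8-bit
               constant with its top bit forced set at a rotated position,
               e.g. a final shift of 8 with imm8 = 0x2a yields 0xaa000000
               (0xaa rotated right by 8). */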
7d1b0095 8689 tmp2 = tcg_temp_new_i32();
3174f8e9 8690 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8691 rn = (insn >> 16) & 0xf;
3174f8e9 8692 if (rn == 15) {
7d1b0095 8693 tmp = tcg_temp_new_i32();
3174f8e9
FN
8694 tcg_gen_movi_i32(tmp, 0);
8695 } else {
8696 tmp = load_reg(s, rn);
8697 }
9ee6e8bb
PB
8698 op = (insn >> 21) & 0xf;
8699 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8700 shifter_out, tmp, tmp2))
9ee6e8bb 8701 goto illegal_op;
7d1b0095 8702 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8703 rd = (insn >> 8) & 0xf;
8704 if (rd != 15) {
3174f8e9
FN
8705 store_reg(s, rd, tmp);
8706 } else {
7d1b0095 8707 tcg_temp_free_i32(tmp);
2c0262af 8708 }
2c0262af 8709 }
9ee6e8bb
PB
8710 }
8711 break;
8712 case 12: /* Load/store single data item. */
8713 {
8714 int postinc = 0;
8715 int writeback = 0;
b0109805 8716 int user;
9ee6e8bb
PB
8717 if ((insn & 0x01100000) == 0x01000000) {
8718 if (disas_neon_ls_insn(env, s, insn))
c1713132 8719 goto illegal_op;
9ee6e8bb
PB
8720 break;
8721 }
a2fdc890
PM
8722 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8723 if (rs == 15) {
8724 if (!(insn & (1 << 20))) {
8725 goto illegal_op;
8726 }
8727 if (op != 2) {
8728                 /* Byte or halfword load space with dest == r15: memory hints.
8729 * Catch them early so we don't emit pointless addressing code.
8730 * This space is a mix of:
8731 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8732 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8733 * cores)
8734 * unallocated hints, which must be treated as NOPs
8735 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8736 * which is easiest for the decoding logic
8737 * Some space which must UNDEF
8738 */
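                /* op1 here is insn[24:23] and op2 is insn[11:6]; returning 0
                   emits no code for the hint, while returning 1 makes the
                   caller treat the instruction as illegal. */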
8739 int op1 = (insn >> 23) & 3;
8740 int op2 = (insn >> 6) & 0x3f;
8741 if (op & 2) {
8742 goto illegal_op;
8743 }
8744 if (rn == 15) {
8745 /* UNPREDICTABLE or unallocated hint */
8746 return 0;
8747 }
8748 if (op1 & 1) {
8749 return 0; /* PLD* or unallocated hint */
8750 }
8751 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8752 return 0; /* PLD* or unallocated hint */
8753 }
8754 /* UNDEF space, or an UNPREDICTABLE */
8755 return 1;
8756 }
8757 }
b0109805 8758 user = IS_USER(s);
9ee6e8bb 8759 if (rn == 15) {
7d1b0095 8760 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8761 /* PC relative. */
8762 /* s->pc has already been incremented by 4. */
8763 imm = s->pc & 0xfffffffc;
8764 if (insn & (1 << 23))
8765 imm += insn & 0xfff;
8766 else
8767 imm -= insn & 0xfff;
b0109805 8768 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8769 } else {
b0109805 8770 addr = load_reg(s, rn);
9ee6e8bb
PB
8771 if (insn & (1 << 23)) {
8772 /* Positive offset. */
8773 imm = insn & 0xfff;
b0109805 8774 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8775 } else {
9ee6e8bb 8776 imm = insn & 0xff;
2a0308c5
PM
8777 switch ((insn >> 8) & 0xf) {
8778 case 0x0: /* Shifted Register. */
9ee6e8bb 8779 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8780 if (shift > 3) {
8781 tcg_temp_free_i32(addr);
18c9b560 8782 goto illegal_op;
2a0308c5 8783 }
b26eefb6 8784 tmp = load_reg(s, rm);
9ee6e8bb 8785 if (shift)
b26eefb6 8786 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8787 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8788 tcg_temp_free_i32(tmp);
9ee6e8bb 8789 break;
2a0308c5 8790 case 0xc: /* Negative offset. */
b0109805 8791 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8792 break;
2a0308c5 8793 case 0xe: /* User privilege. */
b0109805
PB
8794 tcg_gen_addi_i32(addr, addr, imm);
8795 user = 1;
9ee6e8bb 8796 break;
2a0308c5 8797 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8798 imm = -imm;
8799 /* Fall through. */
2a0308c5 8800 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8801 postinc = 1;
8802 writeback = 1;
8803 break;
2a0308c5 8804 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8805 imm = -imm;
8806 /* Fall through. */
2a0308c5 8807 case 0xf: /* Pre-increment. */
b0109805 8808 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8809 writeback = 1;
8810 break;
8811 default:
2a0308c5 8812 tcg_temp_free_i32(addr);
b7bcbe95 8813 goto illegal_op;
9ee6e8bb
PB
8814 }
8815 }
8816 }
9ee6e8bb
PB
8817 if (insn & (1 << 20)) {
8818 /* Load. */
a2fdc890
PM
8819 switch (op) {
8820 case 0: tmp = gen_ld8u(addr, user); break;
8821 case 4: tmp = gen_ld8s(addr, user); break;
8822 case 1: tmp = gen_ld16u(addr, user); break;
8823 case 5: tmp = gen_ld16s(addr, user); break;
8824 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8825 default:
8826 tcg_temp_free_i32(addr);
8827 goto illegal_op;
a2fdc890
PM
8828 }
8829 if (rs == 15) {
8830 gen_bx(s, tmp);
9ee6e8bb 8831 } else {
a2fdc890 8832 store_reg(s, rs, tmp);
9ee6e8bb
PB
8833 }
8834 } else {
8835 /* Store. */
b0109805 8836 tmp = load_reg(s, rs);
9ee6e8bb 8837 switch (op) {
b0109805
PB
8838 case 0: gen_st8(tmp, addr, user); break;
8839 case 1: gen_st16(tmp, addr, user); break;
8840 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8841 default:
8842 tcg_temp_free_i32(addr);
8843 goto illegal_op;
b7bcbe95 8844 }
2c0262af 8845 }
9ee6e8bb 8846 if (postinc)
b0109805
PB
8847 tcg_gen_addi_i32(addr, addr, imm);
8848 if (writeback) {
8849 store_reg(s, rn, addr);
8850 } else {
7d1b0095 8851 tcg_temp_free_i32(addr);
b0109805 8852 }
9ee6e8bb
PB
8853 }
8854 break;
8855 default:
8856 goto illegal_op;
2c0262af 8857 }
9ee6e8bb
PB
8858 return 0;
8859illegal_op:
8860 return 1;
2c0262af
FB
8861}
8862
9ee6e8bb 8863static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8864{
8865 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8866 int32_t offset;
8867 int i;
b26eefb6 8868 TCGv tmp;
d9ba4830 8869 TCGv tmp2;
b0109805 8870 TCGv addr;
99c475ab 8871
9ee6e8bb
PB
8872 if (s->condexec_mask) {
8873 cond = s->condexec_cond;
bedd2912
JB
8874 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8875 s->condlabel = gen_new_label();
8876 gen_test_cc(cond ^ 1, s->condlabel);
8877 s->condjmp = 1;
8878 }
9ee6e8bb
PB
8879 }
8880
b5ff1b31 8881 insn = lduw_code(s->pc);
99c475ab 8882 s->pc += 2;
b5ff1b31 8883
99c475ab
FB
8884 switch (insn >> 12) {
8885 case 0: case 1:
396e467c 8886
99c475ab
FB
8887 rd = insn & 7;
8888 op = (insn >> 11) & 3;
8889 if (op == 3) {
8890 /* add/subtract */
8891 rn = (insn >> 3) & 7;
396e467c 8892 tmp = load_reg(s, rn);
99c475ab
FB
8893 if (insn & (1 << 10)) {
8894 /* immediate */
7d1b0095 8895 tmp2 = tcg_temp_new_i32();
396e467c 8896 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8897 } else {
8898 /* reg */
8899 rm = (insn >> 6) & 7;
396e467c 8900 tmp2 = load_reg(s, rm);
99c475ab 8901 }
9ee6e8bb
PB
8902 if (insn & (1 << 9)) {
8903 if (s->condexec_mask)
396e467c 8904 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8905 else
396e467c 8906 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8907 } else {
8908 if (s->condexec_mask)
396e467c 8909 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8910 else
396e467c 8911 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8912 }
7d1b0095 8913 tcg_temp_free_i32(tmp2);
396e467c 8914 store_reg(s, rd, tmp);
99c475ab
FB
8915 } else {
8916 /* shift immediate */
8917 rm = (insn >> 3) & 7;
8918 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8919 tmp = load_reg(s, rm);
8920 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8921 if (!s->condexec_mask)
8922 gen_logic_CC(tmp);
8923 store_reg(s, rd, tmp);
99c475ab
FB
8924 }
8925 break;
8926 case 2: case 3:
8927 /* arithmetic large immediate */
8928 op = (insn >> 11) & 3;
8929 rd = (insn >> 8) & 0x7;
396e467c 8930 if (op == 0) { /* mov */
7d1b0095 8931 tmp = tcg_temp_new_i32();
396e467c 8932 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8933 if (!s->condexec_mask)
396e467c
FN
8934 gen_logic_CC(tmp);
8935 store_reg(s, rd, tmp);
8936 } else {
8937 tmp = load_reg(s, rd);
7d1b0095 8938 tmp2 = tcg_temp_new_i32();
396e467c
FN
8939 tcg_gen_movi_i32(tmp2, insn & 0xff);
8940 switch (op) {
8941 case 1: /* cmp */
8942 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8943 tcg_temp_free_i32(tmp);
8944 tcg_temp_free_i32(tmp2);
396e467c
FN
8945 break;
8946 case 2: /* add */
8947 if (s->condexec_mask)
8948 tcg_gen_add_i32(tmp, tmp, tmp2);
8949 else
8950 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 8951 tcg_temp_free_i32(tmp2);
396e467c
FN
8952 store_reg(s, rd, tmp);
8953 break;
8954 case 3: /* sub */
8955 if (s->condexec_mask)
8956 tcg_gen_sub_i32(tmp, tmp, tmp2);
8957 else
8958 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 8959 tcg_temp_free_i32(tmp2);
396e467c
FN
8960 store_reg(s, rd, tmp);
8961 break;
8962 }
99c475ab 8963 }
99c475ab
FB
8964 break;
8965 case 4:
8966 if (insn & (1 << 11)) {
8967 rd = (insn >> 8) & 7;
5899f386
FB
8968 /* load pc-relative. Bit 1 of PC is ignored. */
8969 val = s->pc + 2 + ((insn & 0xff) * 4);
8970 val &= ~(uint32_t)2;
7d1b0095 8971 addr = tcg_temp_new_i32();
b0109805
PB
8972 tcg_gen_movi_i32(addr, val);
8973 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 8974 tcg_temp_free_i32(addr);
b0109805 8975 store_reg(s, rd, tmp);
99c475ab
FB
8976 break;
8977 }
8978 if (insn & (1 << 10)) {
8979 /* data processing extended or blx */
8980 rd = (insn & 7) | ((insn >> 4) & 8);
8981 rm = (insn >> 3) & 0xf;
8982 op = (insn >> 8) & 3;
8983 switch (op) {
8984 case 0: /* add */
396e467c
FN
8985 tmp = load_reg(s, rd);
8986 tmp2 = load_reg(s, rm);
8987 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8988 tcg_temp_free_i32(tmp2);
396e467c 8989 store_reg(s, rd, tmp);
99c475ab
FB
8990 break;
8991 case 1: /* cmp */
396e467c
FN
8992 tmp = load_reg(s, rd);
8993 tmp2 = load_reg(s, rm);
8994 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8995 tcg_temp_free_i32(tmp2);
8996 tcg_temp_free_i32(tmp);
99c475ab
FB
8997 break;
8998 case 2: /* mov/cpy */
396e467c
FN
8999 tmp = load_reg(s, rm);
9000 store_reg(s, rd, tmp);
99c475ab
FB
9001 break;
9002 case 3:/* branch [and link] exchange thumb register */
b0109805 9003 tmp = load_reg(s, rm);
99c475ab 9004 if (insn & (1 << 7)) {
be5e7a76 9005 ARCH(5);
99c475ab 9006 val = (uint32_t)s->pc | 1;
7d1b0095 9007 tmp2 = tcg_temp_new_i32();
b0109805
PB
9008 tcg_gen_movi_i32(tmp2, val);
9009 store_reg(s, 14, tmp2);
99c475ab 9010 }
be5e7a76 9011 /* already thumb, no need to check */
d9ba4830 9012 gen_bx(s, tmp);
99c475ab
FB
9013 break;
9014 }
9015 break;
9016 }
9017
9018 /* data processing register */
9019 rd = insn & 7;
9020 rm = (insn >> 3) & 7;
9021 op = (insn >> 6) & 0xf;
9022 if (op == 2 || op == 3 || op == 4 || op == 7) {
9023 /* the shift/rotate ops want the operands backwards */
9024 val = rm;
9025 rm = rd;
9026 rd = val;
9027 val = 1;
9028 } else {
9029 val = 0;
9030 }
9031
396e467c 9032 if (op == 9) { /* neg */
7d1b0095 9033 tmp = tcg_temp_new_i32();
396e467c
FN
9034 tcg_gen_movi_i32(tmp, 0);
9035 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9036 tmp = load_reg(s, rd);
9037 } else {
9038 TCGV_UNUSED(tmp);
9039 }
99c475ab 9040
396e467c 9041 tmp2 = load_reg(s, rm);
5899f386 9042 switch (op) {
99c475ab 9043 case 0x0: /* and */
396e467c 9044 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9045 if (!s->condexec_mask)
396e467c 9046 gen_logic_CC(tmp);
99c475ab
FB
9047 break;
9048 case 0x1: /* eor */
396e467c 9049 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9050 if (!s->condexec_mask)
396e467c 9051 gen_logic_CC(tmp);
99c475ab
FB
9052 break;
9053 case 0x2: /* lsl */
9ee6e8bb 9054 if (s->condexec_mask) {
396e467c 9055 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 9056 } else {
396e467c
FN
9057 gen_helper_shl_cc(tmp2, tmp2, tmp);
9058 gen_logic_CC(tmp2);
9ee6e8bb 9059 }
99c475ab
FB
9060 break;
9061 case 0x3: /* lsr */
9ee6e8bb 9062 if (s->condexec_mask) {
396e467c 9063 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 9064 } else {
396e467c
FN
9065 gen_helper_shr_cc(tmp2, tmp2, tmp);
9066 gen_logic_CC(tmp2);
9ee6e8bb 9067 }
99c475ab
FB
9068 break;
9069 case 0x4: /* asr */
9ee6e8bb 9070 if (s->condexec_mask) {
396e467c 9071 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 9072 } else {
396e467c
FN
9073 gen_helper_sar_cc(tmp2, tmp2, tmp);
9074 gen_logic_CC(tmp2);
9ee6e8bb 9075 }
99c475ab
FB
9076 break;
9077 case 0x5: /* adc */
9ee6e8bb 9078 if (s->condexec_mask)
396e467c 9079 gen_adc(tmp, tmp2);
9ee6e8bb 9080 else
396e467c 9081 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
9082 break;
9083 case 0x6: /* sbc */
9ee6e8bb 9084 if (s->condexec_mask)
396e467c 9085 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9086 else
396e467c 9087 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
9088 break;
9089 case 0x7: /* ror */
9ee6e8bb 9090 if (s->condexec_mask) {
f669df27
AJ
9091 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9092 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9093 } else {
396e467c
FN
9094 gen_helper_ror_cc(tmp2, tmp2, tmp);
9095 gen_logic_CC(tmp2);
9ee6e8bb 9096 }
99c475ab
FB
9097 break;
9098 case 0x8: /* tst */
396e467c
FN
9099 tcg_gen_and_i32(tmp, tmp, tmp2);
9100 gen_logic_CC(tmp);
99c475ab 9101 rd = 16;
5899f386 9102 break;
99c475ab 9103 case 0x9: /* neg */
9ee6e8bb 9104 if (s->condexec_mask)
396e467c 9105 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9106 else
396e467c 9107 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9108 break;
9109 case 0xa: /* cmp */
396e467c 9110 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9111 rd = 16;
9112 break;
9113 case 0xb: /* cmn */
396e467c 9114 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
9115 rd = 16;
9116 break;
9117 case 0xc: /* orr */
396e467c 9118 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9119 if (!s->condexec_mask)
396e467c 9120 gen_logic_CC(tmp);
99c475ab
FB
9121 break;
9122 case 0xd: /* mul */
7b2919a0 9123 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9124 if (!s->condexec_mask)
396e467c 9125 gen_logic_CC(tmp);
99c475ab
FB
9126 break;
9127 case 0xe: /* bic */
f669df27 9128 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9129 if (!s->condexec_mask)
396e467c 9130 gen_logic_CC(tmp);
99c475ab
FB
9131 break;
9132 case 0xf: /* mvn */
396e467c 9133 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9134 if (!s->condexec_mask)
396e467c 9135 gen_logic_CC(tmp2);
99c475ab 9136 val = 1;
5899f386 9137 rm = rd;
99c475ab
FB
9138 break;
9139 }
9140 if (rd != 16) {
396e467c
FN
9141 if (val) {
9142 store_reg(s, rm, tmp2);
9143 if (op != 0xf)
7d1b0095 9144 tcg_temp_free_i32(tmp);
396e467c
FN
9145 } else {
9146 store_reg(s, rd, tmp);
7d1b0095 9147 tcg_temp_free_i32(tmp2);
396e467c
FN
9148 }
9149 } else {
7d1b0095
PM
9150 tcg_temp_free_i32(tmp);
9151 tcg_temp_free_i32(tmp2);
99c475ab
FB
9152 }
9153 break;
9154
9155 case 5:
9156 /* load/store register offset. */
9157 rd = insn & 7;
9158 rn = (insn >> 3) & 7;
9159 rm = (insn >> 6) & 7;
9160 op = (insn >> 9) & 7;
b0109805 9161 addr = load_reg(s, rn);
b26eefb6 9162 tmp = load_reg(s, rm);
b0109805 9163 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9164 tcg_temp_free_i32(tmp);
99c475ab
FB
9165
9166 if (op < 3) /* store */
b0109805 9167 tmp = load_reg(s, rd);
99c475ab
FB
9168
9169 switch (op) {
9170 case 0: /* str */
b0109805 9171 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9172 break;
9173 case 1: /* strh */
b0109805 9174 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9175 break;
9176 case 2: /* strb */
b0109805 9177 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9178 break;
9179 case 3: /* ldrsb */
b0109805 9180 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9181 break;
9182 case 4: /* ldr */
b0109805 9183 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9184 break;
9185 case 5: /* ldrh */
b0109805 9186 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9187 break;
9188 case 6: /* ldrb */
b0109805 9189 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9190 break;
9191 case 7: /* ldrsh */
b0109805 9192 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9193 break;
9194 }
9195 if (op >= 3) /* load */
b0109805 9196 store_reg(s, rd, tmp);
7d1b0095 9197 tcg_temp_free_i32(addr);
99c475ab
FB
9198 break;
9199
9200 case 6:
9201 /* load/store word immediate offset */
9202 rd = insn & 7;
9203 rn = (insn >> 3) & 7;
b0109805 9204 addr = load_reg(s, rn);
99c475ab 9205 val = (insn >> 4) & 0x7c;
b0109805 9206 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9207
9208 if (insn & (1 << 11)) {
9209 /* load */
b0109805
PB
9210 tmp = gen_ld32(addr, IS_USER(s));
9211 store_reg(s, rd, tmp);
99c475ab
FB
9212 } else {
9213 /* store */
b0109805
PB
9214 tmp = load_reg(s, rd);
9215 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9216 }
7d1b0095 9217 tcg_temp_free_i32(addr);
99c475ab
FB
9218 break;
9219
9220 case 7:
9221 /* load/store byte immediate offset */
9222 rd = insn & 7;
9223 rn = (insn >> 3) & 7;
b0109805 9224 addr = load_reg(s, rn);
99c475ab 9225 val = (insn >> 6) & 0x1f;
b0109805 9226 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9227
9228 if (insn & (1 << 11)) {
9229 /* load */
b0109805
PB
9230 tmp = gen_ld8u(addr, IS_USER(s));
9231 store_reg(s, rd, tmp);
99c475ab
FB
9232 } else {
9233 /* store */
b0109805
PB
9234 tmp = load_reg(s, rd);
9235 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9236 }
7d1b0095 9237 tcg_temp_free_i32(addr);
99c475ab
FB
9238 break;
9239
9240 case 8:
9241 /* load/store halfword immediate offset */
9242 rd = insn & 7;
9243 rn = (insn >> 3) & 7;
b0109805 9244 addr = load_reg(s, rn);
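            /* halfword form: imm5 (bits [10:6]) is scaled by 2, hence the
               shift by 5 and the even mask below */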
99c475ab 9245 val = (insn >> 5) & 0x3e;
b0109805 9246 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9247
9248 if (insn & (1 << 11)) {
9249 /* load */
b0109805
PB
9250 tmp = gen_ld16u(addr, IS_USER(s));
9251 store_reg(s, rd, tmp);
99c475ab
FB
9252 } else {
9253 /* store */
b0109805
PB
9254 tmp = load_reg(s, rd);
9255 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9256 }
7d1b0095 9257 tcg_temp_free_i32(addr);
99c475ab
FB
9258 break;
9259
9260 case 9:
9261 /* load/store from stack */
9262 rd = (insn >> 8) & 7;
b0109805 9263 addr = load_reg(s, 13);
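            /* SP-relative form: 8-bit immediate (bits [7:0]) scaled by 4 */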
99c475ab 9264 val = (insn & 0xff) * 4;
b0109805 9265 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9266
9267 if (insn & (1 << 11)) {
9268 /* load */
b0109805
PB
9269 tmp = gen_ld32(addr, IS_USER(s));
9270 store_reg(s, rd, tmp);
99c475ab
FB
9271 } else {
9272 /* store */
b0109805
PB
9273 tmp = load_reg(s, rd);
9274 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9275 }
7d1b0095 9276 tcg_temp_free_i32(addr);
99c475ab
FB
9277 break;
9278
9279 case 10:
9281 /* add an immediate to PC or SP (ADR / ADD Rd, SP, #imm) */
9281 rd = (insn >> 8) & 7;
5899f386
FB
9282 if (insn & (1 << 11)) {
9283 /* SP */
5e3f878a 9284 tmp = load_reg(s, 13);
5899f386
FB
9285 } else {
9286 /* PC. bit 1 is ignored. */
7d1b0095 9287 tmp = tcg_temp_new_i32();
5e3f878a 9288 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9289 }
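            /* both the PC and SP forms take an 8-bit immediate scaled by 4 */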
99c475ab 9290 val = (insn & 0xff) * 4;
5e3f878a
PB
9291 tcg_gen_addi_i32(tmp, tmp, val);
9292 store_reg(s, rd, tmp);
99c475ab
FB
9293 break;
9294
9295 case 11:
9296 /* misc */
9297 op = (insn >> 8) & 0xf;
9298 switch (op) {
9299 case 0:
9300 /* adjust stack pointer */
b26eefb6 9301 tmp = load_reg(s, 13);
99c475ab
FB
9302 val = (insn & 0x7f) * 4;
9303 if (insn & (1 << 7))
6a0d8a1d 9304 val = -(int32_t)val;
b26eefb6
PB
9305 tcg_gen_addi_i32(tmp, tmp, val);
9306 store_reg(s, 13, tmp);
99c475ab
FB
9307 break;
9308
9ee6e8bb
PB
9309 case 2: /* sign/zero extend. */
9310 ARCH(6);
9311 rd = insn & 7;
9312 rm = (insn >> 3) & 7;
b0109805 9313 tmp = load_reg(s, rm);
9ee6e8bb 9314 switch ((insn >> 6) & 3) {
b0109805
PB
9315 case 0: gen_sxth(tmp); break;
9316 case 1: gen_sxtb(tmp); break;
9317 case 2: gen_uxth(tmp); break;
9318 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9319 }
b0109805 9320 store_reg(s, rd, tmp);
9ee6e8bb 9321 break;
99c475ab
FB
9322 case 4: case 5: case 0xc: case 0xd:
9323 /* push/pop */
b0109805 9324 addr = load_reg(s, 13);
5899f386
FB
9325 if (insn & (1 << 8))
9326 offset = 4;
99c475ab 9327 else
5899f386
FB
9328 offset = 0;
9329 for (i = 0; i < 8; i++) {
9330 if (insn & (1 << i))
9331 offset += 4;
9332 }
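            /* offset is the total transfer size: 4 bytes per listed low
               register plus 4 more when bit 8 selects LR (push) or PC (pop);
               a push pre-decrements the address by this amount */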
9333 if ((insn & (1 << 11)) == 0) {
b0109805 9334 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9335 }
99c475ab
FB
9336 for (i = 0; i < 8; i++) {
9337 if (insn & (1 << i)) {
9338 if (insn & (1 << 11)) {
9339 /* pop */
b0109805
PB
9340 tmp = gen_ld32(addr, IS_USER(s));
9341 store_reg(s, i, tmp);
99c475ab
FB
9342 } else {
9343 /* push */
b0109805
PB
9344 tmp = load_reg(s, i);
9345 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9346 }
5899f386 9347 /* advance to the next address. */
b0109805 9348 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9349 }
9350 }
a50f5b91 9351 TCGV_UNUSED(tmp);
99c475ab
FB
9352 if (insn & (1 << 8)) {
9353 if (insn & (1 << 11)) {
9354 /* pop pc */
b0109805 9355 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9356 /* don't set the pc until the rest of the instruction
9357 has completed */
9358 } else {
9359 /* push lr */
b0109805
PB
9360 tmp = load_reg(s, 14);
9361 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9362 }
b0109805 9363 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9364 }
5899f386 9365 if ((insn & (1 << 11)) == 0) {
b0109805 9366 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9367 }
99c475ab 9368 /* write back the new stack pointer */
b0109805 9369 store_reg(s, 13, addr);
99c475ab 9370 /* set the new PC value */
be5e7a76
DES
9371 if ((insn & 0x0900) == 0x0900) {
9372 store_reg_from_load(env, s, 15, tmp);
9373 }
99c475ab
FB
9374 break;
9375
9ee6e8bb
PB
9376 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9377 rm = insn & 7;
d9ba4830 9378 tmp = load_reg(s, rm);
9ee6e8bb
PB
9379 s->condlabel = gen_new_label();
9380 s->condjmp = 1;
9381 if (insn & (1 << 11))
cb63669a 9382 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9383 else
cb63669a 9384 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9385 tcg_temp_free_i32(tmp);
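            /* branch offset is (i:imm5) << 1, with imm5 in bits [7:3] and the
               i bit at bit 9; the two masked shifts below assemble it */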
9ee6e8bb
PB
9386 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9387 val = (uint32_t)s->pc + 2;
9388 val += offset;
9389 gen_jmp(s, val);
9390 break;
9391
9392 case 15: /* IT, nop-hint. */
9393 if ((insn & 0xf) == 0) {
9394 gen_nop_hint(s, (insn >> 4) & 0xf);
9395 break;
9396 }
9397 /* If Then. */
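            /* bits [3:1] of firstcond are kept in condexec_cond; its low bit
               rides at the top of the 5-bit condexec_mask along with the mask
               field, where the per-insn IT advance in the translate loop
               picks it up */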
9398 s->condexec_cond = (insn >> 4) & 0xe;
9399 s->condexec_mask = insn & 0x1f;
9400 /* No actual code generated for this insn, just set up state. */
9401 break;
9402
06c949e6 9403 case 0xe: /* bkpt */
be5e7a76 9404 ARCH(5);
bc4a0de0 9405 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9406 break;
9407
9ee6e8bb
PB
9408 case 0xa: /* rev */
9409 ARCH(6);
9410 rn = (insn >> 3) & 0x7;
9411 rd = insn & 0x7;
b0109805 9412 tmp = load_reg(s, rn);
9ee6e8bb 9413 switch ((insn >> 6) & 3) {
66896cb8 9414 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9415 case 1: gen_rev16(tmp); break;
9416 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9417 default: goto illegal_op;
9418 }
b0109805 9419 store_reg(s, rd, tmp);
9ee6e8bb
PB
9420 break;
9421
9422 case 6: /* cps */
9423 ARCH(6);
9424 if (IS_USER(s))
9425 break;
9426 if (IS_M(env)) {
8984bd2e 9427 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9428 /* PRIMASK */
8984bd2e
PB
9429 if (insn & 1) {
9430 addr = tcg_const_i32(16);
9431 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9432 tcg_temp_free_i32(addr);
8984bd2e 9433 }
9ee6e8bb 9434 /* FAULTMASK */
8984bd2e
PB
9435 if (insn & 2) {
9436 addr = tcg_const_i32(17);
9437 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9438 tcg_temp_free_i32(addr);
8984bd2e 9439 }
b75263d6 9440 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9441 gen_lookup_tb(s);
9442 } else {
9443 if (insn & (1 << 4))
9444 shift = CPSR_A | CPSR_I | CPSR_F;
9445 else
9446 shift = 0;
fa26df03 9447 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9448 }
9449 break;
9450
99c475ab
FB
9451 default:
9452 goto undef;
9453 }
9454 break;
9455
9456 case 12:
a7d3970d 9457 {
99c475ab 9458 /* load/store multiple */
a7d3970d
PM
9459 TCGv loaded_var;
9460 TCGV_UNUSED(loaded_var);
99c475ab 9461 rn = (insn >> 8) & 0x7;
b0109805 9462 addr = load_reg(s, rn);
99c475ab
FB
9463 for (i = 0; i < 8; i++) {
9464 if (insn & (1 << i)) {
99c475ab
FB
9465 if (insn & (1 << 11)) {
9466 /* load */
b0109805 9467 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9468 if (i == rn) {
9469 loaded_var = tmp;
9470 } else {
9471 store_reg(s, i, tmp);
9472 }
99c475ab
FB
9473 } else {
9474 /* store */
b0109805
PB
9475 tmp = load_reg(s, i);
9476 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9477 }
5899f386 9478 /* advance to the next address */
b0109805 9479 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9480 }
9481 }
b0109805 9482 if ((insn & (1 << rn)) == 0) {
a7d3970d 9483 /* base reg not in list: base register writeback */
b0109805
PB
9484 store_reg(s, rn, addr);
9485 } else {
a7d3970d
PM
9486 /* base reg in list: if load, complete it now */
9487 if (insn & (1 << 11)) {
9488 store_reg(s, rn, loaded_var);
9489 }
7d1b0095 9490 tcg_temp_free_i32(addr);
b0109805 9491 }
99c475ab 9492 break;
a7d3970d 9493 }
99c475ab
FB
9494 case 13:
9495 /* conditional branch or swi */
9496 cond = (insn >> 8) & 0xf;
9497 if (cond == 0xe)
9498 goto undef;
9499
9500 if (cond == 0xf) {
9501 /* swi */
422ebf69 9502 gen_set_pc_im(s->pc);
9ee6e8bb 9503 s->is_jmp = DISAS_SWI;
99c475ab
FB
9504 break;
9505 }
9506 /* generate a conditional jump to next instruction */
e50e6a20 9507 s->condlabel = gen_new_label();
d9ba4830 9508 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9509 s->condjmp = 1;
99c475ab
FB
9510
9511 /* jump to the offset */
5899f386 9512 val = (uint32_t)s->pc + 2;
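            /* sign-extend the 8-bit branch immediate; it is scaled by 2 when
               added to the PC value below */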
99c475ab 9513 offset = ((int32_t)insn << 24) >> 24;
5899f386 9514 val += offset << 1;
8aaca4c0 9515 gen_jmp(s, val);
99c475ab
FB
9516 break;
9517
9518 case 14:
358bf29e 9519 if (insn & (1 << 11)) {
9ee6e8bb
PB
9520 if (disas_thumb2_insn(env, s, insn))
9521 goto undef32;
358bf29e
PB
9522 break;
9523 }
9ee6e8bb 9524 /* unconditional branch */
99c475ab
FB
9525 val = (uint32_t)s->pc;
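            /* sign-extend the 11-bit branch immediate before scaling by 2 */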
9526 offset = ((int32_t)insn << 21) >> 21;
9527 val += (offset << 1) + 2;
8aaca4c0 9528 gen_jmp(s, val);
99c475ab
FB
9529 break;
9530
9531 case 15:
9ee6e8bb 9532 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9533 goto undef32;
9ee6e8bb 9534 break;
99c475ab
FB
9535 }
9536 return;
9ee6e8bb 9537undef32:
bc4a0de0 9538 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9539 return;
9540illegal_op:
99c475ab 9541undef:
bc4a0de0 9542 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9543}
9544
2c0262af
FB
9545/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9546 basic block 'tb'. If search_pc is TRUE, also generate PC
9547 information for each intermediate instruction. */
2cfc5f17
TS
9548static inline void gen_intermediate_code_internal(CPUState *env,
9549 TranslationBlock *tb,
9550 int search_pc)
2c0262af
FB
9551{
9552 DisasContext dc1, *dc = &dc1;
a1d1bb31 9553 CPUBreakpoint *bp;
2c0262af
FB
9554 uint16_t *gen_opc_end;
9555 int j, lj;
0fa85d43 9556 target_ulong pc_start;
b5ff1b31 9557 uint32_t next_page_start;
2e70f6ef
PB
9558 int num_insns;
9559 int max_insns;
3b46e624 9560
2c0262af 9561 /* generate intermediate code */
0fa85d43 9562 pc_start = tb->pc;
3b46e624 9563
2c0262af
FB
9564 dc->tb = tb;
9565
2c0262af 9566 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9567
9568 dc->is_jmp = DISAS_NEXT;
9569 dc->pc = pc_start;
8aaca4c0 9570 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9571 dc->condjmp = 0;
7204ab88 9572 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9573 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9574 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9575#if !defined(CONFIG_USER_ONLY)
61f74d6a 9576 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9577#endif
5df8bac1 9578 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9579 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9580 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9581 cpu_F0s = tcg_temp_new_i32();
9582 cpu_F1s = tcg_temp_new_i32();
9583 cpu_F0d = tcg_temp_new_i64();
9584 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9585 cpu_V0 = cpu_F0d;
9586 cpu_V1 = cpu_F1d;
e677137d 9587 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9588 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9589 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9590 lj = -1;
2e70f6ef
PB
9591 num_insns = 0;
9592 max_insns = tb->cflags & CF_COUNT_MASK;
9593 if (max_insns == 0)
9594 max_insns = CF_COUNT_MASK;
9595
9596 gen_icount_start();
e12ce78d 9597
3849902c
PM
9598 tcg_clear_temp_count();
9599
e12ce78d
PM
9600 /* A note on handling of the condexec (IT) bits:
9601 *
9602 * We want to avoid the overhead of having to write the updated condexec
9603 * bits back to the CPUState for every instruction in an IT block. So:
9604 * (1) if the condexec bits are not already zero then we write
9605 * zero back into the CPUState now. This avoids complications trying
9606 * to do it at the end of the block. (For example if we don't do this
9607 * it's hard to identify whether we can safely skip writing condexec
9608 * at the end of the TB, which we definitely want to do for the case
9609 * where a TB doesn't do anything with the IT state at all.)
9610 * (2) if we are going to leave the TB then we call gen_set_condexec()
9611 * which will write the correct value into CPUState if zero is wrong.
9612 * This is done both for leaving the TB at the end, and for leaving
9613 * it because of an exception we know will happen, which is done in
9614 * gen_exception_insn(). The latter is necessary because we need to
9615 * leave the TB with the PC/IT state just prior to execution of the
9616 * instruction which caused the exception.
9617 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9618 * then the CPUState will be wrong and we need to reset it.
9619 * This is handled in the same way as restoration of the
9620 * PC in these situations: we will be called again with search_pc=1
9621 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9622 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9623 * this to restore the condexec bits.
e12ce78d
PM
9624 *
9625 * Note that there are no instructions which can read the condexec
9626 * bits, and none which can write non-static values to them, so
9627 * we don't need to care about whether CPUState is correct in the
9628 * middle of a TB.
9629 */
9630
9ee6e8bb
PB
9631 /* Reset the conditional execution bits immediately. This avoids
9632 complications trying to do it at the end of the block. */
98eac7ca 9633 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9634 {
7d1b0095 9635 TCGv tmp = tcg_temp_new_i32();
8f01245e 9636 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9637 store_cpu_field(tmp, condexec_bits);
8f01245e 9638 }
2c0262af 9639 do {
fbb4a2e3
PB
9640#ifdef CONFIG_USER_ONLY
9641 /* Intercept jump to the magic kernel page. */
9642 if (dc->pc >= 0xffff0000) {
9643 /* We always get here via a jump, so know we are not in a
9644 conditional execution block. */
9645 gen_exception(EXCP_KERNEL_TRAP);
9646 dc->is_jmp = DISAS_UPDATE;
9647 break;
9648 }
9649#else
9ee6e8bb
PB
9650 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9651 /* We always get here via a jump, so know we are not in a
9652 conditional execution block. */
d9ba4830 9653 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9654 dc->is_jmp = DISAS_UPDATE;
9655 break;
9ee6e8bb
PB
9656 }
9657#endif
9658
72cf2d4f
BS
9659 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9660 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9661 if (bp->pc == dc->pc) {
bc4a0de0 9662 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9663 /* Advance PC so that clearing the breakpoint will
9664 invalidate this TB. */
9665 dc->pc += 2;
9666 goto done_generating;
1fddef4b
FB
9667 break;
9668 }
9669 }
9670 }
2c0262af
FB
9671 if (search_pc) {
9672 j = gen_opc_ptr - gen_opc_buf;
9673 if (lj < j) {
9674 lj++;
9675 while (lj < j)
9676 gen_opc_instr_start[lj++] = 0;
9677 }
0fa85d43 9678 gen_opc_pc[lj] = dc->pc;
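            /* packed as cond in bits [7:4] and mask in bits [3:0], matching
               the CONDEXEC TB flag layout read at the top of this function,
               so restore_state_to_opc() can copy it back into condexec_bits */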
e12ce78d 9679 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9680 gen_opc_instr_start[lj] = 1;
2e70f6ef 9681 gen_opc_icount[lj] = num_insns;
2c0262af 9682 }
e50e6a20 9683
2e70f6ef
PB
9684 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9685 gen_io_start();
9686
5642463a
PM
9687 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9688 tcg_gen_debug_insn_start(dc->pc);
9689 }
9690
7204ab88 9691 if (dc->thumb) {
9ee6e8bb
PB
9692 disas_thumb_insn(env, dc);
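            /* advance the IT state: the next condition bit shifts out of the
               top of the 5-bit mask into the low bit of the condition, and
               the block ends once the mask is exhausted */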
9693 if (dc->condexec_mask) {
9694 dc->condexec_cond = (dc->condexec_cond & 0xe)
9695 | ((dc->condexec_mask >> 4) & 1);
9696 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9697 if (dc->condexec_mask == 0) {
9698 dc->condexec_cond = 0;
9699 }
9700 }
9701 } else {
9702 disas_arm_insn(env, dc);
9703 }
e50e6a20
FB
9704
9705 if (dc->condjmp && !dc->is_jmp) {
9706 gen_set_label(dc->condlabel);
9707 dc->condjmp = 0;
9708 }
3849902c
PM
9709
9710 if (tcg_check_temp_count()) {
9711 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9712 }
9713
aaf2d97d 9714 /* Translation stops when a conditional branch is encountered.
e50e6a20 9715 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9716 * Also stop translation when a page boundary is reached. This
bf20dc07 9717 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9718 num_insns ++;
1fddef4b
FB
9719 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9720 !env->singlestep_enabled &&
1b530a6d 9721 !singlestep &&
2e70f6ef
PB
9722 dc->pc < next_page_start &&
9723 num_insns < max_insns);
9724
9725 if (tb->cflags & CF_LAST_IO) {
9726 if (dc->condjmp) {
9727 /* FIXME: This can theoretically happen with self-modifying
9728 code. */
9729 cpu_abort(env, "IO on conditional branch instruction");
9730 }
9731 gen_io_end();
9732 }
9ee6e8bb 9733
b5ff1b31 9734 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9735 instruction was a conditional branch or trap, and the PC has
9736 already been written. */
551bd27f 9737 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9738 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9739 if (dc->condjmp) {
9ee6e8bb
PB
9740 gen_set_condexec(dc);
9741 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9742 gen_exception(EXCP_SWI);
9ee6e8bb 9743 } else {
d9ba4830 9744 gen_exception(EXCP_DEBUG);
9ee6e8bb 9745 }
e50e6a20
FB
9746 gen_set_label(dc->condlabel);
9747 }
9748 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9749 gen_set_pc_im(dc->pc);
e50e6a20 9750 dc->condjmp = 0;
8aaca4c0 9751 }
9ee6e8bb
PB
9752 gen_set_condexec(dc);
9753 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9754 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9755 } else {
9756 /* FIXME: Single stepping a WFI insn will not halt
9757 the CPU. */
d9ba4830 9758 gen_exception(EXCP_DEBUG);
9ee6e8bb 9759 }
8aaca4c0 9760 } else {
9ee6e8bb
PB
9761 /* While branches must always occur at the end of an IT block,
9762 there are a few other things that can cause us to terminate
9763 the TB in the middle of an IT block:
9764 - Exception generating instructions (bkpt, swi, undefined).
9765 - Page boundaries.
9766 - Hardware watchpoints.
9767 Hardware breakpoints have already been handled and skip this code.
9768 */
9769 gen_set_condexec(dc);
8aaca4c0 9770 switch(dc->is_jmp) {
8aaca4c0 9771 case DISAS_NEXT:
6e256c93 9772 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9773 break;
9774 default:
9775 case DISAS_JUMP:
9776 case DISAS_UPDATE:
9777 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9778 tcg_gen_exit_tb(0);
8aaca4c0
FB
9779 break;
9780 case DISAS_TB_JUMP:
9781 /* nothing more to generate */
9782 break;
9ee6e8bb 9783 case DISAS_WFI:
d9ba4830 9784 gen_helper_wfi();
9ee6e8bb
PB
9785 break;
9786 case DISAS_SWI:
d9ba4830 9787 gen_exception(EXCP_SWI);
9ee6e8bb 9788 break;
8aaca4c0 9789 }
e50e6a20
FB
9790 if (dc->condjmp) {
9791 gen_set_label(dc->condlabel);
9ee6e8bb 9792 gen_set_condexec(dc);
6e256c93 9793 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9794 dc->condjmp = 0;
9795 }
2c0262af 9796 }
2e70f6ef 9797
9ee6e8bb 9798done_generating:
2e70f6ef 9799 gen_icount_end(tb, num_insns);
2c0262af
FB
9800 *gen_opc_ptr = INDEX_op_end;
9801
9802#ifdef DEBUG_DISAS
8fec2b8c 9803 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9804 qemu_log("----------------\n");
9805 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9806 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9807 qemu_log("\n");
2c0262af
FB
9808 }
9809#endif
b5ff1b31
FB
9810 if (search_pc) {
9811 j = gen_opc_ptr - gen_opc_buf;
9812 lj++;
9813 while (lj <= j)
9814 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9815 } else {
2c0262af 9816 tb->size = dc->pc - pc_start;
2e70f6ef 9817 tb->icount = num_insns;
b5ff1b31 9818 }
2c0262af
FB
9819}
9820
2cfc5f17 9821void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9822{
2cfc5f17 9823 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9824}
9825
2cfc5f17 9826void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9827{
2cfc5f17 9828 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9829}
9830
b5ff1b31
FB
9831static const char *cpu_mode_names[16] = {
9832 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9833 "???", "???", "???", "und", "???", "???", "???", "sys"
9834};
9ee6e8bb 9835
9a78eead 9836void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9837 int flags)
2c0262af
FB
9838{
9839 int i;
06e80fc9 9840#if 0
bc380d17 9841 union {
b7bcbe95
FB
9842 uint32_t i;
9843 float s;
9844 } s0, s1;
9845 CPU_DoubleU d;
a94a6abf
PB
9846 /* ??? This assumes float64 and double have the same layout.
9847 Oh well, it's only debug dumps. */
9848 union {
9849 float64 f64;
9850 double d;
9851 } d0;
06e80fc9 9852#endif
b5ff1b31 9853 uint32_t psr;
2c0262af
FB
9854
9855 for(i=0;i<16;i++) {
7fe48483 9856 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9857 if ((i % 4) == 3)
7fe48483 9858 cpu_fprintf(f, "\n");
2c0262af 9859 else
7fe48483 9860 cpu_fprintf(f, " ");
2c0262af 9861 }
b5ff1b31 9862 psr = cpsr_read(env);
687fa640
TS
9863 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9864 psr,
b5ff1b31
FB
9865 psr & (1 << 31) ? 'N' : '-',
9866 psr & (1 << 30) ? 'Z' : '-',
9867 psr & (1 << 29) ? 'C' : '-',
9868 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9869 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9870 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9871
5e3f878a 9872#if 0
b7bcbe95 9873 for (i = 0; i < 16; i++) {
8e96005d
FB
9874 d.d = env->vfp.regs[i];
9875 s0.i = d.l.lower;
9876 s1.i = d.l.upper;
a94a6abf
PB
9877 d0.f64 = d.d;
9878 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9879 i * 2, (int)s0.i, s0.s,
a94a6abf 9880 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9881 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9882 d0.d);
b7bcbe95 9883 }
40f137e1 9884 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9885#endif
2c0262af 9886}
a6b025d3 9887
e87b7cb0 9888void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
9889{
9890 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 9891 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 9892}