/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
2c0262af 79
a7812ae4 80static TCGv_ptr cpu_env;
ad69471c 81/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 82static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 83static TCGv_i32 cpu_R[16];
426f5abc
PB
84static TCGv_i32 cpu_exclusive_addr;
85static TCGv_i32 cpu_exclusive_val;
86static TCGv_i32 cpu_exclusive_high;
87#ifdef CONFIG_USER_ONLY
88static TCGv_i32 cpu_exclusive_test;
89static TCGv_i32 cpu_exclusive_info;
90#endif
ad69471c 91
b26eefb6 92/* FIXME: These should be removed. */
a7812ae4
PB
93static TCGv cpu_F0s, cpu_F1s;
94static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 95
2e70f6ef
PB
96#include "gen-icount.h"
97
155c3eac
FN
98static const char *regnames[] =
99 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
100 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
101
b26eefb6
PB
102/* initialize TCG globals. */
103void arm_translate_init(void)
104{
155c3eac
FN
105 int i;
106
a7812ae4
PB
107 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
108
155c3eac
FN
109 for (i = 0; i < 16; i++) {
110 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
111 offsetof(CPUState, regs[i]),
112 regnames[i]);
113 }
426f5abc
PB
114 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, exclusive_addr), "exclusive_addr");
116 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, exclusive_val), "exclusive_val");
118 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_high), "exclusive_high");
120#ifdef CONFIG_USER_ONLY
121 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_test), "exclusive_test");
123 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUState, exclusive_info), "exclusive_info");
125#endif
155c3eac 126
a7812ae4
PB
127#define GEN_HELPER 2
128#include "helpers.h"
b26eefb6
PB
129}
130
d9ba4830
PB
131static inline TCGv load_cpu_offset(int offset)
132{
7d1b0095 133 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
134 tcg_gen_ld_i32(tmp, cpu_env, offset);
135 return tmp;
136}
137
138#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
139
140static inline void store_cpu_offset(TCGv var, int offset)
141{
142 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 143 tcg_temp_free_i32(var);
d9ba4830
PB
144}
145
146#define store_cpu_field(var, name) \
147 store_cpu_offset(var, offsetof(CPUState, name))
148
b26eefb6
PB
149/* Set a variable to the value of a CPU register. */
150static void load_reg_var(DisasContext *s, TCGv var, int reg)
151{
152 if (reg == 15) {
153 uint32_t addr;
        /* normally, since we have already updated PC, we need only add one insn */
155 if (s->thumb)
156 addr = (long)s->pc + 2;
157 else
158 addr = (long)s->pc + 4;
159 tcg_gen_movi_i32(var, addr);
160 } else {
155c3eac 161 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
162 }
163}
164
165/* Create a new temporary and set it to the value of a CPU register. */
166static inline TCGv load_reg(DisasContext *s, int reg)
167{
7d1b0095 168 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
169 load_reg_var(s, tmp, reg);
170 return tmp;
171}
172
173/* Set a CPU register. The source must be a temporary and will be
174 marked as dead. */
175static void store_reg(DisasContext *s, int reg, TCGv var)
176{
177 if (reg == 15) {
178 tcg_gen_andi_i32(var, var, ~1);
179 s->is_jmp = DISAS_JUMP;
180 }
155c3eac 181 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 182 tcg_temp_free_i32(var);
b26eefb6
PB
183}
184
b26eefb6 185/* Value extensions. */
86831435
PB
186#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
187#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
188#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
189#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
190
1497c961
PB
191#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
192#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 193
b26eefb6 194
b75263d6
JR
195static inline void gen_set_cpsr(TCGv var, uint32_t mask)
196{
197 TCGv tmp_mask = tcg_const_i32(mask);
198 gen_helper_cpsr_write(var, tmp_mask);
199 tcg_temp_free_i32(tmp_mask);
200}
d9ba4830
PB
201/* Set NZCV flags from the high 4 bits of var. */
202#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
203
204static void gen_exception(int excp)
205{
7d1b0095 206 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
207 tcg_gen_movi_i32(tmp, excp);
208 gen_helper_exception(tmp);
7d1b0095 209 tcg_temp_free_i32(tmp);
d9ba4830
PB
210}
211
3670669c
PB
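/* Dual 16x16->32 signed multiply: leaves the product of the low halves in
   'a' and the product of the high halves in 'b'.  */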
212static void gen_smul_dual(TCGv a, TCGv b)
213{
7d1b0095
PM
214 TCGv tmp1 = tcg_temp_new_i32();
215 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
216 tcg_gen_ext16s_i32(tmp1, a);
217 tcg_gen_ext16s_i32(tmp2, b);
3670669c 218 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 219 tcg_temp_free_i32(tmp2);
3670669c
PB
220 tcg_gen_sari_i32(a, a, 16);
221 tcg_gen_sari_i32(b, b, 16);
222 tcg_gen_mul_i32(b, b, a);
223 tcg_gen_mov_i32(a, tmp1);
7d1b0095 224 tcg_temp_free_i32(tmp1);
3670669c
PB
225}
226
227/* Byteswap each halfword. */
228static void gen_rev16(TCGv var)
229{
7d1b0095 230 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
231 tcg_gen_shri_i32(tmp, var, 8);
232 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
233 tcg_gen_shli_i32(var, var, 8);
234 tcg_gen_andi_i32(var, var, 0xff00ff00);
235 tcg_gen_or_i32(var, var, tmp);
7d1b0095 236 tcg_temp_free_i32(tmp);
3670669c
PB
237}
238
239/* Byteswap low halfword and sign extend. */
240static void gen_revsh(TCGv var)
241{
1a855029
AJ
242 tcg_gen_ext16u_i32(var, var);
243 tcg_gen_bswap16_i32(var, var);
244 tcg_gen_ext16s_i32(var, var);
3670669c
PB
245}
246
247/* Unsigned bitfield extract. */
248static void gen_ubfx(TCGv var, int shift, uint32_t mask)
249{
250 if (shift)
251 tcg_gen_shri_i32(var, var, shift);
252 tcg_gen_andi_i32(var, var, mask);
253}
254
255/* Signed bitfield extract. */
256static void gen_sbfx(TCGv var, int shift, int width)
257{
258 uint32_t signbit;
259
260 if (shift)
261 tcg_gen_sari_i32(var, var, shift);
262 if (shift + width < 32) {
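        /* Sign-extend the low 'width' bits: XORing the field with its sign
           bit and then subtracting the sign bit propagates it upwards.  */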
263 signbit = 1u << (width - 1);
264 tcg_gen_andi_i32(var, var, (1u << width) - 1);
265 tcg_gen_xori_i32(var, var, signbit);
266 tcg_gen_subi_i32(var, var, signbit);
267 }
268}
269
270/* Bitfield insertion. Insert val into base. Clobbers base and val. */
271static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
272{
3670669c 273 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
274 tcg_gen_shli_i32(val, val, shift);
275 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
276 tcg_gen_or_i32(dest, base, val);
277}
278
/* Return (b << 32) + a.  Mark inputs as dead. */
280static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 281{
838fa72d
AJ
282 TCGv_i64 tmp64 = tcg_temp_new_i64();
283
284 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 285 tcg_temp_free_i32(b);
838fa72d
AJ
286 tcg_gen_shli_i64(tmp64, tmp64, 32);
287 tcg_gen_add_i64(a, tmp64, a);
288
289 tcg_temp_free_i64(tmp64);
290 return a;
291}
292
293/* Return (b << 32) - a. Mark inputs as dead. */
294static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
295{
296 TCGv_i64 tmp64 = tcg_temp_new_i64();
297
298 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 299 tcg_temp_free_i32(b);
838fa72d
AJ
300 tcg_gen_shli_i64(tmp64, tmp64, 32);
301 tcg_gen_sub_i64(a, tmp64, a);
302
303 tcg_temp_free_i64(tmp64);
304 return a;
3670669c
PB
305}
306
8f01245e
PB
307/* FIXME: Most targets have native widening multiplication.
308 It would be good to use that instead of a full wide multiply. */
5e3f878a 309/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 310static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 311{
a7812ae4
PB
312 TCGv_i64 tmp1 = tcg_temp_new_i64();
313 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
314
315 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 316 tcg_temp_free_i32(a);
5e3f878a 317 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 318 tcg_temp_free_i32(b);
5e3f878a 319 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 320 tcg_temp_free_i64(tmp2);
5e3f878a
PB
321 return tmp1;
322}
323
a7812ae4 324static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 325{
a7812ae4
PB
326 TCGv_i64 tmp1 = tcg_temp_new_i64();
327 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
328
329 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 330 tcg_temp_free_i32(a);
5e3f878a 331 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 332 tcg_temp_free_i32(b);
5e3f878a 333 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 334 tcg_temp_free_i64(tmp2);
5e3f878a
PB
335 return tmp1;
336}
337
8f01245e
PB
338/* Swap low and high halfwords. */
339static void gen_swap_half(TCGv var)
340{
7d1b0095 341 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
342 tcg_gen_shri_i32(tmp, var, 16);
343 tcg_gen_shli_i32(var, var, 16);
344 tcg_gen_or_i32(var, var, tmp);
7d1b0095 345 tcg_temp_free_i32(tmp);
8f01245e
PB
346}
347
b26eefb6
PB
348/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
349 tmp = (t0 ^ t1) & 0x8000;
350 t0 &= ~0x8000;
351 t1 &= ~0x8000;
352 t0 = (t0 + t1) ^ tmp;
353 */
354
355static void gen_add16(TCGv t0, TCGv t1)
356{
7d1b0095 357 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
358 tcg_gen_xor_i32(tmp, t0, t1);
359 tcg_gen_andi_i32(tmp, tmp, 0x8000);
360 tcg_gen_andi_i32(t0, t0, ~0x8000);
361 tcg_gen_andi_i32(t1, t1, ~0x8000);
362 tcg_gen_add_i32(t0, t0, t1);
363 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
364 tcg_temp_free_i32(tmp);
365 tcg_temp_free_i32(t1);
b26eefb6
PB
366}
367
9a119ff6
PB
368#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
369
b26eefb6
PB
370/* Set CF to the top bit of var. */
371static void gen_set_CF_bit31(TCGv var)
372{
7d1b0095 373 TCGv tmp = tcg_temp_new_i32();
b26eefb6 374 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 375 gen_set_CF(tmp);
7d1b0095 376 tcg_temp_free_i32(tmp);
b26eefb6
PB
377}
378
379/* Set N and Z flags from var. */
380static inline void gen_logic_CC(TCGv var)
381{
6fbe23d5
PB
382 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
383 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
384}
385
386/* T0 += T1 + CF. */
396e467c 387static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 388{
d9ba4830 389 TCGv tmp;
396e467c 390 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 391 tmp = load_cpu_field(CF);
396e467c 392 tcg_gen_add_i32(t0, t0, tmp);
7d1b0095 393 tcg_temp_free_i32(tmp);
b26eefb6
PB
394}
395
e9bb4aa9
JR
396/* dest = T0 + T1 + CF. */
397static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
398{
399 TCGv tmp;
400 tcg_gen_add_i32(dest, t0, t1);
401 tmp = load_cpu_field(CF);
402 tcg_gen_add_i32(dest, dest, tmp);
7d1b0095 403 tcg_temp_free_i32(tmp);
e9bb4aa9
JR
404}
405
3670669c
PB
406/* dest = T0 - T1 + CF - 1. */
407static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
408{
d9ba4830 409 TCGv tmp;
3670669c 410 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 411 tmp = load_cpu_field(CF);
3670669c
PB
412 tcg_gen_add_i32(dest, dest, tmp);
413 tcg_gen_subi_i32(dest, dest, 1);
7d1b0095 414 tcg_temp_free_i32(tmp);
3670669c
PB
415}
416
ad69471c
PB
417/* FIXME: Implement this natively. */
418#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
419
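/* Copy bit 'shift' of var into the carry flag.  */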
9a119ff6 420static void shifter_out_im(TCGv var, int shift)
b26eefb6 421{
7d1b0095 422 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
423 if (shift == 0) {
424 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 425 } else {
9a119ff6 426 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 427 if (shift != 31)
9a119ff6
PB
428 tcg_gen_andi_i32(tmp, tmp, 1);
429 }
430 gen_set_CF(tmp);
7d1b0095 431 tcg_temp_free_i32(tmp);
9a119ff6 432}
b26eefb6 433
9a119ff6
PB
434/* Shift by immediate. Includes special handling for shift == 0. */
435static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
436{
437 switch (shiftop) {
438 case 0: /* LSL */
439 if (shift != 0) {
440 if (flags)
441 shifter_out_im(var, 32 - shift);
442 tcg_gen_shli_i32(var, var, shift);
443 }
444 break;
445 case 1: /* LSR */
446 if (shift == 0) {
447 if (flags) {
448 tcg_gen_shri_i32(var, var, 31);
449 gen_set_CF(var);
450 }
451 tcg_gen_movi_i32(var, 0);
452 } else {
453 if (flags)
454 shifter_out_im(var, shift - 1);
455 tcg_gen_shri_i32(var, var, shift);
456 }
457 break;
458 case 2: /* ASR */
459 if (shift == 0)
460 shift = 32;
461 if (flags)
462 shifter_out_im(var, shift - 1);
463 if (shift == 32)
464 shift = 31;
465 tcg_gen_sari_i32(var, var, shift);
466 break;
467 case 3: /* ROR/RRX */
468 if (shift != 0) {
469 if (flags)
470 shifter_out_im(var, shift - 1);
f669df27 471 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 472 } else {
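            /* RRX: rotate right by one bit through the carry flag.  */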
d9ba4830 473 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
474 if (flags)
475 shifter_out_im(var, 0);
476 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
477 tcg_gen_shli_i32(tmp, tmp, 31);
478 tcg_gen_or_i32(var, var, tmp);
7d1b0095 479 tcg_temp_free_i32(tmp);
b26eefb6
PB
480 }
481 }
482};
483
8984bd2e
PB
484static inline void gen_arm_shift_reg(TCGv var, int shiftop,
485 TCGv shift, int flags)
486{
487 if (flags) {
488 switch (shiftop) {
489 case 0: gen_helper_shl_cc(var, var, shift); break;
490 case 1: gen_helper_shr_cc(var, var, shift); break;
491 case 2: gen_helper_sar_cc(var, var, shift); break;
492 case 3: gen_helper_ror_cc(var, var, shift); break;
493 }
494 } else {
495 switch (shiftop) {
496 case 0: gen_helper_shl(var, var, shift); break;
497 case 1: gen_helper_shr(var, var, shift); break;
498 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
499 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
500 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
501 }
502 }
7d1b0095 503 tcg_temp_free_i32(shift);
8984bd2e
PB
504}
505
6ddbc6e4
PB
506#define PAS_OP(pfx) \
507 switch (op2) { \
508 case 0: gen_pas_helper(glue(pfx,add16)); break; \
509 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
510 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
511 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
512 case 4: gen_pas_helper(glue(pfx,add8)); break; \
513 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
514 }
d9ba4830 515static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 516{
a7812ae4 517 TCGv_ptr tmp;
6ddbc6e4
PB
518
519 switch (op1) {
520#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
521 case 1:
a7812ae4 522 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
523 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
524 PAS_OP(s)
b75263d6 525 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
526 break;
527 case 5:
a7812ae4 528 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
529 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
530 PAS_OP(u)
b75263d6 531 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
532 break;
533#undef gen_pas_helper
534#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
535 case 2:
536 PAS_OP(q);
537 break;
538 case 3:
539 PAS_OP(sh);
540 break;
541 case 6:
542 PAS_OP(uq);
543 break;
544 case 7:
545 PAS_OP(uh);
546 break;
547#undef gen_pas_helper
548 }
549}
9ee6e8bb
PB
550#undef PAS_OP
551
/* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings.  */
553#define PAS_OP(pfx) \
ed89a2f1 554 switch (op1) { \
6ddbc6e4
PB
555 case 0: gen_pas_helper(glue(pfx,add8)); break; \
556 case 1: gen_pas_helper(glue(pfx,add16)); break; \
557 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
558 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
559 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
560 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
561 }
d9ba4830 562static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 563{
a7812ae4 564 TCGv_ptr tmp;
6ddbc6e4 565
ed89a2f1 566 switch (op2) {
6ddbc6e4
PB
567#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
568 case 0:
a7812ae4 569 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
570 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
571 PAS_OP(s)
b75263d6 572 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
573 break;
574 case 4:
a7812ae4 575 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
576 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
577 PAS_OP(u)
b75263d6 578 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
579 break;
580#undef gen_pas_helper
581#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
582 case 1:
583 PAS_OP(q);
584 break;
585 case 2:
586 PAS_OP(sh);
587 break;
588 case 5:
589 PAS_OP(uq);
590 break;
591 case 6:
592 PAS_OP(uh);
593 break;
594#undef gen_pas_helper
595 }
596}
9ee6e8bb
PB
597#undef PAS_OP
598
d9ba4830
PB
599static void gen_test_cc(int cc, int label)
600{
601 TCGv tmp;
602 TCGv tmp2;
d9ba4830
PB
603 int inv;
604
d9ba4830
PB
605 switch (cc) {
606 case 0: /* eq: Z */
6fbe23d5 607 tmp = load_cpu_field(ZF);
cb63669a 608 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
609 break;
610 case 1: /* ne: !Z */
6fbe23d5 611 tmp = load_cpu_field(ZF);
cb63669a 612 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
613 break;
614 case 2: /* cs: C */
615 tmp = load_cpu_field(CF);
cb63669a 616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
617 break;
618 case 3: /* cc: !C */
619 tmp = load_cpu_field(CF);
cb63669a 620 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
621 break;
622 case 4: /* mi: N */
6fbe23d5 623 tmp = load_cpu_field(NF);
cb63669a 624 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
625 break;
626 case 5: /* pl: !N */
6fbe23d5 627 tmp = load_cpu_field(NF);
cb63669a 628 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
629 break;
630 case 6: /* vs: V */
631 tmp = load_cpu_field(VF);
cb63669a 632 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
633 break;
634 case 7: /* vc: !V */
635 tmp = load_cpu_field(VF);
cb63669a 636 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
637 break;
638 case 8: /* hi: C && !Z */
639 inv = gen_new_label();
640 tmp = load_cpu_field(CF);
cb63669a 641 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 642 tcg_temp_free_i32(tmp);
6fbe23d5 643 tmp = load_cpu_field(ZF);
cb63669a 644 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
645 gen_set_label(inv);
646 break;
647 case 9: /* ls: !C || Z */
648 tmp = load_cpu_field(CF);
cb63669a 649 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 650 tcg_temp_free_i32(tmp);
6fbe23d5 651 tmp = load_cpu_field(ZF);
cb63669a 652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
653 break;
654 case 10: /* ge: N == V -> N ^ V == 0 */
655 tmp = load_cpu_field(VF);
6fbe23d5 656 tmp2 = load_cpu_field(NF);
d9ba4830 657 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 658 tcg_temp_free_i32(tmp2);
cb63669a 659 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
660 break;
661 case 11: /* lt: N != V -> N ^ V != 0 */
662 tmp = load_cpu_field(VF);
6fbe23d5 663 tmp2 = load_cpu_field(NF);
d9ba4830 664 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 665 tcg_temp_free_i32(tmp2);
cb63669a 666 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
667 break;
668 case 12: /* gt: !Z && N == V */
669 inv = gen_new_label();
6fbe23d5 670 tmp = load_cpu_field(ZF);
cb63669a 671 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 672 tcg_temp_free_i32(tmp);
d9ba4830 673 tmp = load_cpu_field(VF);
6fbe23d5 674 tmp2 = load_cpu_field(NF);
d9ba4830 675 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 676 tcg_temp_free_i32(tmp2);
cb63669a 677 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
678 gen_set_label(inv);
679 break;
680 case 13: /* le: Z || N != V */
6fbe23d5 681 tmp = load_cpu_field(ZF);
cb63669a 682 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 683 tcg_temp_free_i32(tmp);
d9ba4830 684 tmp = load_cpu_field(VF);
6fbe23d5 685 tmp2 = load_cpu_field(NF);
d9ba4830 686 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 687 tcg_temp_free_i32(tmp2);
cb63669a 688 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
689 break;
690 default:
691 fprintf(stderr, "Bad condition code 0x%x\n", cc);
692 abort();
693 }
7d1b0095 694 tcg_temp_free_i32(tmp);
d9ba4830 695}
2c0262af 696
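/* Nonzero for data-processing ops whose flags come from the logical result
   (N and Z only); zero for the arithmetic ops that also compute C and V.  */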
b1d8e52e 697static const uint8_t table_logic_cc[16] = {
2c0262af
FB
698 1, /* and */
699 1, /* xor */
700 0, /* sub */
701 0, /* rsb */
702 0, /* add */
703 0, /* adc */
704 0, /* sbc */
705 0, /* rsc */
706 1, /* andl */
707 1, /* xorl */
708 0, /* cmp */
709 0, /* cmn */
710 1, /* orr */
711 1, /* mov */
712 1, /* bic */
713 1, /* mvn */
714};
3b46e624 715
d9ba4830
PB
716/* Set PC and Thumb state from an immediate address. */
717static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 718{
b26eefb6 719 TCGv tmp;
99c475ab 720
b26eefb6 721 s->is_jmp = DISAS_UPDATE;
d9ba4830 722 if (s->thumb != (addr & 1)) {
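        /* Bit 0 of the target address selects the new Thumb state; only
           store it when it actually changes.  */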
7d1b0095 723 tmp = tcg_temp_new_i32();
d9ba4830
PB
724 tcg_gen_movi_i32(tmp, addr & 1);
725 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
7d1b0095 726 tcg_temp_free_i32(tmp);
d9ba4830 727 }
155c3eac 728 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
729}
730
731/* Set PC and Thumb state from var. var is marked as dead. */
732static inline void gen_bx(DisasContext *s, TCGv var)
733{
d9ba4830 734 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
735 tcg_gen_andi_i32(cpu_R[15], var, ~1);
736 tcg_gen_andi_i32(var, var, 1);
737 store_cpu_field(var, thumb);
d9ba4830
PB
738}
739
21aeb343
JR
740/* Variant of store_reg which uses branch&exchange logic when storing
741 to r15 in ARM architecture v7 and above. The source must be a temporary
742 and will be marked as dead. */
743static inline void store_reg_bx(CPUState *env, DisasContext *s,
744 int reg, TCGv var)
745{
746 if (reg == 15 && ENABLE_ARCH_7) {
747 gen_bx(s, var);
748 } else {
749 store_reg(s, reg, var);
750 }
751}
752
b0109805
PB
753static inline TCGv gen_ld8s(TCGv addr, int index)
754{
7d1b0095 755 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
756 tcg_gen_qemu_ld8s(tmp, addr, index);
757 return tmp;
758}
759static inline TCGv gen_ld8u(TCGv addr, int index)
760{
7d1b0095 761 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
762 tcg_gen_qemu_ld8u(tmp, addr, index);
763 return tmp;
764}
765static inline TCGv gen_ld16s(TCGv addr, int index)
766{
7d1b0095 767 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
768 tcg_gen_qemu_ld16s(tmp, addr, index);
769 return tmp;
770}
771static inline TCGv gen_ld16u(TCGv addr, int index)
772{
7d1b0095 773 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
774 tcg_gen_qemu_ld16u(tmp, addr, index);
775 return tmp;
776}
777static inline TCGv gen_ld32(TCGv addr, int index)
778{
7d1b0095 779 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
780 tcg_gen_qemu_ld32u(tmp, addr, index);
781 return tmp;
782}
84496233
JR
783static inline TCGv_i64 gen_ld64(TCGv addr, int index)
784{
785 TCGv_i64 tmp = tcg_temp_new_i64();
786 tcg_gen_qemu_ld64(tmp, addr, index);
787 return tmp;
788}
b0109805
PB
789static inline void gen_st8(TCGv val, TCGv addr, int index)
790{
791 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 792 tcg_temp_free_i32(val);
b0109805
PB
793}
794static inline void gen_st16(TCGv val, TCGv addr, int index)
795{
796 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 797 tcg_temp_free_i32(val);
b0109805
PB
798}
799static inline void gen_st32(TCGv val, TCGv addr, int index)
800{
801 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 802 tcg_temp_free_i32(val);
b0109805 803}
84496233
JR
804static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
805{
806 tcg_gen_qemu_st64(val, addr, index);
807 tcg_temp_free_i64(val);
808}
b5ff1b31 809
5e3f878a
PB
810static inline void gen_set_pc_im(uint32_t val)
811{
155c3eac 812 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
813}
814
b5ff1b31
FB
815/* Force a TB lookup after an instruction that changes the CPU state. */
816static inline void gen_lookup_tb(DisasContext *s)
817{
a6445c52 818 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
819 s->is_jmp = DISAS_UPDATE;
820}
821
b0109805
PB
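/* Add the offset encoded in a load/store instruction (12-bit immediate or
   shifted register form) to the address in 'var'.  */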
822static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
823 TCGv var)
2c0262af 824{
1e8d4eec 825 int val, rm, shift, shiftop;
b26eefb6 826 TCGv offset;
2c0262af
FB
827
828 if (!(insn & (1 << 25))) {
829 /* immediate */
830 val = insn & 0xfff;
831 if (!(insn & (1 << 23)))
832 val = -val;
537730b9 833 if (val != 0)
b0109805 834 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
835 } else {
836 /* shift/register */
837 rm = (insn) & 0xf;
838 shift = (insn >> 7) & 0x1f;
1e8d4eec 839 shiftop = (insn >> 5) & 3;
b26eefb6 840 offset = load_reg(s, rm);
9a119ff6 841 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 842 if (!(insn & (1 << 23)))
b0109805 843 tcg_gen_sub_i32(var, var, offset);
2c0262af 844 else
b0109805 845 tcg_gen_add_i32(var, var, offset);
7d1b0095 846 tcg_temp_free_i32(offset);
2c0262af
FB
847 }
848}
849
191f9a93 850static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 851 int extra, TCGv var)
2c0262af
FB
852{
853 int val, rm;
b26eefb6 854 TCGv offset;
3b46e624 855
2c0262af
FB
856 if (insn & (1 << 22)) {
857 /* immediate */
858 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
859 if (!(insn & (1 << 23)))
860 val = -val;
18acad92 861 val += extra;
537730b9 862 if (val != 0)
b0109805 863 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
864 } else {
865 /* register */
191f9a93 866 if (extra)
b0109805 867 tcg_gen_addi_i32(var, var, extra);
2c0262af 868 rm = (insn) & 0xf;
b26eefb6 869 offset = load_reg(s, rm);
2c0262af 870 if (!(insn & (1 << 23)))
b0109805 871 tcg_gen_sub_i32(var, var, offset);
2c0262af 872 else
b0109805 873 tcg_gen_add_i32(var, var, offset);
7d1b0095 874 tcg_temp_free_i32(offset);
2c0262af
FB
875 }
876}
877
4373f3ce
PB
878#define VFP_OP2(name) \
879static inline void gen_vfp_##name(int dp) \
880{ \
881 if (dp) \
882 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
883 else \
884 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
885}
886
4373f3ce
PB
887VFP_OP2(add)
888VFP_OP2(sub)
889VFP_OP2(mul)
890VFP_OP2(div)
891
892#undef VFP_OP2
893
894static inline void gen_vfp_abs(int dp)
895{
896 if (dp)
897 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
898 else
899 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
900}
901
902static inline void gen_vfp_neg(int dp)
903{
904 if (dp)
905 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
906 else
907 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
908}
909
910static inline void gen_vfp_sqrt(int dp)
911{
912 if (dp)
913 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
914 else
915 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
916}
917
918static inline void gen_vfp_cmp(int dp)
919{
920 if (dp)
921 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
922 else
923 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
924}
925
926static inline void gen_vfp_cmpe(int dp)
927{
928 if (dp)
929 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
930 else
931 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
932}
933
934static inline void gen_vfp_F1_ld0(int dp)
935{
936 if (dp)
5b340b51 937 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 938 else
5b340b51 939 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
940}
941
942static inline void gen_vfp_uito(int dp)
943{
944 if (dp)
945 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
946 else
947 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
948}
949
950static inline void gen_vfp_sito(int dp)
951{
952 if (dp)
66230e0d 953 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 954 else
66230e0d 955 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
956}
957
958static inline void gen_vfp_toui(int dp)
959{
960 if (dp)
961 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
962 else
963 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
964}
965
966static inline void gen_vfp_touiz(int dp)
967{
968 if (dp)
969 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
970 else
971 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
972}
973
974static inline void gen_vfp_tosi(int dp)
975{
976 if (dp)
977 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
978 else
979 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
980}
981
982static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
983{
984 if (dp)
4373f3ce 985 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 986 else
4373f3ce
PB
987 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
988}
989
990#define VFP_GEN_FIX(name) \
991static inline void gen_vfp_##name(int dp, int shift) \
992{ \
b75263d6 993 TCGv tmp_shift = tcg_const_i32(shift); \
4373f3ce 994 if (dp) \
b75263d6 995 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
4373f3ce 996 else \
b75263d6
JR
997 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
998 tcg_temp_free_i32(tmp_shift); \
9ee6e8bb 999}
4373f3ce
PB
1000VFP_GEN_FIX(tosh)
1001VFP_GEN_FIX(tosl)
1002VFP_GEN_FIX(touh)
1003VFP_GEN_FIX(toul)
1004VFP_GEN_FIX(shto)
1005VFP_GEN_FIX(slto)
1006VFP_GEN_FIX(uhto)
1007VFP_GEN_FIX(ulto)
1008#undef VFP_GEN_FIX
9ee6e8bb 1009
312eea9f 1010static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1011{
1012 if (dp)
312eea9f 1013 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1014 else
312eea9f 1015 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1016}
1017
312eea9f 1018static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1019{
1020 if (dp)
312eea9f 1021 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1022 else
312eea9f 1023 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1024}
1025
8e96005d
FB
1026static inline long
1027vfp_reg_offset (int dp, int reg)
1028{
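    /* Single-precision registers are stored as the two halves of the
       double-precision registers: reg & 1 selects the upper or lower word.  */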
1029 if (dp)
1030 return offsetof(CPUARMState, vfp.regs[reg]);
1031 else if (reg & 1) {
1032 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1033 + offsetof(CPU_DoubleU, l.upper);
1034 } else {
1035 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1036 + offsetof(CPU_DoubleU, l.lower);
1037 }
1038}
9ee6e8bb
PB
1039
1040/* Return the offset of a 32-bit piece of a NEON register.
1041 zero is the least significant end of the register. */
1042static inline long
1043neon_reg_offset (int reg, int n)
1044{
1045 int sreg;
1046 sreg = reg * 2 + n;
1047 return vfp_reg_offset(0, sreg);
1048}
1049
8f8e3aa4
PB
1050static TCGv neon_load_reg(int reg, int pass)
1051{
7d1b0095 1052 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1053 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1054 return tmp;
1055}
1056
1057static void neon_store_reg(int reg, int pass, TCGv var)
1058{
1059 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1060 tcg_temp_free_i32(var);
8f8e3aa4
PB
1061}
1062
a7812ae4 1063static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1064{
1065 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1066}
1067
a7812ae4 1068static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1069{
1070 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1071}
1072
4373f3ce
PB
1073#define tcg_gen_ld_f32 tcg_gen_ld_i32
1074#define tcg_gen_ld_f64 tcg_gen_ld_i64
1075#define tcg_gen_st_f32 tcg_gen_st_i32
1076#define tcg_gen_st_f64 tcg_gen_st_i64
1077
b7bcbe95
FB
1078static inline void gen_mov_F0_vreg(int dp, int reg)
1079{
1080 if (dp)
4373f3ce 1081 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1082 else
4373f3ce 1083 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1084}
1085
1086static inline void gen_mov_F1_vreg(int dp, int reg)
1087{
1088 if (dp)
4373f3ce 1089 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1090 else
4373f3ce 1091 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1092}
1093
1094static inline void gen_mov_vreg_F0(int dp, int reg)
1095{
1096 if (dp)
4373f3ce 1097 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1098 else
4373f3ce 1099 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1100}
1101
18c9b560
AZ
1102#define ARM_CP_RW_BIT (1 << 20)
1103
a7812ae4 1104static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1105{
1106 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1107}
1108
a7812ae4 1109static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1110{
1111 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1112}
1113
da6b5335 1114static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1115{
7d1b0095 1116 TCGv var = tcg_temp_new_i32();
da6b5335
FN
1117 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1118 return var;
e677137d
PB
1119}
1120
da6b5335 1121static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1122{
da6b5335 1123 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
7d1b0095 1124 tcg_temp_free_i32(var);
e677137d
PB
1125}
1126
1127static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1128{
1129 iwmmxt_store_reg(cpu_M0, rn);
1130}
1131
1132static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1133{
1134 iwmmxt_load_reg(cpu_M0, rn);
1135}
1136
1137static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1138{
1139 iwmmxt_load_reg(cpu_V1, rn);
1140 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1141}
1142
1143static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1144{
1145 iwmmxt_load_reg(cpu_V1, rn);
1146 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1147}
1148
1149static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1150{
1151 iwmmxt_load_reg(cpu_V1, rn);
1152 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1153}
1154
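/* Wrappers for the iwMMXt TCG helpers: the plain variants operate on M0 and
   wRn, the _ENV variants also pass cpu_env so the helper can access CPU
   state, and the _ENV1 variant is the unary form.  */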
1155#define IWMMXT_OP(name) \
1156static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1157{ \
1158 iwmmxt_load_reg(cpu_V1, rn); \
1159 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1160}
1161
1162#define IWMMXT_OP_ENV(name) \
1163static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1164{ \
1165 iwmmxt_load_reg(cpu_V1, rn); \
1166 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1167}
1168
1169#define IWMMXT_OP_ENV_SIZE(name) \
1170IWMMXT_OP_ENV(name##b) \
1171IWMMXT_OP_ENV(name##w) \
1172IWMMXT_OP_ENV(name##l)
1173
1174#define IWMMXT_OP_ENV1(name) \
1175static inline void gen_op_iwmmxt_##name##_M0(void) \
1176{ \
1177 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1178}
1179
1180IWMMXT_OP(maddsq)
1181IWMMXT_OP(madduq)
1182IWMMXT_OP(sadb)
1183IWMMXT_OP(sadw)
1184IWMMXT_OP(mulslw)
1185IWMMXT_OP(mulshw)
1186IWMMXT_OP(mululw)
1187IWMMXT_OP(muluhw)
1188IWMMXT_OP(macsw)
1189IWMMXT_OP(macuw)
1190
1191IWMMXT_OP_ENV_SIZE(unpackl)
1192IWMMXT_OP_ENV_SIZE(unpackh)
1193
1194IWMMXT_OP_ENV1(unpacklub)
1195IWMMXT_OP_ENV1(unpackluw)
1196IWMMXT_OP_ENV1(unpacklul)
1197IWMMXT_OP_ENV1(unpackhub)
1198IWMMXT_OP_ENV1(unpackhuw)
1199IWMMXT_OP_ENV1(unpackhul)
1200IWMMXT_OP_ENV1(unpacklsb)
1201IWMMXT_OP_ENV1(unpacklsw)
1202IWMMXT_OP_ENV1(unpacklsl)
1203IWMMXT_OP_ENV1(unpackhsb)
1204IWMMXT_OP_ENV1(unpackhsw)
1205IWMMXT_OP_ENV1(unpackhsl)
1206
1207IWMMXT_OP_ENV_SIZE(cmpeq)
1208IWMMXT_OP_ENV_SIZE(cmpgtu)
1209IWMMXT_OP_ENV_SIZE(cmpgts)
1210
1211IWMMXT_OP_ENV_SIZE(mins)
1212IWMMXT_OP_ENV_SIZE(minu)
1213IWMMXT_OP_ENV_SIZE(maxs)
1214IWMMXT_OP_ENV_SIZE(maxu)
1215
1216IWMMXT_OP_ENV_SIZE(subn)
1217IWMMXT_OP_ENV_SIZE(addn)
1218IWMMXT_OP_ENV_SIZE(subu)
1219IWMMXT_OP_ENV_SIZE(addu)
1220IWMMXT_OP_ENV_SIZE(subs)
1221IWMMXT_OP_ENV_SIZE(adds)
1222
1223IWMMXT_OP_ENV(avgb0)
1224IWMMXT_OP_ENV(avgb1)
1225IWMMXT_OP_ENV(avgw0)
1226IWMMXT_OP_ENV(avgw1)
1227
1228IWMMXT_OP(msadb)
1229
1230IWMMXT_OP_ENV(packuw)
1231IWMMXT_OP_ENV(packul)
1232IWMMXT_OP_ENV(packuq)
1233IWMMXT_OP_ENV(packsw)
1234IWMMXT_OP_ENV(packsl)
1235IWMMXT_OP_ENV(packsq)
1236
e677137d
PB
1237static void gen_op_iwmmxt_set_mup(void)
1238{
1239 TCGv tmp;
1240 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1241 tcg_gen_ori_i32(tmp, tmp, 2);
1242 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1243}
1244
1245static void gen_op_iwmmxt_set_cup(void)
1246{
1247 TCGv tmp;
1248 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1249 tcg_gen_ori_i32(tmp, tmp, 1);
1250 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1251}
1252
1253static void gen_op_iwmmxt_setpsr_nz(void)
1254{
7d1b0095 1255 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1256 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1257 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1258}
1259
1260static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1261{
1262 iwmmxt_load_reg(cpu_V1, rn);
86831435 1263 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1264 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1265}
1266
da6b5335 1267static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1268{
1269 int rd;
1270 uint32_t offset;
da6b5335 1271 TCGv tmp;
18c9b560
AZ
1272
1273 rd = (insn >> 16) & 0xf;
da6b5335 1274 tmp = load_reg(s, rd);
18c9b560
AZ
1275
1276 offset = (insn & 0xff) << ((insn >> 7) & 2);
1277 if (insn & (1 << 24)) {
1278 /* Pre indexed */
1279 if (insn & (1 << 23))
da6b5335 1280 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1281 else
da6b5335
FN
1282 tcg_gen_addi_i32(tmp, tmp, -offset);
1283 tcg_gen_mov_i32(dest, tmp);
18c9b560 1284 if (insn & (1 << 21))
da6b5335
FN
1285 store_reg(s, rd, tmp);
1286 else
7d1b0095 1287 tcg_temp_free_i32(tmp);
18c9b560
AZ
1288 } else if (insn & (1 << 21)) {
1289 /* Post indexed */
da6b5335 1290 tcg_gen_mov_i32(dest, tmp);
18c9b560 1291 if (insn & (1 << 23))
da6b5335 1292 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1293 else
da6b5335
FN
1294 tcg_gen_addi_i32(tmp, tmp, -offset);
1295 store_reg(s, rd, tmp);
18c9b560
AZ
1296 } else if (!(insn & (1 << 23)))
1297 return 1;
1298 return 0;
1299}
1300
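/* Load the shift amount for an iwMMXt shift instruction into dest, taken
   either from a wCGR control register or from the low 32 bits of an iwMMXt
   data register, and masked with 'mask'.  */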
da6b5335 1301static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1302{
1303 int rd = (insn >> 0) & 0xf;
da6b5335 1304 TCGv tmp;
18c9b560 1305
da6b5335
FN
1306 if (insn & (1 << 8)) {
1307 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1308 return 1;
da6b5335
FN
1309 } else {
1310 tmp = iwmmxt_load_creg(rd);
1311 }
1312 } else {
7d1b0095 1313 tmp = tcg_temp_new_i32();
da6b5335
FN
1314 iwmmxt_load_reg(cpu_V0, rd);
1315 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1316 }
1317 tcg_gen_andi_i32(tmp, tmp, mask);
1318 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1319 tcg_temp_free_i32(tmp);
18c9b560
AZ
1320 return 0;
1321}
1322
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
1325static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1326{
1327 int rd, wrd;
1328 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1329 TCGv addr;
1330 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1331
1332 if ((insn & 0x0e000e00) == 0x0c000000) {
1333 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1334 wrd = insn & 0xf;
1335 rdlo = (insn >> 12) & 0xf;
1336 rdhi = (insn >> 16) & 0xf;
1337 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1338 iwmmxt_load_reg(cpu_V0, wrd);
1339 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1340 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1341 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1342 } else { /* TMCRR */
da6b5335
FN
1343 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1344 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1345 gen_op_iwmmxt_set_mup();
1346 }
1347 return 0;
1348 }
1349
1350 wrd = (insn >> 12) & 0xf;
7d1b0095 1351 addr = tcg_temp_new_i32();
da6b5335 1352 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1353 tcg_temp_free_i32(addr);
18c9b560 1354 return 1;
da6b5335 1355 }
18c9b560
AZ
1356 if (insn & ARM_CP_RW_BIT) {
1357 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1358 tmp = tcg_temp_new_i32();
da6b5335
FN
1359 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1360 iwmmxt_store_creg(wrd, tmp);
18c9b560 1361 } else {
e677137d
PB
1362 i = 1;
1363 if (insn & (1 << 8)) {
1364 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1365 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1366 i = 0;
1367 } else { /* WLDRW wRd */
da6b5335 1368 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1369 }
1370 } else {
1371 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1372 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1373 } else { /* WLDRB */
da6b5335 1374 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1375 }
1376 }
1377 if (i) {
1378 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1379 tcg_temp_free_i32(tmp);
e677137d 1380 }
18c9b560
AZ
1381 gen_op_iwmmxt_movq_wRn_M0(wrd);
1382 }
1383 } else {
1384 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1385 tmp = iwmmxt_load_creg(wrd);
1386 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1387 } else {
1388 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1389 tmp = tcg_temp_new_i32();
e677137d
PB
1390 if (insn & (1 << 8)) {
1391 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1392 tcg_temp_free_i32(tmp);
da6b5335 1393 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1394 } else { /* WSTRW wRd */
1395 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1396 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1397 }
1398 } else {
1399 if (insn & (1 << 22)) { /* WSTRH */
1400 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1401 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1402 } else { /* WSTRB */
1403 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1404 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1405 }
1406 }
18c9b560
AZ
1407 }
1408 }
7d1b0095 1409 tcg_temp_free_i32(addr);
18c9b560
AZ
1410 return 0;
1411 }
1412
1413 if ((insn & 0x0f000000) != 0x0e000000)
1414 return 1;
1415
1416 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1417 case 0x000: /* WOR */
1418 wrd = (insn >> 12) & 0xf;
1419 rd0 = (insn >> 0) & 0xf;
1420 rd1 = (insn >> 16) & 0xf;
1421 gen_op_iwmmxt_movq_M0_wRn(rd0);
1422 gen_op_iwmmxt_orq_M0_wRn(rd1);
1423 gen_op_iwmmxt_setpsr_nz();
1424 gen_op_iwmmxt_movq_wRn_M0(wrd);
1425 gen_op_iwmmxt_set_mup();
1426 gen_op_iwmmxt_set_cup();
1427 break;
1428 case 0x011: /* TMCR */
1429 if (insn & 0xf)
1430 return 1;
1431 rd = (insn >> 12) & 0xf;
1432 wrd = (insn >> 16) & 0xf;
1433 switch (wrd) {
1434 case ARM_IWMMXT_wCID:
1435 case ARM_IWMMXT_wCASF:
1436 break;
1437 case ARM_IWMMXT_wCon:
1438 gen_op_iwmmxt_set_cup();
1439 /* Fall through. */
1440 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1441 tmp = iwmmxt_load_creg(wrd);
1442 tmp2 = load_reg(s, rd);
f669df27 1443 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1444 tcg_temp_free_i32(tmp2);
da6b5335 1445 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1446 break;
1447 case ARM_IWMMXT_wCGR0:
1448 case ARM_IWMMXT_wCGR1:
1449 case ARM_IWMMXT_wCGR2:
1450 case ARM_IWMMXT_wCGR3:
1451 gen_op_iwmmxt_set_cup();
da6b5335
FN
1452 tmp = load_reg(s, rd);
1453 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1454 break;
1455 default:
1456 return 1;
1457 }
1458 break;
1459 case 0x100: /* WXOR */
1460 wrd = (insn >> 12) & 0xf;
1461 rd0 = (insn >> 0) & 0xf;
1462 rd1 = (insn >> 16) & 0xf;
1463 gen_op_iwmmxt_movq_M0_wRn(rd0);
1464 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1465 gen_op_iwmmxt_setpsr_nz();
1466 gen_op_iwmmxt_movq_wRn_M0(wrd);
1467 gen_op_iwmmxt_set_mup();
1468 gen_op_iwmmxt_set_cup();
1469 break;
1470 case 0x111: /* TMRC */
1471 if (insn & 0xf)
1472 return 1;
1473 rd = (insn >> 12) & 0xf;
1474 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1475 tmp = iwmmxt_load_creg(wrd);
1476 store_reg(s, rd, tmp);
18c9b560
AZ
1477 break;
1478 case 0x300: /* WANDN */
1479 wrd = (insn >> 12) & 0xf;
1480 rd0 = (insn >> 0) & 0xf;
1481 rd1 = (insn >> 16) & 0xf;
1482 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1483 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1484 gen_op_iwmmxt_andq_M0_wRn(rd1);
1485 gen_op_iwmmxt_setpsr_nz();
1486 gen_op_iwmmxt_movq_wRn_M0(wrd);
1487 gen_op_iwmmxt_set_mup();
1488 gen_op_iwmmxt_set_cup();
1489 break;
1490 case 0x200: /* WAND */
1491 wrd = (insn >> 12) & 0xf;
1492 rd0 = (insn >> 0) & 0xf;
1493 rd1 = (insn >> 16) & 0xf;
1494 gen_op_iwmmxt_movq_M0_wRn(rd0);
1495 gen_op_iwmmxt_andq_M0_wRn(rd1);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1500 break;
1501 case 0x810: case 0xa10: /* WMADD */
1502 wrd = (insn >> 12) & 0xf;
1503 rd0 = (insn >> 0) & 0xf;
1504 rd1 = (insn >> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0);
1506 if (insn & (1 << 21))
1507 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1508 else
1509 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1510 gen_op_iwmmxt_movq_wRn_M0(wrd);
1511 gen_op_iwmmxt_set_mup();
1512 break;
1513 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1514 wrd = (insn >> 12) & 0xf;
1515 rd0 = (insn >> 16) & 0xf;
1516 rd1 = (insn >> 0) & 0xf;
1517 gen_op_iwmmxt_movq_M0_wRn(rd0);
1518 switch ((insn >> 22) & 3) {
1519 case 0:
1520 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1521 break;
1522 case 1:
1523 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1524 break;
1525 case 2:
1526 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1527 break;
1528 case 3:
1529 return 1;
1530 }
1531 gen_op_iwmmxt_movq_wRn_M0(wrd);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1534 break;
1535 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1536 wrd = (insn >> 12) & 0xf;
1537 rd0 = (insn >> 16) & 0xf;
1538 rd1 = (insn >> 0) & 0xf;
1539 gen_op_iwmmxt_movq_M0_wRn(rd0);
1540 switch ((insn >> 22) & 3) {
1541 case 0:
1542 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1543 break;
1544 case 1:
1545 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1546 break;
1547 case 2:
1548 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1549 break;
1550 case 3:
1551 return 1;
1552 }
1553 gen_op_iwmmxt_movq_wRn_M0(wrd);
1554 gen_op_iwmmxt_set_mup();
1555 gen_op_iwmmxt_set_cup();
1556 break;
1557 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1558 wrd = (insn >> 12) & 0xf;
1559 rd0 = (insn >> 16) & 0xf;
1560 rd1 = (insn >> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0);
1562 if (insn & (1 << 22))
1563 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1564 else
1565 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1566 if (!(insn & (1 << 20)))
1567 gen_op_iwmmxt_addl_M0_wRn(wrd);
1568 gen_op_iwmmxt_movq_wRn_M0(wrd);
1569 gen_op_iwmmxt_set_mup();
1570 break;
1571 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1572 wrd = (insn >> 12) & 0xf;
1573 rd0 = (insn >> 16) & 0xf;
1574 rd1 = (insn >> 0) & 0xf;
1575 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1576 if (insn & (1 << 21)) {
1577 if (insn & (1 << 20))
1578 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1579 else
1580 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1581 } else {
1582 if (insn & (1 << 20))
1583 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1584 else
1585 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1586 }
18c9b560
AZ
1587 gen_op_iwmmxt_movq_wRn_M0(wrd);
1588 gen_op_iwmmxt_set_mup();
1589 break;
1590 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1591 wrd = (insn >> 12) & 0xf;
1592 rd0 = (insn >> 16) & 0xf;
1593 rd1 = (insn >> 0) & 0xf;
1594 gen_op_iwmmxt_movq_M0_wRn(rd0);
1595 if (insn & (1 << 21))
1596 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1597 else
1598 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1599 if (!(insn & (1 << 20))) {
e677137d
PB
1600 iwmmxt_load_reg(cpu_V1, wrd);
1601 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1602 }
1603 gen_op_iwmmxt_movq_wRn_M0(wrd);
1604 gen_op_iwmmxt_set_mup();
1605 break;
1606 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1607 wrd = (insn >> 12) & 0xf;
1608 rd0 = (insn >> 16) & 0xf;
1609 rd1 = (insn >> 0) & 0xf;
1610 gen_op_iwmmxt_movq_M0_wRn(rd0);
1611 switch ((insn >> 22) & 3) {
1612 case 0:
1613 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1614 break;
1615 case 1:
1616 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1617 break;
1618 case 2:
1619 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1620 break;
1621 case 3:
1622 return 1;
1623 }
1624 gen_op_iwmmxt_movq_wRn_M0(wrd);
1625 gen_op_iwmmxt_set_mup();
1626 gen_op_iwmmxt_set_cup();
1627 break;
1628 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1629 wrd = (insn >> 12) & 0xf;
1630 rd0 = (insn >> 16) & 0xf;
1631 rd1 = (insn >> 0) & 0xf;
1632 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1633 if (insn & (1 << 22)) {
1634 if (insn & (1 << 20))
1635 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1636 else
1637 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1638 } else {
1639 if (insn & (1 << 20))
1640 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1643 }
18c9b560
AZ
1644 gen_op_iwmmxt_movq_wRn_M0(wrd);
1645 gen_op_iwmmxt_set_mup();
1646 gen_op_iwmmxt_set_cup();
1647 break;
1648 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1649 wrd = (insn >> 12) & 0xf;
1650 rd0 = (insn >> 16) & 0xf;
1651 rd1 = (insn >> 0) & 0xf;
1652 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1653 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1654 tcg_gen_andi_i32(tmp, tmp, 7);
1655 iwmmxt_load_reg(cpu_V1, rd1);
1656 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1657 tcg_temp_free_i32(tmp);
18c9b560
AZ
1658 gen_op_iwmmxt_movq_wRn_M0(wrd);
1659 gen_op_iwmmxt_set_mup();
1660 break;
1661 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1662 if (((insn >> 6) & 3) == 3)
1663 return 1;
18c9b560
AZ
1664 rd = (insn >> 12) & 0xf;
1665 wrd = (insn >> 16) & 0xf;
da6b5335 1666 tmp = load_reg(s, rd);
18c9b560
AZ
1667 gen_op_iwmmxt_movq_M0_wRn(wrd);
1668 switch ((insn >> 6) & 3) {
1669 case 0:
da6b5335
FN
1670 tmp2 = tcg_const_i32(0xff);
1671 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1672 break;
1673 case 1:
da6b5335
FN
1674 tmp2 = tcg_const_i32(0xffff);
1675 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1676 break;
1677 case 2:
da6b5335
FN
1678 tmp2 = tcg_const_i32(0xffffffff);
1679 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1680 break;
da6b5335
FN
1681 default:
1682 TCGV_UNUSED(tmp2);
1683 TCGV_UNUSED(tmp3);
18c9b560 1684 }
da6b5335
FN
1685 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1686 tcg_temp_free(tmp3);
1687 tcg_temp_free(tmp2);
7d1b0095 1688 tcg_temp_free_i32(tmp);
18c9b560
AZ
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 break;
1692 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1693 rd = (insn >> 12) & 0xf;
1694 wrd = (insn >> 16) & 0xf;
da6b5335 1695 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1696 return 1;
1697 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1698 tmp = tcg_temp_new_i32();
18c9b560
AZ
1699 switch ((insn >> 22) & 3) {
1700 case 0:
da6b5335
FN
1701 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1702 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1703 if (insn & 8) {
1704 tcg_gen_ext8s_i32(tmp, tmp);
1705 } else {
1706 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1707 }
1708 break;
1709 case 1:
da6b5335
FN
1710 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1711 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1712 if (insn & 8) {
1713 tcg_gen_ext16s_i32(tmp, tmp);
1714 } else {
1715 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1716 }
1717 break;
1718 case 2:
da6b5335
FN
1719 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1720 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1721 break;
18c9b560 1722 }
da6b5335 1723 store_reg(s, rd, tmp);
18c9b560
AZ
1724 break;
1725 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1726 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1727 return 1;
da6b5335 1728 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1729 switch ((insn >> 22) & 3) {
1730 case 0:
da6b5335 1731 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1732 break;
1733 case 1:
da6b5335 1734 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1735 break;
1736 case 2:
da6b5335 1737 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1738 break;
18c9b560 1739 }
da6b5335
FN
1740 tcg_gen_shli_i32(tmp, tmp, 28);
1741 gen_set_nzcv(tmp);
7d1b0095 1742 tcg_temp_free_i32(tmp);
18c9b560
AZ
1743 break;
1744 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1745 if (((insn >> 6) & 3) == 3)
1746 return 1;
18c9b560
AZ
1747 rd = (insn >> 12) & 0xf;
1748 wrd = (insn >> 16) & 0xf;
da6b5335 1749 tmp = load_reg(s, rd);
18c9b560
AZ
1750 switch ((insn >> 6) & 3) {
1751 case 0:
da6b5335 1752 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1753 break;
1754 case 1:
da6b5335 1755 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1756 break;
1757 case 2:
da6b5335 1758 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1759 break;
18c9b560 1760 }
7d1b0095 1761 tcg_temp_free_i32(tmp);
18c9b560
AZ
1762 gen_op_iwmmxt_movq_wRn_M0(wrd);
1763 gen_op_iwmmxt_set_mup();
1764 break;
1765 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1766 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1767 return 1;
da6b5335 1768 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1769 tmp2 = tcg_temp_new_i32();
da6b5335 1770 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1771 switch ((insn >> 22) & 3) {
1772 case 0:
1773 for (i = 0; i < 7; i ++) {
da6b5335
FN
1774 tcg_gen_shli_i32(tmp2, tmp2, 4);
1775 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1776 }
1777 break;
1778 case 1:
1779 for (i = 0; i < 3; i ++) {
da6b5335
FN
1780 tcg_gen_shli_i32(tmp2, tmp2, 8);
1781 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1782 }
1783 break;
1784 case 2:
da6b5335
FN
1785 tcg_gen_shli_i32(tmp2, tmp2, 16);
1786 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1787 break;
18c9b560 1788 }
da6b5335 1789 gen_set_nzcv(tmp);
7d1b0095
PM
1790 tcg_temp_free_i32(tmp2);
1791 tcg_temp_free_i32(tmp);
18c9b560
AZ
1792 break;
1793 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1794 wrd = (insn >> 12) & 0xf;
1795 rd0 = (insn >> 16) & 0xf;
1796 gen_op_iwmmxt_movq_M0_wRn(rd0);
1797 switch ((insn >> 22) & 3) {
1798 case 0:
e677137d 1799 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1800 break;
1801 case 1:
e677137d 1802 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1803 break;
1804 case 2:
e677137d 1805 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1806 break;
1807 case 3:
1808 return 1;
1809 }
1810 gen_op_iwmmxt_movq_wRn_M0(wrd);
1811 gen_op_iwmmxt_set_mup();
1812 break;
1813 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1814 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1815 return 1;
da6b5335 1816 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1817 tmp2 = tcg_temp_new_i32();
da6b5335 1818 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1819 switch ((insn >> 22) & 3) {
1820 case 0:
1821 for (i = 0; i < 7; i ++) {
da6b5335
FN
1822 tcg_gen_shli_i32(tmp2, tmp2, 4);
1823 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1824 }
1825 break;
1826 case 1:
1827 for (i = 0; i < 3; i ++) {
da6b5335
FN
1828 tcg_gen_shli_i32(tmp2, tmp2, 8);
1829 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1830 }
1831 break;
1832 case 2:
da6b5335
FN
1833 tcg_gen_shli_i32(tmp2, tmp2, 16);
1834 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1835 break;
18c9b560 1836 }
da6b5335 1837 gen_set_nzcv(tmp);
7d1b0095
PM
1838 tcg_temp_free_i32(tmp2);
1839 tcg_temp_free_i32(tmp);
18c9b560
AZ
1840 break;
1841 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1842 rd = (insn >> 12) & 0xf;
1843 rd0 = (insn >> 16) & 0xf;
da6b5335 1844 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1845 return 1;
1846 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1847 tmp = tcg_temp_new_i32();
18c9b560
AZ
1848 switch ((insn >> 22) & 3) {
1849 case 0:
da6b5335 1850 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1851 break;
1852 case 1:
da6b5335 1853 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1854 break;
1855 case 2:
da6b5335 1856 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1857 break;
18c9b560 1858 }
da6b5335 1859 store_reg(s, rd, tmp);
18c9b560
AZ
1860 break;
1861 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1862 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1863 wrd = (insn >> 12) & 0xf;
1864 rd0 = (insn >> 16) & 0xf;
1865 rd1 = (insn >> 0) & 0xf;
1866 gen_op_iwmmxt_movq_M0_wRn(rd0);
1867 switch ((insn >> 22) & 3) {
1868 case 0:
1869 if (insn & (1 << 21))
1870 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1871 else
1872 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1873 break;
1874 case 1:
1875 if (insn & (1 << 21))
1876 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1877 else
1878 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1879 break;
1880 case 2:
1881 if (insn & (1 << 21))
1882 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1883 else
1884 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1885 break;
1886 case 3:
1887 return 1;
1888 }
1889 gen_op_iwmmxt_movq_wRn_M0(wrd);
1890 gen_op_iwmmxt_set_mup();
1891 gen_op_iwmmxt_set_cup();
1892 break;
1893 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1894 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1895 wrd = (insn >> 12) & 0xf;
1896 rd0 = (insn >> 16) & 0xf;
1897 gen_op_iwmmxt_movq_M0_wRn(rd0);
1898 switch ((insn >> 22) & 3) {
1899 case 0:
1900 if (insn & (1 << 21))
1901 gen_op_iwmmxt_unpacklsb_M0();
1902 else
1903 gen_op_iwmmxt_unpacklub_M0();
1904 break;
1905 case 1:
1906 if (insn & (1 << 21))
1907 gen_op_iwmmxt_unpacklsw_M0();
1908 else
1909 gen_op_iwmmxt_unpackluw_M0();
1910 break;
1911 case 2:
1912 if (insn & (1 << 21))
1913 gen_op_iwmmxt_unpacklsl_M0();
1914 else
1915 gen_op_iwmmxt_unpacklul_M0();
1916 break;
1917 case 3:
1918 return 1;
1919 }
1920 gen_op_iwmmxt_movq_wRn_M0(wrd);
1921 gen_op_iwmmxt_set_mup();
1922 gen_op_iwmmxt_set_cup();
1923 break;
1924 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1925 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1926 wrd = (insn >> 12) & 0xf;
1927 rd0 = (insn >> 16) & 0xf;
1928 gen_op_iwmmxt_movq_M0_wRn(rd0);
1929 switch ((insn >> 22) & 3) {
1930 case 0:
1931 if (insn & (1 << 21))
1932 gen_op_iwmmxt_unpackhsb_M0();
1933 else
1934 gen_op_iwmmxt_unpackhub_M0();
1935 break;
1936 case 1:
1937 if (insn & (1 << 21))
1938 gen_op_iwmmxt_unpackhsw_M0();
1939 else
1940 gen_op_iwmmxt_unpackhuw_M0();
1941 break;
1942 case 2:
1943 if (insn & (1 << 21))
1944 gen_op_iwmmxt_unpackhsl_M0();
1945 else
1946 gen_op_iwmmxt_unpackhul_M0();
1947 break;
1948 case 3:
1949 return 1;
1950 }
1951 gen_op_iwmmxt_movq_wRn_M0(wrd);
1952 gen_op_iwmmxt_set_mup();
1953 gen_op_iwmmxt_set_cup();
1954 break;
1955 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1956 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1957 if (((insn >> 22) & 3) == 0)
1958 return 1;
18c9b560
AZ
1959 wrd = (insn >> 12) & 0xf;
1960 rd0 = (insn >> 16) & 0xf;
1961 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1962 tmp = tcg_temp_new_i32();
da6b5335 1963 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 1964 tcg_temp_free_i32(tmp);
18c9b560 1965 return 1;
da6b5335 1966 }
18c9b560 1967 switch ((insn >> 22) & 3) {
18c9b560 1968 case 1:
da6b5335 1969 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1970 break;
1971 case 2:
da6b5335 1972 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1973 break;
1974 case 3:
da6b5335 1975 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1976 break;
1977 }
7d1b0095 1978 tcg_temp_free_i32(tmp);
18c9b560
AZ
1979 gen_op_iwmmxt_movq_wRn_M0(wrd);
1980 gen_op_iwmmxt_set_mup();
1981 gen_op_iwmmxt_set_cup();
1982 break;
1983 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1984 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1985 if (((insn >> 22) & 3) == 0)
1986 return 1;
18c9b560
AZ
1987 wrd = (insn >> 12) & 0xf;
1988 rd0 = (insn >> 16) & 0xf;
1989 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1990 tmp = tcg_temp_new_i32();
da6b5335 1991 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 1992 tcg_temp_free_i32(tmp);
18c9b560 1993 return 1;
da6b5335 1994 }
18c9b560 1995 switch ((insn >> 22) & 3) {
18c9b560 1996 case 1:
da6b5335 1997 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1998 break;
1999 case 2:
da6b5335 2000 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2001 break;
2002 case 3:
da6b5335 2003 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2004 break;
2005 }
7d1b0095 2006 tcg_temp_free_i32(tmp);
18c9b560
AZ
2007 gen_op_iwmmxt_movq_wRn_M0(wrd);
2008 gen_op_iwmmxt_set_mup();
2009 gen_op_iwmmxt_set_cup();
2010 break;
2011 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2012 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2013 if (((insn >> 22) & 3) == 0)
2014 return 1;
18c9b560
AZ
2015 wrd = (insn >> 12) & 0xf;
2016 rd0 = (insn >> 16) & 0xf;
2017 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2018 tmp = tcg_temp_new_i32();
da6b5335 2019 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2020 tcg_temp_free_i32(tmp);
18c9b560 2021 return 1;
da6b5335 2022 }
18c9b560 2023 switch ((insn >> 22) & 3) {
18c9b560 2024 case 1:
da6b5335 2025 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2026 break;
2027 case 2:
da6b5335 2028 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2029 break;
2030 case 3:
da6b5335 2031 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2032 break;
2033 }
7d1b0095 2034 tcg_temp_free_i32(tmp);
18c9b560
AZ
2035 gen_op_iwmmxt_movq_wRn_M0(wrd);
2036 gen_op_iwmmxt_set_mup();
2037 gen_op_iwmmxt_set_cup();
2038 break;
2039 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2040 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2041 if (((insn >> 22) & 3) == 0)
2042 return 1;
18c9b560
AZ
2043 wrd = (insn >> 12) & 0xf;
2044 rd0 = (insn >> 16) & 0xf;
2045 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2046 tmp = tcg_temp_new_i32();
18c9b560 2047 switch ((insn >> 22) & 3) {
18c9b560 2048 case 1:
da6b5335 2049 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2050 tcg_temp_free_i32(tmp);
18c9b560 2051 return 1;
da6b5335
FN
2052 }
2053 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2054 break;
2055 case 2:
da6b5335 2056 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2057 tcg_temp_free_i32(tmp);
18c9b560 2058 return 1;
da6b5335
FN
2059 }
2060 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2061 break;
2062 case 3:
da6b5335 2063 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2064 tcg_temp_free_i32(tmp);
18c9b560 2065 return 1;
da6b5335
FN
2066 }
2067 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2068 break;
2069 }
7d1b0095 2070 tcg_temp_free_i32(tmp);
18c9b560
AZ
2071 gen_op_iwmmxt_movq_wRn_M0(wrd);
2072 gen_op_iwmmxt_set_mup();
2073 gen_op_iwmmxt_set_cup();
2074 break;
2075 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2076 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2077 wrd = (insn >> 12) & 0xf;
2078 rd0 = (insn >> 16) & 0xf;
2079 rd1 = (insn >> 0) & 0xf;
2080 gen_op_iwmmxt_movq_M0_wRn(rd0);
2081 switch ((insn >> 22) & 3) {
2082 case 0:
2083 if (insn & (1 << 21))
2084 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2085 else
2086 gen_op_iwmmxt_minub_M0_wRn(rd1);
2087 break;
2088 case 1:
2089 if (insn & (1 << 21))
2090 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2091 else
2092 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2093 break;
2094 case 2:
2095 if (insn & (1 << 21))
2096 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2097 else
2098 gen_op_iwmmxt_minul_M0_wRn(rd1);
2099 break;
2100 case 3:
2101 return 1;
2102 }
2103 gen_op_iwmmxt_movq_wRn_M0(wrd);
2104 gen_op_iwmmxt_set_mup();
2105 break;
2106 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2107 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2108 wrd = (insn >> 12) & 0xf;
2109 rd0 = (insn >> 16) & 0xf;
2110 rd1 = (insn >> 0) & 0xf;
2111 gen_op_iwmmxt_movq_M0_wRn(rd0);
2112 switch ((insn >> 22) & 3) {
2113 case 0:
2114 if (insn & (1 << 21))
2115 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2116 else
2117 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2118 break;
2119 case 1:
2120 if (insn & (1 << 21))
2121 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2122 else
2123 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2124 break;
2125 case 2:
2126 if (insn & (1 << 21))
2127 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2128 else
2129 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2130 break;
2131 case 3:
2132 return 1;
2133 }
2134 gen_op_iwmmxt_movq_wRn_M0(wrd);
2135 gen_op_iwmmxt_set_mup();
2136 break;
2137 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2138 case 0x402: case 0x502: case 0x602: case 0x702:
2139 wrd = (insn >> 12) & 0xf;
2140 rd0 = (insn >> 16) & 0xf;
2141 rd1 = (insn >> 0) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2143 tmp = tcg_const_i32((insn >> 20) & 3);
2144 iwmmxt_load_reg(cpu_V1, rd1);
2145 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2146 tcg_temp_free(tmp);
18c9b560
AZ
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
2150 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2151 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2152 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2153 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2154 wrd = (insn >> 12) & 0xf;
2155 rd0 = (insn >> 16) & 0xf;
2156 rd1 = (insn >> 0) & 0xf;
2157 gen_op_iwmmxt_movq_M0_wRn(rd0);
2158 switch ((insn >> 20) & 0xf) {
2159 case 0x0:
2160 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2161 break;
2162 case 0x1:
2163 gen_op_iwmmxt_subub_M0_wRn(rd1);
2164 break;
2165 case 0x3:
2166 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2167 break;
2168 case 0x4:
2169 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2170 break;
2171 case 0x5:
2172 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2173 break;
2174 case 0x7:
2175 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2176 break;
2177 case 0x8:
2178 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2179 break;
2180 case 0x9:
2181 gen_op_iwmmxt_subul_M0_wRn(rd1);
2182 break;
2183 case 0xb:
2184 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2185 break;
2186 default:
2187 return 1;
2188 }
2189 gen_op_iwmmxt_movq_wRn_M0(wrd);
2190 gen_op_iwmmxt_set_mup();
2191 gen_op_iwmmxt_set_cup();
2192 break;
2193 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2194 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2195 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2196 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2197 wrd = (insn >> 12) & 0xf;
2198 rd0 = (insn >> 16) & 0xf;
2199 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2200 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2201 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2202 tcg_temp_free(tmp);
18c9b560
AZ
2203 gen_op_iwmmxt_movq_wRn_M0(wrd);
2204 gen_op_iwmmxt_set_mup();
2205 gen_op_iwmmxt_set_cup();
2206 break;
2207 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2208 case 0x418: case 0x518: case 0x618: case 0x718:
2209 case 0x818: case 0x918: case 0xa18: case 0xb18:
2210 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2211 wrd = (insn >> 12) & 0xf;
2212 rd0 = (insn >> 16) & 0xf;
2213 rd1 = (insn >> 0) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0);
2215 switch ((insn >> 20) & 0xf) {
2216 case 0x0:
2217 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2218 break;
2219 case 0x1:
2220 gen_op_iwmmxt_addub_M0_wRn(rd1);
2221 break;
2222 case 0x3:
2223 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2224 break;
2225 case 0x4:
2226 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2227 break;
2228 case 0x5:
2229 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2230 break;
2231 case 0x7:
2232 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2233 break;
2234 case 0x8:
2235 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2236 break;
2237 case 0x9:
2238 gen_op_iwmmxt_addul_M0_wRn(rd1);
2239 break;
2240 case 0xb:
2241 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2242 break;
2243 default:
2244 return 1;
2245 }
2246 gen_op_iwmmxt_movq_wRn_M0(wrd);
2247 gen_op_iwmmxt_set_mup();
2248 gen_op_iwmmxt_set_cup();
2249 break;
2250 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2251 case 0x408: case 0x508: case 0x608: case 0x708:
2252 case 0x808: case 0x908: case 0xa08: case 0xb08:
2253 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2254 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2255 return 1;
18c9b560
AZ
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2260 switch ((insn >> 22) & 3) {
18c9b560
AZ
2261 case 1:
2262 if (insn & (1 << 21))
2263 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2264 else
2265 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2266 break;
2267 case 2:
2268 if (insn & (1 << 21))
2269 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2270 else
2271 gen_op_iwmmxt_packul_M0_wRn(rd1);
2272 break;
2273 case 3:
2274 if (insn & (1 << 21))
2275 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2276 else
2277 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2278 break;
2279 }
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 gen_op_iwmmxt_set_cup();
2283 break;
2284 case 0x201: case 0x203: case 0x205: case 0x207:
2285 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2286 case 0x211: case 0x213: case 0x215: case 0x217:
2287 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2288 wrd = (insn >> 5) & 0xf;
2289 rd0 = (insn >> 12) & 0xf;
2290 rd1 = (insn >> 0) & 0xf;
2291 if (rd0 == 0xf || rd1 == 0xf)
2292 return 1;
2293 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2294 tmp = load_reg(s, rd0);
2295 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2296 switch ((insn >> 16) & 0xf) {
2297 case 0x0: /* TMIA */
da6b5335 2298 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2299 break;
2300 case 0x8: /* TMIAPH */
da6b5335 2301 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2302 break;
2303 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2304 if (insn & (1 << 16))
da6b5335 2305 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2306 if (insn & (1 << 17))
da6b5335
FN
2307 tcg_gen_shri_i32(tmp2, tmp2, 16);
2308 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2309 break;
2310 default:
7d1b0095
PM
2311 tcg_temp_free_i32(tmp2);
2312 tcg_temp_free_i32(tmp);
18c9b560
AZ
2313 return 1;
2314 }
7d1b0095
PM
2315 tcg_temp_free_i32(tmp2);
2316 tcg_temp_free_i32(tmp);
18c9b560
AZ
2317 gen_op_iwmmxt_movq_wRn_M0(wrd);
2318 gen_op_iwmmxt_set_mup();
2319 break;
2320 default:
2321 return 1;
2322 }
2323
2324 return 0;
2325}
2326
2327/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2328 (i.e. an undefined instruction). */
2329static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2330{
2331 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2332 TCGv tmp, tmp2;
18c9b560
AZ
2333
2334 if ((insn & 0x0ff00f10) == 0x0e200010) {
2335 /* Multiply with Internal Accumulate Format */
2336 rd0 = (insn >> 12) & 0xf;
2337 rd1 = insn & 0xf;
2338 acc = (insn >> 5) & 7;
2339
2340 if (acc != 0)
2341 return 1;
2342
3a554c0f
FN
2343 tmp = load_reg(s, rd0);
2344 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2345 switch ((insn >> 16) & 0xf) {
2346 case 0x0: /* MIA */
3a554c0f 2347 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2348 break;
2349 case 0x8: /* MIAPH */
3a554c0f 2350 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2351 break;
2352 case 0xc: /* MIABB */
2353 case 0xd: /* MIABT */
2354 case 0xe: /* MIATB */
2355 case 0xf: /* MIATT */
18c9b560 2356 if (insn & (1 << 16))
3a554c0f 2357 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2358 if (insn & (1 << 17))
3a554c0f
FN
2359 tcg_gen_shri_i32(tmp2, tmp2, 16);
2360 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2361 break;
2362 default:
2363 return 1;
2364 }
7d1b0095
PM
2365 tcg_temp_free_i32(tmp2);
2366 tcg_temp_free_i32(tmp);
18c9b560
AZ
2367
2368 gen_op_iwmmxt_movq_wRn_M0(acc);
2369 return 0;
2370 }
2371
2372 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2373 /* Internal Accumulator Access Format */
2374 rdhi = (insn >> 16) & 0xf;
2375 rdlo = (insn >> 12) & 0xf;
2376 acc = insn & 7;
2377
2378 if (acc != 0)
2379 return 1;
2380
2381 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2382 iwmmxt_load_reg(cpu_V0, acc);
2383 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2384 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2385 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
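                /* The internal accumulator is 40 bits wide, so only the low
                   8 bits of the high word are architecturally significant. */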
2386 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2387 } else { /* MAR */
3a554c0f
FN
2388 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2389 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2390 }
2391 return 0;
2392 }
2393
2394 return 1;
2395}
2396
c1713132
AZ
2397/* Disassemble a system coprocessor instruction. Return nonzero if the
2398 instruction is not defined. */
2399static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2400{
b75263d6 2401 TCGv tmp, tmp2;
c1713132
AZ
2402 uint32_t rd = (insn >> 12) & 0xf;
2403 uint32_t cp = (insn >> 8) & 0xf;
2404 if (IS_USER(s)) {
2405 return 1;
2406 }
2407
18c9b560 2408 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2409 if (!env->cp[cp].cp_read)
2410 return 1;
8984bd2e 2411 gen_set_pc_im(s->pc);
7d1b0095 2412 tmp = tcg_temp_new_i32();
b75263d6
JR
2413 tmp2 = tcg_const_i32(insn);
2414 gen_helper_get_cp(tmp, cpu_env, tmp2);
2415 tcg_temp_free(tmp2);
8984bd2e 2416 store_reg(s, rd, tmp);
c1713132
AZ
2417 } else {
2418 if (!env->cp[cp].cp_write)
2419 return 1;
8984bd2e
PB
2420 gen_set_pc_im(s->pc);
2421 tmp = load_reg(s, rd);
b75263d6
JR
2422 tmp2 = tcg_const_i32(insn);
2423 gen_helper_set_cp(cpu_env, tmp2, tmp);
2424 tcg_temp_free(tmp2);
7d1b0095 2425 tcg_temp_free_i32(tmp);
c1713132
AZ
2426 }
2427 return 0;
2428}
2429
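/* User-mode cp15 accesses: only the user TLS registers (c13, c0) and the
 * c7 barrier encodings (ISB, DSB, DMB) are permitted; everything else must
 * trap as privileged. */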
9ee6e8bb
PB
2430static int cp15_user_ok(uint32_t insn)
2431{
2432 int cpn = (insn >> 16) & 0xf;
2433 int cpm = insn & 0xf;
2434 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2435
2436 if (cpn == 13 && cpm == 0) {
2437 /* TLS register. */
2438 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2439 return 1;
2440 }
2441 if (cpn == 7) {
2442 /* ISB, DSB, DMB. */
2443 if ((cpm == 5 && op == 4)
2444 || (cpm == 10 && (op == 4 || op == 5)))
2445 return 1;
2446 }
2447 return 0;
2448}
2449
3f26c122
RV
2450static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2451{
2452 TCGv tmp;
2453 int cpn = (insn >> 16) & 0xf;
2454 int cpm = insn & 0xf;
2455 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2456
2457 if (!arm_feature(env, ARM_FEATURE_V6K))
2458 return 0;
2459
2460 if (!(cpn == 13 && cpm == 0))
2461 return 0;
2462
2463 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2464 switch (op) {
2465 case 2:
c5883be2 2466 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2467 break;
2468 case 3:
c5883be2 2469 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2470 break;
2471 case 4:
c5883be2 2472 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2473 break;
2474 default:
3f26c122
RV
2475 return 0;
2476 }
2477 store_reg(s, rd, tmp);
2478
2479 } else {
2480 tmp = load_reg(s, rd);
2481 switch (op) {
2482 case 2:
c5883be2 2483 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2484 break;
2485 case 3:
c5883be2 2486 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2487 break;
2488 case 4:
c5883be2 2489 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2490 break;
2491 default:
7d1b0095 2492 tcg_temp_free_i32(tmp);
3f26c122
RV
2493 return 0;
2494 }
3f26c122
RV
2495 }
2496 return 1;
2497}
2498
b5ff1b31
FB
2499/* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2500 instruction is not defined. */
a90b7318 2501static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2502{
2503 uint32_t rd;
b75263d6 2504 TCGv tmp, tmp2;
b5ff1b31 2505
9ee6e8bb
PB
2506 /* M profile cores use memory mapped registers instead of cp15. */
2507 if (arm_feature(env, ARM_FEATURE_M))
2508 return 1;
2509
2510 if ((insn & (1 << 25)) == 0) {
2511 if (insn & (1 << 20)) {
2512 /* mrrc */
2513 return 1;
2514 }
2515 /* mcrr. Used for block cache operations, so implement as no-op. */
2516 return 0;
2517 }
2518 if ((insn & (1 << 4)) == 0) {
2519 /* cdp */
2520 return 1;
2521 }
2522 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2523 return 1;
2524 }
cc688901
PM
2525
2526 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2527 * instructions rather than a separate instruction.
2528 */
2529 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2530 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2531 * In v7, this must NOP.
2532 */
2533 if (!arm_feature(env, ARM_FEATURE_V7)) {
2534 /* Wait for interrupt. */
2535 gen_set_pc_im(s->pc);
2536 s->is_jmp = DISAS_WFI;
2537 }
9332f9da
FB
2538 return 0;
2539 }
cc688901
PM
2540
2541 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2542 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2543 * so this is slightly over-broad.
2544 */
2545 if (!arm_feature(env, ARM_FEATURE_V6)) {
2546 /* Wait for interrupt. */
2547 gen_set_pc_im(s->pc);
2548 s->is_jmp = DISAS_WFI;
2549 return 0;
2550 }
2551 /* Otherwise fall through to handle via helper function.
2552 * In particular, on v7 and some v6 cores this is one of
2553 * the VA-PA registers.
2554 */
2555 }
2556
b5ff1b31 2557 rd = (insn >> 12) & 0xf;
3f26c122
RV
2558
2559 if (cp15_tls_load_store(env, s, insn, rd))
2560 return 0;
2561
b75263d6 2562 tmp2 = tcg_const_i32(insn);
18c9b560 2563 if (insn & ARM_CP_RW_BIT) {
7d1b0095 2564 tmp = tcg_temp_new_i32();
b75263d6 2565 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2566 /* If the destination register is r15 then the condition codes are set. */
2567 if (rd != 15)
8984bd2e
PB
2568 store_reg(s, rd, tmp);
2569 else
7d1b0095 2570 tcg_temp_free_i32(tmp);
b5ff1b31 2571 } else {
8984bd2e 2572 tmp = load_reg(s, rd);
b75263d6 2573 gen_helper_set_cp15(cpu_env, tmp2, tmp);
7d1b0095 2574 tcg_temp_free_i32(tmp);
a90b7318
AZ
2575 /* Normally we would always end the TB here, but Linux
2576 * arch/arm/mach-pxa/sleep.S expects two instructions following
2577 * an MMU enable to execute from cache. Imitate this behaviour. */
2578 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2579 (insn & 0x0fff0fff) != 0x0e010f10)
2580 gen_lookup_tb(s);
b5ff1b31 2581 }
b75263d6 2582 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2583 return 0;
2584}
2585
9ee6e8bb
PB
2586#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2587#define VFP_SREG(insn, bigbit, smallbit) \
2588 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2589#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2590 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2591 reg = (((insn) >> (bigbit)) & 0x0f) \
2592 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2593 } else { \
2594 if (insn & (1 << (smallbit))) \
2595 return 1; \
2596 reg = ((insn) >> (bigbit)) & 0x0f; \
2597 }} while (0)
2598
2599#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2600#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2601#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2602#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2603#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2604#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
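/* Worked example: on a VFP3 core VFP_DREG_D combines insn[15:12] with the D
 * bit (insn[22]) as the top bit of the register index, so insn[15:12] = 0x5
 * with D = 1 selects d21. The single precision forms instead place the extra
 * bit at the bottom of the index, and pre-VFP3 cores require it to be zero. */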
2605
4373f3ce
PB
2606/* Move between integer and VFP cores. */
2607static TCGv gen_vfp_mrs(void)
2608{
7d1b0095 2609 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2610 tcg_gen_mov_i32(tmp, cpu_F0s);
2611 return tmp;
2612}
2613
2614static void gen_vfp_msr(TCGv tmp)
2615{
2616 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2617 tcg_temp_free_i32(tmp);
4373f3ce
PB
2618}
2619
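/* The gen_neon_dup_* helpers below broadcast one byte or halfword lane across
 * a 32-bit value (e.g. 0x000000ab becomes 0xabababab); they are used for VDUP
 * and for the load-and-replicate element loads. */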
ad69471c
PB
2620static void gen_neon_dup_u8(TCGv var, int shift)
2621{
7d1b0095 2622 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2623 if (shift)
2624 tcg_gen_shri_i32(var, var, shift);
86831435 2625 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2626 tcg_gen_shli_i32(tmp, var, 8);
2627 tcg_gen_or_i32(var, var, tmp);
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2630 tcg_temp_free_i32(tmp);
ad69471c
PB
2631}
2632
2633static void gen_neon_dup_low16(TCGv var)
2634{
7d1b0095 2635 TCGv tmp = tcg_temp_new_i32();
86831435 2636 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2637 tcg_gen_shli_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2639 tcg_temp_free_i32(tmp);
ad69471c
PB
2640}
2641
2642static void gen_neon_dup_high16(TCGv var)
2643{
7d1b0095 2644 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2645 tcg_gen_andi_i32(var, var, 0xffff0000);
2646 tcg_gen_shri_i32(tmp, var, 16);
2647 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2648 tcg_temp_free_i32(tmp);
ad69471c
PB
2649}
2650
8e18cde3
PM
2651static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2652{
2653 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2654 TCGv tmp;
2655 switch (size) {
2656 case 0:
2657 tmp = gen_ld8u(addr, IS_USER(s));
2658 gen_neon_dup_u8(tmp, 0);
2659 break;
2660 case 1:
2661 tmp = gen_ld16u(addr, IS_USER(s));
2662 gen_neon_dup_low16(tmp);
2663 break;
2664 case 2:
2665 tmp = gen_ld32(addr, IS_USER(s));
2666 break;
2667 default: /* Avoid compiler warnings. */
2668 abort();
2669 }
2670 return tmp;
2671}
2672
b7bcbe95
FB
2673/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2674 (i.e. an undefined instruction). */
2675static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2676{
2677 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2678 int dp, veclen;
312eea9f 2679 TCGv addr;
4373f3ce 2680 TCGv tmp;
ad69471c 2681 TCGv tmp2;
b7bcbe95 2682
40f137e1
PB
2683 if (!arm_feature(env, ARM_FEATURE_VFP))
2684 return 1;
2685
5df8bac1 2686 if (!s->vfp_enabled) {
9ee6e8bb 2687 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2688 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2689 return 1;
2690 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2691 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2692 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2693 return 1;
2694 }
b7bcbe95
FB
2695 dp = ((insn & 0xf00) == 0xb00);
2696 switch ((insn >> 24) & 0xf) {
2697 case 0xe:
2698 if (insn & (1 << 4)) {
2699 /* single register transfer */
b7bcbe95
FB
2700 rd = (insn >> 12) & 0xf;
2701 if (dp) {
9ee6e8bb
PB
2702 int size;
2703 int pass;
2704
2705 VFP_DREG_N(rn, insn);
2706 if (insn & 0xf)
b7bcbe95 2707 return 1;
9ee6e8bb
PB
2708 if (insn & 0x00c00060
2709 && !arm_feature(env, ARM_FEATURE_NEON))
2710 return 1;
2711
2712 pass = (insn >> 21) & 1;
2713 if (insn & (1 << 22)) {
2714 size = 0;
2715 offset = ((insn >> 5) & 3) * 8;
2716 } else if (insn & (1 << 5)) {
2717 size = 1;
2718 offset = (insn & (1 << 6)) ? 16 : 0;
2719 } else {
2720 size = 2;
2721 offset = 0;
2722 }
18c9b560 2723 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2724 /* vfp->arm */
ad69471c 2725 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2726 switch (size) {
2727 case 0:
9ee6e8bb 2728 if (offset)
ad69471c 2729 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2730 if (insn & (1 << 23))
ad69471c 2731 gen_uxtb(tmp);
9ee6e8bb 2732 else
ad69471c 2733 gen_sxtb(tmp);
9ee6e8bb
PB
2734 break;
2735 case 1:
9ee6e8bb
PB
2736 if (insn & (1 << 23)) {
2737 if (offset) {
ad69471c 2738 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2739 } else {
ad69471c 2740 gen_uxth(tmp);
9ee6e8bb
PB
2741 }
2742 } else {
2743 if (offset) {
ad69471c 2744 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2745 } else {
ad69471c 2746 gen_sxth(tmp);
9ee6e8bb
PB
2747 }
2748 }
2749 break;
2750 case 2:
9ee6e8bb
PB
2751 break;
2752 }
ad69471c 2753 store_reg(s, rd, tmp);
b7bcbe95
FB
2754 } else {
2755 /* arm->vfp */
ad69471c 2756 tmp = load_reg(s, rd);
9ee6e8bb
PB
2757 if (insn & (1 << 23)) {
2758 /* VDUP */
2759 if (size == 0) {
ad69471c 2760 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2761 } else if (size == 1) {
ad69471c 2762 gen_neon_dup_low16(tmp);
9ee6e8bb 2763 }
cbbccffc 2764 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2765 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2766 tcg_gen_mov_i32(tmp2, tmp);
2767 neon_store_reg(rn, n, tmp2);
2768 }
2769 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2770 } else {
2771 /* VMOV */
2772 switch (size) {
2773 case 0:
ad69471c
PB
2774 tmp2 = neon_load_reg(rn, pass);
2775 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2776 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2777 break;
2778 case 1:
ad69471c
PB
2779 tmp2 = neon_load_reg(rn, pass);
2780 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2781 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2782 break;
2783 case 2:
9ee6e8bb
PB
2784 break;
2785 }
ad69471c 2786 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2787 }
b7bcbe95 2788 }
9ee6e8bb
PB
2789 } else { /* !dp */
2790 if ((insn & 0x6f) != 0x00)
2791 return 1;
2792 rn = VFP_SREG_N(insn);
18c9b560 2793 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2794 /* vfp->arm */
2795 if (insn & (1 << 21)) {
2796 /* system register */
40f137e1 2797 rn >>= 1;
9ee6e8bb 2798
b7bcbe95 2799 switch (rn) {
40f137e1 2800 case ARM_VFP_FPSID:
4373f3ce 2801 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2802 VFP3 restricts all id registers to privileged
2803 accesses. */
2804 if (IS_USER(s)
2805 && arm_feature(env, ARM_FEATURE_VFP3))
2806 return 1;
4373f3ce 2807 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2808 break;
40f137e1 2809 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2810 if (IS_USER(s))
2811 return 1;
4373f3ce 2812 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2813 break;
40f137e1
PB
2814 case ARM_VFP_FPINST:
2815 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2816 /* Not present in VFP3. */
2817 if (IS_USER(s)
2818 || arm_feature(env, ARM_FEATURE_VFP3))
2819 return 1;
4373f3ce 2820 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2821 break;
40f137e1 2822 case ARM_VFP_FPSCR:
601d70b9 2823 if (rd == 15) {
4373f3ce
PB
2824 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2825 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2826 } else {
7d1b0095 2827 tmp = tcg_temp_new_i32();
4373f3ce
PB
2828 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2829 }
b7bcbe95 2830 break;
9ee6e8bb
PB
2831 case ARM_VFP_MVFR0:
2832 case ARM_VFP_MVFR1:
2833 if (IS_USER(s)
2834 || !arm_feature(env, ARM_FEATURE_VFP3))
2835 return 1;
4373f3ce 2836 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2837 break;
b7bcbe95
FB
2838 default:
2839 return 1;
2840 }
2841 } else {
2842 gen_mov_F0_vreg(0, rn);
4373f3ce 2843 tmp = gen_vfp_mrs();
b7bcbe95
FB
2844 }
2845 if (rd == 15) {
b5ff1b31 2846 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2847 gen_set_nzcv(tmp);
7d1b0095 2848 tcg_temp_free_i32(tmp);
4373f3ce
PB
2849 } else {
2850 store_reg(s, rd, tmp);
2851 }
b7bcbe95
FB
2852 } else {
2853 /* arm->vfp */
4373f3ce 2854 tmp = load_reg(s, rd);
b7bcbe95 2855 if (insn & (1 << 21)) {
40f137e1 2856 rn >>= 1;
b7bcbe95
FB
2857 /* system register */
2858 switch (rn) {
40f137e1 2859 case ARM_VFP_FPSID:
9ee6e8bb
PB
2860 case ARM_VFP_MVFR0:
2861 case ARM_VFP_MVFR1:
b7bcbe95
FB
2862 /* Writes are ignored. */
2863 break;
40f137e1 2864 case ARM_VFP_FPSCR:
4373f3ce 2865 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2866 tcg_temp_free_i32(tmp);
b5ff1b31 2867 gen_lookup_tb(s);
b7bcbe95 2868 break;
40f137e1 2869 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2870 if (IS_USER(s))
2871 return 1;
71b3c3de
JR
2872 /* TODO: VFP subarchitecture support.
2873 * For now, keep the EN bit only */
2874 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2875 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2876 gen_lookup_tb(s);
2877 break;
2878 case ARM_VFP_FPINST:
2879 case ARM_VFP_FPINST2:
4373f3ce 2880 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2881 break;
b7bcbe95
FB
2882 default:
2883 return 1;
2884 }
2885 } else {
4373f3ce 2886 gen_vfp_msr(tmp);
b7bcbe95
FB
2887 gen_mov_vreg_F0(0, rn);
2888 }
2889 }
2890 }
2891 } else {
2892 /* data processing */
2893 /* The opcode is in bits 23, 21, 20 and 6. */
2894 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2895 if (dp) {
2896 if (op == 15) {
2897 /* rn is opcode */
2898 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2899 } else {
2900 /* rn is register number */
9ee6e8bb 2901 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2902 }
2903
04595bf6 2904 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2905 /* Integer or single precision destination. */
9ee6e8bb 2906 rd = VFP_SREG_D(insn);
b7bcbe95 2907 } else {
9ee6e8bb 2908 VFP_DREG_D(rd, insn);
b7bcbe95 2909 }
04595bf6
PM
2910 if (op == 15 &&
2911 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2912 /* VCVT from int is always from S reg regardless of dp bit.
2913 * VCVT with immediate frac_bits has same format as SREG_M
2914 */
2915 rm = VFP_SREG_M(insn);
b7bcbe95 2916 } else {
9ee6e8bb 2917 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2918 }
2919 } else {
9ee6e8bb 2920 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2921 if (op == 15 && rn == 15) {
2922 /* Double precision destination. */
9ee6e8bb
PB
2923 VFP_DREG_D(rd, insn);
2924 } else {
2925 rd = VFP_SREG_D(insn);
2926 }
04595bf6
PM
2927 /* NB that we implicitly rely on the encoding for the frac_bits
2928 * in VCVT of fixed to float being the same as that of an SREG_M
2929 */
9ee6e8bb 2930 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2931 }
2932
69d1fc22 2933 veclen = s->vec_len;
b7bcbe95
FB
2934 if (op == 15 && rn > 3)
2935 veclen = 0;
2936
2937 /* Shut up compiler warnings. */
2938 delta_m = 0;
2939 delta_d = 0;
2940 bank_mask = 0;
3b46e624 2941
b7bcbe95
FB
2942 if (veclen > 0) {
2943 if (dp)
2944 bank_mask = 0xc;
2945 else
2946 bank_mask = 0x18;
2947
2948 /* Figure out what type of vector operation this is. */
2949 if ((rd & bank_mask) == 0) {
2950 /* scalar */
2951 veclen = 0;
2952 } else {
2953 if (dp)
69d1fc22 2954 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2955 else
69d1fc22 2956 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2957
2958 if ((rm & bank_mask) == 0) {
2959 /* mixed scalar/vector */
2960 delta_m = 0;
2961 } else {
2962 /* vector */
2963 delta_m = delta_d;
2964 }
2965 }
2966 }
2967
2968 /* Load the initial operands. */
2969 if (op == 15) {
2970 switch (rn) {
2971 case 16:
2972 case 17:
2973 /* Integer source */
2974 gen_mov_F0_vreg(0, rm);
2975 break;
2976 case 8:
2977 case 9:
2978 /* Compare */
2979 gen_mov_F0_vreg(dp, rd);
2980 gen_mov_F1_vreg(dp, rm);
2981 break;
2982 case 10:
2983 case 11:
2984 /* Compare with zero */
2985 gen_mov_F0_vreg(dp, rd);
2986 gen_vfp_F1_ld0(dp);
2987 break;
9ee6e8bb
PB
2988 case 20:
2989 case 21:
2990 case 22:
2991 case 23:
644ad806
PB
2992 case 28:
2993 case 29:
2994 case 30:
2995 case 31:
9ee6e8bb
PB
2996 /* Source and destination the same. */
2997 gen_mov_F0_vreg(dp, rd);
2998 break;
b7bcbe95
FB
2999 default:
3000 /* One source operand. */
3001 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3002 break;
b7bcbe95
FB
3003 }
3004 } else {
3005 /* Two source operands. */
3006 gen_mov_F0_vreg(dp, rn);
3007 gen_mov_F1_vreg(dp, rm);
3008 }
3009
3010 for (;;) {
3011 /* Perform the calculation. */
3012 switch (op) {
3013 case 0: /* mac: fd + (fn * fm) */
3014 gen_vfp_mul(dp);
3015 gen_mov_F1_vreg(dp, rd);
3016 gen_vfp_add(dp);
3017 break;
3018 case 1: /* nmac: fd - (fn * fm) */
3019 gen_vfp_mul(dp);
3020 gen_vfp_neg(dp);
3021 gen_mov_F1_vreg(dp, rd);
3022 gen_vfp_add(dp);
3023 break;
3024 case 2: /* msc: -fd + (fn * fm) */
3025 gen_vfp_mul(dp);
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_sub(dp);
3028 break;
3029 case 3: /* nmsc: -fd - (fn * fm) */
3030 gen_vfp_mul(dp);
b7bcbe95 3031 gen_vfp_neg(dp);
c9fb531a
PB
3032 gen_mov_F1_vreg(dp, rd);
3033 gen_vfp_sub(dp);
b7bcbe95
FB
3034 break;
3035 case 4: /* mul: fn * fm */
3036 gen_vfp_mul(dp);
3037 break;
3038 case 5: /* nmul: -(fn * fm) */
3039 gen_vfp_mul(dp);
3040 gen_vfp_neg(dp);
3041 break;
3042 case 6: /* add: fn + fm */
3043 gen_vfp_add(dp);
3044 break;
3045 case 7: /* sub: fn - fm */
3046 gen_vfp_sub(dp);
3047 break;
3048 case 8: /* div: fn / fm */
3049 gen_vfp_div(dp);
3050 break;
9ee6e8bb
PB
3051 case 14: /* fconst */
3052 if (!arm_feature(env, ARM_FEATURE_VFP3))
3053 return 1;
3054
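                    /* VMOV (immediate): the 8-bit encoded constant is split
                       across insn[19:16] and insn[3:0]; bit 19 supplies the
                       sign, and the exponent pattern for single or double
                       precision is ORed in below. */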
3055 n = (insn << 12) & 0x80000000;
3056 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3057 if (dp) {
3058 if (i & 0x40)
3059 i |= 0x3f80;
3060 else
3061 i |= 0x4000;
3062 n |= i << 16;
4373f3ce 3063 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3064 } else {
3065 if (i & 0x40)
3066 i |= 0x780;
3067 else
3068 i |= 0x800;
3069 n |= i << 19;
5b340b51 3070 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3071 }
9ee6e8bb 3072 break;
b7bcbe95
FB
3073 case 15: /* extension space */
3074 switch (rn) {
3075 case 0: /* cpy */
3076 /* no-op */
3077 break;
3078 case 1: /* abs */
3079 gen_vfp_abs(dp);
3080 break;
3081 case 2: /* neg */
3082 gen_vfp_neg(dp);
3083 break;
3084 case 3: /* sqrt */
3085 gen_vfp_sqrt(dp);
3086 break;
60011498
PB
3087 case 4: /* vcvtb.f32.f16 */
3088 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3089 return 1;
3090 tmp = gen_vfp_mrs();
3091 tcg_gen_ext16u_i32(tmp, tmp);
3092 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3093 tcg_temp_free_i32(tmp);
60011498
PB
3094 break;
3095 case 5: /* vcvtt.f32.f16 */
3096 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3097 return 1;
3098 tmp = gen_vfp_mrs();
3099 tcg_gen_shri_i32(tmp, tmp, 16);
3100 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3101 tcg_temp_free_i32(tmp);
60011498
PB
3102 break;
3103 case 6: /* vcvtb.f16.f32 */
3104 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3105 return 1;
7d1b0095 3106 tmp = tcg_temp_new_i32();
60011498
PB
3107 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3108 gen_mov_F0_vreg(0, rd);
3109 tmp2 = gen_vfp_mrs();
3110 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3111 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3112 tcg_temp_free_i32(tmp2);
60011498
PB
3113 gen_vfp_msr(tmp);
3114 break;
3115 case 7: /* vcvtt.f16.f32 */
3116 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3117 return 1;
7d1b0095 3118 tmp = tcg_temp_new_i32();
60011498
PB
3119 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3120 tcg_gen_shli_i32(tmp, tmp, 16);
3121 gen_mov_F0_vreg(0, rd);
3122 tmp2 = gen_vfp_mrs();
3123 tcg_gen_ext16u_i32(tmp2, tmp2);
3124 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3125 tcg_temp_free_i32(tmp2);
60011498
PB
3126 gen_vfp_msr(tmp);
3127 break;
b7bcbe95
FB
3128 case 8: /* cmp */
3129 gen_vfp_cmp(dp);
3130 break;
3131 case 9: /* cmpe */
3132 gen_vfp_cmpe(dp);
3133 break;
3134 case 10: /* cmpz */
3135 gen_vfp_cmp(dp);
3136 break;
3137 case 11: /* cmpez */
3138 gen_vfp_F1_ld0(dp);
3139 gen_vfp_cmpe(dp);
3140 break;
3141 case 15: /* single<->double conversion */
3142 if (dp)
4373f3ce 3143 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3144 else
4373f3ce 3145 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3146 break;
3147 case 16: /* fuito */
3148 gen_vfp_uito(dp);
3149 break;
3150 case 17: /* fsito */
3151 gen_vfp_sito(dp);
3152 break;
9ee6e8bb
PB
3153 case 20: /* fshto */
3154 if (!arm_feature(env, ARM_FEATURE_VFP3))
3155 return 1;
644ad806 3156 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3157 break;
3158 case 21: /* fslto */
3159 if (!arm_feature(env, ARM_FEATURE_VFP3))
3160 return 1;
644ad806 3161 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3162 break;
3163 case 22: /* fuhto */
3164 if (!arm_feature(env, ARM_FEATURE_VFP3))
3165 return 1;
644ad806 3166 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3167 break;
3168 case 23: /* fulto */
3169 if (!arm_feature(env, ARM_FEATURE_VFP3))
3170 return 1;
644ad806 3171 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3172 break;
b7bcbe95
FB
3173 case 24: /* ftoui */
3174 gen_vfp_toui(dp);
3175 break;
3176 case 25: /* ftouiz */
3177 gen_vfp_touiz(dp);
3178 break;
3179 case 26: /* ftosi */
3180 gen_vfp_tosi(dp);
3181 break;
3182 case 27: /* ftosiz */
3183 gen_vfp_tosiz(dp);
3184 break;
9ee6e8bb
PB
3185 case 28: /* ftosh */
3186 if (!arm_feature(env, ARM_FEATURE_VFP3))
3187 return 1;
644ad806 3188 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3189 break;
3190 case 29: /* ftosl */
3191 if (!arm_feature(env, ARM_FEATURE_VFP3))
3192 return 1;
644ad806 3193 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3194 break;
3195 case 30: /* ftouh */
3196 if (!arm_feature(env, ARM_FEATURE_VFP3))
3197 return 1;
644ad806 3198 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3199 break;
3200 case 31: /* ftoul */
3201 if (!arm_feature(env, ARM_FEATURE_VFP3))
3202 return 1;
644ad806 3203 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3204 break;
b7bcbe95
FB
3205 default: /* undefined */
3206 printf ("rn:%d\n", rn);
3207 return 1;
3208 }
3209 break;
3210 default: /* undefined */
3211 printf ("op:%d\n", op);
3212 return 1;
3213 }
3214
3215 /* Write back the result. */
3216 if (op == 15 && (rn >= 8 && rn <= 11))
3217 ; /* Comparison, do nothing. */
04595bf6
PM
3218 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3219 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3220 gen_mov_vreg_F0(0, rd);
3221 else if (op == 15 && rn == 15)
3222 /* conversion */
3223 gen_mov_vreg_F0(!dp, rd);
3224 else
3225 gen_mov_vreg_F0(dp, rd);
3226
3227 /* break out of the loop if we have finished */
3228 if (veclen == 0)
3229 break;
3230
3231 if (op == 15 && delta_m == 0) {
3232 /* single source one-many */
3233 while (veclen--) {
3234 rd = ((rd + delta_d) & (bank_mask - 1))
3235 | (rd & bank_mask);
3236 gen_mov_vreg_F0(dp, rd);
3237 }
3238 break;
3239 }
3240 /* Set up the next operands. */
3241 veclen--;
3242 rd = ((rd + delta_d) & (bank_mask - 1))
3243 | (rd & bank_mask);
3244
3245 if (op == 15) {
3246 /* One source operand. */
3247 rm = ((rm + delta_m) & (bank_mask - 1))
3248 | (rm & bank_mask);
3249 gen_mov_F0_vreg(dp, rm);
3250 } else {
3251 /* Two source operands. */
3252 rn = ((rn + delta_d) & (bank_mask - 1))
3253 | (rn & bank_mask);
3254 gen_mov_F0_vreg(dp, rn);
3255 if (delta_m) {
3256 rm = ((rm + delta_m) & (bank_mask - 1))
3257 | (rm & bank_mask);
3258 gen_mov_F1_vreg(dp, rm);
3259 }
3260 }
3261 }
3262 }
3263 break;
3264 case 0xc:
3265 case 0xd:
8387da81 3266 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3267 /* two-register transfer */
3268 rn = (insn >> 16) & 0xf;
3269 rd = (insn >> 12) & 0xf;
3270 if (dp) {
9ee6e8bb
PB
3271 VFP_DREG_M(rm, insn);
3272 } else {
3273 rm = VFP_SREG_M(insn);
3274 }
b7bcbe95 3275
18c9b560 3276 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3277 /* vfp->arm */
3278 if (dp) {
4373f3ce
PB
3279 gen_mov_F0_vreg(0, rm * 2);
3280 tmp = gen_vfp_mrs();
3281 store_reg(s, rd, tmp);
3282 gen_mov_F0_vreg(0, rm * 2 + 1);
3283 tmp = gen_vfp_mrs();
3284 store_reg(s, rn, tmp);
b7bcbe95
FB
3285 } else {
3286 gen_mov_F0_vreg(0, rm);
4373f3ce 3287 tmp = gen_vfp_mrs();
8387da81 3288 store_reg(s, rd, tmp);
b7bcbe95 3289 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3290 tmp = gen_vfp_mrs();
8387da81 3291 store_reg(s, rn, tmp);
b7bcbe95
FB
3292 }
3293 } else {
3294 /* arm->vfp */
3295 if (dp) {
4373f3ce
PB
3296 tmp = load_reg(s, rd);
3297 gen_vfp_msr(tmp);
3298 gen_mov_vreg_F0(0, rm * 2);
3299 tmp = load_reg(s, rn);
3300 gen_vfp_msr(tmp);
3301 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3302 } else {
8387da81 3303 tmp = load_reg(s, rd);
4373f3ce 3304 gen_vfp_msr(tmp);
b7bcbe95 3305 gen_mov_vreg_F0(0, rm);
8387da81 3306 tmp = load_reg(s, rn);
4373f3ce 3307 gen_vfp_msr(tmp);
b7bcbe95
FB
3308 gen_mov_vreg_F0(0, rm + 1);
3309 }
3310 }
3311 } else {
3312 /* Load/store */
3313 rn = (insn >> 16) & 0xf;
3314 if (dp)
9ee6e8bb 3315 VFP_DREG_D(rd, insn);
b7bcbe95 3316 else
9ee6e8bb
PB
3317 rd = VFP_SREG_D(insn);
3318 if (s->thumb && rn == 15) {
7d1b0095 3319 addr = tcg_temp_new_i32();
312eea9f 3320 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3321 } else {
312eea9f 3322 addr = load_reg(s, rn);
9ee6e8bb 3323 }
b7bcbe95
FB
3324 if ((insn & 0x01200000) == 0x01000000) {
3325 /* Single load/store */
3326 offset = (insn & 0xff) << 2;
3327 if ((insn & (1 << 23)) == 0)
3328 offset = -offset;
312eea9f 3329 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3330 if (insn & (1 << 20)) {
312eea9f 3331 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3332 gen_mov_vreg_F0(dp, rd);
3333 } else {
3334 gen_mov_F0_vreg(dp, rd);
312eea9f 3335 gen_vfp_st(s, dp, addr);
b7bcbe95 3336 }
7d1b0095 3337 tcg_temp_free_i32(addr);
b7bcbe95
FB
3338 } else {
3339 /* load/store multiple */
3340 if (dp)
3341 n = (insn >> 1) & 0x7f;
3342 else
3343 n = insn & 0xff;
3344
3345 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3346 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3347
3348 if (dp)
3349 offset = 8;
3350 else
3351 offset = 4;
3352 for (i = 0; i < n; i++) {
18c9b560 3353 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3354 /* load */
312eea9f 3355 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3356 gen_mov_vreg_F0(dp, rd + i);
3357 } else {
3358 /* store */
3359 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3360 gen_vfp_st(s, dp, addr);
b7bcbe95 3361 }
312eea9f 3362 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3363 }
3364 if (insn & (1 << 21)) {
3365 /* writeback */
3366 if (insn & (1 << 24))
3367 offset = -offset * n;
3368 else if (dp && (insn & 1))
3369 offset = 4;
3370 else
3371 offset = 0;
3372
3373 if (offset != 0)
312eea9f
FN
3374 tcg_gen_addi_i32(addr, addr, offset);
3375 store_reg(s, rn, addr);
3376 } else {
7d1b0095 3377 tcg_temp_free_i32(addr);
b7bcbe95
FB
3378 }
3379 }
3380 }
3381 break;
3382 default:
3383 /* Should never happen. */
3384 return 1;
3385 }
3386 return 0;
3387}
3388
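/* Direct block chaining: when the branch target lies in the same guest page
 * as the current TB, the two TBs can be linked with goto_tb; otherwise we
 * exit to the main loop so the target is found via the normal TB lookup. */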
6e256c93 3389static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3390{
6e256c93
FB
3391 TranslationBlock *tb;
3392
3393 tb = s->tb;
3394 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3395 tcg_gen_goto_tb(n);
8984bd2e 3396 gen_set_pc_im(dest);
57fec1fe 3397 tcg_gen_exit_tb((long)tb + n);
6e256c93 3398 } else {
8984bd2e 3399 gen_set_pc_im(dest);
57fec1fe 3400 tcg_gen_exit_tb(0);
6e256c93 3401 }
c53be334
FB
3402}
3403
8aaca4c0
FB
3404static inline void gen_jmp (DisasContext *s, uint32_t dest)
3405{
551bd27f 3406 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3407 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3408 if (s->thumb)
d9ba4830
PB
3409 dest |= 1;
3410 gen_bx_im(s, dest);
8aaca4c0 3411 } else {
6e256c93 3412 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3413 s->is_jmp = DISAS_TB_JUMP;
3414 }
3415}
3416
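/* 16x16->32 signed multiply used by the halfword multiply (SMULxy-style)
 * operations: x and y select the top or bottom halfword of each operand,
 * which is sign-extended before the multiply. */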
d9ba4830 3417static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3418{
ee097184 3419 if (x)
d9ba4830 3420 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3421 else
d9ba4830 3422 gen_sxth(t0);
ee097184 3423 if (y)
d9ba4830 3424 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3425 else
d9ba4830
PB
3426 gen_sxth(t1);
3427 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3428}
3429
3430/* Return the mask of PSR bits set by a MSR instruction. */
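/* 'flags' holds the MSR field mask: bit 0 selects the control field
   (PSR[7:0]), bit 1 the extension field, bit 2 the status field and
   bit 3 the flags field (PSR[31:24]). */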
9ee6e8bb 3431static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3432 uint32_t mask;
3433
3434 mask = 0;
3435 if (flags & (1 << 0))
3436 mask |= 0xff;
3437 if (flags & (1 << 1))
3438 mask |= 0xff00;
3439 if (flags & (1 << 2))
3440 mask |= 0xff0000;
3441 if (flags & (1 << 3))
3442 mask |= 0xff000000;
9ee6e8bb 3443
2ae23e75 3444 /* Mask out undefined bits. */
9ee6e8bb
PB
3445 mask &= ~CPSR_RESERVED;
3446 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3447 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3448 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3449 mask &= ~CPSR_IT;
9ee6e8bb 3450 /* Mask out execution state bits. */
2ae23e75 3451 if (!spsr)
e160c51c 3452 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3453 /* Mask out privileged bits. */
3454 if (IS_USER(s))
9ee6e8bb 3455 mask &= CPSR_USER;
b5ff1b31
FB
3456 return mask;
3457}
3458
2fbac54b
FN
3459/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3460static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3461{
d9ba4830 3462 TCGv tmp;
b5ff1b31
FB
3463 if (spsr) {
3464 /* ??? This is also undefined in system mode. */
3465 if (IS_USER(s))
3466 return 1;
d9ba4830
PB
3467
3468 tmp = load_cpu_field(spsr);
3469 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3470 tcg_gen_andi_i32(t0, t0, mask);
3471 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3472 store_cpu_field(tmp, spsr);
b5ff1b31 3473 } else {
2fbac54b 3474 gen_set_cpsr(t0, mask);
b5ff1b31 3475 }
7d1b0095 3476 tcg_temp_free_i32(t0);
b5ff1b31
FB
3477 gen_lookup_tb(s);
3478 return 0;
3479}
3480
2fbac54b
FN
3481/* Returns nonzero if access to the PSR is not permitted. */
3482static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3483{
3484 TCGv tmp;
7d1b0095 3485 tmp = tcg_temp_new_i32();
2fbac54b
FN
3486 tcg_gen_movi_i32(tmp, val);
3487 return gen_set_psr(s, mask, spsr, tmp);
3488}
3489
e9bb4aa9
JR
3490/* Generate an old-style exception return. Marks pc as dead. */
3491static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3492{
d9ba4830 3493 TCGv tmp;
e9bb4aa9 3494 store_reg(s, 15, pc);
d9ba4830
PB
3495 tmp = load_cpu_field(spsr);
3496 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3497 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3498 s->is_jmp = DISAS_UPDATE;
3499}
3500
b0109805
PB
3501/* Generate a v6 exception return. Marks both values as dead. */
3502static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3503{
b0109805 3504 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3505 tcg_temp_free_i32(cpsr);
b0109805 3506 store_reg(s, 15, pc);
9ee6e8bb
PB
3507 s->is_jmp = DISAS_UPDATE;
3508}
3b46e624 3509
9ee6e8bb
PB
3510static inline void
3511gen_set_condexec (DisasContext *s)
3512{
3513 if (s->condexec_mask) {
8f01245e 3514 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3515 TCGv tmp = tcg_temp_new_i32();
8f01245e 3516 tcg_gen_movi_i32(tmp, val);
d9ba4830 3517 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3518 }
3519}
3b46e624 3520
bc4a0de0
PM
3521static void gen_exception_insn(DisasContext *s, int offset, int excp)
3522{
3523 gen_set_condexec(s);
3524 gen_set_pc_im(s->pc - offset);
3525 gen_exception(excp);
3526 s->is_jmp = DISAS_JUMP;
3527}
3528
9ee6e8bb
PB
3529static void gen_nop_hint(DisasContext *s, int val)
3530{
3531 switch (val) {
3532 case 3: /* wfi */
8984bd2e 3533 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3534 s->is_jmp = DISAS_WFI;
3535 break;
3536 case 2: /* wfe */
3537 case 4: /* sev */
3538 /* TODO: Implement SEV and WFE. May help SMP performance. */
3539 default: /* nop */
3540 break;
3541 }
3542}
99c475ab 3543
ad69471c 3544#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3545
dd8fbd78 3546static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3547{
3548 switch (size) {
dd8fbd78
FN
3549 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3550 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3551 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3552 default: return 1;
3553 }
3554 return 0;
3555}
3556
dd8fbd78 3557static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3558{
3559 switch (size) {
dd8fbd78
FN
3560 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3561 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3562 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3563 default: return;
3564 }
3565}
3566
3567/* 32-bit pairwise ops end up the same as the elementwise versions. */
3568#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3569#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3570#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3571#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3572
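/* The GEN_NEON_INTEGER_OP* macros below dispatch on ((size << 1) | u): even
 * cases invoke the signed 8/16/32-bit helper and odd cases the unsigned one. */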
ad69471c
PB
3573#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3574 switch ((size << 1) | u) { \
3575 case 0: \
dd8fbd78 3576 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3577 break; \
3578 case 1: \
dd8fbd78 3579 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3580 break; \
3581 case 2: \
dd8fbd78 3582 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3583 break; \
3584 case 3: \
dd8fbd78 3585 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3586 break; \
3587 case 4: \
dd8fbd78 3588 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3589 break; \
3590 case 5: \
dd8fbd78 3591 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3592 break; \
3593 default: return 1; \
3594 }} while (0)
9ee6e8bb
PB
3595
3596#define GEN_NEON_INTEGER_OP(name) do { \
3597 switch ((size << 1) | u) { \
ad69471c 3598 case 0: \
dd8fbd78 3599 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3600 break; \
3601 case 1: \
dd8fbd78 3602 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3603 break; \
3604 case 2: \
dd8fbd78 3605 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3606 break; \
3607 case 3: \
dd8fbd78 3608 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3609 break; \
3610 case 4: \
dd8fbd78 3611 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3612 break; \
3613 case 5: \
dd8fbd78 3614 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3615 break; \
9ee6e8bb
PB
3616 default: return 1; \
3617 }} while (0)
3618
dd8fbd78 3619static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3620{
7d1b0095 3621 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3622 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3623 return tmp;
9ee6e8bb
PB
3624}
3625
dd8fbd78 3626static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3627{
dd8fbd78 3628 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3629 tcg_temp_free_i32(var);
9ee6e8bb
PB
3630}
3631
dd8fbd78 3632static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3633{
dd8fbd78 3634 TCGv tmp;
9ee6e8bb 3635 if (size == 1) {
0fad6efc
PM
3636 tmp = neon_load_reg(reg & 7, reg >> 4);
3637 if (reg & 8) {
dd8fbd78 3638 gen_neon_dup_high16(tmp);
0fad6efc
PM
3639 } else {
3640 gen_neon_dup_low16(tmp);
dd8fbd78 3641 }
0fad6efc
PM
3642 } else {
3643 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3644 }
dd8fbd78 3645 return tmp;
9ee6e8bb
PB
3646}
3647
02acedf9 3648static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3649{
02acedf9
PM
3650 TCGv tmp, tmp2;
3651 if (size == 3 || (!q && size == 2)) {
3652 return 1;
3653 }
3654 tmp = tcg_const_i32(rd);
3655 tmp2 = tcg_const_i32(rm);
3656 if (q) {
3657 switch (size) {
3658 case 0:
2a3f75b4 3659 gen_helper_neon_qunzip8(tmp, tmp2);
02acedf9
PM
3660 break;
3661 case 1:
2a3f75b4 3662 gen_helper_neon_qunzip16(tmp, tmp2);
02acedf9
PM
3663 break;
3664 case 2:
2a3f75b4 3665 gen_helper_neon_qunzip32(tmp, tmp2);
02acedf9
PM
3666 break;
3667 default:
3668 abort();
3669 }
3670 } else {
3671 switch (size) {
3672 case 0:
2a3f75b4 3673 gen_helper_neon_unzip8(tmp, tmp2);
02acedf9
PM
3674 break;
3675 case 1:
2a3f75b4 3676 gen_helper_neon_unzip16(tmp, tmp2);
02acedf9
PM
3677 break;
3678 default:
3679 abort();
3680 }
3681 }
3682 tcg_temp_free_i32(tmp);
3683 tcg_temp_free_i32(tmp2);
3684 return 0;
19457615
FN
3685}
3686
d68a6f3a 3687static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3688{
3689 TCGv tmp, tmp2;
d68a6f3a
PM
3690 if (size == 3 || (!q && size == 2)) {
3691 return 1;
3692 }
3693 tmp = tcg_const_i32(rd);
3694 tmp2 = tcg_const_i32(rm);
3695 if (q) {
3696 switch (size) {
3697 case 0:
2a3f75b4 3698 gen_helper_neon_qzip8(tmp, tmp2);
d68a6f3a
PM
3699 break;
3700 case 1:
2a3f75b4 3701 gen_helper_neon_qzip16(tmp, tmp2);
d68a6f3a
PM
3702 break;
3703 case 2:
2a3f75b4 3704 gen_helper_neon_qzip32(tmp, tmp2);
d68a6f3a
PM
3705 break;
3706 default:
3707 abort();
3708 }
3709 } else {
3710 switch (size) {
3711 case 0:
2a3f75b4 3712 gen_helper_neon_zip8(tmp, tmp2);
d68a6f3a
PM
3713 break;
3714 case 1:
2a3f75b4 3715 gen_helper_neon_zip16(tmp, tmp2);
d68a6f3a
PM
3716 break;
3717 default:
3718 abort();
3719 }
3720 }
3721 tcg_temp_free_i32(tmp);
3722 tcg_temp_free_i32(tmp2);
3723 return 0;
19457615
FN
3724}
3725
19457615
FN
3726static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3727{
3728 TCGv rd, tmp;
3729
7d1b0095
PM
3730 rd = tcg_temp_new_i32();
3731 tmp = tcg_temp_new_i32();
19457615
FN
3732
3733 tcg_gen_shli_i32(rd, t0, 8);
3734 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3735 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3736 tcg_gen_or_i32(rd, rd, tmp);
3737
3738 tcg_gen_shri_i32(t1, t1, 8);
3739 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3740 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3741 tcg_gen_or_i32(t1, t1, tmp);
3742 tcg_gen_mov_i32(t0, rd);
3743
7d1b0095
PM
3744 tcg_temp_free_i32(tmp);
3745 tcg_temp_free_i32(rd);
19457615
FN
3746}
3747
3748static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3749{
3750 TCGv rd, tmp;
3751
7d1b0095
PM
3752 rd = tcg_temp_new_i32();
3753 tmp = tcg_temp_new_i32();
19457615
FN
3754
3755 tcg_gen_shli_i32(rd, t0, 16);
3756 tcg_gen_andi_i32(tmp, t1, 0xffff);
3757 tcg_gen_or_i32(rd, rd, tmp);
3758 tcg_gen_shri_i32(t1, t1, 16);
3759 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3760 tcg_gen_or_i32(t1, t1, tmp);
3761 tcg_gen_mov_i32(t0, rd);
3762
7d1b0095
PM
3763 tcg_temp_free_i32(tmp);
3764 tcg_temp_free_i32(rd);
19457615
FN
3765}
3766
3767
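/* Register count, element interleave factor and register spacing for the
 * Neon 'load/store multiple structures' forms, indexed by insn bits [11:8]. */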
9ee6e8bb
PB
3768static struct {
3769 int nregs;
3770 int interleave;
3771 int spacing;
3772} neon_ls_element_type[11] = {
3773 {4, 4, 1},
3774 {4, 4, 2},
3775 {4, 1, 1},
3776 {4, 2, 1},
3777 {3, 3, 1},
3778 {3, 3, 2},
3779 {3, 1, 1},
3780 {1, 1, 1},
3781 {2, 2, 1},
3782 {2, 2, 2},
3783 {2, 1, 1}
3784};
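/* The 'op' field decoded below ((insn >> 8) & 0xf) indexes this table;
 * op == 7 ({1, 1, 1}) appears to correspond to the plain single-register
 * VLD1/VST1 form, while op == 0 ({4, 4, 1}) is the fully interleaved
 * four-register VLD4/VST4 form. */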
3785
3786/* Translate a NEON load/store element instruction. Return nonzero if the
3787 instruction is invalid. */
3788static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3789{
3790 int rd, rn, rm;
3791 int op;
3792 int nregs;
3793 int interleave;
84496233 3794 int spacing;
9ee6e8bb
PB
3795 int stride;
3796 int size;
3797 int reg;
3798 int pass;
3799 int load;
3800 int shift;
9ee6e8bb 3801 int n;
1b2b1e54 3802 TCGv addr;
b0109805 3803 TCGv tmp;
8f8e3aa4 3804 TCGv tmp2;
84496233 3805 TCGv_i64 tmp64;
9ee6e8bb 3806
5df8bac1 3807 if (!s->vfp_enabled)
9ee6e8bb
PB
3808 return 1;
3809 VFP_DREG_D(rd, insn);
3810 rn = (insn >> 16) & 0xf;
3811 rm = insn & 0xf;
3812 load = (insn & (1 << 21)) != 0;
3813 if ((insn & (1 << 23)) == 0) {
3814 /* Load store all elements. */
3815 op = (insn >> 8) & 0xf;
3816 size = (insn >> 6) & 3;
84496233 3817 if (op > 10)
9ee6e8bb
PB
3818 return 1;
3819 nregs = neon_ls_element_type[op].nregs;
3820 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3821 spacing = neon_ls_element_type[op].spacing;
3822 if (size == 3 && (interleave | spacing) != 1)
3823 return 1;
e318a60b 3824 addr = tcg_temp_new_i32();
dcc65026 3825 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3826 stride = (1 << size) * interleave;
3827 for (reg = 0; reg < nregs; reg++) {
3828 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3829 load_reg_var(s, addr, rn);
3830 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3831 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3832 load_reg_var(s, addr, rn);
3833 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3834 }
84496233
JR
3835 if (size == 3) {
3836 if (load) {
3837 tmp64 = gen_ld64(addr, IS_USER(s));
3838 neon_store_reg64(tmp64, rd);
3839 tcg_temp_free_i64(tmp64);
3840 } else {
3841 tmp64 = tcg_temp_new_i64();
3842 neon_load_reg64(tmp64, rd);
3843 gen_st64(tmp64, addr, IS_USER(s));
3844 }
3845 tcg_gen_addi_i32(addr, addr, stride);
3846 } else {
3847 for (pass = 0; pass < 2; pass++) {
3848 if (size == 2) {
3849 if (load) {
3850 tmp = gen_ld32(addr, IS_USER(s));
3851 neon_store_reg(rd, pass, tmp);
3852 } else {
3853 tmp = neon_load_reg(rd, pass);
3854 gen_st32(tmp, addr, IS_USER(s));
3855 }
1b2b1e54 3856 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3857 } else if (size == 1) {
3858 if (load) {
3859 tmp = gen_ld16u(addr, IS_USER(s));
3860 tcg_gen_addi_i32(addr, addr, stride);
3861 tmp2 = gen_ld16u(addr, IS_USER(s));
3862 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3863 tcg_gen_shli_i32(tmp2, tmp2, 16);
3864 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3865 tcg_temp_free_i32(tmp2);
84496233
JR
3866 neon_store_reg(rd, pass, tmp);
3867 } else {
3868 tmp = neon_load_reg(rd, pass);
7d1b0095 3869 tmp2 = tcg_temp_new_i32();
84496233
JR
3870 tcg_gen_shri_i32(tmp2, tmp, 16);
3871 gen_st16(tmp, addr, IS_USER(s));
3872 tcg_gen_addi_i32(addr, addr, stride);
3873 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3874 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3875 }
84496233
JR
3876 } else /* size == 0 */ {
3877 if (load) {
3878 TCGV_UNUSED(tmp2);
3879 for (n = 0; n < 4; n++) {
3880 tmp = gen_ld8u(addr, IS_USER(s));
3881 tcg_gen_addi_i32(addr, addr, stride);
3882 if (n == 0) {
3883 tmp2 = tmp;
3884 } else {
41ba8341
PB
3885 tcg_gen_shli_i32(tmp, tmp, n * 8);
3886 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3887 tcg_temp_free_i32(tmp);
84496233 3888 }
9ee6e8bb 3889 }
84496233
JR
3890 neon_store_reg(rd, pass, tmp2);
3891 } else {
3892 tmp2 = neon_load_reg(rd, pass);
3893 for (n = 0; n < 4; n++) {
7d1b0095 3894 tmp = tcg_temp_new_i32();
84496233
JR
3895 if (n == 0) {
3896 tcg_gen_mov_i32(tmp, tmp2);
3897 } else {
3898 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3899 }
3900 gen_st8(tmp, addr, IS_USER(s));
3901 tcg_gen_addi_i32(addr, addr, stride);
3902 }
7d1b0095 3903 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3904 }
3905 }
3906 }
3907 }
84496233 3908 rd += spacing;
9ee6e8bb 3909 }
e318a60b 3910 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3911 stride = nregs * 8;
3912 } else {
3913 size = (insn >> 10) & 3;
3914 if (size == 3) {
3915 /* Load single element to all lanes. */
8e18cde3
PM
3916 int a = (insn >> 4) & 1;
3917 if (!load) {
9ee6e8bb 3918 return 1;
8e18cde3 3919 }
9ee6e8bb
PB
3920 size = (insn >> 6) & 3;
3921 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3922
3923 if (size == 3) {
3924 if (nregs != 4 || a == 0) {
9ee6e8bb 3925 return 1;
99c475ab 3926 }
8e18cde3
PM
3927 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3928 size = 2;
3929 }
3930 if (nregs == 1 && a == 1 && size == 0) {
3931 return 1;
3932 }
3933 if (nregs == 3 && a == 1) {
3934 return 1;
3935 }
e318a60b 3936 addr = tcg_temp_new_i32();
8e18cde3
PM
3937 load_reg_var(s, addr, rn);
3938 if (nregs == 1) {
3939 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3940 tmp = gen_load_and_replicate(s, addr, size);
3941 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3942 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3943 if (insn & (1 << 5)) {
3944 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3945 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3946 }
3947 tcg_temp_free_i32(tmp);
3948 } else {
3949 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3950 stride = (insn & (1 << 5)) ? 2 : 1;
3951 for (reg = 0; reg < nregs; reg++) {
3952 tmp = gen_load_and_replicate(s, addr, size);
3953 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3954 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3955 tcg_temp_free_i32(tmp);
3956 tcg_gen_addi_i32(addr, addr, 1 << size);
3957 rd += stride;
3958 }
9ee6e8bb 3959 }
e318a60b 3960 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3961 stride = (1 << size) * nregs;
3962 } else {
3963 /* Single element. */
3964 pass = (insn >> 7) & 1;
3965 switch (size) {
3966 case 0:
3967 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3968 stride = 1;
3969 break;
3970 case 1:
3971 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3972 stride = (insn & (1 << 5)) ? 2 : 1;
3973 break;
3974 case 2:
3975 shift = 0;
9ee6e8bb
PB
3976 stride = (insn & (1 << 6)) ? 2 : 1;
3977 break;
3978 default:
3979 abort();
3980 }
3981 nregs = ((insn >> 8) & 3) + 1;
e318a60b 3982 addr = tcg_temp_new_i32();
dcc65026 3983 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3984 for (reg = 0; reg < nregs; reg++) {
3985 if (load) {
9ee6e8bb
PB
3986 switch (size) {
3987 case 0:
1b2b1e54 3988 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3989 break;
3990 case 1:
1b2b1e54 3991 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3992 break;
3993 case 2:
1b2b1e54 3994 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3995 break;
a50f5b91
PB
3996 default: /* Avoid compiler warnings. */
3997 abort();
9ee6e8bb
PB
3998 }
3999 if (size != 2) {
8f8e3aa4
PB
4000 tmp2 = neon_load_reg(rd, pass);
4001 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
7d1b0095 4002 tcg_temp_free_i32(tmp2);
9ee6e8bb 4003 }
8f8e3aa4 4004 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4005 } else { /* Store */
8f8e3aa4
PB
4006 tmp = neon_load_reg(rd, pass);
4007 if (shift)
4008 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4009 switch (size) {
4010 case 0:
1b2b1e54 4011 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4012 break;
4013 case 1:
1b2b1e54 4014 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4015 break;
4016 case 2:
1b2b1e54 4017 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4018 break;
99c475ab 4019 }
99c475ab 4020 }
9ee6e8bb 4021 rd += stride;
1b2b1e54 4022 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4023 }
e318a60b 4024 tcg_temp_free_i32(addr);
9ee6e8bb 4025 stride = nregs * (1 << size);
99c475ab 4026 }
9ee6e8bb
PB
4027 }
4028 if (rm != 15) {
b26eefb6
PB
4029 TCGv base;
4030
4031 base = load_reg(s, rn);
9ee6e8bb 4032 if (rm == 13) {
b26eefb6 4033 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4034 } else {
b26eefb6
PB
4035 TCGv index;
4036 index = load_reg(s, rm);
4037 tcg_gen_add_i32(base, base, index);
7d1b0095 4038 tcg_temp_free_i32(index);
9ee6e8bb 4039 }
b26eefb6 4040 store_reg(s, rn, base);
9ee6e8bb
PB
4041 }
4042 return 0;
4043}
3b46e624 4044
8f8e3aa4
PB
4045/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4046static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4047{
4048 tcg_gen_and_i32(t, t, c);
f669df27 4049 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4050 tcg_gen_or_i32(dest, t, f);
4051}
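/* The VBSL, VBIT and VBIF cases of the logic-op group further down all reuse
 * this helper, differing only in which operand is passed as the select mask
 * 'c'. */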
4052
a7812ae4 4053static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4054{
4055 switch (size) {
4056 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4057 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4058 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4059 default: abort();
4060 }
4061}
4062
a7812ae4 4063static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4064{
4065 switch (size) {
2a3f75b4
PM
4066 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4067 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4068 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
ad69471c
PB
4069 default: abort();
4070 }
4071}
4072
a7812ae4 4073static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4074{
4075 switch (size) {
2a3f75b4
PM
4076 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4077 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4078 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
ad69471c
PB
4079 default: abort();
4080 }
4081}
4082
af1bbf30
JR
4083static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4084{
4085 switch (size) {
2a3f75b4
PM
4086 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4087 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4088 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
af1bbf30
JR
4089 default: abort();
4090 }
4091}
4092
ad69471c
PB
4093static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4094 int q, int u)
4095{
4096 if (q) {
4097 if (u) {
4098 switch (size) {
4099 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4100 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4101 default: abort();
4102 }
4103 } else {
4104 switch (size) {
4105 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4106 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4107 default: abort();
4108 }
4109 }
4110 } else {
4111 if (u) {
4112 switch (size) {
b408a9b0
CL
4113 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4114 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4115 default: abort();
4116 }
4117 } else {
4118 switch (size) {
4119 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4120 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4121 default: abort();
4122 }
4123 }
4124 }
4125}
4126
a7812ae4 4127static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4128{
4129 if (u) {
4130 switch (size) {
4131 case 0: gen_helper_neon_widen_u8(dest, src); break;
4132 case 1: gen_helper_neon_widen_u16(dest, src); break;
4133 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4134 default: abort();
4135 }
4136 } else {
4137 switch (size) {
4138 case 0: gen_helper_neon_widen_s8(dest, src); break;
4139 case 1: gen_helper_neon_widen_s16(dest, src); break;
4140 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4141 default: abort();
4142 }
4143 }
7d1b0095 4144 tcg_temp_free_i32(src);
ad69471c
PB
4145}
4146
4147static inline void gen_neon_addl(int size)
4148{
4149 switch (size) {
4150 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4151 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4152 case 2: tcg_gen_add_i64(CPU_V001); break;
4153 default: abort();
4154 }
4155}
4156
4157static inline void gen_neon_subl(int size)
4158{
4159 switch (size) {
4160 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4161 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4162 case 2: tcg_gen_sub_i64(CPU_V001); break;
4163 default: abort();
4164 }
4165}
4166
a7812ae4 4167static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4168{
4169 switch (size) {
4170 case 0: gen_helper_neon_negl_u16(var, var); break;
4171 case 1: gen_helper_neon_negl_u32(var, var); break;
4172 case 2: gen_helper_neon_negl_u64(var, var); break;
4173 default: abort();
4174 }
4175}
4176
a7812ae4 4177static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4178{
4179 switch (size) {
2a3f75b4
PM
4180 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4181 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
ad69471c
PB
4182 default: abort();
4183 }
4184}
4185
a7812ae4 4186static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4187{
a7812ae4 4188 TCGv_i64 tmp;
ad69471c
PB
4189
4190 switch ((size << 1) | u) {
4191 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4192 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4193 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4194 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4195 case 4:
4196 tmp = gen_muls_i64_i32(a, b);
4197 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4198 tcg_temp_free_i64(tmp);
ad69471c
PB
4199 break;
4200 case 5:
4201 tmp = gen_mulu_i64_i32(a, b);
4202 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4203 tcg_temp_free_i64(tmp);
ad69471c
PB
4204 break;
4205 default: abort();
4206 }
c6067f04
CL
4207
4208 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4209 Don't forget to clean them now. */
4210 if (size < 2) {
7d1b0095
PM
4211 tcg_temp_free_i32(a);
4212 tcg_temp_free_i32(b);
c6067f04 4213 }
ad69471c
PB
4214}
4215
c33171c7
PM
4216static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4217{
4218 if (op) {
4219 if (u) {
4220 gen_neon_unarrow_sats(size, dest, src);
4221 } else {
4222 gen_neon_narrow(size, dest, src);
4223 }
4224 } else {
4225 if (u) {
4226 gen_neon_narrow_satu(size, dest, src);
4227 } else {
4228 gen_neon_narrow_sats(size, dest, src);
4229 }
4230 }
4231}
4232
9ee6e8bb
PB
4233/* Translate a NEON data processing instruction. Return nonzero if the
4234 instruction is invalid.
ad69471c
PB
4235 We process data in a mixture of 32-bit and 64-bit chunks.
4236 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4237
9ee6e8bb
PB
4238static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4239{
4240 int op;
4241 int q;
4242 int rd, rn, rm;
4243 int size;
4244 int shift;
4245 int pass;
4246 int count;
4247 int pairwise;
4248 int u;
4249 int n;
ca9a32e4 4250 uint32_t imm, mask;
b75263d6 4251 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4252 TCGv_i64 tmp64;
9ee6e8bb 4253
5df8bac1 4254 if (!s->vfp_enabled)
9ee6e8bb
PB
4255 return 1;
4256 q = (insn & (1 << 6)) != 0;
4257 u = (insn >> 24) & 1;
4258 VFP_DREG_D(rd, insn);
4259 VFP_DREG_N(rn, insn);
4260 VFP_DREG_M(rm, insn);
4261 size = (insn >> 20) & 3;
4262 if ((insn & (1 << 23)) == 0) {
4263 /* Three register same length. */
4264 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4265 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4266 || op == 10 || op == 11 || op == 16)) {
4267 /* 64-bit element instructions. */
9ee6e8bb 4268 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4269 neon_load_reg64(cpu_V0, rn + pass);
4270 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4271 switch (op) {
4272 case 1: /* VQADD */
4273 if (u) {
2a3f75b4 4274 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4275 } else {
2a3f75b4 4276 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4277 }
9ee6e8bb
PB
4278 break;
4279 case 5: /* VQSUB */
4280 if (u) {
2a3f75b4 4281 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 4282 } else {
2a3f75b4 4283 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4284 }
4285 break;
4286 case 8: /* VSHL */
4287 if (u) {
4288 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4289 } else {
4290 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4291 }
4292 break;
4293 case 9: /* VQSHL */
4294 if (u) {
2a3f75b4 4295 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4296 } else {
2a3f75b4 4297 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
ad69471c
PB
4298 }
4299 break;
4300 case 10: /* VRSHL */
4301 if (u) {
4302 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4303 } else {
ad69471c
PB
4304 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4305 }
4306 break;
4307 case 11: /* VQRSHL */
4308 if (u) {
2a3f75b4 4309 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4310 } else {
2a3f75b4 4311 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4312 }
9ee6e8bb
PB
4313 break;
4314 case 16:
4315 if (u) {
ad69471c 4316 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4317 } else {
ad69471c 4318 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4319 }
4320 break;
4321 default:
4322 abort();
2c0262af 4323 }
ad69471c 4324 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4325 }
9ee6e8bb 4326 return 0;
2c0262af 4327 }
9ee6e8bb
PB
4328 switch (op) {
4329 case 8: /* VSHL */
4330 case 9: /* VQSHL */
4331 case 10: /* VRSHL */
ad69471c 4332 case 11: /* VQRSHL */
9ee6e8bb 4333 {
ad69471c
PB
4334 int rtmp;
4335 /* Shift instruction operands are reversed. */
4336 rtmp = rn;
9ee6e8bb 4337 rn = rm;
ad69471c 4338 rm = rtmp;
9ee6e8bb
PB
4339 pairwise = 0;
4340 }
2c0262af 4341 break;
9ee6e8bb
PB
4342 case 20: /* VPMAX */
4343 case 21: /* VPMIN */
4344 case 23: /* VPADD */
4345 pairwise = 1;
2c0262af 4346 break;
9ee6e8bb
PB
4347 case 26: /* VPADD (float) */
4348 pairwise = (u && size < 2);
2c0262af 4349 break;
9ee6e8bb
PB
4350 case 30: /* VPMIN/VPMAX (float) */
4351 pairwise = u;
2c0262af 4352 break;
9ee6e8bb
PB
4353 default:
4354 pairwise = 0;
2c0262af 4355 break;
9ee6e8bb 4356 }
dd8fbd78 4357
9ee6e8bb
PB
4358 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4359
4360 if (pairwise) {
4361 /* Pairwise. */
4362 if (q)
4363 n = (pass & 1) * 2;
2c0262af 4364 else
9ee6e8bb
PB
4365 n = 0;
4366 if (pass < q + 1) {
dd8fbd78
FN
4367 tmp = neon_load_reg(rn, n);
4368 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4369 } else {
dd8fbd78
FN
4370 tmp = neon_load_reg(rm, n);
4371 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4372 }
4373 } else {
4374 /* Elementwise. */
dd8fbd78
FN
4375 tmp = neon_load_reg(rn, pass);
4376 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4377 }
4378 switch (op) {
4379 case 0: /* VHADD */
4380 GEN_NEON_INTEGER_OP(hadd);
4381 break;
4382 case 1: /* VQADD */
2a3f75b4 4383 GEN_NEON_INTEGER_OP(qadd);
2c0262af 4384 break;
9ee6e8bb
PB
4385 case 2: /* VRHADD */
4386 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4387 break;
9ee6e8bb
PB
4388 case 3: /* Logic ops. */
4389 switch ((u << 2) | size) {
4390 case 0: /* VAND */
dd8fbd78 4391 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4392 break;
4393 case 1: /* BIC */
f669df27 4394 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4395 break;
4396 case 2: /* VORR */
dd8fbd78 4397 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4398 break;
4399 case 3: /* VORN */
f669df27 4400 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4401 break;
4402 case 4: /* VEOR */
dd8fbd78 4403 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4404 break;
4405 case 5: /* VBSL */
dd8fbd78
FN
4406 tmp3 = neon_load_reg(rd, pass);
4407 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4408 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4409 break;
4410 case 6: /* VBIT */
dd8fbd78
FN
4411 tmp3 = neon_load_reg(rd, pass);
4412 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4413 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4414 break;
4415 case 7: /* VBIF */
dd8fbd78
FN
4416 tmp3 = neon_load_reg(rd, pass);
4417 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4418 tcg_temp_free_i32(tmp3);
9ee6e8bb 4419 break;
2c0262af
FB
4420 }
4421 break;
9ee6e8bb
PB
4422 case 4: /* VHSUB */
4423 GEN_NEON_INTEGER_OP(hsub);
4424 break;
4425 case 5: /* VQSUB */
2a3f75b4 4426 GEN_NEON_INTEGER_OP(qsub);
2c0262af 4427 break;
9ee6e8bb
PB
4428 case 6: /* VCGT */
4429 GEN_NEON_INTEGER_OP(cgt);
4430 break;
4431 case 7: /* VCGE */
4432 GEN_NEON_INTEGER_OP(cge);
4433 break;
4434 case 8: /* VSHL */
ad69471c 4435 GEN_NEON_INTEGER_OP(shl);
2c0262af 4436 break;
9ee6e8bb 4437 case 9: /* VQSHL */
2a3f75b4 4438 GEN_NEON_INTEGER_OP(qshl);
2c0262af 4439 break;
9ee6e8bb 4440 case 10: /* VRSHL */
ad69471c 4441 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4442 break;
9ee6e8bb 4443 case 11: /* VQRSHL */
2a3f75b4 4444 GEN_NEON_INTEGER_OP(qrshl);
9ee6e8bb
PB
4445 break;
4446 case 12: /* VMAX */
4447 GEN_NEON_INTEGER_OP(max);
4448 break;
4449 case 13: /* VMIN */
4450 GEN_NEON_INTEGER_OP(min);
4451 break;
4452 case 14: /* VABD */
4453 GEN_NEON_INTEGER_OP(abd);
4454 break;
4455 case 15: /* VABA */
4456 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4457 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4458 tmp2 = neon_load_reg(rd, pass);
4459 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4460 break;
4461 case 16:
4462 if (!u) { /* VADD */
dd8fbd78 4463 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4464 return 1;
4465 } else { /* VSUB */
4466 switch (size) {
dd8fbd78
FN
4467 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4468 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4469 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4470 default: return 1;
4471 }
4472 }
4473 break;
4474 case 17:
4475 if (!u) { /* VTST */
4476 switch (size) {
dd8fbd78
FN
4477 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4478 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4479 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4480 default: return 1;
4481 }
4482 } else { /* VCEQ */
4483 switch (size) {
dd8fbd78
FN
4484 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4485 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4486 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4487 default: return 1;
4488 }
4489 }
4490 break;
4491 case 18: /* Multiply. */
4492 switch (size) {
dd8fbd78
FN
4493 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4494 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4495 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4496 default: return 1;
4497 }
7d1b0095 4498 tcg_temp_free_i32(tmp2);
dd8fbd78 4499 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4500 if (u) { /* VMLS */
dd8fbd78 4501 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4502 } else { /* VMLA */
dd8fbd78 4503 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4504 }
4505 break;
4506 case 19: /* VMUL */
4507 if (u) { /* polynomial */
dd8fbd78 4508 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4509 } else { /* Integer */
4510 switch (size) {
dd8fbd78
FN
4511 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4512 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4513 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4514 default: return 1;
4515 }
4516 }
4517 break;
4518 case 20: /* VPMAX */
4519 GEN_NEON_INTEGER_OP(pmax);
4520 break;
4521 case 21: /* VPMIN */
4522 GEN_NEON_INTEGER_OP(pmin);
4523 break;
4524             case 22: /* Multiply high.  */
4525 if (!u) { /* VQDMULH */
4526 switch (size) {
2a3f75b4
PM
4527 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4528 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4529 default: return 1;
4530 }
4531             } else { /* VQRDMULH */
4532 switch (size) {
2a3f75b4
PM
4533 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4534 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4535 default: return 1;
4536 }
4537 }
4538 break;
4539 case 23: /* VPADD */
4540 if (u)
4541 return 1;
4542 switch (size) {
dd8fbd78
FN
4543 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4544 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4545 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4546 default: return 1;
4547 }
4548 break;
4549             case 26: /* Floating point arithmetic.  */
4550 switch ((u << 2) | size) {
4551 case 0: /* VADD */
dd8fbd78 4552 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4553 break;
4554 case 2: /* VSUB */
dd8fbd78 4555 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4556 break;
4557 case 4: /* VPADD */
dd8fbd78 4558 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4559 break;
4560 case 6: /* VABD */
dd8fbd78 4561 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4562 break;
4563 default:
4564 return 1;
4565 }
4566 break;
4567 case 27: /* Float multiply. */
dd8fbd78 4568 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4569 if (!u) {
7d1b0095 4570 tcg_temp_free_i32(tmp2);
dd8fbd78 4571 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4572 if (size == 0) {
dd8fbd78 4573 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4574 } else {
dd8fbd78 4575 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4576 }
4577 }
4578 break;
4579 case 28: /* Float compare. */
4580 if (!u) {
dd8fbd78 4581 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4582 } else {
9ee6e8bb 4583 if (size == 0)
dd8fbd78 4584 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4585 else
dd8fbd78 4586 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4587 }
2c0262af 4588 break;
9ee6e8bb
PB
4589 case 29: /* Float compare absolute. */
4590 if (!u)
4591 return 1;
4592 if (size == 0)
dd8fbd78 4593 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4594 else
dd8fbd78 4595 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4596 break;
9ee6e8bb
PB
4597 case 30: /* Float min/max. */
4598 if (size == 0)
dd8fbd78 4599 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4600 else
dd8fbd78 4601 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4602 break;
4603 case 31:
4604 if (size == 0)
dd8fbd78 4605 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4606 else
dd8fbd78 4607 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4608 break;
9ee6e8bb
PB
4609 default:
4610 abort();
2c0262af 4611 }
7d1b0095 4612 tcg_temp_free_i32(tmp2);
dd8fbd78 4613
9ee6e8bb
PB
4614 /* Save the result. For elementwise operations we can put it
4615 straight into the destination register. For pairwise operations
4616 we have to be careful to avoid clobbering the source operands. */
4617 if (pairwise && rd == rm) {
dd8fbd78 4618 neon_store_scratch(pass, tmp);
9ee6e8bb 4619 } else {
dd8fbd78 4620 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4621 }
4622
4623 } /* for pass */
4624 if (pairwise && rd == rm) {
4625 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4626 tmp = neon_load_scratch(pass);
4627 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4628 }
4629 }
ad69471c 4630 /* End of 3 register same size operations. */
9ee6e8bb
PB
4631 } else if (insn & (1 << 4)) {
4632 if ((insn & 0x00380080) != 0) {
4633 /* Two registers and shift. */
4634 op = (insn >> 8) & 0xf;
4635 if (insn & (1 << 7)) {
4636 /* 64-bit shift. */
4637 size = 3;
4638 } else {
4639 size = 2;
4640 while ((insn & (1 << (size + 19))) == 0)
4641 size--;
4642 }
4643 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4644             /* To avoid excessive duplication of ops we implement shift
4645 by immediate using the variable shift operations. */
4646 if (op < 8) {
4647 /* Shift by immediate:
4648 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4649 /* Right shifts are encoded as N - shift, where N is the
4650 element size in bits. */
4651 if (op <= 4)
4652 shift = shift - (1 << (size + 3));
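                    /* Worked example: an 8-bit VSHR #1 is encoded with the
                     * 3-bit shift field equal to 7, so the line above yields
                     * 7 - 8 = -1, i.e. a right shift by one expressed as a
                     * variable left shift by a negative count. */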
9ee6e8bb
PB
4653 if (size == 3) {
4654 count = q + 1;
4655 } else {
4656 count = q ? 4: 2;
4657 }
4658 switch (size) {
4659 case 0:
4660 imm = (uint8_t) shift;
4661 imm |= imm << 8;
4662 imm |= imm << 16;
4663 break;
4664 case 1:
4665 imm = (uint16_t) shift;
4666 imm |= imm << 16;
4667 break;
4668 case 2:
4669 case 3:
4670 imm = shift;
4671 break;
4672 default:
4673 abort();
4674 }
4675
4676 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4677 if (size == 3) {
4678 neon_load_reg64(cpu_V0, rm + pass);
4679 tcg_gen_movi_i64(cpu_V1, imm);
4680 switch (op) {
4681 case 0: /* VSHR */
4682 case 1: /* VSRA */
4683 if (u)
4684 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4685 else
ad69471c 4686 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4687 break;
ad69471c
PB
4688 case 2: /* VRSHR */
4689 case 3: /* VRSRA */
4690 if (u)
4691 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4692 else
ad69471c 4693 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4694 break;
ad69471c
PB
4695 case 4: /* VSRI */
4696 if (!u)
4697 return 1;
4698 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4699 break;
4700 case 5: /* VSHL, VSLI */
4701 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4702 break;
0322b26e
PM
4703 case 6: /* VQSHLU */
4704 if (u) {
2a3f75b4 4705 gen_helper_neon_qshlu_s64(cpu_V0,
0322b26e
PM
4706 cpu_V0, cpu_V1);
4707 } else {
4708 return 1;
4709 }
ad69471c 4710 break;
0322b26e
PM
4711 case 7: /* VQSHL */
4712 if (u) {
2a3f75b4 4713 gen_helper_neon_qshl_u64(cpu_V0,
0322b26e
PM
4714 cpu_V0, cpu_V1);
4715 } else {
2a3f75b4 4716 gen_helper_neon_qshl_s64(cpu_V0,
0322b26e
PM
4717 cpu_V0, cpu_V1);
4718 }
9ee6e8bb 4719 break;
9ee6e8bb 4720 }
ad69471c
PB
4721 if (op == 1 || op == 3) {
4722 /* Accumulate. */
5371cb81 4723 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
4724 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4725 } else if (op == 4 || (op == 5 && u)) {
4726 /* Insert */
923e6509
CL
4727 neon_load_reg64(cpu_V1, rd + pass);
4728 uint64_t mask;
4729 if (shift < -63 || shift > 63) {
4730 mask = 0;
4731 } else {
4732 if (op == 4) {
4733 mask = 0xffffffffffffffffull >> -shift;
4734 } else {
4735 mask = 0xffffffffffffffffull << shift;
4736 }
4737 }
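                            /* Illustration: a 64-bit VSRI #8 reaches this
                             * point with shift == -8, giving
                             * mask == 0x00ffffffffffffffull, so the top 8 bits
                             * of the destination are preserved and the
                             * right-shifted source supplies the low 56 bits. */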
4738 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4739 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4740 }
4741 neon_store_reg64(cpu_V0, rd + pass);
4742 } else { /* size < 3 */
4743 /* Operands in T0 and T1. */
dd8fbd78 4744 tmp = neon_load_reg(rm, pass);
7d1b0095 4745 tmp2 = tcg_temp_new_i32();
dd8fbd78 4746 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4747 switch (op) {
4748 case 0: /* VSHR */
4749 case 1: /* VSRA */
4750 GEN_NEON_INTEGER_OP(shl);
4751 break;
4752 case 2: /* VRSHR */
4753 case 3: /* VRSRA */
4754 GEN_NEON_INTEGER_OP(rshl);
4755 break;
4756 case 4: /* VSRI */
4757 if (!u)
4758 return 1;
4759 GEN_NEON_INTEGER_OP(shl);
4760 break;
4761 case 5: /* VSHL, VSLI */
4762 switch (size) {
dd8fbd78
FN
4763 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4764 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4765 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4766 default: return 1;
4767 }
4768 break;
0322b26e
PM
4769 case 6: /* VQSHLU */
4770 if (!u) {
4771 return 1;
4772 }
ad69471c 4773 switch (size) {
0322b26e 4774 case 0:
2a3f75b4 4775 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
0322b26e
PM
4776 break;
4777 case 1:
2a3f75b4 4778 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
0322b26e
PM
4779 break;
4780 case 2:
2a3f75b4 4781 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
0322b26e
PM
4782 break;
4783 default:
4784 return 1;
ad69471c
PB
4785 }
4786 break;
0322b26e 4787 case 7: /* VQSHL */
2a3f75b4 4788 GEN_NEON_INTEGER_OP(qshl);
0322b26e 4789 break;
ad69471c 4790 }
7d1b0095 4791 tcg_temp_free_i32(tmp2);
ad69471c
PB
4792
4793 if (op == 1 || op == 3) {
4794 /* Accumulate. */
dd8fbd78 4795 tmp2 = neon_load_reg(rd, pass);
5371cb81 4796 gen_neon_add(size, tmp, tmp2);
7d1b0095 4797 tcg_temp_free_i32(tmp2);
ad69471c
PB
4798 } else if (op == 4 || (op == 5 && u)) {
4799 /* Insert */
4800 switch (size) {
4801 case 0:
4802 if (op == 4)
ca9a32e4 4803 mask = 0xff >> -shift;
ad69471c 4804 else
ca9a32e4
JR
4805 mask = (uint8_t)(0xff << shift);
4806 mask |= mask << 8;
4807 mask |= mask << 16;
ad69471c
PB
4808 break;
4809 case 1:
4810 if (op == 4)
ca9a32e4 4811 mask = 0xffff >> -shift;
ad69471c 4812 else
ca9a32e4
JR
4813 mask = (uint16_t)(0xffff << shift);
4814 mask |= mask << 16;
ad69471c
PB
4815 break;
4816 case 2:
ca9a32e4
JR
4817 if (shift < -31 || shift > 31) {
4818 mask = 0;
4819 } else {
4820 if (op == 4)
4821 mask = 0xffffffffu >> -shift;
4822 else
4823 mask = 0xffffffffu << shift;
4824 }
ad69471c
PB
4825 break;
4826 default:
4827 abort();
4828 }
dd8fbd78 4829 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4830 tcg_gen_andi_i32(tmp, tmp, mask);
4831 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 4832 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4833 tcg_temp_free_i32(tmp2);
ad69471c 4834 }
dd8fbd78 4835 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4836 }
4837 } /* for pass */
4838 } else if (op < 10) {
ad69471c 4839 /* Shift by immediate and narrow:
9ee6e8bb 4840 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd
CL
4841 int input_unsigned = (op == 8) ? !u : u;
4842
9ee6e8bb
PB
4843 shift = shift - (1 << (size + 3));
4844 size++;
92cdfaeb 4845 if (size == 3) {
a7812ae4 4846 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
4847 neon_load_reg64(cpu_V0, rm);
4848 neon_load_reg64(cpu_V1, rm + 1);
4849 for (pass = 0; pass < 2; pass++) {
4850 TCGv_i64 in;
4851 if (pass == 0) {
4852 in = cpu_V0;
4853 } else {
4854 in = cpu_V1;
4855 }
ad69471c 4856 if (q) {
0b36f4cd 4857 if (input_unsigned) {
92cdfaeb 4858 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 4859 } else {
92cdfaeb 4860 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 4861 }
ad69471c 4862 } else {
0b36f4cd 4863 if (input_unsigned) {
92cdfaeb 4864 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 4865 } else {
92cdfaeb 4866 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 4867 }
ad69471c 4868 }
7d1b0095 4869 tmp = tcg_temp_new_i32();
92cdfaeb
PM
4870 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4871 neon_store_reg(rd, pass, tmp);
4872 } /* for pass */
4873 tcg_temp_free_i64(tmp64);
4874 } else {
4875 if (size == 1) {
4876 imm = (uint16_t)shift;
4877 imm |= imm << 16;
2c0262af 4878 } else {
92cdfaeb
PM
4879 /* size == 2 */
4880 imm = (uint32_t)shift;
4881 }
4882 tmp2 = tcg_const_i32(imm);
4883 tmp4 = neon_load_reg(rm + 1, 0);
4884 tmp5 = neon_load_reg(rm + 1, 1);
4885 for (pass = 0; pass < 2; pass++) {
4886 if (pass == 0) {
4887 tmp = neon_load_reg(rm, 0);
4888 } else {
4889 tmp = tmp4;
4890 }
0b36f4cd
CL
4891 gen_neon_shift_narrow(size, tmp, tmp2, q,
4892 input_unsigned);
92cdfaeb
PM
4893 if (pass == 0) {
4894 tmp3 = neon_load_reg(rm, 1);
4895 } else {
4896 tmp3 = tmp5;
4897 }
0b36f4cd
CL
4898 gen_neon_shift_narrow(size, tmp3, tmp2, q,
4899 input_unsigned);
36aa55dc 4900 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
4901 tcg_temp_free_i32(tmp);
4902 tcg_temp_free_i32(tmp3);
4903 tmp = tcg_temp_new_i32();
92cdfaeb
PM
4904 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4905 neon_store_reg(rd, pass, tmp);
4906 } /* for pass */
c6067f04 4907 tcg_temp_free_i32(tmp2);
b75263d6 4908 }
9ee6e8bb
PB
4909 } else if (op == 10) {
4910 /* VSHLL */
ad69471c 4911 if (q || size == 3)
9ee6e8bb 4912 return 1;
ad69471c
PB
4913 tmp = neon_load_reg(rm, 0);
4914 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4915 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4916 if (pass == 1)
4917 tmp = tmp2;
4918
4919 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4920
9ee6e8bb
PB
4921 if (shift != 0) {
4922 /* The shift is less than the width of the source
ad69471c
PB
4923 type, so we can just shift the whole register. */
4924 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
4925 /* Widen the result of shift: we need to clear
4926 * the potential overflow bits resulting from
4927 * left bits of the narrow input appearing as
4928                          * right bits of the left neighbouring narrow
4929 * input. */
ad69471c
PB
4930 if (size < 2 || !u) {
4931 uint64_t imm64;
4932 if (size == 0) {
4933 imm = (0xffu >> (8 - shift));
4934 imm |= imm << 16;
acdf01ef 4935 } else if (size == 1) {
ad69471c 4936 imm = 0xffff >> (16 - shift);
acdf01ef
CL
4937 } else {
4938 /* size == 2 */
4939 imm = 0xffffffff >> (32 - shift);
4940 }
4941 if (size < 2) {
4942 imm64 = imm | (((uint64_t)imm) << 32);
4943 } else {
4944 imm64 = imm;
9ee6e8bb 4945 }
acdf01ef 4946 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
4947 }
4948 }
ad69471c 4949 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 4950 }
f73534a5 4951 } else if (op >= 14) {
9ee6e8bb 4952 /* VCVT fixed-point. */
f73534a5
PM
4953 /* We have already masked out the must-be-1 top bit of imm6,
4954 * hence this 32-shift where the ARM ARM has 64-imm6.
4955 */
4956 shift = 32 - shift;
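                /* Example: imm6 == 0b111100 encodes 64 - 60 = 4 fraction
                 * bits; with the top bit already masked off, shift arrives
                 * here as 28 and 32 - 28 gives 4. */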
9ee6e8bb 4957 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4958 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 4959 if (!(op & 1)) {
9ee6e8bb 4960 if (u)
4373f3ce 4961 gen_vfp_ulto(0, shift);
9ee6e8bb 4962 else
4373f3ce 4963 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4964 } else {
4965 if (u)
4373f3ce 4966 gen_vfp_toul(0, shift);
9ee6e8bb 4967 else
4373f3ce 4968 gen_vfp_tosl(0, shift);
2c0262af 4969 }
4373f3ce 4970 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4971 }
4972 } else {
9ee6e8bb
PB
4973 return 1;
4974 }
4975 } else { /* (insn & 0x00380080) == 0 */
4976 int invert;
4977
4978 op = (insn >> 8) & 0xf;
4979 /* One register and immediate. */
4980 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4981 invert = (insn & (1 << 5)) != 0;
4982 switch (op) {
4983 case 0: case 1:
4984 /* no-op */
4985 break;
4986 case 2: case 3:
4987 imm <<= 8;
4988 break;
4989 case 4: case 5:
4990 imm <<= 16;
4991 break;
4992 case 6: case 7:
4993 imm <<= 24;
4994 break;
4995 case 8: case 9:
4996 imm |= imm << 16;
4997 break;
4998 case 10: case 11:
4999 imm = (imm << 8) | (imm << 24);
5000 break;
5001 case 12:
8e31209e 5002 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5003 break;
5004 case 13:
5005 imm = (imm << 16) | 0xffff;
5006 break;
5007 case 14:
5008 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5009 if (invert)
5010 imm = ~imm;
5011 break;
5012 case 15:
5013 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5014 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5015 break;
5016 }
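            /* The switch above follows what the ARM ARM calls the AdvSIMD
             * modified-immediate ("cmode") expansion; for instance op == 12
             * turns an 8-bit value 0xAB into 0xABFF by shifting in ones from
             * the right. */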
5017 if (invert)
5018 imm = ~imm;
5019
9ee6e8bb
PB
5020 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5021 if (op & 1 && op < 12) {
ad69471c 5022 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5023 if (invert) {
5024 /* The immediate value has already been inverted, so
5025 BIC becomes AND. */
ad69471c 5026 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5027 } else {
ad69471c 5028 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5029 }
9ee6e8bb 5030 } else {
ad69471c 5031 /* VMOV, VMVN. */
7d1b0095 5032 tmp = tcg_temp_new_i32();
9ee6e8bb 5033 if (op == 14 && invert) {
ad69471c
PB
5034 uint32_t val;
5035 val = 0;
9ee6e8bb
PB
5036 for (n = 0; n < 4; n++) {
5037 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5038 val |= 0xff << (n * 8);
9ee6e8bb 5039 }
ad69471c
PB
5040 tcg_gen_movi_i32(tmp, val);
5041 } else {
5042 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5043 }
9ee6e8bb 5044 }
ad69471c 5045 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5046 }
5047 }
e4b3861d 5048 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5049 if (size != 3) {
5050 op = (insn >> 8) & 0xf;
5051 if ((insn & (1 << 6)) == 0) {
5052 /* Three registers of different lengths. */
5053 int src1_wide;
5054 int src2_wide;
5055 int prewiden;
5056 /* prewiden, src1_wide, src2_wide */
5057 static const int neon_3reg_wide[16][3] = {
5058 {1, 0, 0}, /* VADDL */
5059 {1, 1, 0}, /* VADDW */
5060 {1, 0, 0}, /* VSUBL */
5061 {1, 1, 0}, /* VSUBW */
5062 {0, 1, 1}, /* VADDHN */
5063 {0, 0, 0}, /* VABAL */
5064 {0, 1, 1}, /* VSUBHN */
5065 {0, 0, 0}, /* VABDL */
5066 {0, 0, 0}, /* VMLAL */
5067 {0, 0, 0}, /* VQDMLAL */
5068 {0, 0, 0}, /* VMLSL */
5069 {0, 0, 0}, /* VQDMLSL */
5070 {0, 0, 0}, /* Integer VMULL */
5071 {0, 0, 0}, /* VQDMULL */
5072 {0, 0, 0} /* Polynomial VMULL */
5073 };
5074
5075 prewiden = neon_3reg_wide[op][0];
5076 src1_wide = neon_3reg_wide[op][1];
5077 src2_wide = neon_3reg_wide[op][2];
5078
ad69471c
PB
5079 if (size == 0 && (op == 9 || op == 11 || op == 13))
5080 return 1;
5081
9ee6e8bb
PB
5082 /* Avoid overlapping operands. Wide source operands are
5083 always aligned so will never overlap with wide
5084 destinations in problematic ways. */
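                /* Concretely: for a long multiply-accumulate where rd == rn,
                 * pass 0 would overwrite the high half of rn before pass 1
                 * reads it, so that half is stashed in scratch slot 2 just
                 * below and reloaded on the second pass. */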
8f8e3aa4 5085 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5086 tmp = neon_load_reg(rm, 1);
5087 neon_store_scratch(2, tmp);
8f8e3aa4 5088 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5089 tmp = neon_load_reg(rn, 1);
5090 neon_store_scratch(2, tmp);
9ee6e8bb 5091 }
a50f5b91 5092 TCGV_UNUSED(tmp3);
9ee6e8bb 5093 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5094 if (src1_wide) {
5095 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5096 TCGV_UNUSED(tmp);
9ee6e8bb 5097 } else {
ad69471c 5098 if (pass == 1 && rd == rn) {
dd8fbd78 5099 tmp = neon_load_scratch(2);
9ee6e8bb 5100 } else {
ad69471c
PB
5101 tmp = neon_load_reg(rn, pass);
5102 }
5103 if (prewiden) {
5104 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5105 }
5106 }
ad69471c
PB
5107 if (src2_wide) {
5108 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5109 TCGV_UNUSED(tmp2);
9ee6e8bb 5110 } else {
ad69471c 5111 if (pass == 1 && rd == rm) {
dd8fbd78 5112 tmp2 = neon_load_scratch(2);
9ee6e8bb 5113 } else {
ad69471c
PB
5114 tmp2 = neon_load_reg(rm, pass);
5115 }
5116 if (prewiden) {
5117 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5118 }
9ee6e8bb
PB
5119 }
5120 switch (op) {
5121 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5122 gen_neon_addl(size);
9ee6e8bb 5123 break;
79b0e534 5124 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5125 gen_neon_subl(size);
9ee6e8bb
PB
5126 break;
5127 case 5: case 7: /* VABAL, VABDL */
5128 switch ((size << 1) | u) {
ad69471c
PB
5129 case 0:
5130 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5131 break;
5132 case 1:
5133 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5134 break;
5135 case 2:
5136 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5137 break;
5138 case 3:
5139 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5140 break;
5141 case 4:
5142 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5143 break;
5144 case 5:
5145 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5146 break;
9ee6e8bb
PB
5147 default: abort();
5148 }
7d1b0095
PM
5149 tcg_temp_free_i32(tmp2);
5150 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5151 break;
5152 case 8: case 9: case 10: case 11: case 12: case 13:
5153 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5154 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5155 break;
5156 case 14: /* Polynomial VMULL */
e5ca24cb 5157 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5158 tcg_temp_free_i32(tmp2);
5159 tcg_temp_free_i32(tmp);
e5ca24cb 5160 break;
9ee6e8bb
PB
5161 default: /* 15 is RESERVED. */
5162 return 1;
5163 }
ebcd88ce
PM
5164 if (op == 13) {
5165 /* VQDMULL */
5166 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5167 neon_store_reg64(cpu_V0, rd + pass);
5168 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5169 /* Accumulate. */
ebcd88ce 5170 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5171 switch (op) {
4dc064e6
PM
5172 case 10: /* VMLSL */
5173 gen_neon_negl(cpu_V0, size);
5174 /* Fall through */
5175 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5176 gen_neon_addl(size);
9ee6e8bb
PB
5177 break;
5178 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5179 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5180 if (op == 11) {
5181 gen_neon_negl(cpu_V0, size);
5182 }
ad69471c
PB
5183 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5184 break;
9ee6e8bb
PB
5185 default:
5186 abort();
5187 }
ad69471c 5188 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5189 } else if (op == 4 || op == 6) {
5190 /* Narrowing operation. */
7d1b0095 5191 tmp = tcg_temp_new_i32();
79b0e534 5192 if (!u) {
9ee6e8bb 5193 switch (size) {
ad69471c
PB
5194 case 0:
5195 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5196 break;
5197 case 1:
5198 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5199 break;
5200 case 2:
5201 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5202 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5203 break;
9ee6e8bb
PB
5204 default: abort();
5205 }
5206 } else {
5207 switch (size) {
ad69471c
PB
5208 case 0:
5209 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5210 break;
5211 case 1:
5212 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5213 break;
5214 case 2:
5215 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5216 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5217 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5218 break;
9ee6e8bb
PB
5219 default: abort();
5220 }
5221 }
ad69471c
PB
5222 if (pass == 0) {
5223 tmp3 = tmp;
5224 } else {
5225 neon_store_reg(rd, 0, tmp3);
5226 neon_store_reg(rd, 1, tmp);
5227 }
9ee6e8bb
PB
5228 } else {
5229 /* Write back the result. */
ad69471c 5230 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5231 }
5232 }
5233 } else {
5234 /* Two registers and a scalar. */
5235 switch (op) {
5236 case 0: /* Integer VMLA scalar */
5237 case 1: /* Float VMLA scalar */
5238 case 4: /* Integer VMLS scalar */
5239 case 5: /* Floating point VMLS scalar */
5240 case 8: /* Integer VMUL scalar */
5241 case 9: /* Floating point VMUL scalar */
5242 case 12: /* VQDMULH scalar */
5243 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5244 tmp = neon_get_scalar(size, rm);
5245 neon_store_scratch(0, tmp);
9ee6e8bb 5246 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5247 tmp = neon_load_scratch(0);
5248 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5249 if (op == 12) {
5250 if (size == 1) {
2a3f75b4 5251 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5252 } else {
2a3f75b4 5253 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5254 }
5255 } else if (op == 13) {
5256 if (size == 1) {
2a3f75b4 5257 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5258 } else {
2a3f75b4 5259 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5260 }
5261 } else if (op & 1) {
dd8fbd78 5262 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5263 } else {
5264 switch (size) {
dd8fbd78
FN
5265 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5266 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5267 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5268 default: return 1;
5269 }
5270 }
7d1b0095 5271 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5272 if (op < 8) {
5273 /* Accumulate. */
dd8fbd78 5274 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5275 switch (op) {
5276 case 0:
dd8fbd78 5277 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5278 break;
5279 case 1:
dd8fbd78 5280 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5281 break;
5282 case 4:
dd8fbd78 5283 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5284 break;
5285 case 5:
dd8fbd78 5286 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5287 break;
5288 default:
5289 abort();
5290 }
7d1b0095 5291 tcg_temp_free_i32(tmp2);
9ee6e8bb 5292 }
dd8fbd78 5293 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5294 }
5295 break;
5296             case 2: /* VMLAL scalar */
5297 case 3: /* VQDMLAL scalar */
5298 case 6: /* VMLSL scalar */
5299 case 7: /* VQDMLSL scalar */
5300 case 10: /* VMULL scalar */
5301 case 11: /* VQDMULL scalar */
ad69471c
PB
5302 if (size == 0 && (op == 3 || op == 7 || op == 11))
5303 return 1;
5304
dd8fbd78 5305 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5306 /* We need a copy of tmp2 because gen_neon_mull
5307 * deletes it during pass 0. */
7d1b0095 5308 tmp4 = tcg_temp_new_i32();
c6067f04 5309 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5310 tmp3 = neon_load_reg(rn, 1);
ad69471c 5311
9ee6e8bb 5312 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5313 if (pass == 0) {
5314 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5315 } else {
dd8fbd78 5316 tmp = tmp3;
c6067f04 5317 tmp2 = tmp4;
9ee6e8bb 5318 }
ad69471c 5319 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5320 if (op != 11) {
5321 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5322 }
9ee6e8bb 5323 switch (op) {
4dc064e6
PM
5324 case 6:
5325 gen_neon_negl(cpu_V0, size);
5326 /* Fall through */
5327 case 2:
ad69471c 5328 gen_neon_addl(size);
9ee6e8bb
PB
5329 break;
5330 case 3: case 7:
ad69471c 5331 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5332 if (op == 7) {
5333 gen_neon_negl(cpu_V0, size);
5334 }
ad69471c 5335 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5336 break;
5337 case 10:
5338 /* no-op */
5339 break;
5340 case 11:
ad69471c 5341 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5342 break;
5343 default:
5344 abort();
5345 }
ad69471c 5346 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5347 }
dd8fbd78 5348
dd8fbd78 5349
9ee6e8bb
PB
5350 break;
5351 default: /* 14 and 15 are RESERVED */
5352 return 1;
5353 }
5354 }
5355 } else { /* size == 3 */
5356 if (!u) {
5357 /* Extract. */
9ee6e8bb 5358 imm = (insn >> 8) & 0xf;
ad69471c
PB
5359
5360 if (imm > 7 && !q)
5361 return 1;
5362
5363 if (imm == 0) {
5364 neon_load_reg64(cpu_V0, rn);
5365 if (q) {
5366 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5367 }
ad69471c
PB
5368 } else if (imm == 8) {
5369 neon_load_reg64(cpu_V0, rn + 1);
5370 if (q) {
5371 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5372 }
ad69471c 5373 } else if (q) {
a7812ae4 5374 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5375 if (imm < 8) {
5376 neon_load_reg64(cpu_V0, rn);
a7812ae4 5377 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5378 } else {
5379 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5380 neon_load_reg64(tmp64, rm);
ad69471c
PB
5381 }
5382 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5383 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5384 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5385 if (imm < 8) {
5386 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5387 } else {
ad69471c
PB
5388 neon_load_reg64(cpu_V1, rm + 1);
5389 imm -= 8;
9ee6e8bb 5390 }
ad69471c 5391 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5392 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5393 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5394 tcg_temp_free_i64(tmp64);
ad69471c 5395 } else {
a7812ae4 5396 /* BUGFIX */
ad69471c 5397 neon_load_reg64(cpu_V0, rn);
a7812ae4 5398 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5399 neon_load_reg64(cpu_V1, rm);
a7812ae4 5400 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5401 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5402 }
5403 neon_store_reg64(cpu_V0, rd);
5404 if (q) {
5405 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5406 }
5407 } else if ((insn & (1 << 11)) == 0) {
5408 /* Two register misc. */
5409 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5410 size = (insn >> 18) & 3;
5411 switch (op) {
5412 case 0: /* VREV64 */
5413 if (size == 3)
5414 return 1;
5415 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5416 tmp = neon_load_reg(rm, pass * 2);
5417 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5418 switch (size) {
dd8fbd78
FN
5419 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5420 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5421 case 2: /* no-op */ break;
5422 default: abort();
5423 }
dd8fbd78 5424 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5425 if (size == 2) {
dd8fbd78 5426 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5427 } else {
9ee6e8bb 5428 switch (size) {
dd8fbd78
FN
5429 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5430 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5431 default: abort();
5432 }
dd8fbd78 5433 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5434 }
5435 }
5436 break;
5437 case 4: case 5: /* VPADDL */
5438 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5439 if (size == 3)
5440 return 1;
ad69471c
PB
5441 for (pass = 0; pass < q + 1; pass++) {
5442 tmp = neon_load_reg(rm, pass * 2);
5443 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5444 tmp = neon_load_reg(rm, pass * 2 + 1);
5445 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5446 switch (size) {
5447 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5448 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5449 case 2: tcg_gen_add_i64(CPU_V001); break;
5450 default: abort();
5451 }
9ee6e8bb
PB
5452 if (op >= 12) {
5453 /* Accumulate. */
ad69471c
PB
5454 neon_load_reg64(cpu_V1, rd + pass);
5455 gen_neon_addl(size);
9ee6e8bb 5456 }
ad69471c 5457 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5458 }
5459 break;
5460 case 33: /* VTRN */
5461 if (size == 2) {
5462 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5463 tmp = neon_load_reg(rm, n);
5464 tmp2 = neon_load_reg(rd, n + 1);
5465 neon_store_reg(rm, n, tmp2);
5466 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5467 }
5468 } else {
5469 goto elementwise;
5470 }
5471 break;
5472 case 34: /* VUZP */
02acedf9 5473 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5474 return 1;
9ee6e8bb
PB
5475 }
5476 break;
5477 case 35: /* VZIP */
d68a6f3a 5478 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5479 return 1;
9ee6e8bb
PB
5480 }
5481 break;
5482 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5483 if (size == 3)
5484 return 1;
a50f5b91 5485 TCGV_UNUSED(tmp2);
9ee6e8bb 5486 for (pass = 0; pass < 2; pass++) {
ad69471c 5487 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5488 tmp = tcg_temp_new_i32();
c33171c7 5489 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
ad69471c
PB
5490 if (pass == 0) {
5491 tmp2 = tmp;
5492 } else {
5493 neon_store_reg(rd, 0, tmp2);
5494 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5495 }
9ee6e8bb
PB
5496 }
5497 break;
5498 case 38: /* VSHLL */
ad69471c 5499 if (q || size == 3)
9ee6e8bb 5500 return 1;
ad69471c
PB
5501 tmp = neon_load_reg(rm, 0);
5502 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5503 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5504 if (pass == 1)
5505 tmp = tmp2;
5506 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5507 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5508 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5509 }
5510 break;
60011498
PB
5511 case 44: /* VCVT.F16.F32 */
5512 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5513 return 1;
7d1b0095
PM
5514 tmp = tcg_temp_new_i32();
5515 tmp2 = tcg_temp_new_i32();
60011498 5516 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5517 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5518 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5519 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5520 tcg_gen_shli_i32(tmp2, tmp2, 16);
5521 tcg_gen_or_i32(tmp2, tmp2, tmp);
5522 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5523 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5524 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5525 neon_store_reg(rd, 0, tmp2);
7d1b0095 5526 tmp2 = tcg_temp_new_i32();
2d981da7 5527 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5528 tcg_gen_shli_i32(tmp2, tmp2, 16);
5529 tcg_gen_or_i32(tmp2, tmp2, tmp);
5530 neon_store_reg(rd, 1, tmp2);
7d1b0095 5531 tcg_temp_free_i32(tmp);
60011498
PB
5532 break;
5533 case 46: /* VCVT.F32.F16 */
5534 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5535 return 1;
7d1b0095 5536 tmp3 = tcg_temp_new_i32();
60011498
PB
5537 tmp = neon_load_reg(rm, 0);
5538 tmp2 = neon_load_reg(rm, 1);
5539 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5540 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5541 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5542 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5543 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5544 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5545 tcg_temp_free_i32(tmp);
60011498 5546 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5547 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5548 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5549 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5550 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5551 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5552 tcg_temp_free_i32(tmp2);
5553 tcg_temp_free_i32(tmp3);
60011498 5554 break;
9ee6e8bb
PB
5555 default:
5556 elementwise:
5557 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5558 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5559 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5560 neon_reg_offset(rm, pass));
dd8fbd78 5561 TCGV_UNUSED(tmp);
9ee6e8bb 5562 } else {
dd8fbd78 5563 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5564 }
5565 switch (op) {
5566 case 1: /* VREV32 */
5567 switch (size) {
dd8fbd78
FN
5568 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5569 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5570 default: return 1;
5571 }
5572 break;
5573 case 2: /* VREV16 */
5574 if (size != 0)
5575 return 1;
dd8fbd78 5576 gen_rev16(tmp);
9ee6e8bb 5577 break;
9ee6e8bb
PB
5578 case 8: /* CLS */
5579 switch (size) {
dd8fbd78
FN
5580 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5581 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5582 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5583 default: return 1;
5584 }
5585 break;
5586 case 9: /* CLZ */
5587 switch (size) {
dd8fbd78
FN
5588 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5589 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5590 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5591 default: return 1;
5592 }
5593 break;
5594 case 10: /* CNT */
5595 if (size != 0)
5596 return 1;
dd8fbd78 5597 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5598 break;
5599 case 11: /* VNOT */
5600 if (size != 0)
5601 return 1;
dd8fbd78 5602 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5603 break;
5604 case 14: /* VQABS */
5605 switch (size) {
2a3f75b4
PM
5606 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5607 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5608 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
9ee6e8bb
PB
5609 default: return 1;
5610 }
5611 break;
5612 case 15: /* VQNEG */
5613 switch (size) {
2a3f75b4
PM
5614 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5615 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5616 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
9ee6e8bb
PB
5617 default: return 1;
5618 }
5619 break;
5620 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5621 tmp2 = tcg_const_i32(0);
9ee6e8bb 5622 switch(size) {
dd8fbd78
FN
5623 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5624 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5625 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5626 default: return 1;
5627 }
dd8fbd78 5628 tcg_temp_free(tmp2);
9ee6e8bb 5629 if (op == 19)
dd8fbd78 5630 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5631 break;
5632 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5633 tmp2 = tcg_const_i32(0);
9ee6e8bb 5634 switch(size) {
dd8fbd78
FN
5635 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5636 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5637 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5638 default: return 1;
5639 }
dd8fbd78 5640 tcg_temp_free(tmp2);
9ee6e8bb 5641 if (op == 20)
dd8fbd78 5642 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5643 break;
5644 case 18: /* VCEQ #0 */
dd8fbd78 5645 tmp2 = tcg_const_i32(0);
9ee6e8bb 5646 switch(size) {
dd8fbd78
FN
5647 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5648 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5649 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5650 default: return 1;
5651 }
dd8fbd78 5652 tcg_temp_free(tmp2);
9ee6e8bb
PB
5653 break;
5654 case 22: /* VABS */
5655 switch(size) {
dd8fbd78
FN
5656 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5657 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5658 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5659 default: return 1;
5660 }
5661 break;
5662 case 23: /* VNEG */
ad69471c
PB
5663 if (size == 3)
5664 return 1;
dd8fbd78
FN
5665 tmp2 = tcg_const_i32(0);
5666 gen_neon_rsb(size, tmp, tmp2);
5667 tcg_temp_free(tmp2);
9ee6e8bb 5668 break;
0e326109 5669 case 24: /* Float VCGT #0 */
dd8fbd78
FN
5670 tmp2 = tcg_const_i32(0);
5671 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5672 tcg_temp_free(tmp2);
9ee6e8bb 5673 break;
0e326109 5674 case 25: /* Float VCGE #0 */
dd8fbd78
FN
5675 tmp2 = tcg_const_i32(0);
5676 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5677 tcg_temp_free(tmp2);
9ee6e8bb
PB
5678 break;
5679 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5680 tmp2 = tcg_const_i32(0);
5681 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5682 tcg_temp_free(tmp2);
9ee6e8bb 5683 break;
0e326109
PM
5684 case 27: /* Float VCLE #0 */
5685 tmp2 = tcg_const_i32(0);
5686 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
5687 tcg_temp_free(tmp2);
5688 break;
5689 case 28: /* Float VCLT #0 */
5690 tmp2 = tcg_const_i32(0);
5691 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
5692 tcg_temp_free(tmp2);
5693 break;
9ee6e8bb 5694 case 30: /* Float VABS */
4373f3ce 5695 gen_vfp_abs(0);
9ee6e8bb
PB
5696 break;
5697 case 31: /* Float VNEG */
4373f3ce 5698 gen_vfp_neg(0);
9ee6e8bb
PB
5699 break;
5700 case 32: /* VSWP */
dd8fbd78
FN
5701 tmp2 = neon_load_reg(rd, pass);
5702 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5703 break;
5704 case 33: /* VTRN */
dd8fbd78 5705 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5706 switch (size) {
dd8fbd78
FN
5707 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5708 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5709 case 2: abort();
5710 default: return 1;
5711 }
dd8fbd78 5712 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5713 break;
5714 case 56: /* Integer VRECPE */
dd8fbd78 5715 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5716 break;
5717 case 57: /* Integer VRSQRTE */
dd8fbd78 5718 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5719 break;
5720 case 58: /* Float VRECPE */
4373f3ce 5721 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5722 break;
5723 case 59: /* Float VRSQRTE */
4373f3ce 5724 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5725 break;
5726 case 60: /* VCVT.F32.S32 */
d3587ef8 5727 gen_vfp_sito(0);
9ee6e8bb
PB
5728 break;
5729 case 61: /* VCVT.F32.U32 */
d3587ef8 5730 gen_vfp_uito(0);
9ee6e8bb
PB
5731 break;
5732 case 62: /* VCVT.S32.F32 */
d3587ef8 5733 gen_vfp_tosiz(0);
9ee6e8bb
PB
5734 break;
5735 case 63: /* VCVT.U32.F32 */
d3587ef8 5736 gen_vfp_touiz(0);
9ee6e8bb
PB
5737 break;
5738 default:
5739 /* Reserved: 21, 29, 39-56 */
5740 return 1;
5741 }
5742 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5743 tcg_gen_st_f32(cpu_F0s, cpu_env,
5744 neon_reg_offset(rd, pass));
9ee6e8bb 5745 } else {
dd8fbd78 5746 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5747 }
5748 }
5749 break;
5750 }
5751 } else if ((insn & (1 << 10)) == 0) {
5752 /* VTBL, VTBX. */
3018f259 5753 n = ((insn >> 5) & 0x18) + 8;
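 /* Descriptive note (derived from the VTBL/VTBX encoding): (insn >> 5) & 0x18
    brings the two 'len' bits (insn[9:8]) down as 0/8/16/24, so
    n = (len + 1) * 8 is the table size in bytes; e.g. a two-register
    table (len = 1) gives n = 16. */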
9ee6e8bb 5754 if (insn & (1 << 6)) {
8f8e3aa4 5755 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5756 } else {
7d1b0095 5757 tmp = tcg_temp_new_i32();
8f8e3aa4 5758 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5759 }
8f8e3aa4 5760 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5761 tmp4 = tcg_const_i32(rn);
5762 tmp5 = tcg_const_i32(n);
5763 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 5764 tcg_temp_free_i32(tmp);
9ee6e8bb 5765 if (insn & (1 << 6)) {
8f8e3aa4 5766 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5767 } else {
7d1b0095 5768 tmp = tcg_temp_new_i32();
8f8e3aa4 5769 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5770 }
8f8e3aa4 5771 tmp3 = neon_load_reg(rm, 1);
b75263d6 5772 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5773 tcg_temp_free_i32(tmp5);
5774 tcg_temp_free_i32(tmp4);
8f8e3aa4 5775 neon_store_reg(rd, 0, tmp2);
3018f259 5776 neon_store_reg(rd, 1, tmp3);
7d1b0095 5777 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5778 } else if ((insn & 0x380) == 0) {
5779 /* VDUP */
5780 if (insn & (1 << 19)) {
dd8fbd78 5781 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5782 } else {
dd8fbd78 5783 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5784 }
5785 if (insn & (1 << 16)) {
dd8fbd78 5786 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5787 } else if (insn & (1 << 17)) {
5788 if ((insn >> 18) & 1)
dd8fbd78 5789 gen_neon_dup_high16(tmp);
9ee6e8bb 5790 else
dd8fbd78 5791 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5792 }
5793 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 5794 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
5795 tcg_gen_mov_i32(tmp2, tmp);
5796 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5797 }
7d1b0095 5798 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5799 } else {
5800 return 1;
5801 }
5802 }
5803 }
5804 return 0;
5805}
5806
fe1479c3
PB
5807static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5808{
5809 int crn = (insn >> 16) & 0xf;
5810 int crm = insn & 0xf;
5811 int op1 = (insn >> 21) & 7;
5812 int op2 = (insn >> 5) & 7;
5813 int rt = (insn >> 12) & 0xf;
5814 TCGv tmp;
5815
ca27c052
PM
5816 /* Minimal set of debug registers, since we don't support debug */
5817 if (op1 == 0 && crn == 0 && op2 == 0) {
5818 switch (crm) {
5819 case 0:
5820 /* DBGDIDR: just RAZ. In particular this means the
5821 * "debug architecture version" bits will read as
5822 * a reserved value, which should cause Linux to
5823 * not try to use the debug hardware.
5824 */
5825 tmp = tcg_const_i32(0);
5826 store_reg(s, rt, tmp);
5827 return 0;
5828 case 1:
5829 case 2:
5830 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
5831 * don't implement memory mapped debug components
5832 */
5833 if (ENABLE_ARCH_7) {
5834 tmp = tcg_const_i32(0);
5835 store_reg(s, rt, tmp);
5836 return 0;
5837 }
5838 break;
5839 default:
5840 break;
5841 }
5842 }
5843
fe1479c3
PB
5844 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5845 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5846 /* TEECR */
5847 if (IS_USER(s))
5848 return 1;
5849 tmp = load_cpu_field(teecr);
5850 store_reg(s, rt, tmp);
5851 return 0;
5852 }
5853 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5854 /* TEEHBR */
5855 if (IS_USER(s) && (env->teecr & 1))
5856 return 1;
5857 tmp = load_cpu_field(teehbr);
5858 store_reg(s, rt, tmp);
5859 return 0;
5860 }
5861 }
5862 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5863 op1, crn, crm, op2);
5864 return 1;
5865}
5866
5867static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5868{
5869 int crn = (insn >> 16) & 0xf;
5870 int crm = insn & 0xf;
5871 int op1 = (insn >> 21) & 7;
5872 int op2 = (insn >> 5) & 7;
5873 int rt = (insn >> 12) & 0xf;
5874 TCGv tmp;
5875
5876 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5877 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5878 /* TEECR */
5879 if (IS_USER(s))
5880 return 1;
5881 tmp = load_reg(s, rt);
5882 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 5883 tcg_temp_free_i32(tmp);
fe1479c3
PB
5884 return 0;
5885 }
5886 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5887 /* TEEHBR */
5888 if (IS_USER(s) && (env->teecr & 1))
5889 return 1;
5890 tmp = load_reg(s, rt);
5891 store_cpu_field(tmp, teehbr);
5892 return 0;
5893 }
5894 }
5895 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5896 op1, crn, crm, op2);
5897 return 1;
5898}
5899
9ee6e8bb
PB
5900static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5901{
5902 int cpnum;
5903
5904 cpnum = (insn >> 8) & 0xf;
5905 if (arm_feature(env, ARM_FEATURE_XSCALE)
5906 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5907 return 1;
5908
5909 switch (cpnum) {
5910 case 0:
5911 case 1:
5912 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5913 return disas_iwmmxt_insn(env, s, insn);
5914 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5915 return disas_dsp_insn(env, s, insn);
5916 }
5917 return 1;
5918 case 10:
5919 case 11:
5920 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5921 case 14:
5922 /* Coprocessors 7-15 are architecturally reserved by ARM.
5923 Unfortunately Intel decided to ignore this. */
5924 if (arm_feature(env, ARM_FEATURE_XSCALE))
5925 goto board;
5926 if (insn & (1 << 20))
5927 return disas_cp14_read(env, s, insn);
5928 else
5929 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5930 case 15:
5931 return disas_cp15_insn (env, s, insn);
5932 default:
fe1479c3 5933 board:
9ee6e8bb
PB
5934 /* Unknown coprocessor. See if the board has hooked it. */
5935 return disas_cp_insn (env, s, insn);
5936 }
5937}
5938
5e3f878a
PB
5939
5940/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5941static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5942{
5943 TCGv tmp;
7d1b0095 5944 tmp = tcg_temp_new_i32();
5e3f878a
PB
5945 tcg_gen_trunc_i64_i32(tmp, val);
5946 store_reg(s, rlow, tmp);
7d1b0095 5947 tmp = tcg_temp_new_i32();
5e3f878a
PB
5948 tcg_gen_shri_i64(val, val, 32);
5949 tcg_gen_trunc_i64_i32(tmp, val);
5950 store_reg(s, rhigh, tmp);
5951}
5952
5953/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5954static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5955{
a7812ae4 5956 TCGv_i64 tmp;
5e3f878a
PB
5957 TCGv tmp2;
5958
36aa55dc 5959 /* Load value and extend to 64 bits. */
a7812ae4 5960 tmp = tcg_temp_new_i64();
5e3f878a
PB
5961 tmp2 = load_reg(s, rlow);
5962 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 5963 tcg_temp_free_i32(tmp2);
5e3f878a 5964 tcg_gen_add_i64(val, val, tmp);
b75263d6 5965 tcg_temp_free_i64(tmp);
5e3f878a
PB
5966}
5967
5968/* load and add a 64-bit value from a register pair. */
a7812ae4 5969static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5970{
a7812ae4 5971 TCGv_i64 tmp;
36aa55dc
PB
5972 TCGv tmpl;
5973 TCGv tmph;
5e3f878a
PB
5974
5975 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5976 tmpl = load_reg(s, rlow);
5977 tmph = load_reg(s, rhigh);
a7812ae4 5978 tmp = tcg_temp_new_i64();
36aa55dc 5979 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
5980 tcg_temp_free_i32(tmpl);
5981 tcg_temp_free_i32(tmph);
5e3f878a 5982 tcg_gen_add_i64(val, val, tmp);
b75263d6 5983 tcg_temp_free_i64(tmp);
5e3f878a
PB
5984}
5985
5986/* Set N and Z flags from a 64-bit value. */
a7812ae4 5987static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 5988{
7d1b0095 5989 TCGv tmp = tcg_temp_new_i32();
5e3f878a 5990 gen_helper_logicq_cc(tmp, val);
6fbe23d5 5991 gen_logic_CC(tmp);
7d1b0095 5992 tcg_temp_free_i32(tmp);
5e3f878a
PB
5993}
5994
426f5abc
PB
5995/* Load/Store exclusive instructions are implemented by remembering
5996 the value/address loaded, and seeing if these are the same
 5997 when the store is performed. This should be sufficient to implement
5998 the architecturally mandated semantics, and avoids having to monitor
5999 regular stores.
6000
6001 In system emulation mode only one CPU will be running at once, so
6002 this sequence is effectively atomic. In user emulation mode we
6003 throw an exception and handle the atomic operation elsewhere. */
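/* Illustrative guest-level pattern this machinery supports (pseudocode
   sketch, not taken from this file):

       do {
           old = LDREX(addr);          // load and mark addr exclusive
           new = old + 1;              // any computation
       } while (STREX(new, addr));     // 0 on success, 1 if exclusivity lost
*/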
6004static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6005 TCGv addr, int size)
6006{
6007 TCGv tmp;
6008
6009 switch (size) {
6010 case 0:
6011 tmp = gen_ld8u(addr, IS_USER(s));
6012 break;
6013 case 1:
6014 tmp = gen_ld16u(addr, IS_USER(s));
6015 break;
6016 case 2:
6017 case 3:
6018 tmp = gen_ld32(addr, IS_USER(s));
6019 break;
6020 default:
6021 abort();
6022 }
6023 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6024 store_reg(s, rt, tmp);
6025 if (size == 3) {
7d1b0095 6026 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6027 tcg_gen_addi_i32(tmp2, addr, 4);
6028 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6029 tcg_temp_free_i32(tmp2);
426f5abc
PB
6030 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6031 store_reg(s, rt2, tmp);
6032 }
6033 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6034}
6035
6036static void gen_clrex(DisasContext *s)
6037{
6038 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6039}
6040
6041#ifdef CONFIG_USER_ONLY
6042static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6043 TCGv addr, int size)
6044{
6045 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6046 tcg_gen_movi_i32(cpu_exclusive_info,
6047 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6048 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6049}
6050#else
6051static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6052 TCGv addr, int size)
6053{
6054 TCGv tmp;
6055 int done_label;
6056 int fail_label;
6057
6058 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6059 [addr] = {Rt};
6060 {Rd} = 0;
6061 } else {
6062 {Rd} = 1;
6063 } */
6064 fail_label = gen_new_label();
6065 done_label = gen_new_label();
6066 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6067 switch (size) {
6068 case 0:
6069 tmp = gen_ld8u(addr, IS_USER(s));
6070 break;
6071 case 1:
6072 tmp = gen_ld16u(addr, IS_USER(s));
6073 break;
6074 case 2:
6075 case 3:
6076 tmp = gen_ld32(addr, IS_USER(s));
6077 break;
6078 default:
6079 abort();
6080 }
6081 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6082 tcg_temp_free_i32(tmp);
426f5abc 6083 if (size == 3) {
7d1b0095 6084 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6085 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6086 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6087 tcg_temp_free_i32(tmp2);
426f5abc 6088 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6089 tcg_temp_free_i32(tmp);
426f5abc
PB
6090 }
6091 tmp = load_reg(s, rt);
6092 switch (size) {
6093 case 0:
6094 gen_st8(tmp, addr, IS_USER(s));
6095 break;
6096 case 1:
6097 gen_st16(tmp, addr, IS_USER(s));
6098 break;
6099 case 2:
6100 case 3:
6101 gen_st32(tmp, addr, IS_USER(s));
6102 break;
6103 default:
6104 abort();
6105 }
6106 if (size == 3) {
6107 tcg_gen_addi_i32(addr, addr, 4);
6108 tmp = load_reg(s, rt2);
6109 gen_st32(tmp, addr, IS_USER(s));
6110 }
6111 tcg_gen_movi_i32(cpu_R[rd], 0);
6112 tcg_gen_br(done_label);
6113 gen_set_label(fail_label);
6114 tcg_gen_movi_i32(cpu_R[rd], 1);
6115 gen_set_label(done_label);
6116 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6117}
6118#endif
6119
9ee6e8bb
PB
6120static void disas_arm_insn(CPUState * env, DisasContext *s)
6121{
6122 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6123 TCGv tmp;
3670669c 6124 TCGv tmp2;
6ddbc6e4 6125 TCGv tmp3;
b0109805 6126 TCGv addr;
a7812ae4 6127 TCGv_i64 tmp64;
9ee6e8bb
PB
6128
6129 insn = ldl_code(s->pc);
6130 s->pc += 4;
6131
6132 /* M variants do not implement ARM mode. */
6133 if (IS_M(env))
6134 goto illegal_op;
6135 cond = insn >> 28;
6136 if (cond == 0xf){
6137 /* Unconditional instructions. */
6138 if (((insn >> 25) & 7) == 1) {
6139 /* NEON Data processing. */
6140 if (!arm_feature(env, ARM_FEATURE_NEON))
6141 goto illegal_op;
6142
6143 if (disas_neon_data_insn(env, s, insn))
6144 goto illegal_op;
6145 return;
6146 }
6147 if ((insn & 0x0f100000) == 0x04000000) {
6148 /* NEON load/store. */
6149 if (!arm_feature(env, ARM_FEATURE_NEON))
6150 goto illegal_op;
6151
6152 if (disas_neon_ls_insn(env, s, insn))
6153 goto illegal_op;
6154 return;
6155 }
3d185e5d
PM
6156 if (((insn & 0x0f30f000) == 0x0510f000) ||
6157 ((insn & 0x0f30f010) == 0x0710f000)) {
6158 if ((insn & (1 << 22)) == 0) {
6159 /* PLDW; v7MP */
6160 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6161 goto illegal_op;
6162 }
6163 }
6164 /* Otherwise PLD; v5TE+ */
6165 return;
6166 }
6167 if (((insn & 0x0f70f000) == 0x0450f000) ||
6168 ((insn & 0x0f70f010) == 0x0650f000)) {
6169 ARCH(7);
6170 return; /* PLI; V7 */
6171 }
6172 if (((insn & 0x0f700000) == 0x04100000) ||
6173 ((insn & 0x0f700010) == 0x06100000)) {
6174 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6175 goto illegal_op;
6176 }
6177 return; /* v7MP: Unallocated memory hint: must NOP */
6178 }
6179
6180 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6181 ARCH(6);
6182 /* setend */
6183 if (insn & (1 << 9)) {
6184 /* BE8 mode not implemented. */
6185 goto illegal_op;
6186 }
6187 return;
6188 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6189 switch ((insn >> 4) & 0xf) {
6190 case 1: /* clrex */
6191 ARCH(6K);
426f5abc 6192 gen_clrex(s);
9ee6e8bb
PB
6193 return;
6194 case 4: /* dsb */
6195 case 5: /* dmb */
6196 case 6: /* isb */
6197 ARCH(7);
6198 /* We don't emulate caches so these are a no-op. */
6199 return;
6200 default:
6201 goto illegal_op;
6202 }
6203 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6204 /* srs */
c67b6b71 6205 int32_t offset;
9ee6e8bb
PB
6206 if (IS_USER(s))
6207 goto illegal_op;
6208 ARCH(6);
6209 op1 = (insn & 0x1f);
7d1b0095 6210 addr = tcg_temp_new_i32();
39ea3d4e
PM
6211 tmp = tcg_const_i32(op1);
6212 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6213 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6214 i = (insn >> 23) & 3;
6215 switch (i) {
6216 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6217 case 1: offset = 0; break; /* IA */
6218 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6219 case 3: offset = 4; break; /* IB */
6220 default: abort();
6221 }
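 /* Descriptive note: SRS stores two words (LR, then SPSR), so the start
    address per addressing mode works out to DA -> sp-4, IA -> sp,
    DB -> sp-8, IB -> sp+4, matching the offsets above. */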
6222 if (offset)
b0109805
PB
6223 tcg_gen_addi_i32(addr, addr, offset);
6224 tmp = load_reg(s, 14);
6225 gen_st32(tmp, addr, 0);
c67b6b71 6226 tmp = load_cpu_field(spsr);
b0109805
PB
6227 tcg_gen_addi_i32(addr, addr, 4);
6228 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6229 if (insn & (1 << 21)) {
6230 /* Base writeback. */
6231 switch (i) {
6232 case 0: offset = -8; break;
c67b6b71
FN
6233 case 1: offset = 4; break;
6234 case 2: offset = -4; break;
9ee6e8bb
PB
6235 case 3: offset = 0; break;
6236 default: abort();
6237 }
6238 if (offset)
c67b6b71 6239 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6240 tmp = tcg_const_i32(op1);
6241 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6242 tcg_temp_free_i32(tmp);
7d1b0095 6243 tcg_temp_free_i32(addr);
b0109805 6244 } else {
7d1b0095 6245 tcg_temp_free_i32(addr);
9ee6e8bb 6246 }
a990f58f 6247 return;
ea825eee 6248 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6249 /* rfe */
c67b6b71 6250 int32_t offset;
9ee6e8bb
PB
6251 if (IS_USER(s))
6252 goto illegal_op;
6253 ARCH(6);
6254 rn = (insn >> 16) & 0xf;
b0109805 6255 addr = load_reg(s, rn);
9ee6e8bb
PB
6256 i = (insn >> 23) & 3;
6257 switch (i) {
b0109805 6258 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6259 case 1: offset = 0; break; /* IA */
6260 case 2: offset = -8; break; /* DB */
b0109805 6261 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6262 default: abort();
6263 }
6264 if (offset)
b0109805
PB
6265 tcg_gen_addi_i32(addr, addr, offset);
6266 /* Load PC into tmp and CPSR into tmp2. */
6267 tmp = gen_ld32(addr, 0);
6268 tcg_gen_addi_i32(addr, addr, 4);
6269 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6270 if (insn & (1 << 21)) {
6271 /* Base writeback. */
6272 switch (i) {
b0109805 6273 case 0: offset = -8; break;
c67b6b71
FN
6274 case 1: offset = 4; break;
6275 case 2: offset = -4; break;
b0109805 6276 case 3: offset = 0; break;
9ee6e8bb
PB
6277 default: abort();
6278 }
6279 if (offset)
b0109805
PB
6280 tcg_gen_addi_i32(addr, addr, offset);
6281 store_reg(s, rn, addr);
6282 } else {
7d1b0095 6283 tcg_temp_free_i32(addr);
9ee6e8bb 6284 }
b0109805 6285 gen_rfe(s, tmp, tmp2);
c67b6b71 6286 return;
9ee6e8bb
PB
6287 } else if ((insn & 0x0e000000) == 0x0a000000) {
6288 /* branch link and change to thumb (blx <offset>) */
6289 int32_t offset;
6290
6291 val = (uint32_t)s->pc;
7d1b0095 6292 tmp = tcg_temp_new_i32();
d9ba4830
PB
6293 tcg_gen_movi_i32(tmp, val);
6294 store_reg(s, 14, tmp);
9ee6e8bb
PB
6295 /* Sign-extend the 24-bit offset */
6296 offset = (((int32_t)insn) << 8) >> 8;
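 /* e.g. a 24-bit field of 0xfffffe becomes 0xfffffe00 after the left
    shift and -2 after the arithmetic right shift (relies on the usual
    two's-complement shift behaviour the rest of this file assumes). */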
6297 /* offset * 4 + bit24 * 2 + (thumb bit) */
6298 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6299 /* pipeline offset */
6300 val += 4;
d9ba4830 6301 gen_bx_im(s, val);
9ee6e8bb
PB
6302 return;
6303 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6304 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6305 /* iWMMXt register transfer. */
6306 if (env->cp15.c15_cpar & (1 << 1))
6307 if (!disas_iwmmxt_insn(env, s, insn))
6308 return;
6309 }
6310 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6311 /* Coprocessor double register transfer. */
6312 } else if ((insn & 0x0f000010) == 0x0e000010) {
6313 /* Additional coprocessor register transfer. */
7997d92f 6314 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6315 uint32_t mask;
6316 uint32_t val;
6317 /* cps (privileged) */
6318 if (IS_USER(s))
6319 return;
6320 mask = val = 0;
6321 if (insn & (1 << 19)) {
6322 if (insn & (1 << 8))
6323 mask |= CPSR_A;
6324 if (insn & (1 << 7))
6325 mask |= CPSR_I;
6326 if (insn & (1 << 6))
6327 mask |= CPSR_F;
6328 if (insn & (1 << 18))
6329 val |= mask;
6330 }
7997d92f 6331 if (insn & (1 << 17)) {
9ee6e8bb
PB
6332 mask |= CPSR_M;
6333 val |= (insn & 0x1f);
6334 }
6335 if (mask) {
2fbac54b 6336 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6337 }
6338 return;
6339 }
6340 goto illegal_op;
6341 }
6342 if (cond != 0xe) {
 6343 /* if the condition is not 'always', generate a conditional jump to
 6344 the next instruction */
6345 s->condlabel = gen_new_label();
d9ba4830 6346 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6347 s->condjmp = 1;
6348 }
6349 if ((insn & 0x0f900000) == 0x03000000) {
6350 if ((insn & (1 << 21)) == 0) {
6351 ARCH(6T2);
6352 rd = (insn >> 12) & 0xf;
6353 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
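 /* Descriptive note: MOVW/MOVT carry a 16-bit immediate split as
    imm4:imm12 (insn[19:16] and insn[11:0]); e.g. imm4 = 0x2,
    imm12 = 0x345 gives val = 0x2345. */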
6354 if ((insn & (1 << 22)) == 0) {
6355 /* MOVW */
7d1b0095 6356 tmp = tcg_temp_new_i32();
5e3f878a 6357 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6358 } else {
6359 /* MOVT */
5e3f878a 6360 tmp = load_reg(s, rd);
86831435 6361 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6362 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6363 }
5e3f878a 6364 store_reg(s, rd, tmp);
9ee6e8bb
PB
6365 } else {
6366 if (((insn >> 12) & 0xf) != 0xf)
6367 goto illegal_op;
6368 if (((insn >> 16) & 0xf) == 0) {
6369 gen_nop_hint(s, insn & 0xff);
6370 } else {
6371 /* CPSR = immediate */
6372 val = insn & 0xff;
6373 shift = ((insn >> 8) & 0xf) * 2;
6374 if (shift)
6375 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6376 i = ((insn & (1 << 22)) != 0);
2fbac54b 6377 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6378 goto illegal_op;
6379 }
6380 }
6381 } else if ((insn & 0x0f900000) == 0x01000000
6382 && (insn & 0x00000090) != 0x00000090) {
6383 /* miscellaneous instructions */
6384 op1 = (insn >> 21) & 3;
6385 sh = (insn >> 4) & 0xf;
6386 rm = insn & 0xf;
6387 switch (sh) {
6388 case 0x0: /* move program status register */
6389 if (op1 & 1) {
6390 /* PSR = reg */
2fbac54b 6391 tmp = load_reg(s, rm);
9ee6e8bb 6392 i = ((op1 & 2) != 0);
2fbac54b 6393 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6394 goto illegal_op;
6395 } else {
6396 /* reg = PSR */
6397 rd = (insn >> 12) & 0xf;
6398 if (op1 & 2) {
6399 if (IS_USER(s))
6400 goto illegal_op;
d9ba4830 6401 tmp = load_cpu_field(spsr);
9ee6e8bb 6402 } else {
7d1b0095 6403 tmp = tcg_temp_new_i32();
d9ba4830 6404 gen_helper_cpsr_read(tmp);
9ee6e8bb 6405 }
d9ba4830 6406 store_reg(s, rd, tmp);
9ee6e8bb
PB
6407 }
6408 break;
6409 case 0x1:
6410 if (op1 == 1) {
6411 /* branch/exchange thumb (bx). */
d9ba4830
PB
6412 tmp = load_reg(s, rm);
6413 gen_bx(s, tmp);
9ee6e8bb
PB
6414 } else if (op1 == 3) {
6415 /* clz */
6416 rd = (insn >> 12) & 0xf;
1497c961
PB
6417 tmp = load_reg(s, rm);
6418 gen_helper_clz(tmp, tmp);
6419 store_reg(s, rd, tmp);
9ee6e8bb
PB
6420 } else {
6421 goto illegal_op;
6422 }
6423 break;
6424 case 0x2:
6425 if (op1 == 1) {
6426 ARCH(5J); /* bxj */
6427 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6428 tmp = load_reg(s, rm);
6429 gen_bx(s, tmp);
9ee6e8bb
PB
6430 } else {
6431 goto illegal_op;
6432 }
6433 break;
6434 case 0x3:
6435 if (op1 != 1)
6436 goto illegal_op;
6437
6438 /* branch link/exchange thumb (blx) */
d9ba4830 6439 tmp = load_reg(s, rm);
7d1b0095 6440 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6441 tcg_gen_movi_i32(tmp2, s->pc);
6442 store_reg(s, 14, tmp2);
6443 gen_bx(s, tmp);
9ee6e8bb
PB
6444 break;
6445 case 0x5: /* saturating add/subtract */
6446 rd = (insn >> 12) & 0xf;
6447 rn = (insn >> 16) & 0xf;
b40d0353 6448 tmp = load_reg(s, rm);
5e3f878a 6449 tmp2 = load_reg(s, rn);
9ee6e8bb 6450 if (op1 & 2)
5e3f878a 6451 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6452 if (op1 & 1)
5e3f878a 6453 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6454 else
5e3f878a 6455 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6456 tcg_temp_free_i32(tmp2);
5e3f878a 6457 store_reg(s, rd, tmp);
9ee6e8bb 6458 break;
49e14940
AL
6459 case 7:
6460 /* SMC instruction (op1 == 3)
6461 and undefined instructions (op1 == 0 || op1 == 2)
6462 will trap */
6463 if (op1 != 1) {
6464 goto illegal_op;
6465 }
6466 /* bkpt */
bc4a0de0 6467 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6468 break;
6469 case 0x8: /* signed multiply */
6470 case 0xa:
6471 case 0xc:
6472 case 0xe:
6473 rs = (insn >> 8) & 0xf;
6474 rn = (insn >> 12) & 0xf;
6475 rd = (insn >> 16) & 0xf;
6476 if (op1 == 1) {
6477 /* (32 * 16) >> 16 */
5e3f878a
PB
6478 tmp = load_reg(s, rm);
6479 tmp2 = load_reg(s, rs);
9ee6e8bb 6480 if (sh & 4)
5e3f878a 6481 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6482 else
5e3f878a 6483 gen_sxth(tmp2);
a7812ae4
PB
6484 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6485 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6486 tmp = tcg_temp_new_i32();
a7812ae4 6487 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6488 tcg_temp_free_i64(tmp64);
9ee6e8bb 6489 if ((sh & 2) == 0) {
5e3f878a
PB
6490 tmp2 = load_reg(s, rn);
6491 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6492 tcg_temp_free_i32(tmp2);
9ee6e8bb 6493 }
5e3f878a 6494 store_reg(s, rd, tmp);
9ee6e8bb
PB
6495 } else {
6496 /* 16 * 16 */
5e3f878a
PB
6497 tmp = load_reg(s, rm);
6498 tmp2 = load_reg(s, rs);
6499 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6500 tcg_temp_free_i32(tmp2);
9ee6e8bb 6501 if (op1 == 2) {
a7812ae4
PB
6502 tmp64 = tcg_temp_new_i64();
6503 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6504 tcg_temp_free_i32(tmp);
a7812ae4
PB
6505 gen_addq(s, tmp64, rn, rd);
6506 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6507 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6508 } else {
6509 if (op1 == 0) {
5e3f878a
PB
6510 tmp2 = load_reg(s, rn);
6511 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6512 tcg_temp_free_i32(tmp2);
9ee6e8bb 6513 }
5e3f878a 6514 store_reg(s, rd, tmp);
9ee6e8bb
PB
6515 }
6516 }
6517 break;
6518 default:
6519 goto illegal_op;
6520 }
6521 } else if (((insn & 0x0e000000) == 0 &&
6522 (insn & 0x00000090) != 0x90) ||
6523 ((insn & 0x0e000000) == (1 << 25))) {
6524 int set_cc, logic_cc, shiftop;
6525
6526 op1 = (insn >> 21) & 0xf;
6527 set_cc = (insn >> 20) & 1;
6528 logic_cc = table_logic_cc[op1] & set_cc;
6529
6530 /* data processing instruction */
6531 if (insn & (1 << 25)) {
6532 /* immediate operand */
6533 val = insn & 0xff;
6534 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6535 if (shift) {
9ee6e8bb 6536 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6537 }
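 /* Standard ARM modified-immediate form: an 8-bit value rotated right
    by twice the 4-bit rotate field; e.g. val = 0xff, rotate = 0xf gives
    ror(0xff, 30) = 0x3fc. */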
7d1b0095 6538 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6539 tcg_gen_movi_i32(tmp2, val);
6540 if (logic_cc && shift) {
6541 gen_set_CF_bit31(tmp2);
6542 }
9ee6e8bb
PB
6543 } else {
6544 /* register */
6545 rm = (insn) & 0xf;
e9bb4aa9 6546 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6547 shiftop = (insn >> 5) & 3;
6548 if (!(insn & (1 << 4))) {
6549 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6550 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6551 } else {
6552 rs = (insn >> 8) & 0xf;
8984bd2e 6553 tmp = load_reg(s, rs);
e9bb4aa9 6554 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6555 }
6556 }
6557 if (op1 != 0x0f && op1 != 0x0d) {
6558 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6559 tmp = load_reg(s, rn);
6560 } else {
6561 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6562 }
6563 rd = (insn >> 12) & 0xf;
6564 switch(op1) {
6565 case 0x00:
e9bb4aa9
JR
6566 tcg_gen_and_i32(tmp, tmp, tmp2);
6567 if (logic_cc) {
6568 gen_logic_CC(tmp);
6569 }
21aeb343 6570 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6571 break;
6572 case 0x01:
e9bb4aa9
JR
6573 tcg_gen_xor_i32(tmp, tmp, tmp2);
6574 if (logic_cc) {
6575 gen_logic_CC(tmp);
6576 }
21aeb343 6577 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6578 break;
6579 case 0x02:
6580 if (set_cc && rd == 15) {
6581 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6582 if (IS_USER(s)) {
9ee6e8bb 6583 goto illegal_op;
e9bb4aa9
JR
6584 }
6585 gen_helper_sub_cc(tmp, tmp, tmp2);
6586 gen_exception_return(s, tmp);
9ee6e8bb 6587 } else {
e9bb4aa9
JR
6588 if (set_cc) {
6589 gen_helper_sub_cc(tmp, tmp, tmp2);
6590 } else {
6591 tcg_gen_sub_i32(tmp, tmp, tmp2);
6592 }
21aeb343 6593 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6594 }
6595 break;
6596 case 0x03:
e9bb4aa9
JR
6597 if (set_cc) {
6598 gen_helper_sub_cc(tmp, tmp2, tmp);
6599 } else {
6600 tcg_gen_sub_i32(tmp, tmp2, tmp);
6601 }
21aeb343 6602 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6603 break;
6604 case 0x04:
e9bb4aa9
JR
6605 if (set_cc) {
6606 gen_helper_add_cc(tmp, tmp, tmp2);
6607 } else {
6608 tcg_gen_add_i32(tmp, tmp, tmp2);
6609 }
21aeb343 6610 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6611 break;
6612 case 0x05:
e9bb4aa9
JR
6613 if (set_cc) {
6614 gen_helper_adc_cc(tmp, tmp, tmp2);
6615 } else {
6616 gen_add_carry(tmp, tmp, tmp2);
6617 }
21aeb343 6618 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6619 break;
6620 case 0x06:
e9bb4aa9
JR
6621 if (set_cc) {
6622 gen_helper_sbc_cc(tmp, tmp, tmp2);
6623 } else {
6624 gen_sub_carry(tmp, tmp, tmp2);
6625 }
21aeb343 6626 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6627 break;
6628 case 0x07:
e9bb4aa9
JR
6629 if (set_cc) {
6630 gen_helper_sbc_cc(tmp, tmp2, tmp);
6631 } else {
6632 gen_sub_carry(tmp, tmp2, tmp);
6633 }
21aeb343 6634 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6635 break;
6636 case 0x08:
6637 if (set_cc) {
e9bb4aa9
JR
6638 tcg_gen_and_i32(tmp, tmp, tmp2);
6639 gen_logic_CC(tmp);
9ee6e8bb 6640 }
7d1b0095 6641 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6642 break;
6643 case 0x09:
6644 if (set_cc) {
e9bb4aa9
JR
6645 tcg_gen_xor_i32(tmp, tmp, tmp2);
6646 gen_logic_CC(tmp);
9ee6e8bb 6647 }
7d1b0095 6648 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6649 break;
6650 case 0x0a:
6651 if (set_cc) {
e9bb4aa9 6652 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6653 }
7d1b0095 6654 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6655 break;
6656 case 0x0b:
6657 if (set_cc) {
e9bb4aa9 6658 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6659 }
7d1b0095 6660 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6661 break;
6662 case 0x0c:
e9bb4aa9
JR
6663 tcg_gen_or_i32(tmp, tmp, tmp2);
6664 if (logic_cc) {
6665 gen_logic_CC(tmp);
6666 }
21aeb343 6667 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6668 break;
6669 case 0x0d:
6670 if (logic_cc && rd == 15) {
6671 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6672 if (IS_USER(s)) {
9ee6e8bb 6673 goto illegal_op;
e9bb4aa9
JR
6674 }
6675 gen_exception_return(s, tmp2);
9ee6e8bb 6676 } else {
e9bb4aa9
JR
6677 if (logic_cc) {
6678 gen_logic_CC(tmp2);
6679 }
21aeb343 6680 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6681 }
6682 break;
6683 case 0x0e:
f669df27 6684 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6685 if (logic_cc) {
6686 gen_logic_CC(tmp);
6687 }
21aeb343 6688 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6689 break;
6690 default:
6691 case 0x0f:
e9bb4aa9
JR
6692 tcg_gen_not_i32(tmp2, tmp2);
6693 if (logic_cc) {
6694 gen_logic_CC(tmp2);
6695 }
21aeb343 6696 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6697 break;
6698 }
e9bb4aa9 6699 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 6700 tcg_temp_free_i32(tmp2);
e9bb4aa9 6701 }
9ee6e8bb
PB
6702 } else {
6703 /* other instructions */
6704 op1 = (insn >> 24) & 0xf;
6705 switch(op1) {
6706 case 0x0:
6707 case 0x1:
6708 /* multiplies, extra load/stores */
6709 sh = (insn >> 5) & 3;
6710 if (sh == 0) {
6711 if (op1 == 0x0) {
6712 rd = (insn >> 16) & 0xf;
6713 rn = (insn >> 12) & 0xf;
6714 rs = (insn >> 8) & 0xf;
6715 rm = (insn) & 0xf;
6716 op1 = (insn >> 20) & 0xf;
6717 switch (op1) {
6718 case 0: case 1: case 2: case 3: case 6:
6719 /* 32 bit mul */
5e3f878a
PB
6720 tmp = load_reg(s, rs);
6721 tmp2 = load_reg(s, rm);
6722 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 6723 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6724 if (insn & (1 << 22)) {
6725 /* Subtract (mls) */
6726 ARCH(6T2);
5e3f878a
PB
6727 tmp2 = load_reg(s, rn);
6728 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 6729 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6730 } else if (insn & (1 << 21)) {
6731 /* Add */
5e3f878a
PB
6732 tmp2 = load_reg(s, rn);
6733 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 6734 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6735 }
6736 if (insn & (1 << 20))
5e3f878a
PB
6737 gen_logic_CC(tmp);
6738 store_reg(s, rd, tmp);
9ee6e8bb 6739 break;
8aac08b1
AJ
6740 case 4:
6741 /* 64 bit mul double accumulate (UMAAL) */
6742 ARCH(6);
6743 tmp = load_reg(s, rs);
6744 tmp2 = load_reg(s, rm);
6745 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6746 gen_addq_lo(s, tmp64, rn);
6747 gen_addq_lo(s, tmp64, rd);
6748 gen_storeq_reg(s, rn, rd, tmp64);
6749 tcg_temp_free_i64(tmp64);
6750 break;
6751 case 8: case 9: case 10: case 11:
6752 case 12: case 13: case 14: case 15:
6753 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
6754 tmp = load_reg(s, rs);
6755 tmp2 = load_reg(s, rm);
8aac08b1 6756 if (insn & (1 << 22)) {
a7812ae4 6757 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 6758 } else {
a7812ae4 6759 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
6760 }
6761 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 6762 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 6763 }
8aac08b1 6764 if (insn & (1 << 20)) {
a7812ae4 6765 gen_logicq_cc(tmp64);
8aac08b1 6766 }
a7812ae4 6767 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6768 tcg_temp_free_i64(tmp64);
9ee6e8bb 6769 break;
8aac08b1
AJ
6770 default:
6771 goto illegal_op;
9ee6e8bb
PB
6772 }
6773 } else {
6774 rn = (insn >> 16) & 0xf;
6775 rd = (insn >> 12) & 0xf;
6776 if (insn & (1 << 23)) {
6777 /* load/store exclusive */
86753403
PB
6778 op1 = (insn >> 21) & 0x3;
6779 if (op1)
a47f43d2 6780 ARCH(6K);
86753403
PB
6781 else
6782 ARCH(6);
3174f8e9 6783 addr = tcg_temp_local_new_i32();
98a46317 6784 load_reg_var(s, addr, rn);
9ee6e8bb 6785 if (insn & (1 << 20)) {
86753403
PB
6786 switch (op1) {
6787 case 0: /* ldrex */
426f5abc 6788 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6789 break;
6790 case 1: /* ldrexd */
426f5abc 6791 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6792 break;
6793 case 2: /* ldrexb */
426f5abc 6794 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6795 break;
6796 case 3: /* ldrexh */
426f5abc 6797 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6798 break;
6799 default:
6800 abort();
6801 }
9ee6e8bb
PB
6802 } else {
6803 rm = insn & 0xf;
86753403
PB
6804 switch (op1) {
6805 case 0: /* strex */
426f5abc 6806 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6807 break;
6808 case 1: /* strexd */
502e64fe 6809 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6810 break;
6811 case 2: /* strexb */
426f5abc 6812 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6813 break;
6814 case 3: /* strexh */
426f5abc 6815 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6816 break;
6817 default:
6818 abort();
6819 }
9ee6e8bb 6820 }
3174f8e9 6821 tcg_temp_free(addr);
9ee6e8bb
PB
6822 } else {
6823 /* SWP instruction */
6824 rm = (insn) & 0xf;
6825
8984bd2e
PB
6826 /* ??? This is not really atomic. However we know
6827 we never have multiple CPUs running in parallel,
6828 so it is good enough. */
6829 addr = load_reg(s, rn);
6830 tmp = load_reg(s, rm);
9ee6e8bb 6831 if (insn & (1 << 22)) {
8984bd2e
PB
6832 tmp2 = gen_ld8u(addr, IS_USER(s));
6833 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6834 } else {
8984bd2e
PB
6835 tmp2 = gen_ld32(addr, IS_USER(s));
6836 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6837 }
7d1b0095 6838 tcg_temp_free_i32(addr);
8984bd2e 6839 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6840 }
6841 }
6842 } else {
6843 int address_offset;
6844 int load;
6845 /* Misc load/store */
6846 rn = (insn >> 16) & 0xf;
6847 rd = (insn >> 12) & 0xf;
b0109805 6848 addr = load_reg(s, rn);
9ee6e8bb 6849 if (insn & (1 << 24))
b0109805 6850 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6851 address_offset = 0;
6852 if (insn & (1 << 20)) {
6853 /* load */
6854 switch(sh) {
6855 case 1:
b0109805 6856 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6857 break;
6858 case 2:
b0109805 6859 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6860 break;
6861 default:
6862 case 3:
b0109805 6863 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6864 break;
6865 }
6866 load = 1;
6867 } else if (sh & 2) {
6868 /* doubleword */
6869 if (sh & 1) {
6870 /* store */
b0109805
PB
6871 tmp = load_reg(s, rd);
6872 gen_st32(tmp, addr, IS_USER(s));
6873 tcg_gen_addi_i32(addr, addr, 4);
6874 tmp = load_reg(s, rd + 1);
6875 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6876 load = 0;
6877 } else {
6878 /* load */
b0109805
PB
6879 tmp = gen_ld32(addr, IS_USER(s));
6880 store_reg(s, rd, tmp);
6881 tcg_gen_addi_i32(addr, addr, 4);
6882 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6883 rd++;
6884 load = 1;
6885 }
6886 address_offset = -4;
6887 } else {
6888 /* store */
b0109805
PB
6889 tmp = load_reg(s, rd);
6890 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6891 load = 0;
6892 }
6893 /* Perform base writeback before the loaded value to
6894 ensure correct behavior with overlapping index registers.
 6895 ldrd with base writeback is undefined if the
6896 destination and index registers overlap. */
6897 if (!(insn & (1 << 24))) {
b0109805
PB
6898 gen_add_datah_offset(s, insn, address_offset, addr);
6899 store_reg(s, rn, addr);
9ee6e8bb
PB
6900 } else if (insn & (1 << 21)) {
6901 if (address_offset)
b0109805
PB
6902 tcg_gen_addi_i32(addr, addr, address_offset);
6903 store_reg(s, rn, addr);
6904 } else {
7d1b0095 6905 tcg_temp_free_i32(addr);
9ee6e8bb
PB
6906 }
6907 if (load) {
6908 /* Complete the load. */
b0109805 6909 store_reg(s, rd, tmp);
9ee6e8bb
PB
6910 }
6911 }
6912 break;
6913 case 0x4:
6914 case 0x5:
6915 goto do_ldst;
6916 case 0x6:
6917 case 0x7:
6918 if (insn & (1 << 4)) {
6919 ARCH(6);
 6920 /* ARMv6 Media instructions. */
6921 rm = insn & 0xf;
6922 rn = (insn >> 16) & 0xf;
2c0262af 6923 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6924 rs = (insn >> 8) & 0xf;
6925 switch ((insn >> 23) & 3) {
6926 case 0: /* Parallel add/subtract. */
6927 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6928 tmp = load_reg(s, rn);
6929 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6930 sh = (insn >> 5) & 7;
6931 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6932 goto illegal_op;
6ddbc6e4 6933 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 6934 tcg_temp_free_i32(tmp2);
6ddbc6e4 6935 store_reg(s, rd, tmp);
9ee6e8bb
PB
6936 break;
6937 case 1:
6938 if ((insn & 0x00700020) == 0) {
6c95676b 6939 /* Halfword pack. */
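 /* Descriptive note matching the code below:
    PKHBT: rd = (rm << shift)[31:16] : rn[15:0]
    PKHTB: rd = rn[31:16] : (rm asr shift)[15:0] */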
3670669c
PB
6940 tmp = load_reg(s, rn);
6941 tmp2 = load_reg(s, rm);
9ee6e8bb 6942 shift = (insn >> 7) & 0x1f;
3670669c
PB
6943 if (insn & (1 << 6)) {
6944 /* pkhtb */
22478e79
AZ
6945 if (shift == 0)
6946 shift = 31;
6947 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6948 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6949 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6950 } else {
6951 /* pkhbt */
22478e79
AZ
6952 if (shift)
6953 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6954 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6955 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6956 }
6957 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6958 tcg_temp_free_i32(tmp2);
3670669c 6959 store_reg(s, rd, tmp);
9ee6e8bb
PB
6960 } else if ((insn & 0x00200020) == 0x00200000) {
6961 /* [us]sat */
6ddbc6e4 6962 tmp = load_reg(s, rm);
9ee6e8bb
PB
6963 shift = (insn >> 7) & 0x1f;
6964 if (insn & (1 << 6)) {
6965 if (shift == 0)
6966 shift = 31;
6ddbc6e4 6967 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6968 } else {
6ddbc6e4 6969 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6970 }
6971 sh = (insn >> 16) & 0x1f;
40d3c433
CL
6972 tmp2 = tcg_const_i32(sh);
6973 if (insn & (1 << 22))
6974 gen_helper_usat(tmp, tmp, tmp2);
6975 else
6976 gen_helper_ssat(tmp, tmp, tmp2);
6977 tcg_temp_free_i32(tmp2);
6ddbc6e4 6978 store_reg(s, rd, tmp);
9ee6e8bb
PB
6979 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6980 /* [us]sat16 */
6ddbc6e4 6981 tmp = load_reg(s, rm);
9ee6e8bb 6982 sh = (insn >> 16) & 0x1f;
40d3c433
CL
6983 tmp2 = tcg_const_i32(sh);
6984 if (insn & (1 << 22))
6985 gen_helper_usat16(tmp, tmp, tmp2);
6986 else
6987 gen_helper_ssat16(tmp, tmp, tmp2);
6988 tcg_temp_free_i32(tmp2);
6ddbc6e4 6989 store_reg(s, rd, tmp);
9ee6e8bb
PB
6990 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6991 /* Select bytes. */
6ddbc6e4
PB
6992 tmp = load_reg(s, rn);
6993 tmp2 = load_reg(s, rm);
7d1b0095 6994 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
6995 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6996 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
6997 tcg_temp_free_i32(tmp3);
6998 tcg_temp_free_i32(tmp2);
6ddbc6e4 6999 store_reg(s, rd, tmp);
9ee6e8bb 7000 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7001 tmp = load_reg(s, rm);
9ee6e8bb
PB
7002 shift = (insn >> 10) & 3;
 7003 /* ??? In many cases it's not necessary to do a
7004 rotate, a shift is sufficient. */
7005 if (shift != 0)
f669df27 7006 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7007 op1 = (insn >> 20) & 7;
7008 switch (op1) {
5e3f878a
PB
7009 case 0: gen_sxtb16(tmp); break;
7010 case 2: gen_sxtb(tmp); break;
7011 case 3: gen_sxth(tmp); break;
7012 case 4: gen_uxtb16(tmp); break;
7013 case 6: gen_uxtb(tmp); break;
7014 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7015 default: goto illegal_op;
7016 }
7017 if (rn != 15) {
5e3f878a 7018 tmp2 = load_reg(s, rn);
9ee6e8bb 7019 if ((op1 & 3) == 0) {
5e3f878a 7020 gen_add16(tmp, tmp2);
9ee6e8bb 7021 } else {
5e3f878a 7022 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7023 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7024 }
7025 }
6c95676b 7026 store_reg(s, rd, tmp);
9ee6e8bb
PB
7027 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7028 /* rev */
b0109805 7029 tmp = load_reg(s, rm);
9ee6e8bb
PB
7030 if (insn & (1 << 22)) {
7031 if (insn & (1 << 7)) {
b0109805 7032 gen_revsh(tmp);
9ee6e8bb
PB
7033 } else {
7034 ARCH(6T2);
b0109805 7035 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7036 }
7037 } else {
7038 if (insn & (1 << 7))
b0109805 7039 gen_rev16(tmp);
9ee6e8bb 7040 else
66896cb8 7041 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7042 }
b0109805 7043 store_reg(s, rd, tmp);
9ee6e8bb
PB
7044 } else {
7045 goto illegal_op;
7046 }
7047 break;
7048 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7049 tmp = load_reg(s, rm);
7050 tmp2 = load_reg(s, rs);
9ee6e8bb 7051 if (insn & (1 << 20)) {
838fa72d
AJ
7052 /* Signed multiply most significant [accumulate].
7053 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7054 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7055
955a7dd5 7056 if (rd != 15) {
838fa72d 7057 tmp = load_reg(s, rd);
9ee6e8bb 7058 if (insn & (1 << 6)) {
838fa72d 7059 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7060 } else {
838fa72d 7061 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7062 }
7063 }
838fa72d
AJ
7064 if (insn & (1 << 5)) {
7065 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7066 }
7067 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7068 tmp = tcg_temp_new_i32();
838fa72d
AJ
7069 tcg_gen_trunc_i64_i32(tmp, tmp64);
7070 tcg_temp_free_i64(tmp64);
955a7dd5 7071 store_reg(s, rn, tmp);
9ee6e8bb
PB
7072 } else {
7073 if (insn & (1 << 5))
5e3f878a
PB
7074 gen_swap_half(tmp2);
7075 gen_smul_dual(tmp, tmp2);
5e3f878a 7076 if (insn & (1 << 6)) {
e1d177b9 7077 /* This subtraction cannot overflow. */
5e3f878a
PB
7078 tcg_gen_sub_i32(tmp, tmp, tmp2);
7079 } else {
e1d177b9
PM
7080 /* This addition cannot overflow 32 bits;
 7081 * however it may overflow when considered as a signed
7082 * operation, in which case we must set the Q flag.
7083 */
7084 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7085 }
7d1b0095 7086 tcg_temp_free_i32(tmp2);
9ee6e8bb 7087 if (insn & (1 << 22)) {
5e3f878a 7088 /* smlald, smlsld */
a7812ae4
PB
7089 tmp64 = tcg_temp_new_i64();
7090 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7091 tcg_temp_free_i32(tmp);
a7812ae4
PB
7092 gen_addq(s, tmp64, rd, rn);
7093 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7094 tcg_temp_free_i64(tmp64);
9ee6e8bb 7095 } else {
5e3f878a 7096 /* smuad, smusd, smlad, smlsd */
22478e79 7097 if (rd != 15)
9ee6e8bb 7098 {
22478e79 7099 tmp2 = load_reg(s, rd);
5e3f878a 7100 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7101 tcg_temp_free_i32(tmp2);
9ee6e8bb 7102 }
22478e79 7103 store_reg(s, rn, tmp);
9ee6e8bb
PB
7104 }
7105 }
7106 break;
7107 case 3:
7108 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7109 switch (op1) {
7110 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7111 ARCH(6);
7112 tmp = load_reg(s, rm);
7113 tmp2 = load_reg(s, rs);
7114 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7115 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7116 if (rd != 15) {
7117 tmp2 = load_reg(s, rd);
6ddbc6e4 7118 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7119 tcg_temp_free_i32(tmp2);
9ee6e8bb 7120 }
ded9d295 7121 store_reg(s, rn, tmp);
9ee6e8bb
PB
7122 break;
7123 case 0x20: case 0x24: case 0x28: case 0x2c:
7124 /* Bitfield insert/clear. */
7125 ARCH(6T2);
7126 shift = (insn >> 7) & 0x1f;
7127 i = (insn >> 16) & 0x1f;
7128 i = i + 1 - shift;
7129 if (rm == 15) {
7d1b0095 7130 tmp = tcg_temp_new_i32();
5e3f878a 7131 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7132 } else {
5e3f878a 7133 tmp = load_reg(s, rm);
9ee6e8bb
PB
7134 }
7135 if (i != 32) {
5e3f878a 7136 tmp2 = load_reg(s, rd);
8f8e3aa4 7137 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7138 tcg_temp_free_i32(tmp2);
9ee6e8bb 7139 }
5e3f878a 7140 store_reg(s, rd, tmp);
9ee6e8bb
PB
7141 break;
7142 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7143 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7144 ARCH(6T2);
5e3f878a 7145 tmp = load_reg(s, rm);
9ee6e8bb
PB
7146 shift = (insn >> 7) & 0x1f;
7147 i = ((insn >> 16) & 0x1f) + 1;
7148 if (shift + i > 32)
7149 goto illegal_op;
7150 if (i < 32) {
7151 if (op1 & 0x20) {
5e3f878a 7152 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7153 } else {
5e3f878a 7154 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7155 }
7156 }
5e3f878a 7157 store_reg(s, rd, tmp);
9ee6e8bb
PB
7158 break;
7159 default:
7160 goto illegal_op;
7161 }
7162 break;
7163 }
7164 break;
7165 }
7166 do_ldst:
7167 /* Check for undefined extension instructions
 7168 * per the ARM Bible, i.e.:
7169 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7170 */
7171 sh = (0xf << 20) | (0xf << 4);
7172 if (op1 == 0x7 && ((insn & sh) == sh))
7173 {
7174 goto illegal_op;
7175 }
7176 /* load/store byte/word */
7177 rn = (insn >> 16) & 0xf;
7178 rd = (insn >> 12) & 0xf;
b0109805 7179 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7180 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7181 if (insn & (1 << 24))
b0109805 7182 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7183 if (insn & (1 << 20)) {
7184 /* load */
9ee6e8bb 7185 if (insn & (1 << 22)) {
b0109805 7186 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7187 } else {
b0109805 7188 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7189 }
9ee6e8bb
PB
7190 } else {
7191 /* store */
b0109805 7192 tmp = load_reg(s, rd);
9ee6e8bb 7193 if (insn & (1 << 22))
b0109805 7194 gen_st8(tmp, tmp2, i);
9ee6e8bb 7195 else
b0109805 7196 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7197 }
7198 if (!(insn & (1 << 24))) {
b0109805
PB
7199 gen_add_data_offset(s, insn, tmp2);
7200 store_reg(s, rn, tmp2);
7201 } else if (insn & (1 << 21)) {
7202 store_reg(s, rn, tmp2);
7203 } else {
7d1b0095 7204 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7205 }
7206 if (insn & (1 << 20)) {
7207 /* Complete the load. */
7208 if (rd == 15)
b0109805 7209 gen_bx(s, tmp);
9ee6e8bb 7210 else
b0109805 7211 store_reg(s, rd, tmp);
9ee6e8bb
PB
7212 }
7213 break;
7214 case 0x08:
7215 case 0x09:
7216 {
7217 int j, n, user, loaded_base;
b0109805 7218 TCGv loaded_var;
9ee6e8bb
PB
7219 /* load/store multiple words */
7220 /* XXX: store correct base if write back */
7221 user = 0;
7222 if (insn & (1 << 22)) {
7223 if (IS_USER(s))
7224 goto illegal_op; /* only usable in supervisor mode */
7225
7226 if ((insn & (1 << 15)) == 0)
7227 user = 1;
7228 }
7229 rn = (insn >> 16) & 0xf;
b0109805 7230 addr = load_reg(s, rn);
9ee6e8bb
PB
7231
7232 /* compute total size */
7233 loaded_base = 0;
a50f5b91 7234 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7235 n = 0;
7236 for(i=0;i<16;i++) {
7237 if (insn & (1 << i))
7238 n++;
7239 }
7240 /* XXX: test invalid n == 0 case ? */
7241 if (insn & (1 << 23)) {
7242 if (insn & (1 << 24)) {
7243 /* pre increment */
b0109805 7244 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7245 } else {
7246 /* post increment */
7247 }
7248 } else {
7249 if (insn & (1 << 24)) {
7250 /* pre decrement */
b0109805 7251 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7252 } else {
7253 /* post decrement */
7254 if (n != 1)
b0109805 7255 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7256 }
7257 }
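 /* Descriptive note: the transfer loop below always walks addresses
    upward, so the decrementing modes are rewritten as an upward walk
    from the lowest address; e.g. LDMDB with n = 3 starts at base - 12,
    LDMDA with n = 3 at base - 8. */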
7258 j = 0;
7259 for(i=0;i<16;i++) {
7260 if (insn & (1 << i)) {
7261 if (insn & (1 << 20)) {
7262 /* load */
b0109805 7263 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7264 if (i == 15) {
b0109805 7265 gen_bx(s, tmp);
9ee6e8bb 7266 } else if (user) {
b75263d6
JR
7267 tmp2 = tcg_const_i32(i);
7268 gen_helper_set_user_reg(tmp2, tmp);
7269 tcg_temp_free_i32(tmp2);
7d1b0095 7270 tcg_temp_free_i32(tmp);
9ee6e8bb 7271 } else if (i == rn) {
b0109805 7272 loaded_var = tmp;
9ee6e8bb
PB
7273 loaded_base = 1;
7274 } else {
b0109805 7275 store_reg(s, i, tmp);
9ee6e8bb
PB
7276 }
7277 } else {
7278 /* store */
7279 if (i == 15) {
7280 /* special case: r15 = PC + 8 */
7281 val = (long)s->pc + 4;
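 /* s->pc already points 4 bytes past this instruction, so the extra
    +4 yields the architectural PC + 8 seen when r15 is stored. */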
7d1b0095 7282 tmp = tcg_temp_new_i32();
b0109805 7283 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7284 } else if (user) {
7d1b0095 7285 tmp = tcg_temp_new_i32();
b75263d6
JR
7286 tmp2 = tcg_const_i32(i);
7287 gen_helper_get_user_reg(tmp, tmp2);
7288 tcg_temp_free_i32(tmp2);
9ee6e8bb 7289 } else {
b0109805 7290 tmp = load_reg(s, i);
9ee6e8bb 7291 }
b0109805 7292 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7293 }
7294 j++;
7295 /* no need to add after the last transfer */
7296 if (j != n)
b0109805 7297 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7298 }
7299 }
7300 if (insn & (1 << 21)) {
7301 /* write back */
7302 if (insn & (1 << 23)) {
7303 if (insn & (1 << 24)) {
7304 /* pre increment */
7305 } else {
7306 /* post increment */
b0109805 7307 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7308 }
7309 } else {
7310 if (insn & (1 << 24)) {
7311 /* pre decrement */
7312 if (n != 1)
b0109805 7313 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7314 } else {
7315 /* post decrement */
b0109805 7316 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7317 }
7318 }
b0109805
PB
7319 store_reg(s, rn, addr);
7320 } else {
7d1b0095 7321 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7322 }
7323 if (loaded_base) {
b0109805 7324 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7325 }
7326 if ((insn & (1 << 22)) && !user) {
7327 /* Restore CPSR from SPSR. */
d9ba4830
PB
7328 tmp = load_cpu_field(spsr);
7329 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7330 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7331 s->is_jmp = DISAS_UPDATE;
7332 }
7333 }
7334 break;
7335 case 0xa:
7336 case 0xb:
7337 {
7338 int32_t offset;
7339
7340 /* branch (and link) */
7341 val = (int32_t)s->pc;
7342 if (insn & (1 << 24)) {
7d1b0095 7343 tmp = tcg_temp_new_i32();
5e3f878a
PB
7344 tcg_gen_movi_i32(tmp, val);
7345 store_reg(s, 14, tmp);
9ee6e8bb
PB
7346 }
7347 offset = (((int32_t)insn << 8) >> 8);
7348 val += (offset << 2) + 4;
7349 gen_jmp(s, val);
7350 }
7351 break;
7352 case 0xc:
7353 case 0xd:
7354 case 0xe:
7355 /* Coprocessor. */
7356 if (disas_coproc_insn(env, s, insn))
7357 goto illegal_op;
7358 break;
7359 case 0xf:
7360 /* swi */
5e3f878a 7361 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7362 s->is_jmp = DISAS_SWI;
7363 break;
7364 default:
7365 illegal_op:
bc4a0de0 7366 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7367 break;
7368 }
7369 }
7370}
7371
7372/* Return true if this is a Thumb-2 logical op. */
7373static int
7374thumb2_logic_op(int op)
7375{
7376 return (op < 8);
7377}
7378
7379/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7380 then set condition code flags based on the result of the operation.
7381 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7382 to the high bit of T1.
7383 Returns zero if the opcode is valid. */
7384
7385static int
396e467c 7386gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7387{
7388 int logic_cc;
7389
7390 logic_cc = 0;
7391 switch (op) {
7392 case 0: /* and */
396e467c 7393 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7394 logic_cc = conds;
7395 break;
7396 case 1: /* bic */
f669df27 7397 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7398 logic_cc = conds;
7399 break;
7400 case 2: /* orr */
396e467c 7401 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7402 logic_cc = conds;
7403 break;
7404 case 3: /* orn */
29501f1b 7405 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7406 logic_cc = conds;
7407 break;
7408 case 4: /* eor */
396e467c 7409 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7410 logic_cc = conds;
7411 break;
7412 case 8: /* add */
7413 if (conds)
396e467c 7414 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7415 else
396e467c 7416 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7417 break;
7418 case 10: /* adc */
7419 if (conds)
396e467c 7420 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7421 else
396e467c 7422 gen_adc(t0, t1);
9ee6e8bb
PB
7423 break;
7424 case 11: /* sbc */
7425 if (conds)
396e467c 7426 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7427 else
396e467c 7428 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7429 break;
7430 case 13: /* sub */
7431 if (conds)
396e467c 7432 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7433 else
396e467c 7434 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7435 break;
7436 case 14: /* rsb */
7437 if (conds)
396e467c 7438 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7439 else
396e467c 7440 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7441 break;
7442 default: /* 5, 6, 7, 9, 12, 15. */
7443 return 1;
7444 }
7445 if (logic_cc) {
396e467c 7446 gen_logic_CC(t0);
9ee6e8bb 7447 if (shifter_out)
396e467c 7448 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7449 }
7450 return 0;
7451}
7452
7453/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7454 is not legal. */
7455static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7456{
b0109805 7457 uint32_t insn, imm, shift, offset;
9ee6e8bb 7458 uint32_t rd, rn, rm, rs;
b26eefb6 7459 TCGv tmp;
6ddbc6e4
PB
7460 TCGv tmp2;
7461 TCGv tmp3;
b0109805 7462 TCGv addr;
a7812ae4 7463 TCGv_i64 tmp64;
9ee6e8bb
PB
7464 int op;
7465 int shiftop;
7466 int conds;
7467 int logic_cc;
7468
7469 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7470 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7471 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7472 16-bit instructions to get correct prefetch abort behavior. */
7473 insn = insn_hw1;
7474 if ((insn & (1 << 12)) == 0) {
7475 /* Second half of blx. */
7476 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7477 tmp = load_reg(s, 14);
7478 tcg_gen_addi_i32(tmp, tmp, offset);
7479 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7480
7d1b0095 7481 tmp2 = tcg_temp_new_i32();
b0109805 7482 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7483 store_reg(s, 14, tmp2);
7484 gen_bx(s, tmp);
9ee6e8bb
PB
7485 return 0;
7486 }
7487 if (insn & (1 << 11)) {
7488 /* Second half of bl. */
7489 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7490 tmp = load_reg(s, 14);
6a0d8a1d 7491 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7492
7d1b0095 7493 tmp2 = tcg_temp_new_i32();
b0109805 7494 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7495 store_reg(s, 14, tmp2);
7496 gen_bx(s, tmp);
9ee6e8bb
PB
7497 return 0;
7498 }
7499 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7500 /* Instruction spans a page boundary. Implement it as two
7501 16-bit instructions in case the second half causes a
7502 prefetch abort. */
7503 offset = ((int32_t)insn << 21) >> 9;
396e467c 7504 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7505 return 0;
7506 }
7507 /* Fall through to 32-bit decode. */
7508 }
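 /* Taken together (an illustrative summary of the two halfwords handled
  * above): the prefix halfword leaves LR = (address of the prefix) + 4 +
  * (SignExtend(imm11) << 12); the suffix then branches to
  * LR + (imm11 << 1) (also clearing bit 1 for blx) and rewrites
  * LR = (address after the suffix) | 1, so the pair behaves like one
  * pc-relative call with a roughly +/-4MB range. */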
7509
7510 insn = lduw_code(s->pc);
7511 s->pc += 2;
7512 insn |= (uint32_t)insn_hw1 << 16;
7513
7514 if ((insn & 0xf800e800) != 0xf000e800) {
7515 ARCH(6T2);
7516 }
7517
7518 rn = (insn >> 16) & 0xf;
7519 rs = (insn >> 12) & 0xf;
7520 rd = (insn >> 8) & 0xf;
7521 rm = insn & 0xf;
7522 switch ((insn >> 25) & 0xf) {
7523 case 0: case 1: case 2: case 3:
7524 /* 16-bit instructions. Should never happen. */
7525 abort();
7526 case 4:
7527 if (insn & (1 << 22)) {
7528 /* Other load/store, table branch. */
7529 if (insn & 0x01200000) {
7530 /* Load/store doubleword. */
7531 if (rn == 15) {
7d1b0095 7532 addr = tcg_temp_new_i32();
b0109805 7533 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7534 } else {
b0109805 7535 addr = load_reg(s, rn);
9ee6e8bb
PB
7536 }
7537 offset = (insn & 0xff) * 4;
7538 if ((insn & (1 << 23)) == 0)
7539 offset = -offset;
7540 if (insn & (1 << 24)) {
b0109805 7541 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7542 offset = 0;
7543 }
7544 if (insn & (1 << 20)) {
7545 /* ldrd */
b0109805
PB
7546 tmp = gen_ld32(addr, IS_USER(s));
7547 store_reg(s, rs, tmp);
7548 tcg_gen_addi_i32(addr, addr, 4);
7549 tmp = gen_ld32(addr, IS_USER(s));
7550 store_reg(s, rd, tmp);
9ee6e8bb
PB
7551 } else {
7552 /* strd */
b0109805
PB
7553 tmp = load_reg(s, rs);
7554 gen_st32(tmp, addr, IS_USER(s));
7555 tcg_gen_addi_i32(addr, addr, 4);
7556 tmp = load_reg(s, rd);
7557 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7558 }
7559 if (insn & (1 << 21)) {
7560 /* Base writeback. */
7561 if (rn == 15)
7562 goto illegal_op;
b0109805
PB
7563 tcg_gen_addi_i32(addr, addr, offset - 4);
7564 store_reg(s, rn, addr);
7565 } else {
7d1b0095 7566 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7567 }
7568 } else if ((insn & (1 << 23)) == 0) {
7569 /* Load/store exclusive word. */
3174f8e9 7570 addr = tcg_temp_local_new();
98a46317 7571 load_reg_var(s, addr, rn);
426f5abc 7572 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7573 if (insn & (1 << 20)) {
426f5abc 7574 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7575 } else {
426f5abc 7576 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7577 }
3174f8e9 7578 tcg_temp_free(addr);
9ee6e8bb
PB
7579 } else if ((insn & (1 << 6)) == 0) {
7580 /* Table Branch. */
7581 if (rn == 15) {
7d1b0095 7582 addr = tcg_temp_new_i32();
b0109805 7583 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7584 } else {
b0109805 7585 addr = load_reg(s, rn);
9ee6e8bb 7586 }
b26eefb6 7587 tmp = load_reg(s, rm);
b0109805 7588 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7589 if (insn & (1 << 4)) {
7590 /* tbh */
b0109805 7591 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 7592 tcg_temp_free_i32(tmp);
b0109805 7593 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7594 } else { /* tbb */
7d1b0095 7595 tcg_temp_free_i32(tmp);
b0109805 7596 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7597 }
7d1b0095 7598 tcg_temp_free_i32(addr);
b0109805
PB
7599 tcg_gen_shli_i32(tmp, tmp, 1);
7600 tcg_gen_addi_i32(tmp, tmp, s->pc);
7601 store_reg(s, 15, tmp);
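 /* Summary of the table-branch forms above (illustrative): tbb branches
  * to PC + 2 * Mem_u8[Rn + Rm] and tbh to PC + 2 * Mem_u16[Rn + 2*Rm],
  * where PC is this instruction's address plus 4 (s->pc here); the
  * second tcg_gen_add_i32 is what turns the index into 2*Rm for the
  * halfword table. */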
9ee6e8bb
PB
7602 } else {
7603 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7604 ARCH(7);
9ee6e8bb 7605 op = (insn >> 4) & 0x3;
426f5abc
PB
7606 if (op == 2) {
7607 goto illegal_op;
7608 }
3174f8e9 7609 addr = tcg_temp_local_new();
98a46317 7610 load_reg_var(s, addr, rn);
9ee6e8bb 7611 if (insn & (1 << 20)) {
426f5abc 7612 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7613 } else {
426f5abc 7614 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7615 }
3174f8e9 7616 tcg_temp_free(addr);
9ee6e8bb
PB
7617 }
7618 } else {
7619 /* Load/store multiple, RFE, SRS. */
7620 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7621 /* Not available in user mode. */
b0109805 7622 if (IS_USER(s))
9ee6e8bb
PB
7623 goto illegal_op;
7624 if (insn & (1 << 20)) {
7625 /* rfe */
b0109805
PB
7626 addr = load_reg(s, rn);
7627 if ((insn & (1 << 24)) == 0)
7628 tcg_gen_addi_i32(addr, addr, -8);
7629 /* Load PC into tmp and CPSR into tmp2. */
7630 tmp = gen_ld32(addr, 0);
7631 tcg_gen_addi_i32(addr, addr, 4);
7632 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7633 if (insn & (1 << 21)) {
7634 /* Base writeback. */
b0109805
PB
7635 if (insn & (1 << 24)) {
7636 tcg_gen_addi_i32(addr, addr, 4);
7637 } else {
7638 tcg_gen_addi_i32(addr, addr, -4);
7639 }
7640 store_reg(s, rn, addr);
7641 } else {
7d1b0095 7642 tcg_temp_free_i32(addr);
9ee6e8bb 7643 }
b0109805 7644 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7645 } else {
7646 /* srs */
7647 op = (insn & 0x1f);
7d1b0095 7648 addr = tcg_temp_new_i32();
39ea3d4e
PM
7649 tmp = tcg_const_i32(op);
7650 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7651 tcg_temp_free_i32(tmp);
9ee6e8bb 7652 if ((insn & (1 << 24)) == 0) {
b0109805 7653 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7654 }
b0109805
PB
7655 tmp = load_reg(s, 14);
7656 gen_st32(tmp, addr, 0);
7657 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 7658 tmp = tcg_temp_new_i32();
b0109805
PB
7659 gen_helper_cpsr_read(tmp);
7660 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7661 if (insn & (1 << 21)) {
7662 if ((insn & (1 << 24)) == 0) {
b0109805 7663 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7664 } else {
b0109805 7665 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 7666 }
39ea3d4e
PM
7667 tmp = tcg_const_i32(op);
7668 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7669 tcg_temp_free_i32(tmp);
b0109805 7670 } else {
7d1b0095 7671 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7672 }
7673 }
7674 } else {
7675 int i;
7676 /* Load/store multiple. */
b0109805 7677 addr = load_reg(s, rn);
9ee6e8bb
PB
7678 offset = 0;
7679 for (i = 0; i < 16; i++) {
7680 if (insn & (1 << i))
7681 offset += 4;
7682 }
7683 if (insn & (1 << 24)) {
b0109805 7684 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7685 }
7686
7687 for (i = 0; i < 16; i++) {
7688 if ((insn & (1 << i)) == 0)
7689 continue;
7690 if (insn & (1 << 20)) {
7691 /* Load. */
b0109805 7692 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7693 if (i == 15) {
b0109805 7694 gen_bx(s, tmp);
9ee6e8bb 7695 } else {
b0109805 7696 store_reg(s, i, tmp);
9ee6e8bb
PB
7697 }
7698 } else {
7699 /* Store. */
b0109805
PB
7700 tmp = load_reg(s, i);
7701 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7702 }
b0109805 7703 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7704 }
7705 if (insn & (1 << 21)) {
7706 /* Base register writeback. */
7707 if (insn & (1 << 24)) {
b0109805 7708 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7709 }
7710 /* Fault if writeback register is in register list. */
7711 if (insn & (1 << rn))
7712 goto illegal_op;
b0109805
PB
7713 store_reg(s, rn, addr);
7714 } else {
7d1b0095 7715 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7716 }
7717 }
7718 }
7719 break;
2af9ab77
JB
7720 case 5:
7721
9ee6e8bb 7722 op = (insn >> 21) & 0xf;
2af9ab77
JB
7723 if (op == 6) {
7724 /* Halfword pack. */
7725 tmp = load_reg(s, rn);
7726 tmp2 = load_reg(s, rm);
7727 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7728 if (insn & (1 << 5)) {
7729 /* pkhtb */
7730 if (shift == 0)
7731 shift = 31;
7732 tcg_gen_sari_i32(tmp2, tmp2, shift);
7733 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7734 tcg_gen_ext16u_i32(tmp2, tmp2);
7735 } else {
7736 /* pkhbt */
7737 if (shift)
7738 tcg_gen_shli_i32(tmp2, tmp2, shift);
7739 tcg_gen_ext16u_i32(tmp, tmp);
7740 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7741 }
7742 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7743 tcg_temp_free_i32(tmp2);
3174f8e9
FN
7744 store_reg(s, rd, tmp);
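 /* Net effect of the two packing forms (informational): pkhbt produces
  * rd = (rm << shift)[31:16] : rn[15:0], while pkhtb produces
  * rd = rn[31:16] : (rm asr shift)[15:0]; an encoded shift of 0 for
  * pkhtb means a full arithmetic shift, hence the "shift = 31" above. */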
7745 } else {
2af9ab77
JB
7746 /* Data processing register constant shift. */
7747 if (rn == 15) {
7d1b0095 7748 tmp = tcg_temp_new_i32();
2af9ab77
JB
7749 tcg_gen_movi_i32(tmp, 0);
7750 } else {
7751 tmp = load_reg(s, rn);
7752 }
7753 tmp2 = load_reg(s, rm);
7754
7755 shiftop = (insn >> 4) & 3;
7756 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7757 conds = (insn & (1 << 20)) != 0;
7758 logic_cc = (conds && thumb2_logic_op(op));
7759 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7760 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7761 goto illegal_op;
7d1b0095 7762 tcg_temp_free_i32(tmp2);
2af9ab77
JB
7763 if (rd != 15) {
7764 store_reg(s, rd, tmp);
7765 } else {
7d1b0095 7766 tcg_temp_free_i32(tmp);
2af9ab77 7767 }
3174f8e9 7768 }
9ee6e8bb
PB
7769 break;
7770 case 13: /* Misc data processing. */
7771 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7772 if (op < 4 && (insn & 0xf000) != 0xf000)
7773 goto illegal_op;
7774 switch (op) {
7775 case 0: /* Register controlled shift. */
8984bd2e
PB
7776 tmp = load_reg(s, rn);
7777 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7778 if ((insn & 0x70) != 0)
7779 goto illegal_op;
7780 op = (insn >> 21) & 3;
8984bd2e
PB
7781 logic_cc = (insn & (1 << 20)) != 0;
7782 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7783 if (logic_cc)
7784 gen_logic_CC(tmp);
21aeb343 7785 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7786 break;
7787 case 1: /* Sign/zero extend. */
5e3f878a 7788 tmp = load_reg(s, rm);
9ee6e8bb
PB
7789 shift = (insn >> 4) & 3;
7790 /* ??? In many cases it's not necessary to do a
7791 rotate, a shift is sufficient. */
7792 if (shift != 0)
f669df27 7793 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7794 op = (insn >> 20) & 7;
7795 switch (op) {
5e3f878a
PB
7796 case 0: gen_sxth(tmp); break;
7797 case 1: gen_uxth(tmp); break;
7798 case 2: gen_sxtb16(tmp); break;
7799 case 3: gen_uxtb16(tmp); break;
7800 case 4: gen_sxtb(tmp); break;
7801 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7802 default: goto illegal_op;
7803 }
7804 if (rn != 15) {
5e3f878a 7805 tmp2 = load_reg(s, rn);
9ee6e8bb 7806 if ((op >> 1) == 1) {
5e3f878a 7807 gen_add16(tmp, tmp2);
9ee6e8bb 7808 } else {
5e3f878a 7809 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7810 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7811 }
7812 }
5e3f878a 7813 store_reg(s, rd, tmp);
9ee6e8bb
PB
7814 break;
7815 case 2: /* SIMD add/subtract. */
7816 op = (insn >> 20) & 7;
7817 shift = (insn >> 4) & 7;
7818 if ((op & 3) == 3 || (shift & 3) == 3)
7819 goto illegal_op;
6ddbc6e4
PB
7820 tmp = load_reg(s, rn);
7821 tmp2 = load_reg(s, rm);
7822 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 7823 tcg_temp_free_i32(tmp2);
6ddbc6e4 7824 store_reg(s, rd, tmp);
9ee6e8bb
PB
7825 break;
7826 case 3: /* Other data processing. */
7827 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7828 if (op < 4) {
7829 /* Saturating add/subtract. */
d9ba4830
PB
7830 tmp = load_reg(s, rn);
7831 tmp2 = load_reg(s, rm);
9ee6e8bb 7832 if (op & 1)
4809c612
JB
7833 gen_helper_double_saturate(tmp, tmp);
7834 if (op & 2)
d9ba4830 7835 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7836 else
d9ba4830 7837 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 7838 tcg_temp_free_i32(tmp2);
9ee6e8bb 7839 } else {
d9ba4830 7840 tmp = load_reg(s, rn);
9ee6e8bb
PB
7841 switch (op) {
7842 case 0x0a: /* rbit */
d9ba4830 7843 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7844 break;
7845 case 0x08: /* rev */
66896cb8 7846 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7847 break;
7848 case 0x09: /* rev16 */
d9ba4830 7849 gen_rev16(tmp);
9ee6e8bb
PB
7850 break;
7851 case 0x0b: /* revsh */
d9ba4830 7852 gen_revsh(tmp);
9ee6e8bb
PB
7853 break;
7854 case 0x10: /* sel */
d9ba4830 7855 tmp2 = load_reg(s, rm);
7d1b0095 7856 tmp3 = tcg_temp_new_i32();
6ddbc6e4 7857 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7858 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7859 tcg_temp_free_i32(tmp3);
7860 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7861 break;
7862 case 0x18: /* clz */
d9ba4830 7863 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7864 break;
7865 default:
7866 goto illegal_op;
7867 }
7868 }
d9ba4830 7869 store_reg(s, rd, tmp);
9ee6e8bb
PB
7870 break;
7871 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7872 op = (insn >> 4) & 0xf;
d9ba4830
PB
7873 tmp = load_reg(s, rn);
7874 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7875 switch ((insn >> 20) & 7) {
7876 case 0: /* 32 x 32 -> 32 */
d9ba4830 7877 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7878 tcg_temp_free_i32(tmp2);
9ee6e8bb 7879 if (rs != 15) {
d9ba4830 7880 tmp2 = load_reg(s, rs);
9ee6e8bb 7881 if (op)
d9ba4830 7882 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7883 else
d9ba4830 7884 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7885 tcg_temp_free_i32(tmp2);
9ee6e8bb 7886 }
9ee6e8bb
PB
7887 break;
7888 case 1: /* 16 x 16 -> 32 */
d9ba4830 7889 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 7890 tcg_temp_free_i32(tmp2);
9ee6e8bb 7891 if (rs != 15) {
d9ba4830
PB
7892 tmp2 = load_reg(s, rs);
7893 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7894 tcg_temp_free_i32(tmp2);
9ee6e8bb 7895 }
9ee6e8bb
PB
7896 break;
7897 case 2: /* Dual multiply add. */
7898 case 4: /* Dual multiply subtract. */
7899 if (op)
d9ba4830
PB
7900 gen_swap_half(tmp2);
7901 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7902 if (insn & (1 << 22)) {
e1d177b9 7903 /* This subtraction cannot overflow. */
d9ba4830 7904 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7905 } else {
e1d177b9
PM
7906 /* This addition cannot overflow 32 bits;
7907 * however it may overflow considered as a signed
7908 * operation, in which case we must set the Q flag.
7909 */
7910 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 7911 }
7d1b0095 7912 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7913 if (rs != 15)
7914 {
d9ba4830
PB
7915 tmp2 = load_reg(s, rs);
7916 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7917 tcg_temp_free_i32(tmp2);
9ee6e8bb 7918 }
9ee6e8bb
PB
7919 break;
7920 case 3: /* 32 * 16 -> 32msb */
7921 if (op)
d9ba4830 7922 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7923 else
d9ba4830 7924 gen_sxth(tmp2);
a7812ae4
PB
7925 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7926 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7927 tmp = tcg_temp_new_i32();
a7812ae4 7928 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7929 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7930 if (rs != 15)
7931 {
d9ba4830
PB
7932 tmp2 = load_reg(s, rs);
7933 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7934 tcg_temp_free_i32(tmp2);
9ee6e8bb 7935 }
9ee6e8bb 7936 break;
838fa72d
AJ
7937 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7938 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7939 if (rs != 15) {
838fa72d
AJ
7940 tmp = load_reg(s, rs);
7941 if (insn & (1 << 20)) {
7942 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 7943 } else {
838fa72d 7944 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 7945 }
2c0262af 7946 }
838fa72d
AJ
7947 if (insn & (1 << 4)) {
7948 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7949 }
7950 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7951 tmp = tcg_temp_new_i32();
838fa72d
AJ
7952 tcg_gen_trunc_i64_i32(tmp, tmp64);
7953 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7954 break;
7955 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 7956 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7957 tcg_temp_free_i32(tmp2);
9ee6e8bb 7958 if (rs != 15) {
d9ba4830
PB
7959 tmp2 = load_reg(s, rs);
7960 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7961 tcg_temp_free_i32(tmp2);
5fd46862 7962 }
9ee6e8bb 7963 break;
2c0262af 7964 }
d9ba4830 7965 store_reg(s, rd, tmp);
2c0262af 7966 break;
9ee6e8bb
PB
7967 case 6: case 7: /* 64-bit multiply, Divide. */
7968 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7969 tmp = load_reg(s, rn);
7970 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7971 if ((op & 0x50) == 0x10) {
7972 /* sdiv, udiv */
7973 if (!arm_feature(env, ARM_FEATURE_DIV))
7974 goto illegal_op;
7975 if (op & 0x20)
5e3f878a 7976 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7977 else
5e3f878a 7978 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 7979 tcg_temp_free_i32(tmp2);
5e3f878a 7980 store_reg(s, rd, tmp);
9ee6e8bb
PB
7981 } else if ((op & 0xe) == 0xc) {
7982 /* Dual multiply accumulate long. */
7983 if (op & 1)
5e3f878a
PB
7984 gen_swap_half(tmp2);
7985 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7986 if (op & 0x10) {
5e3f878a 7987 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7988 } else {
5e3f878a 7989 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7990 }
7d1b0095 7991 tcg_temp_free_i32(tmp2);
a7812ae4
PB
7992 /* BUGFIX */
7993 tmp64 = tcg_temp_new_i64();
7994 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7995 tcg_temp_free_i32(tmp);
a7812ae4
PB
7996 gen_addq(s, tmp64, rs, rd);
7997 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7998 tcg_temp_free_i64(tmp64);
2c0262af 7999 } else {
9ee6e8bb
PB
8000 if (op & 0x20) {
8001 /* Unsigned 64-bit multiply */
a7812ae4 8002 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8003 } else {
9ee6e8bb
PB
8004 if (op & 8) {
8005 /* smlalxy */
5e3f878a 8006 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8007 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8008 tmp64 = tcg_temp_new_i64();
8009 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8010 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8011 } else {
8012 /* Signed 64-bit multiply */
a7812ae4 8013 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8014 }
b5ff1b31 8015 }
9ee6e8bb
PB
8016 if (op & 4) {
8017 /* umaal */
a7812ae4
PB
8018 gen_addq_lo(s, tmp64, rs);
8019 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8020 } else if (op & 0x40) {
8021 /* 64-bit accumulate. */
a7812ae4 8022 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8023 }
a7812ae4 8024 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8025 tcg_temp_free_i64(tmp64);
5fd46862 8026 }
2c0262af 8027 break;
9ee6e8bb
PB
8028 }
8029 break;
8030 case 6: case 7: case 14: case 15:
8031 /* Coprocessor. */
8032 if (((insn >> 24) & 3) == 3) {
8033 /* Translate into the equivalent ARM encoding. */
f06053e3 8034 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8035 if (disas_neon_data_insn(env, s, insn))
8036 goto illegal_op;
8037 } else {
8038 if (insn & (1 << 28))
8039 goto illegal_op;
8040 if (disas_coproc_insn (env, s, insn))
8041 goto illegal_op;
8042 }
8043 break;
8044 case 8: case 9: case 10: case 11:
8045 if (insn & (1 << 15)) {
8046 /* Branches, misc control. */
8047 if (insn & 0x5000) {
8048 /* Unconditional branch. */
8049 /* signextend(hw1[10:0]) -> offset[31:12]. */
8050 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8051 /* hw1[10:0] -> offset[11:1]. */
8052 offset |= (insn & 0x7ff) << 1;
8053 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8054 offset[24:22] already have the same value because of the
8055 sign extension above. */
8056 offset ^= ((~insn) & (1 << 13)) << 10;
8057 offset ^= ((~insn) & (1 << 11)) << 11;
8058
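 /* Resulting layout (informational): the displacement is the
  * sign-extended value S:I1:I2:imm10:imm11:0 with S = hw1[10],
  * imm10 = hw1[9:0], imm11 = hw2[10:0], I1 = NOT(J1 XOR S) and
  * I2 = NOT(J2 XOR S) (J1 = hw2[13], J2 = hw2[11]); the two XORs above
  * recover I1/I2 because offset[23:22] already hold copies of S after
  * the arithmetic shift. */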
9ee6e8bb
PB
8059 if (insn & (1 << 14)) {
8060 /* Branch and link. */
3174f8e9 8061 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8062 }
3b46e624 8063
b0109805 8064 offset += s->pc;
9ee6e8bb
PB
8065 if (insn & (1 << 12)) {
8066 /* b/bl */
b0109805 8067 gen_jmp(s, offset);
9ee6e8bb
PB
8068 } else {
8069 /* blx */
b0109805
PB
8070 offset &= ~(uint32_t)2;
8071 gen_bx_im(s, offset);
2c0262af 8072 }
9ee6e8bb
PB
8073 } else if (((insn >> 23) & 7) == 7) {
8074 /* Misc control */
8075 if (insn & (1 << 13))
8076 goto illegal_op;
8077
8078 if (insn & (1 << 26)) {
8079 /* Secure monitor call (v6Z) */
8080 goto illegal_op; /* not implemented. */
2c0262af 8081 } else {
9ee6e8bb
PB
8082 op = (insn >> 20) & 7;
8083 switch (op) {
8084 case 0: /* msr cpsr. */
8085 if (IS_M(env)) {
8984bd2e
PB
8086 tmp = load_reg(s, rn);
8087 addr = tcg_const_i32(insn & 0xff);
8088 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8089 tcg_temp_free_i32(addr);
7d1b0095 8090 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8091 gen_lookup_tb(s);
8092 break;
8093 }
8094 /* fall through */
8095 case 1: /* msr spsr. */
8096 if (IS_M(env))
8097 goto illegal_op;
2fbac54b
FN
8098 tmp = load_reg(s, rn);
8099 if (gen_set_psr(s,
9ee6e8bb 8100 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8101 op == 1, tmp))
9ee6e8bb
PB
8102 goto illegal_op;
8103 break;
8104 case 2: /* cps, nop-hint. */
8105 if (((insn >> 8) & 7) == 0) {
8106 gen_nop_hint(s, insn & 0xff);
8107 }
8108 /* Implemented as NOP in user mode. */
8109 if (IS_USER(s))
8110 break;
8111 offset = 0;
8112 imm = 0;
8113 if (insn & (1 << 10)) {
8114 if (insn & (1 << 7))
8115 offset |= CPSR_A;
8116 if (insn & (1 << 6))
8117 offset |= CPSR_I;
8118 if (insn & (1 << 5))
8119 offset |= CPSR_F;
8120 if (insn & (1 << 9))
8121 imm = CPSR_A | CPSR_I | CPSR_F;
8122 }
8123 if (insn & (1 << 8)) {
8124 offset |= 0x1f;
8125 imm |= (insn & 0x1f);
8126 }
8127 if (offset) {
2fbac54b 8128 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8129 }
8130 break;
8131 case 3: /* Special control operations. */
426f5abc 8132 ARCH(7);
9ee6e8bb
PB
8133 op = (insn >> 4) & 0xf;
8134 switch (op) {
8135 case 2: /* clrex */
426f5abc 8136 gen_clrex(s);
9ee6e8bb
PB
8137 break;
8138 case 4: /* dsb */
8139 case 5: /* dmb */
8140 case 6: /* isb */
8141 /* These execute as NOPs. */
9ee6e8bb
PB
8142 break;
8143 default:
8144 goto illegal_op;
8145 }
8146 break;
8147 case 4: /* bxj */
8148 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8149 tmp = load_reg(s, rn);
8150 gen_bx(s, tmp);
9ee6e8bb
PB
8151 break;
8152 case 5: /* Exception return. */
b8b45b68
RV
8153 if (IS_USER(s)) {
8154 goto illegal_op;
8155 }
8156 if (rn != 14 || rd != 15) {
8157 goto illegal_op;
8158 }
8159 tmp = load_reg(s, rn);
8160 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8161 gen_exception_return(s, tmp);
8162 break;
9ee6e8bb 8163 case 6: /* mrs cpsr. */
7d1b0095 8164 tmp = tcg_temp_new_i32();
9ee6e8bb 8165 if (IS_M(env)) {
8984bd2e
PB
8166 addr = tcg_const_i32(insn & 0xff);
8167 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8168 tcg_temp_free_i32(addr);
9ee6e8bb 8169 } else {
8984bd2e 8170 gen_helper_cpsr_read(tmp);
9ee6e8bb 8171 }
8984bd2e 8172 store_reg(s, rd, tmp);
9ee6e8bb
PB
8173 break;
8174 case 7: /* mrs spsr. */
8175 /* Not accessible in user mode. */
8176 if (IS_USER(s) || IS_M(env))
8177 goto illegal_op;
d9ba4830
PB
8178 tmp = load_cpu_field(spsr);
8179 store_reg(s, rd, tmp);
9ee6e8bb 8180 break;
2c0262af
FB
8181 }
8182 }
9ee6e8bb
PB
8183 } else {
8184 /* Conditional branch. */
8185 op = (insn >> 22) & 0xf;
8186 /* Generate a conditional jump to next instruction. */
8187 s->condlabel = gen_new_label();
d9ba4830 8188 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8189 s->condjmp = 1;
8190
8191 /* offset[11:1] = insn[10:0] */
8192 offset = (insn & 0x7ff) << 1;
8193 /* offset[17:12] = insn[21:16]. */
8194 offset |= (insn & 0x003f0000) >> 4;
8195 /* offset[31:20] = insn[26]. */
8196 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8197 /* offset[18] = insn[13]. */
8198 offset |= (insn & (1 << 13)) << 5;
8199 /* offset[19] = insn[11]. */
8200 offset |= (insn & (1 << 11)) << 8;
8201
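 /* Equivalently (informational): the displacement is the sign-extended
  * value S:J2:J1:imm6:imm11:0 with S = hw1[10], imm6 = hw1[5:0],
  * imm11 = hw2[10:0], J1 = hw2[13] and J2 = hw2[11], giving the
  * conditional branch a range of roughly +/-1MB. */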
8202 /* jump to the offset */
b0109805 8203 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8204 }
8205 } else {
8206 /* Data processing immediate. */
8207 if (insn & (1 << 25)) {
8208 if (insn & (1 << 24)) {
8209 if (insn & (1 << 20))
8210 goto illegal_op;
8211 /* Bitfield/Saturate. */
8212 op = (insn >> 21) & 7;
8213 imm = insn & 0x1f;
8214 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8215 if (rn == 15) {
7d1b0095 8216 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8217 tcg_gen_movi_i32(tmp, 0);
8218 } else {
8219 tmp = load_reg(s, rn);
8220 }
9ee6e8bb
PB
8221 switch (op) {
8222 case 2: /* Signed bitfield extract. */
8223 imm++;
8224 if (shift + imm > 32)
8225 goto illegal_op;
8226 if (imm < 32)
6ddbc6e4 8227 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8228 break;
8229 case 6: /* Unsigned bitfield extract. */
8230 imm++;
8231 if (shift + imm > 32)
8232 goto illegal_op;
8233 if (imm < 32)
6ddbc6e4 8234 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8235 break;
8236 case 3: /* Bitfield insert/clear. */
8237 if (imm < shift)
8238 goto illegal_op;
8239 imm = imm + 1 - shift;
8240 if (imm != 32) {
6ddbc6e4 8241 tmp2 = load_reg(s, rd);
8f8e3aa4 8242 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8243 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8244 }
8245 break;
8246 case 7:
8247 goto illegal_op;
8248 default: /* Saturate. */
9ee6e8bb
PB
8249 if (shift) {
8250 if (op & 1)
6ddbc6e4 8251 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8252 else
6ddbc6e4 8253 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8254 }
6ddbc6e4 8255 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8256 if (op & 4) {
8257 /* Unsigned. */
9ee6e8bb 8258 if ((op & 1) && shift == 0)
6ddbc6e4 8259 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8260 else
6ddbc6e4 8261 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8262 } else {
9ee6e8bb 8263 /* Signed. */
9ee6e8bb 8264 if ((op & 1) && shift == 0)
6ddbc6e4 8265 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8266 else
6ddbc6e4 8267 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8268 }
b75263d6 8269 tcg_temp_free_i32(tmp2);
9ee6e8bb 8270 break;
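 /* Example of the saturating forms (illustrative): "usat rd, #8, rn"
  * clamps rn to [0, 255] and sets the Q flag if clamping occurred;
  * "ssat rd, #8, rn" clamps to [-128, 127]; the 16-bit variants do the
  * same independently on each halfword. */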
2c0262af 8271 }
6ddbc6e4 8272 store_reg(s, rd, tmp);
9ee6e8bb
PB
8273 } else {
8274 imm = ((insn & 0x04000000) >> 15)
8275 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8276 if (insn & (1 << 22)) {
8277 /* 16-bit immediate. */
8278 imm |= (insn >> 4) & 0xf000;
8279 if (insn & (1 << 23)) {
8280 /* movt */
5e3f878a 8281 tmp = load_reg(s, rd);
86831435 8282 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8283 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8284 } else {
9ee6e8bb 8285 /* movw */
7d1b0095 8286 tmp = tcg_temp_new_i32();
5e3f878a 8287 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8288 }
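 /* Typical use of the pair (worked example): "movw rd, #0x5678"
  * followed by "movt rd, #0x1234" leaves rd = 0x12345678, which is why
  * the movt path above preserves the low halfword with ext16u before
  * or-ing in the new top half. */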
8289 } else {
9ee6e8bb
PB
8290 /* Add/sub 12-bit immediate. */
8291 if (rn == 15) {
b0109805 8292 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8293 if (insn & (1 << 23))
b0109805 8294 offset -= imm;
9ee6e8bb 8295 else
b0109805 8296 offset += imm;
7d1b0095 8297 tmp = tcg_temp_new_i32();
5e3f878a 8298 tcg_gen_movi_i32(tmp, offset);
2c0262af 8299 } else {
5e3f878a 8300 tmp = load_reg(s, rn);
9ee6e8bb 8301 if (insn & (1 << 23))
5e3f878a 8302 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8303 else
5e3f878a 8304 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8305 }
9ee6e8bb 8306 }
5e3f878a 8307 store_reg(s, rd, tmp);
191abaa2 8308 }
9ee6e8bb
PB
8309 } else {
8310 int shifter_out = 0;
8311 /* modified 12-bit immediate. */
8312 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8313 imm = (insn & 0xff);
8314 switch (shift) {
8315 case 0: /* XY */
8316 /* Nothing to do. */
8317 break;
8318 case 1: /* 00XY00XY */
8319 imm |= imm << 16;
8320 break;
8321 case 2: /* XY00XY00 */
8322 imm |= imm << 16;
8323 imm <<= 8;
8324 break;
8325 case 3: /* XYXYXYXY */
8326 imm |= imm << 16;
8327 imm |= imm << 8;
8328 break;
8329 default: /* Rotated constant. */
8330 shift = (shift << 1) | (imm >> 7);
8331 imm |= 0x80;
8332 imm = imm << (32 - shift);
8333 shifter_out = 1;
8334 break;
b5ff1b31 8335 }
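 /* Worked examples of the expansion above (illustrative values):
  *   i:imm3 = 0b0011, imm8 = 0xA5  ->  imm = 0xA5A5A5A5 (XYXYXYXY)
  *   i:imm3 = 0b0100, imm8 = 0x25  ->  rotation = (0b0100 << 1) | 0 = 8,
  *                                     imm = (0x80 | 0x25) << (32 - 8)
  *                                         = 0xA5000000 (rotated constant)
  */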
7d1b0095 8336 tmp2 = tcg_temp_new_i32();
3174f8e9 8337 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8338 rn = (insn >> 16) & 0xf;
3174f8e9 8339 if (rn == 15) {
7d1b0095 8340 tmp = tcg_temp_new_i32();
3174f8e9
FN
8341 tcg_gen_movi_i32(tmp, 0);
8342 } else {
8343 tmp = load_reg(s, rn);
8344 }
9ee6e8bb
PB
8345 op = (insn >> 21) & 0xf;
8346 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8347 shifter_out, tmp, tmp2))
9ee6e8bb 8348 goto illegal_op;
7d1b0095 8349 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8350 rd = (insn >> 8) & 0xf;
8351 if (rd != 15) {
3174f8e9
FN
8352 store_reg(s, rd, tmp);
8353 } else {
7d1b0095 8354 tcg_temp_free_i32(tmp);
2c0262af 8355 }
2c0262af 8356 }
9ee6e8bb
PB
8357 }
8358 break;
8359 case 12: /* Load/store single data item. */
8360 {
8361 int postinc = 0;
8362 int writeback = 0;
b0109805 8363 int user;
9ee6e8bb
PB
8364 if ((insn & 0x01100000) == 0x01000000) {
8365 if (disas_neon_ls_insn(env, s, insn))
c1713132 8366 goto illegal_op;
9ee6e8bb
PB
8367 break;
8368 }
a2fdc890
PM
8369 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8370 if (rs == 15) {
8371 if (!(insn & (1 << 20))) {
8372 goto illegal_op;
8373 }
8374 if (op != 2) {
8375 /* Byte or halfword load space with dest == r15 : memory hints.
8376 * Catch them early so we don't emit pointless addressing code.
8377 * This space is a mix of:
8378 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8379 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8380 * cores)
8381 * unallocated hints, which must be treated as NOPs
8382 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8383 * which is easiest for the decoding logic
8384 * Some space which must UNDEF
8385 */
8386 int op1 = (insn >> 23) & 3;
8387 int op2 = (insn >> 6) & 0x3f;
8388 if (op & 2) {
8389 goto illegal_op;
8390 }
8391 if (rn == 15) {
8392 /* UNPREDICTABLE or unallocated hint */
8393 return 0;
8394 }
8395 if (op1 & 1) {
8396 return 0; /* PLD* or unallocated hint */
8397 }
8398 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8399 return 0; /* PLD* or unallocated hint */
8400 }
8401 /* UNDEF space, or an UNPREDICTABLE */
8402 return 1;
8403 }
8404 }
b0109805 8405 user = IS_USER(s);
9ee6e8bb 8406 if (rn == 15) {
7d1b0095 8407 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8408 /* PC relative. */
8409 /* s->pc has already been incremented by 4. */
8410 imm = s->pc & 0xfffffffc;
8411 if (insn & (1 << 23))
8412 imm += insn & 0xfff;
8413 else
8414 imm -= insn & 0xfff;
b0109805 8415 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8416 } else {
b0109805 8417 addr = load_reg(s, rn);
9ee6e8bb
PB
8418 if (insn & (1 << 23)) {
8419 /* Positive offset. */
8420 imm = insn & 0xfff;
b0109805 8421 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8422 } else {
9ee6e8bb 8423 imm = insn & 0xff;
2a0308c5
PM
8424 switch ((insn >> 8) & 0xf) {
8425 case 0x0: /* Shifted Register. */
9ee6e8bb 8426 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8427 if (shift > 3) {
8428 tcg_temp_free_i32(addr);
18c9b560 8429 goto illegal_op;
2a0308c5 8430 }
b26eefb6 8431 tmp = load_reg(s, rm);
9ee6e8bb 8432 if (shift)
b26eefb6 8433 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8434 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8435 tcg_temp_free_i32(tmp);
9ee6e8bb 8436 break;
2a0308c5 8437 case 0xc: /* Negative offset. */
b0109805 8438 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8439 break;
2a0308c5 8440 case 0xe: /* User privilege. */
b0109805
PB
8441 tcg_gen_addi_i32(addr, addr, imm);
8442 user = 1;
9ee6e8bb 8443 break;
2a0308c5 8444 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8445 imm = -imm;
8446 /* Fall through. */
2a0308c5 8447 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8448 postinc = 1;
8449 writeback = 1;
8450 break;
2a0308c5 8451 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8452 imm = -imm;
8453 /* Fall through. */
2a0308c5 8454 case 0xf: /* Pre-increment. */
b0109805 8455 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8456 writeback = 1;
8457 break;
8458 default:
2a0308c5 8459 tcg_temp_free_i32(addr);
b7bcbe95 8460 goto illegal_op;
9ee6e8bb
PB
8461 }
8462 }
8463 }
9ee6e8bb
PB
8464 if (insn & (1 << 20)) {
8465 /* Load. */
a2fdc890
PM
8466 switch (op) {
8467 case 0: tmp = gen_ld8u(addr, user); break;
8468 case 4: tmp = gen_ld8s(addr, user); break;
8469 case 1: tmp = gen_ld16u(addr, user); break;
8470 case 5: tmp = gen_ld16s(addr, user); break;
8471 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8472 default:
8473 tcg_temp_free_i32(addr);
8474 goto illegal_op;
a2fdc890
PM
8475 }
8476 if (rs == 15) {
8477 gen_bx(s, tmp);
9ee6e8bb 8478 } else {
a2fdc890 8479 store_reg(s, rs, tmp);
9ee6e8bb
PB
8480 }
8481 } else {
8482 /* Store. */
b0109805 8483 tmp = load_reg(s, rs);
9ee6e8bb 8484 switch (op) {
b0109805
PB
8485 case 0: gen_st8(tmp, addr, user); break;
8486 case 1: gen_st16(tmp, addr, user); break;
8487 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8488 default:
8489 tcg_temp_free_i32(addr);
8490 goto illegal_op;
b7bcbe95 8491 }
2c0262af 8492 }
9ee6e8bb 8493 if (postinc)
b0109805
PB
8494 tcg_gen_addi_i32(addr, addr, imm);
8495 if (writeback) {
8496 store_reg(s, rn, addr);
8497 } else {
7d1b0095 8498 tcg_temp_free_i32(addr);
b0109805 8499 }
9ee6e8bb
PB
8500 }
8501 break;
8502 default:
8503 goto illegal_op;
2c0262af 8504 }
9ee6e8bb
PB
8505 return 0;
8506illegal_op:
8507 return 1;
2c0262af
FB
8508}
8509
9ee6e8bb 8510static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8511{
8512 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8513 int32_t offset;
8514 int i;
b26eefb6 8515 TCGv tmp;
d9ba4830 8516 TCGv tmp2;
b0109805 8517 TCGv addr;
99c475ab 8518
9ee6e8bb
PB
8519 if (s->condexec_mask) {
8520 cond = s->condexec_cond;
bedd2912
JB
8521 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8522 s->condlabel = gen_new_label();
8523 gen_test_cc(cond ^ 1, s->condlabel);
8524 s->condjmp = 1;
8525 }
9ee6e8bb
PB
8526 }
8527
b5ff1b31 8528 insn = lduw_code(s->pc);
99c475ab 8529 s->pc += 2;
b5ff1b31 8530
99c475ab
FB
8531 switch (insn >> 12) {
8532 case 0: case 1:
396e467c 8533
99c475ab
FB
8534 rd = insn & 7;
8535 op = (insn >> 11) & 3;
8536 if (op == 3) {
8537 /* add/subtract */
8538 rn = (insn >> 3) & 7;
396e467c 8539 tmp = load_reg(s, rn);
99c475ab
FB
8540 if (insn & (1 << 10)) {
8541 /* immediate */
7d1b0095 8542 tmp2 = tcg_temp_new_i32();
396e467c 8543 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8544 } else {
8545 /* reg */
8546 rm = (insn >> 6) & 7;
396e467c 8547 tmp2 = load_reg(s, rm);
99c475ab 8548 }
9ee6e8bb
PB
8549 if (insn & (1 << 9)) {
8550 if (s->condexec_mask)
396e467c 8551 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8552 else
396e467c 8553 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8554 } else {
8555 if (s->condexec_mask)
396e467c 8556 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8557 else
396e467c 8558 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8559 }
7d1b0095 8560 tcg_temp_free_i32(tmp2);
396e467c 8561 store_reg(s, rd, tmp);
99c475ab
FB
8562 } else {
8563 /* shift immediate */
8564 rm = (insn >> 3) & 7;
8565 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8566 tmp = load_reg(s, rm);
8567 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8568 if (!s->condexec_mask)
8569 gen_logic_CC(tmp);
8570 store_reg(s, rd, tmp);
99c475ab
FB
8571 }
8572 break;
8573 case 2: case 3:
8574 /* arithmetic large immediate */
8575 op = (insn >> 11) & 3;
8576 rd = (insn >> 8) & 0x7;
396e467c 8577 if (op == 0) { /* mov */
7d1b0095 8578 tmp = tcg_temp_new_i32();
396e467c 8579 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8580 if (!s->condexec_mask)
396e467c
FN
8581 gen_logic_CC(tmp);
8582 store_reg(s, rd, tmp);
8583 } else {
8584 tmp = load_reg(s, rd);
7d1b0095 8585 tmp2 = tcg_temp_new_i32();
396e467c
FN
8586 tcg_gen_movi_i32(tmp2, insn & 0xff);
8587 switch (op) {
8588 case 1: /* cmp */
8589 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8590 tcg_temp_free_i32(tmp);
8591 tcg_temp_free_i32(tmp2);
396e467c
FN
8592 break;
8593 case 2: /* add */
8594 if (s->condexec_mask)
8595 tcg_gen_add_i32(tmp, tmp, tmp2);
8596 else
8597 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 8598 tcg_temp_free_i32(tmp2);
396e467c
FN
8599 store_reg(s, rd, tmp);
8600 break;
8601 case 3: /* sub */
8602 if (s->condexec_mask)
8603 tcg_gen_sub_i32(tmp, tmp, tmp2);
8604 else
8605 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 8606 tcg_temp_free_i32(tmp2);
396e467c
FN
8607 store_reg(s, rd, tmp);
8608 break;
8609 }
99c475ab 8610 }
99c475ab
FB
8611 break;
8612 case 4:
8613 if (insn & (1 << 11)) {
8614 rd = (insn >> 8) & 7;
5899f386
FB
8615 /* load pc-relative. Bit 1 of PC is ignored. */
8616 val = s->pc + 2 + ((insn & 0xff) * 4);
8617 val &= ~(uint32_t)2;
7d1b0095 8618 addr = tcg_temp_new_i32();
b0109805
PB
8619 tcg_gen_movi_i32(addr, val);
8620 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 8621 tcg_temp_free_i32(addr);
b0109805 8622 store_reg(s, rd, tmp);
99c475ab
FB
8623 break;
8624 }
8625 if (insn & (1 << 10)) {
8626 /* data processing extended or blx */
8627 rd = (insn & 7) | ((insn >> 4) & 8);
8628 rm = (insn >> 3) & 0xf;
8629 op = (insn >> 8) & 3;
8630 switch (op) {
8631 case 0: /* add */
396e467c
FN
8632 tmp = load_reg(s, rd);
8633 tmp2 = load_reg(s, rm);
8634 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8635 tcg_temp_free_i32(tmp2);
396e467c 8636 store_reg(s, rd, tmp);
99c475ab
FB
8637 break;
8638 case 1: /* cmp */
396e467c
FN
8639 tmp = load_reg(s, rd);
8640 tmp2 = load_reg(s, rm);
8641 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8642 tcg_temp_free_i32(tmp2);
8643 tcg_temp_free_i32(tmp);
99c475ab
FB
8644 break;
8645 case 2: /* mov/cpy */
396e467c
FN
8646 tmp = load_reg(s, rm);
8647 store_reg(s, rd, tmp);
99c475ab
FB
8648 break;
8649 case 3:/* branch [and link] exchange thumb register */
b0109805 8650 tmp = load_reg(s, rm);
99c475ab
FB
8651 if (insn & (1 << 7)) {
8652 val = (uint32_t)s->pc | 1;
7d1b0095 8653 tmp2 = tcg_temp_new_i32();
b0109805
PB
8654 tcg_gen_movi_i32(tmp2, val);
8655 store_reg(s, 14, tmp2);
99c475ab 8656 }
d9ba4830 8657 gen_bx(s, tmp);
99c475ab
FB
8658 break;
8659 }
8660 break;
8661 }
8662
8663 /* data processing register */
8664 rd = insn & 7;
8665 rm = (insn >> 3) & 7;
8666 op = (insn >> 6) & 0xf;
8667 if (op == 2 || op == 3 || op == 4 || op == 7) {
8668 /* the shift/rotate ops want the operands backwards */
8669 val = rm;
8670 rm = rd;
8671 rd = val;
8672 val = 1;
8673 } else {
8674 val = 0;
8675 }
8676
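 /* Why the operands are swapped (informational): for the shift/rotate
  * forms such as "lsl rd, rs", rd supplies the value and rs the shift
  * amount, but the code below loads tmp from rd and tmp2 from rm;
  * swapping the two indices (and setting val = 1) makes tmp the amount,
  * tmp2 the value being shifted, and stores the result back into the
  * original rd. */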
396e467c 8677 if (op == 9) { /* neg */
7d1b0095 8678 tmp = tcg_temp_new_i32();
396e467c
FN
8679 tcg_gen_movi_i32(tmp, 0);
8680 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8681 tmp = load_reg(s, rd);
8682 } else {
8683 TCGV_UNUSED(tmp);
8684 }
99c475ab 8685
396e467c 8686 tmp2 = load_reg(s, rm);
5899f386 8687 switch (op) {
99c475ab 8688 case 0x0: /* and */
396e467c 8689 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8690 if (!s->condexec_mask)
396e467c 8691 gen_logic_CC(tmp);
99c475ab
FB
8692 break;
8693 case 0x1: /* eor */
396e467c 8694 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8695 if (!s->condexec_mask)
396e467c 8696 gen_logic_CC(tmp);
99c475ab
FB
8697 break;
8698 case 0x2: /* lsl */
9ee6e8bb 8699 if (s->condexec_mask) {
396e467c 8700 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8701 } else {
396e467c
FN
8702 gen_helper_shl_cc(tmp2, tmp2, tmp);
8703 gen_logic_CC(tmp2);
9ee6e8bb 8704 }
99c475ab
FB
8705 break;
8706 case 0x3: /* lsr */
9ee6e8bb 8707 if (s->condexec_mask) {
396e467c 8708 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8709 } else {
396e467c
FN
8710 gen_helper_shr_cc(tmp2, tmp2, tmp);
8711 gen_logic_CC(tmp2);
9ee6e8bb 8712 }
99c475ab
FB
8713 break;
8714 case 0x4: /* asr */
9ee6e8bb 8715 if (s->condexec_mask) {
396e467c 8716 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8717 } else {
396e467c
FN
8718 gen_helper_sar_cc(tmp2, tmp2, tmp);
8719 gen_logic_CC(tmp2);
9ee6e8bb 8720 }
99c475ab
FB
8721 break;
8722 case 0x5: /* adc */
9ee6e8bb 8723 if (s->condexec_mask)
396e467c 8724 gen_adc(tmp, tmp2);
9ee6e8bb 8725 else
396e467c 8726 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8727 break;
8728 case 0x6: /* sbc */
9ee6e8bb 8729 if (s->condexec_mask)
396e467c 8730 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8731 else
396e467c 8732 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8733 break;
8734 case 0x7: /* ror */
9ee6e8bb 8735 if (s->condexec_mask) {
f669df27
AJ
8736 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8737 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8738 } else {
396e467c
FN
8739 gen_helper_ror_cc(tmp2, tmp2, tmp);
8740 gen_logic_CC(tmp2);
9ee6e8bb 8741 }
99c475ab
FB
8742 break;
8743 case 0x8: /* tst */
396e467c
FN
8744 tcg_gen_and_i32(tmp, tmp, tmp2);
8745 gen_logic_CC(tmp);
99c475ab 8746 rd = 16;
5899f386 8747 break;
99c475ab 8748 case 0x9: /* neg */
9ee6e8bb 8749 if (s->condexec_mask)
396e467c 8750 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8751 else
396e467c 8752 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8753 break;
8754 case 0xa: /* cmp */
396e467c 8755 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8756 rd = 16;
8757 break;
8758 case 0xb: /* cmn */
396e467c 8759 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8760 rd = 16;
8761 break;
8762 case 0xc: /* orr */
396e467c 8763 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8764 if (!s->condexec_mask)
396e467c 8765 gen_logic_CC(tmp);
99c475ab
FB
8766 break;
8767 case 0xd: /* mul */
7b2919a0 8768 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8769 if (!s->condexec_mask)
396e467c 8770 gen_logic_CC(tmp);
99c475ab
FB
8771 break;
8772 case 0xe: /* bic */
f669df27 8773 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8774 if (!s->condexec_mask)
396e467c 8775 gen_logic_CC(tmp);
99c475ab
FB
8776 break;
8777 case 0xf: /* mvn */
396e467c 8778 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8779 if (!s->condexec_mask)
396e467c 8780 gen_logic_CC(tmp2);
99c475ab 8781 val = 1;
5899f386 8782 rm = rd;
99c475ab
FB
8783 break;
8784 }
8785 if (rd != 16) {
396e467c
FN
8786 if (val) {
8787 store_reg(s, rm, tmp2);
8788 if (op != 0xf)
7d1b0095 8789 tcg_temp_free_i32(tmp);
396e467c
FN
8790 } else {
8791 store_reg(s, rd, tmp);
7d1b0095 8792 tcg_temp_free_i32(tmp2);
396e467c
FN
8793 }
8794 } else {
7d1b0095
PM
8795 tcg_temp_free_i32(tmp);
8796 tcg_temp_free_i32(tmp2);
99c475ab
FB
8797 }
8798 break;
8799
8800 case 5:
8801 /* load/store register offset. */
8802 rd = insn & 7;
8803 rn = (insn >> 3) & 7;
8804 rm = (insn >> 6) & 7;
8805 op = (insn >> 9) & 7;
b0109805 8806 addr = load_reg(s, rn);
b26eefb6 8807 tmp = load_reg(s, rm);
b0109805 8808 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8809 tcg_temp_free_i32(tmp);
99c475ab
FB
8810
8811 if (op < 3) /* store */
b0109805 8812 tmp = load_reg(s, rd);
99c475ab
FB
8813
8814 switch (op) {
8815 case 0: /* str */
b0109805 8816 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8817 break;
8818 case 1: /* strh */
b0109805 8819 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8820 break;
8821 case 2: /* strb */
b0109805 8822 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8823 break;
8824 case 3: /* ldrsb */
b0109805 8825 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8826 break;
8827 case 4: /* ldr */
b0109805 8828 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8829 break;
8830 case 5: /* ldrh */
b0109805 8831 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8832 break;
8833 case 6: /* ldrb */
b0109805 8834 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8835 break;
8836 case 7: /* ldrsh */
b0109805 8837 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8838 break;
8839 }
8840 if (op >= 3) /* load */
b0109805 8841 store_reg(s, rd, tmp);
7d1b0095 8842 tcg_temp_free_i32(addr);
99c475ab
FB
8843 break;
8844
8845 case 6:
8846 /* load/store word immediate offset */
8847 rd = insn & 7;
8848 rn = (insn >> 3) & 7;
b0109805 8849 addr = load_reg(s, rn);
99c475ab 8850 val = (insn >> 4) & 0x7c;
b0109805 8851 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8852
8853 if (insn & (1 << 11)) {
8854 /* load */
b0109805
PB
8855 tmp = gen_ld32(addr, IS_USER(s));
8856 store_reg(s, rd, tmp);
99c475ab
FB
8857 } else {
8858 /* store */
b0109805
PB
8859 tmp = load_reg(s, rd);
8860 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8861 }
7d1b0095 8862 tcg_temp_free_i32(addr);
99c475ab
FB
8863 break;
8864
8865 case 7:
8866 /* load/store byte immediate offset */
8867 rd = insn & 7;
8868 rn = (insn >> 3) & 7;
b0109805 8869 addr = load_reg(s, rn);
99c475ab 8870 val = (insn >> 6) & 0x1f;
b0109805 8871 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8872
8873 if (insn & (1 << 11)) {
8874 /* load */
b0109805
PB
8875 tmp = gen_ld8u(addr, IS_USER(s));
8876 store_reg(s, rd, tmp);
99c475ab
FB
8877 } else {
8878 /* store */
b0109805
PB
8879 tmp = load_reg(s, rd);
8880 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8881 }
7d1b0095 8882 tcg_temp_free_i32(addr);
99c475ab
FB
8883 break;
8884
8885 case 8:
8886 /* load/store halfword immediate offset */
8887 rd = insn & 7;
8888 rn = (insn >> 3) & 7;
b0109805 8889 addr = load_reg(s, rn);
99c475ab 8890 val = (insn >> 5) & 0x3e;
b0109805 8891 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8892
8893 if (insn & (1 << 11)) {
8894 /* load */
b0109805
PB
8895 tmp = gen_ld16u(addr, IS_USER(s));
8896 store_reg(s, rd, tmp);
99c475ab
FB
8897 } else {
8898 /* store */
b0109805
PB
8899 tmp = load_reg(s, rd);
8900 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8901 }
7d1b0095 8902 tcg_temp_free_i32(addr);
99c475ab
FB
8903 break;
8904
8905 case 9:
8906 /* load/store from stack */
8907 rd = (insn >> 8) & 7;
b0109805 8908 addr = load_reg(s, 13);
99c475ab 8909 val = (insn & 0xff) * 4;
b0109805 8910 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8911
8912 if (insn & (1 << 11)) {
8913 /* load */
b0109805
PB
8914 tmp = gen_ld32(addr, IS_USER(s));
8915 store_reg(s, rd, tmp);
99c475ab
FB
8916 } else {
8917 /* store */
b0109805
PB
8918 tmp = load_reg(s, rd);
8919 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8920 }
7d1b0095 8921 tcg_temp_free_i32(addr);
99c475ab
FB
8922 break;
8923
8924 case 10:
8925 /* add to high reg */
8926 rd = (insn >> 8) & 7;
5899f386
FB
8927 if (insn & (1 << 11)) {
8928 /* SP */
5e3f878a 8929 tmp = load_reg(s, 13);
5899f386
FB
8930 } else {
8931 /* PC. bit 1 is ignored. */
7d1b0095 8932 tmp = tcg_temp_new_i32();
5e3f878a 8933 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8934 }
99c475ab 8935 val = (insn & 0xff) * 4;
5e3f878a
PB
8936 tcg_gen_addi_i32(tmp, tmp, val);
8937 store_reg(s, rd, tmp);
99c475ab
FB
8938 break;
8939
8940 case 11:
8941 /* misc */
8942 op = (insn >> 8) & 0xf;
8943 switch (op) {
8944 case 0:
8945 /* adjust stack pointer */
b26eefb6 8946 tmp = load_reg(s, 13);
99c475ab
FB
8947 val = (insn & 0x7f) * 4;
8948 if (insn & (1 << 7))
6a0d8a1d 8949 val = -(int32_t)val;
b26eefb6
PB
8950 tcg_gen_addi_i32(tmp, tmp, val);
8951 store_reg(s, 13, tmp);
99c475ab
FB
8952 break;
8953
9ee6e8bb
PB
8954 case 2: /* sign/zero extend. */
8955 ARCH(6);
8956 rd = insn & 7;
8957 rm = (insn >> 3) & 7;
b0109805 8958 tmp = load_reg(s, rm);
9ee6e8bb 8959 switch ((insn >> 6) & 3) {
b0109805
PB
8960 case 0: gen_sxth(tmp); break;
8961 case 1: gen_sxtb(tmp); break;
8962 case 2: gen_uxth(tmp); break;
8963 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8964 }
b0109805 8965 store_reg(s, rd, tmp);
9ee6e8bb 8966 break;
99c475ab
FB
8967 case 4: case 5: case 0xc: case 0xd:
8968 /* push/pop */
b0109805 8969 addr = load_reg(s, 13);
5899f386
FB
8970 if (insn & (1 << 8))
8971 offset = 4;
99c475ab 8972 else
5899f386
FB
8973 offset = 0;
8974 for (i = 0; i < 8; i++) {
8975 if (insn & (1 << i))
8976 offset += 4;
8977 }
8978 if ((insn & (1 << 11)) == 0) {
b0109805 8979 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8980 }
99c475ab
FB
8981 for (i = 0; i < 8; i++) {
8982 if (insn & (1 << i)) {
8983 if (insn & (1 << 11)) {
8984 /* pop */
b0109805
PB
8985 tmp = gen_ld32(addr, IS_USER(s));
8986 store_reg(s, i, tmp);
99c475ab
FB
8987 } else {
8988 /* push */
b0109805
PB
8989 tmp = load_reg(s, i);
8990 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8991 }
5899f386 8992 /* advance to the next address. */
b0109805 8993 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8994 }
8995 }
a50f5b91 8996 TCGV_UNUSED(tmp);
99c475ab
FB
8997 if (insn & (1 << 8)) {
8998 if (insn & (1 << 11)) {
8999 /* pop pc */
b0109805 9000 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9001 /* don't set the pc until the rest of the instruction
9002 has completed */
9003 } else {
9004 /* push lr */
b0109805
PB
9005 tmp = load_reg(s, 14);
9006 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9007 }
b0109805 9008 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9009 }
5899f386 9010 if ((insn & (1 << 11)) == 0) {
b0109805 9011 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9012 }
99c475ab 9013 /* write back the new stack pointer */
b0109805 9014 store_reg(s, 13, addr);
99c475ab
FB
9015 /* set the new PC value */
9016 if ((insn & 0x0900) == 0x0900)
b0109805 9017 gen_bx(s, tmp);
99c475ab
FB
9018 break;
9019
9ee6e8bb
PB
9020 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9021 rm = insn & 7;
d9ba4830 9022 tmp = load_reg(s, rm);
9ee6e8bb
PB
9023 s->condlabel = gen_new_label();
9024 s->condjmp = 1;
9025 if (insn & (1 << 11))
cb63669a 9026 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9027 else
cb63669a 9028 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9029 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9030 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9031 val = (uint32_t)s->pc + 2;
9032 val += offset;
9033 gen_jmp(s, val);
9034 break;
9035
9036 case 15: /* IT, nop-hint. */
9037 if ((insn & 0xf) == 0) {
9038 gen_nop_hint(s, (insn >> 4) & 0xf);
9039 break;
9040 }
9041 /* If Then. */
9042 s->condexec_cond = (insn >> 4) & 0xe;
9043 s->condexec_mask = insn & 0x1f;
9044 /* No actual code generated for this insn, just set up state. */
9045 break;
9046
06c949e6 9047 case 0xe: /* bkpt */
bc4a0de0 9048 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9049 break;
9050
9ee6e8bb
PB
9051 case 0xa: /* rev */
9052 ARCH(6);
9053 rn = (insn >> 3) & 0x7;
9054 rd = insn & 0x7;
b0109805 9055 tmp = load_reg(s, rn);
9ee6e8bb 9056 switch ((insn >> 6) & 3) {
66896cb8 9057 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9058 case 1: gen_rev16(tmp); break;
9059 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9060 default: goto illegal_op;
9061 }
b0109805 9062 store_reg(s, rd, tmp);
9ee6e8bb
PB
9063 break;
9064
9065 case 6: /* cps */
9066 ARCH(6);
9067 if (IS_USER(s))
9068 break;
9069 if (IS_M(env)) {
8984bd2e 9070 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9071 /* PRIMASK */
8984bd2e
PB
9072 if (insn & 1) {
9073 addr = tcg_const_i32(16);
9074 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9075 tcg_temp_free_i32(addr);
8984bd2e 9076 }
9ee6e8bb 9077 /* FAULTMASK */
8984bd2e
PB
9078 if (insn & 2) {
9079 addr = tcg_const_i32(17);
9080 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9081 tcg_temp_free_i32(addr);
8984bd2e 9082 }
b75263d6 9083 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9084 gen_lookup_tb(s);
9085 } else {
9086 if (insn & (1 << 4))
9087 shift = CPSR_A | CPSR_I | CPSR_F;
9088 else
9089 shift = 0;
fa26df03 9090 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9091 }
9092 break;
9093
99c475ab
FB
9094 default:
9095 goto undef;
9096 }
9097 break;
9098
9099 case 12:
9100 /* load/store multiple */
9101 rn = (insn >> 8) & 0x7;
b0109805 9102 addr = load_reg(s, rn);
99c475ab
FB
9103 for (i = 0; i < 8; i++) {
9104 if (insn & (1 << i)) {
99c475ab
FB
9105 if (insn & (1 << 11)) {
9106 /* load */
b0109805
PB
9107 tmp = gen_ld32(addr, IS_USER(s));
9108 store_reg(s, i, tmp);
99c475ab
FB
9109 } else {
9110 /* store */
b0109805
PB
9111 tmp = load_reg(s, i);
9112 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9113 }
5899f386 9114 /* advance to the next address */
b0109805 9115 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9116 }
9117 }
5899f386 9118 /* Base register writeback. */
b0109805
PB
9119 if ((insn & (1 << rn)) == 0) {
9120 store_reg(s, rn, addr);
9121 } else {
7d1b0095 9122 tcg_temp_free_i32(addr);
b0109805 9123 }
99c475ab
FB
9124 break;
9125
9126 case 13:
9127 /* conditional branch or swi */
9128 cond = (insn >> 8) & 0xf;
9129 if (cond == 0xe)
9130 goto undef;
9131
9132 if (cond == 0xf) {
9133 /* swi */
422ebf69 9134 gen_set_pc_im(s->pc);
9ee6e8bb 9135 s->is_jmp = DISAS_SWI;
99c475ab
FB
9136 break;
9137 }
9138 /* generate a conditional jump to next instruction */
e50e6a20 9139 s->condlabel = gen_new_label();
d9ba4830 9140 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9141 s->condjmp = 1;
99c475ab
FB
9142
9143 /* jump to the offset */
5899f386 9144 val = (uint32_t)s->pc + 2;
99c475ab 9145 offset = ((int32_t)insn << 24) >> 24;
5899f386 9146 val += offset << 1;
8aaca4c0 9147 gen_jmp(s, val);
99c475ab
FB
9148 break;
9149
9150 case 14:
358bf29e 9151 if (insn & (1 << 11)) {
9ee6e8bb
PB
9152 if (disas_thumb2_insn(env, s, insn))
9153 goto undef32;
358bf29e
PB
9154 break;
9155 }
9ee6e8bb 9156 /* unconditional branch */
99c475ab
FB
9157 val = (uint32_t)s->pc;
9158 offset = ((int32_t)insn << 21) >> 21;
9159 val += (offset << 1) + 2;
8aaca4c0 9160 gen_jmp(s, val);
99c475ab
FB
9161 break;
9162
9163 case 15:
9ee6e8bb 9164 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9165 goto undef32;
9ee6e8bb 9166 break;
99c475ab
FB
9167 }
9168 return;
9ee6e8bb 9169undef32:
bc4a0de0 9170 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9171 return;
9172illegal_op:
99c475ab 9173undef:
bc4a0de0 9174 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9175}
9176
2c0262af
FB
9177/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9178 basic block 'tb'. If search_pc is TRUE, also generate PC
9179 information for each intermediate instruction. */
2cfc5f17
TS
9180static inline void gen_intermediate_code_internal(CPUState *env,
9181 TranslationBlock *tb,
9182 int search_pc)
2c0262af
FB
9183{
9184 DisasContext dc1, *dc = &dc1;
a1d1bb31 9185 CPUBreakpoint *bp;
2c0262af
FB
9186 uint16_t *gen_opc_end;
9187 int j, lj;
0fa85d43 9188 target_ulong pc_start;
b5ff1b31 9189 uint32_t next_page_start;
2e70f6ef
PB
9190 int num_insns;
9191 int max_insns;
3b46e624 9192
2c0262af 9193 /* generate intermediate code */
0fa85d43 9194 pc_start = tb->pc;
3b46e624 9195
2c0262af
FB
9196 dc->tb = tb;
9197
2c0262af 9198 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9199
9200 dc->is_jmp = DISAS_NEXT;
9201 dc->pc = pc_start;
8aaca4c0 9202 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9203 dc->condjmp = 0;
7204ab88 9204 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9205 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9206 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9207#if !defined(CONFIG_USER_ONLY)
61f74d6a 9208 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9209#endif
5df8bac1 9210 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9211 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9212 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9213 cpu_F0s = tcg_temp_new_i32();
9214 cpu_F1s = tcg_temp_new_i32();
9215 cpu_F0d = tcg_temp_new_i64();
9216 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9217 cpu_V0 = cpu_F0d;
9218 cpu_V1 = cpu_F1d;
e677137d 9219 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9220 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9221 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9222 lj = -1;
2e70f6ef
PB
9223 num_insns = 0;
9224 max_insns = tb->cflags & CF_COUNT_MASK;
9225 if (max_insns == 0)
9226 max_insns = CF_COUNT_MASK;
9227
9228 gen_icount_start();
e12ce78d 9229
3849902c
PM
9230 tcg_clear_temp_count();
9231
e12ce78d
PM
9232 /* A note on handling of the condexec (IT) bits:
9233 *
9234 * We want to avoid the overhead of having to write the updated condexec
9235 * bits back to the CPUState for every instruction in an IT block. So:
9236 * (1) if the condexec bits are not already zero then we write
9237 * zero back into the CPUState now. This avoids complications trying
9238 * to do it at the end of the block. (For example if we don't do this
9239 * it's hard to identify whether we can safely skip writing condexec
9240 * at the end of the TB, which we definitely want to do for the case
9241 * where a TB doesn't do anything with the IT state at all.)
9242 * (2) if we are going to leave the TB then we call gen_set_condexec()
9243 * which will write the correct value into CPUState if zero is wrong.
9244 * This is done both for leaving the TB at the end, and for leaving
9245 * it because of an exception we know will happen, which is done in
9246 * gen_exception_insn(). The latter is necessary because we need to
9247 * leave the TB with the PC/IT state just prior to execution of the
9248 * instruction which caused the exception.
9249 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9250 * then the CPUState will be wrong and we need to reset it.
9251 * This is handled in the same way as restoration of the
9252 * PC in these situations: we will be called again with search_pc=1
9253 * and generate a mapping of the condexec bits for each PC in
9254 * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
9255 * the condexec bits.
9256 *
9257 * Note that there are no instructions which can read the condexec
9258 * bits, and none which can write non-static values to them, so
9259 * we don't need to care about whether CPUState is correct in the
9260 * middle of a TB.
9261 */
9262
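 /* Worked example of the IT machinery described above (illustrative):
  * for "ite eq" the IT insn stores condexec_cond = 0x0 (EQ) and
  * condexec_mask = 0x0c (firstcond<0>:mask). The per-insn advance in
  * the loop below then gives cond = EQ, mask = 0x18 for the first
  * instruction of the block, cond = NE, mask = 0x10 for the second,
  * and mask = 0 (block finished) after that. */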
9ee6e8bb
PB
9263 /* Reset the conditional execution bits immediately. This avoids
9264 complications trying to do it at the end of the block. */
98eac7ca 9265 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9266 {
7d1b0095 9267 TCGv tmp = tcg_temp_new_i32();
8f01245e 9268 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9269 store_cpu_field(tmp, condexec_bits);
8f01245e 9270 }
2c0262af 9271 do {
fbb4a2e3
PB
9272#ifdef CONFIG_USER_ONLY
9273 /* Intercept jump to the magic kernel page. */
9274 if (dc->pc >= 0xffff0000) {
9275 /* We always get here via a jump, so we know we are not in a
9276 conditional execution block. */
9277 gen_exception(EXCP_KERNEL_TRAP);
9278 dc->is_jmp = DISAS_UPDATE;
9279 break;
9280 }
9281#else
9ee6e8bb
PB
9282 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9283 /* We always get here via a jump, so we know we are not in a
9284 conditional execution block. */
d9ba4830 9285 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9286 dc->is_jmp = DISAS_UPDATE;
9287 break;
9ee6e8bb
PB
9288 }
9289#endif
9290
72cf2d4f
BS
9291 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9292 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9293 if (bp->pc == dc->pc) {
bc4a0de0 9294 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9295 /* Advance PC so that clearing the breakpoint will
9296 invalidate this TB. */
9297 dc->pc += 2;
9298 goto done_generating;
1fddef4b
FB
9299 break;
9300 }
9301 }
9302 }
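/* In the search_pc pass, record for each generated-op index the guest PC,
 * the packed condexec bits and the instruction count, so that gen_pc_load()
 * can restore CPU state when an instruction faults. */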
2c0262af
FB
9303 if (search_pc) {
9304 j = gen_opc_ptr - gen_opc_buf;
9305 if (lj < j) {
9306 lj++;
9307 while (lj < j)
9308 gen_opc_instr_start[lj++] = 0;
9309 }
0fa85d43 9310 gen_opc_pc[lj] = dc->pc;
e12ce78d 9311 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9312 gen_opc_instr_start[lj] = 1;
2e70f6ef 9313 gen_opc_icount[lj] = num_insns;
2c0262af 9314 }
e50e6a20 9315
2e70f6ef
PB
9316 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9317 gen_io_start();
9318
5642463a
PM
9319 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9320 tcg_gen_debug_insn_start(dc->pc);
9321 }
9322
7204ab88 9323 if (dc->thumb) {
9ee6e8bb
PB
9324 disas_thumb_insn(env, dc);
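/* Advance the IT state: the next mask bit moves into the low bit of
 * the condition and the mask shifts up; once the mask is empty the
 * IT block has finished. */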
9325 if (dc->condexec_mask) {
9326 dc->condexec_cond = (dc->condexec_cond & 0xe)
9327 | ((dc->condexec_mask >> 4) & 1);
9328 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9329 if (dc->condexec_mask == 0) {
9330 dc->condexec_cond = 0;
9331 }
9332 }
9333 } else {
9334 disas_arm_insn(env, dc);
9335 }
e50e6a20
FB
9336
9337 if (dc->condjmp && !dc->is_jmp) {
9338 gen_set_label(dc->condlabel);
9339 dc->condjmp = 0;
9340 }
3849902c
PM
9341
9342 if (tcg_check_temp_count()) {
9343 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9344 }
9345
aaf2d97d 9346 /* Translation stops when a conditional branch is encountered.
e50e6a20 9347 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9348 * Also stop translation when a page boundary is reached. This
bf20dc07 9349 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9350 num_insns ++;
1fddef4b
FB
9351 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9352 !env->singlestep_enabled &&
1b530a6d 9353 !singlestep &&
2e70f6ef
PB
9354 dc->pc < next_page_start &&
9355 num_insns < max_insns);
9356
9357 if (tb->cflags & CF_LAST_IO) {
9358 if (dc->condjmp) {
9359 /* FIXME: This can theoretically happen with self-modifying
9360 code. */
9361 cpu_abort(env, "IO on conditional branch instruction");
9362 }
9363 gen_io_end();
9364 }
9ee6e8bb 9365
b5ff1b31 9366 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9367 instruction was a conditional branch or trap, and the PC has
9368 already been written. */
551bd27f 9369 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9370 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9371 if (dc->condjmp) {
9ee6e8bb
PB
9372 gen_set_condexec(dc);
9373 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9374 gen_exception(EXCP_SWI);
9ee6e8bb 9375 } else {
d9ba4830 9376 gen_exception(EXCP_DEBUG);
9ee6e8bb 9377 }
e50e6a20
FB
9378 gen_set_label(dc->condlabel);
9379 }
9380 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9381 gen_set_pc_im(dc->pc);
e50e6a20 9382 dc->condjmp = 0;
8aaca4c0 9383 }
9ee6e8bb
PB
9384 gen_set_condexec(dc);
9385 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9386 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9387 } else {
9388 /* FIXME: Single stepping a WFI insn will not halt
9389 the CPU. */
d9ba4830 9390 gen_exception(EXCP_DEBUG);
9ee6e8bb 9391 }
8aaca4c0 9392 } else {
9ee6e8bb
PB
9393 /* While branches must always occur at the end of an IT block,
9394 there are a few other things that can cause us to terminate
9395 the TB in the middle of an IT block:
9396 - Exception generating instructions (bkpt, swi, undefined).
9397 - Page boundaries.
9398 - Hardware watchpoints.
9399 Hardware breakpoints have already been handled and skip this code.
9400 */
9401 gen_set_condexec(dc);
8aaca4c0 9402 switch(dc->is_jmp) {
8aaca4c0 9403 case DISAS_NEXT:
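/* Translation stopped without a branch (op buffer full, page boundary
 or icount limit): chain to a TB starting at the next instruction. */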
6e256c93 9404 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9405 break;
9406 default:
9407 case DISAS_JUMP:
9408 case DISAS_UPDATE:
9409 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9410 tcg_gen_exit_tb(0);
8aaca4c0
FB
9411 break;
9412 case DISAS_TB_JUMP:
9413 /* nothing more to generate */
9414 break;
9ee6e8bb 9415 case DISAS_WFI:
d9ba4830 9416 gen_helper_wfi();
9ee6e8bb
PB
9417 break;
9418 case DISAS_SWI:
d9ba4830 9419 gen_exception(EXCP_SWI);
9ee6e8bb 9420 break;
8aaca4c0 9421 }
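/* If the last instruction was conditionally skipped, resolve its label
 here so the skipped path falls through and chains to the next TB. */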
e50e6a20
FB
9422 if (dc->condjmp) {
9423 gen_set_label(dc->condlabel);
9ee6e8bb 9424 gen_set_condexec(dc);
6e256c93 9425 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9426 dc->condjmp = 0;
9427 }
2c0262af 9428 }
2e70f6ef 9429
9ee6e8bb 9430done_generating:
2e70f6ef 9431 gen_icount_end(tb, num_insns);
2c0262af
FB
9432 *gen_opc_ptr = INDEX_op_end;
9433
9434#ifdef DEBUG_DISAS
8fec2b8c 9435 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9436 qemu_log("----------------\n");
9437 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9438 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9439 qemu_log("\n");
2c0262af
FB
9440 }
9441#endif
b5ff1b31
FB
9442 if (search_pc) {
9443 j = gen_opc_ptr - gen_opc_buf;
9444 lj++;
9445 while (lj <= j)
9446 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9447 } else {
2c0262af 9448 tb->size = dc->pc - pc_start;
2e70f6ef 9449 tb->icount = num_insns;
b5ff1b31 9450 }
2c0262af
FB
9451}
9452
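/* The two public entry points differ only in the last argument, which
 selects the search_pc bookkeeping pass used for fault recovery. */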
2cfc5f17 9453void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9454{
2cfc5f17 9455 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9456}
9457
2cfc5f17 9458void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9459{
2cfc5f17 9460 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9461}
9462
b5ff1b31
FB
9463static const char *cpu_mode_names[16] = {
9464 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9465 "???", "???", "???", "und", "???", "???", "???", "sys"
9466};
9ee6e8bb 9467
9a78eead 9468void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9469 int flags)
2c0262af
FB
9470{
9471 int i;
06e80fc9 9472#if 0
bc380d17 9473 union {
b7bcbe95
FB
9474 uint32_t i;
9475 float s;
9476 } s0, s1;
9477 CPU_DoubleU d;
a94a6abf
PB
9478 /* ??? This assumes float64 and double have the same layout.
9479 Oh well, it's only debug dumps. */
9480 union {
9481 float64 f64;
9482 double d;
9483 } d0;
06e80fc9 9484#endif
b5ff1b31 9485 uint32_t psr;
2c0262af
FB
9486
9487 for(i=0;i<16;i++) {
7fe48483 9488 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9489 if ((i % 4) == 3)
7fe48483 9490 cpu_fprintf(f, "\n");
2c0262af 9491 else
7fe48483 9492 cpu_fprintf(f, " ");
2c0262af 9493 }
b5ff1b31 9494 psr = cpsr_read(env);
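/* Flags line: N/Z/C/V condition flags, T for Thumb state (A for ARM),
 the current mode name, and whether the mode is 32-bit or legacy 26-bit. */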
687fa640
TS
9495 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9496 psr,
b5ff1b31
FB
9497 psr & (1 << 31) ? 'N' : '-',
9498 psr & (1 << 30) ? 'Z' : '-',
9499 psr & (1 << 29) ? 'C' : '-',
9500 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9501 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9502 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9503
5e3f878a 9504#if 0
b7bcbe95 9505 for (i = 0; i < 16; i++) {
8e96005d
FB
9506 d.d = env->vfp.regs[i];
9507 s0.i = d.l.lower;
9508 s1.i = d.l.upper;
a94a6abf
PB
9509 d0.f64 = d.d;
9510 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9511 i * 2, (int)s0.i, s0.s,
a94a6abf 9512 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9513 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9514 d0.d);
b7bcbe95 9515 }
40f137e1 9516 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9517#endif
2c0262af 9518}
a6b025d3 9519
d2856f1a
AJ
9520void gen_pc_load(CPUState *env, TranslationBlock *tb,
9521 unsigned long searched_pc, int pc_pos, void *puc)
9522{
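/* Restore the guest PC and IT bits recorded for this op index by the
 search_pc pass (see the condexec note in gen_intermediate_code_internal). */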
9523 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 9524 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 9525}