2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
28#include "exec-all.h"
29#include "disas.h"
57fec1fe 30#include "tcg-op.h"
79383c9c 31#include "qemu-log.h"
1497c961 32
a7812ae4 33#include "helpers.h"
1497c961 34#define GEN_HELPER 1
b26eefb6 35#include "helpers.h"
2c0262af 36
9ee6e8bb
PB
37#define ENABLE_ARCH_5J 0
38#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 42
86753403 43#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 44
2c0262af
FB
45/* internal defines */
46typedef struct DisasContext {
0fa85d43 47 target_ulong pc;
2c0262af 48 int is_jmp;
e50e6a20
FB
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
9ee6e8bb
PB
 53 /* Thumb-2 conditional execution bits. */
54 int condexec_mask;
55 int condexec_cond;
2c0262af 56 struct TranslationBlock *tb;
8aaca4c0 57 int singlestep_enabled;
5899f386 58 int thumb;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
5df8bac1 62 int vfp_enabled;
69d1fc22
PM
63 int vec_len;
64 int vec_stride;
2c0262af
FB
65} DisasContext;
66
e12ce78d
PM
67static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
68
b5ff1b31
FB
69#if defined(CONFIG_USER_ONLY)
70#define IS_USER(s) 1
71#else
72#define IS_USER(s) (s->user)
73#endif
74
9ee6e8bb
PB
75/* These instructions trap after executing, so defer them until after the
 76 conditional execution state has been updated. */
77#define DISAS_WFI 4
78#define DISAS_SWI 5
2c0262af 79
a7812ae4 80static TCGv_ptr cpu_env;
ad69471c 81/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 82static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 83static TCGv_i32 cpu_R[16];
426f5abc
PB
84static TCGv_i32 cpu_exclusive_addr;
85static TCGv_i32 cpu_exclusive_val;
86static TCGv_i32 cpu_exclusive_high;
87#ifdef CONFIG_USER_ONLY
88static TCGv_i32 cpu_exclusive_test;
89static TCGv_i32 cpu_exclusive_info;
90#endif
ad69471c 91
b26eefb6 92/* FIXME: These should be removed. */
a7812ae4
PB
93static TCGv cpu_F0s, cpu_F1s;
94static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 95
2e70f6ef
PB
96#include "gen-icount.h"
97
155c3eac
FN
98static const char *regnames[] =
99 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
100 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
101
b26eefb6
PB
102/* initialize TCG globals. */
103void arm_translate_init(void)
104{
155c3eac
FN
105 int i;
106
a7812ae4
PB
107 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
108
155c3eac
FN
109 for (i = 0; i < 16; i++) {
110 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
111 offsetof(CPUState, regs[i]),
112 regnames[i]);
113 }
426f5abc
PB
114 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, exclusive_addr), "exclusive_addr");
116 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, exclusive_val), "exclusive_val");
118 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_high), "exclusive_high");
120#ifdef CONFIG_USER_ONLY
121 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_test), "exclusive_test");
123 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUState, exclusive_info), "exclusive_info");
125#endif
155c3eac 126
a7812ae4
PB
127#define GEN_HELPER 2
128#include "helpers.h"
b26eefb6
PB
129}
130
d9ba4830
PB
131static inline TCGv load_cpu_offset(int offset)
132{
7d1b0095 133 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
134 tcg_gen_ld_i32(tmp, cpu_env, offset);
135 return tmp;
136}
137
138#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
139
140static inline void store_cpu_offset(TCGv var, int offset)
141{
142 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 143 tcg_temp_free_i32(var);
d9ba4830
PB
144}
145
146#define store_cpu_field(var, name) \
147 store_cpu_offset(var, offsetof(CPUState, name))
148
b26eefb6
PB
149/* Set a variable to the value of a CPU register. */
150static void load_reg_var(DisasContext *s, TCGv var, int reg)
151{
152 if (reg == 15) {
153 uint32_t addr;
 154 /* normally, since we have already updated the PC, we only need to add one insn */
155 if (s->thumb)
156 addr = (long)s->pc + 2;
157 else
158 addr = (long)s->pc + 4;
159 tcg_gen_movi_i32(var, addr);
160 } else {
155c3eac 161 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
162 }
163}
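/* Worked example (illustrative): reading r15 through load_reg_var() yields
 * the architectural PC value, i.e. the address of the current instruction
 * plus 8 in ARM state or plus 4 in Thumb state.  s->pc already points past
 * the current instruction (insn address + 4 or + 2), so adding one more
 * instruction's length here gives that value; an ARM instruction at 0x8000
 * that reads r15 therefore sees 0x8008.
 */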
164
165/* Create a new temporary and set it to the value of a CPU register. */
166static inline TCGv load_reg(DisasContext *s, int reg)
167{
7d1b0095 168 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
169 load_reg_var(s, tmp, reg);
170 return tmp;
171}
172
173/* Set a CPU register. The source must be a temporary and will be
174 marked as dead. */
175static void store_reg(DisasContext *s, int reg, TCGv var)
176{
177 if (reg == 15) {
178 tcg_gen_andi_i32(var, var, ~1);
179 s->is_jmp = DISAS_JUMP;
180 }
155c3eac 181 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 182 tcg_temp_free_i32(var);
b26eefb6
PB
183}
184
b26eefb6 185/* Value extensions. */
86831435
PB
186#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
187#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
188#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
189#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
190
1497c961
PB
191#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
192#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 193
b26eefb6 194
b75263d6
JR
195static inline void gen_set_cpsr(TCGv var, uint32_t mask)
196{
197 TCGv tmp_mask = tcg_const_i32(mask);
198 gen_helper_cpsr_write(var, tmp_mask);
199 tcg_temp_free_i32(tmp_mask);
200}
d9ba4830
PB
201/* Set NZCV flags from the high 4 bits of var. */
202#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
203
204static void gen_exception(int excp)
205{
7d1b0095 206 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
207 tcg_gen_movi_i32(tmp, excp);
208 gen_helper_exception(tmp);
7d1b0095 209 tcg_temp_free_i32(tmp);
d9ba4830
PB
210}
211
3670669c
PB
212static void gen_smul_dual(TCGv a, TCGv b)
213{
7d1b0095
PM
214 TCGv tmp1 = tcg_temp_new_i32();
215 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
216 tcg_gen_ext16s_i32(tmp1, a);
217 tcg_gen_ext16s_i32(tmp2, b);
3670669c 218 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 219 tcg_temp_free_i32(tmp2);
3670669c
PB
220 tcg_gen_sari_i32(a, a, 16);
221 tcg_gen_sari_i32(b, b, 16);
222 tcg_gen_mul_i32(b, b, a);
223 tcg_gen_mov_i32(a, tmp1);
7d1b0095 224 tcg_temp_free_i32(tmp1);
3670669c
PB
225}
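/* Illustrative example: with a = 0x00010002 and b = 0x00030004,
 * gen_smul_dual() leaves 2 * 4 = 8 (the product of the low halfwords) in a
 * and 1 * 3 = 3 (the product of the high halfwords) in b.  The caller then
 * adds or subtracts the two products as required by, e.g., the
 * SMUAD/SMUSD style dual-multiply instructions.
 */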
226
227/* Byteswap each halfword. */
228static void gen_rev16(TCGv var)
229{
7d1b0095 230 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
231 tcg_gen_shri_i32(tmp, var, 8);
232 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
233 tcg_gen_shli_i32(var, var, 8);
234 tcg_gen_andi_i32(var, var, 0xff00ff00);
235 tcg_gen_or_i32(var, var, tmp);
7d1b0095 236 tcg_temp_free_i32(tmp);
3670669c
PB
237}
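/* Illustrative example: gen_rev16() swaps the bytes within each halfword,
 * so 0x11223344 becomes 0x22114433 (0x1122 -> 0x2211, 0x3344 -> 0x4433).
 */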
238
239/* Byteswap low halfword and sign extend. */
240static void gen_revsh(TCGv var)
241{
1a855029
AJ
242 tcg_gen_ext16u_i32(var, var);
243 tcg_gen_bswap16_i32(var, var);
244 tcg_gen_ext16s_i32(var, var);
3670669c
PB
245}
246
247/* Unsigned bitfield extract. */
248static void gen_ubfx(TCGv var, int shift, uint32_t mask)
249{
250 if (shift)
251 tcg_gen_shri_i32(var, var, shift);
252 tcg_gen_andi_i32(var, var, mask);
253}
254
255/* Signed bitfield extract. */
256static void gen_sbfx(TCGv var, int shift, int width)
257{
258 uint32_t signbit;
259
260 if (shift)
261 tcg_gen_sari_i32(var, var, shift);
262 if (shift + width < 32) {
263 signbit = 1u << (width - 1);
264 tcg_gen_andi_i32(var, var, (1u << width) - 1);
265 tcg_gen_xori_i32(var, var, signbit);
266 tcg_gen_subi_i32(var, var, signbit);
267 }
268}
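/* Worked example (illustrative): extracting a 4-bit signed field whose raw
 * bits are 0b1010 gives var = 0xa after masking, with signbit = 0x8.  The
 * xor/sub pair then computes (0xa ^ 0x8) - 0x8 = 0x2 - 0x8 = -6, i.e.
 * 0xfffffffa: the field correctly sign-extended to 32 bits without a branch.
 */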
269
270/* Bitfield insertion. Insert val into base. Clobbers base and val. */
271static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
272{
3670669c 273 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
274 tcg_gen_shli_i32(val, val, shift);
275 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
276 tcg_gen_or_i32(dest, base, val);
 277}
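/* Worked example (illustrative): gen_bfi(dest, base, val, 8, 0xff) with
 * base = 0xffffffff and val = 0x5 masks val to 0x05, shifts it to 0x500,
 * clears bits [15:8] of base (giving 0xffff00ff) and ORs, so dest becomes
 * 0xffff05ff.
 */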
278
838fa72d
AJ
 279/* Return (b << 32) + a. Mark inputs as dead. */
280static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 281{
838fa72d
AJ
282 TCGv_i64 tmp64 = tcg_temp_new_i64();
283
284 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 285 tcg_temp_free_i32(b);
838fa72d
AJ
286 tcg_gen_shli_i64(tmp64, tmp64, 32);
287 tcg_gen_add_i64(a, tmp64, a);
288
289 tcg_temp_free_i64(tmp64);
290 return a;
291}
292
293/* Return (b << 32) - a. Mark inputs as dead. */
294static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
295{
296 TCGv_i64 tmp64 = tcg_temp_new_i64();
297
298 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 299 tcg_temp_free_i32(b);
838fa72d
AJ
300 tcg_gen_shli_i64(tmp64, tmp64, 32);
301 tcg_gen_sub_i64(a, tmp64, a);
302
303 tcg_temp_free_i64(tmp64);
304 return a;
3670669c
PB
305}
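/* Illustrative note: gen_addq_msw()/gen_subq_msw() place the 32-bit operand
 * b in the upper half of a 64-bit value before adding the 64-bit product a
 * to it or subtracting a from it, e.g. gen_addq_msw(0x0000000100000000, 2)
 * yields 0x0000000300000000.  This is the accumulate step used when only
 * the most-significant word of the result is kept (the SMMLA/SMMLS style
 * operations).
 */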
306
8f01245e
PB
307/* FIXME: Most targets have native widening multiplication.
308 It would be good to use that instead of a full wide multiply. */
5e3f878a 309/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 310static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 311{
a7812ae4
PB
312 TCGv_i64 tmp1 = tcg_temp_new_i64();
313 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
314
315 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 316 tcg_temp_free_i32(a);
5e3f878a 317 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 318 tcg_temp_free_i32(b);
5e3f878a 319 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 320 tcg_temp_free_i64(tmp2);
5e3f878a
PB
321 return tmp1;
322}
323
a7812ae4 324static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 325{
a7812ae4
PB
326 TCGv_i64 tmp1 = tcg_temp_new_i64();
327 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
328
329 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 330 tcg_temp_free_i32(a);
5e3f878a 331 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 332 tcg_temp_free_i32(b);
5e3f878a 333 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 334 tcg_temp_free_i64(tmp2);
5e3f878a
PB
335 return tmp1;
336}
337
8f01245e
PB
338/* Swap low and high halfwords. */
339static void gen_swap_half(TCGv var)
340{
7d1b0095 341 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
342 tcg_gen_shri_i32(tmp, var, 16);
343 tcg_gen_shli_i32(var, var, 16);
344 tcg_gen_or_i32(var, var, tmp);
7d1b0095 345 tcg_temp_free_i32(tmp);
8f01245e
PB
346}
347
b26eefb6
PB
 348/* Dual 16-bit add. The result is placed in t0 and t1 is marked as dead.
349 tmp = (t0 ^ t1) & 0x8000;
350 t0 &= ~0x8000;
351 t1 &= ~0x8000;
352 t0 = (t0 + t1) ^ tmp;
353 */
354
355static void gen_add16(TCGv t0, TCGv t1)
356{
7d1b0095 357 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
358 tcg_gen_xor_i32(tmp, t0, t1);
359 tcg_gen_andi_i32(tmp, tmp, 0x8000);
360 tcg_gen_andi_i32(t0, t0, ~0x8000);
361 tcg_gen_andi_i32(t1, t1, ~0x8000);
362 tcg_gen_add_i32(t0, t0, t1);
363 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
364 tcg_temp_free_i32(tmp);
365 tcg_temp_free_i32(t1);
b26eefb6
PB
366}
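/* Worked example (illustrative): t0 = 0x0001ffff, t1 = 0x00000001.  Then
 * tmp = 0x8000, the operands become 0x00017fff and 0x00000001, their sum is
 * 0x00018000, and xoring with tmp gives 0x00010000: the carry out of the
 * low halfword is discarded instead of rippling into the high halfword, as
 * a dual 16-bit add requires.
 */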
367
9a119ff6
PB
368#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
369
b26eefb6
PB
370/* Set CF to the top bit of var. */
371static void gen_set_CF_bit31(TCGv var)
372{
7d1b0095 373 TCGv tmp = tcg_temp_new_i32();
b26eefb6 374 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 375 gen_set_CF(tmp);
7d1b0095 376 tcg_temp_free_i32(tmp);
b26eefb6
PB
377}
378
379/* Set N and Z flags from var. */
380static inline void gen_logic_CC(TCGv var)
381{
6fbe23d5
PB
382 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
383 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
384}
385
386/* T0 += T1 + CF. */
396e467c 387static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 388{
d9ba4830 389 TCGv tmp;
396e467c 390 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 391 tmp = load_cpu_field(CF);
396e467c 392 tcg_gen_add_i32(t0, t0, tmp);
7d1b0095 393 tcg_temp_free_i32(tmp);
b26eefb6
PB
394}
395
e9bb4aa9
JR
396/* dest = T0 + T1 + CF. */
397static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
398{
399 TCGv tmp;
400 tcg_gen_add_i32(dest, t0, t1);
401 tmp = load_cpu_field(CF);
402 tcg_gen_add_i32(dest, dest, tmp);
7d1b0095 403 tcg_temp_free_i32(tmp);
e9bb4aa9
JR
404}
405
3670669c
PB
406/* dest = T0 - T1 + CF - 1. */
407static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
408{
d9ba4830 409 TCGv tmp;
3670669c 410 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 411 tmp = load_cpu_field(CF);
3670669c
PB
412 tcg_gen_add_i32(dest, dest, tmp);
413 tcg_gen_subi_i32(dest, dest, 1);
7d1b0095 414 tcg_temp_free_i32(tmp);
3670669c
PB
415}
416
ad69471c
PB
417/* FIXME: Implement this natively. */
418#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
419
9a119ff6 420static void shifter_out_im(TCGv var, int shift)
b26eefb6 421{
7d1b0095 422 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
423 if (shift == 0) {
424 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 425 } else {
9a119ff6 426 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 427 if (shift != 31)
9a119ff6
PB
428 tcg_gen_andi_i32(tmp, tmp, 1);
429 }
430 gen_set_CF(tmp);
7d1b0095 431 tcg_temp_free_i32(tmp);
9a119ff6 432}
b26eefb6 433
9a119ff6
PB
434/* Shift by immediate. Includes special handling for shift == 0. */
435static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
436{
437 switch (shiftop) {
438 case 0: /* LSL */
439 if (shift != 0) {
440 if (flags)
441 shifter_out_im(var, 32 - shift);
442 tcg_gen_shli_i32(var, var, shift);
443 }
444 break;
445 case 1: /* LSR */
446 if (shift == 0) {
447 if (flags) {
448 tcg_gen_shri_i32(var, var, 31);
449 gen_set_CF(var);
450 }
451 tcg_gen_movi_i32(var, 0);
452 } else {
453 if (flags)
454 shifter_out_im(var, shift - 1);
455 tcg_gen_shri_i32(var, var, shift);
456 }
457 break;
458 case 2: /* ASR */
459 if (shift == 0)
460 shift = 32;
461 if (flags)
462 shifter_out_im(var, shift - 1);
463 if (shift == 32)
464 shift = 31;
465 tcg_gen_sari_i32(var, var, shift);
466 break;
467 case 3: /* ROR/RRX */
468 if (shift != 0) {
469 if (flags)
470 shifter_out_im(var, shift - 1);
f669df27 471 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 472 } else {
d9ba4830 473 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
474 if (flags)
475 shifter_out_im(var, 0);
476 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
477 tcg_gen_shli_i32(tmp, tmp, 31);
478 tcg_gen_or_i32(var, var, tmp);
7d1b0095 479 tcg_temp_free_i32(tmp);
b26eefb6
PB
480 }
481 }
482};
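/* Note on the immediate-shift encoding handled above: a shift amount of 0
 * is special-cased because the ARM encoding reuses it.  LSR #0 and ASR #0
 * encode LSR #32 and ASR #32 respectively, and ROR #0 encodes RRX (rotate
 * right by one through the carry flag), which is why the ROR case falls
 * back to shifting in the old CF value.
 */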
483
8984bd2e
PB
484static inline void gen_arm_shift_reg(TCGv var, int shiftop,
485 TCGv shift, int flags)
486{
487 if (flags) {
488 switch (shiftop) {
489 case 0: gen_helper_shl_cc(var, var, shift); break;
490 case 1: gen_helper_shr_cc(var, var, shift); break;
491 case 2: gen_helper_sar_cc(var, var, shift); break;
492 case 3: gen_helper_ror_cc(var, var, shift); break;
493 }
494 } else {
495 switch (shiftop) {
496 case 0: gen_helper_shl(var, var, shift); break;
497 case 1: gen_helper_shr(var, var, shift); break;
498 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
499 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
500 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
501 }
502 }
7d1b0095 503 tcg_temp_free_i32(shift);
8984bd2e
PB
504}
505
6ddbc6e4
PB
506#define PAS_OP(pfx) \
507 switch (op2) { \
508 case 0: gen_pas_helper(glue(pfx,add16)); break; \
509 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
510 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
511 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
512 case 4: gen_pas_helper(glue(pfx,add8)); break; \
513 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
514 }
d9ba4830 515static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 516{
a7812ae4 517 TCGv_ptr tmp;
6ddbc6e4
PB
518
519 switch (op1) {
520#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
521 case 1:
a7812ae4 522 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
523 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
524 PAS_OP(s)
b75263d6 525 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
526 break;
527 case 5:
a7812ae4 528 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
529 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
530 PAS_OP(u)
b75263d6 531 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
532 break;
533#undef gen_pas_helper
534#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
535 case 2:
536 PAS_OP(q);
537 break;
538 case 3:
539 PAS_OP(sh);
540 break;
541 case 6:
542 PAS_OP(uq);
543 break;
544 case 7:
545 PAS_OP(uh);
546 break;
547#undef gen_pas_helper
548 }
549}
9ee6e8bb
PB
550#undef PAS_OP
551
6ddbc6e4
PB
552/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
553#define PAS_OP(pfx) \
ed89a2f1 554 switch (op1) { \
6ddbc6e4
PB
555 case 0: gen_pas_helper(glue(pfx,add8)); break; \
556 case 1: gen_pas_helper(glue(pfx,add16)); break; \
557 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
558 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
559 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
560 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
561 }
d9ba4830 562static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 563{
a7812ae4 564 TCGv_ptr tmp;
6ddbc6e4 565
ed89a2f1 566 switch (op2) {
6ddbc6e4
PB
567#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
568 case 0:
a7812ae4 569 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
570 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
571 PAS_OP(s)
b75263d6 572 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
573 break;
574 case 4:
a7812ae4 575 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
576 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
577 PAS_OP(u)
b75263d6 578 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
579 break;
580#undef gen_pas_helper
581#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
582 case 1:
583 PAS_OP(q);
584 break;
585 case 2:
586 PAS_OP(sh);
587 break;
588 case 5:
589 PAS_OP(uq);
590 break;
591 case 6:
592 PAS_OP(uh);
593 break;
594#undef gen_pas_helper
595 }
596}
9ee6e8bb
PB
597#undef PAS_OP
598
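/* For reference (assumed mapping, matching the cases above): the pfx values
 * correspond to the ARMv6 SIMD variants, 's' being the signed flag-setting
 * form (SADD16 etc., which sets GE), 'q' the signed saturating form, 'sh'
 * the signed halving form, and 'u', 'uq', 'uh' the corresponding unsigned
 * forms.  Only the s/u forms need the pointer to the GE flags, which is why
 * those are the cases that allocate tmp.
 */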
d9ba4830
PB
599static void gen_test_cc(int cc, int label)
600{
601 TCGv tmp;
602 TCGv tmp2;
d9ba4830
PB
603 int inv;
604
d9ba4830
PB
605 switch (cc) {
606 case 0: /* eq: Z */
6fbe23d5 607 tmp = load_cpu_field(ZF);
cb63669a 608 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
609 break;
610 case 1: /* ne: !Z */
6fbe23d5 611 tmp = load_cpu_field(ZF);
cb63669a 612 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
613 break;
614 case 2: /* cs: C */
615 tmp = load_cpu_field(CF);
cb63669a 616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
617 break;
618 case 3: /* cc: !C */
619 tmp = load_cpu_field(CF);
cb63669a 620 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
621 break;
622 case 4: /* mi: N */
6fbe23d5 623 tmp = load_cpu_field(NF);
cb63669a 624 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
625 break;
626 case 5: /* pl: !N */
6fbe23d5 627 tmp = load_cpu_field(NF);
cb63669a 628 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
629 break;
630 case 6: /* vs: V */
631 tmp = load_cpu_field(VF);
cb63669a 632 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
633 break;
634 case 7: /* vc: !V */
635 tmp = load_cpu_field(VF);
cb63669a 636 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
637 break;
638 case 8: /* hi: C && !Z */
639 inv = gen_new_label();
640 tmp = load_cpu_field(CF);
cb63669a 641 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 642 tcg_temp_free_i32(tmp);
6fbe23d5 643 tmp = load_cpu_field(ZF);
cb63669a 644 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
645 gen_set_label(inv);
646 break;
647 case 9: /* ls: !C || Z */
648 tmp = load_cpu_field(CF);
cb63669a 649 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 650 tcg_temp_free_i32(tmp);
6fbe23d5 651 tmp = load_cpu_field(ZF);
cb63669a 652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
653 break;
654 case 10: /* ge: N == V -> N ^ V == 0 */
655 tmp = load_cpu_field(VF);
6fbe23d5 656 tmp2 = load_cpu_field(NF);
d9ba4830 657 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 658 tcg_temp_free_i32(tmp2);
cb63669a 659 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
660 break;
661 case 11: /* lt: N != V -> N ^ V != 0 */
662 tmp = load_cpu_field(VF);
6fbe23d5 663 tmp2 = load_cpu_field(NF);
d9ba4830 664 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 665 tcg_temp_free_i32(tmp2);
cb63669a 666 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
667 break;
668 case 12: /* gt: !Z && N == V */
669 inv = gen_new_label();
6fbe23d5 670 tmp = load_cpu_field(ZF);
cb63669a 671 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 672 tcg_temp_free_i32(tmp);
d9ba4830 673 tmp = load_cpu_field(VF);
6fbe23d5 674 tmp2 = load_cpu_field(NF);
d9ba4830 675 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 676 tcg_temp_free_i32(tmp2);
cb63669a 677 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
678 gen_set_label(inv);
679 break;
680 case 13: /* le: Z || N != V */
6fbe23d5 681 tmp = load_cpu_field(ZF);
cb63669a 682 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 683 tcg_temp_free_i32(tmp);
d9ba4830 684 tmp = load_cpu_field(VF);
6fbe23d5 685 tmp2 = load_cpu_field(NF);
d9ba4830 686 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 687 tcg_temp_free_i32(tmp2);
cb63669a 688 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
689 break;
690 default:
691 fprintf(stderr, "Bad condition code 0x%x\n", cc);
692 abort();
693 }
7d1b0095 694 tcg_temp_free_i32(tmp);
d9ba4830 695}
2c0262af 696
b1d8e52e 697static const uint8_t table_logic_cc[16] = {
2c0262af
FB
698 1, /* and */
699 1, /* xor */
700 0, /* sub */
701 0, /* rsb */
702 0, /* add */
703 0, /* adc */
704 0, /* sbc */
705 0, /* rsc */
706 1, /* andl */
707 1, /* xorl */
708 0, /* cmp */
709 0, /* cmn */
710 1, /* orr */
711 1, /* mov */
712 1, /* bic */
713 1, /* mvn */
714};
3b46e624 715
d9ba4830
PB
716/* Set PC and Thumb state from an immediate address. */
717static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 718{
b26eefb6 719 TCGv tmp;
99c475ab 720
b26eefb6 721 s->is_jmp = DISAS_UPDATE;
d9ba4830 722 if (s->thumb != (addr & 1)) {
7d1b0095 723 tmp = tcg_temp_new_i32();
d9ba4830
PB
724 tcg_gen_movi_i32(tmp, addr & 1);
725 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
7d1b0095 726 tcg_temp_free_i32(tmp);
d9ba4830 727 }
155c3eac 728 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
729}
730
731/* Set PC and Thumb state from var. var is marked as dead. */
732static inline void gen_bx(DisasContext *s, TCGv var)
733{
d9ba4830 734 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
735 tcg_gen_andi_i32(cpu_R[15], var, ~1);
736 tcg_gen_andi_i32(var, var, 1);
737 store_cpu_field(var, thumb);
d9ba4830
PB
738}
739
21aeb343
JR
740/* Variant of store_reg which uses branch&exchange logic when storing
741 to r15 in ARM architecture v7 and above. The source must be a temporary
742 and will be marked as dead. */
743static inline void store_reg_bx(CPUState *env, DisasContext *s,
744 int reg, TCGv var)
745{
746 if (reg == 15 && ENABLE_ARCH_7) {
747 gen_bx(s, var);
748 } else {
749 store_reg(s, reg, var);
750 }
751}
752
b0109805
PB
753static inline TCGv gen_ld8s(TCGv addr, int index)
754{
7d1b0095 755 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
756 tcg_gen_qemu_ld8s(tmp, addr, index);
757 return tmp;
758}
759static inline TCGv gen_ld8u(TCGv addr, int index)
760{
7d1b0095 761 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
762 tcg_gen_qemu_ld8u(tmp, addr, index);
763 return tmp;
764}
765static inline TCGv gen_ld16s(TCGv addr, int index)
766{
7d1b0095 767 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
768 tcg_gen_qemu_ld16s(tmp, addr, index);
769 return tmp;
770}
771static inline TCGv gen_ld16u(TCGv addr, int index)
772{
7d1b0095 773 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
774 tcg_gen_qemu_ld16u(tmp, addr, index);
775 return tmp;
776}
777static inline TCGv gen_ld32(TCGv addr, int index)
778{
7d1b0095 779 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
780 tcg_gen_qemu_ld32u(tmp, addr, index);
781 return tmp;
782}
84496233
JR
783static inline TCGv_i64 gen_ld64(TCGv addr, int index)
784{
785 TCGv_i64 tmp = tcg_temp_new_i64();
786 tcg_gen_qemu_ld64(tmp, addr, index);
787 return tmp;
788}
b0109805
PB
789static inline void gen_st8(TCGv val, TCGv addr, int index)
790{
791 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 792 tcg_temp_free_i32(val);
b0109805
PB
793}
794static inline void gen_st16(TCGv val, TCGv addr, int index)
795{
796 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 797 tcg_temp_free_i32(val);
b0109805
PB
798}
799static inline void gen_st32(TCGv val, TCGv addr, int index)
800{
801 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 802 tcg_temp_free_i32(val);
b0109805 803}
84496233
JR
804static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
805{
806 tcg_gen_qemu_st64(val, addr, index);
807 tcg_temp_free_i64(val);
808}
b5ff1b31 809
5e3f878a
PB
810static inline void gen_set_pc_im(uint32_t val)
811{
155c3eac 812 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
813}
814
b5ff1b31
FB
815/* Force a TB lookup after an instruction that changes the CPU state. */
816static inline void gen_lookup_tb(DisasContext *s)
817{
a6445c52 818 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
819 s->is_jmp = DISAS_UPDATE;
820}
821
b0109805
PB
822static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
823 TCGv var)
2c0262af 824{
1e8d4eec 825 int val, rm, shift, shiftop;
b26eefb6 826 TCGv offset;
2c0262af
FB
827
828 if (!(insn & (1 << 25))) {
829 /* immediate */
830 val = insn & 0xfff;
831 if (!(insn & (1 << 23)))
832 val = -val;
537730b9 833 if (val != 0)
b0109805 834 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
835 } else {
836 /* shift/register */
837 rm = (insn) & 0xf;
838 shift = (insn >> 7) & 0x1f;
1e8d4eec 839 shiftop = (insn >> 5) & 3;
b26eefb6 840 offset = load_reg(s, rm);
9a119ff6 841 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 842 if (!(insn & (1 << 23)))
b0109805 843 tcg_gen_sub_i32(var, var, offset);
2c0262af 844 else
b0109805 845 tcg_gen_add_i32(var, var, offset);
7d1b0095 846 tcg_temp_free_i32(offset);
2c0262af
FB
847 }
848}
849
191f9a93 850static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 851 int extra, TCGv var)
2c0262af
FB
852{
853 int val, rm;
b26eefb6 854 TCGv offset;
3b46e624 855
2c0262af
FB
856 if (insn & (1 << 22)) {
857 /* immediate */
858 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
859 if (!(insn & (1 << 23)))
860 val = -val;
18acad92 861 val += extra;
537730b9 862 if (val != 0)
b0109805 863 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
864 } else {
865 /* register */
191f9a93 866 if (extra)
b0109805 867 tcg_gen_addi_i32(var, var, extra);
2c0262af 868 rm = (insn) & 0xf;
b26eefb6 869 offset = load_reg(s, rm);
2c0262af 870 if (!(insn & (1 << 23)))
b0109805 871 tcg_gen_sub_i32(var, var, offset);
2c0262af 872 else
b0109805 873 tcg_gen_add_i32(var, var, offset);
7d1b0095 874 tcg_temp_free_i32(offset);
2c0262af
FB
875 }
876}
877
4373f3ce
PB
878#define VFP_OP2(name) \
879static inline void gen_vfp_##name(int dp) \
880{ \
881 if (dp) \
882 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
883 else \
884 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
885}
886
4373f3ce
PB
887VFP_OP2(add)
888VFP_OP2(sub)
889VFP_OP2(mul)
890VFP_OP2(div)
891
892#undef VFP_OP2
893
894static inline void gen_vfp_abs(int dp)
895{
896 if (dp)
897 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
898 else
899 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
900}
901
902static inline void gen_vfp_neg(int dp)
903{
904 if (dp)
905 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
906 else
907 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
908}
909
910static inline void gen_vfp_sqrt(int dp)
911{
912 if (dp)
913 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
914 else
915 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
916}
917
918static inline void gen_vfp_cmp(int dp)
919{
920 if (dp)
921 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
922 else
923 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
924}
925
926static inline void gen_vfp_cmpe(int dp)
927{
928 if (dp)
929 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
930 else
931 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
932}
933
934static inline void gen_vfp_F1_ld0(int dp)
935{
936 if (dp)
5b340b51 937 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 938 else
5b340b51 939 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
940}
941
942static inline void gen_vfp_uito(int dp)
943{
944 if (dp)
945 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
946 else
947 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
948}
949
950static inline void gen_vfp_sito(int dp)
951{
952 if (dp)
66230e0d 953 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 954 else
66230e0d 955 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
956}
957
958static inline void gen_vfp_toui(int dp)
959{
960 if (dp)
961 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
962 else
963 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
964}
965
966static inline void gen_vfp_touiz(int dp)
967{
968 if (dp)
969 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
970 else
971 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
972}
973
974static inline void gen_vfp_tosi(int dp)
975{
976 if (dp)
977 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
978 else
979 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
980}
981
982static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
983{
984 if (dp)
4373f3ce 985 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 986 else
4373f3ce
PB
987 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
988}
989
990#define VFP_GEN_FIX(name) \
991static inline void gen_vfp_##name(int dp, int shift) \
992{ \
b75263d6 993 TCGv tmp_shift = tcg_const_i32(shift); \
4373f3ce 994 if (dp) \
b75263d6 995 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
4373f3ce 996 else \
b75263d6
JR
997 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
998 tcg_temp_free_i32(tmp_shift); \
9ee6e8bb 999}
4373f3ce
PB
1000VFP_GEN_FIX(tosh)
1001VFP_GEN_FIX(tosl)
1002VFP_GEN_FIX(touh)
1003VFP_GEN_FIX(toul)
1004VFP_GEN_FIX(shto)
1005VFP_GEN_FIX(slto)
1006VFP_GEN_FIX(uhto)
1007VFP_GEN_FIX(ulto)
1008#undef VFP_GEN_FIX
9ee6e8bb 1009
312eea9f 1010static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1011{
1012 if (dp)
312eea9f 1013 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1014 else
312eea9f 1015 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1016}
1017
312eea9f 1018static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1019{
1020 if (dp)
312eea9f 1021 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1022 else
312eea9f 1023 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1024}
1025
8e96005d
FB
1026static inline long
1027vfp_reg_offset (int dp, int reg)
1028{
1029 if (dp)
1030 return offsetof(CPUARMState, vfp.regs[reg]);
1031 else if (reg & 1) {
1032 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1033 + offsetof(CPU_DoubleU, l.upper);
1034 } else {
1035 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1036 + offsetof(CPU_DoubleU, l.lower);
1037 }
1038}
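/* Illustrative example: the single-precision registers are views onto the
 * halves of the double-precision bank, so vfp_reg_offset(0, 5) resolves to
 * the upper word of vfp.regs[2], i.e. s5 lives in the high half of d2,
 * while vfp_reg_offset(1, 2) addresses the whole of d2.
 */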
9ee6e8bb
PB
1039
1040/* Return the offset of a 32-bit piece of a NEON register.
 1041 Zero is the least significant end of the register. */
1042static inline long
1043neon_reg_offset (int reg, int n)
1044{
1045 int sreg;
1046 sreg = reg * 2 + n;
1047 return vfp_reg_offset(0, sreg);
1048}
1049
8f8e3aa4
PB
1050static TCGv neon_load_reg(int reg, int pass)
1051{
7d1b0095 1052 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1053 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1054 return tmp;
1055}
1056
1057static void neon_store_reg(int reg, int pass, TCGv var)
1058{
1059 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1060 tcg_temp_free_i32(var);
8f8e3aa4
PB
1061}
1062
a7812ae4 1063static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1064{
1065 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1066}
1067
a7812ae4 1068static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1069{
1070 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1071}
1072
4373f3ce
PB
1073#define tcg_gen_ld_f32 tcg_gen_ld_i32
1074#define tcg_gen_ld_f64 tcg_gen_ld_i64
1075#define tcg_gen_st_f32 tcg_gen_st_i32
1076#define tcg_gen_st_f64 tcg_gen_st_i64
1077
b7bcbe95
FB
1078static inline void gen_mov_F0_vreg(int dp, int reg)
1079{
1080 if (dp)
4373f3ce 1081 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1082 else
4373f3ce 1083 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1084}
1085
1086static inline void gen_mov_F1_vreg(int dp, int reg)
1087{
1088 if (dp)
4373f3ce 1089 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1090 else
4373f3ce 1091 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1092}
1093
1094static inline void gen_mov_vreg_F0(int dp, int reg)
1095{
1096 if (dp)
4373f3ce 1097 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1098 else
4373f3ce 1099 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1100}
1101
18c9b560
AZ
1102#define ARM_CP_RW_BIT (1 << 20)
1103
a7812ae4 1104static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1105{
1106 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1107}
1108
a7812ae4 1109static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1110{
1111 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1112}
1113
da6b5335 1114static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1115{
7d1b0095 1116 TCGv var = tcg_temp_new_i32();
da6b5335
FN
1117 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1118 return var;
e677137d
PB
1119}
1120
da6b5335 1121static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1122{
da6b5335 1123 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
7d1b0095 1124 tcg_temp_free_i32(var);
e677137d
PB
1125}
1126
1127static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1128{
1129 iwmmxt_store_reg(cpu_M0, rn);
1130}
1131
1132static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1133{
1134 iwmmxt_load_reg(cpu_M0, rn);
1135}
1136
1137static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1138{
1139 iwmmxt_load_reg(cpu_V1, rn);
1140 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1141}
1142
1143static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1144{
1145 iwmmxt_load_reg(cpu_V1, rn);
1146 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1147}
1148
1149static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1150{
1151 iwmmxt_load_reg(cpu_V1, rn);
1152 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1153}
1154
1155#define IWMMXT_OP(name) \
1156static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1157{ \
1158 iwmmxt_load_reg(cpu_V1, rn); \
1159 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1160}
1161
1162#define IWMMXT_OP_ENV(name) \
1163static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1164{ \
1165 iwmmxt_load_reg(cpu_V1, rn); \
1166 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1167}
1168
1169#define IWMMXT_OP_ENV_SIZE(name) \
1170IWMMXT_OP_ENV(name##b) \
1171IWMMXT_OP_ENV(name##w) \
1172IWMMXT_OP_ENV(name##l)
1173
1174#define IWMMXT_OP_ENV1(name) \
1175static inline void gen_op_iwmmxt_##name##_M0(void) \
1176{ \
1177 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1178}
1179
1180IWMMXT_OP(maddsq)
1181IWMMXT_OP(madduq)
1182IWMMXT_OP(sadb)
1183IWMMXT_OP(sadw)
1184IWMMXT_OP(mulslw)
1185IWMMXT_OP(mulshw)
1186IWMMXT_OP(mululw)
1187IWMMXT_OP(muluhw)
1188IWMMXT_OP(macsw)
1189IWMMXT_OP(macuw)
1190
1191IWMMXT_OP_ENV_SIZE(unpackl)
1192IWMMXT_OP_ENV_SIZE(unpackh)
1193
1194IWMMXT_OP_ENV1(unpacklub)
1195IWMMXT_OP_ENV1(unpackluw)
1196IWMMXT_OP_ENV1(unpacklul)
1197IWMMXT_OP_ENV1(unpackhub)
1198IWMMXT_OP_ENV1(unpackhuw)
1199IWMMXT_OP_ENV1(unpackhul)
1200IWMMXT_OP_ENV1(unpacklsb)
1201IWMMXT_OP_ENV1(unpacklsw)
1202IWMMXT_OP_ENV1(unpacklsl)
1203IWMMXT_OP_ENV1(unpackhsb)
1204IWMMXT_OP_ENV1(unpackhsw)
1205IWMMXT_OP_ENV1(unpackhsl)
1206
1207IWMMXT_OP_ENV_SIZE(cmpeq)
1208IWMMXT_OP_ENV_SIZE(cmpgtu)
1209IWMMXT_OP_ENV_SIZE(cmpgts)
1210
1211IWMMXT_OP_ENV_SIZE(mins)
1212IWMMXT_OP_ENV_SIZE(minu)
1213IWMMXT_OP_ENV_SIZE(maxs)
1214IWMMXT_OP_ENV_SIZE(maxu)
1215
1216IWMMXT_OP_ENV_SIZE(subn)
1217IWMMXT_OP_ENV_SIZE(addn)
1218IWMMXT_OP_ENV_SIZE(subu)
1219IWMMXT_OP_ENV_SIZE(addu)
1220IWMMXT_OP_ENV_SIZE(subs)
1221IWMMXT_OP_ENV_SIZE(adds)
1222
1223IWMMXT_OP_ENV(avgb0)
1224IWMMXT_OP_ENV(avgb1)
1225IWMMXT_OP_ENV(avgw0)
1226IWMMXT_OP_ENV(avgw1)
1227
1228IWMMXT_OP(msadb)
1229
1230IWMMXT_OP_ENV(packuw)
1231IWMMXT_OP_ENV(packul)
1232IWMMXT_OP_ENV(packuq)
1233IWMMXT_OP_ENV(packsw)
1234IWMMXT_OP_ENV(packsl)
1235IWMMXT_OP_ENV(packsq)
1236
e677137d
PB
1237static void gen_op_iwmmxt_set_mup(void)
1238{
1239 TCGv tmp;
1240 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1241 tcg_gen_ori_i32(tmp, tmp, 2);
1242 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1243}
1244
1245static void gen_op_iwmmxt_set_cup(void)
1246{
1247 TCGv tmp;
1248 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1249 tcg_gen_ori_i32(tmp, tmp, 1);
1250 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1251}
1252
1253static void gen_op_iwmmxt_setpsr_nz(void)
1254{
7d1b0095 1255 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1256 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1257 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1258}
1259
1260static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1261{
1262 iwmmxt_load_reg(cpu_V1, rn);
86831435 1263 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1264 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1265}
1266
da6b5335 1267static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1268{
1269 int rd;
1270 uint32_t offset;
da6b5335 1271 TCGv tmp;
18c9b560
AZ
1272
1273 rd = (insn >> 16) & 0xf;
da6b5335 1274 tmp = load_reg(s, rd);
18c9b560
AZ
1275
1276 offset = (insn & 0xff) << ((insn >> 7) & 2);
1277 if (insn & (1 << 24)) {
1278 /* Pre indexed */
1279 if (insn & (1 << 23))
da6b5335 1280 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1281 else
da6b5335
FN
1282 tcg_gen_addi_i32(tmp, tmp, -offset);
1283 tcg_gen_mov_i32(dest, tmp);
18c9b560 1284 if (insn & (1 << 21))
da6b5335
FN
1285 store_reg(s, rd, tmp);
1286 else
7d1b0095 1287 tcg_temp_free_i32(tmp);
18c9b560
AZ
1288 } else if (insn & (1 << 21)) {
1289 /* Post indexed */
da6b5335 1290 tcg_gen_mov_i32(dest, tmp);
18c9b560 1291 if (insn & (1 << 23))
da6b5335 1292 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1293 else
da6b5335
FN
1294 tcg_gen_addi_i32(tmp, tmp, -offset);
1295 store_reg(s, rd, tmp);
18c9b560
AZ
1296 } else if (!(insn & (1 << 23)))
1297 return 1;
1298 return 0;
1299}
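/* Summary of the addressing modes decoded above (as the code reads): with
 * bit 24 set the access is pre-indexed (address = Rn +/- offset, written
 * back to Rn only if bit 21, the W bit, is set); with bit 24 clear and
 * bit 21 set it is post-indexed (address = Rn, then Rn +/- offset is
 * written back); any remaining combination with bit 23 clear is rejected
 * as an invalid encoding.
 */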
1300
da6b5335 1301static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1302{
1303 int rd = (insn >> 0) & 0xf;
da6b5335 1304 TCGv tmp;
18c9b560 1305
da6b5335
FN
1306 if (insn & (1 << 8)) {
1307 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1308 return 1;
da6b5335
FN
1309 } else {
1310 tmp = iwmmxt_load_creg(rd);
1311 }
1312 } else {
7d1b0095 1313 tmp = tcg_temp_new_i32();
da6b5335
FN
1314 iwmmxt_load_reg(cpu_V0, rd);
1315 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1316 }
1317 tcg_gen_andi_i32(tmp, tmp, mask);
1318 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1319 tcg_temp_free_i32(tmp);
18c9b560
AZ
1320 return 0;
1321}
1322
 1323/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
 1324 (i.e. an undefined instruction). */
1325static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1326{
1327 int rd, wrd;
1328 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1329 TCGv addr;
1330 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1331
1332 if ((insn & 0x0e000e00) == 0x0c000000) {
1333 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1334 wrd = insn & 0xf;
1335 rdlo = (insn >> 12) & 0xf;
1336 rdhi = (insn >> 16) & 0xf;
1337 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1338 iwmmxt_load_reg(cpu_V0, wrd);
1339 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1340 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1341 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1342 } else { /* TMCRR */
da6b5335
FN
1343 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1344 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1345 gen_op_iwmmxt_set_mup();
1346 }
1347 return 0;
1348 }
1349
1350 wrd = (insn >> 12) & 0xf;
7d1b0095 1351 addr = tcg_temp_new_i32();
da6b5335 1352 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1353 tcg_temp_free_i32(addr);
18c9b560 1354 return 1;
da6b5335 1355 }
18c9b560
AZ
1356 if (insn & ARM_CP_RW_BIT) {
1357 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1358 tmp = tcg_temp_new_i32();
da6b5335
FN
1359 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1360 iwmmxt_store_creg(wrd, tmp);
18c9b560 1361 } else {
e677137d
PB
1362 i = 1;
1363 if (insn & (1 << 8)) {
1364 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1365 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1366 i = 0;
1367 } else { /* WLDRW wRd */
da6b5335 1368 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1369 }
1370 } else {
1371 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1372 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1373 } else { /* WLDRB */
da6b5335 1374 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1375 }
1376 }
1377 if (i) {
1378 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1379 tcg_temp_free_i32(tmp);
e677137d 1380 }
18c9b560
AZ
1381 gen_op_iwmmxt_movq_wRn_M0(wrd);
1382 }
1383 } else {
1384 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1385 tmp = iwmmxt_load_creg(wrd);
1386 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1387 } else {
1388 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1389 tmp = tcg_temp_new_i32();
e677137d
PB
1390 if (insn & (1 << 8)) {
1391 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1392 tcg_temp_free_i32(tmp);
da6b5335 1393 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1394 } else { /* WSTRW wRd */
1395 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1396 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1397 }
1398 } else {
1399 if (insn & (1 << 22)) { /* WSTRH */
1400 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1401 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1402 } else { /* WSTRB */
1403 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1404 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1405 }
1406 }
18c9b560
AZ
1407 }
1408 }
7d1b0095 1409 tcg_temp_free_i32(addr);
18c9b560
AZ
1410 return 0;
1411 }
1412
1413 if ((insn & 0x0f000000) != 0x0e000000)
1414 return 1;
1415
1416 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1417 case 0x000: /* WOR */
1418 wrd = (insn >> 12) & 0xf;
1419 rd0 = (insn >> 0) & 0xf;
1420 rd1 = (insn >> 16) & 0xf;
1421 gen_op_iwmmxt_movq_M0_wRn(rd0);
1422 gen_op_iwmmxt_orq_M0_wRn(rd1);
1423 gen_op_iwmmxt_setpsr_nz();
1424 gen_op_iwmmxt_movq_wRn_M0(wrd);
1425 gen_op_iwmmxt_set_mup();
1426 gen_op_iwmmxt_set_cup();
1427 break;
1428 case 0x011: /* TMCR */
1429 if (insn & 0xf)
1430 return 1;
1431 rd = (insn >> 12) & 0xf;
1432 wrd = (insn >> 16) & 0xf;
1433 switch (wrd) {
1434 case ARM_IWMMXT_wCID:
1435 case ARM_IWMMXT_wCASF:
1436 break;
1437 case ARM_IWMMXT_wCon:
1438 gen_op_iwmmxt_set_cup();
1439 /* Fall through. */
1440 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1441 tmp = iwmmxt_load_creg(wrd);
1442 tmp2 = load_reg(s, rd);
f669df27 1443 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1444 tcg_temp_free_i32(tmp2);
da6b5335 1445 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1446 break;
1447 case ARM_IWMMXT_wCGR0:
1448 case ARM_IWMMXT_wCGR1:
1449 case ARM_IWMMXT_wCGR2:
1450 case ARM_IWMMXT_wCGR3:
1451 gen_op_iwmmxt_set_cup();
da6b5335
FN
1452 tmp = load_reg(s, rd);
1453 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1454 break;
1455 default:
1456 return 1;
1457 }
1458 break;
1459 case 0x100: /* WXOR */
1460 wrd = (insn >> 12) & 0xf;
1461 rd0 = (insn >> 0) & 0xf;
1462 rd1 = (insn >> 16) & 0xf;
1463 gen_op_iwmmxt_movq_M0_wRn(rd0);
1464 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1465 gen_op_iwmmxt_setpsr_nz();
1466 gen_op_iwmmxt_movq_wRn_M0(wrd);
1467 gen_op_iwmmxt_set_mup();
1468 gen_op_iwmmxt_set_cup();
1469 break;
1470 case 0x111: /* TMRC */
1471 if (insn & 0xf)
1472 return 1;
1473 rd = (insn >> 12) & 0xf;
1474 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1475 tmp = iwmmxt_load_creg(wrd);
1476 store_reg(s, rd, tmp);
18c9b560
AZ
1477 break;
1478 case 0x300: /* WANDN */
1479 wrd = (insn >> 12) & 0xf;
1480 rd0 = (insn >> 0) & 0xf;
1481 rd1 = (insn >> 16) & 0xf;
1482 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1483 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1484 gen_op_iwmmxt_andq_M0_wRn(rd1);
1485 gen_op_iwmmxt_setpsr_nz();
1486 gen_op_iwmmxt_movq_wRn_M0(wrd);
1487 gen_op_iwmmxt_set_mup();
1488 gen_op_iwmmxt_set_cup();
1489 break;
1490 case 0x200: /* WAND */
1491 wrd = (insn >> 12) & 0xf;
1492 rd0 = (insn >> 0) & 0xf;
1493 rd1 = (insn >> 16) & 0xf;
1494 gen_op_iwmmxt_movq_M0_wRn(rd0);
1495 gen_op_iwmmxt_andq_M0_wRn(rd1);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1500 break;
1501 case 0x810: case 0xa10: /* WMADD */
1502 wrd = (insn >> 12) & 0xf;
1503 rd0 = (insn >> 0) & 0xf;
1504 rd1 = (insn >> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0);
1506 if (insn & (1 << 21))
1507 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1508 else
1509 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1510 gen_op_iwmmxt_movq_wRn_M0(wrd);
1511 gen_op_iwmmxt_set_mup();
1512 break;
1513 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1514 wrd = (insn >> 12) & 0xf;
1515 rd0 = (insn >> 16) & 0xf;
1516 rd1 = (insn >> 0) & 0xf;
1517 gen_op_iwmmxt_movq_M0_wRn(rd0);
1518 switch ((insn >> 22) & 3) {
1519 case 0:
1520 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1521 break;
1522 case 1:
1523 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1524 break;
1525 case 2:
1526 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1527 break;
1528 case 3:
1529 return 1;
1530 }
1531 gen_op_iwmmxt_movq_wRn_M0(wrd);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1534 break;
1535 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1536 wrd = (insn >> 12) & 0xf;
1537 rd0 = (insn >> 16) & 0xf;
1538 rd1 = (insn >> 0) & 0xf;
1539 gen_op_iwmmxt_movq_M0_wRn(rd0);
1540 switch ((insn >> 22) & 3) {
1541 case 0:
1542 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1543 break;
1544 case 1:
1545 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1546 break;
1547 case 2:
1548 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1549 break;
1550 case 3:
1551 return 1;
1552 }
1553 gen_op_iwmmxt_movq_wRn_M0(wrd);
1554 gen_op_iwmmxt_set_mup();
1555 gen_op_iwmmxt_set_cup();
1556 break;
1557 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1558 wrd = (insn >> 12) & 0xf;
1559 rd0 = (insn >> 16) & 0xf;
1560 rd1 = (insn >> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0);
1562 if (insn & (1 << 22))
1563 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1564 else
1565 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1566 if (!(insn & (1 << 20)))
1567 gen_op_iwmmxt_addl_M0_wRn(wrd);
1568 gen_op_iwmmxt_movq_wRn_M0(wrd);
1569 gen_op_iwmmxt_set_mup();
1570 break;
1571 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1572 wrd = (insn >> 12) & 0xf;
1573 rd0 = (insn >> 16) & 0xf;
1574 rd1 = (insn >> 0) & 0xf;
1575 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1576 if (insn & (1 << 21)) {
1577 if (insn & (1 << 20))
1578 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1579 else
1580 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1581 } else {
1582 if (insn & (1 << 20))
1583 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1584 else
1585 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1586 }
18c9b560
AZ
1587 gen_op_iwmmxt_movq_wRn_M0(wrd);
1588 gen_op_iwmmxt_set_mup();
1589 break;
1590 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1591 wrd = (insn >> 12) & 0xf;
1592 rd0 = (insn >> 16) & 0xf;
1593 rd1 = (insn >> 0) & 0xf;
1594 gen_op_iwmmxt_movq_M0_wRn(rd0);
1595 if (insn & (1 << 21))
1596 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1597 else
1598 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1599 if (!(insn & (1 << 20))) {
e677137d
PB
1600 iwmmxt_load_reg(cpu_V1, wrd);
1601 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1602 }
1603 gen_op_iwmmxt_movq_wRn_M0(wrd);
1604 gen_op_iwmmxt_set_mup();
1605 break;
1606 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1607 wrd = (insn >> 12) & 0xf;
1608 rd0 = (insn >> 16) & 0xf;
1609 rd1 = (insn >> 0) & 0xf;
1610 gen_op_iwmmxt_movq_M0_wRn(rd0);
1611 switch ((insn >> 22) & 3) {
1612 case 0:
1613 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1614 break;
1615 case 1:
1616 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1617 break;
1618 case 2:
1619 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1620 break;
1621 case 3:
1622 return 1;
1623 }
1624 gen_op_iwmmxt_movq_wRn_M0(wrd);
1625 gen_op_iwmmxt_set_mup();
1626 gen_op_iwmmxt_set_cup();
1627 break;
1628 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1629 wrd = (insn >> 12) & 0xf;
1630 rd0 = (insn >> 16) & 0xf;
1631 rd1 = (insn >> 0) & 0xf;
1632 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1633 if (insn & (1 << 22)) {
1634 if (insn & (1 << 20))
1635 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1636 else
1637 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1638 } else {
1639 if (insn & (1 << 20))
1640 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1643 }
18c9b560
AZ
1644 gen_op_iwmmxt_movq_wRn_M0(wrd);
1645 gen_op_iwmmxt_set_mup();
1646 gen_op_iwmmxt_set_cup();
1647 break;
1648 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1649 wrd = (insn >> 12) & 0xf;
1650 rd0 = (insn >> 16) & 0xf;
1651 rd1 = (insn >> 0) & 0xf;
1652 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1653 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1654 tcg_gen_andi_i32(tmp, tmp, 7);
1655 iwmmxt_load_reg(cpu_V1, rd1);
1656 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1657 tcg_temp_free_i32(tmp);
18c9b560
AZ
1658 gen_op_iwmmxt_movq_wRn_M0(wrd);
1659 gen_op_iwmmxt_set_mup();
1660 break;
1661 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1662 if (((insn >> 6) & 3) == 3)
1663 return 1;
18c9b560
AZ
1664 rd = (insn >> 12) & 0xf;
1665 wrd = (insn >> 16) & 0xf;
da6b5335 1666 tmp = load_reg(s, rd);
18c9b560
AZ
1667 gen_op_iwmmxt_movq_M0_wRn(wrd);
1668 switch ((insn >> 6) & 3) {
1669 case 0:
da6b5335
FN
1670 tmp2 = tcg_const_i32(0xff);
1671 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1672 break;
1673 case 1:
da6b5335
FN
1674 tmp2 = tcg_const_i32(0xffff);
1675 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1676 break;
1677 case 2:
da6b5335
FN
1678 tmp2 = tcg_const_i32(0xffffffff);
1679 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1680 break;
da6b5335
FN
1681 default:
1682 TCGV_UNUSED(tmp2);
1683 TCGV_UNUSED(tmp3);
18c9b560 1684 }
da6b5335
FN
1685 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1686 tcg_temp_free(tmp3);
1687 tcg_temp_free(tmp2);
7d1b0095 1688 tcg_temp_free_i32(tmp);
18c9b560
AZ
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 break;
1692 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1693 rd = (insn >> 12) & 0xf;
1694 wrd = (insn >> 16) & 0xf;
da6b5335 1695 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1696 return 1;
1697 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1698 tmp = tcg_temp_new_i32();
18c9b560
AZ
1699 switch ((insn >> 22) & 3) {
1700 case 0:
da6b5335
FN
1701 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1702 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1703 if (insn & 8) {
1704 tcg_gen_ext8s_i32(tmp, tmp);
1705 } else {
1706 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1707 }
1708 break;
1709 case 1:
da6b5335
FN
1710 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1711 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1712 if (insn & 8) {
1713 tcg_gen_ext16s_i32(tmp, tmp);
1714 } else {
1715 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1716 }
1717 break;
1718 case 2:
da6b5335
FN
1719 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1720 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1721 break;
18c9b560 1722 }
da6b5335 1723 store_reg(s, rd, tmp);
18c9b560
AZ
1724 break;
1725 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1726 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1727 return 1;
da6b5335 1728 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1729 switch ((insn >> 22) & 3) {
1730 case 0:
da6b5335 1731 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1732 break;
1733 case 1:
da6b5335 1734 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1735 break;
1736 case 2:
da6b5335 1737 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1738 break;
18c9b560 1739 }
da6b5335
FN
1740 tcg_gen_shli_i32(tmp, tmp, 28);
1741 gen_set_nzcv(tmp);
7d1b0095 1742 tcg_temp_free_i32(tmp);
18c9b560
AZ
1743 break;
1744 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1745 if (((insn >> 6) & 3) == 3)
1746 return 1;
18c9b560
AZ
1747 rd = (insn >> 12) & 0xf;
1748 wrd = (insn >> 16) & 0xf;
da6b5335 1749 tmp = load_reg(s, rd);
18c9b560
AZ
1750 switch ((insn >> 6) & 3) {
1751 case 0:
da6b5335 1752 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1753 break;
1754 case 1:
da6b5335 1755 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1756 break;
1757 case 2:
da6b5335 1758 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1759 break;
18c9b560 1760 }
7d1b0095 1761 tcg_temp_free_i32(tmp);
18c9b560
AZ
1762 gen_op_iwmmxt_movq_wRn_M0(wrd);
1763 gen_op_iwmmxt_set_mup();
1764 break;
1765 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1766 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1767 return 1;
da6b5335 1768 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1769 tmp2 = tcg_temp_new_i32();
da6b5335 1770 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1771 switch ((insn >> 22) & 3) {
1772 case 0:
1773 for (i = 0; i < 7; i ++) {
da6b5335
FN
1774 tcg_gen_shli_i32(tmp2, tmp2, 4);
1775 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1776 }
1777 break;
1778 case 1:
1779 for (i = 0; i < 3; i ++) {
da6b5335
FN
1780 tcg_gen_shli_i32(tmp2, tmp2, 8);
1781 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1782 }
1783 break;
1784 case 2:
da6b5335
FN
1785 tcg_gen_shli_i32(tmp2, tmp2, 16);
1786 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1787 break;
18c9b560 1788 }
da6b5335 1789 gen_set_nzcv(tmp);
7d1b0095
PM
1790 tcg_temp_free_i32(tmp2);
1791 tcg_temp_free_i32(tmp);
18c9b560
AZ
1792 break;
1793 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1794 wrd = (insn >> 12) & 0xf;
1795 rd0 = (insn >> 16) & 0xf;
1796 gen_op_iwmmxt_movq_M0_wRn(rd0);
1797 switch ((insn >> 22) & 3) {
1798 case 0:
e677137d 1799 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1800 break;
1801 case 1:
e677137d 1802 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1803 break;
1804 case 2:
e677137d 1805 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1806 break;
1807 case 3:
1808 return 1;
1809 }
1810 gen_op_iwmmxt_movq_wRn_M0(wrd);
1811 gen_op_iwmmxt_set_mup();
1812 break;
1813 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1814 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1815 return 1;
da6b5335 1816 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1817 tmp2 = tcg_temp_new_i32();
da6b5335 1818 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1819 switch ((insn >> 22) & 3) {
1820 case 0:
1821 for (i = 0; i < 7; i ++) {
da6b5335
FN
1822 tcg_gen_shli_i32(tmp2, tmp2, 4);
1823 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1824 }
1825 break;
1826 case 1:
1827 for (i = 0; i < 3; i ++) {
da6b5335
FN
1828 tcg_gen_shli_i32(tmp2, tmp2, 8);
1829 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1830 }
1831 break;
1832 case 2:
da6b5335
FN
1833 tcg_gen_shli_i32(tmp2, tmp2, 16);
1834 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1835 break;
18c9b560 1836 }
da6b5335 1837 gen_set_nzcv(tmp);
7d1b0095
PM
1838 tcg_temp_free_i32(tmp2);
1839 tcg_temp_free_i32(tmp);
18c9b560
AZ
1840 break;
1841 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1842 rd = (insn >> 12) & 0xf;
1843 rd0 = (insn >> 16) & 0xf;
da6b5335 1844 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1845 return 1;
1846 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1847 tmp = tcg_temp_new_i32();
18c9b560
AZ
1848 switch ((insn >> 22) & 3) {
1849 case 0:
da6b5335 1850 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1851 break;
1852 case 1:
da6b5335 1853 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1854 break;
1855 case 2:
da6b5335 1856 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1857 break;
18c9b560 1858 }
da6b5335 1859 store_reg(s, rd, tmp);
18c9b560
AZ
1860 break;
1861 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1862 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1863 wrd = (insn >> 12) & 0xf;
1864 rd0 = (insn >> 16) & 0xf;
1865 rd1 = (insn >> 0) & 0xf;
1866 gen_op_iwmmxt_movq_M0_wRn(rd0);
1867 switch ((insn >> 22) & 3) {
1868 case 0:
1869 if (insn & (1 << 21))
1870 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1871 else
1872 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1873 break;
1874 case 1:
1875 if (insn & (1 << 21))
1876 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1877 else
1878 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1879 break;
1880 case 2:
1881 if (insn & (1 << 21))
1882 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1883 else
1884 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1885 break;
1886 case 3:
1887 return 1;
1888 }
1889 gen_op_iwmmxt_movq_wRn_M0(wrd);
1890 gen_op_iwmmxt_set_mup();
1891 gen_op_iwmmxt_set_cup();
1892 break;
1893 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1894 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1895 wrd = (insn >> 12) & 0xf;
1896 rd0 = (insn >> 16) & 0xf;
1897 gen_op_iwmmxt_movq_M0_wRn(rd0);
1898 switch ((insn >> 22) & 3) {
1899 case 0:
1900 if (insn & (1 << 21))
1901 gen_op_iwmmxt_unpacklsb_M0();
1902 else
1903 gen_op_iwmmxt_unpacklub_M0();
1904 break;
1905 case 1:
1906 if (insn & (1 << 21))
1907 gen_op_iwmmxt_unpacklsw_M0();
1908 else
1909 gen_op_iwmmxt_unpackluw_M0();
1910 break;
1911 case 2:
1912 if (insn & (1 << 21))
1913 gen_op_iwmmxt_unpacklsl_M0();
1914 else
1915 gen_op_iwmmxt_unpacklul_M0();
1916 break;
1917 case 3:
1918 return 1;
1919 }
1920 gen_op_iwmmxt_movq_wRn_M0(wrd);
1921 gen_op_iwmmxt_set_mup();
1922 gen_op_iwmmxt_set_cup();
1923 break;
1924 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1925 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1926 wrd = (insn >> 12) & 0xf;
1927 rd0 = (insn >> 16) & 0xf;
1928 gen_op_iwmmxt_movq_M0_wRn(rd0);
1929 switch ((insn >> 22) & 3) {
1930 case 0:
1931 if (insn & (1 << 21))
1932 gen_op_iwmmxt_unpackhsb_M0();
1933 else
1934 gen_op_iwmmxt_unpackhub_M0();
1935 break;
1936 case 1:
1937 if (insn & (1 << 21))
1938 gen_op_iwmmxt_unpackhsw_M0();
1939 else
1940 gen_op_iwmmxt_unpackhuw_M0();
1941 break;
1942 case 2:
1943 if (insn & (1 << 21))
1944 gen_op_iwmmxt_unpackhsl_M0();
1945 else
1946 gen_op_iwmmxt_unpackhul_M0();
1947 break;
1948 case 3:
1949 return 1;
1950 }
1951 gen_op_iwmmxt_movq_wRn_M0(wrd);
1952 gen_op_iwmmxt_set_mup();
1953 gen_op_iwmmxt_set_cup();
1954 break;
1955 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1956 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1957 if (((insn >> 22) & 3) == 0)
1958 return 1;
18c9b560
AZ
1959 wrd = (insn >> 12) & 0xf;
1960 rd0 = (insn >> 16) & 0xf;
1961 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1962 tmp = tcg_temp_new_i32();
da6b5335 1963 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 1964 tcg_temp_free_i32(tmp);
18c9b560 1965 return 1;
da6b5335 1966 }
18c9b560 1967 switch ((insn >> 22) & 3) {
18c9b560 1968 case 1:
da6b5335 1969 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1970 break;
1971 case 2:
da6b5335 1972 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1973 break;
1974 case 3:
da6b5335 1975 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1976 break;
1977 }
7d1b0095 1978 tcg_temp_free_i32(tmp);
18c9b560
AZ
1979 gen_op_iwmmxt_movq_wRn_M0(wrd);
1980 gen_op_iwmmxt_set_mup();
1981 gen_op_iwmmxt_set_cup();
1982 break;
1983 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1984 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1985 if (((insn >> 22) & 3) == 0)
1986 return 1;
18c9b560
AZ
1987 wrd = (insn >> 12) & 0xf;
1988 rd0 = (insn >> 16) & 0xf;
1989 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1990 tmp = tcg_temp_new_i32();
da6b5335 1991 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 1992 tcg_temp_free_i32(tmp);
18c9b560 1993 return 1;
da6b5335 1994 }
18c9b560 1995 switch ((insn >> 22) & 3) {
18c9b560 1996 case 1:
da6b5335 1997 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1998 break;
1999 case 2:
da6b5335 2000 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2001 break;
2002 case 3:
da6b5335 2003 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2004 break;
2005 }
7d1b0095 2006 tcg_temp_free_i32(tmp);
18c9b560
AZ
2007 gen_op_iwmmxt_movq_wRn_M0(wrd);
2008 gen_op_iwmmxt_set_mup();
2009 gen_op_iwmmxt_set_cup();
2010 break;
2011 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2012 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2013 if (((insn >> 22) & 3) == 0)
2014 return 1;
18c9b560
AZ
2015 wrd = (insn >> 12) & 0xf;
2016 rd0 = (insn >> 16) & 0xf;
2017 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2018 tmp = tcg_temp_new_i32();
da6b5335 2019 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2020 tcg_temp_free_i32(tmp);
18c9b560 2021 return 1;
da6b5335 2022 }
18c9b560 2023 switch ((insn >> 22) & 3) {
18c9b560 2024 case 1:
da6b5335 2025 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2026 break;
2027 case 2:
da6b5335 2028 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2029 break;
2030 case 3:
da6b5335 2031 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2032 break;
2033 }
7d1b0095 2034 tcg_temp_free_i32(tmp);
18c9b560
AZ
2035 gen_op_iwmmxt_movq_wRn_M0(wrd);
2036 gen_op_iwmmxt_set_mup();
2037 gen_op_iwmmxt_set_cup();
2038 break;
2039 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2040 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2041 if (((insn >> 22) & 3) == 0)
2042 return 1;
18c9b560
AZ
2043 wrd = (insn >> 12) & 0xf;
2044 rd0 = (insn >> 16) & 0xf;
2045 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2046 tmp = tcg_temp_new_i32();
18c9b560 2047 switch ((insn >> 22) & 3) {
18c9b560 2048 case 1:
da6b5335 2049 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2050 tcg_temp_free_i32(tmp);
18c9b560 2051 return 1;
da6b5335
FN
2052 }
2053 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2054 break;
2055 case 2:
da6b5335 2056 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2057 tcg_temp_free_i32(tmp);
18c9b560 2058 return 1;
da6b5335
FN
2059 }
2060 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2061 break;
2062 case 3:
da6b5335 2063 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2064 tcg_temp_free_i32(tmp);
18c9b560 2065 return 1;
da6b5335
FN
2066 }
2067 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2068 break;
2069 }
7d1b0095 2070 tcg_temp_free_i32(tmp);
18c9b560
AZ
2071 gen_op_iwmmxt_movq_wRn_M0(wrd);
2072 gen_op_iwmmxt_set_mup();
2073 gen_op_iwmmxt_set_cup();
2074 break;
2075 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2076 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2077 wrd = (insn >> 12) & 0xf;
2078 rd0 = (insn >> 16) & 0xf;
2079 rd1 = (insn >> 0) & 0xf;
2080 gen_op_iwmmxt_movq_M0_wRn(rd0);
2081 switch ((insn >> 22) & 3) {
2082 case 0:
2083 if (insn & (1 << 21))
2084 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2085 else
2086 gen_op_iwmmxt_minub_M0_wRn(rd1);
2087 break;
2088 case 1:
2089 if (insn & (1 << 21))
2090 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2091 else
2092 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2093 break;
2094 case 2:
2095 if (insn & (1 << 21))
2096 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2097 else
2098 gen_op_iwmmxt_minul_M0_wRn(rd1);
2099 break;
2100 case 3:
2101 return 1;
2102 }
2103 gen_op_iwmmxt_movq_wRn_M0(wrd);
2104 gen_op_iwmmxt_set_mup();
2105 break;
2106 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2107 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2108 wrd = (insn >> 12) & 0xf;
2109 rd0 = (insn >> 16) & 0xf;
2110 rd1 = (insn >> 0) & 0xf;
2111 gen_op_iwmmxt_movq_M0_wRn(rd0);
2112 switch ((insn >> 22) & 3) {
2113 case 0:
2114 if (insn & (1 << 21))
2115 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2116 else
2117 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2118 break;
2119 case 1:
2120 if (insn & (1 << 21))
2121 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2122 else
2123 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2124 break;
2125 case 2:
2126 if (insn & (1 << 21))
2127 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2128 else
2129 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2130 break;
2131 case 3:
2132 return 1;
2133 }
2134 gen_op_iwmmxt_movq_wRn_M0(wrd);
2135 gen_op_iwmmxt_set_mup();
2136 break;
2137 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2138 case 0x402: case 0x502: case 0x602: case 0x702:
2139 wrd = (insn >> 12) & 0xf;
2140 rd0 = (insn >> 16) & 0xf;
2141 rd1 = (insn >> 0) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2143 tmp = tcg_const_i32((insn >> 20) & 3);
2144 iwmmxt_load_reg(cpu_V1, rd1);
2145 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2146 tcg_temp_free(tmp);
18c9b560
AZ
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
2150 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2151 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2152 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2153 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2154 wrd = (insn >> 12) & 0xf;
2155 rd0 = (insn >> 16) & 0xf;
2156 rd1 = (insn >> 0) & 0xf;
2157 gen_op_iwmmxt_movq_M0_wRn(rd0);
2158 switch ((insn >> 20) & 0xf) {
2159 case 0x0:
2160 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2161 break;
2162 case 0x1:
2163 gen_op_iwmmxt_subub_M0_wRn(rd1);
2164 break;
2165 case 0x3:
2166 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2167 break;
2168 case 0x4:
2169 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2170 break;
2171 case 0x5:
2172 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2173 break;
2174 case 0x7:
2175 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2176 break;
2177 case 0x8:
2178 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2179 break;
2180 case 0x9:
2181 gen_op_iwmmxt_subul_M0_wRn(rd1);
2182 break;
2183 case 0xb:
2184 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2185 break;
2186 default:
2187 return 1;
2188 }
2189 gen_op_iwmmxt_movq_wRn_M0(wrd);
2190 gen_op_iwmmxt_set_mup();
2191 gen_op_iwmmxt_set_cup();
2192 break;
2193 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2194 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2195 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2196 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2197 wrd = (insn >> 12) & 0xf;
2198 rd0 = (insn >> 16) & 0xf;
2199 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2200 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2201 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2202 tcg_temp_free(tmp);
18c9b560
AZ
2203 gen_op_iwmmxt_movq_wRn_M0(wrd);
2204 gen_op_iwmmxt_set_mup();
2205 gen_op_iwmmxt_set_cup();
2206 break;
2207 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2208 case 0x418: case 0x518: case 0x618: case 0x718:
2209 case 0x818: case 0x918: case 0xa18: case 0xb18:
2210 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2211 wrd = (insn >> 12) & 0xf;
2212 rd0 = (insn >> 16) & 0xf;
2213 rd1 = (insn >> 0) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0);
2215 switch ((insn >> 20) & 0xf) {
2216 case 0x0:
2217 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2218 break;
2219 case 0x1:
2220 gen_op_iwmmxt_addub_M0_wRn(rd1);
2221 break;
2222 case 0x3:
2223 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2224 break;
2225 case 0x4:
2226 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2227 break;
2228 case 0x5:
2229 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2230 break;
2231 case 0x7:
2232 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2233 break;
2234 case 0x8:
2235 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2236 break;
2237 case 0x9:
2238 gen_op_iwmmxt_addul_M0_wRn(rd1);
2239 break;
2240 case 0xb:
2241 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2242 break;
2243 default:
2244 return 1;
2245 }
2246 gen_op_iwmmxt_movq_wRn_M0(wrd);
2247 gen_op_iwmmxt_set_mup();
2248 gen_op_iwmmxt_set_cup();
2249 break;
2250 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2251 case 0x408: case 0x508: case 0x608: case 0x708:
2252 case 0x808: case 0x908: case 0xa08: case 0xb08:
2253 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2254 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2255 return 1;
18c9b560
AZ
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2260 switch ((insn >> 22) & 3) {
18c9b560
AZ
2261 case 1:
2262 if (insn & (1 << 21))
2263 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2264 else
2265 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2266 break;
2267 case 2:
2268 if (insn & (1 << 21))
2269 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2270 else
2271 gen_op_iwmmxt_packul_M0_wRn(rd1);
2272 break;
2273 case 3:
2274 if (insn & (1 << 21))
2275 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2276 else
2277 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2278 break;
2279 }
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 gen_op_iwmmxt_set_cup();
2283 break;
2284 case 0x201: case 0x203: case 0x205: case 0x207:
2285 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2286 case 0x211: case 0x213: case 0x215: case 0x217:
2287 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2288 wrd = (insn >> 5) & 0xf;
2289 rd0 = (insn >> 12) & 0xf;
2290 rd1 = (insn >> 0) & 0xf;
2291 if (rd0 == 0xf || rd1 == 0xf)
2292 return 1;
2293 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2294 tmp = load_reg(s, rd0);
2295 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2296 switch ((insn >> 16) & 0xf) {
2297 case 0x0: /* TMIA */
da6b5335 2298 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2299 break;
2300 case 0x8: /* TMIAPH */
da6b5335 2301 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2302 break;
2303 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2304 if (insn & (1 << 16))
da6b5335 2305 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2306 if (insn & (1 << 17))
da6b5335
FN
2307 tcg_gen_shri_i32(tmp2, tmp2, 16);
2308 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2309 break;
2310 default:
7d1b0095
PM
2311 tcg_temp_free_i32(tmp2);
2312 tcg_temp_free_i32(tmp);
18c9b560
AZ
2313 return 1;
2314 }
7d1b0095
PM
2315 tcg_temp_free_i32(tmp2);
2316 tcg_temp_free_i32(tmp);
18c9b560
AZ
2317 gen_op_iwmmxt_movq_wRn_M0(wrd);
2318 gen_op_iwmmxt_set_mup();
2319 break;
2320 default:
2321 return 1;
2322 }
2323
2324 return 0;
2325}
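/* Note: the iWMMXt cases above share one pattern: load the source register
 * into the 64-bit temporary cpu_M0 with gen_op_iwmmxt_movq_M0_wRn(), apply a
 * helper or TCG op to it, write the result back with
 * gen_op_iwmmxt_movq_wRn_M0(), and record the update in wCon via
 * gen_op_iwmmxt_set_mup()/_set_cup(). */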
2326
2327/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2328 (i.e. an undefined instruction). */
2329static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2330{
2331 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2332 TCGv tmp, tmp2;
18c9b560
AZ
2333
2334 if ((insn & 0x0ff00f10) == 0x0e200010) {
2335 /* Multiply with Internal Accumulate Format */
2336 rd0 = (insn >> 12) & 0xf;
2337 rd1 = insn & 0xf;
2338 acc = (insn >> 5) & 7;
2339
2340 if (acc != 0)
2341 return 1;
2342
3a554c0f
FN
2343 tmp = load_reg(s, rd0);
2344 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2345 switch ((insn >> 16) & 0xf) {
2346 case 0x0: /* MIA */
3a554c0f 2347 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2348 break;
2349 case 0x8: /* MIAPH */
3a554c0f 2350 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2351 break;
2352 case 0xc: /* MIABB */
2353 case 0xd: /* MIABT */
2354 case 0xe: /* MIATB */
2355 case 0xf: /* MIATT */
18c9b560 2356 if (insn & (1 << 16))
3a554c0f 2357 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2358 if (insn & (1 << 17))
3a554c0f
FN
2359 tcg_gen_shri_i32(tmp2, tmp2, 16);
2360 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2361 break;
2362 default:
2363 return 1;
2364 }
7d1b0095
PM
2365 tcg_temp_free_i32(tmp2);
2366 tcg_temp_free_i32(tmp);
18c9b560
AZ
2367
2368 gen_op_iwmmxt_movq_wRn_M0(acc);
2369 return 0;
2370 }
2371
2372 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2373 /* Internal Accumulator Access Format */
2374 rdhi = (insn >> 16) & 0xf;
2375 rdlo = (insn >> 12) & 0xf;
2376 acc = insn & 7;
2377
2378 if (acc != 0)
2379 return 1;
2380
2381 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2382 iwmmxt_load_reg(cpu_V0, acc);
2383 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2384 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2385 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2386 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2387 } else { /* MAR */
3a554c0f
FN
2388 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2389 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2390 }
2391 return 0;
2392 }
2393
2394 return 1;
2395}
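/* Note: the XScale DSP accumulator acc0 is 40 bits wide, hence the MRA path
 * above masks the high word with (1 << (40 - 32)) - 1 so that rdhi receives
 * only bits [39:32] while rdlo gets the low word; MAR simply concatenates
 * rdlo:rdhi back into the 64-bit accumulator slot. */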
2396
c1713132
AZ
2397/* Disassemble system coprocessor instruction. Return nonzero if
2398 instruction is not defined. */
2399static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2400{
b75263d6 2401 TCGv tmp, tmp2;
c1713132
AZ
2402 uint32_t rd = (insn >> 12) & 0xf;
2403 uint32_t cp = (insn >> 8) & 0xf;
2404 if (IS_USER(s)) {
2405 return 1;
2406 }
2407
18c9b560 2408 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2409 if (!env->cp[cp].cp_read)
2410 return 1;
8984bd2e 2411 gen_set_pc_im(s->pc);
7d1b0095 2412 tmp = tcg_temp_new_i32();
b75263d6
JR
2413 tmp2 = tcg_const_i32(insn);
2414 gen_helper_get_cp(tmp, cpu_env, tmp2);
2415 tcg_temp_free(tmp2);
8984bd2e 2416 store_reg(s, rd, tmp);
c1713132
AZ
2417 } else {
2418 if (!env->cp[cp].cp_write)
2419 return 1;
8984bd2e
PB
2420 gen_set_pc_im(s->pc);
2421 tmp = load_reg(s, rd);
b75263d6
JR
2422 tmp2 = tcg_const_i32(insn);
2423 gen_helper_set_cp(cpu_env, tmp2, tmp);
2424 tcg_temp_free(tmp2);
7d1b0095 2425 tcg_temp_free_i32(tmp);
c1713132
AZ
2426 }
2427 return 0;
2428}
2429
9ee6e8bb
PB
2430static int cp15_user_ok(uint32_t insn)
2431{
2432 int cpn = (insn >> 16) & 0xf;
2433 int cpm = insn & 0xf;
2434 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2435
2436 if (cpn == 13 && cpm == 0) {
2437 /* TLS register. */
2438 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2439 return 1;
2440 }
2441 if (cpn == 7) {
2442 /* ISB, DSB, DMB. */
2443 if ((cpm == 5 && op == 4)
2444 || (cpm == 10 && (op == 4 || op == 5)))
2445 return 1;
2446 }
2447 return 0;
2448}
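/* Note: in cp15_user_ok() "op" packs opc2 (insn[7:5]) in its low three bits
 * with opc1 (insn[23:21]) above it, so op 2/3/4 on c13/c0 select the
 * TPIDRURW, TPIDRURO and TPIDRPRW thread ID registers; only the first two are
 * accessible from user mode, and TPIDRURO only for reads. */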
2449
3f26c122
RV
2450static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2451{
2452 TCGv tmp;
2453 int cpn = (insn >> 16) & 0xf;
2454 int cpm = insn & 0xf;
2455 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2456
2457 if (!arm_feature(env, ARM_FEATURE_V6K))
2458 return 0;
2459
2460 if (!(cpn == 13 && cpm == 0))
2461 return 0;
2462
2463 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2464 switch (op) {
2465 case 2:
c5883be2 2466 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2467 break;
2468 case 3:
c5883be2 2469 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2470 break;
2471 case 4:
c5883be2 2472 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2473 break;
2474 default:
3f26c122
RV
2475 return 0;
2476 }
2477 store_reg(s, rd, tmp);
2478
2479 } else {
2480 tmp = load_reg(s, rd);
2481 switch (op) {
2482 case 2:
c5883be2 2483 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2484 break;
2485 case 3:
c5883be2 2486 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2487 break;
2488 case 4:
c5883be2 2489 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2490 break;
2491 default:
7d1b0095 2492 tcg_temp_free_i32(tmp);
3f26c122
RV
2493 return 0;
2494 }
3f26c122
RV
2495 }
2496 return 1;
2497}
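/* Note: unlike most disas_* routines in this file, cp15_tls_load_store()
 * returns nonzero when it has handled the access (so the caller skips the
 * generic cp15 helper) and zero to fall through -- the opposite sense of the
 * usual "nonzero means UNDEF" convention. */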
2498
b5ff1b31
FB
2499/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2500 instruction is not defined. */
a90b7318 2501static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2502{
2503 uint32_t rd;
b75263d6 2504 TCGv tmp, tmp2;
b5ff1b31 2505
9ee6e8bb
PB
2506 /* M profile cores use memory mapped registers instead of cp15. */
2507 if (arm_feature(env, ARM_FEATURE_M))
2508 return 1;
2509
2510 if ((insn & (1 << 25)) == 0) {
2511 if (insn & (1 << 20)) {
2512 /* mrrc */
2513 return 1;
2514 }
2515 /* mcrr. Used for block cache operations, so implement as no-op. */
2516 return 0;
2517 }
2518 if ((insn & (1 << 4)) == 0) {
2519 /* cdp */
2520 return 1;
2521 }
2522 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2523 return 1;
2524 }
cc688901
PM
2525
2526 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2527 * instructions rather than a separate instruction.
2528 */
2529 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2530 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2531 * In v7, this must NOP.
2532 */
2533 if (!arm_feature(env, ARM_FEATURE_V7)) {
2534 /* Wait for interrupt. */
2535 gen_set_pc_im(s->pc);
2536 s->is_jmp = DISAS_WFI;
2537 }
9332f9da
FB
2538 return 0;
2539 }
cc688901
PM
2540
2541 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2542 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2543 * so this is slightly over-broad.
2544 */
2545 if (!arm_feature(env, ARM_FEATURE_V6)) {
2546 /* Wait for interrupt. */
2547 gen_set_pc_im(s->pc);
2548 s->is_jmp = DISAS_WFI;
2549 return 0;
2550 }
2551 /* Otherwise fall through to handle via helper function.
2552 * In particular, on v7 and some v6 cores this is one of
2553 * the VA-PA registers.
2554 */
2555 }
2556
b5ff1b31 2557 rd = (insn >> 12) & 0xf;
3f26c122
RV
2558
2559 if (cp15_tls_load_store(env, s, insn, rd))
2560 return 0;
2561
b75263d6 2562 tmp2 = tcg_const_i32(insn);
18c9b560 2563 if (insn & ARM_CP_RW_BIT) {
7d1b0095 2564 tmp = tcg_temp_new_i32();
b75263d6 2565 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2566 /* If the destination register is r15 then sets condition codes. */
2567 if (rd != 15)
8984bd2e
PB
2568 store_reg(s, rd, tmp);
2569 else
7d1b0095 2570 tcg_temp_free_i32(tmp);
b5ff1b31 2571 } else {
8984bd2e 2572 tmp = load_reg(s, rd);
b75263d6 2573 gen_helper_set_cp15(cpu_env, tmp2, tmp);
7d1b0095 2574 tcg_temp_free_i32(tmp);
a90b7318
AZ
2575 /* Normally we would always end the TB here, but Linux
2576 * arch/arm/mach-pxa/sleep.S expects two instructions following
2577 * an MMU enable to execute from cache. Imitate this behaviour. */
2578 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2579 (insn & 0x0fff0fff) != 0x0e010f10)
2580 gen_lookup_tb(s);
b5ff1b31 2581 }
b75263d6 2582 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2583 return 0;
2584}
2585
9ee6e8bb
PB
2586#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2587#define VFP_SREG(insn, bigbit, smallbit) \
2588 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2589#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2590 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2591 reg = (((insn) >> (bigbit)) & 0x0f) \
2592 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2593 } else { \
2594 if (insn & (1 << (smallbit))) \
2595 return 1; \
2596 reg = ((insn) >> (bigbit)) & 0x0f; \
2597 }} while (0)
2598
2599#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2600#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2601#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2602#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2603#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2604#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
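/* Worked example: VFP_DREG_D yields insn[15:12] with insn[22] (the D bit) as
 * bit 4, giving D0-D31 on VFP3; on pre-VFP3 cores a set D bit is rejected
 * since only D0-D15 exist. VFP_SREG_D instead builds the single-precision
 * index as (insn[15:12] << 1) | insn[22], i.e. the extra bit is the LSB. */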
2605
4373f3ce
PB
2606/* Move between integer and VFP cores. */
2607static TCGv gen_vfp_mrs(void)
2608{
7d1b0095 2609 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2610 tcg_gen_mov_i32(tmp, cpu_F0s);
2611 return tmp;
2612}
2613
2614static void gen_vfp_msr(TCGv tmp)
2615{
2616 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2617 tcg_temp_free_i32(tmp);
4373f3ce
PB
2618}
2619
ad69471c
PB
2620static void gen_neon_dup_u8(TCGv var, int shift)
2621{
7d1b0095 2622 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2623 if (shift)
2624 tcg_gen_shri_i32(var, var, shift);
86831435 2625 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2626 tcg_gen_shli_i32(tmp, var, 8);
2627 tcg_gen_or_i32(var, var, tmp);
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2630 tcg_temp_free_i32(tmp);
ad69471c
PB
2631}
2632
2633static void gen_neon_dup_low16(TCGv var)
2634{
7d1b0095 2635 TCGv tmp = tcg_temp_new_i32();
86831435 2636 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2637 tcg_gen_shli_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2639 tcg_temp_free_i32(tmp);
ad69471c
PB
2640}
2641
2642static void gen_neon_dup_high16(TCGv var)
2643{
7d1b0095 2644 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2645 tcg_gen_andi_i32(var, var, 0xffff0000);
2646 tcg_gen_shri_i32(tmp, var, 16);
2647 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2648 tcg_temp_free_i32(tmp);
ad69471c
PB
2649}
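/* Note: the three dup helpers above replicate a narrow value across a 32-bit
 * word: gen_neon_dup_u8 broadcasts one byte (optionally shifted down first)
 * into all four byte lanes, and the low16/high16 variants copy the selected
 * halfword into both halves, backing the VDUP-style transfers below. */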
2650
b7bcbe95
FB
2651/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2652 (i.e. an undefined instruction). */
2653static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2654{
2655 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2656 int dp, veclen;
312eea9f 2657 TCGv addr;
4373f3ce 2658 TCGv tmp;
ad69471c 2659 TCGv tmp2;
b7bcbe95 2660
40f137e1
PB
2661 if (!arm_feature(env, ARM_FEATURE_VFP))
2662 return 1;
2663
5df8bac1 2664 if (!s->vfp_enabled) {
9ee6e8bb 2665 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2666 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2667 return 1;
2668 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2669 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2670 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2671 return 1;
2672 }
b7bcbe95
FB
2673 dp = ((insn & 0xf00) == 0xb00);
2674 switch ((insn >> 24) & 0xf) {
2675 case 0xe:
2676 if (insn & (1 << 4)) {
2677 /* single register transfer */
b7bcbe95
FB
2678 rd = (insn >> 12) & 0xf;
2679 if (dp) {
9ee6e8bb
PB
2680 int size;
2681 int pass;
2682
2683 VFP_DREG_N(rn, insn);
2684 if (insn & 0xf)
b7bcbe95 2685 return 1;
9ee6e8bb
PB
2686 if (insn & 0x00c00060
2687 && !arm_feature(env, ARM_FEATURE_NEON))
2688 return 1;
2689
2690 pass = (insn >> 21) & 1;
2691 if (insn & (1 << 22)) {
2692 size = 0;
2693 offset = ((insn >> 5) & 3) * 8;
2694 } else if (insn & (1 << 5)) {
2695 size = 1;
2696 offset = (insn & (1 << 6)) ? 16 : 0;
2697 } else {
2698 size = 2;
2699 offset = 0;
2700 }
18c9b560 2701 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2702 /* vfp->arm */
ad69471c 2703 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2704 switch (size) {
2705 case 0:
9ee6e8bb 2706 if (offset)
ad69471c 2707 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2708 if (insn & (1 << 23))
ad69471c 2709 gen_uxtb(tmp);
9ee6e8bb 2710 else
ad69471c 2711 gen_sxtb(tmp);
9ee6e8bb
PB
2712 break;
2713 case 1:
9ee6e8bb
PB
2714 if (insn & (1 << 23)) {
2715 if (offset) {
ad69471c 2716 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2717 } else {
ad69471c 2718 gen_uxth(tmp);
9ee6e8bb
PB
2719 }
2720 } else {
2721 if (offset) {
ad69471c 2722 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2723 } else {
ad69471c 2724 gen_sxth(tmp);
9ee6e8bb
PB
2725 }
2726 }
2727 break;
2728 case 2:
9ee6e8bb
PB
2729 break;
2730 }
ad69471c 2731 store_reg(s, rd, tmp);
b7bcbe95
FB
2732 } else {
2733 /* arm->vfp */
ad69471c 2734 tmp = load_reg(s, rd);
9ee6e8bb
PB
2735 if (insn & (1 << 23)) {
2736 /* VDUP */
2737 if (size == 0) {
ad69471c 2738 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2739 } else if (size == 1) {
ad69471c 2740 gen_neon_dup_low16(tmp);
9ee6e8bb 2741 }
cbbccffc 2742 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2743 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2744 tcg_gen_mov_i32(tmp2, tmp);
2745 neon_store_reg(rn, n, tmp2);
2746 }
2747 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2748 } else {
2749 /* VMOV */
2750 switch (size) {
2751 case 0:
ad69471c
PB
2752 tmp2 = neon_load_reg(rn, pass);
2753 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2754 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2755 break;
2756 case 1:
ad69471c
PB
2757 tmp2 = neon_load_reg(rn, pass);
2758 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2759 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2760 break;
2761 case 2:
9ee6e8bb
PB
2762 break;
2763 }
ad69471c 2764 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2765 }
b7bcbe95 2766 }
9ee6e8bb
PB
2767 } else { /* !dp */
2768 if ((insn & 0x6f) != 0x00)
2769 return 1;
2770 rn = VFP_SREG_N(insn);
18c9b560 2771 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2772 /* vfp->arm */
2773 if (insn & (1 << 21)) {
2774 /* system register */
40f137e1 2775 rn >>= 1;
9ee6e8bb 2776
b7bcbe95 2777 switch (rn) {
40f137e1 2778 case ARM_VFP_FPSID:
4373f3ce 2779 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2780 VFP3 restricts all id registers to privileged
2781 accesses. */
2782 if (IS_USER(s)
2783 && arm_feature(env, ARM_FEATURE_VFP3))
2784 return 1;
4373f3ce 2785 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2786 break;
40f137e1 2787 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2788 if (IS_USER(s))
2789 return 1;
4373f3ce 2790 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2791 break;
40f137e1
PB
2792 case ARM_VFP_FPINST:
2793 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2794 /* Not present in VFP3. */
2795 if (IS_USER(s)
2796 || arm_feature(env, ARM_FEATURE_VFP3))
2797 return 1;
4373f3ce 2798 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2799 break;
40f137e1 2800 case ARM_VFP_FPSCR:
601d70b9 2801 if (rd == 15) {
4373f3ce
PB
2802 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2803 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2804 } else {
7d1b0095 2805 tmp = tcg_temp_new_i32();
4373f3ce
PB
2806 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2807 }
b7bcbe95 2808 break;
9ee6e8bb
PB
2809 case ARM_VFP_MVFR0:
2810 case ARM_VFP_MVFR1:
2811 if (IS_USER(s)
2812 || !arm_feature(env, ARM_FEATURE_VFP3))
2813 return 1;
4373f3ce 2814 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2815 break;
b7bcbe95
FB
2816 default:
2817 return 1;
2818 }
2819 } else {
2820 gen_mov_F0_vreg(0, rn);
4373f3ce 2821 tmp = gen_vfp_mrs();
b7bcbe95
FB
2822 }
2823 if (rd == 15) {
b5ff1b31 2824 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2825 gen_set_nzcv(tmp);
7d1b0095 2826 tcg_temp_free_i32(tmp);
4373f3ce
PB
2827 } else {
2828 store_reg(s, rd, tmp);
2829 }
b7bcbe95
FB
2830 } else {
2831 /* arm->vfp */
4373f3ce 2832 tmp = load_reg(s, rd);
b7bcbe95 2833 if (insn & (1 << 21)) {
40f137e1 2834 rn >>= 1;
b7bcbe95
FB
2835 /* system register */
2836 switch (rn) {
40f137e1 2837 case ARM_VFP_FPSID:
9ee6e8bb
PB
2838 case ARM_VFP_MVFR0:
2839 case ARM_VFP_MVFR1:
b7bcbe95
FB
2840 /* Writes are ignored. */
2841 break;
40f137e1 2842 case ARM_VFP_FPSCR:
4373f3ce 2843 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2844 tcg_temp_free_i32(tmp);
b5ff1b31 2845 gen_lookup_tb(s);
b7bcbe95 2846 break;
40f137e1 2847 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2848 if (IS_USER(s))
2849 return 1;
71b3c3de
JR
2850 /* TODO: VFP subarchitecture support.
2851 * For now, keep the EN bit only */
2852 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2853 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2854 gen_lookup_tb(s);
2855 break;
2856 case ARM_VFP_FPINST:
2857 case ARM_VFP_FPINST2:
4373f3ce 2858 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2859 break;
b7bcbe95
FB
2860 default:
2861 return 1;
2862 }
2863 } else {
4373f3ce 2864 gen_vfp_msr(tmp);
b7bcbe95
FB
2865 gen_mov_vreg_F0(0, rn);
2866 }
2867 }
2868 }
2869 } else {
2870 /* data processing */
2871 /* The opcode is in bits 23, 21, 20 and 6. */
2872 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2873 if (dp) {
2874 if (op == 15) {
2875 /* rn is opcode */
2876 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2877 } else {
2878 /* rn is register number */
9ee6e8bb 2879 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2880 }
2881
04595bf6 2882 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2883 /* Integer or single precision destination. */
9ee6e8bb 2884 rd = VFP_SREG_D(insn);
b7bcbe95 2885 } else {
9ee6e8bb 2886 VFP_DREG_D(rd, insn);
b7bcbe95 2887 }
04595bf6
PM
2888 if (op == 15 &&
2889 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2890 /* VCVT from int is always from S reg regardless of dp bit.
2891 * VCVT with immediate frac_bits has same format as SREG_M
2892 */
2893 rm = VFP_SREG_M(insn);
b7bcbe95 2894 } else {
9ee6e8bb 2895 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2896 }
2897 } else {
9ee6e8bb 2898 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2899 if (op == 15 && rn == 15) {
2900 /* Double precision destination. */
9ee6e8bb
PB
2901 VFP_DREG_D(rd, insn);
2902 } else {
2903 rd = VFP_SREG_D(insn);
2904 }
04595bf6
PM
2905 /* NB that we implicitly rely on the encoding for the frac_bits
2906 * in VCVT of fixed to float being the same as that of an SREG_M
2907 */
9ee6e8bb 2908 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2909 }
2910
69d1fc22 2911 veclen = s->vec_len;
b7bcbe95
FB
2912 if (op == 15 && rn > 3)
2913 veclen = 0;
2914
2915 /* Shut up compiler warnings. */
2916 delta_m = 0;
2917 delta_d = 0;
2918 bank_mask = 0;
3b46e624 2919
b7bcbe95
FB
2920 if (veclen > 0) {
2921 if (dp)
2922 bank_mask = 0xc;
2923 else
2924 bank_mask = 0x18;
2925
2926 /* Figure out what type of vector operation this is. */
2927 if ((rd & bank_mask) == 0) {
2928 /* scalar */
2929 veclen = 0;
2930 } else {
2931 if (dp)
69d1fc22 2932 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2933 else
69d1fc22 2934 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2935
2936 if ((rm & bank_mask) == 0) {
2937 /* mixed scalar/vector */
2938 delta_m = 0;
2939 } else {
2940 /* vector */
2941 delta_m = delta_d;
2942 }
2943 }
2944 }
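            /* Note: this implements the VFP "short vector" rules: bank_mask of
             * 0xc (doubles) or 0x18 (singles) divides the register file into
             * banks of 4 or 8, a destination in bank 0 makes the op scalar,
             * and otherwise the loop below steps rd/rn by delta_d (from the
             * FPSCR stride in s->vec_stride) and rm by delta_m, wrapping
             * within the bank. */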
2945
2946 /* Load the initial operands. */
2947 if (op == 15) {
2948 switch (rn) {
2949 case 16:
2950 case 17:
2951 /* Integer source */
2952 gen_mov_F0_vreg(0, rm);
2953 break;
2954 case 8:
2955 case 9:
2956 /* Compare */
2957 gen_mov_F0_vreg(dp, rd);
2958 gen_mov_F1_vreg(dp, rm);
2959 break;
2960 case 10:
2961 case 11:
2962 /* Compare with zero */
2963 gen_mov_F0_vreg(dp, rd);
2964 gen_vfp_F1_ld0(dp);
2965 break;
9ee6e8bb
PB
2966 case 20:
2967 case 21:
2968 case 22:
2969 case 23:
644ad806
PB
2970 case 28:
2971 case 29:
2972 case 30:
2973 case 31:
9ee6e8bb
PB
2974 /* Source and destination the same. */
2975 gen_mov_F0_vreg(dp, rd);
2976 break;
b7bcbe95
FB
2977 default:
2978 /* One source operand. */
2979 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2980 break;
b7bcbe95
FB
2981 }
2982 } else {
2983 /* Two source operands. */
2984 gen_mov_F0_vreg(dp, rn);
2985 gen_mov_F1_vreg(dp, rm);
2986 }
2987
2988 for (;;) {
2989 /* Perform the calculation. */
2990 switch (op) {
2991 case 0: /* mac: fd + (fn * fm) */
2992 gen_vfp_mul(dp);
2993 gen_mov_F1_vreg(dp, rd);
2994 gen_vfp_add(dp);
2995 break;
2996 case 1: /* nmac: fd - (fn * fm) */
2997 gen_vfp_mul(dp);
2998 gen_vfp_neg(dp);
2999 gen_mov_F1_vreg(dp, rd);
3000 gen_vfp_add(dp);
3001 break;
3002 case 2: /* msc: -fd + (fn * fm) */
3003 gen_vfp_mul(dp);
3004 gen_mov_F1_vreg(dp, rd);
3005 gen_vfp_sub(dp);
3006 break;
3007 case 3: /* nmsc: -fd - (fn * fm) */
3008 gen_vfp_mul(dp);
b7bcbe95 3009 gen_vfp_neg(dp);
c9fb531a
PB
3010 gen_mov_F1_vreg(dp, rd);
3011 gen_vfp_sub(dp);
b7bcbe95
FB
3012 break;
3013 case 4: /* mul: fn * fm */
3014 gen_vfp_mul(dp);
3015 break;
3016 case 5: /* nmul: -(fn * fm) */
3017 gen_vfp_mul(dp);
3018 gen_vfp_neg(dp);
3019 break;
3020 case 6: /* add: fn + fm */
3021 gen_vfp_add(dp);
3022 break;
3023 case 7: /* sub: fn - fm */
3024 gen_vfp_sub(dp);
3025 break;
3026 case 8: /* div: fn / fm */
3027 gen_vfp_div(dp);
3028 break;
9ee6e8bb
PB
3029 case 14: /* fconst */
3030 if (!arm_feature(env, ARM_FEATURE_VFP3))
3031 return 1;
3032
3033 n = (insn << 12) & 0x80000000;
3034 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3035 if (dp) {
3036 if (i & 0x40)
3037 i |= 0x3f80;
3038 else
3039 i |= 0x4000;
3040 n |= i << 16;
4373f3ce 3041 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3042 } else {
3043 if (i & 0x40)
3044 i |= 0x780;
3045 else
3046 i |= 0x800;
3047 n |= i << 19;
5b340b51 3048 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3049 }
9ee6e8bb 3050 break;
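                /* Note: the fconst case above assembles the VFPv3 VMOV
                 * (immediate) pattern by hand: insn bit 19 is the sign,
                 * insn[18:16] and insn[3:0] form the rest of the 8-bit
                 * immediate, and the exponent field is padded to single or
                 * double width before the constant is loaded into cpu_F0s or
                 * cpu_F0d. */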
b7bcbe95
FB
3051 case 15: /* extension space */
3052 switch (rn) {
3053 case 0: /* cpy */
3054 /* no-op */
3055 break;
3056 case 1: /* abs */
3057 gen_vfp_abs(dp);
3058 break;
3059 case 2: /* neg */
3060 gen_vfp_neg(dp);
3061 break;
3062 case 3: /* sqrt */
3063 gen_vfp_sqrt(dp);
3064 break;
60011498
PB
3065 case 4: /* vcvtb.f32.f16 */
3066 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3067 return 1;
3068 tmp = gen_vfp_mrs();
3069 tcg_gen_ext16u_i32(tmp, tmp);
3070 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3071 tcg_temp_free_i32(tmp);
60011498
PB
3072 break;
3073 case 5: /* vcvtt.f32.f16 */
3074 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3075 return 1;
3076 tmp = gen_vfp_mrs();
3077 tcg_gen_shri_i32(tmp, tmp, 16);
3078 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3079 tcg_temp_free_i32(tmp);
60011498
PB
3080 break;
3081 case 6: /* vcvtb.f16.f32 */
3082 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3083 return 1;
7d1b0095 3084 tmp = tcg_temp_new_i32();
60011498
PB
3085 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3086 gen_mov_F0_vreg(0, rd);
3087 tmp2 = gen_vfp_mrs();
3088 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3089 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3090 tcg_temp_free_i32(tmp2);
60011498
PB
3091 gen_vfp_msr(tmp);
3092 break;
3093 case 7: /* vcvtt.f16.f32 */
3094 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3095 return 1;
7d1b0095 3096 tmp = tcg_temp_new_i32();
60011498
PB
3097 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3098 tcg_gen_shli_i32(tmp, tmp, 16);
3099 gen_mov_F0_vreg(0, rd);
3100 tmp2 = gen_vfp_mrs();
3101 tcg_gen_ext16u_i32(tmp2, tmp2);
3102 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3103 tcg_temp_free_i32(tmp2);
60011498
PB
3104 gen_vfp_msr(tmp);
3105 break;
b7bcbe95
FB
3106 case 8: /* cmp */
3107 gen_vfp_cmp(dp);
3108 break;
3109 case 9: /* cmpe */
3110 gen_vfp_cmpe(dp);
3111 break;
3112 case 10: /* cmpz */
3113 gen_vfp_cmp(dp);
3114 break;
3115 case 11: /* cmpez */
3116 gen_vfp_F1_ld0(dp);
3117 gen_vfp_cmpe(dp);
3118 break;
3119 case 15: /* single<->double conversion */
3120 if (dp)
4373f3ce 3121 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3122 else
4373f3ce 3123 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3124 break;
3125 case 16: /* fuito */
3126 gen_vfp_uito(dp);
3127 break;
3128 case 17: /* fsito */
3129 gen_vfp_sito(dp);
3130 break;
9ee6e8bb
PB
3131 case 20: /* fshto */
3132 if (!arm_feature(env, ARM_FEATURE_VFP3))
3133 return 1;
644ad806 3134 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3135 break;
3136 case 21: /* fslto */
3137 if (!arm_feature(env, ARM_FEATURE_VFP3))
3138 return 1;
644ad806 3139 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3140 break;
3141 case 22: /* fuhto */
3142 if (!arm_feature(env, ARM_FEATURE_VFP3))
3143 return 1;
644ad806 3144 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3145 break;
3146 case 23: /* fulto */
3147 if (!arm_feature(env, ARM_FEATURE_VFP3))
3148 return 1;
644ad806 3149 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3150 break;
b7bcbe95
FB
3151 case 24: /* ftoui */
3152 gen_vfp_toui(dp);
3153 break;
3154 case 25: /* ftouiz */
3155 gen_vfp_touiz(dp);
3156 break;
3157 case 26: /* ftosi */
3158 gen_vfp_tosi(dp);
3159 break;
3160 case 27: /* ftosiz */
3161 gen_vfp_tosiz(dp);
3162 break;
9ee6e8bb
PB
3163 case 28: /* ftosh */
3164 if (!arm_feature(env, ARM_FEATURE_VFP3))
3165 return 1;
644ad806 3166 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3167 break;
3168 case 29: /* ftosl */
3169 if (!arm_feature(env, ARM_FEATURE_VFP3))
3170 return 1;
644ad806 3171 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3172 break;
3173 case 30: /* ftouh */
3174 if (!arm_feature(env, ARM_FEATURE_VFP3))
3175 return 1;
644ad806 3176 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3177 break;
3178 case 31: /* ftoul */
3179 if (!arm_feature(env, ARM_FEATURE_VFP3))
3180 return 1;
644ad806 3181 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3182 break;
b7bcbe95
FB
3183 default: /* undefined */
3184 printf ("rn:%d\n", rn);
3185 return 1;
3186 }
3187 break;
3188 default: /* undefined */
3189 printf ("op:%d\n", op);
3190 return 1;
3191 }
3192
3193 /* Write back the result. */
3194 if (op == 15 && (rn >= 8 && rn <= 11))
3195 ; /* Comparison, do nothing. */
04595bf6
PM
3196 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3197 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3198 gen_mov_vreg_F0(0, rd);
3199 else if (op == 15 && rn == 15)
3200 /* conversion */
3201 gen_mov_vreg_F0(!dp, rd);
3202 else
3203 gen_mov_vreg_F0(dp, rd);
3204
3205 /* break out of the loop if we have finished */
3206 if (veclen == 0)
3207 break;
3208
3209 if (op == 15 && delta_m == 0) {
3210 /* single source one-many */
3211 while (veclen--) {
3212 rd = ((rd + delta_d) & (bank_mask - 1))
3213 | (rd & bank_mask);
3214 gen_mov_vreg_F0(dp, rd);
3215 }
3216 break;
3217 }
3218 /* Setup the next operands. */
3219 veclen--;
3220 rd = ((rd + delta_d) & (bank_mask - 1))
3221 | (rd & bank_mask);
3222
3223 if (op == 15) {
3224 /* One source operand. */
3225 rm = ((rm + delta_m) & (bank_mask - 1))
3226 | (rm & bank_mask);
3227 gen_mov_F0_vreg(dp, rm);
3228 } else {
3229 /* Two source operands. */
3230 rn = ((rn + delta_d) & (bank_mask - 1))
3231 | (rn & bank_mask);
3232 gen_mov_F0_vreg(dp, rn);
3233 if (delta_m) {
3234 rm = ((rm + delta_m) & (bank_mask - 1))
3235 | (rm & bank_mask);
3236 gen_mov_F1_vreg(dp, rm);
3237 }
3238 }
3239 }
3240 }
3241 break;
3242 case 0xc:
3243 case 0xd:
8387da81 3244 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3245 /* two-register transfer */
3246 rn = (insn >> 16) & 0xf;
3247 rd = (insn >> 12) & 0xf;
3248 if (dp) {
9ee6e8bb
PB
3249 VFP_DREG_M(rm, insn);
3250 } else {
3251 rm = VFP_SREG_M(insn);
3252 }
b7bcbe95 3253
18c9b560 3254 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3255 /* vfp->arm */
3256 if (dp) {
4373f3ce
PB
3257 gen_mov_F0_vreg(0, rm * 2);
3258 tmp = gen_vfp_mrs();
3259 store_reg(s, rd, tmp);
3260 gen_mov_F0_vreg(0, rm * 2 + 1);
3261 tmp = gen_vfp_mrs();
3262 store_reg(s, rn, tmp);
b7bcbe95
FB
3263 } else {
3264 gen_mov_F0_vreg(0, rm);
4373f3ce 3265 tmp = gen_vfp_mrs();
8387da81 3266 store_reg(s, rd, tmp);
b7bcbe95 3267 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3268 tmp = gen_vfp_mrs();
8387da81 3269 store_reg(s, rn, tmp);
b7bcbe95
FB
3270 }
3271 } else {
3272 /* arm->vfp */
3273 if (dp) {
4373f3ce
PB
3274 tmp = load_reg(s, rd);
3275 gen_vfp_msr(tmp);
3276 gen_mov_vreg_F0(0, rm * 2);
3277 tmp = load_reg(s, rn);
3278 gen_vfp_msr(tmp);
3279 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3280 } else {
8387da81 3281 tmp = load_reg(s, rd);
4373f3ce 3282 gen_vfp_msr(tmp);
b7bcbe95 3283 gen_mov_vreg_F0(0, rm);
8387da81 3284 tmp = load_reg(s, rn);
4373f3ce 3285 gen_vfp_msr(tmp);
b7bcbe95
FB
3286 gen_mov_vreg_F0(0, rm + 1);
3287 }
3288 }
3289 } else {
3290 /* Load/store */
3291 rn = (insn >> 16) & 0xf;
3292 if (dp)
9ee6e8bb 3293 VFP_DREG_D(rd, insn);
b7bcbe95 3294 else
9ee6e8bb
PB
3295 rd = VFP_SREG_D(insn);
3296 if (s->thumb && rn == 15) {
7d1b0095 3297 addr = tcg_temp_new_i32();
312eea9f 3298 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3299 } else {
312eea9f 3300 addr = load_reg(s, rn);
9ee6e8bb 3301 }
b7bcbe95
FB
3302 if ((insn & 0x01200000) == 0x01000000) {
3303 /* Single load/store */
3304 offset = (insn & 0xff) << 2;
3305 if ((insn & (1 << 23)) == 0)
3306 offset = -offset;
312eea9f 3307 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3308 if (insn & (1 << 20)) {
312eea9f 3309 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3310 gen_mov_vreg_F0(dp, rd);
3311 } else {
3312 gen_mov_F0_vreg(dp, rd);
312eea9f 3313 gen_vfp_st(s, dp, addr);
b7bcbe95 3314 }
7d1b0095 3315 tcg_temp_free_i32(addr);
b7bcbe95
FB
3316 } else {
3317 /* load/store multiple */
3318 if (dp)
3319 n = (insn >> 1) & 0x7f;
3320 else
3321 n = insn & 0xff;
3322
3323 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3324 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3325
3326 if (dp)
3327 offset = 8;
3328 else
3329 offset = 4;
3330 for (i = 0; i < n; i++) {
18c9b560 3331 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3332 /* load */
312eea9f 3333 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3334 gen_mov_vreg_F0(dp, rd + i);
3335 } else {
3336 /* store */
3337 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3338 gen_vfp_st(s, dp, addr);
b7bcbe95 3339 }
312eea9f 3340 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3341 }
3342 if (insn & (1 << 21)) {
3343 /* writeback */
3344 if (insn & (1 << 24))
3345 offset = -offset * n;
3346 else if (dp && (insn & 1))
3347 offset = 4;
3348 else
3349 offset = 0;
3350
3351 if (offset != 0)
312eea9f
FN
3352 tcg_gen_addi_i32(addr, addr, offset);
3353 store_reg(s, rn, addr);
3354 } else {
7d1b0095 3355 tcg_temp_free_i32(addr);
b7bcbe95
FB
3356 }
3357 }
3358 }
3359 break;
3360 default:
3361 /* Should never happen. */
3362 return 1;
3363 }
3364 return 0;
3365}
3366
6e256c93 3367static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3368{
6e256c93
FB
3369 TranslationBlock *tb;
3370
3371 tb = s->tb;
3372 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3373 tcg_gen_goto_tb(n);
8984bd2e 3374 gen_set_pc_im(dest);
57fec1fe 3375 tcg_gen_exit_tb((long)tb + n);
6e256c93 3376 } else {
8984bd2e 3377 gen_set_pc_im(dest);
57fec1fe 3378 tcg_gen_exit_tb(0);
6e256c93 3379 }
c53be334
FB
3380}
3381
8aaca4c0
FB
3382static inline void gen_jmp (DisasContext *s, uint32_t dest)
3383{
551bd27f 3384 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3385 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3386 if (s->thumb)
d9ba4830
PB
3387 dest |= 1;
3388 gen_bx_im(s, dest);
8aaca4c0 3389 } else {
6e256c93 3390 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3391 s->is_jmp = DISAS_TB_JUMP;
3392 }
3393}
3394
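/* Signed 16x16->32 multiply used by the SMULxy/SMLAxy family: x and y select
 * whether the top or bottom halfword of each operand is used, and the chosen
 * half is sign-extended before the multiply. */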
d9ba4830 3395static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3396{
ee097184 3397 if (x)
d9ba4830 3398 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3399 else
d9ba4830 3400 gen_sxth(t0);
ee097184 3401 if (y)
d9ba4830 3402 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3403 else
d9ba4830
PB
3404 gen_sxth(t1);
3405 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3406}
3407
3408/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3409static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3410 uint32_t mask;
3411
3412 mask = 0;
3413 if (flags & (1 << 0))
3414 mask |= 0xff;
3415 if (flags & (1 << 1))
3416 mask |= 0xff00;
3417 if (flags & (1 << 2))
3418 mask |= 0xff0000;
3419 if (flags & (1 << 3))
3420 mask |= 0xff000000;
9ee6e8bb 3421
2ae23e75 3422 /* Mask out undefined bits. */
9ee6e8bb
PB
3423 mask &= ~CPSR_RESERVED;
3424 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3425 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3426 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3427 mask &= ~CPSR_IT;
9ee6e8bb 3428 /* Mask out execution state bits. */
2ae23e75 3429 if (!spsr)
e160c51c 3430 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3431 /* Mask out privileged bits. */
3432 if (IS_USER(s))
9ee6e8bb 3433 mask &= CPSR_USER;
b5ff1b31
FB
3434 return mask;
3435}
3436
2fbac54b
FN
3437/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3438static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3439{
d9ba4830 3440 TCGv tmp;
b5ff1b31
FB
3441 if (spsr) {
3442 /* ??? This is also undefined in system mode. */
3443 if (IS_USER(s))
3444 return 1;
d9ba4830
PB
3445
3446 tmp = load_cpu_field(spsr);
3447 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3448 tcg_gen_andi_i32(t0, t0, mask);
3449 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3450 store_cpu_field(tmp, spsr);
b5ff1b31 3451 } else {
2fbac54b 3452 gen_set_cpsr(t0, mask);
b5ff1b31 3453 }
7d1b0095 3454 tcg_temp_free_i32(t0);
b5ff1b31
FB
3455 gen_lookup_tb(s);
3456 return 0;
3457}
3458
2fbac54b
FN
3459/* Returns nonzero if access to the PSR is not permitted. */
3460static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3461{
3462 TCGv tmp;
7d1b0095 3463 tmp = tcg_temp_new_i32();
2fbac54b
FN
3464 tcg_gen_movi_i32(tmp, val);
3465 return gen_set_psr(s, mask, spsr, tmp);
3466}
3467
e9bb4aa9
JR
3468/* Generate an old-style exception return. Marks pc as dead. */
3469static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3470{
d9ba4830 3471 TCGv tmp;
e9bb4aa9 3472 store_reg(s, 15, pc);
d9ba4830
PB
3473 tmp = load_cpu_field(spsr);
3474 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3475 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3476 s->is_jmp = DISAS_UPDATE;
3477}
3478
b0109805
PB
3479/* Generate a v6 exception return. Marks both values as dead. */
3480static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3481{
b0109805 3482 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3483 tcg_temp_free_i32(cpsr);
b0109805 3484 store_reg(s, 15, pc);
9ee6e8bb
PB
3485 s->is_jmp = DISAS_UPDATE;
3486}
3b46e624 3487
9ee6e8bb
PB
3488static inline void
3489gen_set_condexec (DisasContext *s)
3490{
3491 if (s->condexec_mask) {
8f01245e 3492 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3493 TCGv tmp = tcg_temp_new_i32();
8f01245e 3494 tcg_gen_movi_i32(tmp, val);
d9ba4830 3495 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3496 }
3497}
3b46e624 3498
bc4a0de0
PM
3499static void gen_exception_insn(DisasContext *s, int offset, int excp)
3500{
3501 gen_set_condexec(s);
3502 gen_set_pc_im(s->pc - offset);
3503 gen_exception(excp);
3504 s->is_jmp = DISAS_JUMP;
3505}
3506
9ee6e8bb
PB
3507static void gen_nop_hint(DisasContext *s, int val)
3508{
3509 switch (val) {
3510 case 3: /* wfi */
8984bd2e 3511 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3512 s->is_jmp = DISAS_WFI;
3513 break;
3514 case 2: /* wfe */
3515 case 4: /* sev */
3516 /* TODO: Implement SEV and WFE. May help SMP performance. */
3517 default: /* nop */
3518 break;
3519 }
3520}
99c475ab 3521
ad69471c 3522#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3523
dd8fbd78 3524static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3525{
3526 switch (size) {
dd8fbd78
FN
3527 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3528 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3529 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3530 default: return 1;
3531 }
3532 return 0;
3533}
3534
dd8fbd78 3535static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3536{
3537 switch (size) {
dd8fbd78
FN
3538 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3539 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3540 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3541 default: return;
3542 }
3543}
3544
3545/* 32-bit pairwise ops end up the same as the elementwise versions. */
3546#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3547#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3548#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3549#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3550
ad69471c
PB
3551#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3552 switch ((size << 1) | u) { \
3553 case 0: \
dd8fbd78 3554 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3555 break; \
3556 case 1: \
dd8fbd78 3557 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3558 break; \
3559 case 2: \
dd8fbd78 3560 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3561 break; \
3562 case 3: \
dd8fbd78 3563 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3564 break; \
3565 case 4: \
dd8fbd78 3566 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3567 break; \
3568 case 5: \
dd8fbd78 3569 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3570 break; \
3571 default: return 1; \
3572 }} while (0)
9ee6e8bb
PB
3573
3574#define GEN_NEON_INTEGER_OP(name) do { \
3575 switch ((size << 1) | u) { \
ad69471c 3576 case 0: \
dd8fbd78 3577 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3578 break; \
3579 case 1: \
dd8fbd78 3580 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3581 break; \
3582 case 2: \
dd8fbd78 3583 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3584 break; \
3585 case 3: \
dd8fbd78 3586 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3587 break; \
3588 case 4: \
dd8fbd78 3589 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3590 break; \
3591 case 5: \
dd8fbd78 3592 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3593 break; \
9ee6e8bb
PB
3594 default: return 1; \
3595 }} while (0)
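/* Note: both macros dispatch on (size << 1) | u, so cases 0-5 are the
 * signed/unsigned 8-, 16- and 32-bit element helpers; anything else falls
 * through to "return 1" and is rejected by the caller. The _ENV variant is
 * for helpers that also take cpu_env, e.g. the saturating ops that set QC. */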
3596
dd8fbd78 3597static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3598{
7d1b0095 3599 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3600 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3601 return tmp;
9ee6e8bb
PB
3602}
3603
dd8fbd78 3604static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3605{
dd8fbd78 3606 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3607 tcg_temp_free_i32(var);
9ee6e8bb
PB
3608}
3609
dd8fbd78 3610static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3611{
dd8fbd78 3612 TCGv tmp;
9ee6e8bb 3613 if (size == 1) {
0fad6efc
PM
3614 tmp = neon_load_reg(reg & 7, reg >> 4);
3615 if (reg & 8) {
dd8fbd78 3616 gen_neon_dup_high16(tmp);
0fad6efc
PM
3617 } else {
3618 gen_neon_dup_low16(tmp);
dd8fbd78 3619 }
0fad6efc
PM
3620 } else {
3621 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3622 }
dd8fbd78 3623 return tmp;
9ee6e8bb
PB
3624}
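/* Note: neon_get_scalar() fetches the scalar operand for the "by element"
 * multiply forms; for 16-bit scalars the low three bits of reg pick the D
 * register and the remaining bits the element, and the chosen halfword is
 * duplicated into both halves so the ordinary 32-bit helpers can be reused. */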
3625
02acedf9 3626static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3627{
02acedf9
PM
3628 TCGv tmp, tmp2;
3629 if (size == 3 || (!q && size == 2)) {
3630 return 1;
3631 }
3632 tmp = tcg_const_i32(rd);
3633 tmp2 = tcg_const_i32(rm);
3634 if (q) {
3635 switch (size) {
3636 case 0:
3637 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3638 break;
3639 case 1:
3640 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3641 break;
3642 case 2:
3643 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3644 break;
3645 default:
3646 abort();
3647 }
3648 } else {
3649 switch (size) {
3650 case 0:
3651 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3652 break;
3653 case 1:
3654 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3655 break;
3656 default:
3657 abort();
3658 }
3659 }
3660 tcg_temp_free_i32(tmp);
3661 tcg_temp_free_i32(tmp2);
3662 return 0;
19457615
FN
3663}
3664
d68a6f3a 3665static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3666{
3667 TCGv tmp, tmp2;
d68a6f3a
PM
3668 if (size == 3 || (!q && size == 2)) {
3669 return 1;
3670 }
3671 tmp = tcg_const_i32(rd);
3672 tmp2 = tcg_const_i32(rm);
3673 if (q) {
3674 switch (size) {
3675 case 0:
3676 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3677 break;
3678 case 1:
3679 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3680 break;
3681 case 2:
3682 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3683 break;
3684 default:
3685 abort();
3686 }
3687 } else {
3688 switch (size) {
3689 case 0:
3690 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3691 break;
3692 case 1:
3693 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3694 break;
3695 default:
3696 abort();
3697 }
3698 }
3699 tcg_temp_free_i32(tmp);
3700 tcg_temp_free_i32(tmp2);
3701 return 0;
19457615
FN
3702}
3703
19457615
FN
3704static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3705{
3706 TCGv rd, tmp;
3707
7d1b0095
PM
3708 rd = tcg_temp_new_i32();
3709 tmp = tcg_temp_new_i32();
19457615
FN
3710
3711 tcg_gen_shli_i32(rd, t0, 8);
3712 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3713 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3714 tcg_gen_or_i32(rd, rd, tmp);
3715
3716 tcg_gen_shri_i32(t1, t1, 8);
3717 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3718 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3719 tcg_gen_or_i32(t1, t1, tmp);
3720 tcg_gen_mov_i32(t0, rd);
3721
7d1b0095
PM
3722 tcg_temp_free_i32(tmp);
3723 tcg_temp_free_i32(rd);
19457615
FN
3724}
3725
3726static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3727{
3728 TCGv rd, tmp;
3729
7d1b0095
PM
3730 rd = tcg_temp_new_i32();
3731 tmp = tcg_temp_new_i32();
19457615
FN
3732
3733 tcg_gen_shli_i32(rd, t0, 16);
3734 tcg_gen_andi_i32(tmp, t1, 0xffff);
3735 tcg_gen_or_i32(rd, rd, tmp);
3736 tcg_gen_shri_i32(t1, t1, 16);
3737 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3738 tcg_gen_or_i32(t1, t1, tmp);
3739 tcg_gen_mov_i32(t0, rd);
3740
7d1b0095
PM
3741 tcg_temp_free_i32(tmp);
3742 tcg_temp_free_i32(rd);
19457615
FN
3743}
3744
3745
9ee6e8bb
PB
3746static struct {
3747 int nregs;
3748 int interleave;
3749 int spacing;
3750} neon_ls_element_type[11] = {
3751 {4, 4, 1},
3752 {4, 4, 2},
3753 {4, 1, 1},
3754 {4, 2, 1},
3755 {3, 3, 1},
3756 {3, 3, 2},
3757 {3, 1, 1},
3758 {1, 1, 1},
3759 {2, 2, 1},
3760 {2, 2, 2},
3761 {2, 1, 1}
3762};
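/* Note: neon_ls_element_type[] is indexed by the op field of the Neon
 * "load/store multiple structures" encodings: nregs is the number of D
 * registers transferred, interleave describes how elements are interleaved in
 * memory, and spacing is the register-number stride between the transferred
 * registers (1 = consecutive, 2 = every other register). */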
3763
3764/* Translate a NEON load/store element instruction. Return nonzero if the
3765 instruction is invalid. */
3766static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3767{
3768 int rd, rn, rm;
3769 int op;
3770 int nregs;
3771 int interleave;
84496233 3772 int spacing;
9ee6e8bb
PB
3773 int stride;
3774 int size;
3775 int reg;
3776 int pass;
3777 int load;
3778 int shift;
9ee6e8bb 3779 int n;
1b2b1e54 3780 TCGv addr;
b0109805 3781 TCGv tmp;
8f8e3aa4 3782 TCGv tmp2;
84496233 3783 TCGv_i64 tmp64;
9ee6e8bb 3784
5df8bac1 3785 if (!s->vfp_enabled)
9ee6e8bb
PB
3786 return 1;
3787 VFP_DREG_D(rd, insn);
3788 rn = (insn >> 16) & 0xf;
3789 rm = insn & 0xf;
3790 load = (insn & (1 << 21)) != 0;
7d1b0095 3791 addr = tcg_temp_new_i32();
9ee6e8bb
PB
3792 if ((insn & (1 << 23)) == 0) {
3793 /* Load store all elements. */
3794 op = (insn >> 8) & 0xf;
3795 size = (insn >> 6) & 3;
84496233 3796 if (op > 10)
9ee6e8bb
PB
3797 return 1;
3798 nregs = neon_ls_element_type[op].nregs;
3799 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3800 spacing = neon_ls_element_type[op].spacing;
3801 if (size == 3 && (interleave | spacing) != 1)
3802 return 1;
dcc65026 3803 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3804 stride = (1 << size) * interleave;
3805 for (reg = 0; reg < nregs; reg++) {
3806 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3807 load_reg_var(s, addr, rn);
3808 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3809 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3810 load_reg_var(s, addr, rn);
3811 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3812 }
84496233
JR
3813 if (size == 3) {
3814 if (load) {
3815 tmp64 = gen_ld64(addr, IS_USER(s));
3816 neon_store_reg64(tmp64, rd);
3817 tcg_temp_free_i64(tmp64);
3818 } else {
3819 tmp64 = tcg_temp_new_i64();
3820 neon_load_reg64(tmp64, rd);
3821 gen_st64(tmp64, addr, IS_USER(s));
3822 }
3823 tcg_gen_addi_i32(addr, addr, stride);
3824 } else {
3825 for (pass = 0; pass < 2; pass++) {
3826 if (size == 2) {
3827 if (load) {
3828 tmp = gen_ld32(addr, IS_USER(s));
3829 neon_store_reg(rd, pass, tmp);
3830 } else {
3831 tmp = neon_load_reg(rd, pass);
3832 gen_st32(tmp, addr, IS_USER(s));
3833 }
1b2b1e54 3834 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3835 } else if (size == 1) {
3836 if (load) {
3837 tmp = gen_ld16u(addr, IS_USER(s));
3838 tcg_gen_addi_i32(addr, addr, stride);
3839 tmp2 = gen_ld16u(addr, IS_USER(s));
3840 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3841 tcg_gen_shli_i32(tmp2, tmp2, 16);
3842 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3843 tcg_temp_free_i32(tmp2);
84496233
JR
3844 neon_store_reg(rd, pass, tmp);
3845 } else {
3846 tmp = neon_load_reg(rd, pass);
7d1b0095 3847 tmp2 = tcg_temp_new_i32();
84496233
JR
3848 tcg_gen_shri_i32(tmp2, tmp, 16);
3849 gen_st16(tmp, addr, IS_USER(s));
3850 tcg_gen_addi_i32(addr, addr, stride);
3851 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3852 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3853 }
84496233
JR
3854 } else /* size == 0 */ {
3855 if (load) {
3856 TCGV_UNUSED(tmp2);
3857 for (n = 0; n < 4; n++) {
3858 tmp = gen_ld8u(addr, IS_USER(s));
3859 tcg_gen_addi_i32(addr, addr, stride);
3860 if (n == 0) {
3861 tmp2 = tmp;
3862 } else {
41ba8341
PB
3863 tcg_gen_shli_i32(tmp, tmp, n * 8);
3864 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3865 tcg_temp_free_i32(tmp);
84496233 3866 }
9ee6e8bb 3867 }
84496233
JR
3868 neon_store_reg(rd, pass, tmp2);
3869 } else {
3870 tmp2 = neon_load_reg(rd, pass);
3871 for (n = 0; n < 4; n++) {
7d1b0095 3872 tmp = tcg_temp_new_i32();
84496233
JR
3873 if (n == 0) {
3874 tcg_gen_mov_i32(tmp, tmp2);
3875 } else {
3876 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3877 }
3878 gen_st8(tmp, addr, IS_USER(s));
3879 tcg_gen_addi_i32(addr, addr, stride);
3880 }
7d1b0095 3881 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3882 }
3883 }
3884 }
3885 }
84496233 3886 rd += spacing;
9ee6e8bb
PB
3887 }
3888 stride = nregs * 8;
3889 } else {
3890 size = (insn >> 10) & 3;
3891 if (size == 3) {
3892 /* Load single element to all lanes. */
3893 if (!load)
3894 return 1;
3895 size = (insn >> 6) & 3;
3896 nregs = ((insn >> 8) & 3) + 1;
3897 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3898 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3899 for (reg = 0; reg < nregs; reg++) {
3900 switch (size) {
3901 case 0:
1b2b1e54 3902 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3903 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3904 break;
3905 case 1:
1b2b1e54 3906 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3907 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3908 break;
3909 case 2:
1b2b1e54 3910 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3911 break;
3912 case 3:
3913 return 1;
a50f5b91
PB
3914 default: /* Avoid compiler warnings. */
3915 abort();
99c475ab 3916 }
1b2b1e54 3917 tcg_gen_addi_i32(addr, addr, 1 << size);
7d1b0095 3918 tmp2 = tcg_temp_new_i32();
ad69471c
PB
3919 tcg_gen_mov_i32(tmp2, tmp);
3920 neon_store_reg(rd, 0, tmp2);
3018f259 3921 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3922 rd += stride;
3923 }
3924 stride = (1 << size) * nregs;
3925 } else {
3926 /* Single element. */
3927 pass = (insn >> 7) & 1;
3928 switch (size) {
3929 case 0:
3930 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3931 stride = 1;
3932 break;
3933 case 1:
3934 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3935 stride = (insn & (1 << 5)) ? 2 : 1;
3936 break;
3937 case 2:
3938 shift = 0;
9ee6e8bb
PB
3939 stride = (insn & (1 << 6)) ? 2 : 1;
3940 break;
3941 default:
3942 abort();
3943 }
3944 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3945 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3946 for (reg = 0; reg < nregs; reg++) {
3947 if (load) {
9ee6e8bb
PB
3948 switch (size) {
3949 case 0:
1b2b1e54 3950 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3951 break;
3952 case 1:
1b2b1e54 3953 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3954 break;
3955 case 2:
1b2b1e54 3956 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3957 break;
a50f5b91
PB
3958 default: /* Avoid compiler warnings. */
3959 abort();
9ee6e8bb
PB
3960 }
3961 if (size != 2) {
8f8e3aa4
PB
3962 tmp2 = neon_load_reg(rd, pass);
3963 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
7d1b0095 3964 tcg_temp_free_i32(tmp2);
9ee6e8bb 3965 }
8f8e3aa4 3966 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3967 } else { /* Store */
8f8e3aa4
PB
3968 tmp = neon_load_reg(rd, pass);
3969 if (shift)
3970 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3971 switch (size) {
3972 case 0:
1b2b1e54 3973 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3974 break;
3975 case 1:
1b2b1e54 3976 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3977 break;
3978 case 2:
1b2b1e54 3979 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3980 break;
99c475ab 3981 }
99c475ab 3982 }
9ee6e8bb 3983 rd += stride;
1b2b1e54 3984 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 3985 }
9ee6e8bb 3986 stride = nregs * (1 << size);
99c475ab 3987 }
9ee6e8bb 3988 }
7d1b0095 3989 tcg_temp_free_i32(addr);
9ee6e8bb 3990 if (rm != 15) {
b26eefb6
PB
3991 TCGv base;
3992
3993 base = load_reg(s, rn);
9ee6e8bb 3994 if (rm == 13) {
b26eefb6 3995 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3996 } else {
b26eefb6
PB
3997 TCGv index;
3998 index = load_reg(s, rm);
3999 tcg_gen_add_i32(base, base, index);
7d1b0095 4000 tcg_temp_free_i32(index);
9ee6e8bb 4001 }
b26eefb6 4002 store_reg(s, rn, base);
9ee6e8bb
PB
4003 }
4004 return 0;
4005}
3b46e624 4006
8f8e3aa4
PB
4007/* Bitwise select. dest = c ? t : f. Clobbers t and f. */
4008static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4009{
4010 tcg_gen_and_i32(t, t, c);
f669df27 4011 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4012 tcg_gen_or_i32(dest, t, f);
4013}
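/* All three NEON bitwise-select instructions map onto this helper and
 * differ only in operand order (see the logic-ops cases below):
 * VBSL uses Vd to select between Vn and Vm, VBIT uses Vm to select
 * between Vn and Vd, and VBIF uses Vm to select between Vd and Vn. */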
4014
a7812ae4 4015static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4016{
4017 switch (size) {
4018 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4019 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4020 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4021 default: abort();
4022 }
4023}
4024
a7812ae4 4025static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4026{
4027 switch (size) {
4028 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4029 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4030 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4031 default: abort();
4032 }
4033}
4034
a7812ae4 4035static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4036{
4037 switch (size) {
4038 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4039 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4040 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4041 default: abort();
4042 }
4043}
4044
af1bbf30
JR
4045static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4046{
4047 switch (size) {
4048 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4049 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4050 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4051 default: abort();
4052 }
4053}
4054
ad69471c
PB
4055static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4056 int q, int u)
4057{
4058 if (q) {
4059 if (u) {
4060 switch (size) {
4061 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4062 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4063 default: abort();
4064 }
4065 } else {
4066 switch (size) {
4067 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4068 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4069 default: abort();
4070 }
4071 }
4072 } else {
4073 if (u) {
4074 switch (size) {
b408a9b0
CL
4075 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4076 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4077 default: abort();
4078 }
4079 } else {
4080 switch (size) {
4081 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4082 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4083 default: abort();
4084 }
4085 }
4086 }
4087}
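/* Only 16-bit and 32-bit element sizes are needed above: the
 * narrowing-shift callers have already incremented size, never pass
 * size 0, and handle 64-bit source elements with the _u64/_s64
 * helpers directly. */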
4088
a7812ae4 4089static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4090{
4091 if (u) {
4092 switch (size) {
4093 case 0: gen_helper_neon_widen_u8(dest, src); break;
4094 case 1: gen_helper_neon_widen_u16(dest, src); break;
4095 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4096 default: abort();
4097 }
4098 } else {
4099 switch (size) {
4100 case 0: gen_helper_neon_widen_s8(dest, src); break;
4101 case 1: gen_helper_neon_widen_s16(dest, src); break;
4102 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4103 default: abort();
4104 }
4105 }
7d1b0095 4106 tcg_temp_free_i32(src);
ad69471c
PB
4107}
4108
4109static inline void gen_neon_addl(int size)
4110{
4111 switch (size) {
4112 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4113 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4114 case 2: tcg_gen_add_i64(CPU_V001); break;
4115 default: abort();
4116 }
4117}
4118
4119static inline void gen_neon_subl(int size)
4120{
4121 switch (size) {
4122 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4123 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4124 case 2: tcg_gen_sub_i64(CPU_V001); break;
4125 default: abort();
4126 }
4127}
4128
a7812ae4 4129static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4130{
4131 switch (size) {
4132 case 0: gen_helper_neon_negl_u16(var, var); break;
4133 case 1: gen_helper_neon_negl_u32(var, var); break;
4134 case 2: gen_helper_neon_negl_u64(var, var); break;
4135 default: abort();
4136 }
4137}
4138
a7812ae4 4139static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4140{
4141 switch (size) {
4142 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4143 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4144 default: abort();
4145 }
4146}
4147
a7812ae4 4148static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4149{
a7812ae4 4150 TCGv_i64 tmp;
ad69471c
PB
4151
4152 switch ((size << 1) | u) {
4153 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4154 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4155 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4156 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4157 case 4:
4158 tmp = gen_muls_i64_i32(a, b);
4159 tcg_gen_mov_i64(dest, tmp);
4160 break;
4161 case 5:
4162 tmp = gen_mulu_i64_i32(a, b);
4163 tcg_gen_mov_i64(dest, tmp);
4164 break;
4165 default: abort();
4166 }
c6067f04
CL
4167
4168 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4169 Don't forget to clean them now. */
4170 if (size < 2) {
7d1b0095
PM
4171 tcg_temp_free_i32(a);
4172 tcg_temp_free_i32(b);
c6067f04 4173 }
ad69471c
PB
4174}
4175
c33171c7
PM
4176static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4177{
4178 if (op) {
4179 if (u) {
4180 gen_neon_unarrow_sats(size, dest, src);
4181 } else {
4182 gen_neon_narrow(size, dest, src);
4183 }
4184 } else {
4185 if (u) {
4186 gen_neon_narrow_satu(size, dest, src);
4187 } else {
4188 gen_neon_narrow_sats(size, dest, src);
4189 }
4190 }
4191}
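/* The op flag selects between the VMOVN/VQMOVUN-style encodings (a
 * plain narrow, or a signed-to-unsigned saturating narrow when u is
 * set) and the VQMOVN-style saturating narrows whose signedness
 * follows u. The same helper also serves the narrowing shifts
 * (VSHRN/VQSHRUN/VQSHRN and their rounding variants). */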
4192
9ee6e8bb
PB
4193/* Translate a NEON data processing instruction. Return nonzero if the
4194 instruction is invalid.
ad69471c
PB
4195 We process data in a mixture of 32-bit and 64-bit chunks.
4196 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4197
9ee6e8bb
PB
4198static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4199{
4200 int op;
4201 int q;
4202 int rd, rn, rm;
4203 int size;
4204 int shift;
4205 int pass;
4206 int count;
4207 int pairwise;
4208 int u;
4209 int n;
ca9a32e4 4210 uint32_t imm, mask;
b75263d6 4211 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4212 TCGv_i64 tmp64;
9ee6e8bb 4213
5df8bac1 4214 if (!s->vfp_enabled)
9ee6e8bb
PB
4215 return 1;
4216 q = (insn & (1 << 6)) != 0;
4217 u = (insn >> 24) & 1;
4218 VFP_DREG_D(rd, insn);
4219 VFP_DREG_N(rn, insn);
4220 VFP_DREG_M(rm, insn);
4221 size = (insn >> 20) & 3;
4222 if ((insn & (1 << 23)) == 0) {
4223 /* Three register same length. */
4224 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4225 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4226 || op == 10 || op == 11 || op == 16)) {
4227 /* 64-bit element instructions. */
9ee6e8bb 4228 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4229 neon_load_reg64(cpu_V0, rn + pass);
4230 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4231 switch (op) {
4232 case 1: /* VQADD */
4233 if (u) {
72902672
CL
4234 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4235 cpu_V0, cpu_V1);
2c0262af 4236 } else {
72902672
CL
4237 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4238 cpu_V0, cpu_V1);
2c0262af 4239 }
9ee6e8bb
PB
4240 break;
4241 case 5: /* VQSUB */
4242 if (u) {
72902672
CL
4243 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4244 cpu_V0, cpu_V1);
ad69471c 4245 } else {
72902672
CL
4246 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4247 cpu_V0, cpu_V1);
ad69471c
PB
4248 }
4249 break;
4250 case 8: /* VSHL */
4251 if (u) {
4252 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4253 } else {
4254 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4255 }
4256 break;
4257 case 9: /* VQSHL */
4258 if (u) {
4259 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
def126ce 4260 cpu_V1, cpu_V0);
ad69471c 4261 } else {
def126ce 4262 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
ad69471c
PB
4263 cpu_V1, cpu_V0);
4264 }
4265 break;
4266 case 10: /* VRSHL */
4267 if (u) {
4268 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4269 } else {
ad69471c
PB
4270 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4271 }
4272 break;
4273 case 11: /* VQRSHL */
4274 if (u) {
4275 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4276 cpu_V1, cpu_V0);
4277 } else {
4278 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4279 cpu_V1, cpu_V0);
1e8d4eec 4280 }
9ee6e8bb
PB
4281 break;
4282 case 16:
4283 if (u) {
ad69471c 4284 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4285 } else {
ad69471c 4286 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4287 }
4288 break;
4289 default:
4290 abort();
2c0262af 4291 }
ad69471c 4292 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4293 }
9ee6e8bb 4294 return 0;
2c0262af 4295 }
9ee6e8bb
PB
4296 switch (op) {
4297 case 8: /* VSHL */
4298 case 9: /* VQSHL */
4299 case 10: /* VRSHL */
ad69471c 4300 case 11: /* VQRSHL */
9ee6e8bb 4301 {
ad69471c
PB
4302 int rtmp;
4303 /* Shift instruction operands are reversed. */
4304 rtmp = rn;
9ee6e8bb 4305 rn = rm;
ad69471c 4306 rm = rtmp;
9ee6e8bb
PB
4307 pairwise = 0;
4308 }
2c0262af 4309 break;
9ee6e8bb
PB
4310 case 20: /* VPMAX */
4311 case 21: /* VPMIN */
4312 case 23: /* VPADD */
4313 pairwise = 1;
2c0262af 4314 break;
9ee6e8bb
PB
4315 case 26: /* VPADD (float) */
4316 pairwise = (u && size < 2);
2c0262af 4317 break;
9ee6e8bb
PB
4318 case 30: /* VPMIN/VPMAX (float) */
4319 pairwise = u;
2c0262af 4320 break;
9ee6e8bb
PB
4321 default:
4322 pairwise = 0;
2c0262af 4323 break;
9ee6e8bb 4324 }
dd8fbd78 4325
9ee6e8bb
PB
4326 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4327
4328 if (pairwise) {
4329 /* Pairwise. */
4330 if (q)
4331 n = (pass & 1) * 2;
2c0262af 4332 else
9ee6e8bb
PB
4333 n = 0;
4334 if (pass < q + 1) {
dd8fbd78
FN
4335 tmp = neon_load_reg(rn, n);
4336 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4337 } else {
dd8fbd78
FN
4338 tmp = neon_load_reg(rm, n);
4339 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4340 }
4341 } else {
4342 /* Elementwise. */
dd8fbd78
FN
4343 tmp = neon_load_reg(rn, pass);
4344 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4345 }
4346 switch (op) {
4347 case 0: /* VHADD */
4348 GEN_NEON_INTEGER_OP(hadd);
4349 break;
4350 case 1: /* VQADD */
ad69471c 4351 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4352 break;
9ee6e8bb
PB
4353 case 2: /* VRHADD */
4354 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4355 break;
9ee6e8bb
PB
4356 case 3: /* Logic ops. */
4357 switch ((u << 2) | size) {
4358 case 0: /* VAND */
dd8fbd78 4359 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4360 break;
4361 case 1: /* BIC */
f669df27 4362 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4363 break;
4364 case 2: /* VORR */
dd8fbd78 4365 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4366 break;
4367 case 3: /* VORN */
f669df27 4368 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4369 break;
4370 case 4: /* VEOR */
dd8fbd78 4371 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4372 break;
4373 case 5: /* VBSL */
dd8fbd78
FN
4374 tmp3 = neon_load_reg(rd, pass);
4375 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4376 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4377 break;
4378 case 6: /* VBIT */
dd8fbd78
FN
4379 tmp3 = neon_load_reg(rd, pass);
4380 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4381 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4382 break;
4383 case 7: /* VBIF */
dd8fbd78
FN
4384 tmp3 = neon_load_reg(rd, pass);
4385 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4386 tcg_temp_free_i32(tmp3);
9ee6e8bb 4387 break;
2c0262af
FB
4388 }
4389 break;
9ee6e8bb
PB
4390 case 4: /* VHSUB */
4391 GEN_NEON_INTEGER_OP(hsub);
4392 break;
4393 case 5: /* VQSUB */
ad69471c 4394 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4395 break;
9ee6e8bb
PB
4396 case 6: /* VCGT */
4397 GEN_NEON_INTEGER_OP(cgt);
4398 break;
4399 case 7: /* VCGE */
4400 GEN_NEON_INTEGER_OP(cge);
4401 break;
4402 case 8: /* VSHL */
ad69471c 4403 GEN_NEON_INTEGER_OP(shl);
2c0262af 4404 break;
9ee6e8bb 4405 case 9: /* VQSHL */
ad69471c 4406 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4407 break;
9ee6e8bb 4408 case 10: /* VRSHL */
ad69471c 4409 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4410 break;
9ee6e8bb 4411 case 11: /* VQRSHL */
ad69471c 4412 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4413 break;
4414 case 12: /* VMAX */
4415 GEN_NEON_INTEGER_OP(max);
4416 break;
4417 case 13: /* VMIN */
4418 GEN_NEON_INTEGER_OP(min);
4419 break;
4420 case 14: /* VABD */
4421 GEN_NEON_INTEGER_OP(abd);
4422 break;
4423 case 15: /* VABA */
4424 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4425 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4426 tmp2 = neon_load_reg(rd, pass);
4427 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4428 break;
4429 case 16:
4430 if (!u) { /* VADD */
dd8fbd78 4431 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4432 return 1;
4433 } else { /* VSUB */
4434 switch (size) {
dd8fbd78
FN
4435 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4436 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4437 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4438 default: return 1;
4439 }
4440 }
4441 break;
4442 case 17:
4443 if (!u) { /* VTST */
4444 switch (size) {
dd8fbd78
FN
4445 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4446 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4447 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4448 default: return 1;
4449 }
4450 } else { /* VCEQ */
4451 switch (size) {
dd8fbd78
FN
4452 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4453 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4454 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4455 default: return 1;
4456 }
4457 }
4458 break;
4459 case 18: /* Multiply. */
4460 switch (size) {
dd8fbd78
FN
4461 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4462 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4463 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4464 default: return 1;
4465 }
7d1b0095 4466 tcg_temp_free_i32(tmp2);
dd8fbd78 4467 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4468 if (u) { /* VMLS */
dd8fbd78 4469 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4470 } else { /* VMLA */
dd8fbd78 4471 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4472 }
4473 break;
4474 case 19: /* VMUL */
4475 if (u) { /* polynomial */
dd8fbd78 4476 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4477 } else { /* Integer */
4478 switch (size) {
dd8fbd78
FN
4479 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4480 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4481 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4482 default: return 1;
4483 }
4484 }
4485 break;
4486 case 20: /* VPMAX */
4487 GEN_NEON_INTEGER_OP(pmax);
4488 break;
4489 case 21: /* VPMIN */
4490 GEN_NEON_INTEGER_OP(pmin);
4491 break;
4492 case 22: /* Multiply high. */
4493 if (!u) { /* VQDMULH */
4494 switch (size) {
dd8fbd78
FN
4495 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4496 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4497 default: return 1;
4498 }
4499 } else { /* VQRDMULH */
4500 switch (size) {
dd8fbd78
FN
4501 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4502 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4503 default: return 1;
4504 }
4505 }
4506 break;
4507 case 23: /* VPADD */
4508 if (u)
4509 return 1;
4510 switch (size) {
dd8fbd78
FN
4511 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4512 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4513 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4514 default: return 1;
4515 }
4516 break;
4517 case 26: /* Floating point arithmetic. */
4518 switch ((u << 2) | size) {
4519 case 0: /* VADD */
dd8fbd78 4520 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4521 break;
4522 case 2: /* VSUB */
dd8fbd78 4523 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4524 break;
4525 case 4: /* VPADD */
dd8fbd78 4526 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4527 break;
4528 case 6: /* VABD */
dd8fbd78 4529 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4530 break;
4531 default:
4532 return 1;
4533 }
4534 break;
4535 case 27: /* Float multiply. */
dd8fbd78 4536 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4537 if (!u) {
7d1b0095 4538 tcg_temp_free_i32(tmp2);
dd8fbd78 4539 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4540 if (size == 0) {
dd8fbd78 4541 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4542 } else {
dd8fbd78 4543 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4544 }
4545 }
4546 break;
4547 case 28: /* Float compare. */
4548 if (!u) {
dd8fbd78 4549 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4550 } else {
9ee6e8bb 4551 if (size == 0)
dd8fbd78 4552 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4553 else
dd8fbd78 4554 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4555 }
2c0262af 4556 break;
9ee6e8bb
PB
4557 case 29: /* Float compare absolute. */
4558 if (!u)
4559 return 1;
4560 if (size == 0)
dd8fbd78 4561 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4562 else
dd8fbd78 4563 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4564 break;
9ee6e8bb
PB
4565 case 30: /* Float min/max. */
4566 if (size == 0)
dd8fbd78 4567 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4568 else
dd8fbd78 4569 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4570 break;
4571 case 31:
4572 if (size == 0)
dd8fbd78 4573 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4574 else
dd8fbd78 4575 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4576 break;
9ee6e8bb
PB
4577 default:
4578 abort();
2c0262af 4579 }
7d1b0095 4580 tcg_temp_free_i32(tmp2);
dd8fbd78 4581
9ee6e8bb
PB
4582 /* Save the result. For elementwise operations we can put it
4583 straight into the destination register. For pairwise operations
4584 we have to be careful to avoid clobbering the source operands. */
4585 if (pairwise && rd == rm) {
dd8fbd78 4586 neon_store_scratch(pass, tmp);
9ee6e8bb 4587 } else {
dd8fbd78 4588 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4589 }
4590
4591 } /* for pass */
4592 if (pairwise && rd == rm) {
4593 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4594 tmp = neon_load_scratch(pass);
4595 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4596 }
4597 }
ad69471c 4598 /* End of 3 register same size operations. */
9ee6e8bb
PB
4599 } else if (insn & (1 << 4)) {
4600 if ((insn & 0x00380080) != 0) {
4601 /* Two registers and shift. */
4602 op = (insn >> 8) & 0xf;
4603 if (insn & (1 << 7)) {
4604 /* 64-bit shift. */
4605 size = 3;
4606 } else {
4607 size = 2;
4608 while ((insn & (1 << (size + 19))) == 0)
4609 size--;
4610 }
4611 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4612 /* To avoid excessive duplication of ops we implement shift
4613 by immediate using the variable shift operations. */
4614 if (op < 8) {
4615 /* Shift by immediate:
4616 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4617 /* Right shifts are encoded as N - shift, where N is the
4618 element size in bits. */
4619 if (op <= 4)
4620 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4621 if (size == 3) {
4622 count = q + 1;
4623 } else {
4624 count = q ? 4: 2;
4625 }
4626 switch (size) {
4627 case 0:
4628 imm = (uint8_t) shift;
4629 imm |= imm << 8;
4630 imm |= imm << 16;
4631 break;
4632 case 1:
4633 imm = (uint16_t) shift;
4634 imm |= imm << 16;
4635 break;
4636 case 2:
4637 case 3:
4638 imm = shift;
4639 break;
4640 default:
4641 abort();
4642 }
4643
4644 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4645 if (size == 3) {
4646 neon_load_reg64(cpu_V0, rm + pass);
4647 tcg_gen_movi_i64(cpu_V1, imm);
4648 switch (op) {
4649 case 0: /* VSHR */
4650 case 1: /* VSRA */
4651 if (u)
4652 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4653 else
ad69471c 4654 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4655 break;
ad69471c
PB
4656 case 2: /* VRSHR */
4657 case 3: /* VRSRA */
4658 if (u)
4659 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4660 else
ad69471c 4661 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4662 break;
ad69471c
PB
4663 case 4: /* VSRI */
4664 if (!u)
4665 return 1;
4666 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4667 break;
4668 case 5: /* VSHL, VSLI */
4669 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4670 break;
0322b26e
PM
4671 case 6: /* VQSHLU */
4672 if (u) {
4673 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4674 cpu_V0, cpu_V1);
4675 } else {
4676 return 1;
4677 }
ad69471c 4678 break;
0322b26e
PM
4679 case 7: /* VQSHL */
4680 if (u) {
4681 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4682 cpu_V0, cpu_V1);
4683 } else {
4684 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4685 cpu_V0, cpu_V1);
4686 }
9ee6e8bb 4687 break;
9ee6e8bb 4688 }
ad69471c
PB
4689 if (op == 1 || op == 3) {
4690 /* Accumulate. */
5371cb81 4691 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
4692 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4693 } else if (op == 4 || (op == 5 && u)) {
4694 /* Insert */
923e6509
CL
4695 neon_load_reg64(cpu_V1, rd + pass);
4696 uint64_t mask;
4697 if (shift < -63 || shift > 63) {
4698 mask = 0;
4699 } else {
4700 if (op == 4) {
4701 mask = 0xffffffffffffffffull >> -shift;
4702 } else {
4703 mask = 0xffffffffffffffffull << shift;
4704 }
4705 }
4706 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4707 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4708 }
4709 neon_store_reg64(cpu_V0, rd + pass);
4710 } else { /* size < 3 */
4711 /* Operands in T0 and T1. */
dd8fbd78 4712 tmp = neon_load_reg(rm, pass);
7d1b0095 4713 tmp2 = tcg_temp_new_i32();
dd8fbd78 4714 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4715 switch (op) {
4716 case 0: /* VSHR */
4717 case 1: /* VSRA */
4718 GEN_NEON_INTEGER_OP(shl);
4719 break;
4720 case 2: /* VRSHR */
4721 case 3: /* VRSRA */
4722 GEN_NEON_INTEGER_OP(rshl);
4723 break;
4724 case 4: /* VSRI */
4725 if (!u)
4726 return 1;
4727 GEN_NEON_INTEGER_OP(shl);
4728 break;
4729 case 5: /* VSHL, VSLI */
4730 switch (size) {
dd8fbd78
FN
4731 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4732 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4733 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4734 default: return 1;
4735 }
4736 break;
0322b26e
PM
4737 case 6: /* VQSHLU */
4738 if (!u) {
4739 return 1;
4740 }
ad69471c 4741 switch (size) {
0322b26e
PM
4742 case 0:
4743 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4744 tmp, tmp2);
4745 break;
4746 case 1:
4747 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4748 tmp, tmp2);
4749 break;
4750 case 2:
4751 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4752 tmp, tmp2);
4753 break;
4754 default:
4755 return 1;
ad69471c
PB
4756 }
4757 break;
0322b26e
PM
4758 case 7: /* VQSHL */
4759 GEN_NEON_INTEGER_OP_ENV(qshl);
4760 break;
ad69471c 4761 }
7d1b0095 4762 tcg_temp_free_i32(tmp2);
ad69471c
PB
4763
4764 if (op == 1 || op == 3) {
4765 /* Accumulate. */
dd8fbd78 4766 tmp2 = neon_load_reg(rd, pass);
5371cb81 4767 gen_neon_add(size, tmp, tmp2);
7d1b0095 4768 tcg_temp_free_i32(tmp2);
ad69471c
PB
4769 } else if (op == 4 || (op == 5 && u)) {
4770 /* Insert */
4771 switch (size) {
4772 case 0:
4773 if (op == 4)
ca9a32e4 4774 mask = 0xff >> -shift;
ad69471c 4775 else
ca9a32e4
JR
4776 mask = (uint8_t)(0xff << shift);
4777 mask |= mask << 8;
4778 mask |= mask << 16;
ad69471c
PB
4779 break;
4780 case 1:
4781 if (op == 4)
ca9a32e4 4782 mask = 0xffff >> -shift;
ad69471c 4783 else
ca9a32e4
JR
4784 mask = (uint16_t)(0xffff << shift);
4785 mask |= mask << 16;
ad69471c
PB
4786 break;
4787 case 2:
ca9a32e4
JR
4788 if (shift < -31 || shift > 31) {
4789 mask = 0;
4790 } else {
4791 if (op == 4)
4792 mask = 0xffffffffu >> -shift;
4793 else
4794 mask = 0xffffffffu << shift;
4795 }
ad69471c
PB
4796 break;
4797 default:
4798 abort();
4799 }
dd8fbd78 4800 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4801 tcg_gen_andi_i32(tmp, tmp, mask);
4802 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 4803 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4804 tcg_temp_free_i32(tmp2);
ad69471c 4805 }
dd8fbd78 4806 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4807 }
4808 } /* for pass */
4809 } else if (op < 10) {
ad69471c 4810 /* Shift by immediate and narrow:
9ee6e8bb 4811 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd
CL
4812 int input_unsigned = (op == 8) ? !u : u;
4813
9ee6e8bb
PB
4814 shift = shift - (1 << (size + 3));
4815 size++;
92cdfaeb 4816 if (size == 3) {
a7812ae4 4817 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
4818 neon_load_reg64(cpu_V0, rm);
4819 neon_load_reg64(cpu_V1, rm + 1);
4820 for (pass = 0; pass < 2; pass++) {
4821 TCGv_i64 in;
4822 if (pass == 0) {
4823 in = cpu_V0;
4824 } else {
4825 in = cpu_V1;
4826 }
ad69471c 4827 if (q) {
0b36f4cd 4828 if (input_unsigned) {
92cdfaeb 4829 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 4830 } else {
92cdfaeb 4831 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 4832 }
ad69471c 4833 } else {
0b36f4cd 4834 if (input_unsigned) {
92cdfaeb 4835 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 4836 } else {
92cdfaeb 4837 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 4838 }
ad69471c 4839 }
7d1b0095 4840 tmp = tcg_temp_new_i32();
92cdfaeb
PM
4841 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4842 neon_store_reg(rd, pass, tmp);
4843 } /* for pass */
4844 tcg_temp_free_i64(tmp64);
4845 } else {
4846 if (size == 1) {
4847 imm = (uint16_t)shift;
4848 imm |= imm << 16;
2c0262af 4849 } else {
92cdfaeb
PM
4850 /* size == 2 */
4851 imm = (uint32_t)shift;
4852 }
4853 tmp2 = tcg_const_i32(imm);
4854 tmp4 = neon_load_reg(rm + 1, 0);
4855 tmp5 = neon_load_reg(rm + 1, 1);
4856 for (pass = 0; pass < 2; pass++) {
4857 if (pass == 0) {
4858 tmp = neon_load_reg(rm, 0);
4859 } else {
4860 tmp = tmp4;
4861 }
0b36f4cd
CL
4862 gen_neon_shift_narrow(size, tmp, tmp2, q,
4863 input_unsigned);
92cdfaeb
PM
4864 if (pass == 0) {
4865 tmp3 = neon_load_reg(rm, 1);
4866 } else {
4867 tmp3 = tmp5;
4868 }
0b36f4cd
CL
4869 gen_neon_shift_narrow(size, tmp3, tmp2, q,
4870 input_unsigned);
36aa55dc 4871 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
4872 tcg_temp_free_i32(tmp);
4873 tcg_temp_free_i32(tmp3);
4874 tmp = tcg_temp_new_i32();
92cdfaeb
PM
4875 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4876 neon_store_reg(rd, pass, tmp);
4877 } /* for pass */
c6067f04 4878 tcg_temp_free_i32(tmp2);
b75263d6 4879 }
9ee6e8bb
PB
4880 } else if (op == 10) {
4881 /* VSHLL */
ad69471c 4882 if (q || size == 3)
9ee6e8bb 4883 return 1;
ad69471c
PB
4884 tmp = neon_load_reg(rm, 0);
4885 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4886 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4887 if (pass == 1)
4888 tmp = tmp2;
4889
4890 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4891
9ee6e8bb
PB
4892 if (shift != 0) {
4893 /* The shift is less than the width of the source
ad69471c
PB
4894 type, so we can just shift the whole register. */
4895 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
4896 /* Widen the result of shift: we need to clear
4897 * the potential overflow bits resulting from
4898 * left bits of the narrow input appearing as
4899 * right bits of the left neighbour narrow
4900 * input. */
ad69471c
PB
4901 if (size < 2 || !u) {
4902 uint64_t imm64;
4903 if (size == 0) {
4904 imm = (0xffu >> (8 - shift));
4905 imm |= imm << 16;
acdf01ef 4906 } else if (size == 1) {
ad69471c 4907 imm = 0xffff >> (16 - shift);
acdf01ef
CL
4908 } else {
4909 /* size == 2 */
4910 imm = 0xffffffff >> (32 - shift);
4911 }
4912 if (size < 2) {
4913 imm64 = imm | (((uint64_t)imm) << 32);
4914 } else {
4915 imm64 = imm;
9ee6e8bb 4916 }
acdf01ef 4917 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
4918 }
4919 }
ad69471c 4920 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 4921 }
f73534a5 4922 } else if (op >= 14) {
9ee6e8bb 4923 /* VCVT fixed-point. */
f73534a5
PM
4924 /* We have already masked out the must-be-1 top bit of imm6,
4925 * hence this 32-shift where the ARM ARM has 64-imm6.
4926 */
4927 shift = 32 - shift;
9ee6e8bb 4928 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4929 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 4930 if (!(op & 1)) {
9ee6e8bb 4931 if (u)
4373f3ce 4932 gen_vfp_ulto(0, shift);
9ee6e8bb 4933 else
4373f3ce 4934 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4935 } else {
4936 if (u)
4373f3ce 4937 gen_vfp_toul(0, shift);
9ee6e8bb 4938 else
4373f3ce 4939 gen_vfp_tosl(0, shift);
2c0262af 4940 }
4373f3ce 4941 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4942 }
4943 } else {
9ee6e8bb
PB
4944 return 1;
4945 }
4946 } else { /* (insn & 0x00380080) == 0 */
4947 int invert;
4948
4949 op = (insn >> 8) & 0xf;
4950 /* One register and immediate. */
4951 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4952 invert = (insn & (1 << 5)) != 0;
4953 switch (op) {
4954 case 0: case 1:
4955 /* no-op */
4956 break;
4957 case 2: case 3:
4958 imm <<= 8;
4959 break;
4960 case 4: case 5:
4961 imm <<= 16;
4962 break;
4963 case 6: case 7:
4964 imm <<= 24;
4965 break;
4966 case 8: case 9:
4967 imm |= imm << 16;
4968 break;
4969 case 10: case 11:
4970 imm = (imm << 8) | (imm << 24);
4971 break;
4972 case 12:
8e31209e 4973 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
4974 break;
4975 case 13:
4976 imm = (imm << 16) | 0xffff;
4977 break;
4978 case 14:
4979 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4980 if (invert)
4981 imm = ~imm;
4982 break;
4983 case 15:
4984 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4985 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4986 break;
4987 }
4988 if (invert)
4989 imm = ~imm;
4990
9ee6e8bb
PB
4991 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4992 if (op & 1 && op < 12) {
ad69471c 4993 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4994 if (invert) {
4995 /* The immediate value has already been inverted, so
4996 BIC becomes AND. */
ad69471c 4997 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4998 } else {
ad69471c 4999 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5000 }
9ee6e8bb 5001 } else {
ad69471c 5002 /* VMOV, VMVN. */
7d1b0095 5003 tmp = tcg_temp_new_i32();
9ee6e8bb 5004 if (op == 14 && invert) {
ad69471c
PB
5005 uint32_t val;
5006 val = 0;
9ee6e8bb
PB
5007 for (n = 0; n < 4; n++) {
5008 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5009 val |= 0xff << (n * 8);
9ee6e8bb 5010 }
ad69471c
PB
5011 tcg_gen_movi_i32(tmp, val);
5012 } else {
5013 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5014 }
9ee6e8bb 5015 }
ad69471c 5016 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5017 }
5018 }
e4b3861d 5019 } else { /* (insn & 0x00800010) == 0x00800000 */
9ee6e8bb
PB
5020 if (size != 3) {
5021 op = (insn >> 8) & 0xf;
5022 if ((insn & (1 << 6)) == 0) {
5023 /* Three registers of different lengths. */
5024 int src1_wide;
5025 int src2_wide;
5026 int prewiden;
5027 /* prewiden, src1_wide, src2_wide */
5028 static const int neon_3reg_wide[16][3] = {
5029 {1, 0, 0}, /* VADDL */
5030 {1, 1, 0}, /* VADDW */
5031 {1, 0, 0}, /* VSUBL */
5032 {1, 1, 0}, /* VSUBW */
5033 {0, 1, 1}, /* VADDHN */
5034 {0, 0, 0}, /* VABAL */
5035 {0, 1, 1}, /* VSUBHN */
5036 {0, 0, 0}, /* VABDL */
5037 {0, 0, 0}, /* VMLAL */
5038 {0, 0, 0}, /* VQDMLAL */
5039 {0, 0, 0}, /* VMLSL */
5040 {0, 0, 0}, /* VQDMLSL */
5041 {0, 0, 0}, /* Integer VMULL */
5042 {0, 0, 0}, /* VQDMULL */
5043 {0, 0, 0} /* Polynomial VMULL */
5044 };
5045
5046 prewiden = neon_3reg_wide[op][0];
5047 src1_wide = neon_3reg_wide[op][1];
5048 src2_wide = neon_3reg_wide[op][2];
5049
ad69471c
PB
5050 if (size == 0 && (op == 9 || op == 11 || op == 13))
5051 return 1;
5052
9ee6e8bb
PB
5053 /* Avoid overlapping operands. Wide source operands are
5054 always aligned so will never overlap with wide
5055 destinations in problematic ways. */
8f8e3aa4 5056 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5057 tmp = neon_load_reg(rm, 1);
5058 neon_store_scratch(2, tmp);
8f8e3aa4 5059 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5060 tmp = neon_load_reg(rn, 1);
5061 neon_store_scratch(2, tmp);
9ee6e8bb 5062 }
a50f5b91 5063 TCGV_UNUSED(tmp3);
9ee6e8bb 5064 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5065 if (src1_wide) {
5066 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5067 TCGV_UNUSED(tmp);
9ee6e8bb 5068 } else {
ad69471c 5069 if (pass == 1 && rd == rn) {
dd8fbd78 5070 tmp = neon_load_scratch(2);
9ee6e8bb 5071 } else {
ad69471c
PB
5072 tmp = neon_load_reg(rn, pass);
5073 }
5074 if (prewiden) {
5075 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5076 }
5077 }
ad69471c
PB
5078 if (src2_wide) {
5079 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5080 TCGV_UNUSED(tmp2);
9ee6e8bb 5081 } else {
ad69471c 5082 if (pass == 1 && rd == rm) {
dd8fbd78 5083 tmp2 = neon_load_scratch(2);
9ee6e8bb 5084 } else {
ad69471c
PB
5085 tmp2 = neon_load_reg(rm, pass);
5086 }
5087 if (prewiden) {
5088 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5089 }
9ee6e8bb
PB
5090 }
5091 switch (op) {
5092 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5093 gen_neon_addl(size);
9ee6e8bb 5094 break;
79b0e534 5095 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5096 gen_neon_subl(size);
9ee6e8bb
PB
5097 break;
5098 case 5: case 7: /* VABAL, VABDL */
5099 switch ((size << 1) | u) {
ad69471c
PB
5100 case 0:
5101 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5102 break;
5103 case 1:
5104 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5105 break;
5106 case 2:
5107 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5108 break;
5109 case 3:
5110 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5111 break;
5112 case 4:
5113 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5114 break;
5115 case 5:
5116 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5117 break;
9ee6e8bb
PB
5118 default: abort();
5119 }
7d1b0095
PM
5120 tcg_temp_free_i32(tmp2);
5121 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5122 break;
5123 case 8: case 9: case 10: case 11: case 12: case 13:
5124 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5125 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5126 break;
5127 case 14: /* Polynomial VMULL */
e5ca24cb 5128 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5129 tcg_temp_free_i32(tmp2);
5130 tcg_temp_free_i32(tmp);
e5ca24cb 5131 break;
9ee6e8bb
PB
5132 default: /* 15 is RESERVED. */
5133 return 1;
5134 }
ebcd88ce
PM
5135 if (op == 13) {
5136 /* VQDMULL */
5137 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5138 neon_store_reg64(cpu_V0, rd + pass);
5139 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5140 /* Accumulate. */
ebcd88ce 5141 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5142 switch (op) {
4dc064e6
PM
5143 case 10: /* VMLSL */
5144 gen_neon_negl(cpu_V0, size);
5145 /* Fall through */
5146 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5147 gen_neon_addl(size);
9ee6e8bb
PB
5148 break;
5149 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5150 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5151 if (op == 11) {
5152 gen_neon_negl(cpu_V0, size);
5153 }
ad69471c
PB
5154 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5155 break;
9ee6e8bb
PB
5156 default:
5157 abort();
5158 }
ad69471c 5159 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5160 } else if (op == 4 || op == 6) {
5161 /* Narrowing operation. */
7d1b0095 5162 tmp = tcg_temp_new_i32();
79b0e534 5163 if (!u) {
9ee6e8bb 5164 switch (size) {
ad69471c
PB
5165 case 0:
5166 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5167 break;
5168 case 1:
5169 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5170 break;
5171 case 2:
5172 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5173 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5174 break;
9ee6e8bb
PB
5175 default: abort();
5176 }
5177 } else {
5178 switch (size) {
ad69471c
PB
5179 case 0:
5180 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5181 break;
5182 case 1:
5183 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5184 break;
5185 case 2:
5186 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5187 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5188 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5189 break;
9ee6e8bb
PB
5190 default: abort();
5191 }
5192 }
ad69471c
PB
5193 if (pass == 0) {
5194 tmp3 = tmp;
5195 } else {
5196 neon_store_reg(rd, 0, tmp3);
5197 neon_store_reg(rd, 1, tmp);
5198 }
9ee6e8bb
PB
5199 } else {
5200 /* Write back the result. */
ad69471c 5201 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5202 }
5203 }
5204 } else {
5205 /* Two registers and a scalar. */
5206 switch (op) {
5207 case 0: /* Integer VMLA scalar */
5208 case 1: /* Float VMLA scalar */
5209 case 4: /* Integer VMLS scalar */
5210 case 5: /* Floating point VMLS scalar */
5211 case 8: /* Integer VMUL scalar */
5212 case 9: /* Floating point VMUL scalar */
5213 case 12: /* VQDMULH scalar */
5214 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5215 tmp = neon_get_scalar(size, rm);
5216 neon_store_scratch(0, tmp);
9ee6e8bb 5217 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5218 tmp = neon_load_scratch(0);
5219 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5220 if (op == 12) {
5221 if (size == 1) {
dd8fbd78 5222 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5223 } else {
dd8fbd78 5224 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5225 }
5226 } else if (op == 13) {
5227 if (size == 1) {
dd8fbd78 5228 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5229 } else {
dd8fbd78 5230 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5231 }
5232 } else if (op & 1) {
dd8fbd78 5233 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5234 } else {
5235 switch (size) {
dd8fbd78
FN
5236 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5237 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5238 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5239 default: return 1;
5240 }
5241 }
7d1b0095 5242 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5243 if (op < 8) {
5244 /* Accumulate. */
dd8fbd78 5245 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5246 switch (op) {
5247 case 0:
dd8fbd78 5248 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5249 break;
5250 case 1:
dd8fbd78 5251 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5252 break;
5253 case 4:
dd8fbd78 5254 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5255 break;
5256 case 5:
dd8fbd78 5257 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5258 break;
5259 default:
5260 abort();
5261 }
7d1b0095 5262 tcg_temp_free_i32(tmp2);
9ee6e8bb 5263 }
dd8fbd78 5264 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5265 }
5266 break;
5267 case 2: /* VMLAL scalar */
5268 case 3: /* VQDMLAL scalar */
5269 case 6: /* VMLSL scalar */
5270 case 7: /* VQDMLSL scalar */
5271 case 10: /* VMULL scalar */
5272 case 11: /* VQDMULL scalar */
ad69471c
PB
5273 if (size == 0 && (op == 3 || op == 7 || op == 11))
5274 return 1;
5275
dd8fbd78 5276 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5277 /* We need a copy of tmp2 because gen_neon_mull
5278 * deletes it during pass 0. */
7d1b0095 5279 tmp4 = tcg_temp_new_i32();
c6067f04 5280 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5281 tmp3 = neon_load_reg(rn, 1);
ad69471c 5282
9ee6e8bb 5283 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5284 if (pass == 0) {
5285 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5286 } else {
dd8fbd78 5287 tmp = tmp3;
c6067f04 5288 tmp2 = tmp4;
9ee6e8bb 5289 }
ad69471c 5290 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5291 if (op != 11) {
5292 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5293 }
9ee6e8bb 5294 switch (op) {
4dc064e6
PM
5295 case 6:
5296 gen_neon_negl(cpu_V0, size);
5297 /* Fall through */
5298 case 2:
ad69471c 5299 gen_neon_addl(size);
9ee6e8bb
PB
5300 break;
5301 case 3: case 7:
ad69471c 5302 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5303 if (op == 7) {
5304 gen_neon_negl(cpu_V0, size);
5305 }
ad69471c 5306 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5307 break;
5308 case 10:
5309 /* no-op */
5310 break;
5311 case 11:
ad69471c 5312 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5313 break;
5314 default:
5315 abort();
5316 }
ad69471c 5317 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5318 }
dd8fbd78 5319
dd8fbd78 5320
9ee6e8bb
PB
5321 break;
5322 default: /* 14 and 15 are RESERVED */
5323 return 1;
5324 }
5325 }
5326 } else { /* size == 3 */
5327 if (!u) {
5328 /* Extract. */
9ee6e8bb 5329 imm = (insn >> 8) & 0xf;
ad69471c
PB
5330
5331 if (imm > 7 && !q)
5332 return 1;
5333
5334 if (imm == 0) {
5335 neon_load_reg64(cpu_V0, rn);
5336 if (q) {
5337 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5338 }
ad69471c
PB
5339 } else if (imm == 8) {
5340 neon_load_reg64(cpu_V0, rn + 1);
5341 if (q) {
5342 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5343 }
ad69471c 5344 } else if (q) {
a7812ae4 5345 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5346 if (imm < 8) {
5347 neon_load_reg64(cpu_V0, rn);
a7812ae4 5348 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5349 } else {
5350 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5351 neon_load_reg64(tmp64, rm);
ad69471c
PB
5352 }
5353 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5354 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5355 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5356 if (imm < 8) {
5357 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5358 } else {
ad69471c
PB
5359 neon_load_reg64(cpu_V1, rm + 1);
5360 imm -= 8;
9ee6e8bb 5361 }
ad69471c 5362 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5363 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5364 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5365 tcg_temp_free_i64(tmp64);
ad69471c 5366 } else {
a7812ae4 5367 /* BUGFIX */
ad69471c 5368 neon_load_reg64(cpu_V0, rn);
a7812ae4 5369 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5370 neon_load_reg64(cpu_V1, rm);
a7812ae4 5371 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5372 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5373 }
5374 neon_store_reg64(cpu_V0, rd);
5375 if (q) {
5376 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5377 }
5378 } else if ((insn & (1 << 11)) == 0) {
5379 /* Two register misc. */
5380 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5381 size = (insn >> 18) & 3;
5382 switch (op) {
5383 case 0: /* VREV64 */
5384 if (size == 3)
5385 return 1;
5386 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5387 tmp = neon_load_reg(rm, pass * 2);
5388 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5389 switch (size) {
dd8fbd78
FN
5390 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5391 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5392 case 2: /* no-op */ break;
5393 default: abort();
5394 }
dd8fbd78 5395 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5396 if (size == 2) {
dd8fbd78 5397 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5398 } else {
9ee6e8bb 5399 switch (size) {
dd8fbd78
FN
5400 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5401 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5402 default: abort();
5403 }
dd8fbd78 5404 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5405 }
5406 }
5407 break;
5408 case 4: case 5: /* VPADDL */
5409 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5410 if (size == 3)
5411 return 1;
ad69471c
PB
5412 for (pass = 0; pass < q + 1; pass++) {
5413 tmp = neon_load_reg(rm, pass * 2);
5414 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5415 tmp = neon_load_reg(rm, pass * 2 + 1);
5416 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5417 switch (size) {
5418 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5419 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5420 case 2: tcg_gen_add_i64(CPU_V001); break;
5421 default: abort();
5422 }
9ee6e8bb
PB
5423 if (op >= 12) {
5424 /* Accumulate. */
ad69471c
PB
5425 neon_load_reg64(cpu_V1, rd + pass);
5426 gen_neon_addl(size);
9ee6e8bb 5427 }
ad69471c 5428 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5429 }
5430 break;
5431 case 33: /* VTRN */
5432 if (size == 2) {
5433 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5434 tmp = neon_load_reg(rm, n);
5435 tmp2 = neon_load_reg(rd, n + 1);
5436 neon_store_reg(rm, n, tmp2);
5437 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5438 }
5439 } else {
5440 goto elementwise;
5441 }
5442 break;
5443 case 34: /* VUZP */
02acedf9 5444 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5445 return 1;
9ee6e8bb
PB
5446 }
5447 break;
5448 case 35: /* VZIP */
d68a6f3a 5449 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5450 return 1;
9ee6e8bb
PB
5451 }
5452 break;
5453 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5454 if (size == 3)
5455 return 1;
a50f5b91 5456 TCGV_UNUSED(tmp2);
9ee6e8bb 5457 for (pass = 0; pass < 2; pass++) {
ad69471c 5458 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5459 tmp = tcg_temp_new_i32();
c33171c7 5460 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
ad69471c
PB
5461 if (pass == 0) {
5462 tmp2 = tmp;
5463 } else {
5464 neon_store_reg(rd, 0, tmp2);
5465 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5466 }
9ee6e8bb
PB
5467 }
5468 break;
5469 case 38: /* VSHLL */
ad69471c 5470 if (q || size == 3)
9ee6e8bb 5471 return 1;
ad69471c
PB
5472 tmp = neon_load_reg(rm, 0);
5473 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5474 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5475 if (pass == 1)
5476 tmp = tmp2;
5477 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5478 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5479 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5480 }
5481 break;
60011498
PB
5482 case 44: /* VCVT.F16.F32 */
5483 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5484 return 1;
7d1b0095
PM
5485 tmp = tcg_temp_new_i32();
5486 tmp2 = tcg_temp_new_i32();
60011498 5487 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5488 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5489 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5490 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5491 tcg_gen_shli_i32(tmp2, tmp2, 16);
5492 tcg_gen_or_i32(tmp2, tmp2, tmp);
5493 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5494 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5495 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5496 neon_store_reg(rd, 0, tmp2);
7d1b0095 5497 tmp2 = tcg_temp_new_i32();
2d981da7 5498 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5499 tcg_gen_shli_i32(tmp2, tmp2, 16);
5500 tcg_gen_or_i32(tmp2, tmp2, tmp);
5501 neon_store_reg(rd, 1, tmp2);
7d1b0095 5502 tcg_temp_free_i32(tmp);
60011498
PB
5503 break;
5504 case 46: /* VCVT.F32.F16 */
5505 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5506 return 1;
7d1b0095 5507 tmp3 = tcg_temp_new_i32();
60011498
PB
5508 tmp = neon_load_reg(rm, 0);
5509 tmp2 = neon_load_reg(rm, 1);
5510 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5511 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5512 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5513 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5514 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5515 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5516 tcg_temp_free_i32(tmp);
60011498 5517 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5518 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5519 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5520 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5521 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5522 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5523 tcg_temp_free_i32(tmp2);
5524 tcg_temp_free_i32(tmp3);
60011498 5525 break;
9ee6e8bb
PB
5526 default:
5527 elementwise:
5528 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5529 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5530 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5531 neon_reg_offset(rm, pass));
dd8fbd78 5532 TCGV_UNUSED(tmp);
9ee6e8bb 5533 } else {
dd8fbd78 5534 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5535 }
5536 switch (op) {
5537 case 1: /* VREV32 */
5538 switch (size) {
dd8fbd78
FN
5539 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5540 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5541 default: return 1;
5542 }
5543 break;
5544 case 2: /* VREV16 */
5545 if (size != 0)
5546 return 1;
dd8fbd78 5547 gen_rev16(tmp);
9ee6e8bb 5548 break;
9ee6e8bb
PB
5549 case 8: /* CLS */
5550 switch (size) {
dd8fbd78
FN
5551 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5552 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5553 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5554 default: return 1;
5555 }
5556 break;
5557 case 9: /* CLZ */
5558 switch (size) {
dd8fbd78
FN
5559 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5560 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5561 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5562 default: return 1;
5563 }
5564 break;
5565 case 10: /* CNT */
5566 if (size != 0)
5567 return 1;
dd8fbd78 5568 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5569 break;
5570 case 11: /* VNOT */
5571 if (size != 0)
5572 return 1;
dd8fbd78 5573 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5574 break;
5575 case 14: /* VQABS */
5576 switch (size) {
dd8fbd78
FN
5577 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5578 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5579 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5580 default: return 1;
5581 }
5582 break;
5583 case 15: /* VQNEG */
5584 switch (size) {
dd8fbd78
FN
5585 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5586 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5587 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5588 default: return 1;
5589 }
5590 break;
5591 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5592 tmp2 = tcg_const_i32(0);
9ee6e8bb 5593 switch(size) {
dd8fbd78
FN
5594 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5595 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5596 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5597 default: return 1;
5598 }
dd8fbd78 5599 tcg_temp_free(tmp2);
9ee6e8bb 5600 if (op == 19)
dd8fbd78 5601 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5602 break;
5603 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5604 tmp2 = tcg_const_i32(0);
9ee6e8bb 5605 switch(size) {
dd8fbd78
FN
5606 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5607 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5608 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5609 default: return 1;
5610 }
dd8fbd78 5611 tcg_temp_free(tmp2);
9ee6e8bb 5612 if (op == 20)
dd8fbd78 5613 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5614 break;
5615 case 18: /* VCEQ #0 */
dd8fbd78 5616 tmp2 = tcg_const_i32(0);
9ee6e8bb 5617 switch(size) {
dd8fbd78
FN
5618 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5619 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5620 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5621 default: return 1;
5622 }
dd8fbd78 5623 tcg_temp_free(tmp2);
9ee6e8bb
PB
5624 break;
5625 case 22: /* VABS */
5626 switch(size) {
dd8fbd78
FN
5627 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5628 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5629 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5630 default: return 1;
5631 }
5632 break;
5633 case 23: /* VNEG */
ad69471c
PB
5634 if (size == 3)
5635 return 1;
dd8fbd78
FN
5636 tmp2 = tcg_const_i32(0);
5637 gen_neon_rsb(size, tmp, tmp2);
5638 tcg_temp_free(tmp2);
9ee6e8bb
PB
5639 break;
5640 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5641 tmp2 = tcg_const_i32(0);
5642 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5643 tcg_temp_free(tmp2);
9ee6e8bb 5644 if (op == 27)
dd8fbd78 5645 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5646 break;
5647 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5648 tmp2 = tcg_const_i32(0);
5649 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5650 tcg_temp_free(tmp2);
9ee6e8bb 5651 if (op == 28)
dd8fbd78 5652 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5653 break;
5654 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5655 tmp2 = tcg_const_i32(0);
5656 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5657 tcg_temp_free(tmp2);
9ee6e8bb
PB
5658 break;
5659 case 30: /* Float VABS */
4373f3ce 5660 gen_vfp_abs(0);
9ee6e8bb
PB
5661 break;
5662 case 31: /* Float VNEG */
4373f3ce 5663 gen_vfp_neg(0);
9ee6e8bb
PB
5664 break;
5665 case 32: /* VSWP */
dd8fbd78
FN
5666 tmp2 = neon_load_reg(rd, pass);
5667 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5668 break;
5669 case 33: /* VTRN */
dd8fbd78 5670 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5671 switch (size) {
dd8fbd78
FN
5672 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5673 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5674 case 2: abort();
5675 default: return 1;
5676 }
dd8fbd78 5677 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5678 break;
5679 case 56: /* Integer VRECPE */
dd8fbd78 5680 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5681 break;
5682 case 57: /* Integer VRSQRTE */
dd8fbd78 5683 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5684 break;
5685 case 58: /* Float VRECPE */
4373f3ce 5686 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5687 break;
5688 case 59: /* Float VRSQRTE */
4373f3ce 5689 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5690 break;
5691 case 60: /* VCVT.F32.S32 */
d3587ef8 5692 gen_vfp_sito(0);
9ee6e8bb
PB
5693 break;
5694 case 61: /* VCVT.F32.U32 */
d3587ef8 5695 gen_vfp_uito(0);
9ee6e8bb
PB
5696 break;
5697 case 62: /* VCVT.S32.F32 */
d3587ef8 5698 gen_vfp_tosiz(0);
9ee6e8bb
PB
5699 break;
5700 case 63: /* VCVT.U32.F32 */
d3587ef8 5701 gen_vfp_touiz(0);
9ee6e8bb
PB
5702 break;
5703 default:
5704 /* Reserved: 21, 29, 39-56 */
5705 return 1;
5706 }
5707 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5708 tcg_gen_st_f32(cpu_F0s, cpu_env,
5709 neon_reg_offset(rd, pass));
9ee6e8bb 5710 } else {
dd8fbd78 5711 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5712 }
5713 }
5714 break;
5715 }
5716 } else if ((insn & (1 << 10)) == 0) {
5717 /* VTBL, VTBX. */
3018f259 5718 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5719 if (insn & (1 << 6)) {
8f8e3aa4 5720 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5721 } else {
7d1b0095 5722 tmp = tcg_temp_new_i32();
8f8e3aa4 5723 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5724 }
8f8e3aa4 5725 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5726 tmp4 = tcg_const_i32(rn);
5727 tmp5 = tcg_const_i32(n);
5728 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 5729 tcg_temp_free_i32(tmp);
9ee6e8bb 5730 if (insn & (1 << 6)) {
8f8e3aa4 5731 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5732 } else {
7d1b0095 5733 tmp = tcg_temp_new_i32();
8f8e3aa4 5734 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5735 }
8f8e3aa4 5736 tmp3 = neon_load_reg(rm, 1);
b75263d6 5737 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5738 tcg_temp_free_i32(tmp5);
5739 tcg_temp_free_i32(tmp4);
8f8e3aa4 5740 neon_store_reg(rd, 0, tmp2);
3018f259 5741 neon_store_reg(rd, 1, tmp3);
7d1b0095 5742 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5743 } else if ((insn & 0x380) == 0) {
5744 /* VDUP */
5745 if (insn & (1 << 19)) {
dd8fbd78 5746 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5747 } else {
dd8fbd78 5748 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5749 }
5750 if (insn & (1 << 16)) {
dd8fbd78 5751 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5752 } else if (insn & (1 << 17)) {
5753 if ((insn >> 18) & 1)
dd8fbd78 5754 gen_neon_dup_high16(tmp);
9ee6e8bb 5755 else
dd8fbd78 5756 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5757 }
5758 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 5759 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
5760 tcg_gen_mov_i32(tmp2, tmp);
5761 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5762 }
7d1b0095 5763 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5764 } else {
5765 return 1;
5766 }
5767 }
5768 }
5769 return 0;
5770}
5771
fe1479c3
PB
5772static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5773{
5774 int crn = (insn >> 16) & 0xf;
5775 int crm = insn & 0xf;
5776 int op1 = (insn >> 21) & 7;
5777 int op2 = (insn >> 5) & 7;
5778 int rt = (insn >> 12) & 0xf;
5779 TCGv tmp;
5780
ca27c052
PM
5781 /* Minimal set of debug registers, since we don't support debug */
5782 if (op1 == 0 && crn == 0 && op2 == 0) {
5783 switch (crm) {
5784 case 0:
5785 /* DBGDIDR: just RAZ. In particular this means the
5786 * "debug architecture version" bits will read as
5787 * a reserved value, which should cause Linux to
5788 * not try to use the debug hardware.
5789 */
5790 tmp = tcg_const_i32(0);
5791 store_reg(s, rt, tmp);
5792 return 0;
5793 case 1:
5794 case 2:
5795 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
5796 * don't implement memory mapped debug components
5797 */
5798 if (ENABLE_ARCH_7) {
5799 tmp = tcg_const_i32(0);
5800 store_reg(s, rt, tmp);
5801 return 0;
5802 }
5803 break;
5804 default:
5805 break;
5806 }
5807 }
5808
fe1479c3
PB
5809 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5810 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5811 /* TEECR */
5812 if (IS_USER(s))
5813 return 1;
5814 tmp = load_cpu_field(teecr);
5815 store_reg(s, rt, tmp);
5816 return 0;
5817 }
5818 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5819 /* TEEHBR */
5820 if (IS_USER(s) && (env->teecr & 1))
5821 return 1;
5822 tmp = load_cpu_field(teehbr);
5823 store_reg(s, rt, tmp);
5824 return 0;
5825 }
5826 }
5827 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5828 op1, crn, crm, op2);
5829 return 1;
5830}
5831
5832static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5833{
5834 int crn = (insn >> 16) & 0xf;
5835 int crm = insn & 0xf;
5836 int op1 = (insn >> 21) & 7;
5837 int op2 = (insn >> 5) & 7;
5838 int rt = (insn >> 12) & 0xf;
5839 TCGv tmp;
5840
5841 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5842 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5843 /* TEECR */
5844 if (IS_USER(s))
5845 return 1;
5846 tmp = load_reg(s, rt);
5847 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 5848 tcg_temp_free_i32(tmp);
fe1479c3
PB
5849 return 0;
5850 }
5851 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5852 /* TEEHBR */
5853 if (IS_USER(s) && (env->teecr & 1))
5854 return 1;
5855 tmp = load_reg(s, rt);
5856 store_cpu_field(tmp, teehbr);
5857 return 0;
5858 }
5859 }
5860 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5861 op1, crn, crm, op2);
5862 return 1;
5863}
5864
9ee6e8bb
PB
5865static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5866{
5867 int cpnum;
5868
5869 cpnum = (insn >> 8) & 0xf;
5870 if (arm_feature(env, ARM_FEATURE_XSCALE)
5871 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5872 return 1;
5873
5874 switch (cpnum) {
5875 case 0:
5876 case 1:
5877 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5878 return disas_iwmmxt_insn(env, s, insn);
5879 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5880 return disas_dsp_insn(env, s, insn);
5881 }
5882 return 1;
5883 case 10:
5884 case 11:
5885 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5886 case 14:
5887 /* Coprocessors 7-15 are architecturally reserved by ARM.
5888 Unfortunately Intel decided to ignore this. */
5889 if (arm_feature(env, ARM_FEATURE_XSCALE))
5890 goto board;
5891 if (insn & (1 << 20))
5892 return disas_cp14_read(env, s, insn);
5893 else
5894 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5895 case 15:
5896 return disas_cp15_insn (env, s, insn);
5897 default:
fe1479c3 5898 board:
9ee6e8bb
PB
5899 /* Unknown coprocessor. See if the board has hooked it. */
5900 return disas_cp_insn (env, s, insn);
5901 }
5902}
5903
5e3f878a
PB
5904
5905/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5906static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5907{
5908 TCGv tmp;
7d1b0095 5909 tmp = tcg_temp_new_i32();
5e3f878a
PB
5910 tcg_gen_trunc_i64_i32(tmp, val);
5911 store_reg(s, rlow, tmp);
7d1b0095 5912 tmp = tcg_temp_new_i32();
5e3f878a
PB
5913 tcg_gen_shri_i64(val, val, 32);
5914 tcg_gen_trunc_i64_i32(tmp, val);
5915 store_reg(s, rhigh, tmp);
5916}
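/* Typical use (illustrative sketch, mirroring the long-multiply cases in
   disas_arm_insn() below):
       tmp64 = gen_mulu_i64_i32(tmp, tmp2);    32 x 32 -> 64 multiply
       gen_addq(s, tmp64, rn, rd);             optional 64-bit accumulate
       gen_storeq_reg(s, rn, rd, tmp64);       low word -> rn, high word -> rd
       tcg_temp_free_i64(tmp64);
*/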
5917
5918/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5919static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5920{
a7812ae4 5921 TCGv_i64 tmp;
5e3f878a
PB
5922 TCGv tmp2;
5923
36aa55dc 5924 /* Load value and extend to 64 bits. */
a7812ae4 5925 tmp = tcg_temp_new_i64();
5e3f878a
PB
5926 tmp2 = load_reg(s, rlow);
5927 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 5928 tcg_temp_free_i32(tmp2);
5e3f878a 5929 tcg_gen_add_i64(val, val, tmp);
b75263d6 5930 tcg_temp_free_i64(tmp);
5e3f878a
PB
5931}
5932
5933/* load and add a 64-bit value from a register pair. */
a7812ae4 5934static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5935{
a7812ae4 5936 TCGv_i64 tmp;
36aa55dc
PB
5937 TCGv tmpl;
5938 TCGv tmph;
5e3f878a
PB
5939
5940 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5941 tmpl = load_reg(s, rlow);
5942 tmph = load_reg(s, rhigh);
a7812ae4 5943 tmp = tcg_temp_new_i64();
36aa55dc 5944 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
5945 tcg_temp_free_i32(tmpl);
5946 tcg_temp_free_i32(tmph);
5e3f878a 5947 tcg_gen_add_i64(val, val, tmp);
b75263d6 5948 tcg_temp_free_i64(tmp);
5e3f878a
PB
5949}
5950
5951/* Set N and Z flags from a 64-bit value. */
a7812ae4 5952static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 5953{
7d1b0095 5954 TCGv tmp = tcg_temp_new_i32();
5e3f878a 5955 gen_helper_logicq_cc(tmp, val);
6fbe23d5 5956 gen_logic_CC(tmp);
7d1b0095 5957 tcg_temp_free_i32(tmp);
5e3f878a
PB
5958}
5959
426f5abc
PB
5960/* Load/Store exclusive instructions are implemented by remembering
5961 the value/address loaded, and seeing if these are the same
5962 when the store is performed. This should be sufficient to implement
5963 the architecturally mandated semantics, and avoids having to monitor
5964 regular stores.
5965
5966 In system emulation mode only one CPU will be running at once, so
5967 this sequence is effectively atomic. In user emulation mode we
5968 throw an exception and handle the atomic operation elsewhere. */
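/* Illustrative only: the guest code these helpers exist for is the usual
   exclusive retry loop, e.g.
       retry:  ldrex   r1, [r0]
               add     r1, r1, #1
               strex   r2, r1, [r0]
               cmp     r2, #0
               bne     retry
   gen_load_exclusive() records the address and loaded value;
   gen_store_exclusive() performs the store (and writes 0 to the status
   register) only while both still match, otherwise it writes 1. */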
5969static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5970 TCGv addr, int size)
5971{
5972 TCGv tmp;
5973
5974 switch (size) {
5975 case 0:
5976 tmp = gen_ld8u(addr, IS_USER(s));
5977 break;
5978 case 1:
5979 tmp = gen_ld16u(addr, IS_USER(s));
5980 break;
5981 case 2:
5982 case 3:
5983 tmp = gen_ld32(addr, IS_USER(s));
5984 break;
5985 default:
5986 abort();
5987 }
5988 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5989 store_reg(s, rt, tmp);
5990 if (size == 3) {
7d1b0095 5991 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
5992 tcg_gen_addi_i32(tmp2, addr, 4);
5993 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 5994 tcg_temp_free_i32(tmp2);
426f5abc
PB
5995 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5996 store_reg(s, rt2, tmp);
5997 }
5998 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5999}
6000
6001static void gen_clrex(DisasContext *s)
6002{
6003 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6004}
6005
6006#ifdef CONFIG_USER_ONLY
6007static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6008 TCGv addr, int size)
6009{
6010 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6011 tcg_gen_movi_i32(cpu_exclusive_info,
6012 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6013 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6014}
6015#else
6016static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6017 TCGv addr, int size)
6018{
6019 TCGv tmp;
6020 int done_label;
6021 int fail_label;
6022
6023 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6024 [addr] = {Rt};
6025 {Rd} = 0;
6026 } else {
6027 {Rd} = 1;
6028 } */
6029 fail_label = gen_new_label();
6030 done_label = gen_new_label();
6031 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6032 switch (size) {
6033 case 0:
6034 tmp = gen_ld8u(addr, IS_USER(s));
6035 break;
6036 case 1:
6037 tmp = gen_ld16u(addr, IS_USER(s));
6038 break;
6039 case 2:
6040 case 3:
6041 tmp = gen_ld32(addr, IS_USER(s));
6042 break;
6043 default:
6044 abort();
6045 }
6046 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6047 tcg_temp_free_i32(tmp);
426f5abc 6048 if (size == 3) {
7d1b0095 6049 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6050 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6051 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6052 tcg_temp_free_i32(tmp2);
426f5abc 6053 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6054 tcg_temp_free_i32(tmp);
426f5abc
PB
6055 }
6056 tmp = load_reg(s, rt);
6057 switch (size) {
6058 case 0:
6059 gen_st8(tmp, addr, IS_USER(s));
6060 break;
6061 case 1:
6062 gen_st16(tmp, addr, IS_USER(s));
6063 break;
6064 case 2:
6065 case 3:
6066 gen_st32(tmp, addr, IS_USER(s));
6067 break;
6068 default:
6069 abort();
6070 }
6071 if (size == 3) {
6072 tcg_gen_addi_i32(addr, addr, 4);
6073 tmp = load_reg(s, rt2);
6074 gen_st32(tmp, addr, IS_USER(s));
6075 }
6076 tcg_gen_movi_i32(cpu_R[rd], 0);
6077 tcg_gen_br(done_label);
6078 gen_set_label(fail_label);
6079 tcg_gen_movi_i32(cpu_R[rd], 1);
6080 gen_set_label(done_label);
6081 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6082}
6083#endif
6084
9ee6e8bb
PB
6085static void disas_arm_insn(CPUState * env, DisasContext *s)
6086{
6087 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6088 TCGv tmp;
3670669c 6089 TCGv tmp2;
6ddbc6e4 6090 TCGv tmp3;
b0109805 6091 TCGv addr;
a7812ae4 6092 TCGv_i64 tmp64;
9ee6e8bb
PB
6093
6094 insn = ldl_code(s->pc);
6095 s->pc += 4;
6096
6097 /* M variants do not implement ARM mode. */
6098 if (IS_M(env))
6099 goto illegal_op;
6100 cond = insn >> 28;
6101 if (cond == 0xf){
6102 /* Unconditional instructions. */
6103 if (((insn >> 25) & 7) == 1) {
6104 /* NEON Data processing. */
6105 if (!arm_feature(env, ARM_FEATURE_NEON))
6106 goto illegal_op;
6107
6108 if (disas_neon_data_insn(env, s, insn))
6109 goto illegal_op;
6110 return;
6111 }
6112 if ((insn & 0x0f100000) == 0x04000000) {
6113 /* NEON load/store. */
6114 if (!arm_feature(env, ARM_FEATURE_NEON))
6115 goto illegal_op;
6116
6117 if (disas_neon_ls_insn(env, s, insn))
6118 goto illegal_op;
6119 return;
6120 }
3d185e5d
PM
6121 if (((insn & 0x0f30f000) == 0x0510f000) ||
6122 ((insn & 0x0f30f010) == 0x0710f000)) {
6123 if ((insn & (1 << 22)) == 0) {
6124 /* PLDW; v7MP */
6125 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6126 goto illegal_op;
6127 }
6128 }
6129 /* Otherwise PLD; v5TE+ */
6130 return;
6131 }
6132 if (((insn & 0x0f70f000) == 0x0450f000) ||
6133 ((insn & 0x0f70f010) == 0x0650f000)) {
6134 ARCH(7);
6135 return; /* PLI; V7 */
6136 }
6137 if (((insn & 0x0f700000) == 0x04100000) ||
6138 ((insn & 0x0f700010) == 0x06100000)) {
6139 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6140 goto illegal_op;
6141 }
6142 return; /* v7MP: Unallocated memory hint: must NOP */
6143 }
6144
6145 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6146 ARCH(6);
6147 /* setend */
6148 if (insn & (1 << 9)) {
6149 /* BE8 mode not implemented. */
6150 goto illegal_op;
6151 }
6152 return;
6153 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6154 switch ((insn >> 4) & 0xf) {
6155 case 1: /* clrex */
6156 ARCH(6K);
426f5abc 6157 gen_clrex(s);
9ee6e8bb
PB
6158 return;
6159 case 4: /* dsb */
6160 case 5: /* dmb */
6161 case 6: /* isb */
6162 ARCH(7);
6163 /* We don't emulate caches so these are a no-op. */
6164 return;
6165 default:
6166 goto illegal_op;
6167 }
6168 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6169 /* srs */
c67b6b71 6170 int32_t offset;
9ee6e8bb
PB
6171 if (IS_USER(s))
6172 goto illegal_op;
6173 ARCH(6);
6174 op1 = (insn & 0x1f);
7d1b0095 6175 addr = tcg_temp_new_i32();
39ea3d4e
PM
6176 tmp = tcg_const_i32(op1);
6177 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6178 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6179 i = (insn >> 23) & 3;
6180 switch (i) {
6181 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6182 case 1: offset = 0; break; /* IA */
6183 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6184 case 3: offset = 4; break; /* IB */
6185 default: abort();
6186 }
6187 if (offset)
b0109805
PB
6188 tcg_gen_addi_i32(addr, addr, offset);
6189 tmp = load_reg(s, 14);
6190 gen_st32(tmp, addr, 0);
c67b6b71 6191 tmp = load_cpu_field(spsr);
b0109805
PB
6192 tcg_gen_addi_i32(addr, addr, 4);
6193 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6194 if (insn & (1 << 21)) {
6195 /* Base writeback. */
6196 switch (i) {
6197 case 0: offset = -8; break;
c67b6b71
FN
6198 case 1: offset = 4; break;
6199 case 2: offset = -4; break;
9ee6e8bb
PB
6200 case 3: offset = 0; break;
6201 default: abort();
6202 }
6203 if (offset)
c67b6b71 6204 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6205 tmp = tcg_const_i32(op1);
6206 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6207 tcg_temp_free_i32(tmp);
7d1b0095 6208 tcg_temp_free_i32(addr);
b0109805 6209 } else {
7d1b0095 6210 tcg_temp_free_i32(addr);
9ee6e8bb 6211 }
a990f58f 6212 return;
ea825eee 6213 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6214 /* rfe */
c67b6b71 6215 int32_t offset;
9ee6e8bb
PB
6216 if (IS_USER(s))
6217 goto illegal_op;
6218 ARCH(6);
6219 rn = (insn >> 16) & 0xf;
b0109805 6220 addr = load_reg(s, rn);
9ee6e8bb
PB
6221 i = (insn >> 23) & 3;
6222 switch (i) {
b0109805 6223 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6224 case 1: offset = 0; break; /* IA */
6225 case 2: offset = -8; break; /* DB */
b0109805 6226 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6227 default: abort();
6228 }
6229 if (offset)
b0109805
PB
6230 tcg_gen_addi_i32(addr, addr, offset);
6231 /* Load PC into tmp and CPSR into tmp2. */
6232 tmp = gen_ld32(addr, 0);
6233 tcg_gen_addi_i32(addr, addr, 4);
6234 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6235 if (insn & (1 << 21)) {
6236 /* Base writeback. */
6237 switch (i) {
b0109805 6238 case 0: offset = -8; break;
c67b6b71
FN
6239 case 1: offset = 4; break;
6240 case 2: offset = -4; break;
b0109805 6241 case 3: offset = 0; break;
9ee6e8bb
PB
6242 default: abort();
6243 }
6244 if (offset)
b0109805
PB
6245 tcg_gen_addi_i32(addr, addr, offset);
6246 store_reg(s, rn, addr);
6247 } else {
7d1b0095 6248 tcg_temp_free_i32(addr);
9ee6e8bb 6249 }
b0109805 6250 gen_rfe(s, tmp, tmp2);
c67b6b71 6251 return;
9ee6e8bb
PB
6252 } else if ((insn & 0x0e000000) == 0x0a000000) {
6253 /* branch link and change to thumb (blx <offset>) */
6254 int32_t offset;
6255
6256 val = (uint32_t)s->pc;
7d1b0095 6257 tmp = tcg_temp_new_i32();
d9ba4830
PB
6258 tcg_gen_movi_i32(tmp, val);
6259 store_reg(s, 14, tmp);
9ee6e8bb
PB
6260 /* Sign-extend the 24-bit offset */
6261 offset = (((int32_t)insn) << 8) >> 8;
6262 /* offset * 4 + bit24 * 2 + (thumb bit) */
6263 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6264 /* pipeline offset */
6265 val += 4;
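 /* Worked example (illustrative): for a BLX <imm> at address A with
    imm24 = 1 and the H bit (bit 24) set, s->pc is A + 4 here, so val
    becomes A + 4 + (1 << 2) + 2 + 1 + 4, i.e. the architectural target
    A + 8 + imm24 * 4 + H * 2 with bit 0 set to request Thumb state. */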
d9ba4830 6266 gen_bx_im(s, val);
9ee6e8bb
PB
6267 return;
6268 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6269 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6270 /* iWMMXt register transfer. */
6271 if (env->cp15.c15_cpar & (1 << 1))
6272 if (!disas_iwmmxt_insn(env, s, insn))
6273 return;
6274 }
6275 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6276 /* Coprocessor double register transfer. */
6277 } else if ((insn & 0x0f000010) == 0x0e000010) {
6278 /* Additional coprocessor register transfer. */
7997d92f 6279 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6280 uint32_t mask;
6281 uint32_t val;
6282 /* cps (privileged) */
6283 if (IS_USER(s))
6284 return;
6285 mask = val = 0;
6286 if (insn & (1 << 19)) {
6287 if (insn & (1 << 8))
6288 mask |= CPSR_A;
6289 if (insn & (1 << 7))
6290 mask |= CPSR_I;
6291 if (insn & (1 << 6))
6292 mask |= CPSR_F;
6293 if (insn & (1 << 18))
6294 val |= mask;
6295 }
7997d92f 6296 if (insn & (1 << 17)) {
9ee6e8bb
PB
6297 mask |= CPSR_M;
6298 val |= (insn & 0x1f);
6299 }
6300 if (mask) {
2fbac54b 6301 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6302 }
6303 return;
6304 }
6305 goto illegal_op;
6306 }
6307 if (cond != 0xe) {
6308 /* if not always execute, we generate a conditional jump to
6309 next instruction */
6310 s->condlabel = gen_new_label();
d9ba4830 6311 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6312 s->condjmp = 1;
6313 }
6314 if ((insn & 0x0f900000) == 0x03000000) {
6315 if ((insn & (1 << 21)) == 0) {
6316 ARCH(6T2);
6317 rd = (insn >> 12) & 0xf;
6318 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6319 if ((insn & (1 << 22)) == 0) {
6320 /* MOVW */
7d1b0095 6321 tmp = tcg_temp_new_i32();
5e3f878a 6322 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6323 } else {
6324 /* MOVT */
5e3f878a 6325 tmp = load_reg(s, rd);
86831435 6326 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6327 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6328 }
5e3f878a 6329 store_reg(s, rd, tmp);
9ee6e8bb
PB
6330 } else {
6331 if (((insn >> 12) & 0xf) != 0xf)
6332 goto illegal_op;
6333 if (((insn >> 16) & 0xf) == 0) {
6334 gen_nop_hint(s, insn & 0xff);
6335 } else {
6336 /* CPSR = immediate */
6337 val = insn & 0xff;
6338 shift = ((insn >> 8) & 0xf) * 2;
6339 if (shift)
6340 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6341 i = ((insn & (1 << 22)) != 0);
2fbac54b 6342 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6343 goto illegal_op;
6344 }
6345 }
6346 } else if ((insn & 0x0f900000) == 0x01000000
6347 && (insn & 0x00000090) != 0x00000090) {
6348 /* miscellaneous instructions */
6349 op1 = (insn >> 21) & 3;
6350 sh = (insn >> 4) & 0xf;
6351 rm = insn & 0xf;
6352 switch (sh) {
6353 case 0x0: /* move program status register */
6354 if (op1 & 1) {
6355 /* PSR = reg */
2fbac54b 6356 tmp = load_reg(s, rm);
9ee6e8bb 6357 i = ((op1 & 2) != 0);
2fbac54b 6358 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6359 goto illegal_op;
6360 } else {
6361 /* reg = PSR */
6362 rd = (insn >> 12) & 0xf;
6363 if (op1 & 2) {
6364 if (IS_USER(s))
6365 goto illegal_op;
d9ba4830 6366 tmp = load_cpu_field(spsr);
9ee6e8bb 6367 } else {
7d1b0095 6368 tmp = tcg_temp_new_i32();
d9ba4830 6369 gen_helper_cpsr_read(tmp);
9ee6e8bb 6370 }
d9ba4830 6371 store_reg(s, rd, tmp);
9ee6e8bb
PB
6372 }
6373 break;
6374 case 0x1:
6375 if (op1 == 1) {
6376 /* branch/exchange thumb (bx). */
d9ba4830
PB
6377 tmp = load_reg(s, rm);
6378 gen_bx(s, tmp);
9ee6e8bb
PB
6379 } else if (op1 == 3) {
6380 /* clz */
6381 rd = (insn >> 12) & 0xf;
1497c961
PB
6382 tmp = load_reg(s, rm);
6383 gen_helper_clz(tmp, tmp);
6384 store_reg(s, rd, tmp);
9ee6e8bb
PB
6385 } else {
6386 goto illegal_op;
6387 }
6388 break;
6389 case 0x2:
6390 if (op1 == 1) {
6391 ARCH(5J); /* bxj */
6392 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6393 tmp = load_reg(s, rm);
6394 gen_bx(s, tmp);
9ee6e8bb
PB
6395 } else {
6396 goto illegal_op;
6397 }
6398 break;
6399 case 0x3:
6400 if (op1 != 1)
6401 goto illegal_op;
6402
6403 /* branch link/exchange thumb (blx) */
d9ba4830 6404 tmp = load_reg(s, rm);
7d1b0095 6405 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6406 tcg_gen_movi_i32(tmp2, s->pc);
6407 store_reg(s, 14, tmp2);
6408 gen_bx(s, tmp);
9ee6e8bb
PB
6409 break;
6410 case 0x5: /* saturating add/subtract */
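 /* Decode note: op1 holds bits [22:21]; bit 1 selects the doubling forms
    (QDADD/QDSUB, which saturate-double the second operand first) and bit 0
    selects subtraction, giving QADD, QSUB, QDADD, QDSUB for op1 = 0..3. */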
6411 rd = (insn >> 12) & 0xf;
6412 rn = (insn >> 16) & 0xf;
b40d0353 6413 tmp = load_reg(s, rm);
5e3f878a 6414 tmp2 = load_reg(s, rn);
9ee6e8bb 6415 if (op1 & 2)
5e3f878a 6416 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6417 if (op1 & 1)
5e3f878a 6418 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6419 else
5e3f878a 6420 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6421 tcg_temp_free_i32(tmp2);
5e3f878a 6422 store_reg(s, rd, tmp);
9ee6e8bb 6423 break;
49e14940
AL
6424 case 7:
6425 /* SMC instruction (op1 == 3)
6426 and undefined instructions (op1 == 0 || op1 == 2)
6427 will trap */
6428 if (op1 != 1) {
6429 goto illegal_op;
6430 }
6431 /* bkpt */
bc4a0de0 6432 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6433 break;
6434 case 0x8: /* signed multiply */
6435 case 0xa:
6436 case 0xc:
6437 case 0xe:
6438 rs = (insn >> 8) & 0xf;
6439 rn = (insn >> 12) & 0xf;
6440 rd = (insn >> 16) & 0xf;
6441 if (op1 == 1) {
6442 /* (32 * 16) >> 16 */
5e3f878a
PB
6443 tmp = load_reg(s, rm);
6444 tmp2 = load_reg(s, rs);
9ee6e8bb 6445 if (sh & 4)
5e3f878a 6446 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6447 else
5e3f878a 6448 gen_sxth(tmp2);
a7812ae4
PB
6449 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6450 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6451 tmp = tcg_temp_new_i32();
a7812ae4 6452 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6453 tcg_temp_free_i64(tmp64);
9ee6e8bb 6454 if ((sh & 2) == 0) {
5e3f878a
PB
6455 tmp2 = load_reg(s, rn);
6456 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6457 tcg_temp_free_i32(tmp2);
9ee6e8bb 6458 }
5e3f878a 6459 store_reg(s, rd, tmp);
9ee6e8bb
PB
6460 } else {
6461 /* 16 * 16 */
5e3f878a
PB
6462 tmp = load_reg(s, rm);
6463 tmp2 = load_reg(s, rs);
6464 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6465 tcg_temp_free_i32(tmp2);
9ee6e8bb 6466 if (op1 == 2) {
a7812ae4
PB
6467 tmp64 = tcg_temp_new_i64();
6468 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6469 tcg_temp_free_i32(tmp);
a7812ae4
PB
6470 gen_addq(s, tmp64, rn, rd);
6471 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6472 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6473 } else {
6474 if (op1 == 0) {
5e3f878a
PB
6475 tmp2 = load_reg(s, rn);
6476 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6477 tcg_temp_free_i32(tmp2);
9ee6e8bb 6478 }
5e3f878a 6479 store_reg(s, rd, tmp);
9ee6e8bb
PB
6480 }
6481 }
6482 break;
6483 default:
6484 goto illegal_op;
6485 }
6486 } else if (((insn & 0x0e000000) == 0 &&
6487 (insn & 0x00000090) != 0x90) ||
6488 ((insn & 0x0e000000) == (1 << 25))) {
6489 int set_cc, logic_cc, shiftop;
6490
6491 op1 = (insn >> 21) & 0xf;
6492 set_cc = (insn >> 20) & 1;
6493 logic_cc = table_logic_cc[op1] & set_cc;
6494
6495 /* data processing instruction */
6496 if (insn & (1 << 25)) {
6497 /* immediate operand */
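 /* The 12-bit immediate field is an 8-bit value rotated right by twice the
    4-bit rotate count; e.g. (illustrative) 0x4FF decodes to
    0xFF ROR 8 = 0xFF000000. */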
6498 val = insn & 0xff;
6499 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6500 if (shift) {
9ee6e8bb 6501 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6502 }
7d1b0095 6503 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6504 tcg_gen_movi_i32(tmp2, val);
6505 if (logic_cc && shift) {
6506 gen_set_CF_bit31(tmp2);
6507 }
9ee6e8bb
PB
6508 } else {
6509 /* register */
6510 rm = (insn) & 0xf;
e9bb4aa9 6511 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6512 shiftop = (insn >> 5) & 3;
6513 if (!(insn & (1 << 4))) {
6514 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6515 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6516 } else {
6517 rs = (insn >> 8) & 0xf;
8984bd2e 6518 tmp = load_reg(s, rs);
e9bb4aa9 6519 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6520 }
6521 }
6522 if (op1 != 0x0f && op1 != 0x0d) {
6523 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6524 tmp = load_reg(s, rn);
6525 } else {
6526 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6527 }
6528 rd = (insn >> 12) & 0xf;
6529 switch(op1) {
6530 case 0x00:
e9bb4aa9
JR
6531 tcg_gen_and_i32(tmp, tmp, tmp2);
6532 if (logic_cc) {
6533 gen_logic_CC(tmp);
6534 }
21aeb343 6535 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6536 break;
6537 case 0x01:
e9bb4aa9
JR
6538 tcg_gen_xor_i32(tmp, tmp, tmp2);
6539 if (logic_cc) {
6540 gen_logic_CC(tmp);
6541 }
21aeb343 6542 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6543 break;
6544 case 0x02:
6545 if (set_cc && rd == 15) {
6546 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6547 if (IS_USER(s)) {
9ee6e8bb 6548 goto illegal_op;
e9bb4aa9
JR
6549 }
6550 gen_helper_sub_cc(tmp, tmp, tmp2);
6551 gen_exception_return(s, tmp);
9ee6e8bb 6552 } else {
e9bb4aa9
JR
6553 if (set_cc) {
6554 gen_helper_sub_cc(tmp, tmp, tmp2);
6555 } else {
6556 tcg_gen_sub_i32(tmp, tmp, tmp2);
6557 }
21aeb343 6558 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6559 }
6560 break;
6561 case 0x03:
e9bb4aa9
JR
6562 if (set_cc) {
6563 gen_helper_sub_cc(tmp, tmp2, tmp);
6564 } else {
6565 tcg_gen_sub_i32(tmp, tmp2, tmp);
6566 }
21aeb343 6567 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6568 break;
6569 case 0x04:
e9bb4aa9
JR
6570 if (set_cc) {
6571 gen_helper_add_cc(tmp, tmp, tmp2);
6572 } else {
6573 tcg_gen_add_i32(tmp, tmp, tmp2);
6574 }
21aeb343 6575 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6576 break;
6577 case 0x05:
e9bb4aa9
JR
6578 if (set_cc) {
6579 gen_helper_adc_cc(tmp, tmp, tmp2);
6580 } else {
6581 gen_add_carry(tmp, tmp, tmp2);
6582 }
21aeb343 6583 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6584 break;
6585 case 0x06:
e9bb4aa9
JR
6586 if (set_cc) {
6587 gen_helper_sbc_cc(tmp, tmp, tmp2);
6588 } else {
6589 gen_sub_carry(tmp, tmp, tmp2);
6590 }
21aeb343 6591 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6592 break;
6593 case 0x07:
e9bb4aa9
JR
6594 if (set_cc) {
6595 gen_helper_sbc_cc(tmp, tmp2, tmp);
6596 } else {
6597 gen_sub_carry(tmp, tmp2, tmp);
6598 }
21aeb343 6599 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6600 break;
6601 case 0x08:
6602 if (set_cc) {
e9bb4aa9
JR
6603 tcg_gen_and_i32(tmp, tmp, tmp2);
6604 gen_logic_CC(tmp);
9ee6e8bb 6605 }
7d1b0095 6606 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6607 break;
6608 case 0x09:
6609 if (set_cc) {
e9bb4aa9
JR
6610 tcg_gen_xor_i32(tmp, tmp, tmp2);
6611 gen_logic_CC(tmp);
9ee6e8bb 6612 }
7d1b0095 6613 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6614 break;
6615 case 0x0a:
6616 if (set_cc) {
e9bb4aa9 6617 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6618 }
7d1b0095 6619 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6620 break;
6621 case 0x0b:
6622 if (set_cc) {
e9bb4aa9 6623 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6624 }
7d1b0095 6625 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6626 break;
6627 case 0x0c:
e9bb4aa9
JR
6628 tcg_gen_or_i32(tmp, tmp, tmp2);
6629 if (logic_cc) {
6630 gen_logic_CC(tmp);
6631 }
21aeb343 6632 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6633 break;
6634 case 0x0d:
6635 if (logic_cc && rd == 15) {
6636 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6637 if (IS_USER(s)) {
9ee6e8bb 6638 goto illegal_op;
e9bb4aa9
JR
6639 }
6640 gen_exception_return(s, tmp2);
9ee6e8bb 6641 } else {
e9bb4aa9
JR
6642 if (logic_cc) {
6643 gen_logic_CC(tmp2);
6644 }
21aeb343 6645 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6646 }
6647 break;
6648 case 0x0e:
f669df27 6649 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6650 if (logic_cc) {
6651 gen_logic_CC(tmp);
6652 }
21aeb343 6653 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6654 break;
6655 default:
6656 case 0x0f:
e9bb4aa9
JR
6657 tcg_gen_not_i32(tmp2, tmp2);
6658 if (logic_cc) {
6659 gen_logic_CC(tmp2);
6660 }
21aeb343 6661 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6662 break;
6663 }
e9bb4aa9 6664 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 6665 tcg_temp_free_i32(tmp2);
e9bb4aa9 6666 }
9ee6e8bb
PB
6667 } else {
6668 /* other instructions */
6669 op1 = (insn >> 24) & 0xf;
6670 switch(op1) {
6671 case 0x0:
6672 case 0x1:
6673 /* multiplies, extra load/stores */
6674 sh = (insn >> 5) & 3;
6675 if (sh == 0) {
6676 if (op1 == 0x0) {
6677 rd = (insn >> 16) & 0xf;
6678 rn = (insn >> 12) & 0xf;
6679 rs = (insn >> 8) & 0xf;
6680 rm = (insn) & 0xf;
6681 op1 = (insn >> 20) & 0xf;
6682 switch (op1) {
6683 case 0: case 1: case 2: case 3: case 6:
6684 /* 32 bit mul */
5e3f878a
PB
6685 tmp = load_reg(s, rs);
6686 tmp2 = load_reg(s, rm);
6687 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 6688 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6689 if (insn & (1 << 22)) {
6690 /* Subtract (mls) */
6691 ARCH(6T2);
5e3f878a
PB
6692 tmp2 = load_reg(s, rn);
6693 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 6694 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6695 } else if (insn & (1 << 21)) {
6696 /* Add */
5e3f878a
PB
6697 tmp2 = load_reg(s, rn);
6698 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 6699 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6700 }
6701 if (insn & (1 << 20))
5e3f878a
PB
6702 gen_logic_CC(tmp);
6703 store_reg(s, rd, tmp);
9ee6e8bb 6704 break;
8aac08b1
AJ
6705 case 4:
6706 /* 64 bit mul double accumulate (UMAAL) */
6707 ARCH(6);
6708 tmp = load_reg(s, rs);
6709 tmp2 = load_reg(s, rm);
6710 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6711 gen_addq_lo(s, tmp64, rn);
6712 gen_addq_lo(s, tmp64, rd);
6713 gen_storeq_reg(s, rn, rd, tmp64);
6714 tcg_temp_free_i64(tmp64);
6715 break;
6716 case 8: case 9: case 10: case 11:
6717 case 12: case 13: case 14: case 15:
6718 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
6719 tmp = load_reg(s, rs);
6720 tmp2 = load_reg(s, rm);
8aac08b1 6721 if (insn & (1 << 22)) {
a7812ae4 6722 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 6723 } else {
a7812ae4 6724 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
6725 }
6726 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 6727 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 6728 }
8aac08b1 6729 if (insn & (1 << 20)) {
a7812ae4 6730 gen_logicq_cc(tmp64);
8aac08b1 6731 }
a7812ae4 6732 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6733 tcg_temp_free_i64(tmp64);
9ee6e8bb 6734 break;
8aac08b1
AJ
6735 default:
6736 goto illegal_op;
9ee6e8bb
PB
6737 }
6738 } else {
6739 rn = (insn >> 16) & 0xf;
6740 rd = (insn >> 12) & 0xf;
6741 if (insn & (1 << 23)) {
6742 /* load/store exclusive */
86753403
PB
6743 op1 = (insn >> 21) & 0x3;
6744 if (op1)
a47f43d2 6745 ARCH(6K);
86753403
PB
6746 else
6747 ARCH(6);
3174f8e9 6748 addr = tcg_temp_local_new_i32();
98a46317 6749 load_reg_var(s, addr, rn);
9ee6e8bb 6750 if (insn & (1 << 20)) {
86753403
PB
6751 switch (op1) {
6752 case 0: /* ldrex */
426f5abc 6753 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6754 break;
6755 case 1: /* ldrexd */
426f5abc 6756 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6757 break;
6758 case 2: /* ldrexb */
426f5abc 6759 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6760 break;
6761 case 3: /* ldrexh */
426f5abc 6762 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6763 break;
6764 default:
6765 abort();
6766 }
9ee6e8bb
PB
6767 } else {
6768 rm = insn & 0xf;
86753403
PB
6769 switch (op1) {
6770 case 0: /* strex */
426f5abc 6771 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6772 break;
6773 case 1: /* strexd */
502e64fe 6774 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6775 break;
6776 case 2: /* strexb */
426f5abc 6777 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6778 break;
6779 case 3: /* strexh */
426f5abc 6780 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6781 break;
6782 default:
6783 abort();
6784 }
9ee6e8bb 6785 }
3174f8e9 6786 tcg_temp_free(addr);
9ee6e8bb
PB
6787 } else {
6788 /* SWP instruction */
6789 rm = (insn) & 0xf;
6790
8984bd2e
PB
6791 /* ??? This is not really atomic. However we know
6792 we never have multiple CPUs running in parallel,
6793 so it is good enough. */
6794 addr = load_reg(s, rn);
6795 tmp = load_reg(s, rm);
9ee6e8bb 6796 if (insn & (1 << 22)) {
8984bd2e
PB
6797 tmp2 = gen_ld8u(addr, IS_USER(s));
6798 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6799 } else {
8984bd2e
PB
6800 tmp2 = gen_ld32(addr, IS_USER(s));
6801 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6802 }
7d1b0095 6803 tcg_temp_free_i32(addr);
8984bd2e 6804 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6805 }
6806 }
6807 } else {
6808 int address_offset;
6809 int load;
6810 /* Misc load/store */
6811 rn = (insn >> 16) & 0xf;
6812 rd = (insn >> 12) & 0xf;
b0109805 6813 addr = load_reg(s, rn);
9ee6e8bb 6814 if (insn & (1 << 24))
b0109805 6815 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6816 address_offset = 0;
6817 if (insn & (1 << 20)) {
6818 /* load */
6819 switch(sh) {
6820 case 1:
b0109805 6821 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6822 break;
6823 case 2:
b0109805 6824 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6825 break;
6826 default:
6827 case 3:
b0109805 6828 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6829 break;
6830 }
6831 load = 1;
6832 } else if (sh & 2) {
6833 /* doubleword */
6834 if (sh & 1) {
6835 /* store */
b0109805
PB
6836 tmp = load_reg(s, rd);
6837 gen_st32(tmp, addr, IS_USER(s));
6838 tcg_gen_addi_i32(addr, addr, 4);
6839 tmp = load_reg(s, rd + 1);
6840 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6841 load = 0;
6842 } else {
6843 /* load */
b0109805
PB
6844 tmp = gen_ld32(addr, IS_USER(s));
6845 store_reg(s, rd, tmp);
6846 tcg_gen_addi_i32(addr, addr, 4);
6847 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6848 rd++;
6849 load = 1;
6850 }
6851 address_offset = -4;
6852 } else {
6853 /* store */
b0109805
PB
6854 tmp = load_reg(s, rd);
6855 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6856 load = 0;
6857 }
6858 /* Perform base writeback before the loaded value to
6859 ensure correct behavior with overlapping index registers.
6860 ldrd with base writeback is undefined if the
6861 destination and index registers overlap. */
6862 if (!(insn & (1 << 24))) {
b0109805
PB
6863 gen_add_datah_offset(s, insn, address_offset, addr);
6864 store_reg(s, rn, addr);
9ee6e8bb
PB
6865 } else if (insn & (1 << 21)) {
6866 if (address_offset)
b0109805
PB
6867 tcg_gen_addi_i32(addr, addr, address_offset);
6868 store_reg(s, rn, addr);
6869 } else {
7d1b0095 6870 tcg_temp_free_i32(addr);
9ee6e8bb
PB
6871 }
6872 if (load) {
6873 /* Complete the load. */
b0109805 6874 store_reg(s, rd, tmp);
9ee6e8bb
PB
6875 }
6876 }
6877 break;
6878 case 0x4:
6879 case 0x5:
6880 goto do_ldst;
6881 case 0x6:
6882 case 0x7:
6883 if (insn & (1 << 4)) {
6884 ARCH(6);
6885 /* Armv6 Media instructions. */
6886 rm = insn & 0xf;
6887 rn = (insn >> 16) & 0xf;
2c0262af 6888 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6889 rs = (insn >> 8) & 0xf;
6890 switch ((insn >> 23) & 3) {
6891 case 0: /* Parallel add/subtract. */
6892 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6893 tmp = load_reg(s, rn);
6894 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6895 sh = (insn >> 5) & 7;
6896 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6897 goto illegal_op;
6ddbc6e4 6898 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 6899 tcg_temp_free_i32(tmp2);
6ddbc6e4 6900 store_reg(s, rd, tmp);
9ee6e8bb
PB
6901 break;
6902 case 1:
6903 if ((insn & 0x00700020) == 0) {
6c95676b 6904 /* Halfword pack. */
3670669c
PB
6905 tmp = load_reg(s, rn);
6906 tmp2 = load_reg(s, rm);
9ee6e8bb 6907 shift = (insn >> 7) & 0x1f;
3670669c
PB
6908 if (insn & (1 << 6)) {
6909 /* pkhtb */
22478e79
AZ
6910 if (shift == 0)
6911 shift = 31;
6912 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6913 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6914 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6915 } else {
6916 /* pkhbt */
22478e79
AZ
6917 if (shift)
6918 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6919 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6920 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6921 }
6922 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6923 tcg_temp_free_i32(tmp2);
3670669c 6924 store_reg(s, rd, tmp);
9ee6e8bb
PB
6925 } else if ((insn & 0x00200020) == 0x00200000) {
6926 /* [us]sat */
6ddbc6e4 6927 tmp = load_reg(s, rm);
9ee6e8bb
PB
6928 shift = (insn >> 7) & 0x1f;
6929 if (insn & (1 << 6)) {
6930 if (shift == 0)
6931 shift = 31;
6ddbc6e4 6932 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6933 } else {
6ddbc6e4 6934 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6935 }
6936 sh = (insn >> 16) & 0x1f;
40d3c433
CL
6937 tmp2 = tcg_const_i32(sh);
6938 if (insn & (1 << 22))
6939 gen_helper_usat(tmp, tmp, tmp2);
6940 else
6941 gen_helper_ssat(tmp, tmp, tmp2);
6942 tcg_temp_free_i32(tmp2);
6ddbc6e4 6943 store_reg(s, rd, tmp);
9ee6e8bb
PB
6944 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6945 /* [us]sat16 */
6ddbc6e4 6946 tmp = load_reg(s, rm);
9ee6e8bb 6947 sh = (insn >> 16) & 0x1f;
40d3c433
CL
6948 tmp2 = tcg_const_i32(sh);
6949 if (insn & (1 << 22))
6950 gen_helper_usat16(tmp, tmp, tmp2);
6951 else
6952 gen_helper_ssat16(tmp, tmp, tmp2);
6953 tcg_temp_free_i32(tmp2);
6ddbc6e4 6954 store_reg(s, rd, tmp);
9ee6e8bb
PB
6955 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6956 /* Select bytes. */
6ddbc6e4
PB
6957 tmp = load_reg(s, rn);
6958 tmp2 = load_reg(s, rm);
7d1b0095 6959 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
6960 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6961 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
6962 tcg_temp_free_i32(tmp3);
6963 tcg_temp_free_i32(tmp2);
6ddbc6e4 6964 store_reg(s, rd, tmp);
9ee6e8bb 6965 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6966 tmp = load_reg(s, rm);
9ee6e8bb
PB
6967 shift = (insn >> 10) & 3;
6968 /* ??? In many cases it's not necessary to do a
6969 rotate, a shift is sufficient. */
6970 if (shift != 0)
f669df27 6971 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6972 op1 = (insn >> 20) & 7;
6973 switch (op1) {
5e3f878a
PB
6974 case 0: gen_sxtb16(tmp); break;
6975 case 2: gen_sxtb(tmp); break;
6976 case 3: gen_sxth(tmp); break;
6977 case 4: gen_uxtb16(tmp); break;
6978 case 6: gen_uxtb(tmp); break;
6979 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6980 default: goto illegal_op;
6981 }
6982 if (rn != 15) {
5e3f878a 6983 tmp2 = load_reg(s, rn);
9ee6e8bb 6984 if ((op1 & 3) == 0) {
5e3f878a 6985 gen_add16(tmp, tmp2);
9ee6e8bb 6986 } else {
5e3f878a 6987 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 6988 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6989 }
6990 }
6c95676b 6991 store_reg(s, rd, tmp);
9ee6e8bb
PB
6992 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6993 /* rev */
b0109805 6994 tmp = load_reg(s, rm);
9ee6e8bb
PB
6995 if (insn & (1 << 22)) {
6996 if (insn & (1 << 7)) {
b0109805 6997 gen_revsh(tmp);
9ee6e8bb
PB
6998 } else {
6999 ARCH(6T2);
b0109805 7000 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7001 }
7002 } else {
7003 if (insn & (1 << 7))
b0109805 7004 gen_rev16(tmp);
9ee6e8bb 7005 else
66896cb8 7006 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7007 }
b0109805 7008 store_reg(s, rd, tmp);
9ee6e8bb
PB
7009 } else {
7010 goto illegal_op;
7011 }
7012 break;
7013 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7014 tmp = load_reg(s, rm);
7015 tmp2 = load_reg(s, rs);
9ee6e8bb 7016 if (insn & (1 << 20)) {
838fa72d
AJ
7017 /* Signed multiply most significant [accumulate].
7018 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7019 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7020
955a7dd5 7021 if (rd != 15) {
838fa72d 7022 tmp = load_reg(s, rd);
9ee6e8bb 7023 if (insn & (1 << 6)) {
838fa72d 7024 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7025 } else {
838fa72d 7026 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7027 }
7028 }
838fa72d
AJ
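 /* Bit 5 is the R (round) flag: adding 0x80000000 before taking the high
    32 bits rounds to nearest instead of truncating (SMMULR/SMMLAR/SMMLSR). */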
7029 if (insn & (1 << 5)) {
7030 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7031 }
7032 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7033 tmp = tcg_temp_new_i32();
838fa72d
AJ
7034 tcg_gen_trunc_i64_i32(tmp, tmp64);
7035 tcg_temp_free_i64(tmp64);
955a7dd5 7036 store_reg(s, rn, tmp);
9ee6e8bb
PB
7037 } else {
7038 if (insn & (1 << 5))
5e3f878a
PB
7039 gen_swap_half(tmp2);
7040 gen_smul_dual(tmp, tmp2);
5e3f878a 7041 if (insn & (1 << 6)) {
e1d177b9 7042 /* This subtraction cannot overflow. */
5e3f878a
PB
7043 tcg_gen_sub_i32(tmp, tmp, tmp2);
7044 } else {
e1d177b9
PM
7045 /* This addition cannot overflow 32 bits;
7046 * however it may overflow when considered as a signed
7047 * operation, in which case we must set the Q flag.
7048 */
7049 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7050 }
7d1b0095 7051 tcg_temp_free_i32(tmp2);
9ee6e8bb 7052 if (insn & (1 << 22)) {
5e3f878a 7053 /* smlald, smlsld */
a7812ae4
PB
7054 tmp64 = tcg_temp_new_i64();
7055 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7056 tcg_temp_free_i32(tmp);
a7812ae4
PB
7057 gen_addq(s, tmp64, rd, rn);
7058 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7059 tcg_temp_free_i64(tmp64);
9ee6e8bb 7060 } else {
5e3f878a 7061 /* smuad, smusd, smlad, smlsd */
22478e79 7062 if (rd != 15)
9ee6e8bb 7063 {
22478e79 7064 tmp2 = load_reg(s, rd);
5e3f878a 7065 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7066 tcg_temp_free_i32(tmp2);
9ee6e8bb 7067 }
22478e79 7068 store_reg(s, rn, tmp);
9ee6e8bb
PB
7069 }
7070 }
7071 break;
7072 case 3:
7073 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7074 switch (op1) {
7075 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7076 ARCH(6);
7077 tmp = load_reg(s, rm);
7078 tmp2 = load_reg(s, rs);
7079 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7080 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7081 if (rd != 15) {
7082 tmp2 = load_reg(s, rd);
6ddbc6e4 7083 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7084 tcg_temp_free_i32(tmp2);
9ee6e8bb 7085 }
ded9d295 7086 store_reg(s, rn, tmp);
9ee6e8bb
PB
7087 break;
7088 case 0x20: case 0x24: case 0x28: case 0x2c:
7089 /* Bitfield insert/clear. */
7090 ARCH(6T2);
7091 shift = (insn >> 7) & 0x1f;
7092 i = (insn >> 16) & 0x1f;
7093 i = i + 1 - shift;
7094 if (rm == 15) {
7d1b0095 7095 tmp = tcg_temp_new_i32();
5e3f878a 7096 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7097 } else {
5e3f878a 7098 tmp = load_reg(s, rm);
9ee6e8bb
PB
7099 }
7100 if (i != 32) {
5e3f878a 7101 tmp2 = load_reg(s, rd);
8f8e3aa4 7102 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7103 tcg_temp_free_i32(tmp2);
9ee6e8bb 7104 }
5e3f878a 7105 store_reg(s, rd, tmp);
9ee6e8bb
PB
7106 break;
7107 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7108 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7109 ARCH(6T2);
5e3f878a 7110 tmp = load_reg(s, rm);
9ee6e8bb
PB
7111 shift = (insn >> 7) & 0x1f;
7112 i = ((insn >> 16) & 0x1f) + 1;
7113 if (shift + i > 32)
7114 goto illegal_op;
7115 if (i < 32) {
7116 if (op1 & 0x20) {
5e3f878a 7117 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7118 } else {
5e3f878a 7119 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7120 }
7121 }
5e3f878a 7122 store_reg(s, rd, tmp);
9ee6e8bb
PB
7123 break;
7124 default:
7125 goto illegal_op;
7126 }
7127 break;
7128 }
7129 break;
7130 }
7131 do_ldst:
7132 /* Check for undefined extension instructions
7133 * per the ARM Bible, i.e.:
7134 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7135 */
7136 sh = (0xf << 20) | (0xf << 4);
7137 if (op1 == 0x7 && ((insn & sh) == sh))
7138 {
7139 goto illegal_op;
7140 }
7141 /* load/store byte/word */
7142 rn = (insn >> 16) & 0xf;
7143 rd = (insn >> 12) & 0xf;
b0109805 7144 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7145 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7146 if (insn & (1 << 24))
b0109805 7147 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7148 if (insn & (1 << 20)) {
7149 /* load */
9ee6e8bb 7150 if (insn & (1 << 22)) {
b0109805 7151 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7152 } else {
b0109805 7153 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7154 }
9ee6e8bb
PB
7155 } else {
7156 /* store */
b0109805 7157 tmp = load_reg(s, rd);
9ee6e8bb 7158 if (insn & (1 << 22))
b0109805 7159 gen_st8(tmp, tmp2, i);
9ee6e8bb 7160 else
b0109805 7161 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7162 }
7163 if (!(insn & (1 << 24))) {
b0109805
PB
7164 gen_add_data_offset(s, insn, tmp2);
7165 store_reg(s, rn, tmp2);
7166 } else if (insn & (1 << 21)) {
7167 store_reg(s, rn, tmp2);
7168 } else {
7d1b0095 7169 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7170 }
7171 if (insn & (1 << 20)) {
7172 /* Complete the load. */
7173 if (rd == 15)
b0109805 7174 gen_bx(s, tmp);
9ee6e8bb 7175 else
b0109805 7176 store_reg(s, rd, tmp);
9ee6e8bb
PB
7177 }
7178 break;
7179 case 0x08:
7180 case 0x09:
7181 {
7182 int j, n, user, loaded_base;
b0109805 7183 TCGv loaded_var;
9ee6e8bb
PB
7184 /* load/store multiple words */
7185 /* XXX: store correct base if write back */
7186 user = 0;
7187 if (insn & (1 << 22)) {
7188 if (IS_USER(s))
7189 goto illegal_op; /* only usable in supervisor mode */
7190
7191 if ((insn & (1 << 15)) == 0)
7192 user = 1;
7193 }
7194 rn = (insn >> 16) & 0xf;
b0109805 7195 addr = load_reg(s, rn);
9ee6e8bb
PB
7196
7197 /* compute total size */
7198 loaded_base = 0;
a50f5b91 7199 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7200 n = 0;
7201 for(i=0;i<16;i++) {
7202 if (insn & (1 << i))
7203 n++;
7204 }
7205 /* XXX: test invalid n == 0 case ? */
7206 if (insn & (1 << 23)) {
7207 if (insn & (1 << 24)) {
7208 /* pre increment */
b0109805 7209 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7210 } else {
7211 /* post increment */
7212 }
7213 } else {
7214 if (insn & (1 << 24)) {
7215 /* pre decrement */
b0109805 7216 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7217 } else {
7218 /* post decrement */
7219 if (n != 1)
b0109805 7220 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7221 }
7222 }
7223 j = 0;
7224 for(i=0;i<16;i++) {
7225 if (insn & (1 << i)) {
7226 if (insn & (1 << 20)) {
7227 /* load */
b0109805 7228 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7229 if (i == 15) {
b0109805 7230 gen_bx(s, tmp);
9ee6e8bb 7231 } else if (user) {
b75263d6
JR
7232 tmp2 = tcg_const_i32(i);
7233 gen_helper_set_user_reg(tmp2, tmp);
7234 tcg_temp_free_i32(tmp2);
7d1b0095 7235 tcg_temp_free_i32(tmp);
9ee6e8bb 7236 } else if (i == rn) {
b0109805 7237 loaded_var = tmp;
9ee6e8bb
PB
7238 loaded_base = 1;
7239 } else {
b0109805 7240 store_reg(s, i, tmp);
9ee6e8bb
PB
7241 }
7242 } else {
7243 /* store */
7244 if (i == 15) {
7245 /* special case: r15 = PC + 8 */
7246 val = (long)s->pc + 4;
7d1b0095 7247 tmp = tcg_temp_new_i32();
b0109805 7248 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7249 } else if (user) {
7d1b0095 7250 tmp = tcg_temp_new_i32();
b75263d6
JR
7251 tmp2 = tcg_const_i32(i);
7252 gen_helper_get_user_reg(tmp, tmp2);
7253 tcg_temp_free_i32(tmp2);
9ee6e8bb 7254 } else {
b0109805 7255 tmp = load_reg(s, i);
9ee6e8bb 7256 }
b0109805 7257 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7258 }
7259 j++;
7260 /* no need to add after the last transfer */
7261 if (j != n)
b0109805 7262 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7263 }
7264 }
7265 if (insn & (1 << 21)) {
7266 /* write back */
7267 if (insn & (1 << 23)) {
7268 if (insn & (1 << 24)) {
7269 /* pre increment */
7270 } else {
7271 /* post increment */
b0109805 7272 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7273 }
7274 } else {
7275 if (insn & (1 << 24)) {
7276 /* pre decrement */
7277 if (n != 1)
b0109805 7278 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7279 } else {
7280 /* post decrement */
b0109805 7281 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7282 }
7283 }
b0109805
PB
7284 store_reg(s, rn, addr);
7285 } else {
7d1b0095 7286 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7287 }
7288 if (loaded_base) {
b0109805 7289 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7290 }
7291 if ((insn & (1 << 22)) && !user) {
7292 /* Restore CPSR from SPSR. */
d9ba4830
PB
7293 tmp = load_cpu_field(spsr);
7294 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7295 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7296 s->is_jmp = DISAS_UPDATE;
7297 }
7298 }
7299 break;
7300 case 0xa:
7301 case 0xb:
7302 {
7303 int32_t offset;
7304
7305 /* branch (and link) */
7306 val = (int32_t)s->pc;
7307 if (insn & (1 << 24)) {
7d1b0095 7308 tmp = tcg_temp_new_i32();
5e3f878a
PB
7309 tcg_gen_movi_i32(tmp, val);
7310 store_reg(s, 14, tmp);
9ee6e8bb
PB
7311 }
7312 offset = (((int32_t)insn << 8) >> 8);
7313 val += (offset << 2) + 4;
7314 gen_jmp(s, val);
7315 }
7316 break;
7317 case 0xc:
7318 case 0xd:
7319 case 0xe:
7320 /* Coprocessor. */
7321 if (disas_coproc_insn(env, s, insn))
7322 goto illegal_op;
7323 break;
7324 case 0xf:
7325 /* swi */
5e3f878a 7326 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7327 s->is_jmp = DISAS_SWI;
7328 break;
7329 default:
7330 illegal_op:
bc4a0de0 7331 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7332 break;
7333 }
7334 }
7335}
7336
7337/* Return true if this is a Thumb-2 logical op. */
7338static int
7339thumb2_logic_op(int op)
7340{
7341 return (op < 8);
7342}
7343
7344/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7345 then set condition code flags based on the result of the operation.
7346 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7347 to the high bit of T1.
7348 Returns zero if the opcode is valid. */
7349
7350static int
396e467c 7351gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7352{
7353 int logic_cc;
7354
7355 logic_cc = 0;
7356 switch (op) {
7357 case 0: /* and */
396e467c 7358 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7359 logic_cc = conds;
7360 break;
7361 case 1: /* bic */
f669df27 7362 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7363 logic_cc = conds;
7364 break;
7365 case 2: /* orr */
396e467c 7366 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7367 logic_cc = conds;
7368 break;
7369 case 3: /* orn */
29501f1b 7370 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7371 logic_cc = conds;
7372 break;
7373 case 4: /* eor */
396e467c 7374 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7375 logic_cc = conds;
7376 break;
7377 case 8: /* add */
7378 if (conds)
396e467c 7379 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7380 else
396e467c 7381 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7382 break;
7383 case 10: /* adc */
7384 if (conds)
396e467c 7385 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7386 else
396e467c 7387 gen_adc(t0, t1);
9ee6e8bb
PB
7388 break;
7389 case 11: /* sbc */
7390 if (conds)
396e467c 7391 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7392 else
396e467c 7393 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7394 break;
7395 case 13: /* sub */
7396 if (conds)
396e467c 7397 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7398 else
396e467c 7399 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7400 break;
7401 case 14: /* rsb */
7402 if (conds)
396e467c 7403 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7404 else
396e467c 7405 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7406 break;
7407 default: /* 5, 6, 7, 9, 12, 15. */
7408 return 1;
7409 }
7410 if (logic_cc) {
396e467c 7411 gen_logic_CC(t0);
9ee6e8bb 7412 if (shifter_out)
396e467c 7413 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7414 }
7415 return 0;
7416}
7417
7418/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7419 is not legal. */
7420static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7421{
b0109805 7422 uint32_t insn, imm, shift, offset;
9ee6e8bb 7423 uint32_t rd, rn, rm, rs;
b26eefb6 7424 TCGv tmp;
6ddbc6e4
PB
7425 TCGv tmp2;
7426 TCGv tmp3;
b0109805 7427 TCGv addr;
a7812ae4 7428 TCGv_i64 tmp64;
9ee6e8bb
PB
7429 int op;
7430 int shiftop;
7431 int conds;
7432 int logic_cc;
7433
7434 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7435 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7436 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7437 16-bit instructions to get correct prefetch abort behavior. */
7438 insn = insn_hw1;
7439 if ((insn & (1 << 12)) == 0) {
7440 /* Second half of blx. */
7441 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7442 tmp = load_reg(s, 14);
7443 tcg_gen_addi_i32(tmp, tmp, offset);
7444 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7445
7d1b0095 7446 tmp2 = tcg_temp_new_i32();
b0109805 7447 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7448 store_reg(s, 14, tmp2);
7449 gen_bx(s, tmp);
9ee6e8bb
PB
7450 return 0;
7451 }
7452 if (insn & (1 << 11)) {
7453 /* Second half of bl. */
7454 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7455 tmp = load_reg(s, 14);
6a0d8a1d 7456 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7457
7d1b0095 7458 tmp2 = tcg_temp_new_i32();
b0109805 7459 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7460 store_reg(s, 14, tmp2);
7461 gen_bx(s, tmp);
9ee6e8bb
PB
7462 return 0;
7463 }
7464 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7465 /* Instruction spans a page boundary. Implement it as two
7466 16-bit instructions in case the second half causes a
7467 prefetch abort. */
7468 offset = ((int32_t)insn << 21) >> 9;
396e467c 7469 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7470 return 0;
7471 }
7472 /* Fall through to 32-bit decode. */
7473 }
7474
7475 insn = lduw_code(s->pc);
7476 s->pc += 2;
7477 insn |= (uint32_t)insn_hw1 << 16;
7478
7479 if ((insn & 0xf800e800) != 0xf000e800) {
7480 ARCH(6T2);
7481 }
7482
7483 rn = (insn >> 16) & 0xf;
7484 rs = (insn >> 12) & 0xf;
7485 rd = (insn >> 8) & 0xf;
7486 rm = insn & 0xf;
7487 switch ((insn >> 25) & 0xf) {
7488 case 0: case 1: case 2: case 3:
7489 /* 16-bit instructions. Should never happen. */
7490 abort();
7491 case 4:
7492 if (insn & (1 << 22)) {
7493 /* Other load/store, table branch. */
7494 if (insn & 0x01200000) {
7495 /* Load/store doubleword. */
7496 if (rn == 15) {
7d1b0095 7497 addr = tcg_temp_new_i32();
b0109805 7498 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7499 } else {
b0109805 7500 addr = load_reg(s, rn);
9ee6e8bb
PB
7501 }
7502 offset = (insn & 0xff) * 4;
7503 if ((insn & (1 << 23)) == 0)
7504 offset = -offset;
7505 if (insn & (1 << 24)) {
b0109805 7506 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7507 offset = 0;
7508 }
7509 if (insn & (1 << 20)) {
7510 /* ldrd */
b0109805
PB
7511 tmp = gen_ld32(addr, IS_USER(s));
7512 store_reg(s, rs, tmp);
7513 tcg_gen_addi_i32(addr, addr, 4);
7514 tmp = gen_ld32(addr, IS_USER(s));
7515 store_reg(s, rd, tmp);
9ee6e8bb
PB
7516 } else {
7517 /* strd */
b0109805
PB
7518 tmp = load_reg(s, rs);
7519 gen_st32(tmp, addr, IS_USER(s));
7520 tcg_gen_addi_i32(addr, addr, 4);
7521 tmp = load_reg(s, rd);
7522 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7523 }
7524 if (insn & (1 << 21)) {
7525 /* Base writeback. */
7526 if (rn == 15)
7527 goto illegal_op;
b0109805
PB
7528 tcg_gen_addi_i32(addr, addr, offset - 4);
7529 store_reg(s, rn, addr);
7530 } else {
7d1b0095 7531 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7532 }
7533 } else if ((insn & (1 << 23)) == 0) {
7534 /* Load/store exclusive word. */
3174f8e9 7535 addr = tcg_temp_local_new();
98a46317 7536 load_reg_var(s, addr, rn);
426f5abc 7537 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7538 if (insn & (1 << 20)) {
426f5abc 7539 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7540 } else {
426f5abc 7541 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7542 }
3174f8e9 7543 tcg_temp_free(addr);
9ee6e8bb
PB
7544 } else if ((insn & (1 << 6)) == 0) {
7545 /* Table Branch. */
7546 if (rn == 15) {
7d1b0095 7547 addr = tcg_temp_new_i32();
b0109805 7548 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7549 } else {
b0109805 7550 addr = load_reg(s, rn);
9ee6e8bb 7551 }
b26eefb6 7552 tmp = load_reg(s, rm);
b0109805 7553 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7554 if (insn & (1 << 4)) {
7555 /* tbh */
b0109805 7556 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 7557 tcg_temp_free_i32(tmp);
b0109805 7558 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7559 } else { /* tbb */
7d1b0095 7560 tcg_temp_free_i32(tmp);
b0109805 7561 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7562 }
7d1b0095 7563 tcg_temp_free_i32(addr);
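                /* The loaded value is a count of halfwords: double it and add
                   the address of the following instruction to form the branch
                   target.  */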
7564 tcg_gen_shli_i32(tmp, tmp, 1);
7565 tcg_gen_addi_i32(tmp, tmp, s->pc);
7566 store_reg(s, 15, tmp);
9ee6e8bb
PB
7567 } else {
7568 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7569 ARCH(7);
9ee6e8bb 7570 op = (insn >> 4) & 0x3;
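                    /* op selects the access size: 0 = byte, 1 = halfword,
                       3 = doubleword.  Word exclusives are handled above;
                       op == 2 is not allocated in this encoding.  */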
426f5abc
PB
7571 if (op == 2) {
7572 goto illegal_op;
7573 }
3174f8e9 7574 addr = tcg_temp_local_new();
98a46317 7575 load_reg_var(s, addr, rn);
9ee6e8bb 7576 if (insn & (1 << 20)) {
426f5abc 7577 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7578 } else {
426f5abc 7579 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7580 }
3174f8e9 7581 tcg_temp_free(addr);
9ee6e8bb
PB
7582 }
7583 } else {
7584 /* Load/store multiple, RFE, SRS. */
7585 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7586 /* Not available in user mode. */
b0109805 7587 if (IS_USER(s))
9ee6e8bb
PB
7588 goto illegal_op;
7589 if (insn & (1 << 20)) {
7590 /* rfe */
b0109805
PB
7591 addr = load_reg(s, rn);
7592 if ((insn & (1 << 24)) == 0)
7593 tcg_gen_addi_i32(addr, addr, -8);
7594 /* Load PC into tmp and CPSR into tmp2. */
7595 tmp = gen_ld32(addr, 0);
7596 tcg_gen_addi_i32(addr, addr, 4);
7597 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7598 if (insn & (1 << 21)) {
7599 /* Base writeback. */
b0109805
PB
7600 if (insn & (1 << 24)) {
7601 tcg_gen_addi_i32(addr, addr, 4);
7602 } else {
7603 tcg_gen_addi_i32(addr, addr, -4);
7604 }
7605 store_reg(s, rn, addr);
7606 } else {
7d1b0095 7607 tcg_temp_free_i32(addr);
9ee6e8bb 7608 }
b0109805 7609 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7610 } else {
7611 /* srs */
7612 op = (insn & 0x1f);
7d1b0095 7613 addr = tcg_temp_new_i32();
39ea3d4e
PM
7614 tmp = tcg_const_i32(op);
7615 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7616 tcg_temp_free_i32(tmp);
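                    /* addr now holds the banked SP of the mode encoded in the
                       low five bits of the instruction; LR and the CPSR are
                       stored through it below.  */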
9ee6e8bb 7617 if ((insn & (1 << 24)) == 0) {
b0109805 7618 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7619 }
b0109805
PB
7620 tmp = load_reg(s, 14);
7621 gen_st32(tmp, addr, 0);
7622 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 7623 tmp = tcg_temp_new_i32();
b0109805
PB
7624 gen_helper_cpsr_read(tmp);
7625 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7626 if (insn & (1 << 21)) {
7627 if ((insn & (1 << 24)) == 0) {
b0109805 7628 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7629 } else {
b0109805 7630 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 7631 }
39ea3d4e
PM
7632 tmp = tcg_const_i32(op);
7633 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7634 tcg_temp_free_i32(tmp);
b0109805 7635 } else {
7d1b0095 7636 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7637 }
7638 }
7639 } else {
7640 int i;
7641 /* Load/store multiple. */
b0109805 7642 addr = load_reg(s, rn);
9ee6e8bb
PB
7643 offset = 0;
7644 for (i = 0; i < 16; i++) {
7645 if (insn & (1 << i))
7646 offset += 4;
7647 }
7648 if (insn & (1 << 24)) {
b0109805 7649 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7650 }
7651
7652 for (i = 0; i < 16; i++) {
7653 if ((insn & (1 << i)) == 0)
7654 continue;
7655 if (insn & (1 << 20)) {
7656 /* Load. */
b0109805 7657 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7658 if (i == 15) {
b0109805 7659 gen_bx(s, tmp);
9ee6e8bb 7660 } else {
b0109805 7661 store_reg(s, i, tmp);
9ee6e8bb
PB
7662 }
7663 } else {
7664 /* Store. */
b0109805
PB
7665 tmp = load_reg(s, i);
7666 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7667 }
b0109805 7668 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7669 }
7670 if (insn & (1 << 21)) {
7671 /* Base register writeback. */
7672 if (insn & (1 << 24)) {
b0109805 7673 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7674 }
7675 /* Fault if writeback register is in register list. */
7676 if (insn & (1 << rn))
7677 goto illegal_op;
b0109805
PB
7678 store_reg(s, rn, addr);
7679 } else {
7d1b0095 7680 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7681 }
7682 }
7683 }
7684 break;
2af9ab77
JB
7685 case 5:
7686
9ee6e8bb 7687 op = (insn >> 21) & 0xf;
2af9ab77
JB
7688 if (op == 6) {
7689 /* Halfword pack. */
7690 tmp = load_reg(s, rn);
7691 tmp2 = load_reg(s, rm);
7692 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7693 if (insn & (1 << 5)) {
7694 /* pkhtb */
7695 if (shift == 0)
7696 shift = 31;
7697 tcg_gen_sari_i32(tmp2, tmp2, shift);
7698 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7699 tcg_gen_ext16u_i32(tmp2, tmp2);
7700 } else {
7701 /* pkhbt */
7702 if (shift)
7703 tcg_gen_shli_i32(tmp2, tmp2, shift);
7704 tcg_gen_ext16u_i32(tmp, tmp);
7705 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7706 }
7707 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7708 tcg_temp_free_i32(tmp2);
3174f8e9
FN
7709 store_reg(s, rd, tmp);
7710 } else {
2af9ab77
JB
7711 /* Data processing register constant shift. */
7712 if (rn == 15) {
7d1b0095 7713 tmp = tcg_temp_new_i32();
2af9ab77
JB
7714 tcg_gen_movi_i32(tmp, 0);
7715 } else {
7716 tmp = load_reg(s, rn);
7717 }
7718 tmp2 = load_reg(s, rm);
7719
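                /* The second operand is shifted by an immediate amount before
                   the ALU operation; for flag-setting logical ops the shifter
                   also supplies the carry.  */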
7720 shiftop = (insn >> 4) & 3;
7721 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7722 conds = (insn & (1 << 20)) != 0;
7723 logic_cc = (conds && thumb2_logic_op(op));
7724 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7725 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7726 goto illegal_op;
7d1b0095 7727 tcg_temp_free_i32(tmp2);
2af9ab77
JB
7728 if (rd != 15) {
7729 store_reg(s, rd, tmp);
7730 } else {
7d1b0095 7731 tcg_temp_free_i32(tmp);
2af9ab77 7732 }
3174f8e9 7733 }
9ee6e8bb
PB
7734 break;
7735 case 13: /* Misc data processing. */
7736 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7737 if (op < 4 && (insn & 0xf000) != 0xf000)
7738 goto illegal_op;
7739 switch (op) {
7740 case 0: /* Register controlled shift. */
8984bd2e
PB
7741 tmp = load_reg(s, rn);
7742 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7743 if ((insn & 0x70) != 0)
7744 goto illegal_op;
7745 op = (insn >> 21) & 3;
8984bd2e
PB
7746 logic_cc = (insn & (1 << 20)) != 0;
7747 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7748 if (logic_cc)
7749 gen_logic_CC(tmp);
21aeb343 7750 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7751 break;
7752 case 1: /* Sign/zero extend. */
5e3f878a 7753 tmp = load_reg(s, rm);
9ee6e8bb
PB
7754 shift = (insn >> 4) & 3;
7755 /* ??? In many cases it's not necessary to do a
7756 rotate, a shift is sufficient. */
7757 if (shift != 0)
f669df27 7758 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7759 op = (insn >> 20) & 7;
7760 switch (op) {
5e3f878a
PB
7761 case 0: gen_sxth(tmp); break;
7762 case 1: gen_uxth(tmp); break;
7763 case 2: gen_sxtb16(tmp); break;
7764 case 3: gen_uxtb16(tmp); break;
7765 case 4: gen_sxtb(tmp); break;
7766 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7767 default: goto illegal_op;
7768 }
7769 if (rn != 15) {
5e3f878a 7770 tmp2 = load_reg(s, rn);
9ee6e8bb 7771 if ((op >> 1) == 1) {
5e3f878a 7772 gen_add16(tmp, tmp2);
9ee6e8bb 7773 } else {
5e3f878a 7774 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7775 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7776 }
7777 }
5e3f878a 7778 store_reg(s, rd, tmp);
9ee6e8bb
PB
7779 break;
7780 case 2: /* SIMD add/subtract. */
7781 op = (insn >> 20) & 7;
7782 shift = (insn >> 4) & 7;
7783 if ((op & 3) == 3 || (shift & 3) == 3)
7784 goto illegal_op;
6ddbc6e4
PB
7785 tmp = load_reg(s, rn);
7786 tmp2 = load_reg(s, rm);
7787 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 7788 tcg_temp_free_i32(tmp2);
6ddbc6e4 7789 store_reg(s, rd, tmp);
9ee6e8bb
PB
7790 break;
7791 case 3: /* Other data processing. */
7792 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7793 if (op < 4) {
7794 /* Saturating add/subtract. */
d9ba4830
PB
7795 tmp = load_reg(s, rn);
7796 tmp2 = load_reg(s, rm);
9ee6e8bb 7797 if (op & 1)
4809c612
JB
7798 gen_helper_double_saturate(tmp, tmp);
7799 if (op & 2)
d9ba4830 7800 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7801 else
d9ba4830 7802 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 7803 tcg_temp_free_i32(tmp2);
9ee6e8bb 7804 } else {
d9ba4830 7805 tmp = load_reg(s, rn);
9ee6e8bb
PB
7806 switch (op) {
7807 case 0x0a: /* rbit */
d9ba4830 7808 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7809 break;
7810 case 0x08: /* rev */
66896cb8 7811 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7812 break;
7813 case 0x09: /* rev16 */
d9ba4830 7814 gen_rev16(tmp);
9ee6e8bb
PB
7815 break;
7816 case 0x0b: /* revsh */
d9ba4830 7817 gen_revsh(tmp);
9ee6e8bb
PB
7818 break;
7819 case 0x10: /* sel */
d9ba4830 7820 tmp2 = load_reg(s, rm);
7d1b0095 7821 tmp3 = tcg_temp_new_i32();
6ddbc6e4 7822 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7823 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7824 tcg_temp_free_i32(tmp3);
7825 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7826 break;
7827 case 0x18: /* clz */
d9ba4830 7828 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7829 break;
7830 default:
7831 goto illegal_op;
7832 }
7833 }
d9ba4830 7834 store_reg(s, rd, tmp);
9ee6e8bb
PB
7835 break;
7836 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7837 op = (insn >> 4) & 0xf;
d9ba4830
PB
7838 tmp = load_reg(s, rn);
7839 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7840 switch ((insn >> 20) & 7) {
7841 case 0: /* 32 x 32 -> 32 */
d9ba4830 7842 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7843 tcg_temp_free_i32(tmp2);
9ee6e8bb 7844 if (rs != 15) {
d9ba4830 7845 tmp2 = load_reg(s, rs);
9ee6e8bb 7846 if (op)
d9ba4830 7847 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7848 else
d9ba4830 7849 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7850 tcg_temp_free_i32(tmp2);
9ee6e8bb 7851 }
9ee6e8bb
PB
7852 break;
7853 case 1: /* 16 x 16 -> 32 */
d9ba4830 7854 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 7855 tcg_temp_free_i32(tmp2);
9ee6e8bb 7856 if (rs != 15) {
d9ba4830
PB
7857 tmp2 = load_reg(s, rs);
7858 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7859 tcg_temp_free_i32(tmp2);
9ee6e8bb 7860 }
9ee6e8bb
PB
7861 break;
7862 case 2: /* Dual multiply add. */
7863 case 4: /* Dual multiply subtract. */
7864 if (op)
d9ba4830
PB
7865 gen_swap_half(tmp2);
7866 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7867 if (insn & (1 << 22)) {
e1d177b9 7868 /* This subtraction cannot overflow. */
d9ba4830 7869 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7870 } else {
e1d177b9
PM
7871 /* This addition cannot overflow 32 bits;
7872 * however it may overflow considered as a signed
7873 * operation, in which case we must set the Q flag.
7874 */
7875 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 7876 }
7d1b0095 7877 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7878 if (rs != 15)
7879 {
d9ba4830
PB
7880 tmp2 = load_reg(s, rs);
7881 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7882 tcg_temp_free_i32(tmp2);
9ee6e8bb 7883 }
9ee6e8bb
PB
7884 break;
7885 case 3: /* 32 * 16 -> 32msb */
7886 if (op)
d9ba4830 7887 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7888 else
d9ba4830 7889 gen_sxth(tmp2);
a7812ae4
PB
7890 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7891 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7892 tmp = tcg_temp_new_i32();
a7812ae4 7893 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7894 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7895 if (rs != 15)
7896 {
d9ba4830
PB
7897 tmp2 = load_reg(s, rs);
7898 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7899 tcg_temp_free_i32(tmp2);
9ee6e8bb 7900 }
9ee6e8bb 7901 break;
838fa72d
AJ
7902 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7903 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7904 if (rs != 15) {
838fa72d
AJ
7905 tmp = load_reg(s, rs);
7906 if (insn & (1 << 20)) {
7907 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 7908 } else {
838fa72d 7909 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 7910 }
2c0262af 7911 }
838fa72d
AJ
7912 if (insn & (1 << 4)) {
7913 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7914 }
7915 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7916 tmp = tcg_temp_new_i32();
838fa72d
AJ
7917 tcg_gen_trunc_i64_i32(tmp, tmp64);
7918 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7919 break;
7920 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 7921 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7922 tcg_temp_free_i32(tmp2);
9ee6e8bb 7923 if (rs != 15) {
d9ba4830
PB
7924 tmp2 = load_reg(s, rs);
7925 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7926 tcg_temp_free_i32(tmp2);
5fd46862 7927 }
9ee6e8bb 7928 break;
2c0262af 7929 }
d9ba4830 7930 store_reg(s, rd, tmp);
2c0262af 7931 break;
9ee6e8bb
PB
7932 case 6: case 7: /* 64-bit multiply, Divide. */
7933 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7934 tmp = load_reg(s, rn);
7935 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7936 if ((op & 0x50) == 0x10) {
7937 /* sdiv, udiv */
7938 if (!arm_feature(env, ARM_FEATURE_DIV))
7939 goto illegal_op;
7940 if (op & 0x20)
5e3f878a 7941 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7942 else
5e3f878a 7943 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 7944 tcg_temp_free_i32(tmp2);
5e3f878a 7945 store_reg(s, rd, tmp);
9ee6e8bb
PB
7946 } else if ((op & 0xe) == 0xc) {
7947 /* Dual multiply accumulate long. */
7948 if (op & 1)
5e3f878a
PB
7949 gen_swap_half(tmp2);
7950 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7951 if (op & 0x10) {
5e3f878a 7952 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7953 } else {
5e3f878a 7954 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7955 }
7d1b0095 7956 tcg_temp_free_i32(tmp2);
a7812ae4
PB
7957 /* BUGFIX */
7958 tmp64 = tcg_temp_new_i64();
7959 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7960 tcg_temp_free_i32(tmp);
a7812ae4
PB
7961 gen_addq(s, tmp64, rs, rd);
7962 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7963 tcg_temp_free_i64(tmp64);
2c0262af 7964 } else {
9ee6e8bb
PB
7965 if (op & 0x20) {
7966 /* Unsigned 64-bit multiply */
a7812ae4 7967 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7968 } else {
9ee6e8bb
PB
7969 if (op & 8) {
7970 /* smlalxy */
5e3f878a 7971 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 7972 tcg_temp_free_i32(tmp2);
a7812ae4
PB
7973 tmp64 = tcg_temp_new_i64();
7974 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7975 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7976 } else {
7977 /* Signed 64-bit multiply */
a7812ae4 7978 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7979 }
b5ff1b31 7980 }
9ee6e8bb
PB
7981 if (op & 4) {
7982 /* umaal */
a7812ae4
PB
7983 gen_addq_lo(s, tmp64, rs);
7984 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7985 } else if (op & 0x40) {
7986 /* 64-bit accumulate. */
a7812ae4 7987 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7988 }
a7812ae4 7989 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7990 tcg_temp_free_i64(tmp64);
5fd46862 7991 }
2c0262af 7992 break;
9ee6e8bb
PB
7993 }
7994 break;
7995 case 6: case 7: case 14: case 15:
7996 /* Coprocessor. */
7997 if (((insn >> 24) & 3) == 3) {
7998 /* Translate into the equivalent ARM encoding. */
f06053e3 7999 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8000 if (disas_neon_data_insn(env, s, insn))
8001 goto illegal_op;
8002 } else {
8003 if (insn & (1 << 28))
8004 goto illegal_op;
8005 if (disas_coproc_insn (env, s, insn))
8006 goto illegal_op;
8007 }
8008 break;
8009 case 8: case 9: case 10: case 11:
8010 if (insn & (1 << 15)) {
8011 /* Branches, misc control. */
8012 if (insn & 0x5000) {
8013 /* Unconditional branch. */
8014 /* signextend(hw1[10:0]) -> offset[31:12]. */
8015 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8016 /* hw1[10:0] -> offset[11:1]. */
8017 offset |= (insn & 0x7ff) << 1;
8018 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8019 offset[24:22] already have the same value because of the
8020 sign extension above. */
8021 offset ^= ((~insn) & (1 << 13)) << 10;
8022 offset ^= ((~insn) & (1 << 11)) << 11;
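                /* i.e. offset[23] = NOT(hw2[13] XOR S) and
                   offset[22] = NOT(hw2[11] XOR S), the I1/I2 terms of the
                   architectural BL/BLX offset encoding.  */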
8023
9ee6e8bb
PB
8024 if (insn & (1 << 14)) {
8025 /* Branch and link. */
3174f8e9 8026 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8027 }
3b46e624 8028
b0109805 8029 offset += s->pc;
9ee6e8bb
PB
8030 if (insn & (1 << 12)) {
8031 /* b/bl */
b0109805 8032 gen_jmp(s, offset);
9ee6e8bb
PB
8033 } else {
8034 /* blx */
b0109805
PB
8035 offset &= ~(uint32_t)2;
8036 gen_bx_im(s, offset);
2c0262af 8037 }
9ee6e8bb
PB
8038 } else if (((insn >> 23) & 7) == 7) {
8039 /* Misc control */
8040 if (insn & (1 << 13))
8041 goto illegal_op;
8042
8043 if (insn & (1 << 26)) {
8044 /* Secure monitor call (v6Z) */
8045 goto illegal_op; /* not implemented. */
2c0262af 8046 } else {
9ee6e8bb
PB
8047 op = (insn >> 20) & 7;
8048 switch (op) {
8049 case 0: /* msr cpsr. */
8050 if (IS_M(env)) {
8984bd2e
PB
8051 tmp = load_reg(s, rn);
8052 addr = tcg_const_i32(insn & 0xff);
8053 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8054 tcg_temp_free_i32(addr);
7d1b0095 8055 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8056 gen_lookup_tb(s);
8057 break;
8058 }
8059 /* fall through */
8060 case 1: /* msr spsr. */
8061 if (IS_M(env))
8062 goto illegal_op;
2fbac54b
FN
8063 tmp = load_reg(s, rn);
8064 if (gen_set_psr(s,
9ee6e8bb 8065 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8066 op == 1, tmp))
9ee6e8bb
PB
8067 goto illegal_op;
8068 break;
8069 case 2: /* cps, nop-hint. */
8070 if (((insn >> 8) & 7) == 0) {
8071 gen_nop_hint(s, insn & 0xff);
8072 }
8073 /* Implemented as NOP in user mode. */
8074 if (IS_USER(s))
8075 break;
8076 offset = 0;
8077 imm = 0;
8078 if (insn & (1 << 10)) {
8079 if (insn & (1 << 7))
8080 offset |= CPSR_A;
8081 if (insn & (1 << 6))
8082 offset |= CPSR_I;
8083 if (insn & (1 << 5))
8084 offset |= CPSR_F;
8085 if (insn & (1 << 9))
8086 imm = CPSR_A | CPSR_I | CPSR_F;
8087 }
8088 if (insn & (1 << 8)) {
8089 offset |= 0x1f;
8090 imm |= (insn & 0x1f);
8091 }
8092 if (offset) {
2fbac54b 8093 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8094 }
8095 break;
8096 case 3: /* Special control operations. */
426f5abc 8097 ARCH(7);
9ee6e8bb
PB
8098 op = (insn >> 4) & 0xf;
8099 switch (op) {
8100 case 2: /* clrex */
426f5abc 8101 gen_clrex(s);
9ee6e8bb
PB
8102 break;
8103 case 4: /* dsb */
8104 case 5: /* dmb */
8105 case 6: /* isb */
8106 /* These execute as NOPs. */
9ee6e8bb
PB
8107 break;
8108 default:
8109 goto illegal_op;
8110 }
8111 break;
8112 case 4: /* bxj */
8113 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8114 tmp = load_reg(s, rn);
8115 gen_bx(s, tmp);
9ee6e8bb
PB
8116 break;
8117 case 5: /* Exception return. */
b8b45b68
RV
8118 if (IS_USER(s)) {
8119 goto illegal_op;
8120 }
8121 if (rn != 14 || rd != 15) {
8122 goto illegal_op;
8123 }
8124 tmp = load_reg(s, rn);
8125 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8126 gen_exception_return(s, tmp);
8127 break;
9ee6e8bb 8128 case 6: /* mrs cpsr. */
7d1b0095 8129 tmp = tcg_temp_new_i32();
9ee6e8bb 8130 if (IS_M(env)) {
8984bd2e
PB
8131 addr = tcg_const_i32(insn & 0xff);
8132 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8133 tcg_temp_free_i32(addr);
9ee6e8bb 8134 } else {
8984bd2e 8135 gen_helper_cpsr_read(tmp);
9ee6e8bb 8136 }
8984bd2e 8137 store_reg(s, rd, tmp);
9ee6e8bb
PB
8138 break;
8139 case 7: /* mrs spsr. */
8140 /* Not accessible in user mode. */
8141 if (IS_USER(s) || IS_M(env))
8142 goto illegal_op;
d9ba4830
PB
8143 tmp = load_cpu_field(spsr);
8144 store_reg(s, rd, tmp);
9ee6e8bb 8145 break;
2c0262af
FB
8146 }
8147 }
9ee6e8bb
PB
8148 } else {
8149 /* Conditional branch. */
8150 op = (insn >> 22) & 0xf;
8151 /* Generate a conditional jump to next instruction. */
8152 s->condlabel = gen_new_label();
d9ba4830 8153 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8154 s->condjmp = 1;
8155
8156 /* offset[11:1] = insn[10:0] */
8157 offset = (insn & 0x7ff) << 1;
8158 /* offset[17:12] = insn[21:16]. */
8159 offset |= (insn & 0x003f0000) >> 4;
8160 /* offset[31:20] = insn[26]. */
8161 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8162 /* offset[18] = insn[13]. */
8163 offset |= (insn & (1 << 13)) << 5;
8164 /* offset[19] = insn[11]. */
8165 offset |= (insn & (1 << 11)) << 8;
8166
8167 /* jump to the offset */
b0109805 8168 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8169 }
8170 } else {
8171 /* Data processing immediate. */
8172 if (insn & (1 << 25)) {
8173 if (insn & (1 << 24)) {
8174 if (insn & (1 << 20))
8175 goto illegal_op;
8176 /* Bitfield/Saturate. */
8177 op = (insn >> 21) & 7;
8178 imm = insn & 0x1f;
8179 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8180 if (rn == 15) {
7d1b0095 8181 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8182 tcg_gen_movi_i32(tmp, 0);
8183 } else {
8184 tmp = load_reg(s, rn);
8185 }
9ee6e8bb
PB
8186 switch (op) {
8187 case 2: /* Signed bitfield extract. */
8188 imm++;
8189 if (shift + imm > 32)
8190 goto illegal_op;
8191 if (imm < 32)
6ddbc6e4 8192 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8193 break;
8194 case 6: /* Unsigned bitfield extract. */
8195 imm++;
8196 if (shift + imm > 32)
8197 goto illegal_op;
8198 if (imm < 32)
6ddbc6e4 8199 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8200 break;
8201 case 3: /* Bitfield insert/clear. */
8202 if (imm < shift)
8203 goto illegal_op;
8204 imm = imm + 1 - shift;
8205 if (imm != 32) {
6ddbc6e4 8206 tmp2 = load_reg(s, rd);
8f8e3aa4 8207 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8208 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8209 }
8210 break;
8211 case 7:
8212 goto illegal_op;
8213 default: /* Saturate. */
9ee6e8bb
PB
8214 if (shift) {
8215 if (op & 1)
6ddbc6e4 8216 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8217 else
6ddbc6e4 8218 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8219 }
6ddbc6e4 8220 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8221 if (op & 4) {
8222 /* Unsigned. */
9ee6e8bb 8223 if ((op & 1) && shift == 0)
6ddbc6e4 8224 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8225 else
6ddbc6e4 8226 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8227 } else {
9ee6e8bb 8228 /* Signed. */
9ee6e8bb 8229 if ((op & 1) && shift == 0)
6ddbc6e4 8230 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8231 else
6ddbc6e4 8232 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8233 }
b75263d6 8234 tcg_temp_free_i32(tmp2);
9ee6e8bb 8235 break;
2c0262af 8236 }
6ddbc6e4 8237 store_reg(s, rd, tmp);
9ee6e8bb
PB
8238 } else {
8239 imm = ((insn & 0x04000000) >> 15)
8240 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8241 if (insn & (1 << 22)) {
8242 /* 16-bit immediate. */
8243 imm |= (insn >> 4) & 0xf000;
8244 if (insn & (1 << 23)) {
8245 /* movt */
5e3f878a 8246 tmp = load_reg(s, rd);
86831435 8247 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8248 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8249 } else {
9ee6e8bb 8250 /* movw */
7d1b0095 8251 tmp = tcg_temp_new_i32();
5e3f878a 8252 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8253 }
8254 } else {
9ee6e8bb
PB
8255 /* Add/sub 12-bit immediate. */
8256 if (rn == 15) {
b0109805 8257 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8258 if (insn & (1 << 23))
b0109805 8259 offset -= imm;
9ee6e8bb 8260 else
b0109805 8261 offset += imm;
7d1b0095 8262 tmp = tcg_temp_new_i32();
5e3f878a 8263 tcg_gen_movi_i32(tmp, offset);
2c0262af 8264 } else {
5e3f878a 8265 tmp = load_reg(s, rn);
9ee6e8bb 8266 if (insn & (1 << 23))
5e3f878a 8267 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8268 else
5e3f878a 8269 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8270 }
9ee6e8bb 8271 }
5e3f878a 8272 store_reg(s, rd, tmp);
191abaa2 8273 }
9ee6e8bb
PB
8274 } else {
8275 int shifter_out = 0;
8276 /* modified 12-bit immediate. */
8277 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8278 imm = (insn & 0xff);
8279 switch (shift) {
8280 case 0: /* XY */
8281 /* Nothing to do. */
8282 break;
8283 case 1: /* 00XY00XY */
8284 imm |= imm << 16;
8285 break;
8286 case 2: /* XY00XY00 */
8287 imm |= imm << 16;
8288 imm <<= 8;
8289 break;
8290 case 3: /* XYXYXYXY */
8291 imm |= imm << 16;
8292 imm |= imm << 8;
8293 break;
8294 default: /* Rotated constant. */
8295 shift = (shift << 1) | (imm >> 7);
8296 imm |= 0x80;
8297 imm = imm << (32 - shift);
8298 shifter_out = 1;
8299 break;
b5ff1b31 8300 }
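            /* imm now holds the expanded modified immediate; shifter_out marks
               a rotated constant, whose bit 31 must supply the carry for
               flag-setting logical operations.  */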
7d1b0095 8301 tmp2 = tcg_temp_new_i32();
3174f8e9 8302 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8303 rn = (insn >> 16) & 0xf;
3174f8e9 8304 if (rn == 15) {
7d1b0095 8305 tmp = tcg_temp_new_i32();
3174f8e9
FN
8306 tcg_gen_movi_i32(tmp, 0);
8307 } else {
8308 tmp = load_reg(s, rn);
8309 }
9ee6e8bb
PB
8310 op = (insn >> 21) & 0xf;
8311 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8312 shifter_out, tmp, tmp2))
9ee6e8bb 8313 goto illegal_op;
7d1b0095 8314 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8315 rd = (insn >> 8) & 0xf;
8316 if (rd != 15) {
3174f8e9
FN
8317 store_reg(s, rd, tmp);
8318 } else {
7d1b0095 8319 tcg_temp_free_i32(tmp);
2c0262af 8320 }
2c0262af 8321 }
9ee6e8bb
PB
8322 }
8323 break;
8324 case 12: /* Load/store single data item. */
8325 {
8326 int postinc = 0;
8327 int writeback = 0;
b0109805 8328 int user;
9ee6e8bb
PB
8329 if ((insn & 0x01100000) == 0x01000000) {
8330 if (disas_neon_ls_insn(env, s, insn))
c1713132 8331 goto illegal_op;
9ee6e8bb
PB
8332 break;
8333 }
a2fdc890
PM
8334 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8335 if (rs == 15) {
8336 if (!(insn & (1 << 20))) {
8337 goto illegal_op;
8338 }
8339 if (op != 2) {
8340 /* Byte or halfword load space with dest == r15 : memory hints.
8341 * Catch them early so we don't emit pointless addressing code.
8342 * This space is a mix of:
8343 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8344 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8345 * cores)
8346 * unallocated hints, which must be treated as NOPs
8347 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8348 * which is easiest for the decoding logic
8349 * Some space which must UNDEF
8350 */
8351 int op1 = (insn >> 23) & 3;
8352 int op2 = (insn >> 6) & 0x3f;
8353 if (op & 2) {
8354 goto illegal_op;
8355 }
8356 if (rn == 15) {
8357 /* UNPREDICTABLE or unallocated hint */
8358 return 0;
8359 }
8360 if (op1 & 1) {
8361 return 0; /* PLD* or unallocated hint */
8362 }
8363 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8364 return 0; /* PLD* or unallocated hint */
8365 }
8366 /* UNDEF space, or an UNPREDICTABLE */
8367 return 1;
8368 }
8369 }
b0109805 8370 user = IS_USER(s);
9ee6e8bb 8371 if (rn == 15) {
7d1b0095 8372 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8373 /* PC relative. */
8374 /* s->pc has already been incremented by 4. */
8375 imm = s->pc & 0xfffffffc;
8376 if (insn & (1 << 23))
8377 imm += insn & 0xfff;
8378 else
8379 imm -= insn & 0xfff;
b0109805 8380 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8381 } else {
b0109805 8382 addr = load_reg(s, rn);
9ee6e8bb
PB
8383 if (insn & (1 << 23)) {
8384 /* Positive offset. */
8385 imm = insn & 0xfff;
b0109805 8386 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8387 } else {
9ee6e8bb 8388 imm = insn & 0xff;
2a0308c5
PM
8389 switch ((insn >> 8) & 0xf) {
8390 case 0x0: /* Shifted Register. */
9ee6e8bb 8391 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8392 if (shift > 3) {
8393 tcg_temp_free_i32(addr);
18c9b560 8394 goto illegal_op;
2a0308c5 8395 }
b26eefb6 8396 tmp = load_reg(s, rm);
9ee6e8bb 8397 if (shift)
b26eefb6 8398 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8399 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8400 tcg_temp_free_i32(tmp);
9ee6e8bb 8401 break;
2a0308c5 8402 case 0xc: /* Negative offset. */
b0109805 8403 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8404 break;
2a0308c5 8405 case 0xe: /* User privilege. */
b0109805
PB
8406 tcg_gen_addi_i32(addr, addr, imm);
8407 user = 1;
9ee6e8bb 8408 break;
2a0308c5 8409 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8410 imm = -imm;
8411 /* Fall through. */
2a0308c5 8412 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8413 postinc = 1;
8414 writeback = 1;
8415 break;
2a0308c5 8416 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8417 imm = -imm;
8418 /* Fall through. */
2a0308c5 8419 case 0xf: /* Pre-increment. */
b0109805 8420 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8421 writeback = 1;
8422 break;
8423 default:
2a0308c5 8424 tcg_temp_free_i32(addr);
b7bcbe95 8425 goto illegal_op;
9ee6e8bb
PB
8426 }
8427 }
8428 }
9ee6e8bb
PB
8429 if (insn & (1 << 20)) {
8430 /* Load. */
a2fdc890
PM
8431 switch (op) {
8432 case 0: tmp = gen_ld8u(addr, user); break;
8433 case 4: tmp = gen_ld8s(addr, user); break;
8434 case 1: tmp = gen_ld16u(addr, user); break;
8435 case 5: tmp = gen_ld16s(addr, user); break;
8436 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8437 default:
8438 tcg_temp_free_i32(addr);
8439 goto illegal_op;
a2fdc890
PM
8440 }
8441 if (rs == 15) {
8442 gen_bx(s, tmp);
9ee6e8bb 8443 } else {
a2fdc890 8444 store_reg(s, rs, tmp);
9ee6e8bb
PB
8445 }
8446 } else {
8447 /* Store. */
b0109805 8448 tmp = load_reg(s, rs);
9ee6e8bb 8449 switch (op) {
b0109805
PB
8450 case 0: gen_st8(tmp, addr, user); break;
8451 case 1: gen_st16(tmp, addr, user); break;
8452 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8453 default:
8454 tcg_temp_free_i32(addr);
8455 goto illegal_op;
b7bcbe95 8456 }
2c0262af 8457 }
9ee6e8bb 8458 if (postinc)
b0109805
PB
8459 tcg_gen_addi_i32(addr, addr, imm);
8460 if (writeback) {
8461 store_reg(s, rn, addr);
8462 } else {
7d1b0095 8463 tcg_temp_free_i32(addr);
b0109805 8464 }
9ee6e8bb
PB
8465 }
8466 break;
8467 default:
8468 goto illegal_op;
2c0262af 8469 }
9ee6e8bb
PB
8470 return 0;
8471illegal_op:
8472 return 1;
2c0262af
FB
8473}
8474
9ee6e8bb 8475static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8476{
8477 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8478 int32_t offset;
8479 int i;
b26eefb6 8480 TCGv tmp;
d9ba4830 8481 TCGv tmp2;
b0109805 8482 TCGv addr;
99c475ab 8483
9ee6e8bb
PB
8484 if (s->condexec_mask) {
8485 cond = s->condexec_cond;
bedd2912
JB
8486 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8487 s->condlabel = gen_new_label();
8488 gen_test_cc(cond ^ 1, s->condlabel);
8489 s->condjmp = 1;
8490 }
9ee6e8bb
PB
8491 }
8492
b5ff1b31 8493 insn = lduw_code(s->pc);
99c475ab 8494 s->pc += 2;
b5ff1b31 8495
99c475ab
FB
8496 switch (insn >> 12) {
8497 case 0: case 1:
396e467c 8498
99c475ab
FB
8499 rd = insn & 7;
8500 op = (insn >> 11) & 3;
8501 if (op == 3) {
8502 /* add/subtract */
8503 rn = (insn >> 3) & 7;
396e467c 8504 tmp = load_reg(s, rn);
99c475ab
FB
8505 if (insn & (1 << 10)) {
8506 /* immediate */
7d1b0095 8507 tmp2 = tcg_temp_new_i32();
396e467c 8508 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8509 } else {
8510 /* reg */
8511 rm = (insn >> 6) & 7;
396e467c 8512 tmp2 = load_reg(s, rm);
99c475ab 8513 }
9ee6e8bb
PB
8514 if (insn & (1 << 9)) {
8515 if (s->condexec_mask)
396e467c 8516 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8517 else
396e467c 8518 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8519 } else {
8520 if (s->condexec_mask)
396e467c 8521 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8522 else
396e467c 8523 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8524 }
7d1b0095 8525 tcg_temp_free_i32(tmp2);
396e467c 8526 store_reg(s, rd, tmp);
99c475ab
FB
8527 } else {
8528 /* shift immediate */
8529 rm = (insn >> 3) & 7;
8530 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8531 tmp = load_reg(s, rm);
8532 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8533 if (!s->condexec_mask)
8534 gen_logic_CC(tmp);
8535 store_reg(s, rd, tmp);
99c475ab
FB
8536 }
8537 break;
8538 case 2: case 3:
8539 /* arithmetic large immediate */
8540 op = (insn >> 11) & 3;
8541 rd = (insn >> 8) & 0x7;
396e467c 8542 if (op == 0) { /* mov */
7d1b0095 8543 tmp = tcg_temp_new_i32();
396e467c 8544 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8545 if (!s->condexec_mask)
396e467c
FN
8546 gen_logic_CC(tmp);
8547 store_reg(s, rd, tmp);
8548 } else {
8549 tmp = load_reg(s, rd);
7d1b0095 8550 tmp2 = tcg_temp_new_i32();
396e467c
FN
8551 tcg_gen_movi_i32(tmp2, insn & 0xff);
8552 switch (op) {
8553 case 1: /* cmp */
8554 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8555 tcg_temp_free_i32(tmp);
8556 tcg_temp_free_i32(tmp2);
396e467c
FN
8557 break;
8558 case 2: /* add */
8559 if (s->condexec_mask)
8560 tcg_gen_add_i32(tmp, tmp, tmp2);
8561 else
8562 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 8563 tcg_temp_free_i32(tmp2);
396e467c
FN
8564 store_reg(s, rd, tmp);
8565 break;
8566 case 3: /* sub */
8567 if (s->condexec_mask)
8568 tcg_gen_sub_i32(tmp, tmp, tmp2);
8569 else
8570 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 8571 tcg_temp_free_i32(tmp2);
396e467c
FN
8572 store_reg(s, rd, tmp);
8573 break;
8574 }
99c475ab 8575 }
99c475ab
FB
8576 break;
8577 case 4:
8578 if (insn & (1 << 11)) {
8579 rd = (insn >> 8) & 7;
5899f386
FB
8580 /* load pc-relative. Bit 1 of PC is ignored. */
8581 val = s->pc + 2 + ((insn & 0xff) * 4);
8582 val &= ~(uint32_t)2;
7d1b0095 8583 addr = tcg_temp_new_i32();
b0109805
PB
8584 tcg_gen_movi_i32(addr, val);
8585 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 8586 tcg_temp_free_i32(addr);
b0109805 8587 store_reg(s, rd, tmp);
99c475ab
FB
8588 break;
8589 }
8590 if (insn & (1 << 10)) {
8591 /* data processing extended or blx */
8592 rd = (insn & 7) | ((insn >> 4) & 8);
8593 rm = (insn >> 3) & 0xf;
8594 op = (insn >> 8) & 3;
8595 switch (op) {
8596 case 0: /* add */
396e467c
FN
8597 tmp = load_reg(s, rd);
8598 tmp2 = load_reg(s, rm);
8599 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8600 tcg_temp_free_i32(tmp2);
396e467c 8601 store_reg(s, rd, tmp);
99c475ab
FB
8602 break;
8603 case 1: /* cmp */
396e467c
FN
8604 tmp = load_reg(s, rd);
8605 tmp2 = load_reg(s, rm);
8606 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8607 tcg_temp_free_i32(tmp2);
8608 tcg_temp_free_i32(tmp);
99c475ab
FB
8609 break;
8610 case 2: /* mov/cpy */
396e467c
FN
8611 tmp = load_reg(s, rm);
8612 store_reg(s, rd, tmp);
99c475ab
FB
8613 break;
8614 case 3:/* branch [and link] exchange thumb register */
b0109805 8615 tmp = load_reg(s, rm);
99c475ab
FB
8616 if (insn & (1 << 7)) {
8617 val = (uint32_t)s->pc | 1;
7d1b0095 8618 tmp2 = tcg_temp_new_i32();
b0109805
PB
8619 tcg_gen_movi_i32(tmp2, val);
8620 store_reg(s, 14, tmp2);
99c475ab 8621 }
d9ba4830 8622 gen_bx(s, tmp);
99c475ab
FB
8623 break;
8624 }
8625 break;
8626 }
8627
8628 /* data processing register */
8629 rd = insn & 7;
8630 rm = (insn >> 3) & 7;
8631 op = (insn >> 6) & 0xf;
8632 if (op == 2 || op == 3 || op == 4 || op == 7) {
8633 /* the shift/rotate ops want the operands backwards */
8634 val = rm;
8635 rm = rd;
8636 rd = val;
8637 val = 1;
8638 } else {
8639 val = 0;
8640 }
8641
396e467c 8642 if (op == 9) { /* neg */
7d1b0095 8643 tmp = tcg_temp_new_i32();
396e467c
FN
8644 tcg_gen_movi_i32(tmp, 0);
8645 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8646 tmp = load_reg(s, rd);
8647 } else {
8648 TCGV_UNUSED(tmp);
8649 }
99c475ab 8650
396e467c 8651 tmp2 = load_reg(s, rm);
5899f386 8652 switch (op) {
99c475ab 8653 case 0x0: /* and */
396e467c 8654 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8655 if (!s->condexec_mask)
396e467c 8656 gen_logic_CC(tmp);
99c475ab
FB
8657 break;
8658 case 0x1: /* eor */
396e467c 8659 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8660 if (!s->condexec_mask)
396e467c 8661 gen_logic_CC(tmp);
99c475ab
FB
8662 break;
8663 case 0x2: /* lsl */
9ee6e8bb 8664 if (s->condexec_mask) {
396e467c 8665 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8666 } else {
396e467c
FN
8667 gen_helper_shl_cc(tmp2, tmp2, tmp);
8668 gen_logic_CC(tmp2);
9ee6e8bb 8669 }
99c475ab
FB
8670 break;
8671 case 0x3: /* lsr */
9ee6e8bb 8672 if (s->condexec_mask) {
396e467c 8673 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8674 } else {
396e467c
FN
8675 gen_helper_shr_cc(tmp2, tmp2, tmp);
8676 gen_logic_CC(tmp2);
9ee6e8bb 8677 }
99c475ab
FB
8678 break;
8679 case 0x4: /* asr */
9ee6e8bb 8680 if (s->condexec_mask) {
396e467c 8681 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8682 } else {
396e467c
FN
8683 gen_helper_sar_cc(tmp2, tmp2, tmp);
8684 gen_logic_CC(tmp2);
9ee6e8bb 8685 }
99c475ab
FB
8686 break;
8687 case 0x5: /* adc */
9ee6e8bb 8688 if (s->condexec_mask)
396e467c 8689 gen_adc(tmp, tmp2);
9ee6e8bb 8690 else
396e467c 8691 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8692 break;
8693 case 0x6: /* sbc */
9ee6e8bb 8694 if (s->condexec_mask)
396e467c 8695 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8696 else
396e467c 8697 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8698 break;
8699 case 0x7: /* ror */
9ee6e8bb 8700 if (s->condexec_mask) {
f669df27
AJ
8701 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8702 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8703 } else {
396e467c
FN
8704 gen_helper_ror_cc(tmp2, tmp2, tmp);
8705 gen_logic_CC(tmp2);
9ee6e8bb 8706 }
99c475ab
FB
8707 break;
8708 case 0x8: /* tst */
396e467c
FN
8709 tcg_gen_and_i32(tmp, tmp, tmp2);
8710 gen_logic_CC(tmp);
99c475ab 8711 rd = 16;
5899f386 8712 break;
99c475ab 8713 case 0x9: /* neg */
9ee6e8bb 8714 if (s->condexec_mask)
396e467c 8715 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8716 else
396e467c 8717 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8718 break;
8719 case 0xa: /* cmp */
396e467c 8720 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8721 rd = 16;
8722 break;
8723 case 0xb: /* cmn */
396e467c 8724 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8725 rd = 16;
8726 break;
8727 case 0xc: /* orr */
396e467c 8728 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8729 if (!s->condexec_mask)
396e467c 8730 gen_logic_CC(tmp);
99c475ab
FB
8731 break;
8732 case 0xd: /* mul */
7b2919a0 8733 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8734 if (!s->condexec_mask)
396e467c 8735 gen_logic_CC(tmp);
99c475ab
FB
8736 break;
8737 case 0xe: /* bic */
f669df27 8738 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8739 if (!s->condexec_mask)
396e467c 8740 gen_logic_CC(tmp);
99c475ab
FB
8741 break;
8742 case 0xf: /* mvn */
396e467c 8743 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8744 if (!s->condexec_mask)
396e467c 8745 gen_logic_CC(tmp2);
99c475ab 8746 val = 1;
5899f386 8747 rm = rd;
99c475ab
FB
8748 break;
8749 }
8750 if (rd != 16) {
396e467c
FN
8751 if (val) {
8752 store_reg(s, rm, tmp2);
8753 if (op != 0xf)
7d1b0095 8754 tcg_temp_free_i32(tmp);
396e467c
FN
8755 } else {
8756 store_reg(s, rd, tmp);
7d1b0095 8757 tcg_temp_free_i32(tmp2);
396e467c
FN
8758 }
8759 } else {
7d1b0095
PM
8760 tcg_temp_free_i32(tmp);
8761 tcg_temp_free_i32(tmp2);
99c475ab
FB
8762 }
8763 break;
8764
8765 case 5:
8766 /* load/store register offset. */
8767 rd = insn & 7;
8768 rn = (insn >> 3) & 7;
8769 rm = (insn >> 6) & 7;
8770 op = (insn >> 9) & 7;
b0109805 8771 addr = load_reg(s, rn);
b26eefb6 8772 tmp = load_reg(s, rm);
b0109805 8773 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8774 tcg_temp_free_i32(tmp);
99c475ab
FB
8775
8776 if (op < 3) /* store */
b0109805 8777 tmp = load_reg(s, rd);
99c475ab
FB
8778
8779 switch (op) {
8780 case 0: /* str */
b0109805 8781 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8782 break;
8783 case 1: /* strh */
b0109805 8784 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8785 break;
8786 case 2: /* strb */
b0109805 8787 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8788 break;
8789 case 3: /* ldrsb */
b0109805 8790 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8791 break;
8792 case 4: /* ldr */
b0109805 8793 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8794 break;
8795 case 5: /* ldrh */
b0109805 8796 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8797 break;
8798 case 6: /* ldrb */
b0109805 8799 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8800 break;
8801 case 7: /* ldrsh */
b0109805 8802 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8803 break;
8804 }
8805 if (op >= 3) /* load */
b0109805 8806 store_reg(s, rd, tmp);
7d1b0095 8807 tcg_temp_free_i32(addr);
99c475ab
FB
8808 break;
8809
8810 case 6:
8811 /* load/store word immediate offset */
8812 rd = insn & 7;
8813 rn = (insn >> 3) & 7;
b0109805 8814 addr = load_reg(s, rn);
99c475ab 8815 val = (insn >> 4) & 0x7c;
b0109805 8816 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8817
8818 if (insn & (1 << 11)) {
8819 /* load */
b0109805
PB
8820 tmp = gen_ld32(addr, IS_USER(s));
8821 store_reg(s, rd, tmp);
99c475ab
FB
8822 } else {
8823 /* store */
b0109805
PB
8824 tmp = load_reg(s, rd);
8825 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8826 }
7d1b0095 8827 tcg_temp_free_i32(addr);
99c475ab
FB
8828 break;
8829
8830 case 7:
8831 /* load/store byte immediate offset */
8832 rd = insn & 7;
8833 rn = (insn >> 3) & 7;
b0109805 8834 addr = load_reg(s, rn);
99c475ab 8835 val = (insn >> 6) & 0x1f;
b0109805 8836 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8837
8838 if (insn & (1 << 11)) {
8839 /* load */
b0109805
PB
8840 tmp = gen_ld8u(addr, IS_USER(s));
8841 store_reg(s, rd, tmp);
99c475ab
FB
8842 } else {
8843 /* store */
b0109805
PB
8844 tmp = load_reg(s, rd);
8845 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8846 }
7d1b0095 8847 tcg_temp_free_i32(addr);
99c475ab
FB
8848 break;
8849
8850 case 8:
8851 /* load/store halfword immediate offset */
8852 rd = insn & 7;
8853 rn = (insn >> 3) & 7;
b0109805 8854 addr = load_reg(s, rn);
99c475ab 8855 val = (insn >> 5) & 0x3e;
b0109805 8856 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8857
8858 if (insn & (1 << 11)) {
8859 /* load */
b0109805
PB
8860 tmp = gen_ld16u(addr, IS_USER(s));
8861 store_reg(s, rd, tmp);
99c475ab
FB
8862 } else {
8863 /* store */
b0109805
PB
8864 tmp = load_reg(s, rd);
8865 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8866 }
7d1b0095 8867 tcg_temp_free_i32(addr);
99c475ab
FB
8868 break;
8869
8870 case 9:
8871 /* load/store from stack */
8872 rd = (insn >> 8) & 7;
b0109805 8873 addr = load_reg(s, 13);
99c475ab 8874 val = (insn & 0xff) * 4;
b0109805 8875 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8876
8877 if (insn & (1 << 11)) {
8878 /* load */
b0109805
PB
8879 tmp = gen_ld32(addr, IS_USER(s));
8880 store_reg(s, rd, tmp);
99c475ab
FB
8881 } else {
8882 /* store */
b0109805
PB
8883 tmp = load_reg(s, rd);
8884 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8885 }
7d1b0095 8886 tcg_temp_free_i32(addr);
99c475ab
FB
8887 break;
8888
8889 case 10:
8890 /* add to high reg */
8891 rd = (insn >> 8) & 7;
5899f386
FB
8892 if (insn & (1 << 11)) {
8893 /* SP */
5e3f878a 8894 tmp = load_reg(s, 13);
5899f386
FB
8895 } else {
8896 /* PC. bit 1 is ignored. */
7d1b0095 8897 tmp = tcg_temp_new_i32();
5e3f878a 8898 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8899 }
99c475ab 8900 val = (insn & 0xff) * 4;
5e3f878a
PB
8901 tcg_gen_addi_i32(tmp, tmp, val);
8902 store_reg(s, rd, tmp);
99c475ab
FB
8903 break;
8904
8905 case 11:
8906 /* misc */
8907 op = (insn >> 8) & 0xf;
8908 switch (op) {
8909 case 0:
8910 /* adjust stack pointer */
b26eefb6 8911 tmp = load_reg(s, 13);
99c475ab
FB
8912 val = (insn & 0x7f) * 4;
8913 if (insn & (1 << 7))
6a0d8a1d 8914 val = -(int32_t)val;
b26eefb6
PB
8915 tcg_gen_addi_i32(tmp, tmp, val);
8916 store_reg(s, 13, tmp);
99c475ab
FB
8917 break;
8918
9ee6e8bb
PB
8919 case 2: /* sign/zero extend. */
8920 ARCH(6);
8921 rd = insn & 7;
8922 rm = (insn >> 3) & 7;
b0109805 8923 tmp = load_reg(s, rm);
9ee6e8bb 8924 switch ((insn >> 6) & 3) {
b0109805
PB
8925 case 0: gen_sxth(tmp); break;
8926 case 1: gen_sxtb(tmp); break;
8927 case 2: gen_uxth(tmp); break;
8928 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8929 }
b0109805 8930 store_reg(s, rd, tmp);
9ee6e8bb 8931 break;
99c475ab
FB
8932 case 4: case 5: case 0xc: case 0xd:
8933 /* push/pop */
b0109805 8934 addr = load_reg(s, 13);
5899f386
FB
8935 if (insn & (1 << 8))
8936 offset = 4;
99c475ab 8937 else
5899f386
FB
8938 offset = 0;
8939 for (i = 0; i < 8; i++) {
8940 if (insn & (1 << i))
8941 offset += 4;
8942 }
8943 if ((insn & (1 << 11)) == 0) {
b0109805 8944 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8945 }
99c475ab
FB
8946 for (i = 0; i < 8; i++) {
8947 if (insn & (1 << i)) {
8948 if (insn & (1 << 11)) {
8949 /* pop */
b0109805
PB
8950 tmp = gen_ld32(addr, IS_USER(s));
8951 store_reg(s, i, tmp);
99c475ab
FB
8952 } else {
8953 /* push */
b0109805
PB
8954 tmp = load_reg(s, i);
8955 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8956 }
5899f386 8957 /* advance to the next address. */
b0109805 8958 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8959 }
8960 }
a50f5b91 8961 TCGV_UNUSED(tmp);
99c475ab
FB
8962 if (insn & (1 << 8)) {
8963 if (insn & (1 << 11)) {
8964 /* pop pc */
b0109805 8965 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8966 /* don't set the pc until the rest of the instruction
8967 has completed */
8968 } else {
8969 /* push lr */
b0109805
PB
8970 tmp = load_reg(s, 14);
8971 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8972 }
b0109805 8973 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8974 }
5899f386 8975 if ((insn & (1 << 11)) == 0) {
b0109805 8976 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8977 }
99c475ab 8978 /* write back the new stack pointer */
b0109805 8979 store_reg(s, 13, addr);
99c475ab
FB
8980 /* set the new PC value */
8981 if ((insn & 0x0900) == 0x0900)
b0109805 8982 gen_bx(s, tmp);
99c475ab
FB
8983 break;
8984
9ee6e8bb
PB
8985 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8986 rm = insn & 7;
d9ba4830 8987 tmp = load_reg(s, rm);
9ee6e8bb
PB
8988 s->condlabel = gen_new_label();
8989 s->condjmp = 1;
8990 if (insn & (1 << 11))
cb63669a 8991 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8992 else
cb63669a 8993 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 8994 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8995 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8996 val = (uint32_t)s->pc + 2;
8997 val += offset;
8998 gen_jmp(s, val);
8999 break;
9000
9001 case 15: /* IT, nop-hint. */
9002 if ((insn & 0xf) == 0) {
9003 gen_nop_hint(s, (insn >> 4) & 0xf);
9004 break;
9005 }
9006 /* If Then. */
9007 s->condexec_cond = (insn >> 4) & 0xe;
9008 s->condexec_mask = insn & 0x1f;
9009 /* No actual code generated for this insn, just setup state. */
9010 break;
9011
06c949e6 9012 case 0xe: /* bkpt */
bc4a0de0 9013 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9014 break;
9015
9ee6e8bb
PB
9016 case 0xa: /* rev */
9017 ARCH(6);
9018 rn = (insn >> 3) & 0x7;
9019 rd = insn & 0x7;
b0109805 9020 tmp = load_reg(s, rn);
9ee6e8bb 9021 switch ((insn >> 6) & 3) {
66896cb8 9022 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9023 case 1: gen_rev16(tmp); break;
9024 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9025 default: goto illegal_op;
9026 }
b0109805 9027 store_reg(s, rd, tmp);
9ee6e8bb
PB
9028 break;
9029
9030 case 6: /* cps */
9031 ARCH(6);
9032 if (IS_USER(s))
9033 break;
9034 if (IS_M(env)) {
8984bd2e 9035 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9036 /* PRIMASK */
8984bd2e
PB
9037 if (insn & 1) {
9038 addr = tcg_const_i32(16);
9039 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9040 tcg_temp_free_i32(addr);
8984bd2e 9041 }
9ee6e8bb 9042 /* FAULTMASK */
8984bd2e
PB
9043 if (insn & 2) {
9044 addr = tcg_const_i32(17);
9045 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9046 tcg_temp_free_i32(addr);
8984bd2e 9047 }
b75263d6 9048 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9049 gen_lookup_tb(s);
9050 } else {
9051 if (insn & (1 << 4))
9052 shift = CPSR_A | CPSR_I | CPSR_F;
9053 else
9054 shift = 0;
fa26df03 9055 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9056 }
9057 break;
9058
99c475ab
FB
9059 default:
9060 goto undef;
9061 }
9062 break;
9063
9064 case 12:
9065 /* load/store multiple */
9066 rn = (insn >> 8) & 0x7;
b0109805 9067 addr = load_reg(s, rn);
99c475ab
FB
9068 for (i = 0; i < 8; i++) {
9069 if (insn & (1 << i)) {
99c475ab
FB
9070 if (insn & (1 << 11)) {
9071 /* load */
b0109805
PB
9072 tmp = gen_ld32(addr, IS_USER(s));
9073 store_reg(s, i, tmp);
99c475ab
FB
9074 } else {
9075 /* store */
b0109805
PB
9076 tmp = load_reg(s, i);
9077 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9078 }
5899f386 9079 /* advance to the next address */
b0109805 9080 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9081 }
9082 }
5899f386 9083 /* Base register writeback. */
b0109805
PB
9084 if ((insn & (1 << rn)) == 0) {
9085 store_reg(s, rn, addr);
9086 } else {
7d1b0095 9087 tcg_temp_free_i32(addr);
b0109805 9088 }
99c475ab
FB
9089 break;
9090
9091 case 13:
9092 /* conditional branch or swi */
9093 cond = (insn >> 8) & 0xf;
9094 if (cond == 0xe)
9095 goto undef;
9096
9097 if (cond == 0xf) {
9098 /* swi */
422ebf69 9099 gen_set_pc_im(s->pc);
9ee6e8bb 9100 s->is_jmp = DISAS_SWI;
99c475ab
FB
9101 break;
9102 }
9103 /* generate a conditional jump to next instruction */
e50e6a20 9104 s->condlabel = gen_new_label();
d9ba4830 9105 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9106 s->condjmp = 1;
99c475ab
FB
9107
9108 /* jump to the offset */
5899f386 9109 val = (uint32_t)s->pc + 2;
99c475ab 9110 offset = ((int32_t)insn << 24) >> 24;
5899f386 9111 val += offset << 1;
8aaca4c0 9112 gen_jmp(s, val);
99c475ab
FB
9113 break;
9114
9115 case 14:
358bf29e 9116 if (insn & (1 << 11)) {
9ee6e8bb
PB
9117 if (disas_thumb2_insn(env, s, insn))
9118 goto undef32;
358bf29e
PB
9119 break;
9120 }
9ee6e8bb 9121 /* unconditional branch */
99c475ab
FB
9122 val = (uint32_t)s->pc;
9123 offset = ((int32_t)insn << 21) >> 21;
9124 val += (offset << 1) + 2;
8aaca4c0 9125 gen_jmp(s, val);
99c475ab
FB
9126 break;
9127
9128 case 15:
9ee6e8bb 9129 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9130 goto undef32;
9ee6e8bb 9131 break;
99c475ab
FB
9132 }
9133 return;
9ee6e8bb 9134undef32:
bc4a0de0 9135 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9136 return;
9137illegal_op:
99c475ab 9138undef:
bc4a0de0 9139 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9140}
9141
2c0262af
FB
9142/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9143 basic block 'tb'. If search_pc is TRUE, also generate PC
9144 information for each intermediate instruction. */
2cfc5f17
TS
9145static inline void gen_intermediate_code_internal(CPUState *env,
9146 TranslationBlock *tb,
9147 int search_pc)
2c0262af
FB
9148{
9149 DisasContext dc1, *dc = &dc1;
a1d1bb31 9150 CPUBreakpoint *bp;
2c0262af
FB
9151 uint16_t *gen_opc_end;
9152 int j, lj;
0fa85d43 9153 target_ulong pc_start;
b5ff1b31 9154 uint32_t next_page_start;
2e70f6ef
PB
9155 int num_insns;
9156 int max_insns;
3b46e624 9157
2c0262af 9158 /* generate intermediate code */
0fa85d43 9159 pc_start = tb->pc;
3b46e624 9160
2c0262af
FB
9161 dc->tb = tb;
9162
2c0262af 9163 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9164
9165 dc->is_jmp = DISAS_NEXT;
9166 dc->pc = pc_start;
8aaca4c0 9167 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9168 dc->condjmp = 0;
7204ab88 9169 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9170 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9171 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9172#if !defined(CONFIG_USER_ONLY)
61f74d6a 9173 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9174#endif
5df8bac1 9175 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9176 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9177 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9178 cpu_F0s = tcg_temp_new_i32();
9179 cpu_F1s = tcg_temp_new_i32();
9180 cpu_F0d = tcg_temp_new_i64();
9181 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9182 cpu_V0 = cpu_F0d;
9183 cpu_V1 = cpu_F1d;
e677137d 9184 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9185 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9186 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9187 lj = -1;
2e70f6ef
PB
9188 num_insns = 0;
9189 max_insns = tb->cflags & CF_COUNT_MASK;
9190 if (max_insns == 0)
9191 max_insns = CF_COUNT_MASK;
9192
9193 gen_icount_start();
e12ce78d 9194
3849902c
PM
9195 tcg_clear_temp_count();
9196
e12ce78d
PM
9197 /* A note on handling of the condexec (IT) bits:
9198 *
9199 * We want to avoid the overhead of having to write the updated condexec
9200 * bits back to the CPUState for every instruction in an IT block. So:
9201 * (1) if the condexec bits are not already zero then we write
9202 * zero back into the CPUState now. This avoids complications trying
9203 * to do it at the end of the block. (For example if we don't do this
9204 * it's hard to identify whether we can safely skip writing condexec
9205 * at the end of the TB, which we definitely want to do for the case
9206 * where a TB doesn't do anything with the IT state at all.)
9207 * (2) if we are going to leave the TB then we call gen_set_condexec()
9208 * which will write the correct value into CPUState if zero is wrong.
9209 * This is done both for leaving the TB at the end, and for leaving
9210 * it because of an exception we know will happen, which is done in
9211 * gen_exception_insn(). The latter is necessary because we need to
9212 * leave the TB with the PC/IT state just prior to execution of the
9213 * instruction which caused the exception.
9214 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9215 * then the CPUState will be wrong and we need to reset it.
9216 * This is handled in the same way as restoration of the
9217 * PC in these situations: we will be called again with search_pc=1
9218 * and generate a mapping of the condexec bits for each PC in
9219 * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
9220 * the condexec bits.
9221 *
9222 * Note that there are no instructions which can read the condexec
9223 * bits, and none which can write non-static values to them, so
9224 * we don't need to care about whether CPUState is correct in the
9225 * middle of a TB.
9226 */
9227
9ee6e8bb
PB
9228 /* Reset the conditional execution bits immediately. This avoids
9229 complications trying to do it at the end of the block. */
98eac7ca 9230 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9231 {
7d1b0095 9232 TCGv tmp = tcg_temp_new_i32();
8f01245e 9233 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9234 store_cpu_field(tmp, condexec_bits);
8f01245e 9235 }
2c0262af 9236 do {
fbb4a2e3
PB
9237#ifdef CONFIG_USER_ONLY
9238 /* Intercept jump to the magic kernel page. */
9239 if (dc->pc >= 0xffff0000) {
9240 /* We always get here via a jump, so know we are not in a
9241 conditional execution block. */
9242 gen_exception(EXCP_KERNEL_TRAP);
9243 dc->is_jmp = DISAS_UPDATE;
9244 break;
9245 }
9246#else
9ee6e8bb
PB
9247 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9248 /* We always get here via a jump, so know we are not in a
9249 conditional execution block. */
d9ba4830 9250 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9251 dc->is_jmp = DISAS_UPDATE;
9252 break;
9ee6e8bb
PB
9253 }
9254#endif
9255
72cf2d4f
BS
9256 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9257 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9258 if (bp->pc == dc->pc) {
bc4a0de0 9259 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9260 /* Advance PC so that clearing the breakpoint will
9261 invalidate this TB. */
9262 dc->pc += 2;
9263 goto done_generating;
1fddef4b
FB
9264 break;
9265 }
9266 }
9267 }
2c0262af
FB
9268 if (search_pc) {
9269 j = gen_opc_ptr - gen_opc_buf;
9270 if (lj < j) {
9271 lj++;
9272 while (lj < j)
9273 gen_opc_instr_start[lj++] = 0;
9274 }
0fa85d43 9275 gen_opc_pc[lj] = dc->pc;
e12ce78d 9276 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9277 gen_opc_instr_start[lj] = 1;
2e70f6ef 9278 gen_opc_icount[lj] = num_insns;
2c0262af 9279 }
e50e6a20 9280
2e70f6ef
PB
9281 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9282 gen_io_start();
9283
5642463a
PM
9284 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9285 tcg_gen_debug_insn_start(dc->pc);
9286 }
9287
7204ab88 9288 if (dc->thumb) {
9ee6e8bb
PB
9289 disas_thumb_insn(env, dc);
9290 if (dc->condexec_mask) {
9291 dc->condexec_cond = (dc->condexec_cond & 0xe)
9292 | ((dc->condexec_mask >> 4) & 1);
9293 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9294 if (dc->condexec_mask == 0) {
9295 dc->condexec_cond = 0;
9296 }
9297 }
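            /* Worked example of the update above (illustrative values
               only): starting from condexec_cond == 0x0 and
               condexec_mask == 0x18 (0b11000), the next insn sees
               cond == (0x0 & 0xe) | 1 == 0x1 and
               mask == (0x18 << 1) & 0x1f == 0x10; one more step shifts
               the mask to zero, and the condition is then cleared too. */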
9298 } else {
9299 disas_arm_insn(env, dc);
9300 }
e50e6a20
FB
9301
9302 if (dc->condjmp && !dc->is_jmp) {
9303 gen_set_label(dc->condlabel);
9304 dc->condjmp = 0;
9305 }
3849902c
PM
9306
9307 if (tcg_check_temp_count()) {
9308 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9309 }
9310
aaf2d97d 9311 /* Translation stops when a conditional branch is encountered.
e50e6a20 9312 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9313 * Also stop translation when a page boundary is reached. This
bf20dc07 9314 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9315 num_insns ++;
1fddef4b
FB
9316 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9317 !env->singlestep_enabled &&
1b530a6d 9318 !singlestep &&
2e70f6ef
PB
9319 dc->pc < next_page_start &&
9320 num_insns < max_insns);
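    /* (Plain-English reading of the loop condition above: keep translating
       until an insn ends the block, the opcode buffer fills up,
       single-stepping is requested, the next page boundary is reached, or
       max_insns is hit.) */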
9321
9322 if (tb->cflags & CF_LAST_IO) {
9323 if (dc->condjmp) {
9324 /* FIXME: This can theoretically happen with self-modifying
9325 code. */
9326 cpu_abort(env, "IO on conditional branch instruction");
9327 }
9328 gen_io_end();
9329 }
9ee6e8bb 9330
b5ff1b31 9331 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9332 instruction was a conditional branch or trap, and the PC has
9333 already been written. */
551bd27f 9334 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9335 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9336 if (dc->condjmp) {
9ee6e8bb
PB
9337 gen_set_condexec(dc);
9338 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9339 gen_exception(EXCP_SWI);
9ee6e8bb 9340 } else {
d9ba4830 9341 gen_exception(EXCP_DEBUG);
9ee6e8bb 9342 }
e50e6a20
FB
9343 gen_set_label(dc->condlabel);
9344 }
9345 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9346 gen_set_pc_im(dc->pc);
e50e6a20 9347 dc->condjmp = 0;
8aaca4c0 9348 }
9ee6e8bb
PB
9349 gen_set_condexec(dc);
9350 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9351 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9352 } else {
9353 /* FIXME: Single stepping a WFI insn will not halt
9354 the CPU. */
d9ba4830 9355 gen_exception(EXCP_DEBUG);
9ee6e8bb 9356 }
8aaca4c0 9357 } else {
9ee6e8bb
PB
9358 /* While branches must always occur at the end of an IT block,
9359 there are a few other things that can cause us to terminate
9360 the TB in the middle of an IT block:
9361 - Exception generating instructions (bkpt, swi, undefined).
9362 - Page boundaries.
9363 - Hardware watchpoints.
9364 Hardware breakpoints have already been handled and skip this code.
9365 */
9366 gen_set_condexec(dc);
8aaca4c0 9367 switch(dc->is_jmp) {
8aaca4c0 9368 case DISAS_NEXT:
6e256c93 9369 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9370 break;
9371 default:
9372 case DISAS_JUMP:
9373 case DISAS_UPDATE:
9374 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9375 tcg_gen_exit_tb(0);
8aaca4c0
FB
9376 break;
9377 case DISAS_TB_JUMP:
9378 /* nothing more to generate */
9379 break;
9ee6e8bb 9380 case DISAS_WFI:
d9ba4830 9381 gen_helper_wfi();
9ee6e8bb
PB
9382 break;
9383 case DISAS_SWI:
d9ba4830 9384 gen_exception(EXCP_SWI);
9ee6e8bb 9385 break;
8aaca4c0 9386 }
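        /* (Summary of the switch above, for orientation only: DISAS_NEXT
           chains directly to the next TB via gen_goto_tb(); DISAS_JUMP and
           DISAS_UPDATE end with tcg_gen_exit_tb(0) so the next TB is found
           through the hash table; DISAS_TB_JUMP has already emitted its
           exit; DISAS_WFI calls the wfi helper and DISAS_SWI raises
           EXCP_SWI.) */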
e50e6a20
FB
9387 if (dc->condjmp) {
9388 gen_set_label(dc->condlabel);
9ee6e8bb 9389 gen_set_condexec(dc);
6e256c93 9390 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9391 dc->condjmp = 0;
9392 }
2c0262af 9393 }
2e70f6ef 9394
9ee6e8bb 9395done_generating:
2e70f6ef 9396 gen_icount_end(tb, num_insns);
2c0262af
FB
9397 *gen_opc_ptr = INDEX_op_end;
9398
9399#ifdef DEBUG_DISAS
8fec2b8c 9400 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9401 qemu_log("----------------\n");
9402 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9403 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9404 qemu_log("\n");
2c0262af
FB
9405 }
9406#endif
b5ff1b31
FB
9407 if (search_pc) {
9408 j = gen_opc_ptr - gen_opc_buf;
9409 lj++;
9410 while (lj <= j)
9411 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9412 } else {
2c0262af 9413 tb->size = dc->pc - pc_start;
2e70f6ef 9414 tb->icount = num_insns;
b5ff1b31 9415 }
2c0262af
FB
9416}
9417
2cfc5f17 9418void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9419{
2cfc5f17 9420 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9421}
9422
2cfc5f17 9423void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9424{
2cfc5f17 9425 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9426}
9427
b5ff1b31
FB
9428static const char *cpu_mode_names[16] = {
9429 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9430 "???", "???", "???", "und", "???", "???", "???", "sys"
9431};
9ee6e8bb 9432
9a78eead 9433void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9434 int flags)
2c0262af
FB
9435{
9436 int i;
06e80fc9 9437#if 0
bc380d17 9438 union {
b7bcbe95
FB
9439 uint32_t i;
9440 float s;
9441 } s0, s1;
9442 CPU_DoubleU d;
a94a6abf
PB
9443 /* ??? This assumes float64 and double have the same layout.
9444 Oh well, it's only debug dumps. */
9445 union {
9446 float64 f64;
9447 double d;
9448 } d0;
06e80fc9 9449#endif
b5ff1b31 9450 uint32_t psr;
2c0262af
FB
9451
9452 for(i=0;i<16;i++) {
7fe48483 9453 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9454 if ((i % 4) == 3)
7fe48483 9455 cpu_fprintf(f, "\n");
2c0262af 9456 else
7fe48483 9457 cpu_fprintf(f, " ");
2c0262af 9458 }
b5ff1b31 9459 psr = cpsr_read(env);
687fa640
TS
9460 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9461 psr,
b5ff1b31
FB
9462 psr & (1 << 31) ? 'N' : '-',
9463 psr & (1 << 30) ? 'Z' : '-',
9464 psr & (1 << 29) ? 'C' : '-',
9465 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9466 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9467 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
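    /* (Decoding of the line above: N/Z/C/V are the usual flag bits, the
       single letter is 'T' for Thumb state and 'A' for ARM state, then the
       mode name; the trailing 32/26 presumably reflects whether bit 4 of
       the mode field marks a 32-bit-PC mode.) */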
b7bcbe95 9468
5e3f878a 9469#if 0
b7bcbe95 9470 for (i = 0; i < 16; i++) {
8e96005d
FB
9471 d.d = env->vfp.regs[i];
9472 s0.i = d.l.lower;
9473 s1.i = d.l.upper;
a94a6abf
PB
9474 d0.f64 = d.d;
9475 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9476 i * 2, (int)s0.i, s0.s,
a94a6abf 9477 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9478 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9479 d0.d);
b7bcbe95 9480 }
40f137e1 9481 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9482#endif
2c0262af 9483}
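/* Minimal usage sketch (hypothetical, not taken from this file; assumes an
 * fprintf-compatible callback is acceptable as cpu_fprintf):
 *
 *     cpu_dump_state(env, stderr, fprintf, 0);
 */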
a6b025d3 9484
d2856f1a
AJ
9485void gen_pc_load(CPUState *env, TranslationBlock *tb,
9486 unsigned long searched_pc, int pc_pos, void *puc)
9487{
9488 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 9489 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
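    /* This mirrors the packing done in gen_intermediate_code_internal():
       the condition ends up back in bits [7:4] of env->condexec_bits and
       the mask bits in [3:0]. */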
d2856f1a 9490}