]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
vnc: tight: Fix crash after 2GB of output
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
28#include "exec-all.h"
29#include "disas.h"
57fec1fe 30#include "tcg-op.h"
79383c9c 31#include "qemu-log.h"
1497c961 32
a7812ae4 33#include "helpers.h"
1497c961 34#define GEN_HELPER 1
b26eefb6 35#include "helpers.h"
2c0262af 36
9ee6e8bb
PB
/* Architecture-feature gates.  Each expands to a runtime check on the
   translating CPU's feature bits ("env" must be in scope at the use site).
   5J is hard-disabled: the Jazelle extension is not implemented.  */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K   arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

/* Bail out to the decoder's illegal_op label when the required
   architecture level is not present on this CPU.  */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 44
2c0262af
FB
/* internal defines */
/* Per-translation-block decoder state, threaded through every
   disas_* routine while one TB is being translated.  */
typedef struct DisasContext {
    target_ulong pc;            /* address of the insn being translated */
    int is_jmp;                 /* DISAS_* code: how/why translation ends */
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;                  /* nonzero when decoding Thumb code */
#if !defined(CONFIG_USER_ONLY)
    int user;                   /* nonzero when translating user-mode code */
#endif
    int vfp_enabled;            /* VFP access currently permitted */
    int vec_len;                /* VFP short-vector length (FPSCR LEN) */
    int vec_stride;             /* VFP short-vector stride (FPSCR STRIDE) */
} DisasContext;
66
e12ce78d
PM
/* Saved Thumb-2 condexec state for each generated op, used when
   restoring CPU state mid-TB (parallels the gen_opc_* arrays).  */
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

/* In user-mode emulation everything runs unprivileged; otherwise the
   privilege level comes from the DisasContext.  */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional executions state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* TCG globals mapping the 16 ARM core registers.  */
static TCGv_i32 cpu_R[16];
/* State for load/store-exclusive (LDREX/STREX) emulation.  */
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
/* Scratch registers used by the VFP/Neon translation code.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

/* Names for the cpu_R globals, for TCG debug output.  */
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
101
b26eefb6
PB
/* initialize TCG globals.  Called once at startup; registers the fixed
   TCG variables (env pointer, core registers, exclusive-access state)
   and instantiates the helper prototypes.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* Map each ARM core register onto its slot in CPUState.  */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

    /* Second expansion of helpers.h registers the helper functions
       with TCG (GEN_HELPER == 1 above generated the inline wrappers).  */
#define GEN_HELPER 2
#include "helpers.h"
}
130
d9ba4830
PB
/* Load a 32-bit field at `offset` inside CPUState into a fresh temporary.  */
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

/* Store `var` to the 32-bit field at `offset` inside CPUState.
   `var` must be a temporary; it is freed here (marked dead).  */
static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
148
b26eefb6
PB
/* Set a variable to the value of a CPU register.  Reading r15 yields
   the architectural PC: the current insn address plus 4 (ARM) or
   plus 2 (Thumb), since s->pc already points past the current insn.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  Writing r15 clears bit 0 (no interworking here)
   and ends the TB, since control flow has changed.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
184
b26eefb6 185/* Value extensions. */
86831435
PB
186#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
187#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
188#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
189#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
190
1497c961
PB
191#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
192#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 193
b26eefb6 194
b75263d6
JR
/* Write the bits of `var` selected by `mask` into the CPSR,
   via the cpsr_write helper (handles banked state/side effects).  */
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
203
204static void gen_exception(int excp)
205{
7d1b0095 206 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
207 tcg_gen_movi_i32(tmp, excp);
208 gen_helper_exception(tmp);
7d1b0095 209 tcg_temp_free_i32(tmp);
d9ba4830
PB
210}
211
3670669c
PB
/* Dual signed 16x16->32 multiply (SMUAD/SMUSD family input step):
   on return a = lo(a)*lo(b) and b = hi(a)*hi(b), each as a signed
   32-bit product.  */
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    /* Low halves, sign-extended, multiplied into tmp1.  */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* High halves: arithmetic shift right gives the sign-extended
       top 16 bits in place.  */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
246
247/* Unsigned bitfield extract. */
248static void gen_ubfx(TCGv var, int shift, uint32_t mask)
249{
250 if (shift)
251 tcg_gen_shri_i32(var, var, shift);
252 tcg_gen_andi_i32(var, var, mask);
253}
254
/* Signed bitfield extract.  Shifts the field down then sign-extends
   it from `width` bits using the xor/sub trick: x' = (x ^ s) - s
   where s is the field's sign bit.  A field reaching bit 31 needs no
   masking -- the arithmetic shift already sign-extended it.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.
   `mask` is the unshifted field mask; the field lands at `shift`.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
278
838fa72d
AJ
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
306
8f01245e
PB
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
/* Unsigned variant: both operands zero-extended before the multiply.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Signed 32x32->64 multiply; operands sign-extended.  Inputs die.  */
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
337
8f01245e
PB
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
   The masking/xor prevents the carry of the low halfword add from
   propagating into the high halfword.  */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
367
9a119ff6
PB
/* Store `var` as the cached carry flag word in CPUState.  */
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var.  (Both flag words cache the result
   value itself; N is its sign bit, Z is "value == 0".)  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}
395
e9bb4aa9
JR
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1.  (Subtract with carry/borrow, SBC.)  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}
416
ad69471c
PB
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* Set the carry flag to bit `shift` of var -- the bit shifted out by
   the preceding shifter operand computation.  shift == 0 selects
   bit 0; for shift == 31 the shift-right alone isolates the bit.  */
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0,
   which the ARM encoding reuses: LSR/ASR #0 mean a shift by 32,
   and ROR #0 means RRX (rotate right through carry by one).
   `flags` requests carry-flag update as for the S-suffixed forms.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* Encoded 0 means shift by 32: result 0, C = old bit 31.  */
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        /* ASR #32 gives the same result as ASR #31 (all sign bits).  */
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            /* RRX: shift right one, old carry becomes bit 31.  */
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};
483
8984bd2e
PB
/* Shift `var` by a register-supplied amount.  With `flags` set the
   carry-updating helper variants are used; otherwise plain shifts.
   `shift` is consumed (freed) in both paths.  */
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
505
6ddbc6e4
PB
/* Dispatch table for the SIMD parallel add/subtract helper names.
   The gen_pas_helper macro is redefined around each use so that the
   GE-flag-producing variants get a pointer to CPUState.GE and the
   saturating/halving variants do not.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
/* Emit an ARM-encoding parallel add/subtract.  op1 selects the
   variant family (signed/unsigned x plain/saturating/halving),
   op2 the operation within the family.  */
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        /* Signed, sets GE bits.  */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        /* Unsigned, sets GE bits.  */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
/* Thumb-2 counterpart of gen_arm_parallel_addsub; note op1/op2 swap
   roles relative to the ARM encoding.  */
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
598
d9ba4830
PB
/* Emit a branch to `label` taken when ARM condition code `cc`
   (0..13; AL/NV never reach here) holds.  Works on the lazily cached
   flag words: ZF holds the last result value (zero => Z set), NF and
   VF hold the flag in bit 31, CF holds 0/1.  */
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        /* Fall through to `inv` (condition false) when C is clear.  */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}
2c0262af 696
b1d8e52e 697static const uint8_t table_logic_cc[16] = {
2c0262af
FB
698 1, /* and */
699 1, /* xor */
700 0, /* sub */
701 0, /* rsb */
702 0, /* add */
703 0, /* adc */
704 0, /* sbc */
705 0, /* rsc */
706 1, /* andl */
707 1, /* xorl */
708 0, /* cmp */
709 0, /* cmn */
710 1, /* orr */
711 1, /* mov */
712 1, /* bic */
713 1, /* mvn */
714};
3b46e624 715
d9ba4830
PB
/* Set PC and Thumb state from an immediate address.  Bit 0 of the
   address selects the new Thumb state (interworking branch).  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    /* Only touch the thumb flag when it actually changes.  */
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
752
b0109805
PB
753static inline TCGv gen_ld8s(TCGv addr, int index)
754{
7d1b0095 755 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
756 tcg_gen_qemu_ld8s(tmp, addr, index);
757 return tmp;
758}
759static inline TCGv gen_ld8u(TCGv addr, int index)
760{
7d1b0095 761 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
762 tcg_gen_qemu_ld8u(tmp, addr, index);
763 return tmp;
764}
765static inline TCGv gen_ld16s(TCGv addr, int index)
766{
7d1b0095 767 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
768 tcg_gen_qemu_ld16s(tmp, addr, index);
769 return tmp;
770}
771static inline TCGv gen_ld16u(TCGv addr, int index)
772{
7d1b0095 773 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
774 tcg_gen_qemu_ld16u(tmp, addr, index);
775 return tmp;
776}
777static inline TCGv gen_ld32(TCGv addr, int index)
778{
7d1b0095 779 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
780 tcg_gen_qemu_ld32u(tmp, addr, index);
781 return tmp;
782}
84496233
JR
783static inline TCGv_i64 gen_ld64(TCGv addr, int index)
784{
785 TCGv_i64 tmp = tcg_temp_new_i64();
786 tcg_gen_qemu_ld64(tmp, addr, index);
787 return tmp;
788}
b0109805
PB
789static inline void gen_st8(TCGv val, TCGv addr, int index)
790{
791 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 792 tcg_temp_free_i32(val);
b0109805
PB
793}
794static inline void gen_st16(TCGv val, TCGv addr, int index)
795{
796 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 797 tcg_temp_free_i32(val);
b0109805
PB
798}
799static inline void gen_st32(TCGv val, TCGv addr, int index)
800{
801 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 802 tcg_temp_free_i32(val);
b0109805 803}
84496233
JR
804static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
805{
806 tcg_gen_qemu_st64(val, addr, index);
807 tcg_temp_free_i64(val);
808}
b5ff1b31 809
5e3f878a
PB
/* Set the guest PC to an immediate value.  */
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
821
b0109805
PB
/* Apply the addressing-mode offset of a single data transfer insn
   (LDR/STR encoding) to `var`: immediate 12-bit or shifted register,
   added or subtracted per the U bit (bit 23).  */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

/* As above for the halfword/signed transfer encoding: split 8-bit
   immediate or plain register offset.  `extra` is an additional
   immediate adjustment folded in by the caller.  */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
877
4373f3ce
PB
/* VFP arithmetic on the implicit F0/F1 scratch registers.  `dp`
   selects double (cpu_F0d/F1d) vs. single (cpu_F0s/F1s) precision;
   the result lands back in F0.  */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

/* Compare F0 with F1 (quiet form).  */
static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

/* Compare F0 with F1, signaling on quiet NaNs.  */
static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

/* Load constant zero into F1 (for compare-with-zero forms).  */
static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

/* Integer -> float conversions.  The integer source is always read
   from the single-precision scratch register cpu_F0s.  */
static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

/* Float -> integer conversions; the integer result always lands in
   cpu_F0s.  The *z variants use round-towards-zero.  */
static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

/* Fixed-point conversions: `shift` is the number of fraction bits.  */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1009
/* Load F0 (double or single) from guest memory at `addr`.  */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

/* Store F0 (double or single) to guest memory at `addr`.  */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
1025
8e96005d
FB
/* Offset within CPUARMState of VFP register `reg`.  For single
   precision, registers pack two per double slot: odd numbers map to
   the upper 32-bit half, even to the lower.  */
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
9ee6e8bb
PB
1039
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  A NEON double
   register maps onto two consecutive single precision VFP slots.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1049
/* Load the 32-bit piece (reg, pass) of a NEON register into a freshly
   allocated i32 temporary.  The caller owns the returned temp and is
   responsible for freeing it.  */
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1056
/* Store "var" into the 32-bit piece (reg, pass) of a NEON register.
   Consumes "var": the temp is freed here, so the caller must not use
   it afterwards.  */
static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1062
/* Load a whole NEON/VFP double register into the i64 temp "var".  */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1067
/* Store the i64 temp "var" into a whole NEON/VFP double register.
   Does not free "var".  */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1072
/* TCG has no distinct float load/store ops; floats are moved as raw
   i32/i64 bit patterns.  These aliases keep the VFP code readable.  */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
b7bcbe95
FB
1078static inline void gen_mov_F0_vreg(int dp, int reg)
1079{
1080 if (dp)
4373f3ce 1081 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1082 else
4373f3ce 1083 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1084}
1085
1086static inline void gen_mov_F1_vreg(int dp, int reg)
1087{
1088 if (dp)
4373f3ce 1089 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1090 else
4373f3ce 1091 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1092}
1093
1094static inline void gen_mov_vreg_F0(int dp, int reg)
1095{
1096 if (dp)
4373f3ce 1097 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1098 else
4373f3ce 1099 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1100}
1101
/* Bit 20 of a coprocessor instruction encoding: set for transfers
   from the coprocessor to the core (loads/MRC), clear for the
   opposite direction.  */
#define ARM_CP_RW_BIT (1 << 20)
/* Load iwMMXt data register wRn into the i64 temp "var".  */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1108
/* Store the i64 temp "var" into iwMMXt data register wRn.
   Does not free "var".  */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1113
/* Load iwMMXt control register wCx into a freshly allocated i32 temp.
   The caller owns (and must free) the returned temp.  */
static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}
1120
/* Store "var" into iwMMXt control register wCx.  Consumes "var":
   the temp is freed here.  */
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1126
/* wRn = M0 (write back the 64-bit working value).  */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1131
/* M0 = wRn (fetch a 64-bit source operand into the working value).  */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1136
/* M0 |= wRn (bitwise OR into the working value; clobbers cpu_V1).  */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1142
/* M0 &= wRn (bitwise AND into the working value; clobbers cpu_V1).  */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1148
/* M0 ^= wRn (bitwise XOR into the working value; clobbers cpu_V1).  */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1154
/* Generate gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn).
   The second operand is loaded via cpu_V1.  */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}
1161
/* Instantiate IWMMXT_OP for the byte/word/long variants of an op.  */
#define IWMMXT_OP_SIZE(name) \
IWMMXT_OP(name##b) \
IWMMXT_OP(name##w) \
IWMMXT_OP(name##l)
e677137d 1166
/* Generate gen_op_iwmmxt_<name>_M0(): M0 = helper(M0), i.e. a
   single-operand op applied in place to the working value.  */
#define IWMMXT_OP_1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
}
1172
/* Two-operand ops with a fixed element size.  */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Two-operand ops in byte/word/long flavors.  */
IWMMXT_OP_SIZE(unpackl)
IWMMXT_OP_SIZE(unpackh)

/* Single-operand unpack ops (zero/sign extend within M0).  */
IWMMXT_OP_1(unpacklub)
IWMMXT_OP_1(unpackluw)
IWMMXT_OP_1(unpacklul)
IWMMXT_OP_1(unpackhub)
IWMMXT_OP_1(unpackhuw)
IWMMXT_OP_1(unpackhul)
IWMMXT_OP_1(unpacklsb)
IWMMXT_OP_1(unpacklsw)
IWMMXT_OP_1(unpacklsl)
IWMMXT_OP_1(unpackhsb)
IWMMXT_OP_1(unpackhsw)
IWMMXT_OP_1(unpackhsl)

IWMMXT_OP_SIZE(cmpeq)
IWMMXT_OP_SIZE(cmpgtu)
IWMMXT_OP_SIZE(cmpgts)

IWMMXT_OP_SIZE(mins)
IWMMXT_OP_SIZE(minu)
IWMMXT_OP_SIZE(maxs)
IWMMXT_OP_SIZE(maxu)

IWMMXT_OP_SIZE(subn)
IWMMXT_OP_SIZE(addn)
IWMMXT_OP_SIZE(subu)
IWMMXT_OP_SIZE(addu)
IWMMXT_OP_SIZE(subs)
IWMMXT_OP_SIZE(adds)

IWMMXT_OP(avgb0)
IWMMXT_OP(avgb1)
IWMMXT_OP(avgw0)
IWMMXT_OP(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP(packuw)
IWMMXT_OP(packul)
IWMMXT_OP(packuq)
IWMMXT_OP(packsw)
IWMMXT_OP(packsl)
IWMMXT_OP(packsq)
e677137d 1229
/* Set bit 1 of wCon -- presumably the MUP (data register updated)
   flag per the iwMMXt spec; TODO confirm against the helper docs.  */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1237
/* Set bit 0 of wCon -- presumably the CUP (control register updated)
   flag per the iwMMXt spec; TODO confirm against the helper docs.  */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1245
/* Compute the SIMD N/Z status for M0 via the helper and store it in
   the wCASF control register.  */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1252
/* M0 += (uint64)(uint32)wRn: add the zero-extended low 32 bits of
   wRn to the working value.  Clobbers cpu_V1.  */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1259
/* Decode the addressing mode of an iwMMXt load/store instruction and
   emit code that leaves the effective address in "dest".  Bits used:
   24 = pre-indexed, 23 = add (vs subtract) offset, 21 = writeback.
   The 8-bit immediate offset is scaled by 4 when bit 8 is set.
   Handles base register writeback for pre/post indexed forms.
   Returns 1 for an invalid addressing mode, 0 on success.
   NOTE(review): in the "neither pre- nor post-indexed" path at the
   bottom, "dest" is never written and the "tmp" temp from load_reg()
   is not freed -- looks like a TCG temp leak and a possibly
   uninitialized address; verify against callers before relying on
   that encoding.  */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        /* Writeback consumes tmp; otherwise free it here.  */
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed: address is the unmodified base, then the base
           register is updated.  */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1293
/* Fetch the shift amount operand of an iwMMXt shift instruction into
   "dest", masked to "mask".  Bit 8 of the instruction selects a wCGR
   control register source (only wCGR0..wCGR3 are legal); otherwise
   the low 32 bits of data register wRn are used (via cpu_V0).
   Returns 1 for an illegal register selection, 0 on success.  */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1315
1316/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1317 (ie. an undefined instruction). */
1318static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1319{
1320 int rd, wrd;
1321 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1322 TCGv addr;
1323 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1324
1325 if ((insn & 0x0e000e00) == 0x0c000000) {
1326 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1327 wrd = insn & 0xf;
1328 rdlo = (insn >> 12) & 0xf;
1329 rdhi = (insn >> 16) & 0xf;
1330 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1331 iwmmxt_load_reg(cpu_V0, wrd);
1332 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1333 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1334 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1335 } else { /* TMCRR */
da6b5335
FN
1336 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1337 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1338 gen_op_iwmmxt_set_mup();
1339 }
1340 return 0;
1341 }
1342
1343 wrd = (insn >> 12) & 0xf;
7d1b0095 1344 addr = tcg_temp_new_i32();
da6b5335 1345 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1346 tcg_temp_free_i32(addr);
18c9b560 1347 return 1;
da6b5335 1348 }
18c9b560
AZ
1349 if (insn & ARM_CP_RW_BIT) {
1350 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1351 tmp = tcg_temp_new_i32();
da6b5335
FN
1352 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1353 iwmmxt_store_creg(wrd, tmp);
18c9b560 1354 } else {
e677137d
PB
1355 i = 1;
1356 if (insn & (1 << 8)) {
1357 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1358 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1359 i = 0;
1360 } else { /* WLDRW wRd */
da6b5335 1361 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1362 }
1363 } else {
1364 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1365 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1366 } else { /* WLDRB */
da6b5335 1367 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1368 }
1369 }
1370 if (i) {
1371 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1372 tcg_temp_free_i32(tmp);
e677137d 1373 }
18c9b560
AZ
1374 gen_op_iwmmxt_movq_wRn_M0(wrd);
1375 }
1376 } else {
1377 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1378 tmp = iwmmxt_load_creg(wrd);
1379 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1380 } else {
1381 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1382 tmp = tcg_temp_new_i32();
e677137d
PB
1383 if (insn & (1 << 8)) {
1384 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1385 tcg_temp_free_i32(tmp);
da6b5335 1386 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1387 } else { /* WSTRW wRd */
1388 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1389 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1390 }
1391 } else {
1392 if (insn & (1 << 22)) { /* WSTRH */
1393 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1394 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1395 } else { /* WSTRB */
1396 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1397 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1398 }
1399 }
18c9b560
AZ
1400 }
1401 }
7d1b0095 1402 tcg_temp_free_i32(addr);
18c9b560
AZ
1403 return 0;
1404 }
1405
1406 if ((insn & 0x0f000000) != 0x0e000000)
1407 return 1;
1408
1409 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1410 case 0x000: /* WOR */
1411 wrd = (insn >> 12) & 0xf;
1412 rd0 = (insn >> 0) & 0xf;
1413 rd1 = (insn >> 16) & 0xf;
1414 gen_op_iwmmxt_movq_M0_wRn(rd0);
1415 gen_op_iwmmxt_orq_M0_wRn(rd1);
1416 gen_op_iwmmxt_setpsr_nz();
1417 gen_op_iwmmxt_movq_wRn_M0(wrd);
1418 gen_op_iwmmxt_set_mup();
1419 gen_op_iwmmxt_set_cup();
1420 break;
1421 case 0x011: /* TMCR */
1422 if (insn & 0xf)
1423 return 1;
1424 rd = (insn >> 12) & 0xf;
1425 wrd = (insn >> 16) & 0xf;
1426 switch (wrd) {
1427 case ARM_IWMMXT_wCID:
1428 case ARM_IWMMXT_wCASF:
1429 break;
1430 case ARM_IWMMXT_wCon:
1431 gen_op_iwmmxt_set_cup();
1432 /* Fall through. */
1433 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1434 tmp = iwmmxt_load_creg(wrd);
1435 tmp2 = load_reg(s, rd);
f669df27 1436 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1437 tcg_temp_free_i32(tmp2);
da6b5335 1438 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1439 break;
1440 case ARM_IWMMXT_wCGR0:
1441 case ARM_IWMMXT_wCGR1:
1442 case ARM_IWMMXT_wCGR2:
1443 case ARM_IWMMXT_wCGR3:
1444 gen_op_iwmmxt_set_cup();
da6b5335
FN
1445 tmp = load_reg(s, rd);
1446 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1447 break;
1448 default:
1449 return 1;
1450 }
1451 break;
1452 case 0x100: /* WXOR */
1453 wrd = (insn >> 12) & 0xf;
1454 rd0 = (insn >> 0) & 0xf;
1455 rd1 = (insn >> 16) & 0xf;
1456 gen_op_iwmmxt_movq_M0_wRn(rd0);
1457 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1458 gen_op_iwmmxt_setpsr_nz();
1459 gen_op_iwmmxt_movq_wRn_M0(wrd);
1460 gen_op_iwmmxt_set_mup();
1461 gen_op_iwmmxt_set_cup();
1462 break;
1463 case 0x111: /* TMRC */
1464 if (insn & 0xf)
1465 return 1;
1466 rd = (insn >> 12) & 0xf;
1467 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1468 tmp = iwmmxt_load_creg(wrd);
1469 store_reg(s, rd, tmp);
18c9b560
AZ
1470 break;
1471 case 0x300: /* WANDN */
1472 wrd = (insn >> 12) & 0xf;
1473 rd0 = (insn >> 0) & 0xf;
1474 rd1 = (insn >> 16) & 0xf;
1475 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1476 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1477 gen_op_iwmmxt_andq_M0_wRn(rd1);
1478 gen_op_iwmmxt_setpsr_nz();
1479 gen_op_iwmmxt_movq_wRn_M0(wrd);
1480 gen_op_iwmmxt_set_mup();
1481 gen_op_iwmmxt_set_cup();
1482 break;
1483 case 0x200: /* WAND */
1484 wrd = (insn >> 12) & 0xf;
1485 rd0 = (insn >> 0) & 0xf;
1486 rd1 = (insn >> 16) & 0xf;
1487 gen_op_iwmmxt_movq_M0_wRn(rd0);
1488 gen_op_iwmmxt_andq_M0_wRn(rd1);
1489 gen_op_iwmmxt_setpsr_nz();
1490 gen_op_iwmmxt_movq_wRn_M0(wrd);
1491 gen_op_iwmmxt_set_mup();
1492 gen_op_iwmmxt_set_cup();
1493 break;
1494 case 0x810: case 0xa10: /* WMADD */
1495 wrd = (insn >> 12) & 0xf;
1496 rd0 = (insn >> 0) & 0xf;
1497 rd1 = (insn >> 16) & 0xf;
1498 gen_op_iwmmxt_movq_M0_wRn(rd0);
1499 if (insn & (1 << 21))
1500 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1501 else
1502 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1503 gen_op_iwmmxt_movq_wRn_M0(wrd);
1504 gen_op_iwmmxt_set_mup();
1505 break;
1506 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1507 wrd = (insn >> 12) & 0xf;
1508 rd0 = (insn >> 16) & 0xf;
1509 rd1 = (insn >> 0) & 0xf;
1510 gen_op_iwmmxt_movq_M0_wRn(rd0);
1511 switch ((insn >> 22) & 3) {
1512 case 0:
1513 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1514 break;
1515 case 1:
1516 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1517 break;
1518 case 2:
1519 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1520 break;
1521 case 3:
1522 return 1;
1523 }
1524 gen_op_iwmmxt_movq_wRn_M0(wrd);
1525 gen_op_iwmmxt_set_mup();
1526 gen_op_iwmmxt_set_cup();
1527 break;
1528 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1529 wrd = (insn >> 12) & 0xf;
1530 rd0 = (insn >> 16) & 0xf;
1531 rd1 = (insn >> 0) & 0xf;
1532 gen_op_iwmmxt_movq_M0_wRn(rd0);
1533 switch ((insn >> 22) & 3) {
1534 case 0:
1535 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1536 break;
1537 case 1:
1538 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1539 break;
1540 case 2:
1541 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1542 break;
1543 case 3:
1544 return 1;
1545 }
1546 gen_op_iwmmxt_movq_wRn_M0(wrd);
1547 gen_op_iwmmxt_set_mup();
1548 gen_op_iwmmxt_set_cup();
1549 break;
1550 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1551 wrd = (insn >> 12) & 0xf;
1552 rd0 = (insn >> 16) & 0xf;
1553 rd1 = (insn >> 0) & 0xf;
1554 gen_op_iwmmxt_movq_M0_wRn(rd0);
1555 if (insn & (1 << 22))
1556 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1557 else
1558 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1559 if (!(insn & (1 << 20)))
1560 gen_op_iwmmxt_addl_M0_wRn(wrd);
1561 gen_op_iwmmxt_movq_wRn_M0(wrd);
1562 gen_op_iwmmxt_set_mup();
1563 break;
1564 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1565 wrd = (insn >> 12) & 0xf;
1566 rd0 = (insn >> 16) & 0xf;
1567 rd1 = (insn >> 0) & 0xf;
1568 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1569 if (insn & (1 << 21)) {
1570 if (insn & (1 << 20))
1571 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1572 else
1573 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1574 } else {
1575 if (insn & (1 << 20))
1576 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1577 else
1578 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1579 }
18c9b560
AZ
1580 gen_op_iwmmxt_movq_wRn_M0(wrd);
1581 gen_op_iwmmxt_set_mup();
1582 break;
1583 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1584 wrd = (insn >> 12) & 0xf;
1585 rd0 = (insn >> 16) & 0xf;
1586 rd1 = (insn >> 0) & 0xf;
1587 gen_op_iwmmxt_movq_M0_wRn(rd0);
1588 if (insn & (1 << 21))
1589 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1590 else
1591 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1592 if (!(insn & (1 << 20))) {
e677137d
PB
1593 iwmmxt_load_reg(cpu_V1, wrd);
1594 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1595 }
1596 gen_op_iwmmxt_movq_wRn_M0(wrd);
1597 gen_op_iwmmxt_set_mup();
1598 break;
1599 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1600 wrd = (insn >> 12) & 0xf;
1601 rd0 = (insn >> 16) & 0xf;
1602 rd1 = (insn >> 0) & 0xf;
1603 gen_op_iwmmxt_movq_M0_wRn(rd0);
1604 switch ((insn >> 22) & 3) {
1605 case 0:
1606 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1607 break;
1608 case 1:
1609 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1610 break;
1611 case 2:
1612 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1613 break;
1614 case 3:
1615 return 1;
1616 }
1617 gen_op_iwmmxt_movq_wRn_M0(wrd);
1618 gen_op_iwmmxt_set_mup();
1619 gen_op_iwmmxt_set_cup();
1620 break;
1621 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1622 wrd = (insn >> 12) & 0xf;
1623 rd0 = (insn >> 16) & 0xf;
1624 rd1 = (insn >> 0) & 0xf;
1625 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1626 if (insn & (1 << 22)) {
1627 if (insn & (1 << 20))
1628 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1629 else
1630 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1631 } else {
1632 if (insn & (1 << 20))
1633 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1634 else
1635 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1636 }
18c9b560
AZ
1637 gen_op_iwmmxt_movq_wRn_M0(wrd);
1638 gen_op_iwmmxt_set_mup();
1639 gen_op_iwmmxt_set_cup();
1640 break;
1641 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1642 wrd = (insn >> 12) & 0xf;
1643 rd0 = (insn >> 16) & 0xf;
1644 rd1 = (insn >> 0) & 0xf;
1645 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1646 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1647 tcg_gen_andi_i32(tmp, tmp, 7);
1648 iwmmxt_load_reg(cpu_V1, rd1);
1649 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1650 tcg_temp_free_i32(tmp);
18c9b560
AZ
1651 gen_op_iwmmxt_movq_wRn_M0(wrd);
1652 gen_op_iwmmxt_set_mup();
1653 break;
1654 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1655 if (((insn >> 6) & 3) == 3)
1656 return 1;
18c9b560
AZ
1657 rd = (insn >> 12) & 0xf;
1658 wrd = (insn >> 16) & 0xf;
da6b5335 1659 tmp = load_reg(s, rd);
18c9b560
AZ
1660 gen_op_iwmmxt_movq_M0_wRn(wrd);
1661 switch ((insn >> 6) & 3) {
1662 case 0:
da6b5335
FN
1663 tmp2 = tcg_const_i32(0xff);
1664 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1665 break;
1666 case 1:
da6b5335
FN
1667 tmp2 = tcg_const_i32(0xffff);
1668 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1669 break;
1670 case 2:
da6b5335
FN
1671 tmp2 = tcg_const_i32(0xffffffff);
1672 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1673 break;
da6b5335
FN
1674 default:
1675 TCGV_UNUSED(tmp2);
1676 TCGV_UNUSED(tmp3);
18c9b560 1677 }
da6b5335
FN
1678 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1679 tcg_temp_free(tmp3);
1680 tcg_temp_free(tmp2);
7d1b0095 1681 tcg_temp_free_i32(tmp);
18c9b560
AZ
1682 gen_op_iwmmxt_movq_wRn_M0(wrd);
1683 gen_op_iwmmxt_set_mup();
1684 break;
1685 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1686 rd = (insn >> 12) & 0xf;
1687 wrd = (insn >> 16) & 0xf;
da6b5335 1688 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1689 return 1;
1690 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1691 tmp = tcg_temp_new_i32();
18c9b560
AZ
1692 switch ((insn >> 22) & 3) {
1693 case 0:
da6b5335
FN
1694 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1695 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1696 if (insn & 8) {
1697 tcg_gen_ext8s_i32(tmp, tmp);
1698 } else {
1699 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1700 }
1701 break;
1702 case 1:
da6b5335
FN
1703 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1704 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1705 if (insn & 8) {
1706 tcg_gen_ext16s_i32(tmp, tmp);
1707 } else {
1708 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1709 }
1710 break;
1711 case 2:
da6b5335
FN
1712 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1713 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1714 break;
18c9b560 1715 }
da6b5335 1716 store_reg(s, rd, tmp);
18c9b560
AZ
1717 break;
1718 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1719 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1720 return 1;
da6b5335 1721 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1722 switch ((insn >> 22) & 3) {
1723 case 0:
da6b5335 1724 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1725 break;
1726 case 1:
da6b5335 1727 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1728 break;
1729 case 2:
da6b5335 1730 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1731 break;
18c9b560 1732 }
da6b5335
FN
1733 tcg_gen_shli_i32(tmp, tmp, 28);
1734 gen_set_nzcv(tmp);
7d1b0095 1735 tcg_temp_free_i32(tmp);
18c9b560
AZ
1736 break;
1737 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1738 if (((insn >> 6) & 3) == 3)
1739 return 1;
18c9b560
AZ
1740 rd = (insn >> 12) & 0xf;
1741 wrd = (insn >> 16) & 0xf;
da6b5335 1742 tmp = load_reg(s, rd);
18c9b560
AZ
1743 switch ((insn >> 6) & 3) {
1744 case 0:
da6b5335 1745 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1746 break;
1747 case 1:
da6b5335 1748 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1749 break;
1750 case 2:
da6b5335 1751 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1752 break;
18c9b560 1753 }
7d1b0095 1754 tcg_temp_free_i32(tmp);
18c9b560
AZ
1755 gen_op_iwmmxt_movq_wRn_M0(wrd);
1756 gen_op_iwmmxt_set_mup();
1757 break;
1758 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1759 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1760 return 1;
da6b5335 1761 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1762 tmp2 = tcg_temp_new_i32();
da6b5335 1763 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1764 switch ((insn >> 22) & 3) {
1765 case 0:
1766 for (i = 0; i < 7; i ++) {
da6b5335
FN
1767 tcg_gen_shli_i32(tmp2, tmp2, 4);
1768 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1769 }
1770 break;
1771 case 1:
1772 for (i = 0; i < 3; i ++) {
da6b5335
FN
1773 tcg_gen_shli_i32(tmp2, tmp2, 8);
1774 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1775 }
1776 break;
1777 case 2:
da6b5335
FN
1778 tcg_gen_shli_i32(tmp2, tmp2, 16);
1779 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1780 break;
18c9b560 1781 }
da6b5335 1782 gen_set_nzcv(tmp);
7d1b0095
PM
1783 tcg_temp_free_i32(tmp2);
1784 tcg_temp_free_i32(tmp);
18c9b560
AZ
1785 break;
1786 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1787 wrd = (insn >> 12) & 0xf;
1788 rd0 = (insn >> 16) & 0xf;
1789 gen_op_iwmmxt_movq_M0_wRn(rd0);
1790 switch ((insn >> 22) & 3) {
1791 case 0:
e677137d 1792 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1793 break;
1794 case 1:
e677137d 1795 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1796 break;
1797 case 2:
e677137d 1798 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1799 break;
1800 case 3:
1801 return 1;
1802 }
1803 gen_op_iwmmxt_movq_wRn_M0(wrd);
1804 gen_op_iwmmxt_set_mup();
1805 break;
1806 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1807 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1808 return 1;
da6b5335 1809 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1810 tmp2 = tcg_temp_new_i32();
da6b5335 1811 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1812 switch ((insn >> 22) & 3) {
1813 case 0:
1814 for (i = 0; i < 7; i ++) {
da6b5335
FN
1815 tcg_gen_shli_i32(tmp2, tmp2, 4);
1816 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1817 }
1818 break;
1819 case 1:
1820 for (i = 0; i < 3; i ++) {
da6b5335
FN
1821 tcg_gen_shli_i32(tmp2, tmp2, 8);
1822 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1823 }
1824 break;
1825 case 2:
da6b5335
FN
1826 tcg_gen_shli_i32(tmp2, tmp2, 16);
1827 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1828 break;
18c9b560 1829 }
da6b5335 1830 gen_set_nzcv(tmp);
7d1b0095
PM
1831 tcg_temp_free_i32(tmp2);
1832 tcg_temp_free_i32(tmp);
18c9b560
AZ
1833 break;
1834 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1835 rd = (insn >> 12) & 0xf;
1836 rd0 = (insn >> 16) & 0xf;
da6b5335 1837 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1838 return 1;
1839 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1840 tmp = tcg_temp_new_i32();
18c9b560
AZ
1841 switch ((insn >> 22) & 3) {
1842 case 0:
da6b5335 1843 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1844 break;
1845 case 1:
da6b5335 1846 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1847 break;
1848 case 2:
da6b5335 1849 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1850 break;
18c9b560 1851 }
da6b5335 1852 store_reg(s, rd, tmp);
18c9b560
AZ
1853 break;
1854 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1855 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1856 wrd = (insn >> 12) & 0xf;
1857 rd0 = (insn >> 16) & 0xf;
1858 rd1 = (insn >> 0) & 0xf;
1859 gen_op_iwmmxt_movq_M0_wRn(rd0);
1860 switch ((insn >> 22) & 3) {
1861 case 0:
1862 if (insn & (1 << 21))
1863 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1864 else
1865 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1866 break;
1867 case 1:
1868 if (insn & (1 << 21))
1869 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1870 else
1871 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1872 break;
1873 case 2:
1874 if (insn & (1 << 21))
1875 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1876 else
1877 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1878 break;
1879 case 3:
1880 return 1;
1881 }
1882 gen_op_iwmmxt_movq_wRn_M0(wrd);
1883 gen_op_iwmmxt_set_mup();
1884 gen_op_iwmmxt_set_cup();
1885 break;
1886 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1887 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1888 wrd = (insn >> 12) & 0xf;
1889 rd0 = (insn >> 16) & 0xf;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0);
1891 switch ((insn >> 22) & 3) {
1892 case 0:
1893 if (insn & (1 << 21))
1894 gen_op_iwmmxt_unpacklsb_M0();
1895 else
1896 gen_op_iwmmxt_unpacklub_M0();
1897 break;
1898 case 1:
1899 if (insn & (1 << 21))
1900 gen_op_iwmmxt_unpacklsw_M0();
1901 else
1902 gen_op_iwmmxt_unpackluw_M0();
1903 break;
1904 case 2:
1905 if (insn & (1 << 21))
1906 gen_op_iwmmxt_unpacklsl_M0();
1907 else
1908 gen_op_iwmmxt_unpacklul_M0();
1909 break;
1910 case 3:
1911 return 1;
1912 }
1913 gen_op_iwmmxt_movq_wRn_M0(wrd);
1914 gen_op_iwmmxt_set_mup();
1915 gen_op_iwmmxt_set_cup();
1916 break;
1917 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1918 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1919 wrd = (insn >> 12) & 0xf;
1920 rd0 = (insn >> 16) & 0xf;
1921 gen_op_iwmmxt_movq_M0_wRn(rd0);
1922 switch ((insn >> 22) & 3) {
1923 case 0:
1924 if (insn & (1 << 21))
1925 gen_op_iwmmxt_unpackhsb_M0();
1926 else
1927 gen_op_iwmmxt_unpackhub_M0();
1928 break;
1929 case 1:
1930 if (insn & (1 << 21))
1931 gen_op_iwmmxt_unpackhsw_M0();
1932 else
1933 gen_op_iwmmxt_unpackhuw_M0();
1934 break;
1935 case 2:
1936 if (insn & (1 << 21))
1937 gen_op_iwmmxt_unpackhsl_M0();
1938 else
1939 gen_op_iwmmxt_unpackhul_M0();
1940 break;
1941 case 3:
1942 return 1;
1943 }
1944 gen_op_iwmmxt_movq_wRn_M0(wrd);
1945 gen_op_iwmmxt_set_mup();
1946 gen_op_iwmmxt_set_cup();
1947 break;
1948 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1949 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1950 if (((insn >> 22) & 3) == 0)
1951 return 1;
18c9b560
AZ
1952 wrd = (insn >> 12) & 0xf;
1953 rd0 = (insn >> 16) & 0xf;
1954 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1955 tmp = tcg_temp_new_i32();
da6b5335 1956 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 1957 tcg_temp_free_i32(tmp);
18c9b560 1958 return 1;
da6b5335 1959 }
18c9b560 1960 switch ((insn >> 22) & 3) {
18c9b560 1961 case 1:
947a2fa2 1962 gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1963 break;
1964 case 2:
947a2fa2 1965 gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1966 break;
1967 case 3:
947a2fa2 1968 gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1969 break;
1970 }
7d1b0095 1971 tcg_temp_free_i32(tmp);
18c9b560
AZ
1972 gen_op_iwmmxt_movq_wRn_M0(wrd);
1973 gen_op_iwmmxt_set_mup();
1974 gen_op_iwmmxt_set_cup();
1975 break;
1976 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1977 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1978 if (((insn >> 22) & 3) == 0)
1979 return 1;
18c9b560
AZ
1980 wrd = (insn >> 12) & 0xf;
1981 rd0 = (insn >> 16) & 0xf;
1982 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1983 tmp = tcg_temp_new_i32();
da6b5335 1984 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 1985 tcg_temp_free_i32(tmp);
18c9b560 1986 return 1;
da6b5335 1987 }
18c9b560 1988 switch ((insn >> 22) & 3) {
18c9b560 1989 case 1:
947a2fa2 1990 gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1991 break;
1992 case 2:
947a2fa2 1993 gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1994 break;
1995 case 3:
947a2fa2 1996 gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1997 break;
1998 }
7d1b0095 1999 tcg_temp_free_i32(tmp);
18c9b560
AZ
2000 gen_op_iwmmxt_movq_wRn_M0(wrd);
2001 gen_op_iwmmxt_set_mup();
2002 gen_op_iwmmxt_set_cup();
2003 break;
2004 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2005 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2006 if (((insn >> 22) & 3) == 0)
2007 return 1;
18c9b560
AZ
2008 wrd = (insn >> 12) & 0xf;
2009 rd0 = (insn >> 16) & 0xf;
2010 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2011 tmp = tcg_temp_new_i32();
da6b5335 2012 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2013 tcg_temp_free_i32(tmp);
18c9b560 2014 return 1;
da6b5335 2015 }
18c9b560 2016 switch ((insn >> 22) & 3) {
18c9b560 2017 case 1:
947a2fa2 2018 gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2019 break;
2020 case 2:
947a2fa2 2021 gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2022 break;
2023 case 3:
947a2fa2 2024 gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2025 break;
2026 }
7d1b0095 2027 tcg_temp_free_i32(tmp);
18c9b560
AZ
2028 gen_op_iwmmxt_movq_wRn_M0(wrd);
2029 gen_op_iwmmxt_set_mup();
2030 gen_op_iwmmxt_set_cup();
2031 break;
2032 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2033 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2034 if (((insn >> 22) & 3) == 0)
2035 return 1;
18c9b560
AZ
2036 wrd = (insn >> 12) & 0xf;
2037 rd0 = (insn >> 16) & 0xf;
2038 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2039 tmp = tcg_temp_new_i32();
18c9b560 2040 switch ((insn >> 22) & 3) {
18c9b560 2041 case 1:
da6b5335 2042 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2043 tcg_temp_free_i32(tmp);
18c9b560 2044 return 1;
da6b5335 2045 }
947a2fa2 2046 gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2047 break;
2048 case 2:
da6b5335 2049 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2050 tcg_temp_free_i32(tmp);
18c9b560 2051 return 1;
da6b5335 2052 }
947a2fa2 2053 gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2054 break;
2055 case 3:
da6b5335 2056 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2057 tcg_temp_free_i32(tmp);
18c9b560 2058 return 1;
da6b5335 2059 }
947a2fa2 2060 gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2061 break;
2062 }
7d1b0095 2063 tcg_temp_free_i32(tmp);
18c9b560
AZ
2064 gen_op_iwmmxt_movq_wRn_M0(wrd);
2065 gen_op_iwmmxt_set_mup();
2066 gen_op_iwmmxt_set_cup();
2067 break;
2068 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2069 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2070 wrd = (insn >> 12) & 0xf;
2071 rd0 = (insn >> 16) & 0xf;
2072 rd1 = (insn >> 0) & 0xf;
2073 gen_op_iwmmxt_movq_M0_wRn(rd0);
2074 switch ((insn >> 22) & 3) {
2075 case 0:
2076 if (insn & (1 << 21))
2077 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2078 else
2079 gen_op_iwmmxt_minub_M0_wRn(rd1);
2080 break;
2081 case 1:
2082 if (insn & (1 << 21))
2083 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2084 else
2085 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2086 break;
2087 case 2:
2088 if (insn & (1 << 21))
2089 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2090 else
2091 gen_op_iwmmxt_minul_M0_wRn(rd1);
2092 break;
2093 case 3:
2094 return 1;
2095 }
2096 gen_op_iwmmxt_movq_wRn_M0(wrd);
2097 gen_op_iwmmxt_set_mup();
2098 break;
2099 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2100 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2101 wrd = (insn >> 12) & 0xf;
2102 rd0 = (insn >> 16) & 0xf;
2103 rd1 = (insn >> 0) & 0xf;
2104 gen_op_iwmmxt_movq_M0_wRn(rd0);
2105 switch ((insn >> 22) & 3) {
2106 case 0:
2107 if (insn & (1 << 21))
2108 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2109 else
2110 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2111 break;
2112 case 1:
2113 if (insn & (1 << 21))
2114 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2115 else
2116 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2117 break;
2118 case 2:
2119 if (insn & (1 << 21))
2120 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2121 else
2122 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2123 break;
2124 case 3:
2125 return 1;
2126 }
2127 gen_op_iwmmxt_movq_wRn_M0(wrd);
2128 gen_op_iwmmxt_set_mup();
2129 break;
2130 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2131 case 0x402: case 0x502: case 0x602: case 0x702:
2132 wrd = (insn >> 12) & 0xf;
2133 rd0 = (insn >> 16) & 0xf;
2134 rd1 = (insn >> 0) & 0xf;
2135 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2136 tmp = tcg_const_i32((insn >> 20) & 3);
2137 iwmmxt_load_reg(cpu_V1, rd1);
2138 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2139 tcg_temp_free(tmp);
18c9b560
AZ
2140 gen_op_iwmmxt_movq_wRn_M0(wrd);
2141 gen_op_iwmmxt_set_mup();
2142 break;
2143 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2144 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2145 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2146 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2147 wrd = (insn >> 12) & 0xf;
2148 rd0 = (insn >> 16) & 0xf;
2149 rd1 = (insn >> 0) & 0xf;
2150 gen_op_iwmmxt_movq_M0_wRn(rd0);
2151 switch ((insn >> 20) & 0xf) {
2152 case 0x0:
2153 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2154 break;
2155 case 0x1:
2156 gen_op_iwmmxt_subub_M0_wRn(rd1);
2157 break;
2158 case 0x3:
2159 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2160 break;
2161 case 0x4:
2162 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2163 break;
2164 case 0x5:
2165 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2166 break;
2167 case 0x7:
2168 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2169 break;
2170 case 0x8:
2171 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2172 break;
2173 case 0x9:
2174 gen_op_iwmmxt_subul_M0_wRn(rd1);
2175 break;
2176 case 0xb:
2177 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2178 break;
2179 default:
2180 return 1;
2181 }
2182 gen_op_iwmmxt_movq_wRn_M0(wrd);
2183 gen_op_iwmmxt_set_mup();
2184 gen_op_iwmmxt_set_cup();
2185 break;
2186 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2187 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2188 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2189 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2190 wrd = (insn >> 12) & 0xf;
2191 rd0 = (insn >> 16) & 0xf;
2192 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2193 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
947a2fa2 2194 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
da6b5335 2195 tcg_temp_free(tmp);
18c9b560
AZ
2196 gen_op_iwmmxt_movq_wRn_M0(wrd);
2197 gen_op_iwmmxt_set_mup();
2198 gen_op_iwmmxt_set_cup();
2199 break;
2200 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2201 case 0x418: case 0x518: case 0x618: case 0x718:
2202 case 0x818: case 0x918: case 0xa18: case 0xb18:
2203 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2204 wrd = (insn >> 12) & 0xf;
2205 rd0 = (insn >> 16) & 0xf;
2206 rd1 = (insn >> 0) & 0xf;
2207 gen_op_iwmmxt_movq_M0_wRn(rd0);
2208 switch ((insn >> 20) & 0xf) {
2209 case 0x0:
2210 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2211 break;
2212 case 0x1:
2213 gen_op_iwmmxt_addub_M0_wRn(rd1);
2214 break;
2215 case 0x3:
2216 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2217 break;
2218 case 0x4:
2219 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2220 break;
2221 case 0x5:
2222 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2223 break;
2224 case 0x7:
2225 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2226 break;
2227 case 0x8:
2228 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2229 break;
2230 case 0x9:
2231 gen_op_iwmmxt_addul_M0_wRn(rd1);
2232 break;
2233 case 0xb:
2234 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2235 break;
2236 default:
2237 return 1;
2238 }
2239 gen_op_iwmmxt_movq_wRn_M0(wrd);
2240 gen_op_iwmmxt_set_mup();
2241 gen_op_iwmmxt_set_cup();
2242 break;
2243 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2244 case 0x408: case 0x508: case 0x608: case 0x708:
2245 case 0x808: case 0x908: case 0xa08: case 0xb08:
2246 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2247 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2248 return 1;
18c9b560
AZ
2249 wrd = (insn >> 12) & 0xf;
2250 rd0 = (insn >> 16) & 0xf;
2251 rd1 = (insn >> 0) & 0xf;
2252 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2253 switch ((insn >> 22) & 3) {
18c9b560
AZ
2254 case 1:
2255 if (insn & (1 << 21))
2256 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2257 else
2258 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2259 break;
2260 case 2:
2261 if (insn & (1 << 21))
2262 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2263 else
2264 gen_op_iwmmxt_packul_M0_wRn(rd1);
2265 break;
2266 case 3:
2267 if (insn & (1 << 21))
2268 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2269 else
2270 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2271 break;
2272 }
2273 gen_op_iwmmxt_movq_wRn_M0(wrd);
2274 gen_op_iwmmxt_set_mup();
2275 gen_op_iwmmxt_set_cup();
2276 break;
2277 case 0x201: case 0x203: case 0x205: case 0x207:
2278 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2279 case 0x211: case 0x213: case 0x215: case 0x217:
2280 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2281 wrd = (insn >> 5) & 0xf;
2282 rd0 = (insn >> 12) & 0xf;
2283 rd1 = (insn >> 0) & 0xf;
2284 if (rd0 == 0xf || rd1 == 0xf)
2285 return 1;
2286 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2287 tmp = load_reg(s, rd0);
2288 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2289 switch ((insn >> 16) & 0xf) {
2290 case 0x0: /* TMIA */
da6b5335 2291 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2292 break;
2293 case 0x8: /* TMIAPH */
da6b5335 2294 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2295 break;
2296 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2297 if (insn & (1 << 16))
da6b5335 2298 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2299 if (insn & (1 << 17))
da6b5335
FN
2300 tcg_gen_shri_i32(tmp2, tmp2, 16);
2301 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2302 break;
2303 default:
7d1b0095
PM
2304 tcg_temp_free_i32(tmp2);
2305 tcg_temp_free_i32(tmp);
18c9b560
AZ
2306 return 1;
2307 }
7d1b0095
PM
2308 tcg_temp_free_i32(tmp2);
2309 tcg_temp_free_i32(tmp);
18c9b560
AZ
2310 gen_op_iwmmxt_movq_wRn_M0(wrd);
2311 gen_op_iwmmxt_set_mup();
2312 break;
2313 default:
2314 return 1;
2315 }
2316
2317 return 0;
2318}
2319
2320/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2321 (ie. an undefined instruction). */
2322static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2323{
2324 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2325 TCGv tmp, tmp2;
18c9b560
AZ
2326
2327 if ((insn & 0x0ff00f10) == 0x0e200010) {
2328 /* Multiply with Internal Accumulate Format */
2329 rd0 = (insn >> 12) & 0xf;
2330 rd1 = insn & 0xf;
2331 acc = (insn >> 5) & 7;
2332
2333 if (acc != 0)
2334 return 1;
2335
3a554c0f
FN
2336 tmp = load_reg(s, rd0);
2337 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2338 switch ((insn >> 16) & 0xf) {
2339 case 0x0: /* MIA */
3a554c0f 2340 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2341 break;
2342 case 0x8: /* MIAPH */
3a554c0f 2343 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2344 break;
2345 case 0xc: /* MIABB */
2346 case 0xd: /* MIABT */
2347 case 0xe: /* MIATB */
2348 case 0xf: /* MIATT */
18c9b560 2349 if (insn & (1 << 16))
3a554c0f 2350 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2351 if (insn & (1 << 17))
3a554c0f
FN
2352 tcg_gen_shri_i32(tmp2, tmp2, 16);
2353 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2354 break;
2355 default:
2356 return 1;
2357 }
7d1b0095
PM
2358 tcg_temp_free_i32(tmp2);
2359 tcg_temp_free_i32(tmp);
18c9b560
AZ
2360
2361 gen_op_iwmmxt_movq_wRn_M0(acc);
2362 return 0;
2363 }
2364
2365 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2366 /* Internal Accumulator Access Format */
2367 rdhi = (insn >> 16) & 0xf;
2368 rdlo = (insn >> 12) & 0xf;
2369 acc = insn & 7;
2370
2371 if (acc != 0)
2372 return 1;
2373
2374 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2375 iwmmxt_load_reg(cpu_V0, acc);
2376 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2377 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2378 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2379 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2380 } else { /* MAR */
3a554c0f
FN
2381 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2382 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2383 }
2384 return 0;
2385 }
2386
2387 return 1;
2388}
2389
c1713132
AZ
/* Disassemble system coprocessor instruction. Return nonzero if
   instruction is not defined. */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    /* Generic coprocessor accesses are privileged-only here. */
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        /* Read (MRC): coprocessor -> core register, via the cp_read hook. */
        if (!env->cp[cp].cp_read)
            return 1;
        /* Make the exact PC visible to the helper before calling out. */
        gen_set_pc_im(s->pc);
        tmp = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        /* Write (MCR): core register -> coprocessor, via the cp_write hook. */
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
    }
    return 0;
}
2422
9ee6e8bb
PB
2423static int cp15_user_ok(uint32_t insn)
2424{
2425 int cpn = (insn >> 16) & 0xf;
2426 int cpm = insn & 0xf;
2427 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2428
2429 if (cpn == 13 && cpm == 0) {
2430 /* TLS register. */
2431 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2432 return 1;
2433 }
2434 if (cpn == 7) {
2435 /* ISB, DSB, DMB. */
2436 if ((cpm == 5 && op == 4)
2437 || (cpm == 10 && (op == 4 || op == 5)))
2438 return 1;
2439 }
2440 return 0;
2441}
2442
3f26c122
RV
/* Handle cp15 accesses to the V6K TLS registers (c13, c0, ops 2-4).
 * Returns nonzero if the access was recognized and code was emitted;
 * returns zero when the insn is not a TLS access (caller falls back to
 * the generic cp15 helper path).  */
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    /* TLS registers only exist from the V6K extensions onwards. */
    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        /* MRC: read a TLS register into core register rd. */
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            /* Not one of the TLS ops; let the generic path handle it. */
            return 0;
        }
        store_reg(s, rd, tmp);

    } else {
        /* MCR: write core register rd to a TLS register. */
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            /* Not a TLS op: discard the temp and decline the insn. */
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
2491
b5ff1b31
FB
/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
   instruction is not defined. */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc: not implemented. */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp: not implemented. */
        return 1;
    }
    /* User mode may only touch the registers whitelisted by cp15_user_ok. */
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }

    /* Pre-v7 versions of the architecture implemented WFI via coprocessor
     * instructions rather than a separate instruction.
     */
    if ((insn & 0x0fff0fff) == 0x0e070f90) {
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt: sync the PC and end the TB. */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    }

    if ((insn & 0x0fff0fff) == 0x0e070f58) {
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt. */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise fall through to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
    }

    rd = (insn >> 12) & 0xf;

    /* V6K TLS register accesses are emitted inline rather than via the
     * generic get/set_cp15 helpers. */
    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        /* MRC */
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes. */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        /* MCR */
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour. */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
            (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
2578
9ee6e8bb
PB
/* Shift x right by n bits; a negative n means shift left by -n.  This
 * lets VFP_SREG below work for register fields at any bit position,
 * including bit 0. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: the 4-bit field at
 * "bigbit" supplies the high bits, and a separate "smallbit" supplies
 * the low bit. */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into reg.  VFP3 has 32 D
 * registers, with "smallbit" providing the extra high bit; on pre-VFP3
 * that bit must be zero or the insn is undefined (the macro returns 1
 * from the enclosing function). */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* The D (dest), N and M operand fields at their standard VFP encoding
 * positions within the instruction word. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2598
4373f3ce
PB
/* Move between integer and VFP cores. */
/* Copy the scratch VFP single register (cpu_F0s) into a fresh TCG temp;
 * caller owns (and must free or store) the returned temp. */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}
2606
/* Move tmp into the scratch VFP single register (cpu_F0s), consuming
 * (freeing) tmp. */
static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
2612
ad69471c
PB
/* Replicate the byte at bit offset "shift" of var into all four byte
 * lanes of var (in place). */
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);  /* move source byte to bits 0-7 */
    tcg_gen_ext8u_i32(var, var);            /* isolate the byte */
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);          /* byte -> both bytes of halfword */
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);          /* halfword -> both halfwords */
    tcg_temp_free_i32(tmp);
}
2625
/* Replicate the low 16 bits of var into both halfword lanes (in place). */
static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);   /* keep only the low halfword */
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);  /* copy it into the high halfword */
    tcg_temp_free_i32(tmp);
}
2634
/* Replicate the high 16 bits of var into both halfword lanes (in place). */
static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000); /* keep only the high halfword */
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);          /* copy it into the low halfword */
    tcg_temp_free_i32(tmp);
}
2643
8e18cde3
PM
/* Load a single Neon element and replicate into a 32 bit TCG reg */
/* size: 0 = byte, 1 = halfword, 2 = word.  The loaded element is
 * duplicated across the 32-bit result (a word needs no duplication).
 * Caller owns the returned temp. */
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings. */
        abort();
    }
    return tmp;
}
2665
b7bcbe95
FB
2666/* Disassemble a VFP instruction. Returns nonzero if an error occured
2667 (ie. an undefined instruction). */
2668static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2669{
2670 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2671 int dp, veclen;
312eea9f 2672 TCGv addr;
4373f3ce 2673 TCGv tmp;
ad69471c 2674 TCGv tmp2;
b7bcbe95 2675
40f137e1
PB
2676 if (!arm_feature(env, ARM_FEATURE_VFP))
2677 return 1;
2678
5df8bac1 2679 if (!s->vfp_enabled) {
9ee6e8bb 2680 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2681 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2682 return 1;
2683 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2684 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2685 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2686 return 1;
2687 }
b7bcbe95
FB
2688 dp = ((insn & 0xf00) == 0xb00);
2689 switch ((insn >> 24) & 0xf) {
2690 case 0xe:
2691 if (insn & (1 << 4)) {
2692 /* single register transfer */
b7bcbe95
FB
2693 rd = (insn >> 12) & 0xf;
2694 if (dp) {
9ee6e8bb
PB
2695 int size;
2696 int pass;
2697
2698 VFP_DREG_N(rn, insn);
2699 if (insn & 0xf)
b7bcbe95 2700 return 1;
9ee6e8bb
PB
2701 if (insn & 0x00c00060
2702 && !arm_feature(env, ARM_FEATURE_NEON))
2703 return 1;
2704
2705 pass = (insn >> 21) & 1;
2706 if (insn & (1 << 22)) {
2707 size = 0;
2708 offset = ((insn >> 5) & 3) * 8;
2709 } else if (insn & (1 << 5)) {
2710 size = 1;
2711 offset = (insn & (1 << 6)) ? 16 : 0;
2712 } else {
2713 size = 2;
2714 offset = 0;
2715 }
18c9b560 2716 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2717 /* vfp->arm */
ad69471c 2718 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2719 switch (size) {
2720 case 0:
9ee6e8bb 2721 if (offset)
ad69471c 2722 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2723 if (insn & (1 << 23))
ad69471c 2724 gen_uxtb(tmp);
9ee6e8bb 2725 else
ad69471c 2726 gen_sxtb(tmp);
9ee6e8bb
PB
2727 break;
2728 case 1:
9ee6e8bb
PB
2729 if (insn & (1 << 23)) {
2730 if (offset) {
ad69471c 2731 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2732 } else {
ad69471c 2733 gen_uxth(tmp);
9ee6e8bb
PB
2734 }
2735 } else {
2736 if (offset) {
ad69471c 2737 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2738 } else {
ad69471c 2739 gen_sxth(tmp);
9ee6e8bb
PB
2740 }
2741 }
2742 break;
2743 case 2:
9ee6e8bb
PB
2744 break;
2745 }
ad69471c 2746 store_reg(s, rd, tmp);
b7bcbe95
FB
2747 } else {
2748 /* arm->vfp */
ad69471c 2749 tmp = load_reg(s, rd);
9ee6e8bb
PB
2750 if (insn & (1 << 23)) {
2751 /* VDUP */
2752 if (size == 0) {
ad69471c 2753 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2754 } else if (size == 1) {
ad69471c 2755 gen_neon_dup_low16(tmp);
9ee6e8bb 2756 }
cbbccffc 2757 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2758 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2759 tcg_gen_mov_i32(tmp2, tmp);
2760 neon_store_reg(rn, n, tmp2);
2761 }
2762 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2763 } else {
2764 /* VMOV */
2765 switch (size) {
2766 case 0:
ad69471c
PB
2767 tmp2 = neon_load_reg(rn, pass);
2768 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2769 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2770 break;
2771 case 1:
ad69471c
PB
2772 tmp2 = neon_load_reg(rn, pass);
2773 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2774 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2775 break;
2776 case 2:
9ee6e8bb
PB
2777 break;
2778 }
ad69471c 2779 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2780 }
b7bcbe95 2781 }
9ee6e8bb
PB
2782 } else { /* !dp */
2783 if ((insn & 0x6f) != 0x00)
2784 return 1;
2785 rn = VFP_SREG_N(insn);
18c9b560 2786 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2787 /* vfp->arm */
2788 if (insn & (1 << 21)) {
2789 /* system register */
40f137e1 2790 rn >>= 1;
9ee6e8bb 2791
b7bcbe95 2792 switch (rn) {
40f137e1 2793 case ARM_VFP_FPSID:
4373f3ce 2794 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2795 VFP3 restricts all id registers to privileged
2796 accesses. */
2797 if (IS_USER(s)
2798 && arm_feature(env, ARM_FEATURE_VFP3))
2799 return 1;
4373f3ce 2800 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2801 break;
40f137e1 2802 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2803 if (IS_USER(s))
2804 return 1;
4373f3ce 2805 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2806 break;
40f137e1
PB
2807 case ARM_VFP_FPINST:
2808 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2809 /* Not present in VFP3. */
2810 if (IS_USER(s)
2811 || arm_feature(env, ARM_FEATURE_VFP3))
2812 return 1;
4373f3ce 2813 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2814 break;
40f137e1 2815 case ARM_VFP_FPSCR:
601d70b9 2816 if (rd == 15) {
4373f3ce
PB
2817 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2818 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2819 } else {
7d1b0095 2820 tmp = tcg_temp_new_i32();
4373f3ce
PB
2821 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2822 }
b7bcbe95 2823 break;
9ee6e8bb
PB
2824 case ARM_VFP_MVFR0:
2825 case ARM_VFP_MVFR1:
2826 if (IS_USER(s)
2827 || !arm_feature(env, ARM_FEATURE_VFP3))
2828 return 1;
4373f3ce 2829 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2830 break;
b7bcbe95
FB
2831 default:
2832 return 1;
2833 }
2834 } else {
2835 gen_mov_F0_vreg(0, rn);
4373f3ce 2836 tmp = gen_vfp_mrs();
b7bcbe95
FB
2837 }
2838 if (rd == 15) {
b5ff1b31 2839 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2840 gen_set_nzcv(tmp);
7d1b0095 2841 tcg_temp_free_i32(tmp);
4373f3ce
PB
2842 } else {
2843 store_reg(s, rd, tmp);
2844 }
b7bcbe95
FB
2845 } else {
2846 /* arm->vfp */
4373f3ce 2847 tmp = load_reg(s, rd);
b7bcbe95 2848 if (insn & (1 << 21)) {
40f137e1 2849 rn >>= 1;
b7bcbe95
FB
2850 /* system register */
2851 switch (rn) {
40f137e1 2852 case ARM_VFP_FPSID:
9ee6e8bb
PB
2853 case ARM_VFP_MVFR0:
2854 case ARM_VFP_MVFR1:
b7bcbe95
FB
2855 /* Writes are ignored. */
2856 break;
40f137e1 2857 case ARM_VFP_FPSCR:
4373f3ce 2858 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2859 tcg_temp_free_i32(tmp);
b5ff1b31 2860 gen_lookup_tb(s);
b7bcbe95 2861 break;
40f137e1 2862 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2863 if (IS_USER(s))
2864 return 1;
71b3c3de
JR
2865 /* TODO: VFP subarchitecture support.
2866 * For now, keep the EN bit only */
2867 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2868 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2869 gen_lookup_tb(s);
2870 break;
2871 case ARM_VFP_FPINST:
2872 case ARM_VFP_FPINST2:
4373f3ce 2873 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2874 break;
b7bcbe95
FB
2875 default:
2876 return 1;
2877 }
2878 } else {
4373f3ce 2879 gen_vfp_msr(tmp);
b7bcbe95
FB
2880 gen_mov_vreg_F0(0, rn);
2881 }
2882 }
2883 }
2884 } else {
2885 /* data processing */
2886 /* The opcode is in bits 23, 21, 20 and 6. */
2887 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2888 if (dp) {
2889 if (op == 15) {
2890 /* rn is opcode */
2891 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2892 } else {
2893 /* rn is register number */
9ee6e8bb 2894 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2895 }
2896
04595bf6 2897 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2898 /* Integer or single precision destination. */
9ee6e8bb 2899 rd = VFP_SREG_D(insn);
b7bcbe95 2900 } else {
9ee6e8bb 2901 VFP_DREG_D(rd, insn);
b7bcbe95 2902 }
04595bf6
PM
2903 if (op == 15 &&
2904 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2905 /* VCVT from int is always from S reg regardless of dp bit.
2906 * VCVT with immediate frac_bits has same format as SREG_M
2907 */
2908 rm = VFP_SREG_M(insn);
b7bcbe95 2909 } else {
9ee6e8bb 2910 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2911 }
2912 } else {
9ee6e8bb 2913 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2914 if (op == 15 && rn == 15) {
2915 /* Double precision destination. */
9ee6e8bb
PB
2916 VFP_DREG_D(rd, insn);
2917 } else {
2918 rd = VFP_SREG_D(insn);
2919 }
04595bf6
PM
2920 /* NB that we implicitly rely on the encoding for the frac_bits
2921 * in VCVT of fixed to float being the same as that of an SREG_M
2922 */
9ee6e8bb 2923 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2924 }
2925
69d1fc22 2926 veclen = s->vec_len;
b7bcbe95
FB
2927 if (op == 15 && rn > 3)
2928 veclen = 0;
2929
2930 /* Shut up compiler warnings. */
2931 delta_m = 0;
2932 delta_d = 0;
2933 bank_mask = 0;
3b46e624 2934
b7bcbe95
FB
2935 if (veclen > 0) {
2936 if (dp)
2937 bank_mask = 0xc;
2938 else
2939 bank_mask = 0x18;
2940
2941 /* Figure out what type of vector operation this is. */
2942 if ((rd & bank_mask) == 0) {
2943 /* scalar */
2944 veclen = 0;
2945 } else {
2946 if (dp)
69d1fc22 2947 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2948 else
69d1fc22 2949 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2950
2951 if ((rm & bank_mask) == 0) {
2952 /* mixed scalar/vector */
2953 delta_m = 0;
2954 } else {
2955 /* vector */
2956 delta_m = delta_d;
2957 }
2958 }
2959 }
2960
2961 /* Load the initial operands. */
2962 if (op == 15) {
2963 switch (rn) {
2964 case 16:
2965 case 17:
2966 /* Integer source */
2967 gen_mov_F0_vreg(0, rm);
2968 break;
2969 case 8:
2970 case 9:
2971 /* Compare */
2972 gen_mov_F0_vreg(dp, rd);
2973 gen_mov_F1_vreg(dp, rm);
2974 break;
2975 case 10:
2976 case 11:
2977 /* Compare with zero */
2978 gen_mov_F0_vreg(dp, rd);
2979 gen_vfp_F1_ld0(dp);
2980 break;
9ee6e8bb
PB
2981 case 20:
2982 case 21:
2983 case 22:
2984 case 23:
644ad806
PB
2985 case 28:
2986 case 29:
2987 case 30:
2988 case 31:
9ee6e8bb
PB
2989 /* Source and destination the same. */
2990 gen_mov_F0_vreg(dp, rd);
2991 break;
b7bcbe95
FB
2992 default:
2993 /* One source operand. */
2994 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2995 break;
b7bcbe95
FB
2996 }
2997 } else {
2998 /* Two source operands. */
2999 gen_mov_F0_vreg(dp, rn);
3000 gen_mov_F1_vreg(dp, rm);
3001 }
3002
3003 for (;;) {
3004 /* Perform the calculation. */
3005 switch (op) {
3006 case 0: /* mac: fd + (fn * fm) */
3007 gen_vfp_mul(dp);
3008 gen_mov_F1_vreg(dp, rd);
3009 gen_vfp_add(dp);
3010 break;
3011 case 1: /* nmac: fd - (fn * fm) */
3012 gen_vfp_mul(dp);
3013 gen_vfp_neg(dp);
3014 gen_mov_F1_vreg(dp, rd);
3015 gen_vfp_add(dp);
3016 break;
3017 case 2: /* msc: -fd + (fn * fm) */
3018 gen_vfp_mul(dp);
3019 gen_mov_F1_vreg(dp, rd);
3020 gen_vfp_sub(dp);
3021 break;
3022 case 3: /* nmsc: -fd - (fn * fm) */
3023 gen_vfp_mul(dp);
b7bcbe95 3024 gen_vfp_neg(dp);
c9fb531a
PB
3025 gen_mov_F1_vreg(dp, rd);
3026 gen_vfp_sub(dp);
b7bcbe95
FB
3027 break;
3028 case 4: /* mul: fn * fm */
3029 gen_vfp_mul(dp);
3030 break;
3031 case 5: /* nmul: -(fn * fm) */
3032 gen_vfp_mul(dp);
3033 gen_vfp_neg(dp);
3034 break;
3035 case 6: /* add: fn + fm */
3036 gen_vfp_add(dp);
3037 break;
3038 case 7: /* sub: fn - fm */
3039 gen_vfp_sub(dp);
3040 break;
3041 case 8: /* div: fn / fm */
3042 gen_vfp_div(dp);
3043 break;
9ee6e8bb
PB
3044 case 14: /* fconst */
3045 if (!arm_feature(env, ARM_FEATURE_VFP3))
3046 return 1;
3047
3048 n = (insn << 12) & 0x80000000;
3049 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3050 if (dp) {
3051 if (i & 0x40)
3052 i |= 0x3f80;
3053 else
3054 i |= 0x4000;
3055 n |= i << 16;
4373f3ce 3056 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3057 } else {
3058 if (i & 0x40)
3059 i |= 0x780;
3060 else
3061 i |= 0x800;
3062 n |= i << 19;
5b340b51 3063 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3064 }
9ee6e8bb 3065 break;
b7bcbe95
FB
3066 case 15: /* extension space */
3067 switch (rn) {
3068 case 0: /* cpy */
3069 /* no-op */
3070 break;
3071 case 1: /* abs */
3072 gen_vfp_abs(dp);
3073 break;
3074 case 2: /* neg */
3075 gen_vfp_neg(dp);
3076 break;
3077 case 3: /* sqrt */
3078 gen_vfp_sqrt(dp);
3079 break;
60011498
PB
3080 case 4: /* vcvtb.f32.f16 */
3081 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3082 return 1;
3083 tmp = gen_vfp_mrs();
3084 tcg_gen_ext16u_i32(tmp, tmp);
3085 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3086 tcg_temp_free_i32(tmp);
60011498
PB
3087 break;
3088 case 5: /* vcvtt.f32.f16 */
3089 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3090 return 1;
3091 tmp = gen_vfp_mrs();
3092 tcg_gen_shri_i32(tmp, tmp, 16);
3093 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3094 tcg_temp_free_i32(tmp);
60011498
PB
3095 break;
3096 case 6: /* vcvtb.f16.f32 */
3097 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3098 return 1;
7d1b0095 3099 tmp = tcg_temp_new_i32();
60011498
PB
3100 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3101 gen_mov_F0_vreg(0, rd);
3102 tmp2 = gen_vfp_mrs();
3103 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3104 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3105 tcg_temp_free_i32(tmp2);
60011498
PB
3106 gen_vfp_msr(tmp);
3107 break;
3108 case 7: /* vcvtt.f16.f32 */
3109 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3110 return 1;
7d1b0095 3111 tmp = tcg_temp_new_i32();
60011498
PB
3112 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3113 tcg_gen_shli_i32(tmp, tmp, 16);
3114 gen_mov_F0_vreg(0, rd);
3115 tmp2 = gen_vfp_mrs();
3116 tcg_gen_ext16u_i32(tmp2, tmp2);
3117 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3118 tcg_temp_free_i32(tmp2);
60011498
PB
3119 gen_vfp_msr(tmp);
3120 break;
b7bcbe95
FB
3121 case 8: /* cmp */
3122 gen_vfp_cmp(dp);
3123 break;
3124 case 9: /* cmpe */
3125 gen_vfp_cmpe(dp);
3126 break;
3127 case 10: /* cmpz */
3128 gen_vfp_cmp(dp);
3129 break;
3130 case 11: /* cmpez */
3131 gen_vfp_F1_ld0(dp);
3132 gen_vfp_cmpe(dp);
3133 break;
3134 case 15: /* single<->double conversion */
3135 if (dp)
4373f3ce 3136 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3137 else
4373f3ce 3138 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3139 break;
3140 case 16: /* fuito */
3141 gen_vfp_uito(dp);
3142 break;
3143 case 17: /* fsito */
3144 gen_vfp_sito(dp);
3145 break;
9ee6e8bb
PB
3146 case 20: /* fshto */
3147 if (!arm_feature(env, ARM_FEATURE_VFP3))
3148 return 1;
644ad806 3149 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3150 break;
3151 case 21: /* fslto */
3152 if (!arm_feature(env, ARM_FEATURE_VFP3))
3153 return 1;
644ad806 3154 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3155 break;
3156 case 22: /* fuhto */
3157 if (!arm_feature(env, ARM_FEATURE_VFP3))
3158 return 1;
644ad806 3159 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3160 break;
3161 case 23: /* fulto */
3162 if (!arm_feature(env, ARM_FEATURE_VFP3))
3163 return 1;
644ad806 3164 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3165 break;
b7bcbe95
FB
3166 case 24: /* ftoui */
3167 gen_vfp_toui(dp);
3168 break;
3169 case 25: /* ftouiz */
3170 gen_vfp_touiz(dp);
3171 break;
3172 case 26: /* ftosi */
3173 gen_vfp_tosi(dp);
3174 break;
3175 case 27: /* ftosiz */
3176 gen_vfp_tosiz(dp);
3177 break;
9ee6e8bb
PB
3178 case 28: /* ftosh */
3179 if (!arm_feature(env, ARM_FEATURE_VFP3))
3180 return 1;
644ad806 3181 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3182 break;
3183 case 29: /* ftosl */
3184 if (!arm_feature(env, ARM_FEATURE_VFP3))
3185 return 1;
644ad806 3186 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3187 break;
3188 case 30: /* ftouh */
3189 if (!arm_feature(env, ARM_FEATURE_VFP3))
3190 return 1;
644ad806 3191 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3192 break;
3193 case 31: /* ftoul */
3194 if (!arm_feature(env, ARM_FEATURE_VFP3))
3195 return 1;
644ad806 3196 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3197 break;
b7bcbe95
FB
3198 default: /* undefined */
3199 printf ("rn:%d\n", rn);
3200 return 1;
3201 }
3202 break;
3203 default: /* undefined */
3204 printf ("op:%d\n", op);
3205 return 1;
3206 }
3207
3208 /* Write back the result. */
3209 if (op == 15 && (rn >= 8 && rn <= 11))
3210 ; /* Comparison, do nothing. */
04595bf6
PM
3211 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3212 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3213 gen_mov_vreg_F0(0, rd);
3214 else if (op == 15 && rn == 15)
3215 /* conversion */
3216 gen_mov_vreg_F0(!dp, rd);
3217 else
3218 gen_mov_vreg_F0(dp, rd);
3219
3220 /* break out of the loop if we have finished */
3221 if (veclen == 0)
3222 break;
3223
3224 if (op == 15 && delta_m == 0) {
3225 /* single source one-many */
3226 while (veclen--) {
3227 rd = ((rd + delta_d) & (bank_mask - 1))
3228 | (rd & bank_mask);
3229 gen_mov_vreg_F0(dp, rd);
3230 }
3231 break;
3232 }
3233 /* Setup the next operands. */
3234 veclen--;
3235 rd = ((rd + delta_d) & (bank_mask - 1))
3236 | (rd & bank_mask);
3237
3238 if (op == 15) {
3239 /* One source operand. */
3240 rm = ((rm + delta_m) & (bank_mask - 1))
3241 | (rm & bank_mask);
3242 gen_mov_F0_vreg(dp, rm);
3243 } else {
3244 /* Two source operands. */
3245 rn = ((rn + delta_d) & (bank_mask - 1))
3246 | (rn & bank_mask);
3247 gen_mov_F0_vreg(dp, rn);
3248 if (delta_m) {
3249 rm = ((rm + delta_m) & (bank_mask - 1))
3250 | (rm & bank_mask);
3251 gen_mov_F1_vreg(dp, rm);
3252 }
3253 }
3254 }
3255 }
3256 break;
3257 case 0xc:
3258 case 0xd:
8387da81 3259 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3260 /* two-register transfer */
3261 rn = (insn >> 16) & 0xf;
3262 rd = (insn >> 12) & 0xf;
3263 if (dp) {
9ee6e8bb
PB
3264 VFP_DREG_M(rm, insn);
3265 } else {
3266 rm = VFP_SREG_M(insn);
3267 }
b7bcbe95 3268
18c9b560 3269 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3270 /* vfp->arm */
3271 if (dp) {
4373f3ce
PB
3272 gen_mov_F0_vreg(0, rm * 2);
3273 tmp = gen_vfp_mrs();
3274 store_reg(s, rd, tmp);
3275 gen_mov_F0_vreg(0, rm * 2 + 1);
3276 tmp = gen_vfp_mrs();
3277 store_reg(s, rn, tmp);
b7bcbe95
FB
3278 } else {
3279 gen_mov_F0_vreg(0, rm);
4373f3ce 3280 tmp = gen_vfp_mrs();
8387da81 3281 store_reg(s, rd, tmp);
b7bcbe95 3282 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3283 tmp = gen_vfp_mrs();
8387da81 3284 store_reg(s, rn, tmp);
b7bcbe95
FB
3285 }
3286 } else {
3287 /* arm->vfp */
3288 if (dp) {
4373f3ce
PB
3289 tmp = load_reg(s, rd);
3290 gen_vfp_msr(tmp);
3291 gen_mov_vreg_F0(0, rm * 2);
3292 tmp = load_reg(s, rn);
3293 gen_vfp_msr(tmp);
3294 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3295 } else {
8387da81 3296 tmp = load_reg(s, rd);
4373f3ce 3297 gen_vfp_msr(tmp);
b7bcbe95 3298 gen_mov_vreg_F0(0, rm);
8387da81 3299 tmp = load_reg(s, rn);
4373f3ce 3300 gen_vfp_msr(tmp);
b7bcbe95
FB
3301 gen_mov_vreg_F0(0, rm + 1);
3302 }
3303 }
3304 } else {
3305 /* Load/store */
3306 rn = (insn >> 16) & 0xf;
3307 if (dp)
9ee6e8bb 3308 VFP_DREG_D(rd, insn);
b7bcbe95 3309 else
9ee6e8bb
PB
3310 rd = VFP_SREG_D(insn);
3311 if (s->thumb && rn == 15) {
7d1b0095 3312 addr = tcg_temp_new_i32();
312eea9f 3313 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3314 } else {
312eea9f 3315 addr = load_reg(s, rn);
9ee6e8bb 3316 }
b7bcbe95
FB
3317 if ((insn & 0x01200000) == 0x01000000) {
3318 /* Single load/store */
3319 offset = (insn & 0xff) << 2;
3320 if ((insn & (1 << 23)) == 0)
3321 offset = -offset;
312eea9f 3322 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3323 if (insn & (1 << 20)) {
312eea9f 3324 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3325 gen_mov_vreg_F0(dp, rd);
3326 } else {
3327 gen_mov_F0_vreg(dp, rd);
312eea9f 3328 gen_vfp_st(s, dp, addr);
b7bcbe95 3329 }
7d1b0095 3330 tcg_temp_free_i32(addr);
b7bcbe95
FB
3331 } else {
3332 /* load/store multiple */
3333 if (dp)
3334 n = (insn >> 1) & 0x7f;
3335 else
3336 n = insn & 0xff;
3337
3338 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3339 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3340
3341 if (dp)
3342 offset = 8;
3343 else
3344 offset = 4;
3345 for (i = 0; i < n; i++) {
18c9b560 3346 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3347 /* load */
312eea9f 3348 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3349 gen_mov_vreg_F0(dp, rd + i);
3350 } else {
3351 /* store */
3352 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3353 gen_vfp_st(s, dp, addr);
b7bcbe95 3354 }
312eea9f 3355 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3356 }
3357 if (insn & (1 << 21)) {
3358 /* writeback */
3359 if (insn & (1 << 24))
3360 offset = -offset * n;
3361 else if (dp && (insn & 1))
3362 offset = 4;
3363 else
3364 offset = 0;
3365
3366 if (offset != 0)
312eea9f
FN
3367 tcg_gen_addi_i32(addr, addr, offset);
3368 store_reg(s, rn, addr);
3369 } else {
7d1b0095 3370 tcg_temp_free_i32(addr);
b7bcbe95
FB
3371 }
3372 }
3373 }
3374 break;
3375 default:
3376 /* Should never happen. */
3377 return 1;
3378 }
3379 return 0;
3380}
3381
/* Emit code to jump to DEST. If DEST is on the same guest page as this
 * TB we can chain directly (goto_tb slot N); otherwise we must exit to
 * the main loop so the new page's protections are honoured.
 */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        /* Same page: direct block chaining via slot n (0 or 1). */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        /* NOTE(review): encodes the TB pointer plus slot index in the exit
           value; the (long) cast assumes long is pointer-sized — true on
           the hosts this tree targets, but worth confirming on LLP64. */
        tcg_gen_exit_tb((long)tb + n);
    } else {
        /* Cross-page jump: set PC and return to the main loop. */
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
3396
8aaca4c0
FB
3397static inline void gen_jmp (DisasContext *s, uint32_t dest)
3398{
551bd27f 3399 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3400 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3401 if (s->thumb)
d9ba4830
PB
3402 dest |= 1;
3403 gen_bx_im(s, dest);
8aaca4c0 3404 } else {
6e256c93 3405 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3406 s->is_jmp = DISAS_TB_JUMP;
3407 }
3408}
3409
d9ba4830 3410static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3411{
ee097184 3412 if (x)
d9ba4830 3413 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3414 else
d9ba4830 3415 gen_sxth(t0);
ee097184 3416 if (y)
d9ba4830 3417 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3418 else
d9ba4830
PB
3419 gen_sxth(t1);
3420 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3421}
3422
/* Return the mask of PSR bits set by a MSR instruction.
 * flags is the instruction's 4-bit field mask (one bit per PSR byte);
 * spsr is nonzero when the target is the SPSR rather than the CPSR.
 * Bits not writable on this CPU / in this mode are cleared from the mask.
 */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    /* Each flag bit selects one byte of the PSR. */
    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits. */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);    /* endian/GE bits are v6+ */
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;               /* IT bits are Thumb-2 only */
    /* Mask out execution state bits. */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits. */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
3451
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep SPSR bits outside mask, merge in t0. */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        /* CPSR writes go through the helper (may change mode/flags). */
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR/SPSR changes can affect translation (e.g. Thumb bit). */
    gen_lookup_tb(s);
    return 0;
}
3473
2fbac54b
FN
3474/* Returns nonzero if access to the PSR is not permitted. */
3475static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3476{
3477 TCGv tmp;
7d1b0095 3478 tmp = tcg_temp_new_i32();
2fbac54b
FN
3479 tcg_gen_movi_i32(tmp, val);
3480 return gen_set_psr(s, mask, spsr, tmp);
3481}
3482
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    /* Branch to the return address... */
    store_reg(s, 15, pc);
    /* ...and restore CPSR from SPSR (full 32-bit write). */
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
3493
/* Generate a v6 exception return. Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    /* Restore CPSR first, then branch to the saved PC. */
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3502
/* Write the current Thumb IT-block state (condition + mask) back to
 * env->condexec_bits so it is correct if an exception is taken here.
 * No-op when not inside an IT block.
 */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Repack cond/mask into the architectural encoding. */
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
3b46e624 3513
/* Raise exception EXCP for the current instruction.
 * offset is how far s->pc has advanced past the faulting instruction,
 * so PC is rewound to point at it before the exception is taken.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    /* Sync IT-block state first so the exception sees a consistent CPSR. */
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
3521
9ee6e8bb
PB
3522static void gen_nop_hint(DisasContext *s, int val)
3523{
3524 switch (val) {
3525 case 3: /* wfi */
8984bd2e 3526 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3527 s->is_jmp = DISAS_WFI;
3528 break;
3529 case 2: /* wfe */
3530 case 4: /* sev */
3531 /* TODO: Implement SEV and WFE. May help SMP performance. */
3532 default: /* nop */
3533 break;
3534 }
3535}
99c475ab 3536
/* Shorthand argument list: dest = cpu_V0, srcs = cpu_V0, cpu_V1. */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3538
dd8fbd78 3539static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3540{
3541 switch (size) {
dd8fbd78
FN
3542 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3543 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3544 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3545 default: return 1;
3546 }
3547 return 0;
3548}
3549
dd8fbd78 3550static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3551{
3552 switch (size) {
dd8fbd78
FN
3553 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3554 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3555 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3556 default: return;
3557 }
3558}
3559
/* 32-bit pairwise ops end up the same as the elementwise versions. */
/* (With one element per 32-bit chunk, "pairwise" max/min degenerates
   to the ordinary elementwise helper, so just alias them.) */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3565
/* Dispatch tmp = name(tmp, tmp2) on (size, u) for helpers that take
 * cpu_env (e.g. saturating ops that set QC). Expects locals `size`,
 * `u`, `tmp`, `tmp2` in the enclosing scope; expands to `return 1`
 * for unsupported size/u combinations.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3588
/* Same dispatch as GEN_NEON_INTEGER_OP_ENV but for helpers that do not
 * need cpu_env. Expects locals `size`, `u`, `tmp`, `tmp2`; expands to
 * `return 1` for unsupported size/u combinations.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3611
/* Load NEON scratch register `scratch` from the CPU state into a fresh
 * 32-bit temp. Caller owns (and must free) the returned temp.
 */
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
3618
/* Store `var` into NEON scratch register `scratch`. Consumes `var`
 * (the temp is freed here).
 */
static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
3624
dd8fbd78 3625static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3626{
dd8fbd78 3627 TCGv tmp;
9ee6e8bb 3628 if (size == 1) {
0fad6efc
PM
3629 tmp = neon_load_reg(reg & 7, reg >> 4);
3630 if (reg & 8) {
dd8fbd78 3631 gen_neon_dup_high16(tmp);
0fad6efc
PM
3632 } else {
3633 gen_neon_dup_low16(tmp);
dd8fbd78 3634 }
0fad6efc
PM
3635 } else {
3636 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3637 }
dd8fbd78 3638 return tmp;
9ee6e8bb
PB
3639}
3640
02acedf9 3641static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3642{
02acedf9
PM
3643 TCGv tmp, tmp2;
3644 if (size == 3 || (!q && size == 2)) {
3645 return 1;
3646 }
3647 tmp = tcg_const_i32(rd);
3648 tmp2 = tcg_const_i32(rm);
3649 if (q) {
3650 switch (size) {
3651 case 0:
2a3f75b4 3652 gen_helper_neon_qunzip8(tmp, tmp2);
02acedf9
PM
3653 break;
3654 case 1:
2a3f75b4 3655 gen_helper_neon_qunzip16(tmp, tmp2);
02acedf9
PM
3656 break;
3657 case 2:
2a3f75b4 3658 gen_helper_neon_qunzip32(tmp, tmp2);
02acedf9
PM
3659 break;
3660 default:
3661 abort();
3662 }
3663 } else {
3664 switch (size) {
3665 case 0:
2a3f75b4 3666 gen_helper_neon_unzip8(tmp, tmp2);
02acedf9
PM
3667 break;
3668 case 1:
2a3f75b4 3669 gen_helper_neon_unzip16(tmp, tmp2);
02acedf9
PM
3670 break;
3671 default:
3672 abort();
3673 }
3674 }
3675 tcg_temp_free_i32(tmp);
3676 tcg_temp_free_i32(tmp2);
3677 return 0;
19457615
FN
3678}
3679
d68a6f3a 3680static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3681{
3682 TCGv tmp, tmp2;
d68a6f3a
PM
3683 if (size == 3 || (!q && size == 2)) {
3684 return 1;
3685 }
3686 tmp = tcg_const_i32(rd);
3687 tmp2 = tcg_const_i32(rm);
3688 if (q) {
3689 switch (size) {
3690 case 0:
2a3f75b4 3691 gen_helper_neon_qzip8(tmp, tmp2);
d68a6f3a
PM
3692 break;
3693 case 1:
2a3f75b4 3694 gen_helper_neon_qzip16(tmp, tmp2);
d68a6f3a
PM
3695 break;
3696 case 2:
2a3f75b4 3697 gen_helper_neon_qzip32(tmp, tmp2);
d68a6f3a
PM
3698 break;
3699 default:
3700 abort();
3701 }
3702 } else {
3703 switch (size) {
3704 case 0:
2a3f75b4 3705 gen_helper_neon_zip8(tmp, tmp2);
d68a6f3a
PM
3706 break;
3707 case 1:
2a3f75b4 3708 gen_helper_neon_zip16(tmp, tmp2);
d68a6f3a
PM
3709 break;
3710 default:
3711 abort();
3712 }
3713 }
3714 tcg_temp_free_i32(tmp);
3715 tcg_temp_free_i32(tmp2);
3716 return 0;
19457615
FN
3717}
3718
19457615
FN
3719static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3720{
3721 TCGv rd, tmp;
3722
7d1b0095
PM
3723 rd = tcg_temp_new_i32();
3724 tmp = tcg_temp_new_i32();
19457615
FN
3725
3726 tcg_gen_shli_i32(rd, t0, 8);
3727 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3728 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3729 tcg_gen_or_i32(rd, rd, tmp);
3730
3731 tcg_gen_shri_i32(t1, t1, 8);
3732 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3733 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3734 tcg_gen_or_i32(t1, t1, tmp);
3735 tcg_gen_mov_i32(t0, rd);
3736
7d1b0095
PM
3737 tcg_temp_free_i32(tmp);
3738 tcg_temp_free_i32(rd);
19457615
FN
3739}
3740
3741static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3742{
3743 TCGv rd, tmp;
3744
7d1b0095
PM
3745 rd = tcg_temp_new_i32();
3746 tmp = tcg_temp_new_i32();
19457615
FN
3747
3748 tcg_gen_shli_i32(rd, t0, 16);
3749 tcg_gen_andi_i32(tmp, t1, 0xffff);
3750 tcg_gen_or_i32(rd, rd, tmp);
3751 tcg_gen_shri_i32(t1, t1, 16);
3752 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3753 tcg_gen_or_i32(t1, t1, tmp);
3754 tcg_gen_mov_i32(t0, rd);
3755
7d1b0095
PM
3756 tcg_temp_free_i32(tmp);
3757 tcg_temp_free_i32(rd);
19457615
FN
3758}
3759
3760
/* Decode table for the NEON "load/store multiple structures" forms,
 * indexed by the instruction's op field (0..10): number of D registers
 * transferred, element interleave factor, and register spacing.
 * The table is read-only, so declare it const.
 */
static const struct {
    int nregs;      /* D registers transferred */
    int interleave; /* element interleave factor */
    int spacing;    /* register number step between structures */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3778
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   Handles three encodings: load/store multiple structures, load single
   element to all lanes, and load/store a single element to one lane,
   followed by the optional base-register writeback. */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;     /* byte step in memory / register step for writeback */
    int size;       /* log2 of the element size in bytes */
    int reg;
    int pass;       /* which 32-bit half of a D register */
    int load;       /* nonzero for load, zero for store */
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;    /* base register */
    rm = insn & 0xf;            /* writeback / index register */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements. */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        /* 64-bit elements are only valid without interleaving. */
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved transfers restart from the base with a
               per-register element offset. */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* Whole D register per access. */
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Transfer each 32-bit half of the D register,
                   assembling/splitting sub-word elements as needed. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two halfword accesses packed into one word. */
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses packed into one word. */
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;     /* total bytes, for writeback below */
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes. */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element. */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings. */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the target lane,
                           preserving the other lanes of the word. */
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Base register writeback: rm == 13 means post-increment by the
           transfer size, otherwise add index register rm. */
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4037
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F. */
/* i.e. dest = (t & c) | (f & ~c), computed bitwise. */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);     /* t &= c */
    tcg_gen_andc_i32(f, f, c);    /* f &= ~c */
    tcg_gen_or_i32(dest, t, f);
}
4045
a7812ae4 4046static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4047{
4048 switch (size) {
4049 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4050 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4051 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4052 default: abort();
4053 }
4054}
4055
a7812ae4 4056static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4057{
4058 switch (size) {
2a3f75b4
PM
4059 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4060 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4061 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
ad69471c
PB
4062 default: abort();
4063 }
4064}
4065
a7812ae4 4066static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4067{
4068 switch (size) {
2a3f75b4
PM
4069 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4070 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4071 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
ad69471c
PB
4072 default: abort();
4073 }
4074}
4075
af1bbf30
JR
4076static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4077{
4078 switch (size) {
2a3f75b4
PM
4079 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4080 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4081 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
af1bbf30
JR
4082 default: abort();
4083 }
4084}
4085
ad69471c
PB
4086static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4087 int q, int u)
4088{
4089 if (q) {
4090 if (u) {
4091 switch (size) {
4092 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4093 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4094 default: abort();
4095 }
4096 } else {
4097 switch (size) {
4098 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4099 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4100 default: abort();
4101 }
4102 }
4103 } else {
4104 if (u) {
4105 switch (size) {
b408a9b0
CL
4106 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4107 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4108 default: abort();
4109 }
4110 } else {
4111 switch (size) {
4112 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4113 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4114 default: abort();
4115 }
4116 }
4117 }
4118}
4119
a7812ae4 4120static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4121{
4122 if (u) {
4123 switch (size) {
4124 case 0: gen_helper_neon_widen_u8(dest, src); break;
4125 case 1: gen_helper_neon_widen_u16(dest, src); break;
4126 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4127 default: abort();
4128 }
4129 } else {
4130 switch (size) {
4131 case 0: gen_helper_neon_widen_s8(dest, src); break;
4132 case 1: gen_helper_neon_widen_s16(dest, src); break;
4133 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4134 default: abort();
4135 }
4136 }
7d1b0095 4137 tcg_temp_free_i32(src);
ad69471c
PB
4138}
4139
4140static inline void gen_neon_addl(int size)
4141{
4142 switch (size) {
4143 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4144 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4145 case 2: tcg_gen_add_i64(CPU_V001); break;
4146 default: abort();
4147 }
4148}
4149
4150static inline void gen_neon_subl(int size)
4151{
4152 switch (size) {
4153 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4154 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4155 case 2: tcg_gen_sub_i64(CPU_V001); break;
4156 default: abort();
4157 }
4158}
4159
a7812ae4 4160static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4161{
4162 switch (size) {
4163 case 0: gen_helper_neon_negl_u16(var, var); break;
4164 case 1: gen_helper_neon_negl_u32(var, var); break;
4165 case 2: gen_helper_neon_negl_u64(var, var); break;
4166 default: abort();
4167 }
4168}
4169
a7812ae4 4170static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4171{
4172 switch (size) {
2a3f75b4
PM
4173 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4174 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
ad69471c
PB
4175 default: abort();
4176 }
4177}
4178
/* Widening multiply: dest (64-bit) = a * b, signedness per u, element
 * width per size. Consumes a and b on every path (directly for 32-bit
 * elements, explicitly below for 8/16-bit).
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* 32x32->64 signed: gen_muls_i64_i32 frees a and b itself. */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        /* 32x32->64 unsigned. */
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
4208
c33171c7
PM
4209static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4210{
4211 if (op) {
4212 if (u) {
4213 gen_neon_unarrow_sats(size, dest, src);
4214 } else {
4215 gen_neon_narrow(size, dest, src);
4216 }
4217 } else {
4218 if (u) {
4219 gen_neon_narrow_satu(size, dest, src);
4220 } else {
4221 gen_neon_narrow_sats(size, dest, src);
4222 }
4223 }
4224}
4225
9ee6e8bb
PB
4226/* Translate a NEON data processing instruction. Return nonzero if the
4227 instruction is invalid.
ad69471c
PB
4228 We process data in a mixture of 32-bit and 64-bit chunks.
4229 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4230
9ee6e8bb
PB
4231static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4232{
4233 int op;
4234 int q;
4235 int rd, rn, rm;
4236 int size;
4237 int shift;
4238 int pass;
4239 int count;
4240 int pairwise;
4241 int u;
4242 int n;
ca9a32e4 4243 uint32_t imm, mask;
b75263d6 4244 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4245 TCGv_i64 tmp64;
9ee6e8bb 4246
5df8bac1 4247 if (!s->vfp_enabled)
9ee6e8bb
PB
4248 return 1;
4249 q = (insn & (1 << 6)) != 0;
4250 u = (insn >> 24) & 1;
4251 VFP_DREG_D(rd, insn);
4252 VFP_DREG_N(rn, insn);
4253 VFP_DREG_M(rm, insn);
4254 size = (insn >> 20) & 3;
4255 if ((insn & (1 << 23)) == 0) {
4256 /* Three register same length. */
4257 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4258 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4259 || op == 10 || op == 11 || op == 16)) {
4260 /* 64-bit element instructions. */
9ee6e8bb 4261 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4262 neon_load_reg64(cpu_V0, rn + pass);
4263 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4264 switch (op) {
4265 case 1: /* VQADD */
4266 if (u) {
2a3f75b4 4267 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4268 } else {
2a3f75b4 4269 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4270 }
9ee6e8bb
PB
4271 break;
4272 case 5: /* VQSUB */
4273 if (u) {
2a3f75b4 4274 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 4275 } else {
2a3f75b4 4276 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4277 }
4278 break;
4279 case 8: /* VSHL */
4280 if (u) {
4281 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4282 } else {
4283 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4284 }
4285 break;
4286 case 9: /* VQSHL */
4287 if (u) {
2a3f75b4 4288 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4289 } else {
2a3f75b4 4290 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
ad69471c
PB
4291 }
4292 break;
4293 case 10: /* VRSHL */
4294 if (u) {
4295 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4296 } else {
ad69471c
PB
4297 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4298 }
4299 break;
4300 case 11: /* VQRSHL */
4301 if (u) {
2a3f75b4 4302 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4303 } else {
2a3f75b4 4304 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4305 }
9ee6e8bb
PB
4306 break;
4307 case 16:
4308 if (u) {
ad69471c 4309 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4310 } else {
ad69471c 4311 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4312 }
4313 break;
4314 default:
4315 abort();
2c0262af 4316 }
ad69471c 4317 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4318 }
9ee6e8bb 4319 return 0;
2c0262af 4320 }
9ee6e8bb
PB
4321 switch (op) {
4322 case 8: /* VSHL */
4323 case 9: /* VQSHL */
4324 case 10: /* VRSHL */
ad69471c 4325 case 11: /* VQRSHL */
9ee6e8bb 4326 {
ad69471c
PB
4327 int rtmp;
4328 /* Shift instruction operands are reversed. */
4329 rtmp = rn;
9ee6e8bb 4330 rn = rm;
ad69471c 4331 rm = rtmp;
9ee6e8bb
PB
4332 pairwise = 0;
4333 }
2c0262af 4334 break;
9ee6e8bb
PB
4335 case 20: /* VPMAX */
4336 case 21: /* VPMIN */
4337 case 23: /* VPADD */
4338 pairwise = 1;
2c0262af 4339 break;
9ee6e8bb
PB
4340 case 26: /* VPADD (float) */
4341 pairwise = (u && size < 2);
2c0262af 4342 break;
9ee6e8bb
PB
4343 case 30: /* VPMIN/VPMAX (float) */
4344 pairwise = u;
2c0262af 4345 break;
9ee6e8bb
PB
4346 default:
4347 pairwise = 0;
2c0262af 4348 break;
9ee6e8bb 4349 }
dd8fbd78 4350
9ee6e8bb
PB
4351 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4352
4353 if (pairwise) {
4354 /* Pairwise. */
4355 if (q)
4356 n = (pass & 1) * 2;
2c0262af 4357 else
9ee6e8bb
PB
4358 n = 0;
4359 if (pass < q + 1) {
dd8fbd78
FN
4360 tmp = neon_load_reg(rn, n);
4361 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4362 } else {
dd8fbd78
FN
4363 tmp = neon_load_reg(rm, n);
4364 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4365 }
4366 } else {
4367 /* Elementwise. */
dd8fbd78
FN
4368 tmp = neon_load_reg(rn, pass);
4369 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4370 }
4371 switch (op) {
4372 case 0: /* VHADD */
4373 GEN_NEON_INTEGER_OP(hadd);
4374 break;
4375 case 1: /* VQADD */
2a3f75b4 4376 GEN_NEON_INTEGER_OP(qadd);
2c0262af 4377 break;
9ee6e8bb
PB
4378 case 2: /* VRHADD */
4379 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4380 break;
9ee6e8bb
PB
4381 case 3: /* Logic ops. */
4382 switch ((u << 2) | size) {
4383 case 0: /* VAND */
dd8fbd78 4384 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4385 break;
4386 case 1: /* BIC */
f669df27 4387 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4388 break;
4389 case 2: /* VORR */
dd8fbd78 4390 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4391 break;
4392 case 3: /* VORN */
f669df27 4393 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4394 break;
4395 case 4: /* VEOR */
dd8fbd78 4396 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4397 break;
4398 case 5: /* VBSL */
dd8fbd78
FN
4399 tmp3 = neon_load_reg(rd, pass);
4400 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4401 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4402 break;
4403 case 6: /* VBIT */
dd8fbd78
FN
4404 tmp3 = neon_load_reg(rd, pass);
4405 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4406 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4407 break;
4408 case 7: /* VBIF */
dd8fbd78
FN
4409 tmp3 = neon_load_reg(rd, pass);
4410 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4411 tcg_temp_free_i32(tmp3);
9ee6e8bb 4412 break;
2c0262af
FB
4413 }
4414 break;
9ee6e8bb
PB
4415 case 4: /* VHSUB */
4416 GEN_NEON_INTEGER_OP(hsub);
4417 break;
4418 case 5: /* VQSUB */
2a3f75b4 4419 GEN_NEON_INTEGER_OP(qsub);
2c0262af 4420 break;
9ee6e8bb
PB
4421 case 6: /* VCGT */
4422 GEN_NEON_INTEGER_OP(cgt);
4423 break;
4424 case 7: /* VCGE */
4425 GEN_NEON_INTEGER_OP(cge);
4426 break;
4427 case 8: /* VSHL */
ad69471c 4428 GEN_NEON_INTEGER_OP(shl);
2c0262af 4429 break;
9ee6e8bb 4430 case 9: /* VQSHL */
2a3f75b4 4431 GEN_NEON_INTEGER_OP(qshl);
2c0262af 4432 break;
9ee6e8bb 4433 case 10: /* VRSHL */
ad69471c 4434 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4435 break;
9ee6e8bb 4436 case 11: /* VQRSHL */
2a3f75b4 4437 GEN_NEON_INTEGER_OP(qrshl);
9ee6e8bb
PB
4438 break;
4439 case 12: /* VMAX */
4440 GEN_NEON_INTEGER_OP(max);
4441 break;
4442 case 13: /* VMIN */
4443 GEN_NEON_INTEGER_OP(min);
4444 break;
4445 case 14: /* VABD */
4446 GEN_NEON_INTEGER_OP(abd);
4447 break;
4448 case 15: /* VABA */
4449 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4450 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4451 tmp2 = neon_load_reg(rd, pass);
4452 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4453 break;
4454 case 16:
4455 if (!u) { /* VADD */
dd8fbd78 4456 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4457 return 1;
4458 } else { /* VSUB */
4459 switch (size) {
dd8fbd78
FN
4460 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4461 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4462 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4463 default: return 1;
4464 }
4465 }
4466 break;
4467 case 17:
4468 if (!u) { /* VTST */
4469 switch (size) {
dd8fbd78
FN
4470 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4471 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4472 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4473 default: return 1;
4474 }
4475 } else { /* VCEQ */
4476 switch (size) {
dd8fbd78
FN
4477 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4478 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4479 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4480 default: return 1;
4481 }
4482 }
4483 break;
4484 case 18: /* Multiply. */
4485 switch (size) {
dd8fbd78
FN
4486 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4487 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4488 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4489 default: return 1;
4490 }
7d1b0095 4491 tcg_temp_free_i32(tmp2);
dd8fbd78 4492 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4493 if (u) { /* VMLS */
dd8fbd78 4494 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4495 } else { /* VMLA */
dd8fbd78 4496 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4497 }
4498 break;
4499 case 19: /* VMUL */
4500 if (u) { /* polynomial */
dd8fbd78 4501 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4502 } else { /* Integer */
4503 switch (size) {
dd8fbd78
FN
4504 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4505 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4506 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4507 default: return 1;
4508 }
4509 }
4510 break;
4511 case 20: /* VPMAX */
4512 GEN_NEON_INTEGER_OP(pmax);
4513 break;
4514 case 21: /* VPMIN */
4515 GEN_NEON_INTEGER_OP(pmin);
4516 break;
4517 case 22: /* Hultiply high. */
4518 if (!u) { /* VQDMULH */
4519 switch (size) {
2a3f75b4
PM
4520 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4521 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4522 default: return 1;
4523 }
4524 } else { /* VQRDHMUL */
4525 switch (size) {
2a3f75b4
PM
4526 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4527 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4528 default: return 1;
4529 }
4530 }
4531 break;
4532 case 23: /* VPADD */
4533 if (u)
4534 return 1;
4535 switch (size) {
dd8fbd78
FN
4536 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4537 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4538 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4539 default: return 1;
4540 }
4541 break;
4542 case 26: /* Floating point arithnetic. */
4543 switch ((u << 2) | size) {
4544 case 0: /* VADD */
dd8fbd78 4545 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4546 break;
4547 case 2: /* VSUB */
dd8fbd78 4548 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4549 break;
4550 case 4: /* VPADD */
dd8fbd78 4551 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4552 break;
4553 case 6: /* VABD */
dd8fbd78 4554 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4555 break;
4556 default:
4557 return 1;
4558 }
4559 break;
4560 case 27: /* Float multiply. */
dd8fbd78 4561 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4562 if (!u) {
7d1b0095 4563 tcg_temp_free_i32(tmp2);
dd8fbd78 4564 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4565 if (size == 0) {
dd8fbd78 4566 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4567 } else {
dd8fbd78 4568 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4569 }
4570 }
4571 break;
4572 case 28: /* Float compare. */
4573 if (!u) {
dd8fbd78 4574 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4575 } else {
9ee6e8bb 4576 if (size == 0)
dd8fbd78 4577 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4578 else
dd8fbd78 4579 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4580 }
2c0262af 4581 break;
9ee6e8bb
PB
4582 case 29: /* Float compare absolute. */
4583 if (!u)
4584 return 1;
4585 if (size == 0)
dd8fbd78 4586 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4587 else
dd8fbd78 4588 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4589 break;
9ee6e8bb
PB
4590 case 30: /* Float min/max. */
4591 if (size == 0)
dd8fbd78 4592 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4593 else
dd8fbd78 4594 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4595 break;
4596 case 31:
4597 if (size == 0)
dd8fbd78 4598 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4599 else
dd8fbd78 4600 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4601 break;
9ee6e8bb
PB
4602 default:
4603 abort();
2c0262af 4604 }
7d1b0095 4605 tcg_temp_free_i32(tmp2);
dd8fbd78 4606
9ee6e8bb
PB
4607 /* Save the result. For elementwise operations we can put it
4608 straight into the destination register. For pairwise operations
4609 we have to be careful to avoid clobbering the source operands. */
4610 if (pairwise && rd == rm) {
dd8fbd78 4611 neon_store_scratch(pass, tmp);
9ee6e8bb 4612 } else {
dd8fbd78 4613 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4614 }
4615
4616 } /* for pass */
4617 if (pairwise && rd == rm) {
4618 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4619 tmp = neon_load_scratch(pass);
4620 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4621 }
4622 }
ad69471c 4623 /* End of 3 register same size operations. */
9ee6e8bb
PB
4624 } else if (insn & (1 << 4)) {
4625 if ((insn & 0x00380080) != 0) {
4626 /* Two registers and shift. */
4627 op = (insn >> 8) & 0xf;
4628 if (insn & (1 << 7)) {
4629 /* 64-bit shift. */
4630 size = 3;
4631 } else {
4632 size = 2;
4633 while ((insn & (1 << (size + 19))) == 0)
4634 size--;
4635 }
4636 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4637 /* To avoid excessive dumplication of ops we implement shift
4638 by immediate using the variable shift operations. */
4639 if (op < 8) {
4640 /* Shift by immediate:
4641 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4642 /* Right shifts are encoded as N - shift, where N is the
4643 element size in bits. */
4644 if (op <= 4)
4645 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4646 if (size == 3) {
4647 count = q + 1;
4648 } else {
4649 count = q ? 4: 2;
4650 }
4651 switch (size) {
4652 case 0:
4653 imm = (uint8_t) shift;
4654 imm |= imm << 8;
4655 imm |= imm << 16;
4656 break;
4657 case 1:
4658 imm = (uint16_t) shift;
4659 imm |= imm << 16;
4660 break;
4661 case 2:
4662 case 3:
4663 imm = shift;
4664 break;
4665 default:
4666 abort();
4667 }
4668
4669 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4670 if (size == 3) {
4671 neon_load_reg64(cpu_V0, rm + pass);
4672 tcg_gen_movi_i64(cpu_V1, imm);
4673 switch (op) {
4674 case 0: /* VSHR */
4675 case 1: /* VSRA */
4676 if (u)
4677 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4678 else
ad69471c 4679 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4680 break;
ad69471c
PB
4681 case 2: /* VRSHR */
4682 case 3: /* VRSRA */
4683 if (u)
4684 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4685 else
ad69471c 4686 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4687 break;
ad69471c
PB
4688 case 4: /* VSRI */
4689 if (!u)
4690 return 1;
4691 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4692 break;
4693 case 5: /* VSHL, VSLI */
4694 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4695 break;
0322b26e
PM
4696 case 6: /* VQSHLU */
4697 if (u) {
2a3f75b4 4698 gen_helper_neon_qshlu_s64(cpu_V0,
0322b26e
PM
4699 cpu_V0, cpu_V1);
4700 } else {
4701 return 1;
4702 }
ad69471c 4703 break;
0322b26e
PM
4704 case 7: /* VQSHL */
4705 if (u) {
2a3f75b4 4706 gen_helper_neon_qshl_u64(cpu_V0,
0322b26e
PM
4707 cpu_V0, cpu_V1);
4708 } else {
2a3f75b4 4709 gen_helper_neon_qshl_s64(cpu_V0,
0322b26e
PM
4710 cpu_V0, cpu_V1);
4711 }
9ee6e8bb 4712 break;
9ee6e8bb 4713 }
ad69471c
PB
4714 if (op == 1 || op == 3) {
4715 /* Accumulate. */
5371cb81 4716 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
4717 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4718 } else if (op == 4 || (op == 5 && u)) {
4719 /* Insert */
923e6509
CL
4720 neon_load_reg64(cpu_V1, rd + pass);
4721 uint64_t mask;
4722 if (shift < -63 || shift > 63) {
4723 mask = 0;
4724 } else {
4725 if (op == 4) {
4726 mask = 0xffffffffffffffffull >> -shift;
4727 } else {
4728 mask = 0xffffffffffffffffull << shift;
4729 }
4730 }
4731 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4732 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4733 }
4734 neon_store_reg64(cpu_V0, rd + pass);
4735 } else { /* size < 3 */
4736 /* Operands in T0 and T1. */
dd8fbd78 4737 tmp = neon_load_reg(rm, pass);
7d1b0095 4738 tmp2 = tcg_temp_new_i32();
dd8fbd78 4739 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4740 switch (op) {
4741 case 0: /* VSHR */
4742 case 1: /* VSRA */
4743 GEN_NEON_INTEGER_OP(shl);
4744 break;
4745 case 2: /* VRSHR */
4746 case 3: /* VRSRA */
4747 GEN_NEON_INTEGER_OP(rshl);
4748 break;
4749 case 4: /* VSRI */
4750 if (!u)
4751 return 1;
4752 GEN_NEON_INTEGER_OP(shl);
4753 break;
4754 case 5: /* VSHL, VSLI */
4755 switch (size) {
dd8fbd78
FN
4756 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4757 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4758 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4759 default: return 1;
4760 }
4761 break;
0322b26e
PM
4762 case 6: /* VQSHLU */
4763 if (!u) {
4764 return 1;
4765 }
ad69471c 4766 switch (size) {
0322b26e 4767 case 0:
2a3f75b4 4768 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
0322b26e
PM
4769 break;
4770 case 1:
2a3f75b4 4771 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
0322b26e
PM
4772 break;
4773 case 2:
2a3f75b4 4774 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
0322b26e
PM
4775 break;
4776 default:
4777 return 1;
ad69471c
PB
4778 }
4779 break;
0322b26e 4780 case 7: /* VQSHL */
2a3f75b4 4781 GEN_NEON_INTEGER_OP(qshl);
0322b26e 4782 break;
ad69471c 4783 }
7d1b0095 4784 tcg_temp_free_i32(tmp2);
ad69471c
PB
4785
4786 if (op == 1 || op == 3) {
4787 /* Accumulate. */
dd8fbd78 4788 tmp2 = neon_load_reg(rd, pass);
5371cb81 4789 gen_neon_add(size, tmp, tmp2);
7d1b0095 4790 tcg_temp_free_i32(tmp2);
ad69471c
PB
4791 } else if (op == 4 || (op == 5 && u)) {
4792 /* Insert */
4793 switch (size) {
4794 case 0:
4795 if (op == 4)
ca9a32e4 4796 mask = 0xff >> -shift;
ad69471c 4797 else
ca9a32e4
JR
4798 mask = (uint8_t)(0xff << shift);
4799 mask |= mask << 8;
4800 mask |= mask << 16;
ad69471c
PB
4801 break;
4802 case 1:
4803 if (op == 4)
ca9a32e4 4804 mask = 0xffff >> -shift;
ad69471c 4805 else
ca9a32e4
JR
4806 mask = (uint16_t)(0xffff << shift);
4807 mask |= mask << 16;
ad69471c
PB
4808 break;
4809 case 2:
ca9a32e4
JR
4810 if (shift < -31 || shift > 31) {
4811 mask = 0;
4812 } else {
4813 if (op == 4)
4814 mask = 0xffffffffu >> -shift;
4815 else
4816 mask = 0xffffffffu << shift;
4817 }
ad69471c
PB
4818 break;
4819 default:
4820 abort();
4821 }
dd8fbd78 4822 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4823 tcg_gen_andi_i32(tmp, tmp, mask);
4824 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 4825 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4826 tcg_temp_free_i32(tmp2);
ad69471c 4827 }
dd8fbd78 4828 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4829 }
4830 } /* for pass */
4831 } else if (op < 10) {
ad69471c 4832 /* Shift by immediate and narrow:
9ee6e8bb 4833 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd
CL
4834 int input_unsigned = (op == 8) ? !u : u;
4835
9ee6e8bb
PB
4836 shift = shift - (1 << (size + 3));
4837 size++;
92cdfaeb 4838 if (size == 3) {
a7812ae4 4839 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
4840 neon_load_reg64(cpu_V0, rm);
4841 neon_load_reg64(cpu_V1, rm + 1);
4842 for (pass = 0; pass < 2; pass++) {
4843 TCGv_i64 in;
4844 if (pass == 0) {
4845 in = cpu_V0;
4846 } else {
4847 in = cpu_V1;
4848 }
ad69471c 4849 if (q) {
0b36f4cd 4850 if (input_unsigned) {
92cdfaeb 4851 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 4852 } else {
92cdfaeb 4853 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 4854 }
ad69471c 4855 } else {
0b36f4cd 4856 if (input_unsigned) {
92cdfaeb 4857 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 4858 } else {
92cdfaeb 4859 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 4860 }
ad69471c 4861 }
7d1b0095 4862 tmp = tcg_temp_new_i32();
92cdfaeb
PM
4863 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4864 neon_store_reg(rd, pass, tmp);
4865 } /* for pass */
4866 tcg_temp_free_i64(tmp64);
4867 } else {
4868 if (size == 1) {
4869 imm = (uint16_t)shift;
4870 imm |= imm << 16;
2c0262af 4871 } else {
92cdfaeb
PM
4872 /* size == 2 */
4873 imm = (uint32_t)shift;
4874 }
4875 tmp2 = tcg_const_i32(imm);
4876 tmp4 = neon_load_reg(rm + 1, 0);
4877 tmp5 = neon_load_reg(rm + 1, 1);
4878 for (pass = 0; pass < 2; pass++) {
4879 if (pass == 0) {
4880 tmp = neon_load_reg(rm, 0);
4881 } else {
4882 tmp = tmp4;
4883 }
0b36f4cd
CL
4884 gen_neon_shift_narrow(size, tmp, tmp2, q,
4885 input_unsigned);
92cdfaeb
PM
4886 if (pass == 0) {
4887 tmp3 = neon_load_reg(rm, 1);
4888 } else {
4889 tmp3 = tmp5;
4890 }
0b36f4cd
CL
4891 gen_neon_shift_narrow(size, tmp3, tmp2, q,
4892 input_unsigned);
36aa55dc 4893 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
4894 tcg_temp_free_i32(tmp);
4895 tcg_temp_free_i32(tmp3);
4896 tmp = tcg_temp_new_i32();
92cdfaeb
PM
4897 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4898 neon_store_reg(rd, pass, tmp);
4899 } /* for pass */
c6067f04 4900 tcg_temp_free_i32(tmp2);
b75263d6 4901 }
9ee6e8bb
PB
4902 } else if (op == 10) {
4903 /* VSHLL */
ad69471c 4904 if (q || size == 3)
9ee6e8bb 4905 return 1;
ad69471c
PB
4906 tmp = neon_load_reg(rm, 0);
4907 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4908 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4909 if (pass == 1)
4910 tmp = tmp2;
4911
4912 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4913
9ee6e8bb
PB
4914 if (shift != 0) {
4915 /* The shift is less than the width of the source
ad69471c
PB
4916 type, so we can just shift the whole register. */
4917 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
4918 /* Widen the result of shift: we need to clear
4919 * the potential overflow bits resulting from
4920 * left bits of the narrow input appearing as
4921 * right bits of left the neighbour narrow
4922 * input. */
ad69471c
PB
4923 if (size < 2 || !u) {
4924 uint64_t imm64;
4925 if (size == 0) {
4926 imm = (0xffu >> (8 - shift));
4927 imm |= imm << 16;
acdf01ef 4928 } else if (size == 1) {
ad69471c 4929 imm = 0xffff >> (16 - shift);
acdf01ef
CL
4930 } else {
4931 /* size == 2 */
4932 imm = 0xffffffff >> (32 - shift);
4933 }
4934 if (size < 2) {
4935 imm64 = imm | (((uint64_t)imm) << 32);
4936 } else {
4937 imm64 = imm;
9ee6e8bb 4938 }
acdf01ef 4939 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
4940 }
4941 }
ad69471c 4942 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 4943 }
f73534a5 4944 } else if (op >= 14) {
9ee6e8bb 4945 /* VCVT fixed-point. */
f73534a5
PM
4946 /* We have already masked out the must-be-1 top bit of imm6,
4947 * hence this 32-shift where the ARM ARM has 64-imm6.
4948 */
4949 shift = 32 - shift;
9ee6e8bb 4950 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4951 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 4952 if (!(op & 1)) {
9ee6e8bb 4953 if (u)
4373f3ce 4954 gen_vfp_ulto(0, shift);
9ee6e8bb 4955 else
4373f3ce 4956 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4957 } else {
4958 if (u)
4373f3ce 4959 gen_vfp_toul(0, shift);
9ee6e8bb 4960 else
4373f3ce 4961 gen_vfp_tosl(0, shift);
2c0262af 4962 }
4373f3ce 4963 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4964 }
4965 } else {
9ee6e8bb
PB
4966 return 1;
4967 }
4968 } else { /* (insn & 0x00380080) == 0 */
4969 int invert;
4970
4971 op = (insn >> 8) & 0xf;
4972 /* One register and immediate. */
4973 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4974 invert = (insn & (1 << 5)) != 0;
4975 switch (op) {
4976 case 0: case 1:
4977 /* no-op */
4978 break;
4979 case 2: case 3:
4980 imm <<= 8;
4981 break;
4982 case 4: case 5:
4983 imm <<= 16;
4984 break;
4985 case 6: case 7:
4986 imm <<= 24;
4987 break;
4988 case 8: case 9:
4989 imm |= imm << 16;
4990 break;
4991 case 10: case 11:
4992 imm = (imm << 8) | (imm << 24);
4993 break;
4994 case 12:
8e31209e 4995 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
4996 break;
4997 case 13:
4998 imm = (imm << 16) | 0xffff;
4999 break;
5000 case 14:
5001 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5002 if (invert)
5003 imm = ~imm;
5004 break;
5005 case 15:
5006 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5007 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5008 break;
5009 }
5010 if (invert)
5011 imm = ~imm;
5012
9ee6e8bb
PB
5013 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5014 if (op & 1 && op < 12) {
ad69471c 5015 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5016 if (invert) {
5017 /* The immediate value has already been inverted, so
5018 BIC becomes AND. */
ad69471c 5019 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5020 } else {
ad69471c 5021 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5022 }
9ee6e8bb 5023 } else {
ad69471c 5024 /* VMOV, VMVN. */
7d1b0095 5025 tmp = tcg_temp_new_i32();
9ee6e8bb 5026 if (op == 14 && invert) {
ad69471c
PB
5027 uint32_t val;
5028 val = 0;
9ee6e8bb
PB
5029 for (n = 0; n < 4; n++) {
5030 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5031 val |= 0xff << (n * 8);
9ee6e8bb 5032 }
ad69471c
PB
5033 tcg_gen_movi_i32(tmp, val);
5034 } else {
5035 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5036 }
9ee6e8bb 5037 }
ad69471c 5038 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5039 }
5040 }
e4b3861d 5041 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5042 if (size != 3) {
5043 op = (insn >> 8) & 0xf;
5044 if ((insn & (1 << 6)) == 0) {
5045 /* Three registers of different lengths. */
5046 int src1_wide;
5047 int src2_wide;
5048 int prewiden;
5049 /* prewiden, src1_wide, src2_wide */
5050 static const int neon_3reg_wide[16][3] = {
5051 {1, 0, 0}, /* VADDL */
5052 {1, 1, 0}, /* VADDW */
5053 {1, 0, 0}, /* VSUBL */
5054 {1, 1, 0}, /* VSUBW */
5055 {0, 1, 1}, /* VADDHN */
5056 {0, 0, 0}, /* VABAL */
5057 {0, 1, 1}, /* VSUBHN */
5058 {0, 0, 0}, /* VABDL */
5059 {0, 0, 0}, /* VMLAL */
5060 {0, 0, 0}, /* VQDMLAL */
5061 {0, 0, 0}, /* VMLSL */
5062 {0, 0, 0}, /* VQDMLSL */
5063 {0, 0, 0}, /* Integer VMULL */
5064 {0, 0, 0}, /* VQDMULL */
5065 {0, 0, 0} /* Polynomial VMULL */
5066 };
5067
5068 prewiden = neon_3reg_wide[op][0];
5069 src1_wide = neon_3reg_wide[op][1];
5070 src2_wide = neon_3reg_wide[op][2];
5071
ad69471c
PB
5072 if (size == 0 && (op == 9 || op == 11 || op == 13))
5073 return 1;
5074
9ee6e8bb
PB
5075 /* Avoid overlapping operands. Wide source operands are
5076 always aligned so will never overlap with wide
5077 destinations in problematic ways. */
8f8e3aa4 5078 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5079 tmp = neon_load_reg(rm, 1);
5080 neon_store_scratch(2, tmp);
8f8e3aa4 5081 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5082 tmp = neon_load_reg(rn, 1);
5083 neon_store_scratch(2, tmp);
9ee6e8bb 5084 }
a50f5b91 5085 TCGV_UNUSED(tmp3);
9ee6e8bb 5086 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5087 if (src1_wide) {
5088 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5089 TCGV_UNUSED(tmp);
9ee6e8bb 5090 } else {
ad69471c 5091 if (pass == 1 && rd == rn) {
dd8fbd78 5092 tmp = neon_load_scratch(2);
9ee6e8bb 5093 } else {
ad69471c
PB
5094 tmp = neon_load_reg(rn, pass);
5095 }
5096 if (prewiden) {
5097 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5098 }
5099 }
ad69471c
PB
5100 if (src2_wide) {
5101 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5102 TCGV_UNUSED(tmp2);
9ee6e8bb 5103 } else {
ad69471c 5104 if (pass == 1 && rd == rm) {
dd8fbd78 5105 tmp2 = neon_load_scratch(2);
9ee6e8bb 5106 } else {
ad69471c
PB
5107 tmp2 = neon_load_reg(rm, pass);
5108 }
5109 if (prewiden) {
5110 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5111 }
9ee6e8bb
PB
5112 }
5113 switch (op) {
5114 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5115 gen_neon_addl(size);
9ee6e8bb 5116 break;
79b0e534 5117 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5118 gen_neon_subl(size);
9ee6e8bb
PB
5119 break;
5120 case 5: case 7: /* VABAL, VABDL */
5121 switch ((size << 1) | u) {
ad69471c
PB
5122 case 0:
5123 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5124 break;
5125 case 1:
5126 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5127 break;
5128 case 2:
5129 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5130 break;
5131 case 3:
5132 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5133 break;
5134 case 4:
5135 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5136 break;
5137 case 5:
5138 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5139 break;
9ee6e8bb
PB
5140 default: abort();
5141 }
7d1b0095
PM
5142 tcg_temp_free_i32(tmp2);
5143 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5144 break;
5145 case 8: case 9: case 10: case 11: case 12: case 13:
5146 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5147 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5148 break;
5149 case 14: /* Polynomial VMULL */
e5ca24cb 5150 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5151 tcg_temp_free_i32(tmp2);
5152 tcg_temp_free_i32(tmp);
e5ca24cb 5153 break;
9ee6e8bb
PB
5154 default: /* 15 is RESERVED. */
5155 return 1;
5156 }
ebcd88ce
PM
5157 if (op == 13) {
5158 /* VQDMULL */
5159 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5160 neon_store_reg64(cpu_V0, rd + pass);
5161 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5162 /* Accumulate. */
ebcd88ce 5163 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5164 switch (op) {
4dc064e6
PM
5165 case 10: /* VMLSL */
5166 gen_neon_negl(cpu_V0, size);
5167 /* Fall through */
5168 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5169 gen_neon_addl(size);
9ee6e8bb
PB
5170 break;
5171 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5172 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5173 if (op == 11) {
5174 gen_neon_negl(cpu_V0, size);
5175 }
ad69471c
PB
5176 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5177 break;
9ee6e8bb
PB
5178 default:
5179 abort();
5180 }
ad69471c 5181 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5182 } else if (op == 4 || op == 6) {
5183 /* Narrowing operation. */
7d1b0095 5184 tmp = tcg_temp_new_i32();
79b0e534 5185 if (!u) {
9ee6e8bb 5186 switch (size) {
ad69471c
PB
5187 case 0:
5188 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5189 break;
5190 case 1:
5191 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5192 break;
5193 case 2:
5194 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5195 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5196 break;
9ee6e8bb
PB
5197 default: abort();
5198 }
5199 } else {
5200 switch (size) {
ad69471c
PB
5201 case 0:
5202 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5203 break;
5204 case 1:
5205 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5206 break;
5207 case 2:
5208 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5209 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5210 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5211 break;
9ee6e8bb
PB
5212 default: abort();
5213 }
5214 }
ad69471c
PB
5215 if (pass == 0) {
5216 tmp3 = tmp;
5217 } else {
5218 neon_store_reg(rd, 0, tmp3);
5219 neon_store_reg(rd, 1, tmp);
5220 }
9ee6e8bb
PB
5221 } else {
5222 /* Write back the result. */
ad69471c 5223 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5224 }
5225 }
5226 } else {
5227 /* Two registers and a scalar. */
5228 switch (op) {
5229 case 0: /* Integer VMLA scalar */
5230 case 1: /* Float VMLA scalar */
5231 case 4: /* Integer VMLS scalar */
5232 case 5: /* Floating point VMLS scalar */
5233 case 8: /* Integer VMUL scalar */
5234 case 9: /* Floating point VMUL scalar */
5235 case 12: /* VQDMULH scalar */
5236 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5237 tmp = neon_get_scalar(size, rm);
5238 neon_store_scratch(0, tmp);
9ee6e8bb 5239 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5240 tmp = neon_load_scratch(0);
5241 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5242 if (op == 12) {
5243 if (size == 1) {
2a3f75b4 5244 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5245 } else {
2a3f75b4 5246 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5247 }
5248 } else if (op == 13) {
5249 if (size == 1) {
2a3f75b4 5250 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5251 } else {
2a3f75b4 5252 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5253 }
5254 } else if (op & 1) {
dd8fbd78 5255 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5256 } else {
5257 switch (size) {
dd8fbd78
FN
5258 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5259 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5260 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5261 default: return 1;
5262 }
5263 }
7d1b0095 5264 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5265 if (op < 8) {
5266 /* Accumulate. */
dd8fbd78 5267 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5268 switch (op) {
5269 case 0:
dd8fbd78 5270 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5271 break;
5272 case 1:
dd8fbd78 5273 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5274 break;
5275 case 4:
dd8fbd78 5276 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5277 break;
5278 case 5:
dd8fbd78 5279 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5280 break;
5281 default:
5282 abort();
5283 }
7d1b0095 5284 tcg_temp_free_i32(tmp2);
9ee6e8bb 5285 }
dd8fbd78 5286 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5287 }
5288 break;
5289 case 2: /* VMLAL sclar */
5290 case 3: /* VQDMLAL scalar */
5291 case 6: /* VMLSL scalar */
5292 case 7: /* VQDMLSL scalar */
5293 case 10: /* VMULL scalar */
5294 case 11: /* VQDMULL scalar */
ad69471c
PB
5295 if (size == 0 && (op == 3 || op == 7 || op == 11))
5296 return 1;
5297
dd8fbd78 5298 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5299 /* We need a copy of tmp2 because gen_neon_mull
5300 * deletes it during pass 0. */
7d1b0095 5301 tmp4 = tcg_temp_new_i32();
c6067f04 5302 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5303 tmp3 = neon_load_reg(rn, 1);
ad69471c 5304
9ee6e8bb 5305 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5306 if (pass == 0) {
5307 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5308 } else {
dd8fbd78 5309 tmp = tmp3;
c6067f04 5310 tmp2 = tmp4;
9ee6e8bb 5311 }
ad69471c 5312 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5313 if (op != 11) {
5314 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5315 }
9ee6e8bb 5316 switch (op) {
4dc064e6
PM
5317 case 6:
5318 gen_neon_negl(cpu_V0, size);
5319 /* Fall through */
5320 case 2:
ad69471c 5321 gen_neon_addl(size);
9ee6e8bb
PB
5322 break;
5323 case 3: case 7:
ad69471c 5324 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5325 if (op == 7) {
5326 gen_neon_negl(cpu_V0, size);
5327 }
ad69471c 5328 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5329 break;
5330 case 10:
5331 /* no-op */
5332 break;
5333 case 11:
ad69471c 5334 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5335 break;
5336 default:
5337 abort();
5338 }
ad69471c 5339 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5340 }
dd8fbd78 5341
dd8fbd78 5342
9ee6e8bb
PB
5343 break;
5344 default: /* 14 and 15 are RESERVED */
5345 return 1;
5346 }
5347 }
5348 } else { /* size == 3 */
5349 if (!u) {
5350 /* Extract. */
9ee6e8bb 5351 imm = (insn >> 8) & 0xf;
ad69471c
PB
5352
5353 if (imm > 7 && !q)
5354 return 1;
5355
5356 if (imm == 0) {
5357 neon_load_reg64(cpu_V0, rn);
5358 if (q) {
5359 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5360 }
ad69471c
PB
5361 } else if (imm == 8) {
5362 neon_load_reg64(cpu_V0, rn + 1);
5363 if (q) {
5364 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5365 }
ad69471c 5366 } else if (q) {
a7812ae4 5367 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5368 if (imm < 8) {
5369 neon_load_reg64(cpu_V0, rn);
a7812ae4 5370 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5371 } else {
5372 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5373 neon_load_reg64(tmp64, rm);
ad69471c
PB
5374 }
5375 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5376 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5377 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5378 if (imm < 8) {
5379 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5380 } else {
ad69471c
PB
5381 neon_load_reg64(cpu_V1, rm + 1);
5382 imm -= 8;
9ee6e8bb 5383 }
ad69471c 5384 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5385 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5386 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5387 tcg_temp_free_i64(tmp64);
ad69471c 5388 } else {
a7812ae4 5389 /* BUGFIX */
ad69471c 5390 neon_load_reg64(cpu_V0, rn);
a7812ae4 5391 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5392 neon_load_reg64(cpu_V1, rm);
a7812ae4 5393 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5394 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5395 }
5396 neon_store_reg64(cpu_V0, rd);
5397 if (q) {
5398 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5399 }
5400 } else if ((insn & (1 << 11)) == 0) {
5401 /* Two register misc. */
5402 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5403 size = (insn >> 18) & 3;
5404 switch (op) {
5405 case 0: /* VREV64 */
5406 if (size == 3)
5407 return 1;
5408 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5409 tmp = neon_load_reg(rm, pass * 2);
5410 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5411 switch (size) {
dd8fbd78
FN
5412 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5413 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5414 case 2: /* no-op */ break;
5415 default: abort();
5416 }
dd8fbd78 5417 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5418 if (size == 2) {
dd8fbd78 5419 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5420 } else {
9ee6e8bb 5421 switch (size) {
dd8fbd78
FN
5422 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5423 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5424 default: abort();
5425 }
dd8fbd78 5426 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5427 }
5428 }
5429 break;
5430 case 4: case 5: /* VPADDL */
5431 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5432 if (size == 3)
5433 return 1;
ad69471c
PB
5434 for (pass = 0; pass < q + 1; pass++) {
5435 tmp = neon_load_reg(rm, pass * 2);
5436 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5437 tmp = neon_load_reg(rm, pass * 2 + 1);
5438 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5439 switch (size) {
5440 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5441 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5442 case 2: tcg_gen_add_i64(CPU_V001); break;
5443 default: abort();
5444 }
9ee6e8bb
PB
5445 if (op >= 12) {
5446 /* Accumulate. */
ad69471c
PB
5447 neon_load_reg64(cpu_V1, rd + pass);
5448 gen_neon_addl(size);
9ee6e8bb 5449 }
ad69471c 5450 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5451 }
5452 break;
5453 case 33: /* VTRN */
5454 if (size == 2) {
5455 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5456 tmp = neon_load_reg(rm, n);
5457 tmp2 = neon_load_reg(rd, n + 1);
5458 neon_store_reg(rm, n, tmp2);
5459 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5460 }
5461 } else {
5462 goto elementwise;
5463 }
5464 break;
5465 case 34: /* VUZP */
02acedf9 5466 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5467 return 1;
9ee6e8bb
PB
5468 }
5469 break;
5470 case 35: /* VZIP */
d68a6f3a 5471 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5472 return 1;
9ee6e8bb
PB
5473 }
5474 break;
5475 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5476 if (size == 3)
5477 return 1;
a50f5b91 5478 TCGV_UNUSED(tmp2);
9ee6e8bb 5479 for (pass = 0; pass < 2; pass++) {
ad69471c 5480 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5481 tmp = tcg_temp_new_i32();
c33171c7 5482 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
ad69471c
PB
5483 if (pass == 0) {
5484 tmp2 = tmp;
5485 } else {
5486 neon_store_reg(rd, 0, tmp2);
5487 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5488 }
9ee6e8bb
PB
5489 }
5490 break;
5491 case 38: /* VSHLL */
ad69471c 5492 if (q || size == 3)
9ee6e8bb 5493 return 1;
ad69471c
PB
5494 tmp = neon_load_reg(rm, 0);
5495 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5496 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5497 if (pass == 1)
5498 tmp = tmp2;
5499 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5500 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5501 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5502 }
5503 break;
60011498
PB
5504 case 44: /* VCVT.F16.F32 */
5505 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5506 return 1;
7d1b0095
PM
5507 tmp = tcg_temp_new_i32();
5508 tmp2 = tcg_temp_new_i32();
60011498 5509 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5510 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5511 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5512 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5513 tcg_gen_shli_i32(tmp2, tmp2, 16);
5514 tcg_gen_or_i32(tmp2, tmp2, tmp);
5515 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5516 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5517 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5518 neon_store_reg(rd, 0, tmp2);
7d1b0095 5519 tmp2 = tcg_temp_new_i32();
2d981da7 5520 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5521 tcg_gen_shli_i32(tmp2, tmp2, 16);
5522 tcg_gen_or_i32(tmp2, tmp2, tmp);
5523 neon_store_reg(rd, 1, tmp2);
7d1b0095 5524 tcg_temp_free_i32(tmp);
60011498
PB
5525 break;
5526 case 46: /* VCVT.F32.F16 */
5527 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5528 return 1;
7d1b0095 5529 tmp3 = tcg_temp_new_i32();
60011498
PB
5530 tmp = neon_load_reg(rm, 0);
5531 tmp2 = neon_load_reg(rm, 1);
5532 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5533 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5534 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5535 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5536 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5537 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5538 tcg_temp_free_i32(tmp);
60011498 5539 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5540 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5541 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5542 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5543 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5544 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5545 tcg_temp_free_i32(tmp2);
5546 tcg_temp_free_i32(tmp3);
60011498 5547 break;
9ee6e8bb
PB
5548 default:
5549 elementwise:
5550 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5551 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5552 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5553 neon_reg_offset(rm, pass));
dd8fbd78 5554 TCGV_UNUSED(tmp);
9ee6e8bb 5555 } else {
dd8fbd78 5556 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5557 }
5558 switch (op) {
5559 case 1: /* VREV32 */
5560 switch (size) {
dd8fbd78
FN
5561 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5562 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5563 default: return 1;
5564 }
5565 break;
5566 case 2: /* VREV16 */
5567 if (size != 0)
5568 return 1;
dd8fbd78 5569 gen_rev16(tmp);
9ee6e8bb 5570 break;
9ee6e8bb
PB
5571 case 8: /* CLS */
5572 switch (size) {
dd8fbd78
FN
5573 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5574 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5575 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5576 default: return 1;
5577 }
5578 break;
5579 case 9: /* CLZ */
5580 switch (size) {
dd8fbd78
FN
5581 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5582 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5583 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5584 default: return 1;
5585 }
5586 break;
5587 case 10: /* CNT */
5588 if (size != 0)
5589 return 1;
dd8fbd78 5590 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5591 break;
5592 case 11: /* VNOT */
5593 if (size != 0)
5594 return 1;
dd8fbd78 5595 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5596 break;
5597 case 14: /* VQABS */
5598 switch (size) {
2a3f75b4
PM
5599 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5600 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5601 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
9ee6e8bb
PB
5602 default: return 1;
5603 }
5604 break;
5605 case 15: /* VQNEG */
5606 switch (size) {
2a3f75b4
PM
5607 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5608 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5609 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
9ee6e8bb
PB
5610 default: return 1;
5611 }
5612 break;
5613 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5614 tmp2 = tcg_const_i32(0);
9ee6e8bb 5615 switch(size) {
dd8fbd78
FN
5616 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5617 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5618 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5619 default: return 1;
5620 }
dd8fbd78 5621 tcg_temp_free(tmp2);
9ee6e8bb 5622 if (op == 19)
dd8fbd78 5623 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5624 break;
5625 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5626 tmp2 = tcg_const_i32(0);
9ee6e8bb 5627 switch(size) {
dd8fbd78
FN
5628 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5629 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5630 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5631 default: return 1;
5632 }
dd8fbd78 5633 tcg_temp_free(tmp2);
9ee6e8bb 5634 if (op == 20)
dd8fbd78 5635 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5636 break;
5637 case 18: /* VCEQ #0 */
dd8fbd78 5638 tmp2 = tcg_const_i32(0);
9ee6e8bb 5639 switch(size) {
dd8fbd78
FN
5640 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5641 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5642 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5643 default: return 1;
5644 }
dd8fbd78 5645 tcg_temp_free(tmp2);
9ee6e8bb
PB
5646 break;
5647 case 22: /* VABS */
5648 switch(size) {
dd8fbd78
FN
5649 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5650 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5651 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5652 default: return 1;
5653 }
5654 break;
5655 case 23: /* VNEG */
ad69471c
PB
5656 if (size == 3)
5657 return 1;
dd8fbd78
FN
5658 tmp2 = tcg_const_i32(0);
5659 gen_neon_rsb(size, tmp, tmp2);
5660 tcg_temp_free(tmp2);
9ee6e8bb 5661 break;
0e326109 5662 case 24: /* Float VCGT #0 */
dd8fbd78
FN
5663 tmp2 = tcg_const_i32(0);
5664 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5665 tcg_temp_free(tmp2);
9ee6e8bb 5666 break;
0e326109 5667 case 25: /* Float VCGE #0 */
dd8fbd78
FN
5668 tmp2 = tcg_const_i32(0);
5669 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5670 tcg_temp_free(tmp2);
9ee6e8bb
PB
5671 break;
5672 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5673 tmp2 = tcg_const_i32(0);
5674 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5675 tcg_temp_free(tmp2);
9ee6e8bb 5676 break;
0e326109
PM
5677 case 27: /* Float VCLE #0 */
5678 tmp2 = tcg_const_i32(0);
5679 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
5680 tcg_temp_free(tmp2);
5681 break;
5682 case 28: /* Float VCLT #0 */
5683 tmp2 = tcg_const_i32(0);
5684 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
5685 tcg_temp_free(tmp2);
5686 break;
9ee6e8bb 5687 case 30: /* Float VABS */
4373f3ce 5688 gen_vfp_abs(0);
9ee6e8bb
PB
5689 break;
5690 case 31: /* Float VNEG */
4373f3ce 5691 gen_vfp_neg(0);
9ee6e8bb
PB
5692 break;
5693 case 32: /* VSWP */
dd8fbd78
FN
5694 tmp2 = neon_load_reg(rd, pass);
5695 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5696 break;
5697 case 33: /* VTRN */
dd8fbd78 5698 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5699 switch (size) {
dd8fbd78
FN
5700 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5701 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5702 case 2: abort();
5703 default: return 1;
5704 }
dd8fbd78 5705 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5706 break;
5707 case 56: /* Integer VRECPE */
dd8fbd78 5708 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5709 break;
5710 case 57: /* Integer VRSQRTE */
dd8fbd78 5711 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5712 break;
5713 case 58: /* Float VRECPE */
4373f3ce 5714 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5715 break;
5716 case 59: /* Float VRSQRTE */
4373f3ce 5717 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5718 break;
5719 case 60: /* VCVT.F32.S32 */
d3587ef8 5720 gen_vfp_sito(0);
9ee6e8bb
PB
5721 break;
5722 case 61: /* VCVT.F32.U32 */
d3587ef8 5723 gen_vfp_uito(0);
9ee6e8bb
PB
5724 break;
5725 case 62: /* VCVT.S32.F32 */
d3587ef8 5726 gen_vfp_tosiz(0);
9ee6e8bb
PB
5727 break;
5728 case 63: /* VCVT.U32.F32 */
d3587ef8 5729 gen_vfp_touiz(0);
9ee6e8bb
PB
5730 break;
5731 default:
5732 /* Reserved: 21, 29, 39-56 */
5733 return 1;
5734 }
5735 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5736 tcg_gen_st_f32(cpu_F0s, cpu_env,
5737 neon_reg_offset(rd, pass));
9ee6e8bb 5738 } else {
dd8fbd78 5739 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5740 }
5741 }
5742 break;
5743 }
5744 } else if ((insn & (1 << 10)) == 0) {
5745 /* VTBL, VTBX. */
3018f259 5746 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5747 if (insn & (1 << 6)) {
8f8e3aa4 5748 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5749 } else {
7d1b0095 5750 tmp = tcg_temp_new_i32();
8f8e3aa4 5751 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5752 }
8f8e3aa4 5753 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5754 tmp4 = tcg_const_i32(rn);
5755 tmp5 = tcg_const_i32(n);
5756 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 5757 tcg_temp_free_i32(tmp);
9ee6e8bb 5758 if (insn & (1 << 6)) {
8f8e3aa4 5759 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5760 } else {
7d1b0095 5761 tmp = tcg_temp_new_i32();
8f8e3aa4 5762 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5763 }
8f8e3aa4 5764 tmp3 = neon_load_reg(rm, 1);
b75263d6 5765 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5766 tcg_temp_free_i32(tmp5);
5767 tcg_temp_free_i32(tmp4);
8f8e3aa4 5768 neon_store_reg(rd, 0, tmp2);
3018f259 5769 neon_store_reg(rd, 1, tmp3);
7d1b0095 5770 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5771 } else if ((insn & 0x380) == 0) {
5772 /* VDUP */
5773 if (insn & (1 << 19)) {
dd8fbd78 5774 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5775 } else {
dd8fbd78 5776 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5777 }
5778 if (insn & (1 << 16)) {
dd8fbd78 5779 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5780 } else if (insn & (1 << 17)) {
5781 if ((insn >> 18) & 1)
dd8fbd78 5782 gen_neon_dup_high16(tmp);
9ee6e8bb 5783 else
dd8fbd78 5784 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5785 }
5786 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 5787 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
5788 tcg_gen_mov_i32(tmp2, tmp);
5789 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5790 }
7d1b0095 5791 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5792 } else {
5793 return 1;
5794 }
5795 }
5796 }
5797 return 0;
5798}
5799
fe1479c3
PB
5800static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5801{
5802 int crn = (insn >> 16) & 0xf;
5803 int crm = insn & 0xf;
5804 int op1 = (insn >> 21) & 7;
5805 int op2 = (insn >> 5) & 7;
5806 int rt = (insn >> 12) & 0xf;
5807 TCGv tmp;
5808
ca27c052
PM
5809 /* Minimal set of debug registers, since we don't support debug */
5810 if (op1 == 0 && crn == 0 && op2 == 0) {
5811 switch (crm) {
5812 case 0:
5813 /* DBGDIDR: just RAZ. In particular this means the
5814 * "debug architecture version" bits will read as
5815 * a reserved value, which should cause Linux to
5816 * not try to use the debug hardware.
5817 */
5818 tmp = tcg_const_i32(0);
5819 store_reg(s, rt, tmp);
5820 return 0;
5821 case 1:
5822 case 2:
5823 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
5824 * don't implement memory mapped debug components
5825 */
5826 if (ENABLE_ARCH_7) {
5827 tmp = tcg_const_i32(0);
5828 store_reg(s, rt, tmp);
5829 return 0;
5830 }
5831 break;
5832 default:
5833 break;
5834 }
5835 }
5836
fe1479c3
PB
5837 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5838 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5839 /* TEECR */
5840 if (IS_USER(s))
5841 return 1;
5842 tmp = load_cpu_field(teecr);
5843 store_reg(s, rt, tmp);
5844 return 0;
5845 }
5846 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5847 /* TEEHBR */
5848 if (IS_USER(s) && (env->teecr & 1))
5849 return 1;
5850 tmp = load_cpu_field(teehbr);
5851 store_reg(s, rt, tmp);
5852 return 0;
5853 }
5854 }
5855 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5856 op1, crn, crm, op2);
5857 return 1;
5858}
5859
5860static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5861{
5862 int crn = (insn >> 16) & 0xf;
5863 int crm = insn & 0xf;
5864 int op1 = (insn >> 21) & 7;
5865 int op2 = (insn >> 5) & 7;
5866 int rt = (insn >> 12) & 0xf;
5867 TCGv tmp;
5868
5869 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5870 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5871 /* TEECR */
5872 if (IS_USER(s))
5873 return 1;
5874 tmp = load_reg(s, rt);
5875 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 5876 tcg_temp_free_i32(tmp);
fe1479c3
PB
5877 return 0;
5878 }
5879 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5880 /* TEEHBR */
5881 if (IS_USER(s) && (env->teecr & 1))
5882 return 1;
5883 tmp = load_reg(s, rt);
5884 store_cpu_field(tmp, teehbr);
5885 return 0;
5886 }
5887 }
5888 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5889 op1, crn, crm, op2);
5890 return 1;
5891}
5892
9ee6e8bb
PB
5893static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5894{
5895 int cpnum;
5896
5897 cpnum = (insn >> 8) & 0xf;
5898 if (arm_feature(env, ARM_FEATURE_XSCALE)
5899 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5900 return 1;
5901
5902 switch (cpnum) {
5903 case 0:
5904 case 1:
5905 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5906 return disas_iwmmxt_insn(env, s, insn);
5907 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5908 return disas_dsp_insn(env, s, insn);
5909 }
5910 return 1;
5911 case 10:
5912 case 11:
5913 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5914 case 14:
5915 /* Coprocessors 7-15 are architecturally reserved by ARM.
5916 Unfortunately Intel decided to ignore this. */
5917 if (arm_feature(env, ARM_FEATURE_XSCALE))
5918 goto board;
5919 if (insn & (1 << 20))
5920 return disas_cp14_read(env, s, insn);
5921 else
5922 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5923 case 15:
5924 return disas_cp15_insn (env, s, insn);
5925 default:
fe1479c3 5926 board:
9ee6e8bb
PB
5927 /* Unknown coprocessor. See if the board has hooked it. */
5928 return disas_cp_insn (env, s, insn);
5929 }
5930}
5931
5e3f878a
PB
5932
5933/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5934static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5935{
5936 TCGv tmp;
7d1b0095 5937 tmp = tcg_temp_new_i32();
5e3f878a
PB
5938 tcg_gen_trunc_i64_i32(tmp, val);
5939 store_reg(s, rlow, tmp);
7d1b0095 5940 tmp = tcg_temp_new_i32();
5e3f878a
PB
5941 tcg_gen_shri_i64(val, val, 32);
5942 tcg_gen_trunc_i64_i32(tmp, val);
5943 store_reg(s, rhigh, tmp);
5944}
5945
5946/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5947static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5948{
a7812ae4 5949 TCGv_i64 tmp;
5e3f878a
PB
5950 TCGv tmp2;
5951
36aa55dc 5952 /* Load value and extend to 64 bits. */
a7812ae4 5953 tmp = tcg_temp_new_i64();
5e3f878a
PB
5954 tmp2 = load_reg(s, rlow);
5955 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 5956 tcg_temp_free_i32(tmp2);
5e3f878a 5957 tcg_gen_add_i64(val, val, tmp);
b75263d6 5958 tcg_temp_free_i64(tmp);
5e3f878a
PB
5959}
5960
5961/* load and add a 64-bit value from a register pair. */
a7812ae4 5962static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5963{
a7812ae4 5964 TCGv_i64 tmp;
36aa55dc
PB
5965 TCGv tmpl;
5966 TCGv tmph;
5e3f878a
PB
5967
5968 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5969 tmpl = load_reg(s, rlow);
5970 tmph = load_reg(s, rhigh);
a7812ae4 5971 tmp = tcg_temp_new_i64();
36aa55dc 5972 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
5973 tcg_temp_free_i32(tmpl);
5974 tcg_temp_free_i32(tmph);
5e3f878a 5975 tcg_gen_add_i64(val, val, tmp);
b75263d6 5976 tcg_temp_free_i64(tmp);
5e3f878a
PB
5977}
5978
5979/* Set N and Z flags from a 64-bit value. */
a7812ae4 5980static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 5981{
7d1b0095 5982 TCGv tmp = tcg_temp_new_i32();
5e3f878a 5983 gen_helper_logicq_cc(tmp, val);
6fbe23d5 5984 gen_logic_CC(tmp);
7d1b0095 5985 tcg_temp_free_i32(tmp);
5e3f878a
PB
5986}
5987
426f5abc
PB
5988/* Load/Store exclusive instructions are implemented by remembering
5989 the value/address loaded, and seeing if these are the same
5990 when the store is performed. This should be is sufficient to implement
5991 the architecturally mandated semantics, and avoids having to monitor
5992 regular stores.
5993
5994 In system emulation mode only one CPU will be running at once, so
5995 this sequence is effectively atomic. In user emulation mode we
5996 throw an exception and handle the atomic operation elsewhere. */
5997static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5998 TCGv addr, int size)
5999{
6000 TCGv tmp;
6001
6002 switch (size) {
6003 case 0:
6004 tmp = gen_ld8u(addr, IS_USER(s));
6005 break;
6006 case 1:
6007 tmp = gen_ld16u(addr, IS_USER(s));
6008 break;
6009 case 2:
6010 case 3:
6011 tmp = gen_ld32(addr, IS_USER(s));
6012 break;
6013 default:
6014 abort();
6015 }
6016 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6017 store_reg(s, rt, tmp);
6018 if (size == 3) {
7d1b0095 6019 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6020 tcg_gen_addi_i32(tmp2, addr, 4);
6021 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6022 tcg_temp_free_i32(tmp2);
426f5abc
PB
6023 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6024 store_reg(s, rt2, tmp);
6025 }
6026 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6027}
6028
6029static void gen_clrex(DisasContext *s)
6030{
6031 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6032}
6033
6034#ifdef CONFIG_USER_ONLY
6035static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6036 TCGv addr, int size)
6037{
6038 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6039 tcg_gen_movi_i32(cpu_exclusive_info,
6040 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6041 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6042}
6043#else
6044static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6045 TCGv addr, int size)
6046{
6047 TCGv tmp;
6048 int done_label;
6049 int fail_label;
6050
6051 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6052 [addr] = {Rt};
6053 {Rd} = 0;
6054 } else {
6055 {Rd} = 1;
6056 } */
6057 fail_label = gen_new_label();
6058 done_label = gen_new_label();
6059 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6060 switch (size) {
6061 case 0:
6062 tmp = gen_ld8u(addr, IS_USER(s));
6063 break;
6064 case 1:
6065 tmp = gen_ld16u(addr, IS_USER(s));
6066 break;
6067 case 2:
6068 case 3:
6069 tmp = gen_ld32(addr, IS_USER(s));
6070 break;
6071 default:
6072 abort();
6073 }
6074 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6075 tcg_temp_free_i32(tmp);
426f5abc 6076 if (size == 3) {
7d1b0095 6077 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6078 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6079 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6080 tcg_temp_free_i32(tmp2);
426f5abc 6081 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6082 tcg_temp_free_i32(tmp);
426f5abc
PB
6083 }
6084 tmp = load_reg(s, rt);
6085 switch (size) {
6086 case 0:
6087 gen_st8(tmp, addr, IS_USER(s));
6088 break;
6089 case 1:
6090 gen_st16(tmp, addr, IS_USER(s));
6091 break;
6092 case 2:
6093 case 3:
6094 gen_st32(tmp, addr, IS_USER(s));
6095 break;
6096 default:
6097 abort();
6098 }
6099 if (size == 3) {
6100 tcg_gen_addi_i32(addr, addr, 4);
6101 tmp = load_reg(s, rt2);
6102 gen_st32(tmp, addr, IS_USER(s));
6103 }
6104 tcg_gen_movi_i32(cpu_R[rd], 0);
6105 tcg_gen_br(done_label);
6106 gen_set_label(fail_label);
6107 tcg_gen_movi_i32(cpu_R[rd], 1);
6108 gen_set_label(done_label);
6109 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6110}
6111#endif
6112
9ee6e8bb
PB
6113static void disas_arm_insn(CPUState * env, DisasContext *s)
6114{
6115 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6116 TCGv tmp;
3670669c 6117 TCGv tmp2;
6ddbc6e4 6118 TCGv tmp3;
b0109805 6119 TCGv addr;
a7812ae4 6120 TCGv_i64 tmp64;
9ee6e8bb
PB
6121
6122 insn = ldl_code(s->pc);
6123 s->pc += 4;
6124
6125 /* M variants do not implement ARM mode. */
6126 if (IS_M(env))
6127 goto illegal_op;
6128 cond = insn >> 28;
6129 if (cond == 0xf){
6130 /* Unconditional instructions. */
6131 if (((insn >> 25) & 7) == 1) {
6132 /* NEON Data processing. */
6133 if (!arm_feature(env, ARM_FEATURE_NEON))
6134 goto illegal_op;
6135
6136 if (disas_neon_data_insn(env, s, insn))
6137 goto illegal_op;
6138 return;
6139 }
6140 if ((insn & 0x0f100000) == 0x04000000) {
6141 /* NEON load/store. */
6142 if (!arm_feature(env, ARM_FEATURE_NEON))
6143 goto illegal_op;
6144
6145 if (disas_neon_ls_insn(env, s, insn))
6146 goto illegal_op;
6147 return;
6148 }
3d185e5d
PM
6149 if (((insn & 0x0f30f000) == 0x0510f000) ||
6150 ((insn & 0x0f30f010) == 0x0710f000)) {
6151 if ((insn & (1 << 22)) == 0) {
6152 /* PLDW; v7MP */
6153 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6154 goto illegal_op;
6155 }
6156 }
6157 /* Otherwise PLD; v5TE+ */
6158 return;
6159 }
6160 if (((insn & 0x0f70f000) == 0x0450f000) ||
6161 ((insn & 0x0f70f010) == 0x0650f000)) {
6162 ARCH(7);
6163 return; /* PLI; V7 */
6164 }
6165 if (((insn & 0x0f700000) == 0x04100000) ||
6166 ((insn & 0x0f700010) == 0x06100000)) {
6167 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6168 goto illegal_op;
6169 }
6170 return; /* v7MP: Unallocated memory hint: must NOP */
6171 }
6172
6173 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6174 ARCH(6);
6175 /* setend */
6176 if (insn & (1 << 9)) {
6177 /* BE8 mode not implemented. */
6178 goto illegal_op;
6179 }
6180 return;
6181 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6182 switch ((insn >> 4) & 0xf) {
6183 case 1: /* clrex */
6184 ARCH(6K);
426f5abc 6185 gen_clrex(s);
9ee6e8bb
PB
6186 return;
6187 case 4: /* dsb */
6188 case 5: /* dmb */
6189 case 6: /* isb */
6190 ARCH(7);
6191 /* We don't emulate caches so these are a no-op. */
6192 return;
6193 default:
6194 goto illegal_op;
6195 }
6196 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6197 /* srs */
c67b6b71 6198 int32_t offset;
9ee6e8bb
PB
6199 if (IS_USER(s))
6200 goto illegal_op;
6201 ARCH(6);
6202 op1 = (insn & 0x1f);
7d1b0095 6203 addr = tcg_temp_new_i32();
39ea3d4e
PM
6204 tmp = tcg_const_i32(op1);
6205 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6206 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6207 i = (insn >> 23) & 3;
6208 switch (i) {
6209 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6210 case 1: offset = 0; break; /* IA */
6211 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6212 case 3: offset = 4; break; /* IB */
6213 default: abort();
6214 }
6215 if (offset)
b0109805
PB
6216 tcg_gen_addi_i32(addr, addr, offset);
6217 tmp = load_reg(s, 14);
6218 gen_st32(tmp, addr, 0);
c67b6b71 6219 tmp = load_cpu_field(spsr);
b0109805
PB
6220 tcg_gen_addi_i32(addr, addr, 4);
6221 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6222 if (insn & (1 << 21)) {
6223 /* Base writeback. */
6224 switch (i) {
6225 case 0: offset = -8; break;
c67b6b71
FN
6226 case 1: offset = 4; break;
6227 case 2: offset = -4; break;
9ee6e8bb
PB
6228 case 3: offset = 0; break;
6229 default: abort();
6230 }
6231 if (offset)
c67b6b71 6232 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6233 tmp = tcg_const_i32(op1);
6234 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6235 tcg_temp_free_i32(tmp);
7d1b0095 6236 tcg_temp_free_i32(addr);
b0109805 6237 } else {
7d1b0095 6238 tcg_temp_free_i32(addr);
9ee6e8bb 6239 }
a990f58f 6240 return;
ea825eee 6241 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6242 /* rfe */
c67b6b71 6243 int32_t offset;
9ee6e8bb
PB
6244 if (IS_USER(s))
6245 goto illegal_op;
6246 ARCH(6);
6247 rn = (insn >> 16) & 0xf;
b0109805 6248 addr = load_reg(s, rn);
9ee6e8bb
PB
6249 i = (insn >> 23) & 3;
6250 switch (i) {
b0109805 6251 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6252 case 1: offset = 0; break; /* IA */
6253 case 2: offset = -8; break; /* DB */
b0109805 6254 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6255 default: abort();
6256 }
6257 if (offset)
b0109805
PB
6258 tcg_gen_addi_i32(addr, addr, offset);
6259 /* Load PC into tmp and CPSR into tmp2. */
6260 tmp = gen_ld32(addr, 0);
6261 tcg_gen_addi_i32(addr, addr, 4);
6262 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6263 if (insn & (1 << 21)) {
6264 /* Base writeback. */
6265 switch (i) {
b0109805 6266 case 0: offset = -8; break;
c67b6b71
FN
6267 case 1: offset = 4; break;
6268 case 2: offset = -4; break;
b0109805 6269 case 3: offset = 0; break;
9ee6e8bb
PB
6270 default: abort();
6271 }
6272 if (offset)
b0109805
PB
6273 tcg_gen_addi_i32(addr, addr, offset);
6274 store_reg(s, rn, addr);
6275 } else {
7d1b0095 6276 tcg_temp_free_i32(addr);
9ee6e8bb 6277 }
b0109805 6278 gen_rfe(s, tmp, tmp2);
c67b6b71 6279 return;
9ee6e8bb
PB
6280 } else if ((insn & 0x0e000000) == 0x0a000000) {
6281 /* branch link and change to thumb (blx <offset>) */
6282 int32_t offset;
6283
6284 val = (uint32_t)s->pc;
7d1b0095 6285 tmp = tcg_temp_new_i32();
d9ba4830
PB
6286 tcg_gen_movi_i32(tmp, val);
6287 store_reg(s, 14, tmp);
9ee6e8bb
PB
6288 /* Sign-extend the 24-bit offset */
6289 offset = (((int32_t)insn) << 8) >> 8;
6290 /* offset * 4 + bit24 * 2 + (thumb bit) */
6291 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6292 /* pipeline offset */
6293 val += 4;
d9ba4830 6294 gen_bx_im(s, val);
9ee6e8bb
PB
6295 return;
6296 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6297 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6298 /* iWMMXt register transfer. */
6299 if (env->cp15.c15_cpar & (1 << 1))
6300 if (!disas_iwmmxt_insn(env, s, insn))
6301 return;
6302 }
6303 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6304 /* Coprocessor double register transfer. */
6305 } else if ((insn & 0x0f000010) == 0x0e000010) {
6306 /* Additional coprocessor register transfer. */
7997d92f 6307 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6308 uint32_t mask;
6309 uint32_t val;
6310 /* cps (privileged) */
6311 if (IS_USER(s))
6312 return;
6313 mask = val = 0;
6314 if (insn & (1 << 19)) {
6315 if (insn & (1 << 8))
6316 mask |= CPSR_A;
6317 if (insn & (1 << 7))
6318 mask |= CPSR_I;
6319 if (insn & (1 << 6))
6320 mask |= CPSR_F;
6321 if (insn & (1 << 18))
6322 val |= mask;
6323 }
7997d92f 6324 if (insn & (1 << 17)) {
9ee6e8bb
PB
6325 mask |= CPSR_M;
6326 val |= (insn & 0x1f);
6327 }
6328 if (mask) {
2fbac54b 6329 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6330 }
6331 return;
6332 }
6333 goto illegal_op;
6334 }
6335 if (cond != 0xe) {
6336 /* if not always execute, we generate a conditional jump to
6337 next instruction */
6338 s->condlabel = gen_new_label();
d9ba4830 6339 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6340 s->condjmp = 1;
6341 }
6342 if ((insn & 0x0f900000) == 0x03000000) {
6343 if ((insn & (1 << 21)) == 0) {
6344 ARCH(6T2);
6345 rd = (insn >> 12) & 0xf;
6346 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6347 if ((insn & (1 << 22)) == 0) {
6348 /* MOVW */
7d1b0095 6349 tmp = tcg_temp_new_i32();
5e3f878a 6350 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6351 } else {
6352 /* MOVT */
5e3f878a 6353 tmp = load_reg(s, rd);
86831435 6354 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6355 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6356 }
5e3f878a 6357 store_reg(s, rd, tmp);
9ee6e8bb
PB
6358 } else {
6359 if (((insn >> 12) & 0xf) != 0xf)
6360 goto illegal_op;
6361 if (((insn >> 16) & 0xf) == 0) {
6362 gen_nop_hint(s, insn & 0xff);
6363 } else {
6364 /* CPSR = immediate */
6365 val = insn & 0xff;
6366 shift = ((insn >> 8) & 0xf) * 2;
6367 if (shift)
6368 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6369 i = ((insn & (1 << 22)) != 0);
2fbac54b 6370 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6371 goto illegal_op;
6372 }
6373 }
6374 } else if ((insn & 0x0f900000) == 0x01000000
6375 && (insn & 0x00000090) != 0x00000090) {
6376 /* miscellaneous instructions */
6377 op1 = (insn >> 21) & 3;
6378 sh = (insn >> 4) & 0xf;
6379 rm = insn & 0xf;
6380 switch (sh) {
6381 case 0x0: /* move program status register */
6382 if (op1 & 1) {
6383 /* PSR = reg */
2fbac54b 6384 tmp = load_reg(s, rm);
9ee6e8bb 6385 i = ((op1 & 2) != 0);
2fbac54b 6386 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6387 goto illegal_op;
6388 } else {
6389 /* reg = PSR */
6390 rd = (insn >> 12) & 0xf;
6391 if (op1 & 2) {
6392 if (IS_USER(s))
6393 goto illegal_op;
d9ba4830 6394 tmp = load_cpu_field(spsr);
9ee6e8bb 6395 } else {
7d1b0095 6396 tmp = tcg_temp_new_i32();
d9ba4830 6397 gen_helper_cpsr_read(tmp);
9ee6e8bb 6398 }
d9ba4830 6399 store_reg(s, rd, tmp);
9ee6e8bb
PB
6400 }
6401 break;
6402 case 0x1:
6403 if (op1 == 1) {
6404 /* branch/exchange thumb (bx). */
d9ba4830
PB
6405 tmp = load_reg(s, rm);
6406 gen_bx(s, tmp);
9ee6e8bb
PB
6407 } else if (op1 == 3) {
6408 /* clz */
6409 rd = (insn >> 12) & 0xf;
1497c961
PB
6410 tmp = load_reg(s, rm);
6411 gen_helper_clz(tmp, tmp);
6412 store_reg(s, rd, tmp);
9ee6e8bb
PB
6413 } else {
6414 goto illegal_op;
6415 }
6416 break;
6417 case 0x2:
6418 if (op1 == 1) {
6419 ARCH(5J); /* bxj */
6420 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6421 tmp = load_reg(s, rm);
6422 gen_bx(s, tmp);
9ee6e8bb
PB
6423 } else {
6424 goto illegal_op;
6425 }
6426 break;
6427 case 0x3:
6428 if (op1 != 1)
6429 goto illegal_op;
6430
6431 /* branch link/exchange thumb (blx) */
d9ba4830 6432 tmp = load_reg(s, rm);
7d1b0095 6433 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6434 tcg_gen_movi_i32(tmp2, s->pc);
6435 store_reg(s, 14, tmp2);
6436 gen_bx(s, tmp);
9ee6e8bb
PB
6437 break;
6438 case 0x5: /* saturating add/subtract */
6439 rd = (insn >> 12) & 0xf;
6440 rn = (insn >> 16) & 0xf;
b40d0353 6441 tmp = load_reg(s, rm);
5e3f878a 6442 tmp2 = load_reg(s, rn);
9ee6e8bb 6443 if (op1 & 2)
5e3f878a 6444 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6445 if (op1 & 1)
5e3f878a 6446 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6447 else
5e3f878a 6448 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6449 tcg_temp_free_i32(tmp2);
5e3f878a 6450 store_reg(s, rd, tmp);
9ee6e8bb 6451 break;
49e14940
AL
6452 case 7:
6453 /* SMC instruction (op1 == 3)
6454 and undefined instructions (op1 == 0 || op1 == 2)
6455 will trap */
6456 if (op1 != 1) {
6457 goto illegal_op;
6458 }
6459 /* bkpt */
bc4a0de0 6460 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6461 break;
6462 case 0x8: /* signed multiply */
6463 case 0xa:
6464 case 0xc:
6465 case 0xe:
6466 rs = (insn >> 8) & 0xf;
6467 rn = (insn >> 12) & 0xf;
6468 rd = (insn >> 16) & 0xf;
6469 if (op1 == 1) {
6470 /* (32 * 16) >> 16 */
5e3f878a
PB
6471 tmp = load_reg(s, rm);
6472 tmp2 = load_reg(s, rs);
9ee6e8bb 6473 if (sh & 4)
5e3f878a 6474 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6475 else
5e3f878a 6476 gen_sxth(tmp2);
a7812ae4
PB
6477 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6478 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6479 tmp = tcg_temp_new_i32();
a7812ae4 6480 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6481 tcg_temp_free_i64(tmp64);
9ee6e8bb 6482 if ((sh & 2) == 0) {
5e3f878a
PB
6483 tmp2 = load_reg(s, rn);
6484 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6485 tcg_temp_free_i32(tmp2);
9ee6e8bb 6486 }
5e3f878a 6487 store_reg(s, rd, tmp);
9ee6e8bb
PB
6488 } else {
6489 /* 16 * 16 */
5e3f878a
PB
6490 tmp = load_reg(s, rm);
6491 tmp2 = load_reg(s, rs);
6492 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6493 tcg_temp_free_i32(tmp2);
9ee6e8bb 6494 if (op1 == 2) {
a7812ae4
PB
6495 tmp64 = tcg_temp_new_i64();
6496 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6497 tcg_temp_free_i32(tmp);
a7812ae4
PB
6498 gen_addq(s, tmp64, rn, rd);
6499 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6500 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6501 } else {
6502 if (op1 == 0) {
5e3f878a
PB
6503 tmp2 = load_reg(s, rn);
6504 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6505 tcg_temp_free_i32(tmp2);
9ee6e8bb 6506 }
5e3f878a 6507 store_reg(s, rd, tmp);
9ee6e8bb
PB
6508 }
6509 }
6510 break;
6511 default:
6512 goto illegal_op;
6513 }
6514 } else if (((insn & 0x0e000000) == 0 &&
6515 (insn & 0x00000090) != 0x90) ||
6516 ((insn & 0x0e000000) == (1 << 25))) {
6517 int set_cc, logic_cc, shiftop;
6518
6519 op1 = (insn >> 21) & 0xf;
6520 set_cc = (insn >> 20) & 1;
6521 logic_cc = table_logic_cc[op1] & set_cc;
6522
6523 /* data processing instruction */
6524 if (insn & (1 << 25)) {
6525 /* immediate operand */
6526 val = insn & 0xff;
6527 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6528 if (shift) {
9ee6e8bb 6529 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6530 }
7d1b0095 6531 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6532 tcg_gen_movi_i32(tmp2, val);
6533 if (logic_cc && shift) {
6534 gen_set_CF_bit31(tmp2);
6535 }
9ee6e8bb
PB
6536 } else {
6537 /* register */
6538 rm = (insn) & 0xf;
e9bb4aa9 6539 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6540 shiftop = (insn >> 5) & 3;
6541 if (!(insn & (1 << 4))) {
6542 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6543 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6544 } else {
6545 rs = (insn >> 8) & 0xf;
8984bd2e 6546 tmp = load_reg(s, rs);
e9bb4aa9 6547 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6548 }
6549 }
6550 if (op1 != 0x0f && op1 != 0x0d) {
6551 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6552 tmp = load_reg(s, rn);
6553 } else {
6554 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6555 }
6556 rd = (insn >> 12) & 0xf;
6557 switch(op1) {
6558 case 0x00:
e9bb4aa9
JR
6559 tcg_gen_and_i32(tmp, tmp, tmp2);
6560 if (logic_cc) {
6561 gen_logic_CC(tmp);
6562 }
21aeb343 6563 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6564 break;
6565 case 0x01:
e9bb4aa9
JR
6566 tcg_gen_xor_i32(tmp, tmp, tmp2);
6567 if (logic_cc) {
6568 gen_logic_CC(tmp);
6569 }
21aeb343 6570 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6571 break;
6572 case 0x02:
6573 if (set_cc && rd == 15) {
6574 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6575 if (IS_USER(s)) {
9ee6e8bb 6576 goto illegal_op;
e9bb4aa9
JR
6577 }
6578 gen_helper_sub_cc(tmp, tmp, tmp2);
6579 gen_exception_return(s, tmp);
9ee6e8bb 6580 } else {
e9bb4aa9
JR
6581 if (set_cc) {
6582 gen_helper_sub_cc(tmp, tmp, tmp2);
6583 } else {
6584 tcg_gen_sub_i32(tmp, tmp, tmp2);
6585 }
21aeb343 6586 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6587 }
6588 break;
6589 case 0x03:
e9bb4aa9
JR
6590 if (set_cc) {
6591 gen_helper_sub_cc(tmp, tmp2, tmp);
6592 } else {
6593 tcg_gen_sub_i32(tmp, tmp2, tmp);
6594 }
21aeb343 6595 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6596 break;
6597 case 0x04:
e9bb4aa9
JR
6598 if (set_cc) {
6599 gen_helper_add_cc(tmp, tmp, tmp2);
6600 } else {
6601 tcg_gen_add_i32(tmp, tmp, tmp2);
6602 }
21aeb343 6603 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6604 break;
6605 case 0x05:
e9bb4aa9
JR
6606 if (set_cc) {
6607 gen_helper_adc_cc(tmp, tmp, tmp2);
6608 } else {
6609 gen_add_carry(tmp, tmp, tmp2);
6610 }
21aeb343 6611 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6612 break;
6613 case 0x06:
e9bb4aa9
JR
6614 if (set_cc) {
6615 gen_helper_sbc_cc(tmp, tmp, tmp2);
6616 } else {
6617 gen_sub_carry(tmp, tmp, tmp2);
6618 }
21aeb343 6619 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6620 break;
6621 case 0x07:
e9bb4aa9
JR
6622 if (set_cc) {
6623 gen_helper_sbc_cc(tmp, tmp2, tmp);
6624 } else {
6625 gen_sub_carry(tmp, tmp2, tmp);
6626 }
21aeb343 6627 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6628 break;
6629 case 0x08:
6630 if (set_cc) {
e9bb4aa9
JR
6631 tcg_gen_and_i32(tmp, tmp, tmp2);
6632 gen_logic_CC(tmp);
9ee6e8bb 6633 }
7d1b0095 6634 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6635 break;
6636 case 0x09:
6637 if (set_cc) {
e9bb4aa9
JR
6638 tcg_gen_xor_i32(tmp, tmp, tmp2);
6639 gen_logic_CC(tmp);
9ee6e8bb 6640 }
7d1b0095 6641 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6642 break;
6643 case 0x0a:
6644 if (set_cc) {
e9bb4aa9 6645 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6646 }
7d1b0095 6647 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6648 break;
6649 case 0x0b:
6650 if (set_cc) {
e9bb4aa9 6651 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6652 }
7d1b0095 6653 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6654 break;
6655 case 0x0c:
e9bb4aa9
JR
6656 tcg_gen_or_i32(tmp, tmp, tmp2);
6657 if (logic_cc) {
6658 gen_logic_CC(tmp);
6659 }
21aeb343 6660 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6661 break;
6662 case 0x0d:
6663 if (logic_cc && rd == 15) {
6664 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6665 if (IS_USER(s)) {
9ee6e8bb 6666 goto illegal_op;
e9bb4aa9
JR
6667 }
6668 gen_exception_return(s, tmp2);
9ee6e8bb 6669 } else {
e9bb4aa9
JR
6670 if (logic_cc) {
6671 gen_logic_CC(tmp2);
6672 }
21aeb343 6673 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6674 }
6675 break;
6676 case 0x0e:
f669df27 6677 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6678 if (logic_cc) {
6679 gen_logic_CC(tmp);
6680 }
21aeb343 6681 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6682 break;
6683 default:
6684 case 0x0f:
e9bb4aa9
JR
6685 tcg_gen_not_i32(tmp2, tmp2);
6686 if (logic_cc) {
6687 gen_logic_CC(tmp2);
6688 }
21aeb343 6689 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6690 break;
6691 }
e9bb4aa9 6692 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 6693 tcg_temp_free_i32(tmp2);
e9bb4aa9 6694 }
9ee6e8bb
PB
6695 } else {
6696 /* other instructions */
6697 op1 = (insn >> 24) & 0xf;
6698 switch(op1) {
6699 case 0x0:
6700 case 0x1:
6701 /* multiplies, extra load/stores */
6702 sh = (insn >> 5) & 3;
6703 if (sh == 0) {
6704 if (op1 == 0x0) {
6705 rd = (insn >> 16) & 0xf;
6706 rn = (insn >> 12) & 0xf;
6707 rs = (insn >> 8) & 0xf;
6708 rm = (insn) & 0xf;
6709 op1 = (insn >> 20) & 0xf;
6710 switch (op1) {
6711 case 0: case 1: case 2: case 3: case 6:
6712 /* 32 bit mul */
5e3f878a
PB
6713 tmp = load_reg(s, rs);
6714 tmp2 = load_reg(s, rm);
6715 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 6716 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6717 if (insn & (1 << 22)) {
6718 /* Subtract (mls) */
6719 ARCH(6T2);
5e3f878a
PB
6720 tmp2 = load_reg(s, rn);
6721 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 6722 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6723 } else if (insn & (1 << 21)) {
6724 /* Add */
5e3f878a
PB
6725 tmp2 = load_reg(s, rn);
6726 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 6727 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6728 }
6729 if (insn & (1 << 20))
5e3f878a
PB
6730 gen_logic_CC(tmp);
6731 store_reg(s, rd, tmp);
9ee6e8bb 6732 break;
8aac08b1
AJ
6733 case 4:
6734 /* 64 bit mul double accumulate (UMAAL) */
6735 ARCH(6);
6736 tmp = load_reg(s, rs);
6737 tmp2 = load_reg(s, rm);
6738 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6739 gen_addq_lo(s, tmp64, rn);
6740 gen_addq_lo(s, tmp64, rd);
6741 gen_storeq_reg(s, rn, rd, tmp64);
6742 tcg_temp_free_i64(tmp64);
6743 break;
6744 case 8: case 9: case 10: case 11:
6745 case 12: case 13: case 14: case 15:
6746 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
6747 tmp = load_reg(s, rs);
6748 tmp2 = load_reg(s, rm);
8aac08b1 6749 if (insn & (1 << 22)) {
a7812ae4 6750 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 6751 } else {
a7812ae4 6752 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
6753 }
6754 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 6755 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 6756 }
8aac08b1 6757 if (insn & (1 << 20)) {
a7812ae4 6758 gen_logicq_cc(tmp64);
8aac08b1 6759 }
a7812ae4 6760 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6761 tcg_temp_free_i64(tmp64);
9ee6e8bb 6762 break;
8aac08b1
AJ
6763 default:
6764 goto illegal_op;
9ee6e8bb
PB
6765 }
6766 } else {
6767 rn = (insn >> 16) & 0xf;
6768 rd = (insn >> 12) & 0xf;
6769 if (insn & (1 << 23)) {
6770 /* load/store exclusive */
86753403
PB
6771 op1 = (insn >> 21) & 0x3;
6772 if (op1)
a47f43d2 6773 ARCH(6K);
86753403
PB
6774 else
6775 ARCH(6);
3174f8e9 6776 addr = tcg_temp_local_new_i32();
98a46317 6777 load_reg_var(s, addr, rn);
9ee6e8bb 6778 if (insn & (1 << 20)) {
86753403
PB
6779 switch (op1) {
6780 case 0: /* ldrex */
426f5abc 6781 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6782 break;
6783 case 1: /* ldrexd */
426f5abc 6784 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6785 break;
6786 case 2: /* ldrexb */
426f5abc 6787 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6788 break;
6789 case 3: /* ldrexh */
426f5abc 6790 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6791 break;
6792 default:
6793 abort();
6794 }
9ee6e8bb
PB
6795 } else {
6796 rm = insn & 0xf;
86753403
PB
6797 switch (op1) {
6798 case 0: /* strex */
426f5abc 6799 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6800 break;
6801 case 1: /* strexd */
502e64fe 6802 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6803 break;
6804 case 2: /* strexb */
426f5abc 6805 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6806 break;
6807 case 3: /* strexh */
426f5abc 6808 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6809 break;
6810 default:
6811 abort();
6812 }
9ee6e8bb 6813 }
3174f8e9 6814 tcg_temp_free(addr);
9ee6e8bb
PB
6815 } else {
6816 /* SWP instruction */
6817 rm = (insn) & 0xf;
6818
8984bd2e
PB
6819 /* ??? This is not really atomic. However we know
6820 we never have multiple CPUs running in parallel,
6821 so it is good enough. */
6822 addr = load_reg(s, rn);
6823 tmp = load_reg(s, rm);
9ee6e8bb 6824 if (insn & (1 << 22)) {
8984bd2e
PB
6825 tmp2 = gen_ld8u(addr, IS_USER(s));
6826 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6827 } else {
8984bd2e
PB
6828 tmp2 = gen_ld32(addr, IS_USER(s));
6829 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6830 }
7d1b0095 6831 tcg_temp_free_i32(addr);
8984bd2e 6832 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6833 }
6834 }
6835 } else {
6836 int address_offset;
6837 int load;
6838 /* Misc load/store */
6839 rn = (insn >> 16) & 0xf;
6840 rd = (insn >> 12) & 0xf;
b0109805 6841 addr = load_reg(s, rn);
9ee6e8bb 6842 if (insn & (1 << 24))
b0109805 6843 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6844 address_offset = 0;
6845 if (insn & (1 << 20)) {
6846 /* load */
6847 switch(sh) {
6848 case 1:
b0109805 6849 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6850 break;
6851 case 2:
b0109805 6852 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6853 break;
6854 default:
6855 case 3:
b0109805 6856 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6857 break;
6858 }
6859 load = 1;
6860 } else if (sh & 2) {
6861 /* doubleword */
6862 if (sh & 1) {
6863 /* store */
b0109805
PB
6864 tmp = load_reg(s, rd);
6865 gen_st32(tmp, addr, IS_USER(s));
6866 tcg_gen_addi_i32(addr, addr, 4);
6867 tmp = load_reg(s, rd + 1);
6868 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6869 load = 0;
6870 } else {
6871 /* load */
b0109805
PB
6872 tmp = gen_ld32(addr, IS_USER(s));
6873 store_reg(s, rd, tmp);
6874 tcg_gen_addi_i32(addr, addr, 4);
6875 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6876 rd++;
6877 load = 1;
6878 }
6879 address_offset = -4;
6880 } else {
6881 /* store */
b0109805
PB
6882 tmp = load_reg(s, rd);
6883 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6884 load = 0;
6885 }
6886 /* Perform base writeback before the loaded value to
6887 ensure correct behavior with overlapping index registers.
6888 ldrd with base writeback is is undefined if the
6889 destination and index registers overlap. */
6890 if (!(insn & (1 << 24))) {
b0109805
PB
6891 gen_add_datah_offset(s, insn, address_offset, addr);
6892 store_reg(s, rn, addr);
9ee6e8bb
PB
6893 } else if (insn & (1 << 21)) {
6894 if (address_offset)
b0109805
PB
6895 tcg_gen_addi_i32(addr, addr, address_offset);
6896 store_reg(s, rn, addr);
6897 } else {
7d1b0095 6898 tcg_temp_free_i32(addr);
9ee6e8bb
PB
6899 }
6900 if (load) {
6901 /* Complete the load. */
b0109805 6902 store_reg(s, rd, tmp);
9ee6e8bb
PB
6903 }
6904 }
6905 break;
6906 case 0x4:
6907 case 0x5:
6908 goto do_ldst;
6909 case 0x6:
6910 case 0x7:
6911 if (insn & (1 << 4)) {
6912 ARCH(6);
6913 /* Armv6 Media instructions. */
6914 rm = insn & 0xf;
6915 rn = (insn >> 16) & 0xf;
2c0262af 6916 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6917 rs = (insn >> 8) & 0xf;
6918 switch ((insn >> 23) & 3) {
6919 case 0: /* Parallel add/subtract. */
6920 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6921 tmp = load_reg(s, rn);
6922 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6923 sh = (insn >> 5) & 7;
6924 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6925 goto illegal_op;
6ddbc6e4 6926 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 6927 tcg_temp_free_i32(tmp2);
6ddbc6e4 6928 store_reg(s, rd, tmp);
9ee6e8bb
PB
6929 break;
6930 case 1:
6931 if ((insn & 0x00700020) == 0) {
6c95676b 6932 /* Halfword pack. */
3670669c
PB
6933 tmp = load_reg(s, rn);
6934 tmp2 = load_reg(s, rm);
9ee6e8bb 6935 shift = (insn >> 7) & 0x1f;
3670669c
PB
6936 if (insn & (1 << 6)) {
6937 /* pkhtb */
22478e79
AZ
6938 if (shift == 0)
6939 shift = 31;
6940 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6941 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6942 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6943 } else {
6944 /* pkhbt */
22478e79
AZ
6945 if (shift)
6946 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6947 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6948 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6949 }
6950 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6951 tcg_temp_free_i32(tmp2);
3670669c 6952 store_reg(s, rd, tmp);
9ee6e8bb
PB
6953 } else if ((insn & 0x00200020) == 0x00200000) {
6954 /* [us]sat */
6ddbc6e4 6955 tmp = load_reg(s, rm);
9ee6e8bb
PB
6956 shift = (insn >> 7) & 0x1f;
6957 if (insn & (1 << 6)) {
6958 if (shift == 0)
6959 shift = 31;
6ddbc6e4 6960 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6961 } else {
6ddbc6e4 6962 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6963 }
6964 sh = (insn >> 16) & 0x1f;
40d3c433
CL
6965 tmp2 = tcg_const_i32(sh);
6966 if (insn & (1 << 22))
6967 gen_helper_usat(tmp, tmp, tmp2);
6968 else
6969 gen_helper_ssat(tmp, tmp, tmp2);
6970 tcg_temp_free_i32(tmp2);
6ddbc6e4 6971 store_reg(s, rd, tmp);
9ee6e8bb
PB
6972 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6973 /* [us]sat16 */
6ddbc6e4 6974 tmp = load_reg(s, rm);
9ee6e8bb 6975 sh = (insn >> 16) & 0x1f;
40d3c433
CL
6976 tmp2 = tcg_const_i32(sh);
6977 if (insn & (1 << 22))
6978 gen_helper_usat16(tmp, tmp, tmp2);
6979 else
6980 gen_helper_ssat16(tmp, tmp, tmp2);
6981 tcg_temp_free_i32(tmp2);
6ddbc6e4 6982 store_reg(s, rd, tmp);
9ee6e8bb
PB
6983 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6984 /* Select bytes. */
6ddbc6e4
PB
6985 tmp = load_reg(s, rn);
6986 tmp2 = load_reg(s, rm);
7d1b0095 6987 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
6988 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6989 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
6990 tcg_temp_free_i32(tmp3);
6991 tcg_temp_free_i32(tmp2);
6ddbc6e4 6992 store_reg(s, rd, tmp);
9ee6e8bb 6993 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6994 tmp = load_reg(s, rm);
9ee6e8bb
PB
6995 shift = (insn >> 10) & 3;
6996 /* ??? In many cases it's not neccessary to do a
6997 rotate, a shift is sufficient. */
6998 if (shift != 0)
f669df27 6999 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7000 op1 = (insn >> 20) & 7;
7001 switch (op1) {
5e3f878a
PB
7002 case 0: gen_sxtb16(tmp); break;
7003 case 2: gen_sxtb(tmp); break;
7004 case 3: gen_sxth(tmp); break;
7005 case 4: gen_uxtb16(tmp); break;
7006 case 6: gen_uxtb(tmp); break;
7007 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7008 default: goto illegal_op;
7009 }
7010 if (rn != 15) {
5e3f878a 7011 tmp2 = load_reg(s, rn);
9ee6e8bb 7012 if ((op1 & 3) == 0) {
5e3f878a 7013 gen_add16(tmp, tmp2);
9ee6e8bb 7014 } else {
5e3f878a 7015 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7016 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7017 }
7018 }
6c95676b 7019 store_reg(s, rd, tmp);
9ee6e8bb
PB
7020 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7021 /* rev */
b0109805 7022 tmp = load_reg(s, rm);
9ee6e8bb
PB
7023 if (insn & (1 << 22)) {
7024 if (insn & (1 << 7)) {
b0109805 7025 gen_revsh(tmp);
9ee6e8bb
PB
7026 } else {
7027 ARCH(6T2);
b0109805 7028 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7029 }
7030 } else {
7031 if (insn & (1 << 7))
b0109805 7032 gen_rev16(tmp);
9ee6e8bb 7033 else
66896cb8 7034 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7035 }
b0109805 7036 store_reg(s, rd, tmp);
9ee6e8bb
PB
7037 } else {
7038 goto illegal_op;
7039 }
7040 break;
7041 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7042 tmp = load_reg(s, rm);
7043 tmp2 = load_reg(s, rs);
9ee6e8bb 7044 if (insn & (1 << 20)) {
838fa72d
AJ
7045 /* Signed multiply most significant [accumulate].
7046 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7047 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7048
955a7dd5 7049 if (rd != 15) {
838fa72d 7050 tmp = load_reg(s, rd);
9ee6e8bb 7051 if (insn & (1 << 6)) {
838fa72d 7052 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7053 } else {
838fa72d 7054 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7055 }
7056 }
838fa72d
AJ
7057 if (insn & (1 << 5)) {
7058 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7059 }
7060 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7061 tmp = tcg_temp_new_i32();
838fa72d
AJ
7062 tcg_gen_trunc_i64_i32(tmp, tmp64);
7063 tcg_temp_free_i64(tmp64);
955a7dd5 7064 store_reg(s, rn, tmp);
9ee6e8bb
PB
7065 } else {
7066 if (insn & (1 << 5))
5e3f878a
PB
7067 gen_swap_half(tmp2);
7068 gen_smul_dual(tmp, tmp2);
5e3f878a 7069 if (insn & (1 << 6)) {
e1d177b9 7070 /* This subtraction cannot overflow. */
5e3f878a
PB
7071 tcg_gen_sub_i32(tmp, tmp, tmp2);
7072 } else {
e1d177b9
PM
7073 /* This addition cannot overflow 32 bits;
7074 * however it may overflow considered as a signed
7075 * operation, in which case we must set the Q flag.
7076 */
7077 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7078 }
7d1b0095 7079 tcg_temp_free_i32(tmp2);
9ee6e8bb 7080 if (insn & (1 << 22)) {
5e3f878a 7081 /* smlald, smlsld */
a7812ae4
PB
7082 tmp64 = tcg_temp_new_i64();
7083 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7084 tcg_temp_free_i32(tmp);
a7812ae4
PB
7085 gen_addq(s, tmp64, rd, rn);
7086 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7087 tcg_temp_free_i64(tmp64);
9ee6e8bb 7088 } else {
5e3f878a 7089 /* smuad, smusd, smlad, smlsd */
22478e79 7090 if (rd != 15)
9ee6e8bb 7091 {
22478e79 7092 tmp2 = load_reg(s, rd);
5e3f878a 7093 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7094 tcg_temp_free_i32(tmp2);
9ee6e8bb 7095 }
22478e79 7096 store_reg(s, rn, tmp);
9ee6e8bb
PB
7097 }
7098 }
7099 break;
7100 case 3:
7101 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7102 switch (op1) {
7103 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7104 ARCH(6);
7105 tmp = load_reg(s, rm);
7106 tmp2 = load_reg(s, rs);
7107 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7108 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7109 if (rd != 15) {
7110 tmp2 = load_reg(s, rd);
6ddbc6e4 7111 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7112 tcg_temp_free_i32(tmp2);
9ee6e8bb 7113 }
ded9d295 7114 store_reg(s, rn, tmp);
9ee6e8bb
PB
7115 break;
7116 case 0x20: case 0x24: case 0x28: case 0x2c:
7117 /* Bitfield insert/clear. */
7118 ARCH(6T2);
7119 shift = (insn >> 7) & 0x1f;
7120 i = (insn >> 16) & 0x1f;
7121 i = i + 1 - shift;
7122 if (rm == 15) {
7d1b0095 7123 tmp = tcg_temp_new_i32();
5e3f878a 7124 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7125 } else {
5e3f878a 7126 tmp = load_reg(s, rm);
9ee6e8bb
PB
7127 }
7128 if (i != 32) {
5e3f878a 7129 tmp2 = load_reg(s, rd);
8f8e3aa4 7130 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7131 tcg_temp_free_i32(tmp2);
9ee6e8bb 7132 }
5e3f878a 7133 store_reg(s, rd, tmp);
9ee6e8bb
PB
7134 break;
7135 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7136 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7137 ARCH(6T2);
5e3f878a 7138 tmp = load_reg(s, rm);
9ee6e8bb
PB
7139 shift = (insn >> 7) & 0x1f;
7140 i = ((insn >> 16) & 0x1f) + 1;
7141 if (shift + i > 32)
7142 goto illegal_op;
7143 if (i < 32) {
7144 if (op1 & 0x20) {
5e3f878a 7145 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7146 } else {
5e3f878a 7147 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7148 }
7149 }
5e3f878a 7150 store_reg(s, rd, tmp);
9ee6e8bb
PB
7151 break;
7152 default:
7153 goto illegal_op;
7154 }
7155 break;
7156 }
7157 break;
7158 }
7159 do_ldst:
7160 /* Check for undefined extension instructions
7161 * per the ARM Bible IE:
7162 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7163 */
7164 sh = (0xf << 20) | (0xf << 4);
7165 if (op1 == 0x7 && ((insn & sh) == sh))
7166 {
7167 goto illegal_op;
7168 }
7169 /* load/store byte/word */
7170 rn = (insn >> 16) & 0xf;
7171 rd = (insn >> 12) & 0xf;
b0109805 7172 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7173 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7174 if (insn & (1 << 24))
b0109805 7175 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7176 if (insn & (1 << 20)) {
7177 /* load */
9ee6e8bb 7178 if (insn & (1 << 22)) {
b0109805 7179 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7180 } else {
b0109805 7181 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7182 }
9ee6e8bb
PB
7183 } else {
7184 /* store */
b0109805 7185 tmp = load_reg(s, rd);
9ee6e8bb 7186 if (insn & (1 << 22))
b0109805 7187 gen_st8(tmp, tmp2, i);
9ee6e8bb 7188 else
b0109805 7189 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7190 }
7191 if (!(insn & (1 << 24))) {
b0109805
PB
7192 gen_add_data_offset(s, insn, tmp2);
7193 store_reg(s, rn, tmp2);
7194 } else if (insn & (1 << 21)) {
7195 store_reg(s, rn, tmp2);
7196 } else {
7d1b0095 7197 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7198 }
7199 if (insn & (1 << 20)) {
7200 /* Complete the load. */
7201 if (rd == 15)
b0109805 7202 gen_bx(s, tmp);
9ee6e8bb 7203 else
b0109805 7204 store_reg(s, rd, tmp);
9ee6e8bb
PB
7205 }
7206 break;
7207 case 0x08:
7208 case 0x09:
7209 {
7210 int j, n, user, loaded_base;
b0109805 7211 TCGv loaded_var;
9ee6e8bb
PB
7212 /* load/store multiple words */
7213 /* XXX: store correct base if write back */
7214 user = 0;
7215 if (insn & (1 << 22)) {
7216 if (IS_USER(s))
7217 goto illegal_op; /* only usable in supervisor mode */
7218
7219 if ((insn & (1 << 15)) == 0)
7220 user = 1;
7221 }
7222 rn = (insn >> 16) & 0xf;
b0109805 7223 addr = load_reg(s, rn);
9ee6e8bb
PB
7224
7225 /* compute total size */
7226 loaded_base = 0;
a50f5b91 7227 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7228 n = 0;
7229 for(i=0;i<16;i++) {
7230 if (insn & (1 << i))
7231 n++;
7232 }
7233 /* XXX: test invalid n == 0 case ? */
7234 if (insn & (1 << 23)) {
7235 if (insn & (1 << 24)) {
7236 /* pre increment */
b0109805 7237 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7238 } else {
7239 /* post increment */
7240 }
7241 } else {
7242 if (insn & (1 << 24)) {
7243 /* pre decrement */
b0109805 7244 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7245 } else {
7246 /* post decrement */
7247 if (n != 1)
b0109805 7248 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7249 }
7250 }
7251 j = 0;
7252 for(i=0;i<16;i++) {
7253 if (insn & (1 << i)) {
7254 if (insn & (1 << 20)) {
7255 /* load */
b0109805 7256 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7257 if (i == 15) {
b0109805 7258 gen_bx(s, tmp);
9ee6e8bb 7259 } else if (user) {
b75263d6
JR
7260 tmp2 = tcg_const_i32(i);
7261 gen_helper_set_user_reg(tmp2, tmp);
7262 tcg_temp_free_i32(tmp2);
7d1b0095 7263 tcg_temp_free_i32(tmp);
9ee6e8bb 7264 } else if (i == rn) {
b0109805 7265 loaded_var = tmp;
9ee6e8bb
PB
7266 loaded_base = 1;
7267 } else {
b0109805 7268 store_reg(s, i, tmp);
9ee6e8bb
PB
7269 }
7270 } else {
7271 /* store */
7272 if (i == 15) {
7273 /* special case: r15 = PC + 8 */
7274 val = (long)s->pc + 4;
7d1b0095 7275 tmp = tcg_temp_new_i32();
b0109805 7276 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7277 } else if (user) {
7d1b0095 7278 tmp = tcg_temp_new_i32();
b75263d6
JR
7279 tmp2 = tcg_const_i32(i);
7280 gen_helper_get_user_reg(tmp, tmp2);
7281 tcg_temp_free_i32(tmp2);
9ee6e8bb 7282 } else {
b0109805 7283 tmp = load_reg(s, i);
9ee6e8bb 7284 }
b0109805 7285 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7286 }
7287 j++;
7288 /* no need to add after the last transfer */
7289 if (j != n)
b0109805 7290 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7291 }
7292 }
7293 if (insn & (1 << 21)) {
7294 /* write back */
7295 if (insn & (1 << 23)) {
7296 if (insn & (1 << 24)) {
7297 /* pre increment */
7298 } else {
7299 /* post increment */
b0109805 7300 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7301 }
7302 } else {
7303 if (insn & (1 << 24)) {
7304 /* pre decrement */
7305 if (n != 1)
b0109805 7306 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7307 } else {
7308 /* post decrement */
b0109805 7309 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7310 }
7311 }
b0109805
PB
7312 store_reg(s, rn, addr);
7313 } else {
7d1b0095 7314 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7315 }
7316 if (loaded_base) {
b0109805 7317 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7318 }
7319 if ((insn & (1 << 22)) && !user) {
7320 /* Restore CPSR from SPSR. */
d9ba4830
PB
7321 tmp = load_cpu_field(spsr);
7322 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7323 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7324 s->is_jmp = DISAS_UPDATE;
7325 }
7326 }
7327 break;
7328 case 0xa:
7329 case 0xb:
7330 {
7331 int32_t offset;
7332
7333 /* branch (and link) */
7334 val = (int32_t)s->pc;
7335 if (insn & (1 << 24)) {
7d1b0095 7336 tmp = tcg_temp_new_i32();
5e3f878a
PB
7337 tcg_gen_movi_i32(tmp, val);
7338 store_reg(s, 14, tmp);
9ee6e8bb
PB
7339 }
7340 offset = (((int32_t)insn << 8) >> 8);
7341 val += (offset << 2) + 4;
7342 gen_jmp(s, val);
7343 }
7344 break;
7345 case 0xc:
7346 case 0xd:
7347 case 0xe:
7348 /* Coprocessor. */
7349 if (disas_coproc_insn(env, s, insn))
7350 goto illegal_op;
7351 break;
7352 case 0xf:
7353 /* swi */
5e3f878a 7354 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7355 s->is_jmp = DISAS_SWI;
7356 break;
7357 default:
7358 illegal_op:
bc4a0de0 7359 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7360 break;
7361 }
7362 }
7363}
7364
/* Return true if this is a Thumb-2 logical op. */
static int
thumb2_logic_op(int op)
{
    /* Opcodes 0-7 are the logical operations (and, bic, orr, orn, eor, ...);
       8 and above are the arithmetic ones. */
    return op < 8;
}
7371
7372/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7373 then set condition code flags based on the result of the operation.
7374 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7375 to the high bit of T1.
7376 Returns zero if the opcode is valid. */
7377
7378static int
396e467c 7379gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7380{
7381 int logic_cc;
7382
7383 logic_cc = 0;
7384 switch (op) {
7385 case 0: /* and */
396e467c 7386 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7387 logic_cc = conds;
7388 break;
7389 case 1: /* bic */
f669df27 7390 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7391 logic_cc = conds;
7392 break;
7393 case 2: /* orr */
396e467c 7394 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7395 logic_cc = conds;
7396 break;
7397 case 3: /* orn */
29501f1b 7398 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7399 logic_cc = conds;
7400 break;
7401 case 4: /* eor */
396e467c 7402 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7403 logic_cc = conds;
7404 break;
7405 case 8: /* add */
7406 if (conds)
396e467c 7407 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7408 else
396e467c 7409 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7410 break;
7411 case 10: /* adc */
7412 if (conds)
396e467c 7413 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7414 else
396e467c 7415 gen_adc(t0, t1);
9ee6e8bb
PB
7416 break;
7417 case 11: /* sbc */
7418 if (conds)
396e467c 7419 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7420 else
396e467c 7421 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7422 break;
7423 case 13: /* sub */
7424 if (conds)
396e467c 7425 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7426 else
396e467c 7427 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7428 break;
7429 case 14: /* rsb */
7430 if (conds)
396e467c 7431 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7432 else
396e467c 7433 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7434 break;
7435 default: /* 5, 6, 7, 9, 12, 15. */
7436 return 1;
7437 }
7438 if (logic_cc) {
396e467c 7439 gen_logic_CC(t0);
9ee6e8bb 7440 if (shifter_out)
396e467c 7441 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7442 }
7443 return 0;
7444}
7445
7446/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7447 is not legal. */
7448static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7449{
b0109805 7450 uint32_t insn, imm, shift, offset;
9ee6e8bb 7451 uint32_t rd, rn, rm, rs;
b26eefb6 7452 TCGv tmp;
6ddbc6e4
PB
7453 TCGv tmp2;
7454 TCGv tmp3;
b0109805 7455 TCGv addr;
a7812ae4 7456 TCGv_i64 tmp64;
9ee6e8bb
PB
7457 int op;
7458 int shiftop;
7459 int conds;
7460 int logic_cc;
7461
7462 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7463 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7464 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7465 16-bit instructions to get correct prefetch abort behavior. */
7466 insn = insn_hw1;
7467 if ((insn & (1 << 12)) == 0) {
7468 /* Second half of blx. */
7469 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7470 tmp = load_reg(s, 14);
7471 tcg_gen_addi_i32(tmp, tmp, offset);
7472 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7473
7d1b0095 7474 tmp2 = tcg_temp_new_i32();
b0109805 7475 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7476 store_reg(s, 14, tmp2);
7477 gen_bx(s, tmp);
9ee6e8bb
PB
7478 return 0;
7479 }
7480 if (insn & (1 << 11)) {
7481 /* Second half of bl. */
7482 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7483 tmp = load_reg(s, 14);
6a0d8a1d 7484 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7485
7d1b0095 7486 tmp2 = tcg_temp_new_i32();
b0109805 7487 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7488 store_reg(s, 14, tmp2);
7489 gen_bx(s, tmp);
9ee6e8bb
PB
7490 return 0;
7491 }
7492 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7493 /* Instruction spans a page boundary. Implement it as two
7494 16-bit instructions in case the second half causes an
7495 prefetch abort. */
7496 offset = ((int32_t)insn << 21) >> 9;
396e467c 7497 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7498 return 0;
7499 }
7500 /* Fall through to 32-bit decode. */
7501 }
7502
7503 insn = lduw_code(s->pc);
7504 s->pc += 2;
7505 insn |= (uint32_t)insn_hw1 << 16;
7506
7507 if ((insn & 0xf800e800) != 0xf000e800) {
7508 ARCH(6T2);
7509 }
7510
7511 rn = (insn >> 16) & 0xf;
7512 rs = (insn >> 12) & 0xf;
7513 rd = (insn >> 8) & 0xf;
7514 rm = insn & 0xf;
7515 switch ((insn >> 25) & 0xf) {
7516 case 0: case 1: case 2: case 3:
7517 /* 16-bit instructions. Should never happen. */
7518 abort();
7519 case 4:
7520 if (insn & (1 << 22)) {
7521 /* Other load/store, table branch. */
7522 if (insn & 0x01200000) {
7523 /* Load/store doubleword. */
7524 if (rn == 15) {
7d1b0095 7525 addr = tcg_temp_new_i32();
b0109805 7526 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7527 } else {
b0109805 7528 addr = load_reg(s, rn);
9ee6e8bb
PB
7529 }
7530 offset = (insn & 0xff) * 4;
7531 if ((insn & (1 << 23)) == 0)
7532 offset = -offset;
7533 if (insn & (1 << 24)) {
b0109805 7534 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7535 offset = 0;
7536 }
7537 if (insn & (1 << 20)) {
7538 /* ldrd */
b0109805
PB
7539 tmp = gen_ld32(addr, IS_USER(s));
7540 store_reg(s, rs, tmp);
7541 tcg_gen_addi_i32(addr, addr, 4);
7542 tmp = gen_ld32(addr, IS_USER(s));
7543 store_reg(s, rd, tmp);
9ee6e8bb
PB
7544 } else {
7545 /* strd */
b0109805
PB
7546 tmp = load_reg(s, rs);
7547 gen_st32(tmp, addr, IS_USER(s));
7548 tcg_gen_addi_i32(addr, addr, 4);
7549 tmp = load_reg(s, rd);
7550 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7551 }
7552 if (insn & (1 << 21)) {
7553 /* Base writeback. */
7554 if (rn == 15)
7555 goto illegal_op;
b0109805
PB
7556 tcg_gen_addi_i32(addr, addr, offset - 4);
7557 store_reg(s, rn, addr);
7558 } else {
7d1b0095 7559 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7560 }
7561 } else if ((insn & (1 << 23)) == 0) {
7562 /* Load/store exclusive word. */
3174f8e9 7563 addr = tcg_temp_local_new();
98a46317 7564 load_reg_var(s, addr, rn);
426f5abc 7565 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7566 if (insn & (1 << 20)) {
426f5abc 7567 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7568 } else {
426f5abc 7569 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7570 }
3174f8e9 7571 tcg_temp_free(addr);
9ee6e8bb
PB
7572 } else if ((insn & (1 << 6)) == 0) {
7573 /* Table Branch. */
7574 if (rn == 15) {
7d1b0095 7575 addr = tcg_temp_new_i32();
b0109805 7576 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7577 } else {
b0109805 7578 addr = load_reg(s, rn);
9ee6e8bb 7579 }
b26eefb6 7580 tmp = load_reg(s, rm);
b0109805 7581 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7582 if (insn & (1 << 4)) {
7583 /* tbh */
b0109805 7584 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 7585 tcg_temp_free_i32(tmp);
b0109805 7586 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7587 } else { /* tbb */
7d1b0095 7588 tcg_temp_free_i32(tmp);
b0109805 7589 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7590 }
7d1b0095 7591 tcg_temp_free_i32(addr);
b0109805
PB
7592 tcg_gen_shli_i32(tmp, tmp, 1);
7593 tcg_gen_addi_i32(tmp, tmp, s->pc);
7594 store_reg(s, 15, tmp);
9ee6e8bb
PB
7595 } else {
7596 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7597 ARCH(7);
9ee6e8bb 7598 op = (insn >> 4) & 0x3;
426f5abc
PB
7599 if (op == 2) {
7600 goto illegal_op;
7601 }
3174f8e9 7602 addr = tcg_temp_local_new();
98a46317 7603 load_reg_var(s, addr, rn);
9ee6e8bb 7604 if (insn & (1 << 20)) {
426f5abc 7605 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7606 } else {
426f5abc 7607 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7608 }
3174f8e9 7609 tcg_temp_free(addr);
9ee6e8bb
PB
7610 }
7611 } else {
7612 /* Load/store multiple, RFE, SRS. */
7613 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7614 /* Not available in user mode. */
b0109805 7615 if (IS_USER(s))
9ee6e8bb
PB
7616 goto illegal_op;
7617 if (insn & (1 << 20)) {
7618 /* rfe */
b0109805
PB
7619 addr = load_reg(s, rn);
7620 if ((insn & (1 << 24)) == 0)
7621 tcg_gen_addi_i32(addr, addr, -8);
7622 /* Load PC into tmp and CPSR into tmp2. */
7623 tmp = gen_ld32(addr, 0);
7624 tcg_gen_addi_i32(addr, addr, 4);
7625 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7626 if (insn & (1 << 21)) {
7627 /* Base writeback. */
b0109805
PB
7628 if (insn & (1 << 24)) {
7629 tcg_gen_addi_i32(addr, addr, 4);
7630 } else {
7631 tcg_gen_addi_i32(addr, addr, -4);
7632 }
7633 store_reg(s, rn, addr);
7634 } else {
7d1b0095 7635 tcg_temp_free_i32(addr);
9ee6e8bb 7636 }
b0109805 7637 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7638 } else {
7639 /* srs */
7640 op = (insn & 0x1f);
7d1b0095 7641 addr = tcg_temp_new_i32();
39ea3d4e
PM
7642 tmp = tcg_const_i32(op);
7643 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7644 tcg_temp_free_i32(tmp);
9ee6e8bb 7645 if ((insn & (1 << 24)) == 0) {
b0109805 7646 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7647 }
b0109805
PB
7648 tmp = load_reg(s, 14);
7649 gen_st32(tmp, addr, 0);
7650 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 7651 tmp = tcg_temp_new_i32();
b0109805
PB
7652 gen_helper_cpsr_read(tmp);
7653 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7654 if (insn & (1 << 21)) {
7655 if ((insn & (1 << 24)) == 0) {
b0109805 7656 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7657 } else {
b0109805 7658 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 7659 }
39ea3d4e
PM
7660 tmp = tcg_const_i32(op);
7661 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7662 tcg_temp_free_i32(tmp);
b0109805 7663 } else {
7d1b0095 7664 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7665 }
7666 }
7667 } else {
7668 int i;
7669 /* Load/store multiple. */
b0109805 7670 addr = load_reg(s, rn);
9ee6e8bb
PB
7671 offset = 0;
7672 for (i = 0; i < 16; i++) {
7673 if (insn & (1 << i))
7674 offset += 4;
7675 }
7676 if (insn & (1 << 24)) {
b0109805 7677 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7678 }
7679
7680 for (i = 0; i < 16; i++) {
7681 if ((insn & (1 << i)) == 0)
7682 continue;
7683 if (insn & (1 << 20)) {
7684 /* Load. */
b0109805 7685 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7686 if (i == 15) {
b0109805 7687 gen_bx(s, tmp);
9ee6e8bb 7688 } else {
b0109805 7689 store_reg(s, i, tmp);
9ee6e8bb
PB
7690 }
7691 } else {
7692 /* Store. */
b0109805
PB
7693 tmp = load_reg(s, i);
7694 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7695 }
b0109805 7696 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7697 }
7698 if (insn & (1 << 21)) {
7699 /* Base register writeback. */
7700 if (insn & (1 << 24)) {
b0109805 7701 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7702 }
7703 /* Fault if writeback register is in register list. */
7704 if (insn & (1 << rn))
7705 goto illegal_op;
b0109805
PB
7706 store_reg(s, rn, addr);
7707 } else {
7d1b0095 7708 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7709 }
7710 }
7711 }
7712 break;
2af9ab77
JB
7713 case 5:
7714
9ee6e8bb 7715 op = (insn >> 21) & 0xf;
2af9ab77
JB
7716 if (op == 6) {
7717 /* Halfword pack. */
7718 tmp = load_reg(s, rn);
7719 tmp2 = load_reg(s, rm);
7720 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7721 if (insn & (1 << 5)) {
7722 /* pkhtb */
7723 if (shift == 0)
7724 shift = 31;
7725 tcg_gen_sari_i32(tmp2, tmp2, shift);
7726 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7727 tcg_gen_ext16u_i32(tmp2, tmp2);
7728 } else {
7729 /* pkhbt */
7730 if (shift)
7731 tcg_gen_shli_i32(tmp2, tmp2, shift);
7732 tcg_gen_ext16u_i32(tmp, tmp);
7733 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7734 }
7735 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7736 tcg_temp_free_i32(tmp2);
3174f8e9
FN
7737 store_reg(s, rd, tmp);
7738 } else {
2af9ab77
JB
7739 /* Data processing register constant shift. */
7740 if (rn == 15) {
7d1b0095 7741 tmp = tcg_temp_new_i32();
2af9ab77
JB
7742 tcg_gen_movi_i32(tmp, 0);
7743 } else {
7744 tmp = load_reg(s, rn);
7745 }
7746 tmp2 = load_reg(s, rm);
7747
7748 shiftop = (insn >> 4) & 3;
7749 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7750 conds = (insn & (1 << 20)) != 0;
7751 logic_cc = (conds && thumb2_logic_op(op));
7752 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7753 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7754 goto illegal_op;
7d1b0095 7755 tcg_temp_free_i32(tmp2);
2af9ab77
JB
7756 if (rd != 15) {
7757 store_reg(s, rd, tmp);
7758 } else {
7d1b0095 7759 tcg_temp_free_i32(tmp);
2af9ab77 7760 }
3174f8e9 7761 }
9ee6e8bb
PB
7762 break;
7763 case 13: /* Misc data processing. */
7764 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7765 if (op < 4 && (insn & 0xf000) != 0xf000)
7766 goto illegal_op;
7767 switch (op) {
7768 case 0: /* Register controlled shift. */
8984bd2e
PB
7769 tmp = load_reg(s, rn);
7770 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7771 if ((insn & 0x70) != 0)
7772 goto illegal_op;
7773 op = (insn >> 21) & 3;
8984bd2e
PB
7774 logic_cc = (insn & (1 << 20)) != 0;
7775 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7776 if (logic_cc)
7777 gen_logic_CC(tmp);
21aeb343 7778 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7779 break;
7780 case 1: /* Sign/zero extend. */
5e3f878a 7781 tmp = load_reg(s, rm);
9ee6e8bb
PB
7782 shift = (insn >> 4) & 3;
7783 /* ??? In many cases it's not neccessary to do a
7784 rotate, a shift is sufficient. */
7785 if (shift != 0)
f669df27 7786 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7787 op = (insn >> 20) & 7;
7788 switch (op) {
5e3f878a
PB
7789 case 0: gen_sxth(tmp); break;
7790 case 1: gen_uxth(tmp); break;
7791 case 2: gen_sxtb16(tmp); break;
7792 case 3: gen_uxtb16(tmp); break;
7793 case 4: gen_sxtb(tmp); break;
7794 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7795 default: goto illegal_op;
7796 }
7797 if (rn != 15) {
5e3f878a 7798 tmp2 = load_reg(s, rn);
9ee6e8bb 7799 if ((op >> 1) == 1) {
5e3f878a 7800 gen_add16(tmp, tmp2);
9ee6e8bb 7801 } else {
5e3f878a 7802 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7803 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7804 }
7805 }
5e3f878a 7806 store_reg(s, rd, tmp);
9ee6e8bb
PB
7807 break;
7808 case 2: /* SIMD add/subtract. */
7809 op = (insn >> 20) & 7;
7810 shift = (insn >> 4) & 7;
7811 if ((op & 3) == 3 || (shift & 3) == 3)
7812 goto illegal_op;
6ddbc6e4
PB
7813 tmp = load_reg(s, rn);
7814 tmp2 = load_reg(s, rm);
7815 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 7816 tcg_temp_free_i32(tmp2);
6ddbc6e4 7817 store_reg(s, rd, tmp);
9ee6e8bb
PB
7818 break;
7819 case 3: /* Other data processing. */
7820 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7821 if (op < 4) {
7822 /* Saturating add/subtract. */
d9ba4830
PB
7823 tmp = load_reg(s, rn);
7824 tmp2 = load_reg(s, rm);
9ee6e8bb 7825 if (op & 1)
4809c612
JB
7826 gen_helper_double_saturate(tmp, tmp);
7827 if (op & 2)
d9ba4830 7828 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7829 else
d9ba4830 7830 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 7831 tcg_temp_free_i32(tmp2);
9ee6e8bb 7832 } else {
d9ba4830 7833 tmp = load_reg(s, rn);
9ee6e8bb
PB
7834 switch (op) {
7835 case 0x0a: /* rbit */
d9ba4830 7836 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7837 break;
7838 case 0x08: /* rev */
66896cb8 7839 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7840 break;
7841 case 0x09: /* rev16 */
d9ba4830 7842 gen_rev16(tmp);
9ee6e8bb
PB
7843 break;
7844 case 0x0b: /* revsh */
d9ba4830 7845 gen_revsh(tmp);
9ee6e8bb
PB
7846 break;
7847 case 0x10: /* sel */
d9ba4830 7848 tmp2 = load_reg(s, rm);
7d1b0095 7849 tmp3 = tcg_temp_new_i32();
6ddbc6e4 7850 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7851 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7852 tcg_temp_free_i32(tmp3);
7853 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7854 break;
7855 case 0x18: /* clz */
d9ba4830 7856 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7857 break;
7858 default:
7859 goto illegal_op;
7860 }
7861 }
d9ba4830 7862 store_reg(s, rd, tmp);
9ee6e8bb
PB
7863 break;
7864 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7865 op = (insn >> 4) & 0xf;
d9ba4830
PB
7866 tmp = load_reg(s, rn);
7867 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7868 switch ((insn >> 20) & 7) {
7869 case 0: /* 32 x 32 -> 32 */
d9ba4830 7870 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7871 tcg_temp_free_i32(tmp2);
9ee6e8bb 7872 if (rs != 15) {
d9ba4830 7873 tmp2 = load_reg(s, rs);
9ee6e8bb 7874 if (op)
d9ba4830 7875 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7876 else
d9ba4830 7877 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7878 tcg_temp_free_i32(tmp2);
9ee6e8bb 7879 }
9ee6e8bb
PB
7880 break;
7881 case 1: /* 16 x 16 -> 32 */
d9ba4830 7882 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 7883 tcg_temp_free_i32(tmp2);
9ee6e8bb 7884 if (rs != 15) {
d9ba4830
PB
7885 tmp2 = load_reg(s, rs);
7886 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7887 tcg_temp_free_i32(tmp2);
9ee6e8bb 7888 }
9ee6e8bb
PB
7889 break;
7890 case 2: /* Dual multiply add. */
7891 case 4: /* Dual multiply subtract. */
7892 if (op)
d9ba4830
PB
7893 gen_swap_half(tmp2);
7894 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7895 if (insn & (1 << 22)) {
e1d177b9 7896 /* This subtraction cannot overflow. */
d9ba4830 7897 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7898 } else {
e1d177b9
PM
7899 /* This addition cannot overflow 32 bits;
7900 * however it may overflow considered as a signed
7901 * operation, in which case we must set the Q flag.
7902 */
7903 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 7904 }
7d1b0095 7905 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7906 if (rs != 15)
7907 {
d9ba4830
PB
7908 tmp2 = load_reg(s, rs);
7909 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7910 tcg_temp_free_i32(tmp2);
9ee6e8bb 7911 }
9ee6e8bb
PB
7912 break;
7913 case 3: /* 32 * 16 -> 32msb */
7914 if (op)
d9ba4830 7915 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7916 else
d9ba4830 7917 gen_sxth(tmp2);
a7812ae4
PB
7918 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7919 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7920 tmp = tcg_temp_new_i32();
a7812ae4 7921 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7922 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7923 if (rs != 15)
7924 {
d9ba4830
PB
7925 tmp2 = load_reg(s, rs);
7926 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7927 tcg_temp_free_i32(tmp2);
9ee6e8bb 7928 }
9ee6e8bb 7929 break;
838fa72d
AJ
7930 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7931 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7932 if (rs != 15) {
838fa72d
AJ
7933 tmp = load_reg(s, rs);
7934 if (insn & (1 << 20)) {
7935 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 7936 } else {
838fa72d 7937 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 7938 }
2c0262af 7939 }
838fa72d
AJ
7940 if (insn & (1 << 4)) {
7941 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7942 }
7943 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7944 tmp = tcg_temp_new_i32();
838fa72d
AJ
7945 tcg_gen_trunc_i64_i32(tmp, tmp64);
7946 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7947 break;
7948 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 7949 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7950 tcg_temp_free_i32(tmp2);
9ee6e8bb 7951 if (rs != 15) {
d9ba4830
PB
7952 tmp2 = load_reg(s, rs);
7953 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7954 tcg_temp_free_i32(tmp2);
5fd46862 7955 }
9ee6e8bb 7956 break;
2c0262af 7957 }
d9ba4830 7958 store_reg(s, rd, tmp);
2c0262af 7959 break;
9ee6e8bb
PB
7960 case 6: case 7: /* 64-bit multiply, Divide. */
7961 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7962 tmp = load_reg(s, rn);
7963 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7964 if ((op & 0x50) == 0x10) {
7965 /* sdiv, udiv */
7966 if (!arm_feature(env, ARM_FEATURE_DIV))
7967 goto illegal_op;
7968 if (op & 0x20)
5e3f878a 7969 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7970 else
5e3f878a 7971 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 7972 tcg_temp_free_i32(tmp2);
5e3f878a 7973 store_reg(s, rd, tmp);
9ee6e8bb
PB
7974 } else if ((op & 0xe) == 0xc) {
7975 /* Dual multiply accumulate long. */
7976 if (op & 1)
5e3f878a
PB
7977 gen_swap_half(tmp2);
7978 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7979 if (op & 0x10) {
5e3f878a 7980 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7981 } else {
5e3f878a 7982 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7983 }
7d1b0095 7984 tcg_temp_free_i32(tmp2);
a7812ae4
PB
7985 /* BUGFIX */
7986 tmp64 = tcg_temp_new_i64();
7987 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7988 tcg_temp_free_i32(tmp);
a7812ae4
PB
7989 gen_addq(s, tmp64, rs, rd);
7990 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7991 tcg_temp_free_i64(tmp64);
2c0262af 7992 } else {
9ee6e8bb
PB
7993 if (op & 0x20) {
7994 /* Unsigned 64-bit multiply */
a7812ae4 7995 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7996 } else {
9ee6e8bb
PB
7997 if (op & 8) {
7998 /* smlalxy */
5e3f878a 7999 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8000 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8001 tmp64 = tcg_temp_new_i64();
8002 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8003 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8004 } else {
8005 /* Signed 64-bit multiply */
a7812ae4 8006 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8007 }
b5ff1b31 8008 }
9ee6e8bb
PB
8009 if (op & 4) {
8010 /* umaal */
a7812ae4
PB
8011 gen_addq_lo(s, tmp64, rs);
8012 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8013 } else if (op & 0x40) {
8014 /* 64-bit accumulate. */
a7812ae4 8015 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8016 }
a7812ae4 8017 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8018 tcg_temp_free_i64(tmp64);
5fd46862 8019 }
2c0262af 8020 break;
9ee6e8bb
PB
8021 }
8022 break;
8023 case 6: case 7: case 14: case 15:
8024 /* Coprocessor. */
8025 if (((insn >> 24) & 3) == 3) {
8026 /* Translate into the equivalent ARM encoding. */
f06053e3 8027 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8028 if (disas_neon_data_insn(env, s, insn))
8029 goto illegal_op;
8030 } else {
8031 if (insn & (1 << 28))
8032 goto illegal_op;
8033 if (disas_coproc_insn (env, s, insn))
8034 goto illegal_op;
8035 }
8036 break;
8037 case 8: case 9: case 10: case 11:
8038 if (insn & (1 << 15)) {
8039 /* Branches, misc control. */
8040 if (insn & 0x5000) {
8041 /* Unconditional branch. */
8042 /* signextend(hw1[10:0]) -> offset[:12]. */
8043 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8044 /* hw1[10:0] -> offset[11:1]. */
8045 offset |= (insn & 0x7ff) << 1;
8046 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8047 offset[24:22] already have the same value because of the
8048 sign extension above. */
8049 offset ^= ((~insn) & (1 << 13)) << 10;
8050 offset ^= ((~insn) & (1 << 11)) << 11;
8051
9ee6e8bb
PB
8052 if (insn & (1 << 14)) {
8053 /* Branch and link. */
3174f8e9 8054 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8055 }
3b46e624 8056
b0109805 8057 offset += s->pc;
9ee6e8bb
PB
8058 if (insn & (1 << 12)) {
8059 /* b/bl */
b0109805 8060 gen_jmp(s, offset);
9ee6e8bb
PB
8061 } else {
8062 /* blx */
b0109805
PB
8063 offset &= ~(uint32_t)2;
8064 gen_bx_im(s, offset);
2c0262af 8065 }
9ee6e8bb
PB
8066 } else if (((insn >> 23) & 7) == 7) {
8067 /* Misc control */
8068 if (insn & (1 << 13))
8069 goto illegal_op;
8070
8071 if (insn & (1 << 26)) {
8072 /* Secure monitor call (v6Z) */
8073 goto illegal_op; /* not implemented. */
2c0262af 8074 } else {
9ee6e8bb
PB
8075 op = (insn >> 20) & 7;
8076 switch (op) {
8077 case 0: /* msr cpsr. */
8078 if (IS_M(env)) {
8984bd2e
PB
8079 tmp = load_reg(s, rn);
8080 addr = tcg_const_i32(insn & 0xff);
8081 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8082 tcg_temp_free_i32(addr);
7d1b0095 8083 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8084 gen_lookup_tb(s);
8085 break;
8086 }
8087 /* fall through */
8088 case 1: /* msr spsr. */
8089 if (IS_M(env))
8090 goto illegal_op;
2fbac54b
FN
8091 tmp = load_reg(s, rn);
8092 if (gen_set_psr(s,
9ee6e8bb 8093 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8094 op == 1, tmp))
9ee6e8bb
PB
8095 goto illegal_op;
8096 break;
8097 case 2: /* cps, nop-hint. */
8098 if (((insn >> 8) & 7) == 0) {
8099 gen_nop_hint(s, insn & 0xff);
8100 }
8101 /* Implemented as NOP in user mode. */
8102 if (IS_USER(s))
8103 break;
8104 offset = 0;
8105 imm = 0;
8106 if (insn & (1 << 10)) {
8107 if (insn & (1 << 7))
8108 offset |= CPSR_A;
8109 if (insn & (1 << 6))
8110 offset |= CPSR_I;
8111 if (insn & (1 << 5))
8112 offset |= CPSR_F;
8113 if (insn & (1 << 9))
8114 imm = CPSR_A | CPSR_I | CPSR_F;
8115 }
8116 if (insn & (1 << 8)) {
8117 offset |= 0x1f;
8118 imm |= (insn & 0x1f);
8119 }
8120 if (offset) {
2fbac54b 8121 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8122 }
8123 break;
8124 case 3: /* Special control operations. */
426f5abc 8125 ARCH(7);
9ee6e8bb
PB
8126 op = (insn >> 4) & 0xf;
8127 switch (op) {
8128 case 2: /* clrex */
426f5abc 8129 gen_clrex(s);
9ee6e8bb
PB
8130 break;
8131 case 4: /* dsb */
8132 case 5: /* dmb */
8133 case 6: /* isb */
8134 /* These execute as NOPs. */
9ee6e8bb
PB
8135 break;
8136 default:
8137 goto illegal_op;
8138 }
8139 break;
8140 case 4: /* bxj */
8141 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8142 tmp = load_reg(s, rn);
8143 gen_bx(s, tmp);
9ee6e8bb
PB
8144 break;
8145 case 5: /* Exception return. */
b8b45b68
RV
8146 if (IS_USER(s)) {
8147 goto illegal_op;
8148 }
8149 if (rn != 14 || rd != 15) {
8150 goto illegal_op;
8151 }
8152 tmp = load_reg(s, rn);
8153 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8154 gen_exception_return(s, tmp);
8155 break;
9ee6e8bb 8156 case 6: /* mrs cpsr. */
7d1b0095 8157 tmp = tcg_temp_new_i32();
9ee6e8bb 8158 if (IS_M(env)) {
8984bd2e
PB
8159 addr = tcg_const_i32(insn & 0xff);
8160 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8161 tcg_temp_free_i32(addr);
9ee6e8bb 8162 } else {
8984bd2e 8163 gen_helper_cpsr_read(tmp);
9ee6e8bb 8164 }
8984bd2e 8165 store_reg(s, rd, tmp);
9ee6e8bb
PB
8166 break;
8167 case 7: /* mrs spsr. */
8168 /* Not accessible in user mode. */
8169 if (IS_USER(s) || IS_M(env))
8170 goto illegal_op;
d9ba4830
PB
8171 tmp = load_cpu_field(spsr);
8172 store_reg(s, rd, tmp);
9ee6e8bb 8173 break;
2c0262af
FB
8174 }
8175 }
9ee6e8bb
PB
8176 } else {
8177 /* Conditional branch. */
8178 op = (insn >> 22) & 0xf;
8179 /* Generate a conditional jump to next instruction. */
8180 s->condlabel = gen_new_label();
d9ba4830 8181 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8182 s->condjmp = 1;
8183
8184 /* offset[11:1] = insn[10:0] */
8185 offset = (insn & 0x7ff) << 1;
8186 /* offset[17:12] = insn[21:16]. */
8187 offset |= (insn & 0x003f0000) >> 4;
8188 /* offset[31:20] = insn[26]. */
8189 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8190 /* offset[18] = insn[13]. */
8191 offset |= (insn & (1 << 13)) << 5;
8192 /* offset[19] = insn[11]. */
8193 offset |= (insn & (1 << 11)) << 8;
8194
8195 /* jump to the offset */
b0109805 8196 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8197 }
8198 } else {
8199 /* Data processing immediate. */
8200 if (insn & (1 << 25)) {
8201 if (insn & (1 << 24)) {
8202 if (insn & (1 << 20))
8203 goto illegal_op;
8204 /* Bitfield/Saturate. */
8205 op = (insn >> 21) & 7;
8206 imm = insn & 0x1f;
8207 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8208 if (rn == 15) {
7d1b0095 8209 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8210 tcg_gen_movi_i32(tmp, 0);
8211 } else {
8212 tmp = load_reg(s, rn);
8213 }
9ee6e8bb
PB
8214 switch (op) {
8215 case 2: /* Signed bitfield extract. */
8216 imm++;
8217 if (shift + imm > 32)
8218 goto illegal_op;
8219 if (imm < 32)
6ddbc6e4 8220 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8221 break;
8222 case 6: /* Unsigned bitfield extract. */
8223 imm++;
8224 if (shift + imm > 32)
8225 goto illegal_op;
8226 if (imm < 32)
6ddbc6e4 8227 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8228 break;
8229 case 3: /* Bitfield insert/clear. */
8230 if (imm < shift)
8231 goto illegal_op;
8232 imm = imm + 1 - shift;
8233 if (imm != 32) {
6ddbc6e4 8234 tmp2 = load_reg(s, rd);
8f8e3aa4 8235 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8236 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8237 }
8238 break;
8239 case 7:
8240 goto illegal_op;
8241 default: /* Saturate. */
9ee6e8bb
PB
8242 if (shift) {
8243 if (op & 1)
6ddbc6e4 8244 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8245 else
6ddbc6e4 8246 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8247 }
6ddbc6e4 8248 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8249 if (op & 4) {
8250 /* Unsigned. */
9ee6e8bb 8251 if ((op & 1) && shift == 0)
6ddbc6e4 8252 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8253 else
6ddbc6e4 8254 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8255 } else {
9ee6e8bb 8256 /* Signed. */
9ee6e8bb 8257 if ((op & 1) && shift == 0)
6ddbc6e4 8258 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8259 else
6ddbc6e4 8260 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8261 }
b75263d6 8262 tcg_temp_free_i32(tmp2);
9ee6e8bb 8263 break;
2c0262af 8264 }
6ddbc6e4 8265 store_reg(s, rd, tmp);
9ee6e8bb
PB
8266 } else {
8267 imm = ((insn & 0x04000000) >> 15)
8268 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8269 if (insn & (1 << 22)) {
8270 /* 16-bit immediate. */
8271 imm |= (insn >> 4) & 0xf000;
8272 if (insn & (1 << 23)) {
8273 /* movt */
5e3f878a 8274 tmp = load_reg(s, rd);
86831435 8275 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8276 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8277 } else {
9ee6e8bb 8278 /* movw */
7d1b0095 8279 tmp = tcg_temp_new_i32();
5e3f878a 8280 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8281 }
8282 } else {
9ee6e8bb
PB
8283 /* Add/sub 12-bit immediate. */
8284 if (rn == 15) {
b0109805 8285 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8286 if (insn & (1 << 23))
b0109805 8287 offset -= imm;
9ee6e8bb 8288 else
b0109805 8289 offset += imm;
7d1b0095 8290 tmp = tcg_temp_new_i32();
5e3f878a 8291 tcg_gen_movi_i32(tmp, offset);
2c0262af 8292 } else {
5e3f878a 8293 tmp = load_reg(s, rn);
9ee6e8bb 8294 if (insn & (1 << 23))
5e3f878a 8295 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8296 else
5e3f878a 8297 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8298 }
9ee6e8bb 8299 }
5e3f878a 8300 store_reg(s, rd, tmp);
191abaa2 8301 }
9ee6e8bb
PB
8302 } else {
8303 int shifter_out = 0;
8304 /* modified 12-bit immediate. */
8305 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8306 imm = (insn & 0xff);
8307 switch (shift) {
8308 case 0: /* XY */
8309 /* Nothing to do. */
8310 break;
8311 case 1: /* 00XY00XY */
8312 imm |= imm << 16;
8313 break;
8314 case 2: /* XY00XY00 */
8315 imm |= imm << 16;
8316 imm <<= 8;
8317 break;
8318 case 3: /* XYXYXYXY */
8319 imm |= imm << 16;
8320 imm |= imm << 8;
8321 break;
8322 default: /* Rotated constant. */
8323 shift = (shift << 1) | (imm >> 7);
8324 imm |= 0x80;
8325 imm = imm << (32 - shift);
8326 shifter_out = 1;
8327 break;
b5ff1b31 8328 }
7d1b0095 8329 tmp2 = tcg_temp_new_i32();
3174f8e9 8330 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8331 rn = (insn >> 16) & 0xf;
3174f8e9 8332 if (rn == 15) {
7d1b0095 8333 tmp = tcg_temp_new_i32();
3174f8e9
FN
8334 tcg_gen_movi_i32(tmp, 0);
8335 } else {
8336 tmp = load_reg(s, rn);
8337 }
9ee6e8bb
PB
8338 op = (insn >> 21) & 0xf;
8339 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8340 shifter_out, tmp, tmp2))
9ee6e8bb 8341 goto illegal_op;
7d1b0095 8342 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8343 rd = (insn >> 8) & 0xf;
8344 if (rd != 15) {
3174f8e9
FN
8345 store_reg(s, rd, tmp);
8346 } else {
7d1b0095 8347 tcg_temp_free_i32(tmp);
2c0262af 8348 }
2c0262af 8349 }
9ee6e8bb
PB
8350 }
8351 break;
8352 case 12: /* Load/store single data item. */
8353 {
8354 int postinc = 0;
8355 int writeback = 0;
b0109805 8356 int user;
9ee6e8bb
PB
8357 if ((insn & 0x01100000) == 0x01000000) {
8358 if (disas_neon_ls_insn(env, s, insn))
c1713132 8359 goto illegal_op;
9ee6e8bb
PB
8360 break;
8361 }
a2fdc890
PM
8362 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8363 if (rs == 15) {
8364 if (!(insn & (1 << 20))) {
8365 goto illegal_op;
8366 }
8367 if (op != 2) {
8368 /* Byte or halfword load space with dest == r15 : memory hints.
8369 * Catch them early so we don't emit pointless addressing code.
8370 * This space is a mix of:
8371 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8372 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8373 * cores)
8374 * unallocated hints, which must be treated as NOPs
8375 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8376 * which is easiest for the decoding logic
8377 * Some space which must UNDEF
8378 */
8379 int op1 = (insn >> 23) & 3;
8380 int op2 = (insn >> 6) & 0x3f;
8381 if (op & 2) {
8382 goto illegal_op;
8383 }
8384 if (rn == 15) {
8385 /* UNPREDICTABLE or unallocated hint */
8386 return 0;
8387 }
8388 if (op1 & 1) {
8389 return 0; /* PLD* or unallocated hint */
8390 }
8391 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8392 return 0; /* PLD* or unallocated hint */
8393 }
8394 /* UNDEF space, or an UNPREDICTABLE */
8395 return 1;
8396 }
8397 }
b0109805 8398 user = IS_USER(s);
9ee6e8bb 8399 if (rn == 15) {
7d1b0095 8400 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8401 /* PC relative. */
8402 /* s->pc has already been incremented by 4. */
8403 imm = s->pc & 0xfffffffc;
8404 if (insn & (1 << 23))
8405 imm += insn & 0xfff;
8406 else
8407 imm -= insn & 0xfff;
b0109805 8408 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8409 } else {
b0109805 8410 addr = load_reg(s, rn);
9ee6e8bb
PB
8411 if (insn & (1 << 23)) {
8412 /* Positive offset. */
8413 imm = insn & 0xfff;
b0109805 8414 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8415 } else {
9ee6e8bb 8416 imm = insn & 0xff;
2a0308c5
PM
8417 switch ((insn >> 8) & 0xf) {
8418 case 0x0: /* Shifted Register. */
9ee6e8bb 8419 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8420 if (shift > 3) {
8421 tcg_temp_free_i32(addr);
18c9b560 8422 goto illegal_op;
2a0308c5 8423 }
b26eefb6 8424 tmp = load_reg(s, rm);
9ee6e8bb 8425 if (shift)
b26eefb6 8426 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8427 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8428 tcg_temp_free_i32(tmp);
9ee6e8bb 8429 break;
2a0308c5 8430 case 0xc: /* Negative offset. */
b0109805 8431 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8432 break;
2a0308c5 8433 case 0xe: /* User privilege. */
b0109805
PB
8434 tcg_gen_addi_i32(addr, addr, imm);
8435 user = 1;
9ee6e8bb 8436 break;
2a0308c5 8437 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8438 imm = -imm;
8439 /* Fall through. */
2a0308c5 8440 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8441 postinc = 1;
8442 writeback = 1;
8443 break;
2a0308c5 8444 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8445 imm = -imm;
8446 /* Fall through. */
2a0308c5 8447 case 0xf: /* Pre-increment. */
b0109805 8448 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8449 writeback = 1;
8450 break;
8451 default:
2a0308c5 8452 tcg_temp_free_i32(addr);
b7bcbe95 8453 goto illegal_op;
9ee6e8bb
PB
8454 }
8455 }
8456 }
9ee6e8bb
PB
8457 if (insn & (1 << 20)) {
8458 /* Load. */
a2fdc890
PM
8459 switch (op) {
8460 case 0: tmp = gen_ld8u(addr, user); break;
8461 case 4: tmp = gen_ld8s(addr, user); break;
8462 case 1: tmp = gen_ld16u(addr, user); break;
8463 case 5: tmp = gen_ld16s(addr, user); break;
8464 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8465 default:
8466 tcg_temp_free_i32(addr);
8467 goto illegal_op;
a2fdc890
PM
8468 }
8469 if (rs == 15) {
8470 gen_bx(s, tmp);
9ee6e8bb 8471 } else {
a2fdc890 8472 store_reg(s, rs, tmp);
9ee6e8bb
PB
8473 }
8474 } else {
8475 /* Store. */
b0109805 8476 tmp = load_reg(s, rs);
9ee6e8bb 8477 switch (op) {
b0109805
PB
8478 case 0: gen_st8(tmp, addr, user); break;
8479 case 1: gen_st16(tmp, addr, user); break;
8480 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8481 default:
8482 tcg_temp_free_i32(addr);
8483 goto illegal_op;
b7bcbe95 8484 }
2c0262af 8485 }
9ee6e8bb 8486 if (postinc)
b0109805
PB
8487 tcg_gen_addi_i32(addr, addr, imm);
8488 if (writeback) {
8489 store_reg(s, rn, addr);
8490 } else {
7d1b0095 8491 tcg_temp_free_i32(addr);
b0109805 8492 }
9ee6e8bb
PB
8493 }
8494 break;
8495 default:
8496 goto illegal_op;
2c0262af 8497 }
9ee6e8bb
PB
8498 return 0;
8499illegal_op:
8500 return 1;
2c0262af
FB
8501}
8502
9ee6e8bb 8503static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8504{
8505 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8506 int32_t offset;
8507 int i;
b26eefb6 8508 TCGv tmp;
d9ba4830 8509 TCGv tmp2;
b0109805 8510 TCGv addr;
99c475ab 8511
9ee6e8bb
PB
8512 if (s->condexec_mask) {
8513 cond = s->condexec_cond;
bedd2912
JB
8514 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8515 s->condlabel = gen_new_label();
8516 gen_test_cc(cond ^ 1, s->condlabel);
8517 s->condjmp = 1;
8518 }
9ee6e8bb
PB
8519 }
8520
b5ff1b31 8521 insn = lduw_code(s->pc);
99c475ab 8522 s->pc += 2;
b5ff1b31 8523
99c475ab
FB
8524 switch (insn >> 12) {
8525 case 0: case 1:
396e467c 8526
99c475ab
FB
8527 rd = insn & 7;
8528 op = (insn >> 11) & 3;
8529 if (op == 3) {
8530 /* add/subtract */
8531 rn = (insn >> 3) & 7;
396e467c 8532 tmp = load_reg(s, rn);
99c475ab
FB
8533 if (insn & (1 << 10)) {
8534 /* immediate */
7d1b0095 8535 tmp2 = tcg_temp_new_i32();
396e467c 8536 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8537 } else {
8538 /* reg */
8539 rm = (insn >> 6) & 7;
396e467c 8540 tmp2 = load_reg(s, rm);
99c475ab 8541 }
9ee6e8bb
PB
8542 if (insn & (1 << 9)) {
8543 if (s->condexec_mask)
396e467c 8544 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8545 else
396e467c 8546 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8547 } else {
8548 if (s->condexec_mask)
396e467c 8549 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8550 else
396e467c 8551 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8552 }
7d1b0095 8553 tcg_temp_free_i32(tmp2);
396e467c 8554 store_reg(s, rd, tmp);
99c475ab
FB
8555 } else {
8556 /* shift immediate */
8557 rm = (insn >> 3) & 7;
8558 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8559 tmp = load_reg(s, rm);
8560 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8561 if (!s->condexec_mask)
8562 gen_logic_CC(tmp);
8563 store_reg(s, rd, tmp);
99c475ab
FB
8564 }
8565 break;
8566 case 2: case 3:
8567 /* arithmetic large immediate */
8568 op = (insn >> 11) & 3;
8569 rd = (insn >> 8) & 0x7;
396e467c 8570 if (op == 0) { /* mov */
7d1b0095 8571 tmp = tcg_temp_new_i32();
396e467c 8572 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8573 if (!s->condexec_mask)
396e467c
FN
8574 gen_logic_CC(tmp);
8575 store_reg(s, rd, tmp);
8576 } else {
8577 tmp = load_reg(s, rd);
7d1b0095 8578 tmp2 = tcg_temp_new_i32();
396e467c
FN
8579 tcg_gen_movi_i32(tmp2, insn & 0xff);
8580 switch (op) {
8581 case 1: /* cmp */
8582 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8583 tcg_temp_free_i32(tmp);
8584 tcg_temp_free_i32(tmp2);
396e467c
FN
8585 break;
8586 case 2: /* add */
8587 if (s->condexec_mask)
8588 tcg_gen_add_i32(tmp, tmp, tmp2);
8589 else
8590 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 8591 tcg_temp_free_i32(tmp2);
396e467c
FN
8592 store_reg(s, rd, tmp);
8593 break;
8594 case 3: /* sub */
8595 if (s->condexec_mask)
8596 tcg_gen_sub_i32(tmp, tmp, tmp2);
8597 else
8598 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 8599 tcg_temp_free_i32(tmp2);
396e467c
FN
8600 store_reg(s, rd, tmp);
8601 break;
8602 }
99c475ab 8603 }
99c475ab
FB
8604 break;
8605 case 4:
8606 if (insn & (1 << 11)) {
8607 rd = (insn >> 8) & 7;
5899f386
FB
8608 /* load pc-relative. Bit 1 of PC is ignored. */
8609 val = s->pc + 2 + ((insn & 0xff) * 4);
8610 val &= ~(uint32_t)2;
7d1b0095 8611 addr = tcg_temp_new_i32();
b0109805
PB
8612 tcg_gen_movi_i32(addr, val);
8613 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 8614 tcg_temp_free_i32(addr);
b0109805 8615 store_reg(s, rd, tmp);
99c475ab
FB
8616 break;
8617 }
8618 if (insn & (1 << 10)) {
8619 /* data processing extended or blx */
8620 rd = (insn & 7) | ((insn >> 4) & 8);
8621 rm = (insn >> 3) & 0xf;
8622 op = (insn >> 8) & 3;
8623 switch (op) {
8624 case 0: /* add */
396e467c
FN
8625 tmp = load_reg(s, rd);
8626 tmp2 = load_reg(s, rm);
8627 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8628 tcg_temp_free_i32(tmp2);
396e467c 8629 store_reg(s, rd, tmp);
99c475ab
FB
8630 break;
8631 case 1: /* cmp */
396e467c
FN
8632 tmp = load_reg(s, rd);
8633 tmp2 = load_reg(s, rm);
8634 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8635 tcg_temp_free_i32(tmp2);
8636 tcg_temp_free_i32(tmp);
99c475ab
FB
8637 break;
8638 case 2: /* mov/cpy */
396e467c
FN
8639 tmp = load_reg(s, rm);
8640 store_reg(s, rd, tmp);
99c475ab
FB
8641 break;
8642 case 3:/* branch [and link] exchange thumb register */
b0109805 8643 tmp = load_reg(s, rm);
99c475ab
FB
8644 if (insn & (1 << 7)) {
8645 val = (uint32_t)s->pc | 1;
7d1b0095 8646 tmp2 = tcg_temp_new_i32();
b0109805
PB
8647 tcg_gen_movi_i32(tmp2, val);
8648 store_reg(s, 14, tmp2);
99c475ab 8649 }
d9ba4830 8650 gen_bx(s, tmp);
99c475ab
FB
8651 break;
8652 }
8653 break;
8654 }
8655
8656 /* data processing register */
8657 rd = insn & 7;
8658 rm = (insn >> 3) & 7;
8659 op = (insn >> 6) & 0xf;
8660 if (op == 2 || op == 3 || op == 4 || op == 7) {
8661 /* the shift/rotate ops want the operands backwards */
8662 val = rm;
8663 rm = rd;
8664 rd = val;
8665 val = 1;
8666 } else {
8667 val = 0;
8668 }
8669
396e467c 8670 if (op == 9) { /* neg */
7d1b0095 8671 tmp = tcg_temp_new_i32();
396e467c
FN
8672 tcg_gen_movi_i32(tmp, 0);
8673 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8674 tmp = load_reg(s, rd);
8675 } else {
8676 TCGV_UNUSED(tmp);
8677 }
99c475ab 8678
396e467c 8679 tmp2 = load_reg(s, rm);
5899f386 8680 switch (op) {
99c475ab 8681 case 0x0: /* and */
396e467c 8682 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8683 if (!s->condexec_mask)
396e467c 8684 gen_logic_CC(tmp);
99c475ab
FB
8685 break;
8686 case 0x1: /* eor */
396e467c 8687 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8688 if (!s->condexec_mask)
396e467c 8689 gen_logic_CC(tmp);
99c475ab
FB
8690 break;
8691 case 0x2: /* lsl */
9ee6e8bb 8692 if (s->condexec_mask) {
396e467c 8693 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8694 } else {
396e467c
FN
8695 gen_helper_shl_cc(tmp2, tmp2, tmp);
8696 gen_logic_CC(tmp2);
9ee6e8bb 8697 }
99c475ab
FB
8698 break;
8699 case 0x3: /* lsr */
9ee6e8bb 8700 if (s->condexec_mask) {
396e467c 8701 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8702 } else {
396e467c
FN
8703 gen_helper_shr_cc(tmp2, tmp2, tmp);
8704 gen_logic_CC(tmp2);
9ee6e8bb 8705 }
99c475ab
FB
8706 break;
8707 case 0x4: /* asr */
9ee6e8bb 8708 if (s->condexec_mask) {
396e467c 8709 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8710 } else {
396e467c
FN
8711 gen_helper_sar_cc(tmp2, tmp2, tmp);
8712 gen_logic_CC(tmp2);
9ee6e8bb 8713 }
99c475ab
FB
8714 break;
8715 case 0x5: /* adc */
9ee6e8bb 8716 if (s->condexec_mask)
396e467c 8717 gen_adc(tmp, tmp2);
9ee6e8bb 8718 else
396e467c 8719 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8720 break;
8721 case 0x6: /* sbc */
9ee6e8bb 8722 if (s->condexec_mask)
396e467c 8723 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8724 else
396e467c 8725 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8726 break;
8727 case 0x7: /* ror */
9ee6e8bb 8728 if (s->condexec_mask) {
f669df27
AJ
8729 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8730 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8731 } else {
396e467c
FN
8732 gen_helper_ror_cc(tmp2, tmp2, tmp);
8733 gen_logic_CC(tmp2);
9ee6e8bb 8734 }
99c475ab
FB
8735 break;
8736 case 0x8: /* tst */
396e467c
FN
8737 tcg_gen_and_i32(tmp, tmp, tmp2);
8738 gen_logic_CC(tmp);
99c475ab 8739 rd = 16;
5899f386 8740 break;
99c475ab 8741 case 0x9: /* neg */
9ee6e8bb 8742 if (s->condexec_mask)
396e467c 8743 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8744 else
396e467c 8745 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8746 break;
8747 case 0xa: /* cmp */
396e467c 8748 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8749 rd = 16;
8750 break;
8751 case 0xb: /* cmn */
396e467c 8752 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8753 rd = 16;
8754 break;
8755 case 0xc: /* orr */
396e467c 8756 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8757 if (!s->condexec_mask)
396e467c 8758 gen_logic_CC(tmp);
99c475ab
FB
8759 break;
8760 case 0xd: /* mul */
7b2919a0 8761 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8762 if (!s->condexec_mask)
396e467c 8763 gen_logic_CC(tmp);
99c475ab
FB
8764 break;
8765 case 0xe: /* bic */
f669df27 8766 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8767 if (!s->condexec_mask)
396e467c 8768 gen_logic_CC(tmp);
99c475ab
FB
8769 break;
8770 case 0xf: /* mvn */
396e467c 8771 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8772 if (!s->condexec_mask)
396e467c 8773 gen_logic_CC(tmp2);
99c475ab 8774 val = 1;
5899f386 8775 rm = rd;
99c475ab
FB
8776 break;
8777 }
8778 if (rd != 16) {
396e467c
FN
8779 if (val) {
8780 store_reg(s, rm, tmp2);
8781 if (op != 0xf)
7d1b0095 8782 tcg_temp_free_i32(tmp);
396e467c
FN
8783 } else {
8784 store_reg(s, rd, tmp);
7d1b0095 8785 tcg_temp_free_i32(tmp2);
396e467c
FN
8786 }
8787 } else {
7d1b0095
PM
8788 tcg_temp_free_i32(tmp);
8789 tcg_temp_free_i32(tmp2);
99c475ab
FB
8790 }
8791 break;
8792
8793 case 5:
8794 /* load/store register offset. */
8795 rd = insn & 7;
8796 rn = (insn >> 3) & 7;
8797 rm = (insn >> 6) & 7;
8798 op = (insn >> 9) & 7;
b0109805 8799 addr = load_reg(s, rn);
b26eefb6 8800 tmp = load_reg(s, rm);
b0109805 8801 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8802 tcg_temp_free_i32(tmp);
99c475ab
FB
8803
8804 if (op < 3) /* store */
b0109805 8805 tmp = load_reg(s, rd);
99c475ab
FB
8806
8807 switch (op) {
8808 case 0: /* str */
b0109805 8809 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8810 break;
8811 case 1: /* strh */
b0109805 8812 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8813 break;
8814 case 2: /* strb */
b0109805 8815 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8816 break;
8817 case 3: /* ldrsb */
b0109805 8818 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8819 break;
8820 case 4: /* ldr */
b0109805 8821 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8822 break;
8823 case 5: /* ldrh */
b0109805 8824 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8825 break;
8826 case 6: /* ldrb */
b0109805 8827 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8828 break;
8829 case 7: /* ldrsh */
b0109805 8830 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8831 break;
8832 }
8833 if (op >= 3) /* load */
b0109805 8834 store_reg(s, rd, tmp);
7d1b0095 8835 tcg_temp_free_i32(addr);
99c475ab
FB
8836 break;
8837
8838 case 6:
8839 /* load/store word immediate offset */
8840 rd = insn & 7;
8841 rn = (insn >> 3) & 7;
b0109805 8842 addr = load_reg(s, rn);
99c475ab 8843 val = (insn >> 4) & 0x7c;
b0109805 8844 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8845
8846 if (insn & (1 << 11)) {
8847 /* load */
b0109805
PB
8848 tmp = gen_ld32(addr, IS_USER(s));
8849 store_reg(s, rd, tmp);
99c475ab
FB
8850 } else {
8851 /* store */
b0109805
PB
8852 tmp = load_reg(s, rd);
8853 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8854 }
7d1b0095 8855 tcg_temp_free_i32(addr);
99c475ab
FB
8856 break;
8857
8858 case 7:
8859 /* load/store byte immediate offset */
8860 rd = insn & 7;
8861 rn = (insn >> 3) & 7;
b0109805 8862 addr = load_reg(s, rn);
99c475ab 8863 val = (insn >> 6) & 0x1f;
b0109805 8864 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8865
8866 if (insn & (1 << 11)) {
8867 /* load */
b0109805
PB
8868 tmp = gen_ld8u(addr, IS_USER(s));
8869 store_reg(s, rd, tmp);
99c475ab
FB
8870 } else {
8871 /* store */
b0109805
PB
8872 tmp = load_reg(s, rd);
8873 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8874 }
7d1b0095 8875 tcg_temp_free_i32(addr);
99c475ab
FB
8876 break;
8877
8878 case 8:
8879 /* load/store halfword immediate offset */
8880 rd = insn & 7;
8881 rn = (insn >> 3) & 7;
b0109805 8882 addr = load_reg(s, rn);
99c475ab 8883 val = (insn >> 5) & 0x3e;
b0109805 8884 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8885
8886 if (insn & (1 << 11)) {
8887 /* load */
b0109805
PB
8888 tmp = gen_ld16u(addr, IS_USER(s));
8889 store_reg(s, rd, tmp);
99c475ab
FB
8890 } else {
8891 /* store */
b0109805
PB
8892 tmp = load_reg(s, rd);
8893 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8894 }
7d1b0095 8895 tcg_temp_free_i32(addr);
99c475ab
FB
8896 break;
8897
8898 case 9:
8899 /* load/store from stack */
8900 rd = (insn >> 8) & 7;
b0109805 8901 addr = load_reg(s, 13);
99c475ab 8902 val = (insn & 0xff) * 4;
b0109805 8903 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8904
8905 if (insn & (1 << 11)) {
8906 /* load */
b0109805
PB
8907 tmp = gen_ld32(addr, IS_USER(s));
8908 store_reg(s, rd, tmp);
99c475ab
FB
8909 } else {
8910 /* store */
b0109805
PB
8911 tmp = load_reg(s, rd);
8912 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8913 }
7d1b0095 8914 tcg_temp_free_i32(addr);
99c475ab
FB
8915 break;
8916
8917 case 10:
8918 /* add to high reg */
8919 rd = (insn >> 8) & 7;
5899f386
FB
8920 if (insn & (1 << 11)) {
8921 /* SP */
5e3f878a 8922 tmp = load_reg(s, 13);
5899f386
FB
8923 } else {
8924 /* PC. bit 1 is ignored. */
7d1b0095 8925 tmp = tcg_temp_new_i32();
5e3f878a 8926 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8927 }
99c475ab 8928 val = (insn & 0xff) * 4;
5e3f878a
PB
8929 tcg_gen_addi_i32(tmp, tmp, val);
8930 store_reg(s, rd, tmp);
99c475ab
FB
8931 break;
8932
8933 case 11:
8934 /* misc */
8935 op = (insn >> 8) & 0xf;
8936 switch (op) {
8937 case 0:
8938 /* adjust stack pointer */
b26eefb6 8939 tmp = load_reg(s, 13);
99c475ab
FB
8940 val = (insn & 0x7f) * 4;
8941 if (insn & (1 << 7))
6a0d8a1d 8942 val = -(int32_t)val;
b26eefb6
PB
8943 tcg_gen_addi_i32(tmp, tmp, val);
8944 store_reg(s, 13, tmp);
99c475ab
FB
8945 break;
8946
9ee6e8bb
PB
8947 case 2: /* sign/zero extend. */
8948 ARCH(6);
8949 rd = insn & 7;
8950 rm = (insn >> 3) & 7;
b0109805 8951 tmp = load_reg(s, rm);
9ee6e8bb 8952 switch ((insn >> 6) & 3) {
b0109805
PB
8953 case 0: gen_sxth(tmp); break;
8954 case 1: gen_sxtb(tmp); break;
8955 case 2: gen_uxth(tmp); break;
8956 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8957 }
b0109805 8958 store_reg(s, rd, tmp);
9ee6e8bb 8959 break;
99c475ab
FB
8960 case 4: case 5: case 0xc: case 0xd:
8961 /* push/pop */
b0109805 8962 addr = load_reg(s, 13);
5899f386
FB
8963 if (insn & (1 << 8))
8964 offset = 4;
99c475ab 8965 else
5899f386
FB
8966 offset = 0;
8967 for (i = 0; i < 8; i++) {
8968 if (insn & (1 << i))
8969 offset += 4;
8970 }
8971 if ((insn & (1 << 11)) == 0) {
b0109805 8972 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8973 }
99c475ab
FB
8974 for (i = 0; i < 8; i++) {
8975 if (insn & (1 << i)) {
8976 if (insn & (1 << 11)) {
8977 /* pop */
b0109805
PB
8978 tmp = gen_ld32(addr, IS_USER(s));
8979 store_reg(s, i, tmp);
99c475ab
FB
8980 } else {
8981 /* push */
b0109805
PB
8982 tmp = load_reg(s, i);
8983 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8984 }
5899f386 8985 /* advance to the next address. */
b0109805 8986 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8987 }
8988 }
a50f5b91 8989 TCGV_UNUSED(tmp);
99c475ab
FB
8990 if (insn & (1 << 8)) {
8991 if (insn & (1 << 11)) {
8992 /* pop pc */
b0109805 8993 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8994 /* don't set the pc until the rest of the instruction
8995 has completed */
8996 } else {
8997 /* push lr */
b0109805
PB
8998 tmp = load_reg(s, 14);
8999 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9000 }
b0109805 9001 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9002 }
5899f386 9003 if ((insn & (1 << 11)) == 0) {
b0109805 9004 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9005 }
99c475ab 9006 /* write back the new stack pointer */
b0109805 9007 store_reg(s, 13, addr);
99c475ab
FB
9008 /* set the new PC value */
9009 if ((insn & 0x0900) == 0x0900)
b0109805 9010 gen_bx(s, tmp);
99c475ab
FB
9011 break;
9012
9ee6e8bb
PB
9013 case 1: case 3: case 9: case 11: /* czb */
9014 rm = insn & 7;
d9ba4830 9015 tmp = load_reg(s, rm);
9ee6e8bb
PB
9016 s->condlabel = gen_new_label();
9017 s->condjmp = 1;
9018 if (insn & (1 << 11))
cb63669a 9019 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9020 else
cb63669a 9021 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9022 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9023 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9024 val = (uint32_t)s->pc + 2;
9025 val += offset;
9026 gen_jmp(s, val);
9027 break;
9028
9029 case 15: /* IT, nop-hint. */
9030 if ((insn & 0xf) == 0) {
9031 gen_nop_hint(s, (insn >> 4) & 0xf);
9032 break;
9033 }
9034 /* If Then. */
9035 s->condexec_cond = (insn >> 4) & 0xe;
9036 s->condexec_mask = insn & 0x1f;
9037 /* No actual code generated for this insn, just setup state. */
9038 break;
9039
06c949e6 9040 case 0xe: /* bkpt */
bc4a0de0 9041 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9042 break;
9043
9ee6e8bb
PB
9044 case 0xa: /* rev */
9045 ARCH(6);
9046 rn = (insn >> 3) & 0x7;
9047 rd = insn & 0x7;
b0109805 9048 tmp = load_reg(s, rn);
9ee6e8bb 9049 switch ((insn >> 6) & 3) {
66896cb8 9050 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9051 case 1: gen_rev16(tmp); break;
9052 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9053 default: goto illegal_op;
9054 }
b0109805 9055 store_reg(s, rd, tmp);
9ee6e8bb
PB
9056 break;
9057
9058 case 6: /* cps */
9059 ARCH(6);
9060 if (IS_USER(s))
9061 break;
9062 if (IS_M(env)) {
8984bd2e 9063 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9064 /* PRIMASK */
8984bd2e
PB
9065 if (insn & 1) {
9066 addr = tcg_const_i32(16);
9067 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9068 tcg_temp_free_i32(addr);
8984bd2e 9069 }
9ee6e8bb 9070 /* FAULTMASK */
8984bd2e
PB
9071 if (insn & 2) {
9072 addr = tcg_const_i32(17);
9073 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9074 tcg_temp_free_i32(addr);
8984bd2e 9075 }
b75263d6 9076 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9077 gen_lookup_tb(s);
9078 } else {
9079 if (insn & (1 << 4))
9080 shift = CPSR_A | CPSR_I | CPSR_F;
9081 else
9082 shift = 0;
fa26df03 9083 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9084 }
9085 break;
9086
99c475ab
FB
9087 default:
9088 goto undef;
9089 }
9090 break;
9091
9092 case 12:
9093 /* load/store multiple */
9094 rn = (insn >> 8) & 0x7;
b0109805 9095 addr = load_reg(s, rn);
99c475ab
FB
9096 for (i = 0; i < 8; i++) {
9097 if (insn & (1 << i)) {
99c475ab
FB
9098 if (insn & (1 << 11)) {
9099 /* load */
b0109805
PB
9100 tmp = gen_ld32(addr, IS_USER(s));
9101 store_reg(s, i, tmp);
99c475ab
FB
9102 } else {
9103 /* store */
b0109805
PB
9104 tmp = load_reg(s, i);
9105 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9106 }
5899f386 9107 /* advance to the next address */
b0109805 9108 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9109 }
9110 }
5899f386 9111 /* Base register writeback. */
b0109805
PB
9112 if ((insn & (1 << rn)) == 0) {
9113 store_reg(s, rn, addr);
9114 } else {
7d1b0095 9115 tcg_temp_free_i32(addr);
b0109805 9116 }
99c475ab
FB
9117 break;
9118
9119 case 13:
9120 /* conditional branch or swi */
9121 cond = (insn >> 8) & 0xf;
9122 if (cond == 0xe)
9123 goto undef;
9124
9125 if (cond == 0xf) {
9126 /* swi */
422ebf69 9127 gen_set_pc_im(s->pc);
9ee6e8bb 9128 s->is_jmp = DISAS_SWI;
99c475ab
FB
9129 break;
9130 }
9131 /* generate a conditional jump to next instruction */
e50e6a20 9132 s->condlabel = gen_new_label();
d9ba4830 9133 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9134 s->condjmp = 1;
99c475ab
FB
9135
9136 /* jump to the offset */
5899f386 9137 val = (uint32_t)s->pc + 2;
99c475ab 9138 offset = ((int32_t)insn << 24) >> 24;
5899f386 9139 val += offset << 1;
8aaca4c0 9140 gen_jmp(s, val);
99c475ab
FB
9141 break;
9142
9143 case 14:
358bf29e 9144 if (insn & (1 << 11)) {
9ee6e8bb
PB
9145 if (disas_thumb2_insn(env, s, insn))
9146 goto undef32;
358bf29e
PB
9147 break;
9148 }
9ee6e8bb 9149 /* unconditional branch */
99c475ab
FB
9150 val = (uint32_t)s->pc;
9151 offset = ((int32_t)insn << 21) >> 21;
9152 val += (offset << 1) + 2;
8aaca4c0 9153 gen_jmp(s, val);
99c475ab
FB
9154 break;
9155
9156 case 15:
9ee6e8bb 9157 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9158 goto undef32;
9ee6e8bb 9159 break;
99c475ab
FB
9160 }
9161 return;
9ee6e8bb 9162undef32:
bc4a0de0 9163 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9164 return;
9165illegal_op:
99c475ab 9166undef:
bc4a0de0 9167 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9168}
9169
2c0262af
FB
9170/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9171 basic block 'tb'. If search_pc is TRUE, also generate PC
9172 information for each intermediate instruction. */
2cfc5f17
TS
9173static inline void gen_intermediate_code_internal(CPUState *env,
9174 TranslationBlock *tb,
9175 int search_pc)
2c0262af
FB
9176{
9177 DisasContext dc1, *dc = &dc1;
a1d1bb31 9178 CPUBreakpoint *bp;
2c0262af
FB
9179 uint16_t *gen_opc_end;
9180 int j, lj;
0fa85d43 9181 target_ulong pc_start;
b5ff1b31 9182 uint32_t next_page_start;
2e70f6ef
PB
9183 int num_insns;
9184 int max_insns;
3b46e624 9185
2c0262af 9186 /* generate intermediate code */
0fa85d43 9187 pc_start = tb->pc;
3b46e624 9188
2c0262af
FB
9189 dc->tb = tb;
9190
2c0262af 9191 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9192
9193 dc->is_jmp = DISAS_NEXT;
9194 dc->pc = pc_start;
8aaca4c0 9195 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9196 dc->condjmp = 0;
7204ab88 9197 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9198 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9199 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9200#if !defined(CONFIG_USER_ONLY)
61f74d6a 9201 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9202#endif
5df8bac1 9203 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9204 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9205 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9206 cpu_F0s = tcg_temp_new_i32();
9207 cpu_F1s = tcg_temp_new_i32();
9208 cpu_F0d = tcg_temp_new_i64();
9209 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9210 cpu_V0 = cpu_F0d;
9211 cpu_V1 = cpu_F1d;
e677137d 9212 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9213 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9214 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9215 lj = -1;
2e70f6ef
PB
9216 num_insns = 0;
9217 max_insns = tb->cflags & CF_COUNT_MASK;
9218 if (max_insns == 0)
9219 max_insns = CF_COUNT_MASK;
9220
9221 gen_icount_start();
e12ce78d 9222
3849902c
PM
9223 tcg_clear_temp_count();
9224
e12ce78d
PM
9225 /* A note on handling of the condexec (IT) bits:
9226 *
9227 * We want to avoid the overhead of having to write the updated condexec
9228 * bits back to the CPUState for every instruction in an IT block. So:
9229 * (1) if the condexec bits are not already zero then we write
9230 * zero back into the CPUState now. This avoids complications trying
9231 * to do it at the end of the block. (For example if we don't do this
9232 * it's hard to identify whether we can safely skip writing condexec
9233 * at the end of the TB, which we definitely want to do for the case
9234 * where a TB doesn't do anything with the IT state at all.)
9235 * (2) if we are going to leave the TB then we call gen_set_condexec()
9236 * which will write the correct value into CPUState if zero is wrong.
9237 * This is done both for leaving the TB at the end, and for leaving
9238 * it because of an exception we know will happen, which is done in
9239 * gen_exception_insn(). The latter is necessary because we need to
9240 * leave the TB with the PC/IT state just prior to execution of the
9241 * instruction which caused the exception.
9242 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9243 * then the CPUState will be wrong and we need to reset it.
9244 * This is handled in the same way as restoration of the
9245 * PC in these situations: we will be called again with search_pc=1
9246 * and generate a mapping of the condexec bits for each PC in
9247 * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
9248 * the condexec bits.
9249 *
9250 * Note that there are no instructions which can read the condexec
9251 * bits, and none which can write non-static values to them, so
9252 * we don't need to care about whether CPUState is correct in the
9253 * middle of a TB.
9254 */
9255
9ee6e8bb
PB
9256 /* Reset the conditional execution bits immediately. This avoids
9257 complications trying to do it at the end of the block. */
98eac7ca 9258 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9259 {
7d1b0095 9260 TCGv tmp = tcg_temp_new_i32();
8f01245e 9261 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9262 store_cpu_field(tmp, condexec_bits);
8f01245e 9263 }
2c0262af 9264 do {
fbb4a2e3
PB
9265#ifdef CONFIG_USER_ONLY
9266 /* Intercept jump to the magic kernel page. */
9267 if (dc->pc >= 0xffff0000) {
9268 /* We always get here via a jump, so know we are not in a
9269 conditional execution block. */
9270 gen_exception(EXCP_KERNEL_TRAP);
9271 dc->is_jmp = DISAS_UPDATE;
9272 break;
9273 }
9274#else
9ee6e8bb
PB
9275 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9276 /* We always get here via a jump, so know we are not in a
9277 conditional execution block. */
d9ba4830 9278 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9279 dc->is_jmp = DISAS_UPDATE;
9280 break;
9ee6e8bb
PB
9281 }
9282#endif
9283
72cf2d4f
BS
9284 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9285 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9286 if (bp->pc == dc->pc) {
bc4a0de0 9287 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9288 /* Advance PC so that clearing the breakpoint will
9289 invalidate this TB. */
9290 dc->pc += 2;
9291 goto done_generating;
1fddef4b
FB
9292 break;
9293 }
9294 }
9295 }
2c0262af
FB
9296 if (search_pc) {
9297 j = gen_opc_ptr - gen_opc_buf;
9298 if (lj < j) {
9299 lj++;
9300 while (lj < j)
9301 gen_opc_instr_start[lj++] = 0;
9302 }
0fa85d43 9303 gen_opc_pc[lj] = dc->pc;
e12ce78d 9304 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9305 gen_opc_instr_start[lj] = 1;
2e70f6ef 9306 gen_opc_icount[lj] = num_insns;
2c0262af 9307 }
e50e6a20 9308
2e70f6ef
PB
9309 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9310 gen_io_start();
9311
5642463a
PM
9312 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9313 tcg_gen_debug_insn_start(dc->pc);
9314 }
9315
7204ab88 9316 if (dc->thumb) {
9ee6e8bb
PB
9317 disas_thumb_insn(env, dc);
9318 if (dc->condexec_mask) {
9319 dc->condexec_cond = (dc->condexec_cond & 0xe)
9320 | ((dc->condexec_mask >> 4) & 1);
9321 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9322 if (dc->condexec_mask == 0) {
9323 dc->condexec_cond = 0;
9324 }
9325 }
9326 } else {
9327 disas_arm_insn(env, dc);
9328 }
e50e6a20
FB
9329
9330 if (dc->condjmp && !dc->is_jmp) {
9331 gen_set_label(dc->condlabel);
9332 dc->condjmp = 0;
9333 }
3849902c
PM
9334
9335 if (tcg_check_temp_count()) {
9336 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9337 }
9338
aaf2d97d 9339 /* Translation stops when a conditional branch is encountered.
e50e6a20 9340 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9341 * Also stop translation when a page boundary is reached. This
bf20dc07 9342 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9343 num_insns ++;
1fddef4b
FB
9344 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9345 !env->singlestep_enabled &&
1b530a6d 9346 !singlestep &&
2e70f6ef
PB
9347 dc->pc < next_page_start &&
9348 num_insns < max_insns);
9349
9350 if (tb->cflags & CF_LAST_IO) {
9351 if (dc->condjmp) {
9352 /* FIXME: This can theoretically happen with self-modifying
9353 code. */
9354 cpu_abort(env, "IO on conditional branch instruction");
9355 }
9356 gen_io_end();
9357 }
9ee6e8bb 9358
b5ff1b31 9359 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9360 instruction was a conditional branch or trap, and the PC has
9361 already been written. */
551bd27f 9362 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9363 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9364 if (dc->condjmp) {
9ee6e8bb
PB
9365 gen_set_condexec(dc);
9366 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9367 gen_exception(EXCP_SWI);
9ee6e8bb 9368 } else {
d9ba4830 9369 gen_exception(EXCP_DEBUG);
9ee6e8bb 9370 }
e50e6a20
FB
9371 gen_set_label(dc->condlabel);
9372 }
9373 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9374 gen_set_pc_im(dc->pc);
e50e6a20 9375 dc->condjmp = 0;
8aaca4c0 9376 }
9ee6e8bb
PB
9377 gen_set_condexec(dc);
9378 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9379 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9380 } else {
9381 /* FIXME: Single stepping a WFI insn will not halt
9382 the CPU. */
d9ba4830 9383 gen_exception(EXCP_DEBUG);
9ee6e8bb 9384 }
8aaca4c0 9385 } else {
9ee6e8bb
PB
9386 /* While branches must always occur at the end of an IT block,
9387 there are a few other things that can cause us to terminate
 9388 the TB in the middle of an IT block:
9389 - Exception generating instructions (bkpt, swi, undefined).
9390 - Page boundaries.
9391 - Hardware watchpoints.
9392 Hardware breakpoints have already been handled and skip this code.
9393 */
9394 gen_set_condexec(dc);
8aaca4c0 9395 switch(dc->is_jmp) {
8aaca4c0 9396 case DISAS_NEXT:
6e256c93 9397 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9398 break;
9399 default:
9400 case DISAS_JUMP:
9401 case DISAS_UPDATE:
9402 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9403 tcg_gen_exit_tb(0);
8aaca4c0
FB
9404 break;
9405 case DISAS_TB_JUMP:
9406 /* nothing more to generate */
9407 break;
9ee6e8bb 9408 case DISAS_WFI:
d9ba4830 9409 gen_helper_wfi();
9ee6e8bb
PB
9410 break;
9411 case DISAS_SWI:
d9ba4830 9412 gen_exception(EXCP_SWI);
9ee6e8bb 9413 break;
8aaca4c0 9414 }
e50e6a20
FB
9415 if (dc->condjmp) {
9416 gen_set_label(dc->condlabel);
9ee6e8bb 9417 gen_set_condexec(dc);
6e256c93 9418 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9419 dc->condjmp = 0;
9420 }
2c0262af 9421 }
2e70f6ef 9422
9ee6e8bb 9423done_generating:
2e70f6ef 9424 gen_icount_end(tb, num_insns);
2c0262af
FB
9425 *gen_opc_ptr = INDEX_op_end;
9426
9427#ifdef DEBUG_DISAS
8fec2b8c 9428 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9429 qemu_log("----------------\n");
9430 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9431 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9432 qemu_log("\n");
2c0262af
FB
9433 }
9434#endif
b5ff1b31
FB
9435 if (search_pc) {
9436 j = gen_opc_ptr - gen_opc_buf;
9437 lj++;
9438 while (lj <= j)
9439 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9440 } else {
2c0262af 9441 tb->size = dc->pc - pc_start;
2e70f6ef 9442 tb->icount = num_insns;
b5ff1b31 9443 }
2c0262af
FB
9444}
9445
2cfc5f17 9446void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9447{
2cfc5f17 9448 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9449}
9450
2cfc5f17 9451void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9452{
2cfc5f17 9453 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9454}
9455
/* Human-readable names for the CPU mode encoded in the low 4 bits of
   the PSR (indexed by psr & 0xf below).  "???" marks reserved/invalid
   mode encodings.  Declared fully const (pointers as well as the
   strings) so the table is read-only and cannot be modified at
   run time. */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 9460
9a78eead 9461void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9462 int flags)
2c0262af
FB
9463{
9464 int i;
06e80fc9 9465#if 0
bc380d17 9466 union {
b7bcbe95
FB
9467 uint32_t i;
9468 float s;
9469 } s0, s1;
9470 CPU_DoubleU d;
a94a6abf
PB
9471 /* ??? This assumes float64 and double have the same layout.
9472 Oh well, it's only debug dumps. */
9473 union {
9474 float64 f64;
9475 double d;
9476 } d0;
06e80fc9 9477#endif
b5ff1b31 9478 uint32_t psr;
2c0262af
FB
9479
9480 for(i=0;i<16;i++) {
7fe48483 9481 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9482 if ((i % 4) == 3)
7fe48483 9483 cpu_fprintf(f, "\n");
2c0262af 9484 else
7fe48483 9485 cpu_fprintf(f, " ");
2c0262af 9486 }
b5ff1b31 9487 psr = cpsr_read(env);
687fa640
TS
9488 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9489 psr,
b5ff1b31
FB
9490 psr & (1 << 31) ? 'N' : '-',
9491 psr & (1 << 30) ? 'Z' : '-',
9492 psr & (1 << 29) ? 'C' : '-',
9493 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9494 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9495 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9496
5e3f878a 9497#if 0
b7bcbe95 9498 for (i = 0; i < 16; i++) {
8e96005d
FB
9499 d.d = env->vfp.regs[i];
9500 s0.i = d.l.lower;
9501 s1.i = d.l.upper;
a94a6abf
PB
9502 d0.f64 = d.d;
9503 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9504 i * 2, (int)s0.i, s0.s,
a94a6abf 9505 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9506 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9507 d0.d);
b7bcbe95 9508 }
40f137e1 9509 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9510#endif
2c0262af 9511}
a6b025d3 9512
d2856f1a
AJ
9513void gen_pc_load(CPUState *env, TranslationBlock *tb,
9514 unsigned long searched_pc, int pc_pos, void *puc)
9515{
9516 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 9517 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 9518}