/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }

#define GEN_HELPER 2
#include "helpers.h"
}

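/* cpu_env and cpu_R[0..15] are TCG globals bound to fields of CPUState, so a
   TCG-level read or write of cpu_R[n] in generated code becomes a load or
   store of the corresponding guest register.  The 64-bit cpu_V0/cpu_V1/cpu_M0
   values declared above are, by contrast, ordinary temporaries that are
   simply reused across instructions for efficiency.  */
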
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we only need to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

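/* Illustrative sketch (not taken from any particular decoder case) of how the
   register and temporary helpers above are meant to be combined:

       TCGv tmp = load_reg(s, rn);       // fresh temp holding guest rn
       tcg_gen_addi_i32(tmp, tmp, imm);  // operate on the temp
       store_reg(s, rd, tmp);            // write back; store_reg frees the temp

   Every value obtained from new_tmp()/load_reg() must eventually be released
   with dead_tmp() or handed to a store_* helper, so that num_temps balances
   out again by the end of the instruction.  */
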
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

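/* The xor/sub pair in gen_sbfx() is the usual branch-free sign extension:
   after masking, var holds an unsigned value v in [0, 2^width).  Computing
   (v ^ signbit) - signbit leaves values below the sign bit unchanged and maps
   values with the sign bit set to their negative two's-complement equivalent.
   For example, with width == 8: v = 0x7f stays 0x7f, while v = 0x80 becomes
   (0x80 ^ 0x80) - 0x80 = -128.  */
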
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

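/* Worked example for gen_bfi(): with base = 0x12345678, val = 0x5,
   shift = 8 and mask = 0xf, the field at bits [11:8] is cleared and
   replaced, giving dest = 0x12345578.  */
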
/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

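/* In gen_roundqd() above, a holds the low 32 bits and b the high 32 bits of
   the 64-bit value; adding bit 31 of the low word to the high word rounds to
   the nearest multiple of 2^32 (halfway cases round up), leaving the rounded
   high word in a.  */
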
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

/* Unsigned 32x32->64 multiply.  */
static void gen_mull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

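/* gen_add16() follows the pseudo-code in the comment above: clearing bit 15
   of both operands guarantees that the low halfword addition cannot carry
   into the high halfword, and xoring the saved (t0 ^ t1) & 0x8000 back in
   restores the correct bit 15 of the low halfword result.  */
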
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

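/* The "+ CF - 1" in gen_sub_carry() mirrors the ARM definition of subtract
   with carry: SBC computes Rn - shifter_operand - NOT(C), i.e. the borrow is
   the complement of the carry flag, so a set C means "no borrow".  */
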
/* T0 &= ~T1.  Clobbers T1.  */
/* FIXME: Implement bic natively.  */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME:  Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
};

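/* The shift == 0 special cases above reflect the ARM immediate shift
   encoding: LSR #0 and ASR #0 actually encode LSR #32 and ASR #32, and
   ROR #0 encodes RRX (rotate right by one through the carry flag), which is
   why the ROR/RRX path shifts the old carry in at bit 31.  */
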
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

6ddbc6e4
PB
526#define PAS_OP(pfx) \
527 switch (op2) { \
528 case 0: gen_pas_helper(glue(pfx,add16)); break; \
529 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
530 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
531 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
532 case 4: gen_pas_helper(glue(pfx,add8)); break; \
533 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
534 }
d9ba4830 535static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 536{
a7812ae4 537 TCGv_ptr tmp;
6ddbc6e4
PB
538
539 switch (op1) {
540#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
541 case 1:
a7812ae4 542 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
543 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
544 PAS_OP(s)
545 break;
546 case 5:
a7812ae4 547 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
548 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
549 PAS_OP(u)
550 break;
551#undef gen_pas_helper
552#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
553 case 2:
554 PAS_OP(q);
555 break;
556 case 3:
557 PAS_OP(sh);
558 break;
559 case 6:
560 PAS_OP(uq);
561 break;
562 case 7:
563 PAS_OP(uh);
564 break;
565#undef gen_pas_helper
566 }
567}
9ee6e8bb
PB
568#undef PAS_OP
569
6ddbc6e4
PB
570/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
571#define PAS_OP(pfx) \
572 switch (op2) { \
573 case 0: gen_pas_helper(glue(pfx,add8)); break; \
574 case 1: gen_pas_helper(glue(pfx,add16)); break; \
575 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
576 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
577 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
578 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
579 }
d9ba4830 580static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 581{
a7812ae4 582 TCGv_ptr tmp;
6ddbc6e4
PB
583
584 switch (op1) {
585#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
586 case 0:
a7812ae4 587 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
588 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
589 PAS_OP(s)
590 break;
591 case 4:
a7812ae4 592 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
593 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
594 PAS_OP(u)
595 break;
596#undef gen_pas_helper
597#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
598 case 1:
599 PAS_OP(q);
600 break;
601 case 2:
602 PAS_OP(sh);
603 break;
604 case 5:
605 PAS_OP(uq);
606 break;
607 case 6:
608 PAS_OP(uh);
609 break;
610#undef gen_pas_helper
611 }
612}
9ee6e8bb
PB
613#undef PAS_OP
614
d9ba4830
PB
615static void gen_test_cc(int cc, int label)
616{
617 TCGv tmp;
618 TCGv tmp2;
d9ba4830
PB
619 int inv;
620
d9ba4830
PB
621 switch (cc) {
622 case 0: /* eq: Z */
6fbe23d5 623 tmp = load_cpu_field(ZF);
cb63669a 624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
625 break;
626 case 1: /* ne: !Z */
6fbe23d5 627 tmp = load_cpu_field(ZF);
cb63669a 628 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
629 break;
630 case 2: /* cs: C */
631 tmp = load_cpu_field(CF);
cb63669a 632 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
633 break;
634 case 3: /* cc: !C */
635 tmp = load_cpu_field(CF);
cb63669a 636 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
637 break;
638 case 4: /* mi: N */
6fbe23d5 639 tmp = load_cpu_field(NF);
cb63669a 640 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
641 break;
642 case 5: /* pl: !N */
6fbe23d5 643 tmp = load_cpu_field(NF);
cb63669a 644 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
645 break;
646 case 6: /* vs: V */
647 tmp = load_cpu_field(VF);
cb63669a 648 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
649 break;
650 case 7: /* vc: !V */
651 tmp = load_cpu_field(VF);
cb63669a 652 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
653 break;
654 case 8: /* hi: C && !Z */
655 inv = gen_new_label();
656 tmp = load_cpu_field(CF);
cb63669a 657 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 658 dead_tmp(tmp);
6fbe23d5 659 tmp = load_cpu_field(ZF);
cb63669a 660 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
661 gen_set_label(inv);
662 break;
663 case 9: /* ls: !C || Z */
664 tmp = load_cpu_field(CF);
cb63669a 665 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 666 dead_tmp(tmp);
6fbe23d5 667 tmp = load_cpu_field(ZF);
cb63669a 668 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
669 break;
670 case 10: /* ge: N == V -> N ^ V == 0 */
671 tmp = load_cpu_field(VF);
6fbe23d5 672 tmp2 = load_cpu_field(NF);
d9ba4830
PB
673 tcg_gen_xor_i32(tmp, tmp, tmp2);
674 dead_tmp(tmp2);
cb63669a 675 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
676 break;
677 case 11: /* lt: N != V -> N ^ V != 0 */
678 tmp = load_cpu_field(VF);
6fbe23d5 679 tmp2 = load_cpu_field(NF);
d9ba4830
PB
680 tcg_gen_xor_i32(tmp, tmp, tmp2);
681 dead_tmp(tmp2);
cb63669a 682 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
683 break;
684 case 12: /* gt: !Z && N == V */
685 inv = gen_new_label();
6fbe23d5 686 tmp = load_cpu_field(ZF);
cb63669a 687 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
688 dead_tmp(tmp);
689 tmp = load_cpu_field(VF);
6fbe23d5 690 tmp2 = load_cpu_field(NF);
d9ba4830
PB
691 tcg_gen_xor_i32(tmp, tmp, tmp2);
692 dead_tmp(tmp2);
cb63669a 693 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
694 gen_set_label(inv);
695 break;
696 case 13: /* le: Z || N != V */
6fbe23d5 697 tmp = load_cpu_field(ZF);
cb63669a 698 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
699 dead_tmp(tmp);
700 tmp = load_cpu_field(VF);
6fbe23d5 701 tmp2 = load_cpu_field(NF);
d9ba4830
PB
702 tcg_gen_xor_i32(tmp, tmp, tmp2);
703 dead_tmp(tmp2);
cb63669a 704 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
705 break;
706 default:
707 fprintf(stderr, "Bad condition code 0x%x\n", cc);
708 abort();
709 }
710 dead_tmp(tmp);
711}
2c0262af 712
b1d8e52e 713static const uint8_t table_logic_cc[16] = {
2c0262af
FB
714 1, /* and */
715 1, /* xor */
716 0, /* sub */
717 0, /* rsb */
718 0, /* add */
719 0, /* adc */
720 0, /* sbc */
721 0, /* rsc */
722 1, /* andl */
723 1, /* xorl */
724 0, /* cmp */
725 0, /* cmn */
726 1, /* orr */
727 1, /* mov */
728 1, /* bic */
729 1, /* mvn */
730};
3b46e624 731
d9ba4830
PB
732/* Set PC and Thumb state from an immediate address. */
733static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 734{
b26eefb6 735 TCGv tmp;
99c475ab 736
b26eefb6 737 s->is_jmp = DISAS_UPDATE;
d9ba4830 738 if (s->thumb != (addr & 1)) {
155c3eac 739 tmp = new_tmp();
d9ba4830
PB
740 tcg_gen_movi_i32(tmp, addr & 1);
741 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
155c3eac 742 dead_tmp(tmp);
d9ba4830 743 }
155c3eac 744 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
745}
746
747/* Set PC and Thumb state from var. var is marked as dead. */
748static inline void gen_bx(DisasContext *s, TCGv var)
749{
d9ba4830 750 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
751 tcg_gen_andi_i32(cpu_R[15], var, ~1);
752 tcg_gen_andi_i32(var, var, 1);
753 store_cpu_field(var, thumb);
d9ba4830
PB
754}
755
21aeb343
JR
756/* Variant of store_reg which uses branch&exchange logic when storing
757 to r15 in ARM architecture v7 and above. The source must be a temporary
758 and will be marked as dead. */
759static inline void store_reg_bx(CPUState *env, DisasContext *s,
760 int reg, TCGv var)
761{
762 if (reg == 15 && ENABLE_ARCH_7) {
763 gen_bx(s, var);
764 } else {
765 store_reg(s, reg, var);
766 }
767}
768
b0109805
PB
769static inline TCGv gen_ld8s(TCGv addr, int index)
770{
771 TCGv tmp = new_tmp();
772 tcg_gen_qemu_ld8s(tmp, addr, index);
773 return tmp;
774}
775static inline TCGv gen_ld8u(TCGv addr, int index)
776{
777 TCGv tmp = new_tmp();
778 tcg_gen_qemu_ld8u(tmp, addr, index);
779 return tmp;
780}
781static inline TCGv gen_ld16s(TCGv addr, int index)
782{
783 TCGv tmp = new_tmp();
784 tcg_gen_qemu_ld16s(tmp, addr, index);
785 return tmp;
786}
787static inline TCGv gen_ld16u(TCGv addr, int index)
788{
789 TCGv tmp = new_tmp();
790 tcg_gen_qemu_ld16u(tmp, addr, index);
791 return tmp;
792}
793static inline TCGv gen_ld32(TCGv addr, int index)
794{
795 TCGv tmp = new_tmp();
796 tcg_gen_qemu_ld32u(tmp, addr, index);
797 return tmp;
798}
799static inline void gen_st8(TCGv val, TCGv addr, int index)
800{
801 tcg_gen_qemu_st8(val, addr, index);
802 dead_tmp(val);
803}
804static inline void gen_st16(TCGv val, TCGv addr, int index)
805{
806 tcg_gen_qemu_st16(val, addr, index);
807 dead_tmp(val);
808}
809static inline void gen_st32(TCGv val, TCGv addr, int index)
810{
811 tcg_gen_qemu_st32(val, addr, index);
812 dead_tmp(val);
813}
b5ff1b31 814
5e3f878a
PB
815static inline void gen_set_pc_im(uint32_t val)
816{
155c3eac 817 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
818}
819
b5ff1b31
FB
820/* Force a TB lookup after an instruction that changes the CPU state. */
821static inline void gen_lookup_tb(DisasContext *s)
822{
a6445c52 823 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
824 s->is_jmp = DISAS_UPDATE;
825}
826
b0109805
PB
827static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
828 TCGv var)
2c0262af 829{
1e8d4eec 830 int val, rm, shift, shiftop;
b26eefb6 831 TCGv offset;
2c0262af
FB
832
833 if (!(insn & (1 << 25))) {
834 /* immediate */
835 val = insn & 0xfff;
836 if (!(insn & (1 << 23)))
837 val = -val;
537730b9 838 if (val != 0)
b0109805 839 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
840 } else {
841 /* shift/register */
842 rm = (insn) & 0xf;
843 shift = (insn >> 7) & 0x1f;
1e8d4eec 844 shiftop = (insn >> 5) & 3;
b26eefb6 845 offset = load_reg(s, rm);
9a119ff6 846 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 847 if (!(insn & (1 << 23)))
b0109805 848 tcg_gen_sub_i32(var, var, offset);
2c0262af 849 else
b0109805 850 tcg_gen_add_i32(var, var, offset);
b26eefb6 851 dead_tmp(offset);
2c0262af
FB
852 }
853}
854
191f9a93 855static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 856 int extra, TCGv var)
2c0262af
FB
857{
858 int val, rm;
b26eefb6 859 TCGv offset;
3b46e624 860
2c0262af
FB
861 if (insn & (1 << 22)) {
862 /* immediate */
863 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
864 if (!(insn & (1 << 23)))
865 val = -val;
18acad92 866 val += extra;
537730b9 867 if (val != 0)
b0109805 868 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
869 } else {
870 /* register */
191f9a93 871 if (extra)
b0109805 872 tcg_gen_addi_i32(var, var, extra);
2c0262af 873 rm = (insn) & 0xf;
b26eefb6 874 offset = load_reg(s, rm);
2c0262af 875 if (!(insn & (1 << 23)))
b0109805 876 tcg_gen_sub_i32(var, var, offset);
2c0262af 877 else
b0109805 878 tcg_gen_add_i32(var, var, offset);
b26eefb6 879 dead_tmp(offset);
2c0262af
FB
880 }
881}
882
4373f3ce
PB
883#define VFP_OP2(name) \
884static inline void gen_vfp_##name(int dp) \
885{ \
886 if (dp) \
887 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
888 else \
889 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
890}
891
4373f3ce
PB
892VFP_OP2(add)
893VFP_OP2(sub)
894VFP_OP2(mul)
895VFP_OP2(div)
896
897#undef VFP_OP2
898
899static inline void gen_vfp_abs(int dp)
900{
901 if (dp)
902 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
903 else
904 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
905}
906
907static inline void gen_vfp_neg(int dp)
908{
909 if (dp)
910 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
911 else
912 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
913}
914
915static inline void gen_vfp_sqrt(int dp)
916{
917 if (dp)
918 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
919 else
920 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
921}
922
923static inline void gen_vfp_cmp(int dp)
924{
925 if (dp)
926 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
927 else
928 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
929}
930
931static inline void gen_vfp_cmpe(int dp)
932{
933 if (dp)
934 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
935 else
936 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
937}
938
939static inline void gen_vfp_F1_ld0(int dp)
940{
941 if (dp)
5b340b51 942 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 943 else
5b340b51 944 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
945}
946
947static inline void gen_vfp_uito(int dp)
948{
949 if (dp)
950 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
951 else
952 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
953}
954
955static inline void gen_vfp_sito(int dp)
956{
957 if (dp)
66230e0d 958 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 959 else
66230e0d 960 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
961}
962
963static inline void gen_vfp_toui(int dp)
964{
965 if (dp)
966 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
967 else
968 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
969}
970
971static inline void gen_vfp_touiz(int dp)
972{
973 if (dp)
974 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
975 else
976 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
977}
978
979static inline void gen_vfp_tosi(int dp)
980{
981 if (dp)
982 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
983 else
984 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
985}
986
987static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
988{
989 if (dp)
4373f3ce 990 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 991 else
4373f3ce
PB
992 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
993}
994
995#define VFP_GEN_FIX(name) \
996static inline void gen_vfp_##name(int dp, int shift) \
997{ \
998 if (dp) \
999 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1000 else \
1001 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
9ee6e8bb 1002}
4373f3ce
PB
1003VFP_GEN_FIX(tosh)
1004VFP_GEN_FIX(tosl)
1005VFP_GEN_FIX(touh)
1006VFP_GEN_FIX(toul)
1007VFP_GEN_FIX(shto)
1008VFP_GEN_FIX(slto)
1009VFP_GEN_FIX(uhto)
1010VFP_GEN_FIX(ulto)
1011#undef VFP_GEN_FIX
9ee6e8bb 1012
312eea9f 1013static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1014{
1015 if (dp)
312eea9f 1016 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1017 else
312eea9f 1018 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1019}
1020
312eea9f 1021static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1022{
1023 if (dp)
312eea9f 1024 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1025 else
312eea9f 1026 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1027}
1028
8e96005d
FB
1029static inline long
1030vfp_reg_offset (int dp, int reg)
1031{
1032 if (dp)
1033 return offsetof(CPUARMState, vfp.regs[reg]);
1034 else if (reg & 1) {
1035 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1036 + offsetof(CPU_DoubleU, l.upper);
1037 } else {
1038 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1039 + offsetof(CPU_DoubleU, l.lower);
1040 }
1041}
9ee6e8bb
PB
1042
1043/* Return the offset of a 32-bit piece of a NEON register.
1044 zero is the least significant end of the register. */
1045static inline long
1046neon_reg_offset (int reg, int n)
1047{
1048 int sreg;
1049 sreg = reg * 2 + n;
1050 return vfp_reg_offset(0, sreg);
1051}
1052
8f8e3aa4
PB
1053static TCGv neon_load_reg(int reg, int pass)
1054{
1055 TCGv tmp = new_tmp();
1056 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1057 return tmp;
1058}
1059
1060static void neon_store_reg(int reg, int pass, TCGv var)
1061{
1062 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1063 dead_tmp(var);
1064}
1065
a7812ae4 1066static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1067{
1068 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1069}
1070
a7812ae4 1071static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1072{
1073 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1074}
1075
4373f3ce
PB
1076#define tcg_gen_ld_f32 tcg_gen_ld_i32
1077#define tcg_gen_ld_f64 tcg_gen_ld_i64
1078#define tcg_gen_st_f32 tcg_gen_st_i32
1079#define tcg_gen_st_f64 tcg_gen_st_i64
1080
b7bcbe95
FB
1081static inline void gen_mov_F0_vreg(int dp, int reg)
1082{
1083 if (dp)
4373f3ce 1084 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1085 else
4373f3ce 1086 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1087}
1088
1089static inline void gen_mov_F1_vreg(int dp, int reg)
1090{
1091 if (dp)
4373f3ce 1092 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1093 else
4373f3ce 1094 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1095}
1096
1097static inline void gen_mov_vreg_F0(int dp, int reg)
1098{
1099 if (dp)
4373f3ce 1100 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1101 else
4373f3ce 1102 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1103}
1104
18c9b560
AZ
1105#define ARM_CP_RW_BIT (1 << 20)
1106
a7812ae4 1107static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1108{
1109 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1110}
1111
a7812ae4 1112static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1113{
1114 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1115}
1116
da6b5335 1117static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1118{
da6b5335
FN
1119 TCGv var = new_tmp();
1120 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1121 return var;
e677137d
PB
1122}
1123
da6b5335 1124static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1125{
da6b5335 1126 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
e677137d
PB
1127}
1128
1129static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1130{
1131 iwmmxt_store_reg(cpu_M0, rn);
1132}
1133
1134static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1135{
1136 iwmmxt_load_reg(cpu_M0, rn);
1137}
1138
1139static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1140{
1141 iwmmxt_load_reg(cpu_V1, rn);
1142 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1143}
1144
1145static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1146{
1147 iwmmxt_load_reg(cpu_V1, rn);
1148 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1149}
1150
1151static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1152{
1153 iwmmxt_load_reg(cpu_V1, rn);
1154 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1155}
1156
1157#define IWMMXT_OP(name) \
1158static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1159{ \
1160 iwmmxt_load_reg(cpu_V1, rn); \
1161 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1162}
1163
1164#define IWMMXT_OP_ENV(name) \
1165static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1166{ \
1167 iwmmxt_load_reg(cpu_V1, rn); \
1168 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1169}
1170
1171#define IWMMXT_OP_ENV_SIZE(name) \
1172IWMMXT_OP_ENV(name##b) \
1173IWMMXT_OP_ENV(name##w) \
1174IWMMXT_OP_ENV(name##l)
1175
1176#define IWMMXT_OP_ENV1(name) \
1177static inline void gen_op_iwmmxt_##name##_M0(void) \
1178{ \
1179 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1180}
1181
1182IWMMXT_OP(maddsq)
1183IWMMXT_OP(madduq)
1184IWMMXT_OP(sadb)
1185IWMMXT_OP(sadw)
1186IWMMXT_OP(mulslw)
1187IWMMXT_OP(mulshw)
1188IWMMXT_OP(mululw)
1189IWMMXT_OP(muluhw)
1190IWMMXT_OP(macsw)
1191IWMMXT_OP(macuw)
1192
1193IWMMXT_OP_ENV_SIZE(unpackl)
1194IWMMXT_OP_ENV_SIZE(unpackh)
1195
1196IWMMXT_OP_ENV1(unpacklub)
1197IWMMXT_OP_ENV1(unpackluw)
1198IWMMXT_OP_ENV1(unpacklul)
1199IWMMXT_OP_ENV1(unpackhub)
1200IWMMXT_OP_ENV1(unpackhuw)
1201IWMMXT_OP_ENV1(unpackhul)
1202IWMMXT_OP_ENV1(unpacklsb)
1203IWMMXT_OP_ENV1(unpacklsw)
1204IWMMXT_OP_ENV1(unpacklsl)
1205IWMMXT_OP_ENV1(unpackhsb)
1206IWMMXT_OP_ENV1(unpackhsw)
1207IWMMXT_OP_ENV1(unpackhsl)
1208
1209IWMMXT_OP_ENV_SIZE(cmpeq)
1210IWMMXT_OP_ENV_SIZE(cmpgtu)
1211IWMMXT_OP_ENV_SIZE(cmpgts)
1212
1213IWMMXT_OP_ENV_SIZE(mins)
1214IWMMXT_OP_ENV_SIZE(minu)
1215IWMMXT_OP_ENV_SIZE(maxs)
1216IWMMXT_OP_ENV_SIZE(maxu)
1217
1218IWMMXT_OP_ENV_SIZE(subn)
1219IWMMXT_OP_ENV_SIZE(addn)
1220IWMMXT_OP_ENV_SIZE(subu)
1221IWMMXT_OP_ENV_SIZE(addu)
1222IWMMXT_OP_ENV_SIZE(subs)
1223IWMMXT_OP_ENV_SIZE(adds)
1224
1225IWMMXT_OP_ENV(avgb0)
1226IWMMXT_OP_ENV(avgb1)
1227IWMMXT_OP_ENV(avgw0)
1228IWMMXT_OP_ENV(avgw1)
1229
1230IWMMXT_OP(msadb)
1231
1232IWMMXT_OP_ENV(packuw)
1233IWMMXT_OP_ENV(packul)
1234IWMMXT_OP_ENV(packuq)
1235IWMMXT_OP_ENV(packsw)
1236IWMMXT_OP_ENV(packsl)
1237IWMMXT_OP_ENV(packsq)
1238
e677137d
PB
1239static void gen_op_iwmmxt_set_mup(void)
1240{
1241 TCGv tmp;
1242 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1243 tcg_gen_ori_i32(tmp, tmp, 2);
1244 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1245}
1246
1247static void gen_op_iwmmxt_set_cup(void)
1248{
1249 TCGv tmp;
1250 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1251 tcg_gen_ori_i32(tmp, tmp, 1);
1252 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1253}
1254
1255static void gen_op_iwmmxt_setpsr_nz(void)
1256{
1257 TCGv tmp = new_tmp();
1258 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1259 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1260}
1261
1262static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1263{
1264 iwmmxt_load_reg(cpu_V1, rn);
86831435 1265 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1266 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1267}
1268
da6b5335 1269static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1270{
1271 int rd;
1272 uint32_t offset;
da6b5335 1273 TCGv tmp;
18c9b560
AZ
1274
1275 rd = (insn >> 16) & 0xf;
da6b5335 1276 tmp = load_reg(s, rd);
18c9b560
AZ
1277
1278 offset = (insn & 0xff) << ((insn >> 7) & 2);
1279 if (insn & (1 << 24)) {
1280 /* Pre indexed */
1281 if (insn & (1 << 23))
da6b5335 1282 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1283 else
da6b5335
FN
1284 tcg_gen_addi_i32(tmp, tmp, -offset);
1285 tcg_gen_mov_i32(dest, tmp);
18c9b560 1286 if (insn & (1 << 21))
da6b5335
FN
1287 store_reg(s, rd, tmp);
1288 else
1289 dead_tmp(tmp);
18c9b560
AZ
1290 } else if (insn & (1 << 21)) {
1291 /* Post indexed */
da6b5335 1292 tcg_gen_mov_i32(dest, tmp);
18c9b560 1293 if (insn & (1 << 23))
da6b5335 1294 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1295 else
da6b5335
FN
1296 tcg_gen_addi_i32(tmp, tmp, -offset);
1297 store_reg(s, rd, tmp);
18c9b560
AZ
1298 } else if (!(insn & (1 << 23)))
1299 return 1;
1300 return 0;
1301}
1302
da6b5335 1303static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1304{
1305 int rd = (insn >> 0) & 0xf;
da6b5335 1306 TCGv tmp;
18c9b560 1307
da6b5335
FN
1308 if (insn & (1 << 8)) {
1309 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1310 return 1;
da6b5335
FN
1311 } else {
1312 tmp = iwmmxt_load_creg(rd);
1313 }
1314 } else {
1315 tmp = new_tmp();
1316 iwmmxt_load_reg(cpu_V0, rd);
1317 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1318 }
1319 tcg_gen_andi_i32(tmp, tmp, mask);
1320 tcg_gen_mov_i32(dest, tmp);
1321 dead_tmp(tmp);
18c9b560
AZ
1322 return 0;
1323}
1324
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
1327static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1328{
1329 int rd, wrd;
1330 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1331 TCGv addr;
1332 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1333
1334 if ((insn & 0x0e000e00) == 0x0c000000) {
1335 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1336 wrd = insn & 0xf;
1337 rdlo = (insn >> 12) & 0xf;
1338 rdhi = (insn >> 16) & 0xf;
1339 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1340 iwmmxt_load_reg(cpu_V0, wrd);
1341 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1342 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1343 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1344 } else { /* TMCRR */
da6b5335
FN
1345 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1346 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1347 gen_op_iwmmxt_set_mup();
1348 }
1349 return 0;
1350 }
1351
1352 wrd = (insn >> 12) & 0xf;
da6b5335
FN
1353 addr = new_tmp();
1354 if (gen_iwmmxt_address(s, insn, addr)) {
1355 dead_tmp(addr);
18c9b560 1356 return 1;
da6b5335 1357 }
18c9b560
AZ
1358 if (insn & ARM_CP_RW_BIT) {
1359 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
da6b5335
FN
1360 tmp = new_tmp();
1361 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1362 iwmmxt_store_creg(wrd, tmp);
18c9b560 1363 } else {
e677137d
PB
1364 i = 1;
1365 if (insn & (1 << 8)) {
1366 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1367 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1368 i = 0;
1369 } else { /* WLDRW wRd */
da6b5335 1370 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1371 }
1372 } else {
1373 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1374 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1375 } else { /* WLDRB */
da6b5335 1376 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1377 }
1378 }
1379 if (i) {
1380 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1381 dead_tmp(tmp);
1382 }
18c9b560
AZ
1383 gen_op_iwmmxt_movq_wRn_M0(wrd);
1384 }
1385 } else {
1386 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1387 tmp = iwmmxt_load_creg(wrd);
1388 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1389 } else {
1390 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1391 tmp = new_tmp();
1392 if (insn & (1 << 8)) {
1393 if (insn & (1 << 22)) { /* WSTRD */
1394 dead_tmp(tmp);
da6b5335 1395 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1396 } else { /* WSTRW wRd */
1397 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1398 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1399 }
1400 } else {
1401 if (insn & (1 << 22)) { /* WSTRH */
1402 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1403 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1404 } else { /* WSTRB */
1405 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1406 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1407 }
1408 }
18c9b560
AZ
1409 }
1410 }
1411 return 0;
1412 }
1413
1414 if ((insn & 0x0f000000) != 0x0e000000)
1415 return 1;
1416
1417 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1418 case 0x000: /* WOR */
1419 wrd = (insn >> 12) & 0xf;
1420 rd0 = (insn >> 0) & 0xf;
1421 rd1 = (insn >> 16) & 0xf;
1422 gen_op_iwmmxt_movq_M0_wRn(rd0);
1423 gen_op_iwmmxt_orq_M0_wRn(rd1);
1424 gen_op_iwmmxt_setpsr_nz();
1425 gen_op_iwmmxt_movq_wRn_M0(wrd);
1426 gen_op_iwmmxt_set_mup();
1427 gen_op_iwmmxt_set_cup();
1428 break;
1429 case 0x011: /* TMCR */
1430 if (insn & 0xf)
1431 return 1;
1432 rd = (insn >> 12) & 0xf;
1433 wrd = (insn >> 16) & 0xf;
1434 switch (wrd) {
1435 case ARM_IWMMXT_wCID:
1436 case ARM_IWMMXT_wCASF:
1437 break;
1438 case ARM_IWMMXT_wCon:
1439 gen_op_iwmmxt_set_cup();
1440 /* Fall through. */
1441 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1442 tmp = iwmmxt_load_creg(wrd);
1443 tmp2 = load_reg(s, rd);
1444 tcg_gen_bic_i32(tmp, tmp, tmp2);
1445 dead_tmp(tmp2);
1446 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1447 break;
1448 case ARM_IWMMXT_wCGR0:
1449 case ARM_IWMMXT_wCGR1:
1450 case ARM_IWMMXT_wCGR2:
1451 case ARM_IWMMXT_wCGR3:
1452 gen_op_iwmmxt_set_cup();
da6b5335
FN
1453 tmp = load_reg(s, rd);
1454 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1455 break;
1456 default:
1457 return 1;
1458 }
1459 break;
1460 case 0x100: /* WXOR */
1461 wrd = (insn >> 12) & 0xf;
1462 rd0 = (insn >> 0) & 0xf;
1463 rd1 = (insn >> 16) & 0xf;
1464 gen_op_iwmmxt_movq_M0_wRn(rd0);
1465 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1466 gen_op_iwmmxt_setpsr_nz();
1467 gen_op_iwmmxt_movq_wRn_M0(wrd);
1468 gen_op_iwmmxt_set_mup();
1469 gen_op_iwmmxt_set_cup();
1470 break;
1471 case 0x111: /* TMRC */
1472 if (insn & 0xf)
1473 return 1;
1474 rd = (insn >> 12) & 0xf;
1475 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1476 tmp = iwmmxt_load_creg(wrd);
1477 store_reg(s, rd, tmp);
18c9b560
AZ
1478 break;
1479 case 0x300: /* WANDN */
1480 wrd = (insn >> 12) & 0xf;
1481 rd0 = (insn >> 0) & 0xf;
1482 rd1 = (insn >> 16) & 0xf;
1483 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1484 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1485 gen_op_iwmmxt_andq_M0_wRn(rd1);
1486 gen_op_iwmmxt_setpsr_nz();
1487 gen_op_iwmmxt_movq_wRn_M0(wrd);
1488 gen_op_iwmmxt_set_mup();
1489 gen_op_iwmmxt_set_cup();
1490 break;
1491 case 0x200: /* WAND */
1492 wrd = (insn >> 12) & 0xf;
1493 rd0 = (insn >> 0) & 0xf;
1494 rd1 = (insn >> 16) & 0xf;
1495 gen_op_iwmmxt_movq_M0_wRn(rd0);
1496 gen_op_iwmmxt_andq_M0_wRn(rd1);
1497 gen_op_iwmmxt_setpsr_nz();
1498 gen_op_iwmmxt_movq_wRn_M0(wrd);
1499 gen_op_iwmmxt_set_mup();
1500 gen_op_iwmmxt_set_cup();
1501 break;
1502 case 0x810: case 0xa10: /* WMADD */
1503 wrd = (insn >> 12) & 0xf;
1504 rd0 = (insn >> 0) & 0xf;
1505 rd1 = (insn >> 16) & 0xf;
1506 gen_op_iwmmxt_movq_M0_wRn(rd0);
1507 if (insn & (1 << 21))
1508 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1509 else
1510 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1511 gen_op_iwmmxt_movq_wRn_M0(wrd);
1512 gen_op_iwmmxt_set_mup();
1513 break;
1514 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1515 wrd = (insn >> 12) & 0xf;
1516 rd0 = (insn >> 16) & 0xf;
1517 rd1 = (insn >> 0) & 0xf;
1518 gen_op_iwmmxt_movq_M0_wRn(rd0);
1519 switch ((insn >> 22) & 3) {
1520 case 0:
1521 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1522 break;
1523 case 1:
1524 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1525 break;
1526 case 2:
1527 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1528 break;
1529 case 3:
1530 return 1;
1531 }
1532 gen_op_iwmmxt_movq_wRn_M0(wrd);
1533 gen_op_iwmmxt_set_mup();
1534 gen_op_iwmmxt_set_cup();
1535 break;
1536 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1537 wrd = (insn >> 12) & 0xf;
1538 rd0 = (insn >> 16) & 0xf;
1539 rd1 = (insn >> 0) & 0xf;
1540 gen_op_iwmmxt_movq_M0_wRn(rd0);
1541 switch ((insn >> 22) & 3) {
1542 case 0:
1543 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1544 break;
1545 case 1:
1546 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1547 break;
1548 case 2:
1549 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1550 break;
1551 case 3:
1552 return 1;
1553 }
1554 gen_op_iwmmxt_movq_wRn_M0(wrd);
1555 gen_op_iwmmxt_set_mup();
1556 gen_op_iwmmxt_set_cup();
1557 break;
1558 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1559 wrd = (insn >> 12) & 0xf;
1560 rd0 = (insn >> 16) & 0xf;
1561 rd1 = (insn >> 0) & 0xf;
1562 gen_op_iwmmxt_movq_M0_wRn(rd0);
1563 if (insn & (1 << 22))
1564 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1565 else
1566 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1567 if (!(insn & (1 << 20)))
1568 gen_op_iwmmxt_addl_M0_wRn(wrd);
1569 gen_op_iwmmxt_movq_wRn_M0(wrd);
1570 gen_op_iwmmxt_set_mup();
1571 break;
1572 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1573 wrd = (insn >> 12) & 0xf;
1574 rd0 = (insn >> 16) & 0xf;
1575 rd1 = (insn >> 0) & 0xf;
1576 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1577 if (insn & (1 << 21)) {
1578 if (insn & (1 << 20))
1579 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1580 else
1581 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1582 } else {
1583 if (insn & (1 << 20))
1584 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1585 else
1586 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1587 }
18c9b560
AZ
1588 gen_op_iwmmxt_movq_wRn_M0(wrd);
1589 gen_op_iwmmxt_set_mup();
1590 break;
1591 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1592 wrd = (insn >> 12) & 0xf;
1593 rd0 = (insn >> 16) & 0xf;
1594 rd1 = (insn >> 0) & 0xf;
1595 gen_op_iwmmxt_movq_M0_wRn(rd0);
1596 if (insn & (1 << 21))
1597 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1598 else
1599 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1600 if (!(insn & (1 << 20))) {
e677137d
PB
1601 iwmmxt_load_reg(cpu_V1, wrd);
1602 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1603 }
1604 gen_op_iwmmxt_movq_wRn_M0(wrd);
1605 gen_op_iwmmxt_set_mup();
1606 break;
1607 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1608 wrd = (insn >> 12) & 0xf;
1609 rd0 = (insn >> 16) & 0xf;
1610 rd1 = (insn >> 0) & 0xf;
1611 gen_op_iwmmxt_movq_M0_wRn(rd0);
1612 switch ((insn >> 22) & 3) {
1613 case 0:
1614 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1615 break;
1616 case 1:
1617 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1618 break;
1619 case 2:
1620 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1621 break;
1622 case 3:
1623 return 1;
1624 }
1625 gen_op_iwmmxt_movq_wRn_M0(wrd);
1626 gen_op_iwmmxt_set_mup();
1627 gen_op_iwmmxt_set_cup();
1628 break;
1629 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1630 wrd = (insn >> 12) & 0xf;
1631 rd0 = (insn >> 16) & 0xf;
1632 rd1 = (insn >> 0) & 0xf;
1633 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1634 if (insn & (1 << 22)) {
1635 if (insn & (1 << 20))
1636 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1637 else
1638 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1639 } else {
1640 if (insn & (1 << 20))
1641 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1642 else
1643 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1644 }
18c9b560
AZ
1645 gen_op_iwmmxt_movq_wRn_M0(wrd);
1646 gen_op_iwmmxt_set_mup();
1647 gen_op_iwmmxt_set_cup();
1648 break;
1649 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1650 wrd = (insn >> 12) & 0xf;
1651 rd0 = (insn >> 16) & 0xf;
1652 rd1 = (insn >> 0) & 0xf;
1653 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1654 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1655 tcg_gen_andi_i32(tmp, tmp, 7);
1656 iwmmxt_load_reg(cpu_V1, rd1);
1657 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1658 dead_tmp(tmp);
18c9b560
AZ
1659 gen_op_iwmmxt_movq_wRn_M0(wrd);
1660 gen_op_iwmmxt_set_mup();
1661 break;
1662 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1663 if (((insn >> 6) & 3) == 3)
1664 return 1;
18c9b560
AZ
1665 rd = (insn >> 12) & 0xf;
1666 wrd = (insn >> 16) & 0xf;
da6b5335 1667 tmp = load_reg(s, rd);
18c9b560
AZ
1668 gen_op_iwmmxt_movq_M0_wRn(wrd);
1669 switch ((insn >> 6) & 3) {
1670 case 0:
da6b5335
FN
1671 tmp2 = tcg_const_i32(0xff);
1672 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1673 break;
1674 case 1:
da6b5335
FN
1675 tmp2 = tcg_const_i32(0xffff);
1676 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1677 break;
1678 case 2:
da6b5335
FN
1679 tmp2 = tcg_const_i32(0xffffffff);
1680 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1681 break;
da6b5335
FN
1682 default:
1683 TCGV_UNUSED(tmp2);
1684 TCGV_UNUSED(tmp3);
18c9b560 1685 }
da6b5335
FN
1686 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1687 tcg_temp_free(tmp3);
1688 tcg_temp_free(tmp2);
1689 dead_tmp(tmp);
18c9b560
AZ
1690 gen_op_iwmmxt_movq_wRn_M0(wrd);
1691 gen_op_iwmmxt_set_mup();
1692 break;
1693 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1694 rd = (insn >> 12) & 0xf;
1695 wrd = (insn >> 16) & 0xf;
da6b5335 1696 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1697 return 1;
1698 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335 1699 tmp = new_tmp();
18c9b560
AZ
1700 switch ((insn >> 22) & 3) {
1701 case 0:
da6b5335
FN
1702 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1703 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1704 if (insn & 8) {
1705 tcg_gen_ext8s_i32(tmp, tmp);
1706 } else {
1707 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1708 }
1709 break;
1710 case 1:
da6b5335
FN
1711 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1712 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1713 if (insn & 8) {
1714 tcg_gen_ext16s_i32(tmp, tmp);
1715 } else {
1716 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1717 }
1718 break;
1719 case 2:
da6b5335
FN
1720 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1721 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1722 break;
18c9b560 1723 }
da6b5335 1724 store_reg(s, rd, tmp);
18c9b560
AZ
1725 break;
1726 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1727 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1728 return 1;
da6b5335 1729 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1730 switch ((insn >> 22) & 3) {
1731 case 0:
da6b5335 1732 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1733 break;
1734 case 1:
da6b5335 1735 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1736 break;
1737 case 2:
da6b5335 1738 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1739 break;
18c9b560 1740 }
da6b5335
FN
1741 tcg_gen_shli_i32(tmp, tmp, 28);
1742 gen_set_nzcv(tmp);
1743 dead_tmp(tmp);
18c9b560
AZ
1744 break;
1745 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1746 if (((insn >> 6) & 3) == 3)
1747 return 1;
18c9b560
AZ
1748 rd = (insn >> 12) & 0xf;
1749 wrd = (insn >> 16) & 0xf;
da6b5335 1750 tmp = load_reg(s, rd);
18c9b560
AZ
1751 switch ((insn >> 6) & 3) {
1752 case 0:
da6b5335 1753 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1754 break;
1755 case 1:
da6b5335 1756 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1757 break;
1758 case 2:
da6b5335 1759 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1760 break;
18c9b560 1761 }
da6b5335 1762 dead_tmp(tmp);
18c9b560
AZ
1763 gen_op_iwmmxt_movq_wRn_M0(wrd);
1764 gen_op_iwmmxt_set_mup();
1765 break;
1766 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1767 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1768 return 1;
da6b5335
FN
1769 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1770 tmp2 = new_tmp();
1771 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1772 switch ((insn >> 22) & 3) {
1773 case 0:
1774 for (i = 0; i < 7; i ++) {
da6b5335
FN
1775 tcg_gen_shli_i32(tmp2, tmp2, 4);
1776 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1777 }
1778 break;
1779 case 1:
1780 for (i = 0; i < 3; i ++) {
da6b5335
FN
1781 tcg_gen_shli_i32(tmp2, tmp2, 8);
1782 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1783 }
1784 break;
1785 case 2:
da6b5335
FN
1786 tcg_gen_shli_i32(tmp2, tmp2, 16);
1787 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1788 break;
18c9b560 1789 }
da6b5335
FN
1790 gen_set_nzcv(tmp);
1791 dead_tmp(tmp2);
1792 dead_tmp(tmp);
18c9b560
AZ
1793 break;
1794 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1795 wrd = (insn >> 12) & 0xf;
1796 rd0 = (insn >> 16) & 0xf;
1797 gen_op_iwmmxt_movq_M0_wRn(rd0);
1798 switch ((insn >> 22) & 3) {
1799 case 0:
e677137d 1800 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1801 break;
1802 case 1:
e677137d 1803 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1804 break;
1805 case 2:
e677137d 1806 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1807 break;
1808 case 3:
1809 return 1;
1810 }
1811 gen_op_iwmmxt_movq_wRn_M0(wrd);
1812 gen_op_iwmmxt_set_mup();
1813 break;
1814 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1815 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1816 return 1;
da6b5335
FN
1817 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1818 tmp2 = new_tmp();
1819 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1820 switch ((insn >> 22) & 3) {
1821 case 0:
1822 for (i = 0; i < 7; i ++) {
da6b5335
FN
1823 tcg_gen_shli_i32(tmp2, tmp2, 4);
1824 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1825 }
1826 break;
1827 case 1:
1828 for (i = 0; i < 3; i ++) {
da6b5335
FN
1829 tcg_gen_shli_i32(tmp2, tmp2, 8);
1830 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1831 }
1832 break;
1833 case 2:
da6b5335
FN
1834 tcg_gen_shli_i32(tmp2, tmp2, 16);
1835 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1836 break;
18c9b560 1837 }
da6b5335
FN
1838 gen_set_nzcv(tmp);
1839 dead_tmp(tmp2);
1840 dead_tmp(tmp);
18c9b560
AZ
1841 break;
1842 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1843 rd = (insn >> 12) & 0xf;
1844 rd0 = (insn >> 16) & 0xf;
da6b5335 1845 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1846 return 1;
1847 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 1848 tmp = new_tmp();
18c9b560
AZ
1849 switch ((insn >> 22) & 3) {
1850 case 0:
da6b5335 1851 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1852 break;
1853 case 1:
da6b5335 1854 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1855 break;
1856 case 2:
da6b5335 1857 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1858 break;
18c9b560 1859 }
da6b5335 1860 store_reg(s, rd, tmp);
18c9b560
AZ
1861 break;
1862 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1863 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1864 wrd = (insn >> 12) & 0xf;
1865 rd0 = (insn >> 16) & 0xf;
1866 rd1 = (insn >> 0) & 0xf;
1867 gen_op_iwmmxt_movq_M0_wRn(rd0);
1868 switch ((insn >> 22) & 3) {
1869 case 0:
1870 if (insn & (1 << 21))
1871 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1872 else
1873 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1874 break;
1875 case 1:
1876 if (insn & (1 << 21))
1877 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1878 else
1879 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1880 break;
1881 case 2:
1882 if (insn & (1 << 21))
1883 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1884 else
1885 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1886 break;
1887 case 3:
1888 return 1;
1889 }
1890 gen_op_iwmmxt_movq_wRn_M0(wrd);
1891 gen_op_iwmmxt_set_mup();
1892 gen_op_iwmmxt_set_cup();
1893 break;
1894 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1895 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1896 wrd = (insn >> 12) & 0xf;
1897 rd0 = (insn >> 16) & 0xf;
1898 gen_op_iwmmxt_movq_M0_wRn(rd0);
1899 switch ((insn >> 22) & 3) {
1900 case 0:
1901 if (insn & (1 << 21))
1902 gen_op_iwmmxt_unpacklsb_M0();
1903 else
1904 gen_op_iwmmxt_unpacklub_M0();
1905 break;
1906 case 1:
1907 if (insn & (1 << 21))
1908 gen_op_iwmmxt_unpacklsw_M0();
1909 else
1910 gen_op_iwmmxt_unpackluw_M0();
1911 break;
1912 case 2:
1913 if (insn & (1 << 21))
1914 gen_op_iwmmxt_unpacklsl_M0();
1915 else
1916 gen_op_iwmmxt_unpacklul_M0();
1917 break;
1918 case 3:
1919 return 1;
1920 }
1921 gen_op_iwmmxt_movq_wRn_M0(wrd);
1922 gen_op_iwmmxt_set_mup();
1923 gen_op_iwmmxt_set_cup();
1924 break;
1925 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1926 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1927 wrd = (insn >> 12) & 0xf;
1928 rd0 = (insn >> 16) & 0xf;
1929 gen_op_iwmmxt_movq_M0_wRn(rd0);
1930 switch ((insn >> 22) & 3) {
1931 case 0:
1932 if (insn & (1 << 21))
1933 gen_op_iwmmxt_unpackhsb_M0();
1934 else
1935 gen_op_iwmmxt_unpackhub_M0();
1936 break;
1937 case 1:
1938 if (insn & (1 << 21))
1939 gen_op_iwmmxt_unpackhsw_M0();
1940 else
1941 gen_op_iwmmxt_unpackhuw_M0();
1942 break;
1943 case 2:
1944 if (insn & (1 << 21))
1945 gen_op_iwmmxt_unpackhsl_M0();
1946 else
1947 gen_op_iwmmxt_unpackhul_M0();
1948 break;
1949 case 3:
1950 return 1;
1951 }
1952 gen_op_iwmmxt_movq_wRn_M0(wrd);
1953 gen_op_iwmmxt_set_mup();
1954 gen_op_iwmmxt_set_cup();
1955 break;
1956 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1957 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1958 if (((insn >> 22) & 3) == 0)
1959 return 1;
18c9b560
AZ
1960 wrd = (insn >> 12) & 0xf;
1961 rd0 = (insn >> 16) & 0xf;
1962 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1963 tmp = new_tmp();
1964 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1965 dead_tmp(tmp);
18c9b560 1966 return 1;
da6b5335 1967 }
18c9b560 1968 switch ((insn >> 22) & 3) {
18c9b560 1969 case 1:
da6b5335 1970 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1971 break;
1972 case 2:
da6b5335 1973 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1974 break;
1975 case 3:
da6b5335 1976 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1977 break;
1978 }
da6b5335 1979 dead_tmp(tmp);
18c9b560
AZ
1980 gen_op_iwmmxt_movq_wRn_M0(wrd);
1981 gen_op_iwmmxt_set_mup();
1982 gen_op_iwmmxt_set_cup();
1983 break;
1984 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1985 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1986 if (((insn >> 22) & 3) == 0)
1987 return 1;
18c9b560
AZ
1988 wrd = (insn >> 12) & 0xf;
1989 rd0 = (insn >> 16) & 0xf;
1990 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1991 tmp = new_tmp();
1992 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1993 dead_tmp(tmp);
18c9b560 1994 return 1;
da6b5335 1995 }
18c9b560 1996 switch ((insn >> 22) & 3) {
18c9b560 1997 case 1:
da6b5335 1998 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1999 break;
2000 case 2:
da6b5335 2001 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2002 break;
2003 case 3:
da6b5335 2004 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2005 break;
2006 }
da6b5335 2007 dead_tmp(tmp);
18c9b560
AZ
2008 gen_op_iwmmxt_movq_wRn_M0(wrd);
2009 gen_op_iwmmxt_set_mup();
2010 gen_op_iwmmxt_set_cup();
2011 break;
2012 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2013 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2014 if (((insn >> 22) & 3) == 0)
2015 return 1;
18c9b560
AZ
2016 wrd = (insn >> 12) & 0xf;
2017 rd0 = (insn >> 16) & 0xf;
2018 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2019 tmp = new_tmp();
2020 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2021 dead_tmp(tmp);
18c9b560 2022 return 1;
da6b5335 2023 }
18c9b560 2024 switch ((insn >> 22) & 3) {
18c9b560 2025 case 1:
da6b5335 2026 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2027 break;
2028 case 2:
da6b5335 2029 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2030 break;
2031 case 3:
da6b5335 2032 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2033 break;
2034 }
da6b5335 2035 dead_tmp(tmp);
18c9b560
AZ
2036 gen_op_iwmmxt_movq_wRn_M0(wrd);
2037 gen_op_iwmmxt_set_mup();
2038 gen_op_iwmmxt_set_cup();
2039 break;
2040 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2041 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2042 if (((insn >> 22) & 3) == 0)
2043 return 1;
18c9b560
AZ
2044 wrd = (insn >> 12) & 0xf;
2045 rd0 = (insn >> 16) & 0xf;
2046 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2047 tmp = new_tmp();
18c9b560 2048 switch ((insn >> 22) & 3) {
18c9b560 2049 case 1:
da6b5335
FN
2050 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2051 dead_tmp(tmp);
18c9b560 2052 return 1;
da6b5335
FN
2053 }
2054 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2055 break;
2056 case 2:
da6b5335
FN
2057 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2058 dead_tmp(tmp);
18c9b560 2059 return 1;
da6b5335
FN
2060 }
2061 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2062 break;
2063 case 3:
da6b5335
FN
2064 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2065 dead_tmp(tmp);
18c9b560 2066 return 1;
da6b5335
FN
2067 }
2068 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2069 break;
2070 }
da6b5335 2071 dead_tmp(tmp);
18c9b560
AZ
2072 gen_op_iwmmxt_movq_wRn_M0(wrd);
2073 gen_op_iwmmxt_set_mup();
2074 gen_op_iwmmxt_set_cup();
2075 break;
2076 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2077 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2078 wrd = (insn >> 12) & 0xf;
2079 rd0 = (insn >> 16) & 0xf;
2080 rd1 = (insn >> 0) & 0xf;
2081 gen_op_iwmmxt_movq_M0_wRn(rd0);
2082 switch ((insn >> 22) & 3) {
2083 case 0:
2084 if (insn & (1 << 21))
2085 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2086 else
2087 gen_op_iwmmxt_minub_M0_wRn(rd1);
2088 break;
2089 case 1:
2090 if (insn & (1 << 21))
2091 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2092 else
2093 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2094 break;
2095 case 2:
2096 if (insn & (1 << 21))
2097 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2098 else
2099 gen_op_iwmmxt_minul_M0_wRn(rd1);
2100 break;
2101 case 3:
2102 return 1;
2103 }
2104 gen_op_iwmmxt_movq_wRn_M0(wrd);
2105 gen_op_iwmmxt_set_mup();
2106 break;
2107 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2108 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2109 wrd = (insn >> 12) & 0xf;
2110 rd0 = (insn >> 16) & 0xf;
2111 rd1 = (insn >> 0) & 0xf;
2112 gen_op_iwmmxt_movq_M0_wRn(rd0);
2113 switch ((insn >> 22) & 3) {
2114 case 0:
2115 if (insn & (1 << 21))
2116 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2117 else
2118 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2119 break;
2120 case 1:
2121 if (insn & (1 << 21))
2122 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2123 else
2124 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2125 break;
2126 case 2:
2127 if (insn & (1 << 21))
2128 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2129 else
2130 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2131 break;
2132 case 3:
2133 return 1;
2134 }
2135 gen_op_iwmmxt_movq_wRn_M0(wrd);
2136 gen_op_iwmmxt_set_mup();
2137 break;
2138 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2139 case 0x402: case 0x502: case 0x602: case 0x702:
2140 wrd = (insn >> 12) & 0xf;
2141 rd0 = (insn >> 16) & 0xf;
2142 rd1 = (insn >> 0) & 0xf;
2143 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2144 tmp = tcg_const_i32((insn >> 20) & 3);
2145 iwmmxt_load_reg(cpu_V1, rd1);
2146 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2147 tcg_temp_free(tmp);
18c9b560
AZ
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2150 break;
2151 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2152 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2153 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2154 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2155 wrd = (insn >> 12) & 0xf;
2156 rd0 = (insn >> 16) & 0xf;
2157 rd1 = (insn >> 0) & 0xf;
2158 gen_op_iwmmxt_movq_M0_wRn(rd0);
2159 switch ((insn >> 20) & 0xf) {
2160 case 0x0:
2161 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2162 break;
2163 case 0x1:
2164 gen_op_iwmmxt_subub_M0_wRn(rd1);
2165 break;
2166 case 0x3:
2167 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2168 break;
2169 case 0x4:
2170 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2171 break;
2172 case 0x5:
2173 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2174 break;
2175 case 0x7:
2176 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2177 break;
2178 case 0x8:
2179 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2180 break;
2181 case 0x9:
2182 gen_op_iwmmxt_subul_M0_wRn(rd1);
2183 break;
2184 case 0xb:
2185 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2186 break;
2187 default:
2188 return 1;
2189 }
2190 gen_op_iwmmxt_movq_wRn_M0(wrd);
2191 gen_op_iwmmxt_set_mup();
2192 gen_op_iwmmxt_set_cup();
2193 break;
2194 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2195 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2196 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2197 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2201 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2202 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2203 tcg_temp_free(tmp);
18c9b560
AZ
2204 gen_op_iwmmxt_movq_wRn_M0(wrd);
2205 gen_op_iwmmxt_set_mup();
2206 gen_op_iwmmxt_set_cup();
2207 break;
2208 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2209 case 0x418: case 0x518: case 0x618: case 0x718:
2210 case 0x818: case 0x918: case 0xa18: case 0xb18:
2211 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2212 wrd = (insn >> 12) & 0xf;
2213 rd0 = (insn >> 16) & 0xf;
2214 rd1 = (insn >> 0) & 0xf;
2215 gen_op_iwmmxt_movq_M0_wRn(rd0);
2216 switch ((insn >> 20) & 0xf) {
2217 case 0x0:
2218 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2219 break;
2220 case 0x1:
2221 gen_op_iwmmxt_addub_M0_wRn(rd1);
2222 break;
2223 case 0x3:
2224 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2225 break;
2226 case 0x4:
2227 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2228 break;
2229 case 0x5:
2230 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2231 break;
2232 case 0x7:
2233 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2234 break;
2235 case 0x8:
2236 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2237 break;
2238 case 0x9:
2239 gen_op_iwmmxt_addul_M0_wRn(rd1);
2240 break;
2241 case 0xb:
2242 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2243 break;
2244 default:
2245 return 1;
2246 }
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 gen_op_iwmmxt_set_cup();
2250 break;
2251 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2252 case 0x408: case 0x508: case 0x608: case 0x708:
2253 case 0x808: case 0x908: case 0xa08: case 0xb08:
2254 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2255 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2256 return 1;
18c9b560
AZ
2257 wrd = (insn >> 12) & 0xf;
2258 rd0 = (insn >> 16) & 0xf;
2259 rd1 = (insn >> 0) & 0xf;
2260 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2261 switch ((insn >> 22) & 3) {
18c9b560
AZ
2262 case 1:
2263 if (insn & (1 << 21))
2264 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2265 else
2266 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2267 break;
2268 case 2:
2269 if (insn & (1 << 21))
2270 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2271 else
2272 gen_op_iwmmxt_packul_M0_wRn(rd1);
2273 break;
2274 case 3:
2275 if (insn & (1 << 21))
2276 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2277 else
2278 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2279 break;
2280 }
2281 gen_op_iwmmxt_movq_wRn_M0(wrd);
2282 gen_op_iwmmxt_set_mup();
2283 gen_op_iwmmxt_set_cup();
2284 break;
2285 case 0x201: case 0x203: case 0x205: case 0x207:
2286 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2287 case 0x211: case 0x213: case 0x215: case 0x217:
2288 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2289 wrd = (insn >> 5) & 0xf;
2290 rd0 = (insn >> 12) & 0xf;
2291 rd1 = (insn >> 0) & 0xf;
2292 if (rd0 == 0xf || rd1 == 0xf)
2293 return 1;
2294 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2295 tmp = load_reg(s, rd0);
2296 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2297 switch ((insn >> 16) & 0xf) {
2298 case 0x0: /* TMIA */
da6b5335 2299 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2300 break;
2301 case 0x8: /* TMIAPH */
da6b5335 2302 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2303 break;
2304 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2305 if (insn & (1 << 16))
da6b5335 2306 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2307 if (insn & (1 << 17))
da6b5335
FN
2308 tcg_gen_shri_i32(tmp2, tmp2, 16);
2309 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2310 break;
2311 default:
da6b5335
FN
2312 dead_tmp(tmp2);
2313 dead_tmp(tmp);
18c9b560
AZ
2314 return 1;
2315 }
da6b5335
FN
2316 dead_tmp(tmp2);
2317 dead_tmp(tmp);
18c9b560
AZ
2318 gen_op_iwmmxt_movq_wRn_M0(wrd);
2319 gen_op_iwmmxt_set_mup();
2320 break;
2321 default:
2322 return 1;
2323 }
2324
2325 return 0;
2326}
2327
2328/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2329 (i.e. an undefined instruction). */
2330static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2331{
2332 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2333 TCGv tmp, tmp2;
18c9b560
AZ
2334
2335 if ((insn & 0x0ff00f10) == 0x0e200010) {
2336 /* Multiply with Internal Accumulate Format */
2337 rd0 = (insn >> 12) & 0xf;
2338 rd1 = insn & 0xf;
2339 acc = (insn >> 5) & 7;
2340
2341 if (acc != 0)
2342 return 1;
2343
3a554c0f
FN
2344 tmp = load_reg(s, rd0);
2345 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2346 switch ((insn >> 16) & 0xf) {
2347 case 0x0: /* MIA */
3a554c0f 2348 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2349 break;
2350 case 0x8: /* MIAPH */
3a554c0f 2351 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2352 break;
2353 case 0xc: /* MIABB */
2354 case 0xd: /* MIABT */
2355 case 0xe: /* MIATB */
2356 case 0xf: /* MIATT */
18c9b560 2357 if (insn & (1 << 16))
3a554c0f 2358 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2359 if (insn & (1 << 17))
3a554c0f
FN
2360 tcg_gen_shri_i32(tmp2, tmp2, 16);
2361 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2362 break;
2363 default:
2364 return 1;
2365 }
3a554c0f
FN
2366 dead_tmp(tmp2);
2367 dead_tmp(tmp);
18c9b560
AZ
2368
2369 gen_op_iwmmxt_movq_wRn_M0(acc);
2370 return 0;
2371 }
2372
2373 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2374 /* Internal Accumulator Access Format */
2375 rdhi = (insn >> 16) & 0xf;
2376 rdlo = (insn >> 12) & 0xf;
2377 acc = insn & 7;
2378
2379 if (acc != 0)
2380 return 1;
2381
2382 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2383 iwmmxt_load_reg(cpu_V0, acc);
2384 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2385 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2386 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2387 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2388 } else { /* MAR */
3a554c0f
FN
2389 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2390 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2391 }
2392 return 0;
2393 }
2394
2395 return 1;
2396}
2397
c1713132
AZ
2398/* Disassemble a system coprocessor instruction. Return nonzero if the
2399 instruction is not defined. */
2400static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2401{
8984bd2e 2402 TCGv tmp;
c1713132
AZ
2403 uint32_t rd = (insn >> 12) & 0xf;
2404 uint32_t cp = (insn >> 8) & 0xf;
2405 if (IS_USER(s)) {
2406 return 1;
2407 }
2408
18c9b560 2409 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2410 if (!env->cp[cp].cp_read)
2411 return 1;
8984bd2e
PB
2412 gen_set_pc_im(s->pc);
2413 tmp = new_tmp();
2414 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2415 store_reg(s, rd, tmp);
c1713132
AZ
2416 } else {
2417 if (!env->cp[cp].cp_write)
2418 return 1;
8984bd2e
PB
2419 gen_set_pc_im(s->pc);
2420 tmp = load_reg(s, rd);
2421 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
a60de947 2422 dead_tmp(tmp);
c1713132
AZ
2423 }
2424 return 0;
2425}
2426
9ee6e8bb
PB
2427static int cp15_user_ok(uint32_t insn)
2428{
2429 int cpn = (insn >> 16) & 0xf;
2430 int cpm = insn & 0xf;
2431 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2432
2433 if (cpn == 13 && cpm == 0) {
2434 /* TLS register. */
2435 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2436 return 1;
2437 }
2438 if (cpn == 7) {
2439 /* ISB, DSB, DMB. */
2440 if ((cpm == 5 && op == 4)
2441 || (cpm == 10 && (op == 4 || op == 5)))
2442 return 1;
2443 }
2444 return 0;
2445}
2446
b5ff1b31
FB
2447/* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2448 instruction is not defined. */
a90b7318 2449static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2450{
2451 uint32_t rd;
8984bd2e 2452 TCGv tmp;
b5ff1b31 2453
9ee6e8bb
PB
2454 /* M profile cores use memory mapped registers instead of cp15. */
2455 if (arm_feature(env, ARM_FEATURE_M))
2456 return 1;
2457
2458 if ((insn & (1 << 25)) == 0) {
2459 if (insn & (1 << 20)) {
2460 /* mrrc */
2461 return 1;
2462 }
2463 /* mcrr. Used for block cache operations, so implement as no-op. */
2464 return 0;
2465 }
2466 if ((insn & (1 << 4)) == 0) {
2467 /* cdp */
2468 return 1;
2469 }
2470 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2471 return 1;
2472 }
9332f9da
FB
2473 if ((insn & 0x0fff0fff) == 0x0e070f90
2474 || (insn & 0x0fff0fff) == 0x0e070f58) {
2475 /* Wait for interrupt. */
8984bd2e 2476 gen_set_pc_im(s->pc);
9ee6e8bb 2477 s->is_jmp = DISAS_WFI;
9332f9da
FB
2478 return 0;
2479 }
b5ff1b31 2480 rd = (insn >> 12) & 0xf;
18c9b560 2481 if (insn & ARM_CP_RW_BIT) {
8984bd2e
PB
2482 tmp = new_tmp();
2483 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
b5ff1b31
FB
2484 /* If the destination register is r15 then set the condition codes. */
2485 if (rd != 15)
8984bd2e
PB
2486 store_reg(s, rd, tmp);
2487 else
2488 dead_tmp(tmp);
b5ff1b31 2489 } else {
8984bd2e
PB
2490 tmp = load_reg(s, rd);
2491 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2492 dead_tmp(tmp);
a90b7318
AZ
2493 /* Normally we would always end the TB here, but Linux
2494 * arch/arm/mach-pxa/sleep.S expects two instructions following
2495 * an MMU enable to execute from cache. Imitate this behaviour. */
2496 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2497 (insn & 0x0fff0fff) != 0x0e010f10)
2498 gen_lookup_tb(s);
b5ff1b31 2499 }
b5ff1b31
FB
2500 return 0;
2501}
2502
9ee6e8bb
PB
2503#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2504#define VFP_SREG(insn, bigbit, smallbit) \
2505 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2506#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2507 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2508 reg = (((insn) >> (bigbit)) & 0x0f) \
2509 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2510 } else { \
2511 if (insn & (1 << (smallbit))) \
2512 return 1; \
2513 reg = ((insn) >> (bigbit)) & 0x0f; \
2514 }} while (0)
2515
2516#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2517#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2518#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2519#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2520#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2521#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
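/* For example, VFP_SREG_D yields Sd = insn[15:12]:insn[22] (bit 22 is the low
   bit of the single-precision register number), while VFP_DREG_D packs
   Dd = insn[22]:insn[15:12]; the bit-22 extension is only accepted when VFP3
   is present, otherwise a set bit 22 makes the macro return 1 (UNDEF). */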
2522
4373f3ce
PB
2523/* Move between integer and VFP cores. */
2524static TCGv gen_vfp_mrs(void)
2525{
2526 TCGv tmp = new_tmp();
2527 tcg_gen_mov_i32(tmp, cpu_F0s);
2528 return tmp;
2529}
2530
2531static void gen_vfp_msr(TCGv tmp)
2532{
2533 tcg_gen_mov_i32(cpu_F0s, tmp);
2534 dead_tmp(tmp);
2535}
2536
9ee6e8bb
PB
2537static inline int
2538vfp_enabled(CPUState * env)
2539{
2540 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2541}
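/* (Bit 30 of FPEXC is the EN bit, the global enable for the VFP/NEON unit.) */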
2542
ad69471c
PB
2543static void gen_neon_dup_u8(TCGv var, int shift)
2544{
2545 TCGv tmp = new_tmp();
2546 if (shift)
2547 tcg_gen_shri_i32(var, var, shift);
86831435 2548 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2549 tcg_gen_shli_i32(tmp, var, 8);
2550 tcg_gen_or_i32(var, var, tmp);
2551 tcg_gen_shli_i32(tmp, var, 16);
2552 tcg_gen_or_i32(var, var, tmp);
2553 dead_tmp(tmp);
2554}
2555
2556static void gen_neon_dup_low16(TCGv var)
2557{
2558 TCGv tmp = new_tmp();
86831435 2559 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2560 tcg_gen_shli_i32(tmp, var, 16);
2561 tcg_gen_or_i32(var, var, tmp);
2562 dead_tmp(tmp);
2563}
2564
2565static void gen_neon_dup_high16(TCGv var)
2566{
2567 TCGv tmp = new_tmp();
2568 tcg_gen_andi_i32(var, var, 0xffff0000);
2569 tcg_gen_shri_i32(tmp, var, 16);
2570 tcg_gen_or_i32(var, var, tmp);
2571 dead_tmp(tmp);
2572}
2573
b7bcbe95
FB
2574/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2575 (i.e. an undefined instruction). */
2576static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2577{
2578 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2579 int dp, veclen;
312eea9f 2580 TCGv addr;
4373f3ce 2581 TCGv tmp;
ad69471c 2582 TCGv tmp2;
b7bcbe95 2583
40f137e1
PB
2584 if (!arm_feature(env, ARM_FEATURE_VFP))
2585 return 1;
2586
9ee6e8bb
PB
2587 if (!vfp_enabled(env)) {
2588 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2589 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2590 return 1;
2591 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2592 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2593 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2594 return 1;
2595 }
b7bcbe95
FB
2596 dp = ((insn & 0xf00) == 0xb00);
2597 switch ((insn >> 24) & 0xf) {
2598 case 0xe:
2599 if (insn & (1 << 4)) {
2600 /* single register transfer */
b7bcbe95
FB
2601 rd = (insn >> 12) & 0xf;
2602 if (dp) {
9ee6e8bb
PB
2603 int size;
2604 int pass;
2605
2606 VFP_DREG_N(rn, insn);
2607 if (insn & 0xf)
b7bcbe95 2608 return 1;
9ee6e8bb
PB
2609 if (insn & 0x00c00060
2610 && !arm_feature(env, ARM_FEATURE_NEON))
2611 return 1;
2612
2613 pass = (insn >> 21) & 1;
2614 if (insn & (1 << 22)) {
2615 size = 0;
2616 offset = ((insn >> 5) & 3) * 8;
2617 } else if (insn & (1 << 5)) {
2618 size = 1;
2619 offset = (insn & (1 << 6)) ? 16 : 0;
2620 } else {
2621 size = 2;
2622 offset = 0;
2623 }
18c9b560 2624 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2625 /* vfp->arm */
ad69471c 2626 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2627 switch (size) {
2628 case 0:
9ee6e8bb 2629 if (offset)
ad69471c 2630 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2631 if (insn & (1 << 23))
ad69471c 2632 gen_uxtb(tmp);
9ee6e8bb 2633 else
ad69471c 2634 gen_sxtb(tmp);
9ee6e8bb
PB
2635 break;
2636 case 1:
9ee6e8bb
PB
2637 if (insn & (1 << 23)) {
2638 if (offset) {
ad69471c 2639 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2640 } else {
ad69471c 2641 gen_uxth(tmp);
9ee6e8bb
PB
2642 }
2643 } else {
2644 if (offset) {
ad69471c 2645 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2646 } else {
ad69471c 2647 gen_sxth(tmp);
9ee6e8bb
PB
2648 }
2649 }
2650 break;
2651 case 2:
9ee6e8bb
PB
2652 break;
2653 }
ad69471c 2654 store_reg(s, rd, tmp);
b7bcbe95
FB
2655 } else {
2656 /* arm->vfp */
ad69471c 2657 tmp = load_reg(s, rd);
9ee6e8bb
PB
2658 if (insn & (1 << 23)) {
2659 /* VDUP */
2660 if (size == 0) {
ad69471c 2661 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2662 } else if (size == 1) {
ad69471c 2663 gen_neon_dup_low16(tmp);
9ee6e8bb 2664 }
cbbccffc
PB
2665 for (n = 0; n <= pass * 2; n++) {
2666 tmp2 = new_tmp();
2667 tcg_gen_mov_i32(tmp2, tmp);
2668 neon_store_reg(rn, n, tmp2);
2669 }
2670 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2671 } else {
2672 /* VMOV */
2673 switch (size) {
2674 case 0:
ad69471c
PB
2675 tmp2 = neon_load_reg(rn, pass);
2676 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2677 dead_tmp(tmp2);
9ee6e8bb
PB
2678 break;
2679 case 1:
ad69471c
PB
2680 tmp2 = neon_load_reg(rn, pass);
2681 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2682 dead_tmp(tmp2);
9ee6e8bb
PB
2683 break;
2684 case 2:
9ee6e8bb
PB
2685 break;
2686 }
ad69471c 2687 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2688 }
b7bcbe95 2689 }
9ee6e8bb
PB
2690 } else { /* !dp */
2691 if ((insn & 0x6f) != 0x00)
2692 return 1;
2693 rn = VFP_SREG_N(insn);
18c9b560 2694 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2695 /* vfp->arm */
2696 if (insn & (1 << 21)) {
2697 /* system register */
40f137e1 2698 rn >>= 1;
9ee6e8bb 2699
b7bcbe95 2700 switch (rn) {
40f137e1 2701 case ARM_VFP_FPSID:
4373f3ce 2702 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2703 VFP3 restricts all id registers to privileged
2704 accesses. */
2705 if (IS_USER(s)
2706 && arm_feature(env, ARM_FEATURE_VFP3))
2707 return 1;
4373f3ce 2708 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2709 break;
40f137e1 2710 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2711 if (IS_USER(s))
2712 return 1;
4373f3ce 2713 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2714 break;
40f137e1
PB
2715 case ARM_VFP_FPINST:
2716 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2717 /* Not present in VFP3. */
2718 if (IS_USER(s)
2719 || arm_feature(env, ARM_FEATURE_VFP3))
2720 return 1;
4373f3ce 2721 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2722 break;
40f137e1 2723 case ARM_VFP_FPSCR:
601d70b9 2724 if (rd == 15) {
4373f3ce
PB
2725 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2726 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2727 } else {
2728 tmp = new_tmp();
2729 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2730 }
b7bcbe95 2731 break;
9ee6e8bb
PB
2732 case ARM_VFP_MVFR0:
2733 case ARM_VFP_MVFR1:
2734 if (IS_USER(s)
2735 || !arm_feature(env, ARM_FEATURE_VFP3))
2736 return 1;
4373f3ce 2737 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2738 break;
b7bcbe95
FB
2739 default:
2740 return 1;
2741 }
2742 } else {
2743 gen_mov_F0_vreg(0, rn);
4373f3ce 2744 tmp = gen_vfp_mrs();
b7bcbe95
FB
2745 }
2746 if (rd == 15) {
b5ff1b31 2747 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2748 gen_set_nzcv(tmp);
2749 dead_tmp(tmp);
2750 } else {
2751 store_reg(s, rd, tmp);
2752 }
b7bcbe95
FB
2753 } else {
2754 /* arm->vfp */
4373f3ce 2755 tmp = load_reg(s, rd);
b7bcbe95 2756 if (insn & (1 << 21)) {
40f137e1 2757 rn >>= 1;
b7bcbe95
FB
2758 /* system register */
2759 switch (rn) {
40f137e1 2760 case ARM_VFP_FPSID:
9ee6e8bb
PB
2761 case ARM_VFP_MVFR0:
2762 case ARM_VFP_MVFR1:
b7bcbe95
FB
2763 /* Writes are ignored. */
2764 break;
40f137e1 2765 case ARM_VFP_FPSCR:
4373f3ce
PB
2766 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2767 dead_tmp(tmp);
b5ff1b31 2768 gen_lookup_tb(s);
b7bcbe95 2769 break;
40f137e1 2770 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2771 if (IS_USER(s))
2772 return 1;
4373f3ce 2773 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2774 gen_lookup_tb(s);
2775 break;
2776 case ARM_VFP_FPINST:
2777 case ARM_VFP_FPINST2:
4373f3ce 2778 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2779 break;
b7bcbe95
FB
2780 default:
2781 return 1;
2782 }
2783 } else {
4373f3ce 2784 gen_vfp_msr(tmp);
b7bcbe95
FB
2785 gen_mov_vreg_F0(0, rn);
2786 }
2787 }
2788 }
2789 } else {
2790 /* data processing */
2791 /* The opcode is in bits 23, 21, 20 and 6. */
2792 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2793 if (dp) {
2794 if (op == 15) {
2795 /* rn is opcode */
2796 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2797 } else {
2798 /* rn is register number */
9ee6e8bb 2799 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2800 }
2801
2802 if (op == 15 && (rn == 15 || rn > 17)) {
2803 /* Integer or single precision destination. */
9ee6e8bb 2804 rd = VFP_SREG_D(insn);
b7bcbe95 2805 } else {
9ee6e8bb 2806 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2807 }
2808
2809 if (op == 15 && (rn == 16 || rn == 17)) {
2810 /* Integer source. */
2811 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2812 } else {
9ee6e8bb 2813 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2814 }
2815 } else {
9ee6e8bb 2816 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2817 if (op == 15 && rn == 15) {
2818 /* Double precision destination. */
9ee6e8bb
PB
2819 VFP_DREG_D(rd, insn);
2820 } else {
2821 rd = VFP_SREG_D(insn);
2822 }
2823 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2824 }
2825
2826 veclen = env->vfp.vec_len;
2827 if (op == 15 && rn > 3)
2828 veclen = 0;
2829
2830 /* Shut up compiler warnings. */
2831 delta_m = 0;
2832 delta_d = 0;
2833 bank_mask = 0;
3b46e624 2834
b7bcbe95
FB
2835 if (veclen > 0) {
2836 if (dp)
2837 bank_mask = 0xc;
2838 else
2839 bank_mask = 0x18;
2840
2841 /* Figure out what type of vector operation this is. */
2842 if ((rd & bank_mask) == 0) {
2843 /* scalar */
2844 veclen = 0;
2845 } else {
2846 if (dp)
2847 delta_d = (env->vfp.vec_stride >> 1) + 1;
2848 else
2849 delta_d = env->vfp.vec_stride + 1;
2850
2851 if ((rm & bank_mask) == 0) {
2852 /* mixed scalar/vector */
2853 delta_m = 0;
2854 } else {
2855 /* vector */
2856 delta_m = delta_d;
2857 }
2858 }
2859 }
2860
2861 /* Load the initial operands. */
2862 if (op == 15) {
2863 switch (rn) {
2864 case 16:
2865 case 17:
2866 /* Integer source */
2867 gen_mov_F0_vreg(0, rm);
2868 break;
2869 case 8:
2870 case 9:
2871 /* Compare */
2872 gen_mov_F0_vreg(dp, rd);
2873 gen_mov_F1_vreg(dp, rm);
2874 break;
2875 case 10:
2876 case 11:
2877 /* Compare with zero */
2878 gen_mov_F0_vreg(dp, rd);
2879 gen_vfp_F1_ld0(dp);
2880 break;
9ee6e8bb
PB
2881 case 20:
2882 case 21:
2883 case 22:
2884 case 23:
644ad806
PB
2885 case 28:
2886 case 29:
2887 case 30:
2888 case 31:
9ee6e8bb
PB
2889 /* Source and destination the same. */
2890 gen_mov_F0_vreg(dp, rd);
2891 break;
b7bcbe95
FB
2892 default:
2893 /* One source operand. */
2894 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2895 break;
b7bcbe95
FB
2896 }
2897 } else {
2898 /* Two source operands. */
2899 gen_mov_F0_vreg(dp, rn);
2900 gen_mov_F1_vreg(dp, rm);
2901 }
2902
2903 for (;;) {
2904 /* Perform the calculation. */
2905 switch (op) {
2906 case 0: /* mac: fd + (fn * fm) */
2907 gen_vfp_mul(dp);
2908 gen_mov_F1_vreg(dp, rd);
2909 gen_vfp_add(dp);
2910 break;
2911 case 1: /* nmac: fd - (fn * fm) */
2912 gen_vfp_mul(dp);
2913 gen_vfp_neg(dp);
2914 gen_mov_F1_vreg(dp, rd);
2915 gen_vfp_add(dp);
2916 break;
2917 case 2: /* msc: -fd + (fn * fm) */
2918 gen_vfp_mul(dp);
2919 gen_mov_F1_vreg(dp, rd);
2920 gen_vfp_sub(dp);
2921 break;
2922 case 3: /* nmsc: -fd - (fn * fm) */
2923 gen_vfp_mul(dp);
b7bcbe95 2924 gen_vfp_neg(dp);
c9fb531a
PB
2925 gen_mov_F1_vreg(dp, rd);
2926 gen_vfp_sub(dp);
b7bcbe95
FB
2927 break;
2928 case 4: /* mul: fn * fm */
2929 gen_vfp_mul(dp);
2930 break;
2931 case 5: /* nmul: -(fn * fm) */
2932 gen_vfp_mul(dp);
2933 gen_vfp_neg(dp);
2934 break;
2935 case 6: /* add: fn + fm */
2936 gen_vfp_add(dp);
2937 break;
2938 case 7: /* sub: fn - fm */
2939 gen_vfp_sub(dp);
2940 break;
2941 case 8: /* div: fn / fm */
2942 gen_vfp_div(dp);
2943 break;
9ee6e8bb
PB
2944 case 14: /* fconst */
2945 if (!arm_feature(env, ARM_FEATURE_VFP3))
2946 return 1;
2947
2948 n = (insn << 12) & 0x80000000;
2949 i = ((insn >> 12) & 0x70) | (insn & 0xf);
2950 if (dp) {
2951 if (i & 0x40)
2952 i |= 0x3f80;
2953 else
2954 i |= 0x4000;
2955 n |= i << 16;
4373f3ce 2956 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
2957 } else {
2958 if (i & 0x40)
2959 i |= 0x780;
2960 else
2961 i |= 0x800;
2962 n |= i << 19;
5b340b51 2963 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 2964 }
9ee6e8bb 2965 break;
b7bcbe95
FB
2966 case 15: /* extension space */
2967 switch (rn) {
2968 case 0: /* cpy */
2969 /* no-op */
2970 break;
2971 case 1: /* abs */
2972 gen_vfp_abs(dp);
2973 break;
2974 case 2: /* neg */
2975 gen_vfp_neg(dp);
2976 break;
2977 case 3: /* sqrt */
2978 gen_vfp_sqrt(dp);
2979 break;
2980 case 8: /* cmp */
2981 gen_vfp_cmp(dp);
2982 break;
2983 case 9: /* cmpe */
2984 gen_vfp_cmpe(dp);
2985 break;
2986 case 10: /* cmpz */
2987 gen_vfp_cmp(dp);
2988 break;
2989 case 11: /* cmpez */
2990 gen_vfp_F1_ld0(dp);
2991 gen_vfp_cmpe(dp);
2992 break;
2993 case 15: /* single<->double conversion */
2994 if (dp)
4373f3ce 2995 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 2996 else
4373f3ce 2997 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
2998 break;
2999 case 16: /* fuito */
3000 gen_vfp_uito(dp);
3001 break;
3002 case 17: /* fsito */
3003 gen_vfp_sito(dp);
3004 break;
9ee6e8bb
PB
3005 case 20: /* fshto */
3006 if (!arm_feature(env, ARM_FEATURE_VFP3))
3007 return 1;
644ad806 3008 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3009 break;
3010 case 21: /* fslto */
3011 if (!arm_feature(env, ARM_FEATURE_VFP3))
3012 return 1;
644ad806 3013 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3014 break;
3015 case 22: /* fuhto */
3016 if (!arm_feature(env, ARM_FEATURE_VFP3))
3017 return 1;
644ad806 3018 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3019 break;
3020 case 23: /* fulto */
3021 if (!arm_feature(env, ARM_FEATURE_VFP3))
3022 return 1;
644ad806 3023 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3024 break;
b7bcbe95
FB
3025 case 24: /* ftoui */
3026 gen_vfp_toui(dp);
3027 break;
3028 case 25: /* ftouiz */
3029 gen_vfp_touiz(dp);
3030 break;
3031 case 26: /* ftosi */
3032 gen_vfp_tosi(dp);
3033 break;
3034 case 27: /* ftosiz */
3035 gen_vfp_tosiz(dp);
3036 break;
9ee6e8bb
PB
3037 case 28: /* ftosh */
3038 if (!arm_feature(env, ARM_FEATURE_VFP3))
3039 return 1;
644ad806 3040 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3041 break;
3042 case 29: /* ftosl */
3043 if (!arm_feature(env, ARM_FEATURE_VFP3))
3044 return 1;
644ad806 3045 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3046 break;
3047 case 30: /* ftouh */
3048 if (!arm_feature(env, ARM_FEATURE_VFP3))
3049 return 1;
644ad806 3050 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3051 break;
3052 case 31: /* ftoul */
3053 if (!arm_feature(env, ARM_FEATURE_VFP3))
3054 return 1;
644ad806 3055 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3056 break;
b7bcbe95
FB
3057 default: /* undefined */
3058 printf ("rn:%d\n", rn);
3059 return 1;
3060 }
3061 break;
3062 default: /* undefined */
3063 printf ("op:%d\n", op);
3064 return 1;
3065 }
3066
3067 /* Write back the result. */
3068 if (op == 15 && (rn >= 8 && rn <= 11))
3069 ; /* Comparison, do nothing. */
3070 else if (op == 15 && rn > 17)
3071 /* Integer result. */
3072 gen_mov_vreg_F0(0, rd);
3073 else if (op == 15 && rn == 15)
3074 /* conversion */
3075 gen_mov_vreg_F0(!dp, rd);
3076 else
3077 gen_mov_vreg_F0(dp, rd);
3078
3079 /* break out of the loop if we have finished */
3080 if (veclen == 0)
3081 break;
3082
3083 if (op == 15 && delta_m == 0) {
3084 /* single source one-many */
3085 while (veclen--) {
3086 rd = ((rd + delta_d) & (bank_mask - 1))
3087 | (rd & bank_mask);
3088 gen_mov_vreg_F0(dp, rd);
3089 }
3090 break;
3091 }
3092 /* Setup the next operands. */
3093 veclen--;
3094 rd = ((rd + delta_d) & (bank_mask - 1))
3095 | (rd & bank_mask);
3096
3097 if (op == 15) {
3098 /* One source operand. */
3099 rm = ((rm + delta_m) & (bank_mask - 1))
3100 | (rm & bank_mask);
3101 gen_mov_F0_vreg(dp, rm);
3102 } else {
3103 /* Two source operands. */
3104 rn = ((rn + delta_d) & (bank_mask - 1))
3105 | (rn & bank_mask);
3106 gen_mov_F0_vreg(dp, rn);
3107 if (delta_m) {
3108 rm = ((rm + delta_m) & (bank_mask - 1))
3109 | (rm & bank_mask);
3110 gen_mov_F1_vreg(dp, rm);
3111 }
3112 }
3113 }
3114 }
3115 break;
3116 case 0xc:
3117 case 0xd:
9ee6e8bb 3118 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3119 /* two-register transfer */
3120 rn = (insn >> 16) & 0xf;
3121 rd = (insn >> 12) & 0xf;
3122 if (dp) {
9ee6e8bb
PB
3123 VFP_DREG_M(rm, insn);
3124 } else {
3125 rm = VFP_SREG_M(insn);
3126 }
b7bcbe95 3127
18c9b560 3128 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3129 /* vfp->arm */
3130 if (dp) {
4373f3ce
PB
3131 gen_mov_F0_vreg(0, rm * 2);
3132 tmp = gen_vfp_mrs();
3133 store_reg(s, rd, tmp);
3134 gen_mov_F0_vreg(0, rm * 2 + 1);
3135 tmp = gen_vfp_mrs();
3136 store_reg(s, rn, tmp);
b7bcbe95
FB
3137 } else {
3138 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3139 tmp = gen_vfp_mrs();
3140 store_reg(s, rn, tmp);
b7bcbe95 3141 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3142 tmp = gen_vfp_mrs();
3143 store_reg(s, rd, tmp);
b7bcbe95
FB
3144 }
3145 } else {
3146 /* arm->vfp */
3147 if (dp) {
4373f3ce
PB
3148 tmp = load_reg(s, rd);
3149 gen_vfp_msr(tmp);
3150 gen_mov_vreg_F0(0, rm * 2);
3151 tmp = load_reg(s, rn);
3152 gen_vfp_msr(tmp);
3153 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3154 } else {
4373f3ce
PB
3155 tmp = load_reg(s, rn);
3156 gen_vfp_msr(tmp);
b7bcbe95 3157 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3158 tmp = load_reg(s, rd);
3159 gen_vfp_msr(tmp);
b7bcbe95
FB
3160 gen_mov_vreg_F0(0, rm + 1);
3161 }
3162 }
3163 } else {
3164 /* Load/store */
3165 rn = (insn >> 16) & 0xf;
3166 if (dp)
9ee6e8bb 3167 VFP_DREG_D(rd, insn);
b7bcbe95 3168 else
9ee6e8bb
PB
3169 rd = VFP_SREG_D(insn);
3170 if (s->thumb && rn == 15) {
312eea9f
FN
3171 addr = new_tmp();
3172 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3173 } else {
312eea9f 3174 addr = load_reg(s, rn);
9ee6e8bb 3175 }
b7bcbe95
FB
3176 if ((insn & 0x01200000) == 0x01000000) {
3177 /* Single load/store */
3178 offset = (insn & 0xff) << 2;
3179 if ((insn & (1 << 23)) == 0)
3180 offset = -offset;
312eea9f 3181 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3182 if (insn & (1 << 20)) {
312eea9f 3183 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3184 gen_mov_vreg_F0(dp, rd);
3185 } else {
3186 gen_mov_F0_vreg(dp, rd);
312eea9f 3187 gen_vfp_st(s, dp, addr);
b7bcbe95 3188 }
312eea9f 3189 dead_tmp(addr);
b7bcbe95
FB
3190 } else {
3191 /* load/store multiple */
3192 if (dp)
3193 n = (insn >> 1) & 0x7f;
3194 else
3195 n = insn & 0xff;
3196
3197 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3198 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3199
3200 if (dp)
3201 offset = 8;
3202 else
3203 offset = 4;
3204 for (i = 0; i < n; i++) {
18c9b560 3205 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3206 /* load */
312eea9f 3207 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3208 gen_mov_vreg_F0(dp, rd + i);
3209 } else {
3210 /* store */
3211 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3212 gen_vfp_st(s, dp, addr);
b7bcbe95 3213 }
312eea9f 3214 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3215 }
3216 if (insn & (1 << 21)) {
3217 /* writeback */
3218 if (insn & (1 << 24))
3219 offset = -offset * n;
3220 else if (dp && (insn & 1))
3221 offset = 4;
3222 else
3223 offset = 0;
3224
3225 if (offset != 0)
312eea9f
FN
3226 tcg_gen_addi_i32(addr, addr, offset);
3227 store_reg(s, rn, addr);
3228 } else {
3229 dead_tmp(addr);
b7bcbe95
FB
3230 }
3231 }
3232 }
3233 break;
3234 default:
3235 /* Should never happen. */
3236 return 1;
3237 }
3238 return 0;
3239}
3240
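/* Chain directly to the next TB with goto_tb only when the destination lies
   in the same guest page as the current TB; otherwise just update PC and
   exit to the main loop so the new page is translated and looked up afresh. */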
6e256c93 3241static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3242{
6e256c93
FB
3243 TranslationBlock *tb;
3244
3245 tb = s->tb;
3246 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3247 tcg_gen_goto_tb(n);
8984bd2e 3248 gen_set_pc_im(dest);
57fec1fe 3249 tcg_gen_exit_tb((long)tb + n);
6e256c93 3250 } else {
8984bd2e 3251 gen_set_pc_im(dest);
57fec1fe 3252 tcg_gen_exit_tb(0);
6e256c93 3253 }
c53be334
FB
3254}
3255
8aaca4c0
FB
3256static inline void gen_jmp (DisasContext *s, uint32_t dest)
3257{
551bd27f 3258 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3259 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3260 if (s->thumb)
d9ba4830
PB
3261 dest |= 1;
3262 gen_bx_im(s, dest);
8aaca4c0 3263 } else {
6e256c93 3264 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3265 s->is_jmp = DISAS_TB_JUMP;
3266 }
3267}
3268
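/* 16 x 16 -> 32 signed multiply used by the SMUL<x><y> family: x and y select
   the top (1) or bottom (0) halfword of t0 and t1 respectively. */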
d9ba4830 3269static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3270{
ee097184 3271 if (x)
d9ba4830 3272 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3273 else
d9ba4830 3274 gen_sxth(t0);
ee097184 3275 if (y)
d9ba4830 3276 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3277 else
d9ba4830
PB
3278 gen_sxth(t1);
3279 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3280}
3281
3282/* Return the mask of PSR bits set by a MSR instruction. */
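/* Bits 0-3 of 'flags' select the c, x, s and f fields of the PSR respectively
   (one byte each, from the least significant byte upwards); reserved,
   execution-state and privileged bits are then masked off below. */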
9ee6e8bb 3283static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3284 uint32_t mask;
3285
3286 mask = 0;
3287 if (flags & (1 << 0))
3288 mask |= 0xff;
3289 if (flags & (1 << 1))
3290 mask |= 0xff00;
3291 if (flags & (1 << 2))
3292 mask |= 0xff0000;
3293 if (flags & (1 << 3))
3294 mask |= 0xff000000;
9ee6e8bb 3295
2ae23e75 3296 /* Mask out undefined bits. */
9ee6e8bb
PB
3297 mask &= ~CPSR_RESERVED;
3298 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3299 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3300 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3301 mask &= ~CPSR_IT;
9ee6e8bb 3302 /* Mask out execution state bits. */
2ae23e75 3303 if (!spsr)
e160c51c 3304 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3305 /* Mask out privileged bits. */
3306 if (IS_USER(s))
9ee6e8bb 3307 mask &= CPSR_USER;
b5ff1b31
FB
3308 return mask;
3309}
3310
2fbac54b
FN
3311/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3312static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3313{
d9ba4830 3314 TCGv tmp;
b5ff1b31
FB
3315 if (spsr) {
3316 /* ??? This is also undefined in system mode. */
3317 if (IS_USER(s))
3318 return 1;
d9ba4830
PB
3319
3320 tmp = load_cpu_field(spsr);
3321 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3322 tcg_gen_andi_i32(t0, t0, mask);
3323 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3324 store_cpu_field(tmp, spsr);
b5ff1b31 3325 } else {
2fbac54b 3326 gen_set_cpsr(t0, mask);
b5ff1b31 3327 }
2fbac54b 3328 dead_tmp(t0);
b5ff1b31
FB
3329 gen_lookup_tb(s);
3330 return 0;
3331}
3332
2fbac54b
FN
3333/* Returns nonzero if access to the PSR is not permitted. */
3334static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3335{
3336 TCGv tmp;
3337 tmp = new_tmp();
3338 tcg_gen_movi_i32(tmp, val);
3339 return gen_set_psr(s, mask, spsr, tmp);
3340}
3341
e9bb4aa9
JR
3342/* Generate an old-style exception return. Marks pc as dead. */
3343static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3344{
d9ba4830 3345 TCGv tmp;
e9bb4aa9 3346 store_reg(s, 15, pc);
d9ba4830
PB
3347 tmp = load_cpu_field(spsr);
3348 gen_set_cpsr(tmp, 0xffffffff);
3349 dead_tmp(tmp);
b5ff1b31
FB
3350 s->is_jmp = DISAS_UPDATE;
3351}
3352
b0109805
PB
3353/* Generate a v6 exception return. Marks both values as dead. */
3354static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3355{
b0109805
PB
3356 gen_set_cpsr(cpsr, 0xffffffff);
3357 dead_tmp(cpsr);
3358 store_reg(s, 15, pc);
9ee6e8bb
PB
3359 s->is_jmp = DISAS_UPDATE;
3360}
3b46e624 3361
9ee6e8bb
PB
3362static inline void
3363gen_set_condexec (DisasContext *s)
3364{
3365 if (s->condexec_mask) {
8f01245e
PB
3366 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3367 TCGv tmp = new_tmp();
3368 tcg_gen_movi_i32(tmp, val);
d9ba4830 3369 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3370 }
3371}
3b46e624 3372
9ee6e8bb
PB
3373static void gen_nop_hint(DisasContext *s, int val)
3374{
3375 switch (val) {
3376 case 3: /* wfi */
8984bd2e 3377 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3378 s->is_jmp = DISAS_WFI;
3379 break;
3380 case 2: /* wfe */
3381 case 4: /* sev */
3382 /* TODO: Implement SEV and WFE. May help SMP performance. */
3383 default: /* nop */
3384 break;
3385 }
3386}
99c475ab 3387
ad69471c 3388#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3389
dd8fbd78 3390static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3391{
3392 switch (size) {
dd8fbd78
FN
3393 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3394 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3395 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3396 default: return 1;
3397 }
3398 return 0;
3399}
3400
dd8fbd78 3401static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3402{
3403 switch (size) {
dd8fbd78
FN
3404 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3405 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3406 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3407 default: return;
3408 }
3409}
3410
3411/* 32-bit pairwise ops end up the same as the elementwise versions. */
3412#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3413#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3414#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3415#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
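/* (With 32-bit elements a D register holds exactly one pair, so the pairwise
   max/min of that pair is just the elementwise max/min of the two inputs.) */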
3416
3417/* FIXME: This is wrong. They set the wrong overflow bit. */
3418#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3419#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3420#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3421#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3422
3423#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3424 switch ((size << 1) | u) { \
3425 case 0: \
dd8fbd78 3426 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3427 break; \
3428 case 1: \
dd8fbd78 3429 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3430 break; \
3431 case 2: \
dd8fbd78 3432 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3433 break; \
3434 case 3: \
dd8fbd78 3435 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3436 break; \
3437 case 4: \
dd8fbd78 3438 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3439 break; \
3440 case 5: \
dd8fbd78 3441 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3442 break; \
3443 default: return 1; \
3444 }} while (0)
9ee6e8bb
PB
3445
3446#define GEN_NEON_INTEGER_OP(name) do { \
3447 switch ((size << 1) | u) { \
ad69471c 3448 case 0: \
dd8fbd78 3449 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3450 break; \
3451 case 1: \
dd8fbd78 3452 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3453 break; \
3454 case 2: \
dd8fbd78 3455 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3456 break; \
3457 case 3: \
dd8fbd78 3458 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3459 break; \
3460 case 4: \
dd8fbd78 3461 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3462 break; \
3463 case 5: \
dd8fbd78 3464 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3465 break; \
9ee6e8bb
PB
3466 default: return 1; \
3467 }} while (0)
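/* Both macros dispatch on (size << 1) | u: even cases are the signed 8/16/32-bit
   helpers, odd cases the unsigned ones; size == 3 (64 bit) hits the default
   case and makes the enclosing function return 1. */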
3468
dd8fbd78 3469static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3470{
dd8fbd78
FN
3471 TCGv tmp = new_tmp();
3472 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3473 return tmp;
9ee6e8bb
PB
3474}
3475
dd8fbd78 3476static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3477{
dd8fbd78
FN
3478 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3479 dead_tmp(var);
9ee6e8bb
PB
3480}
3481
dd8fbd78 3482static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3483{
dd8fbd78 3484 TCGv tmp;
9ee6e8bb 3485 if (size == 1) {
dd8fbd78 3486 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3487 } else {
dd8fbd78
FN
3488 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3489 if (reg & 1) {
3490 gen_neon_dup_low16(tmp);
3491 } else {
3492 gen_neon_dup_high16(tmp);
3493 }
9ee6e8bb 3494 }
dd8fbd78 3495 return tmp;
9ee6e8bb
PB
3496}
3497
19457615
FN
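/* Gather the even-numbered bytes of the t1:t0 pair into t0 and the odd-numbered
   bytes into t1 (one 32-bit step of a byte-wise unzip). */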
3498static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3499{
3500 TCGv rd, rm, tmp;
3501
3502 rd = new_tmp();
3503 rm = new_tmp();
3504 tmp = new_tmp();
3505
3506 tcg_gen_andi_i32(rd, t0, 0xff);
3507 tcg_gen_shri_i32(tmp, t0, 8);
3508 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3509 tcg_gen_or_i32(rd, rd, tmp);
3510 tcg_gen_shli_i32(tmp, t1, 16);
3511 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3512 tcg_gen_or_i32(rd, rd, tmp);
3513 tcg_gen_shli_i32(tmp, t1, 8);
3514 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3515 tcg_gen_or_i32(rd, rd, tmp);
3516
3517 tcg_gen_shri_i32(rm, t0, 8);
3518 tcg_gen_andi_i32(rm, rm, 0xff);
3519 tcg_gen_shri_i32(tmp, t0, 16);
3520 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3521 tcg_gen_or_i32(rm, rm, tmp);
3522 tcg_gen_shli_i32(tmp, t1, 8);
3523 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3524 tcg_gen_or_i32(rm, rm, tmp);
3525 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3526 tcg_gen_or_i32(t1, rm, tmp);
3527 tcg_gen_mov_i32(t0, rd);
3528
3529 dead_tmp(tmp);
3530 dead_tmp(rm);
3531 dead_tmp(rd);
3532}
3533
3534static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3535{
3536 TCGv rd, rm, tmp;
3537
3538 rd = new_tmp();
3539 rm = new_tmp();
3540 tmp = new_tmp();
3541
3542 tcg_gen_andi_i32(rd, t0, 0xff);
3543 tcg_gen_shli_i32(tmp, t1, 8);
3544 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3545 tcg_gen_or_i32(rd, rd, tmp);
3546 tcg_gen_shli_i32(tmp, t0, 16);
3547 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3548 tcg_gen_or_i32(rd, rd, tmp);
3549 tcg_gen_shli_i32(tmp, t1, 24);
3550 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3551 tcg_gen_or_i32(rd, rd, tmp);
3552
3553 tcg_gen_andi_i32(rm, t1, 0xff000000);
3554 tcg_gen_shri_i32(tmp, t0, 8);
3555 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3556 tcg_gen_or_i32(rm, rm, tmp);
3557 tcg_gen_shri_i32(tmp, t1, 8);
3558 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3559 tcg_gen_or_i32(rm, rm, tmp);
3560 tcg_gen_shri_i32(tmp, t0, 16);
3561 tcg_gen_andi_i32(tmp, tmp, 0xff);
3562 tcg_gen_or_i32(t1, rm, tmp);
3563 tcg_gen_mov_i32(t0, rd);
3564
3565 dead_tmp(tmp);
3566 dead_tmp(rm);
3567 dead_tmp(rd);
3568}
3569
3570static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3571{
3572 TCGv tmp, tmp2;
3573
3574 tmp = new_tmp();
3575 tmp2 = new_tmp();
3576
3577 tcg_gen_andi_i32(tmp, t0, 0xffff);
3578 tcg_gen_shli_i32(tmp2, t1, 16);
3579 tcg_gen_or_i32(tmp, tmp, tmp2);
3580 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3581 tcg_gen_shri_i32(tmp2, t0, 16);
3582 tcg_gen_or_i32(t1, t1, tmp2);
3583 tcg_gen_mov_i32(t0, tmp);
3584
3585 dead_tmp(tmp2);
3586 dead_tmp(tmp);
3587}
3588
9ee6e8bb
PB
3589static void gen_neon_unzip(int reg, int q, int tmp, int size)
3590{
3591 int n;
dd8fbd78 3592 TCGv t0, t1;
9ee6e8bb
PB
3593
3594 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3595 t0 = neon_load_reg(reg, n);
3596 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3597 switch (size) {
dd8fbd78
FN
3598 case 0: gen_neon_unzip_u8(t0, t1); break;
3599 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3600 case 2: /* no-op */; break;
3601 default: abort();
3602 }
dd8fbd78
FN
3603 neon_store_scratch(tmp + n, t0);
3604 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3605 }
3606}
3607
19457615
FN
3608static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3609{
3610 TCGv rd, tmp;
3611
3612 rd = new_tmp();
3613 tmp = new_tmp();
3614
3615 tcg_gen_shli_i32(rd, t0, 8);
3616 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3617 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3618 tcg_gen_or_i32(rd, rd, tmp);
3619
3620 tcg_gen_shri_i32(t1, t1, 8);
3621 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3622 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3623 tcg_gen_or_i32(t1, t1, tmp);
3624 tcg_gen_mov_i32(t0, rd);
3625
3626 dead_tmp(tmp);
3627 dead_tmp(rd);
3628}
3629
3630static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3631{
3632 TCGv rd, tmp;
3633
3634 rd = new_tmp();
3635 tmp = new_tmp();
3636
3637 tcg_gen_shli_i32(rd, t0, 16);
3638 tcg_gen_andi_i32(tmp, t1, 0xffff);
3639 tcg_gen_or_i32(rd, rd, tmp);
3640 tcg_gen_shri_i32(t1, t1, 16);
3641 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3642 tcg_gen_or_i32(t1, t1, tmp);
3643 tcg_gen_mov_i32(t0, rd);
3644
3645 dead_tmp(tmp);
3646 dead_tmp(rd);
3647}
3648
3649
9ee6e8bb
PB
3650static struct {
3651 int nregs;
3652 int interleave;
3653 int spacing;
3654} neon_ls_element_type[11] = {
3655 {4, 4, 1},
3656 {4, 4, 2},
3657 {4, 1, 1},
3658 {4, 2, 1},
3659 {3, 3, 1},
3660 {3, 3, 2},
3661 {3, 1, 1},
3662 {1, 1, 1},
3663 {2, 2, 1},
3664 {2, 2, 2},
3665 {2, 1, 1}
3666};
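/* Indexed by the op field (insn bits [11:8]) of a "load/store all elements"
   NEON instruction: nregs is the number of D registers transferred, interleave
   the element interleave factor and spacing the register stride. */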
3667
3668/* Translate a NEON load/store element instruction. Return nonzero if the
3669 instruction is invalid. */
3670static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3671{
3672 int rd, rn, rm;
3673 int op;
3674 int nregs;
3675 int interleave;
3676 int stride;
3677 int size;
3678 int reg;
3679 int pass;
3680 int load;
3681 int shift;
9ee6e8bb 3682 int n;
1b2b1e54 3683 TCGv addr;
b0109805 3684 TCGv tmp;
8f8e3aa4 3685 TCGv tmp2;
9ee6e8bb
PB
3686
3687 if (!vfp_enabled(env))
3688 return 1;
3689 VFP_DREG_D(rd, insn);
3690 rn = (insn >> 16) & 0xf;
3691 rm = insn & 0xf;
3692 load = (insn & (1 << 21)) != 0;
1b2b1e54 3693 addr = new_tmp();
9ee6e8bb
PB
3694 if ((insn & (1 << 23)) == 0) {
3695 /* Load/store all elements. */
3696 op = (insn >> 8) & 0xf;
3697 size = (insn >> 6) & 3;
3698 if (op > 10 || size == 3)
3699 return 1;
3700 nregs = neon_ls_element_type[op].nregs;
3701 interleave = neon_ls_element_type[op].interleave;
dcc65026 3702 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3703 stride = (1 << size) * interleave;
3704 for (reg = 0; reg < nregs; reg++) {
3705 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3706 load_reg_var(s, addr, rn);
3707 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3708 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3709 load_reg_var(s, addr, rn);
3710 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb
PB
3711 }
3712 for (pass = 0; pass < 2; pass++) {
3713 if (size == 2) {
3714 if (load) {
1b2b1e54 3715 tmp = gen_ld32(addr, IS_USER(s));
ad69471c 3716 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3717 } else {
ad69471c 3718 tmp = neon_load_reg(rd, pass);
1b2b1e54 3719 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3720 }
1b2b1e54 3721 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb
PB
3722 } else if (size == 1) {
3723 if (load) {
1b2b1e54
FN
3724 tmp = gen_ld16u(addr, IS_USER(s));
3725 tcg_gen_addi_i32(addr, addr, stride);
3726 tmp2 = gen_ld16u(addr, IS_USER(s));
3727 tcg_gen_addi_i32(addr, addr, stride);
8f8e3aa4
PB
3728 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3729 dead_tmp(tmp2);
3730 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3731 } else {
8f8e3aa4
PB
3732 tmp = neon_load_reg(rd, pass);
3733 tmp2 = new_tmp();
3734 tcg_gen_shri_i32(tmp2, tmp, 16);
1b2b1e54
FN
3735 gen_st16(tmp, addr, IS_USER(s));
3736 tcg_gen_addi_i32(addr, addr, stride);
3737 gen_st16(tmp2, addr, IS_USER(s));
3738 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb
PB
3739 }
3740 } else /* size == 0 */ {
3741 if (load) {
a50f5b91 3742 TCGV_UNUSED(tmp2);
9ee6e8bb 3743 for (n = 0; n < 4; n++) {
1b2b1e54
FN
3744 tmp = gen_ld8u(addr, IS_USER(s));
3745 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3746 if (n == 0) {
8f8e3aa4 3747 tmp2 = tmp;
9ee6e8bb 3748 } else {
8f8e3aa4
PB
3749 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3750 dead_tmp(tmp);
9ee6e8bb 3751 }
9ee6e8bb 3752 }
8f8e3aa4 3753 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3754 } else {
8f8e3aa4 3755 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3756 for (n = 0; n < 4; n++) {
8f8e3aa4 3757 tmp = new_tmp();
9ee6e8bb 3758 if (n == 0) {
8f8e3aa4 3759 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3760 } else {
8f8e3aa4 3761 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3762 }
1b2b1e54
FN
3763 gen_st8(tmp, addr, IS_USER(s));
3764 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3765 }
8f8e3aa4 3766 dead_tmp(tmp2);
9ee6e8bb
PB
3767 }
3768 }
3769 }
3770 rd += neon_ls_element_type[op].spacing;
3771 }
3772 stride = nregs * 8;
3773 } else {
3774 size = (insn >> 10) & 3;
3775 if (size == 3) {
3776 /* Load single element to all lanes. */
3777 if (!load)
3778 return 1;
3779 size = (insn >> 6) & 3;
3780 nregs = ((insn >> 8) & 3) + 1;
3781 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3782 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3783 for (reg = 0; reg < nregs; reg++) {
3784 switch (size) {
3785 case 0:
1b2b1e54 3786 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3787 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3788 break;
3789 case 1:
1b2b1e54 3790 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3791 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3792 break;
3793 case 2:
1b2b1e54 3794 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3795 break;
3796 case 3:
3797 return 1;
a50f5b91
PB
3798 default: /* Avoid compiler warnings. */
3799 abort();
99c475ab 3800 }
1b2b1e54 3801 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3802 tmp2 = new_tmp();
3803 tcg_gen_mov_i32(tmp2, tmp);
3804 neon_store_reg(rd, 0, tmp2);
3018f259 3805 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3806 rd += stride;
3807 }
3808 stride = (1 << size) * nregs;
3809 } else {
3810 /* Single element. */
3811 pass = (insn >> 7) & 1;
3812 switch (size) {
3813 case 0:
3814 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3815 stride = 1;
3816 break;
3817 case 1:
3818 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3819 stride = (insn & (1 << 5)) ? 2 : 1;
3820 break;
3821 case 2:
3822 shift = 0;
9ee6e8bb
PB
3823 stride = (insn & (1 << 6)) ? 2 : 1;
3824 break;
3825 default:
3826 abort();
3827 }
3828 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3829 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3830 for (reg = 0; reg < nregs; reg++) {
3831 if (load) {
9ee6e8bb
PB
3832 switch (size) {
3833 case 0:
1b2b1e54 3834 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3835 break;
3836 case 1:
1b2b1e54 3837 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3838 break;
3839 case 2:
1b2b1e54 3840 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3841 break;
a50f5b91
PB
3842 default: /* Avoid compiler warnings. */
3843 abort();
9ee6e8bb
PB
3844 }
3845 if (size != 2) {
8f8e3aa4
PB
3846 tmp2 = neon_load_reg(rd, pass);
3847 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3848 dead_tmp(tmp2);
9ee6e8bb 3849 }
8f8e3aa4 3850 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3851 } else { /* Store */
8f8e3aa4
PB
3852 tmp = neon_load_reg(rd, pass);
3853 if (shift)
3854 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3855 switch (size) {
3856 case 0:
1b2b1e54 3857 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3858 break;
3859 case 1:
1b2b1e54 3860 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3861 break;
3862 case 2:
1b2b1e54 3863 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3864 break;
99c475ab 3865 }
99c475ab 3866 }
9ee6e8bb 3867 rd += stride;
1b2b1e54 3868 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 3869 }
9ee6e8bb 3870 stride = nregs * (1 << size);
99c475ab 3871 }
9ee6e8bb 3872 }
1b2b1e54 3873 dead_tmp(addr);
9ee6e8bb 3874 if (rm != 15) {
b26eefb6
PB
3875 TCGv base;
3876
3877 base = load_reg(s, rn);
9ee6e8bb 3878 if (rm == 13) {
b26eefb6 3879 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3880 } else {
b26eefb6
PB
3881 TCGv index;
3882 index = load_reg(s, rm);
3883 tcg_gen_add_i32(base, base, index);
3884 dead_tmp(index);
9ee6e8bb 3885 }
b26eefb6 3886 store_reg(s, rn, base);
9ee6e8bb
PB
3887 }
3888 return 0;
3889}
3b46e624 3890
8f8e3aa4
PB
3891/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3892static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3893{
3894 tcg_gen_and_i32(t, t, c);
3895 tcg_gen_bic_i32(f, f, c);
3896 tcg_gen_or_i32(dest, t, f);
3897}
3898
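/* A minimal scalar sketch of the bitwise-select pattern implemented above
 * for VBSL/VBIT/VBIF (illustrative only, not used by the translator): each
 * result bit is taken from t where the control bit is 1 and from f where
 * it is 0.
 */
static inline uint32_t ref_bitwise_select(uint32_t t, uint32_t f, uint32_t c)
{
    return (t & c) | (f & ~c);
}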
a7812ae4 3899static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3900{
3901 switch (size) {
3902 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3903 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3904 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3905 default: abort();
3906 }
3907}
3908
a7812ae4 3909static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3910{
3911 switch (size) {
3912 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3913 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3914 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3915 default: abort();
3916 }
3917}
3918
a7812ae4 3919static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3920{
3921 switch (size) {
3922 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3923 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3924 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3925 default: abort();
3926 }
3927}
3928
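/* A minimal scalar sketch of one saturating-narrow lane (illustrative only,
 * not used by the translator): the value is clamped to the range of the
 * narrower type before truncation, shown here for u16 -> u8.
 */
static inline uint8_t ref_narrow_sat_u16_to_u8(uint16_t x)
{
    return x > 0xff ? 0xff : (uint8_t)x;
}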
3929static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3930 int q, int u)
3931{
3932 if (q) {
3933 if (u) {
3934 switch (size) {
3935 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3936 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3937 default: abort();
3938 }
3939 } else {
3940 switch (size) {
3941 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3942 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3943 default: abort();
3944 }
3945 }
3946 } else {
3947 if (u) {
3948 switch (size) {
3949 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3950 case 2: gen_helper_neon_shl_u32(var, var, shift); break;

3951 default: abort();
3952 }
3953 } else {
3954 switch (size) {
3955 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3956 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3957 default: abort();
3958 }
3959 }
3960 }
3961}
3962
a7812ae4 3963static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
3964{
3965 if (u) {
3966 switch (size) {
3967 case 0: gen_helper_neon_widen_u8(dest, src); break;
3968 case 1: gen_helper_neon_widen_u16(dest, src); break;
3969 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3970 default: abort();
3971 }
3972 } else {
3973 switch (size) {
3974 case 0: gen_helper_neon_widen_s8(dest, src); break;
3975 case 1: gen_helper_neon_widen_s16(dest, src); break;
3976 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3977 default: abort();
3978 }
3979 }
3980 dead_tmp(src);
3981}
3982
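/* Scalar sketch of one widened lane (illustrative only, not used by the
 * translator): the unsigned variants zero-extend and the signed variants
 * sign-extend before the 64-bit arithmetic that follows.
 */
static inline uint64_t ref_widen_u8(uint8_t x) { return (uint64_t)x; }
static inline int64_t  ref_widen_s8(int8_t x)  { return (int64_t)x; }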
3983static inline void gen_neon_addl(int size)
3984{
3985 switch (size) {
3986 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3987 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3988 case 2: tcg_gen_add_i64(CPU_V001); break;
3989 default: abort();
3990 }
3991}
3992
3993static inline void gen_neon_subl(int size)
3994{
3995 switch (size) {
3996 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3997 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3998 case 2: tcg_gen_sub_i64(CPU_V001); break;
3999 default: abort();
4000 }
4001}
4002
a7812ae4 4003static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4004{
4005 switch (size) {
4006 case 0: gen_helper_neon_negl_u16(var, var); break;
4007 case 1: gen_helper_neon_negl_u32(var, var); break;
4008 case 2: gen_helper_neon_negl_u64(var, var); break;
4009 default: abort();
4010 }
4011}
4012
a7812ae4 4013static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4014{
4015 switch (size) {
4016 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4017 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4018 default: abort();
4019 }
4020}
4021
a7812ae4 4022static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4023{
a7812ae4 4024 TCGv_i64 tmp;
ad69471c
PB
4025
4026 switch ((size << 1) | u) {
4027 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4028 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4029 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4030 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4031 case 4:
4032 tmp = gen_muls_i64_i32(a, b);
4033 tcg_gen_mov_i64(dest, tmp);
4034 break;
4035 case 5:
4036 tmp = gen_mulu_i64_i32(a, b);
4037 tcg_gen_mov_i64(dest, tmp);
4038 break;
4039 default: abort();
4040 }
ad69471c
PB
4041}
4042
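/* Scalar sketch of one widened product lane (illustrative only, not used by
 * the translator): signedness only affects how the 32-bit inputs are
 * extended before the 64-bit multiply, mirroring the size/u dispatch above.
 */
static inline int64_t ref_mull_s32(int32_t a, int32_t b)
{
    return (int64_t)a * (int64_t)b;
}
static inline uint64_t ref_mull_u32(uint32_t a, uint32_t b)
{
    return (uint64_t)a * (uint64_t)b;
}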
9ee6e8bb
PB
4043/* Translate a NEON data processing instruction. Return nonzero if the
4044 instruction is invalid.
ad69471c
PB
4045 We process data in a mixture of 32-bit and 64-bit chunks.
4046 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4047
9ee6e8bb
PB
4048static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4049{
4050 int op;
4051 int q;
4052 int rd, rn, rm;
4053 int size;
4054 int shift;
4055 int pass;
4056 int count;
4057 int pairwise;
4058 int u;
4059 int n;
4060 uint32_t imm;
8f8e3aa4
PB
4061 TCGv tmp;
4062 TCGv tmp2;
4063 TCGv tmp3;
a7812ae4 4064 TCGv_i64 tmp64;
9ee6e8bb
PB
4065
4066 if (!vfp_enabled(env))
4067 return 1;
4068 q = (insn & (1 << 6)) != 0;
4069 u = (insn >> 24) & 1;
4070 VFP_DREG_D(rd, insn);
4071 VFP_DREG_N(rn, insn);
4072 VFP_DREG_M(rm, insn);
4073 size = (insn >> 20) & 3;
4074 if ((insn & (1 << 23)) == 0) {
4075 /* Three register same length. */
4076 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4077 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4078 || op == 10 || op == 11 || op == 16)) {
4079 /* 64-bit element instructions. */
9ee6e8bb 4080 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4081 neon_load_reg64(cpu_V0, rn + pass);
4082 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4083 switch (op) {
4084 case 1: /* VQADD */
4085 if (u) {
ad69471c 4086 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4087 } else {
ad69471c 4088 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4089 }
9ee6e8bb
PB
4090 break;
4091 case 5: /* VQSUB */
4092 if (u) {
ad69471c
PB
4093 gen_helper_neon_sub_saturate_u64(CPU_V001);
4094 } else {
4095 gen_helper_neon_sub_saturate_s64(CPU_V001);
4096 }
4097 break;
4098 case 8: /* VSHL */
4099 if (u) {
4100 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4101 } else {
4102 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4103 }
4104 break;
4105 case 9: /* VQSHL */
4106 if (u) {
4107 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4108 cpu_V1, cpu_V0);
4109 } else {
4110 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4111 cpu_V1, cpu_V0);
4112 }
4113 break;
4114 case 10: /* VRSHL */
4115 if (u) {
4116 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4117 } else {
ad69471c
PB
4118 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4119 }
4120 break;
4121 case 11: /* VQRSHL */
4122 if (u) {
4123 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4124 cpu_V1, cpu_V0);
4125 } else {
4126 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4127 cpu_V1, cpu_V0);
1e8d4eec 4128 }
9ee6e8bb
PB
4129 break;
4130 case 16:
4131 if (u) {
ad69471c 4132 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4133 } else {
ad69471c 4134 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4135 }
4136 break;
4137 default:
4138 abort();
2c0262af 4139 }
ad69471c 4140 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4141 }
9ee6e8bb 4142 return 0;
2c0262af 4143 }
9ee6e8bb
PB
4144 switch (op) {
4145 case 8: /* VSHL */
4146 case 9: /* VQSHL */
4147 case 10: /* VRSHL */
ad69471c 4148 case 11: /* VQRSHL */
9ee6e8bb 4149 {
ad69471c
PB
4150 int rtmp;
4151 /* Shift instruction operands are reversed. */
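                /* Architecturally VSHL Dd, Dm, Dn computes Dm << Dn: the
                   value comes from Dm and the per-element shift count from
                   Dn, while the generic element loop below hands (rn, rm)
                   to the helpers as (value, shift).  Swapping the register
                   numbers here lets that loop be reused unchanged. */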
4152 rtmp = rn;
9ee6e8bb 4153 rn = rm;
ad69471c 4154 rm = rtmp;
9ee6e8bb
PB
4155 pairwise = 0;
4156 }
2c0262af 4157 break;
9ee6e8bb
PB
4158 case 20: /* VPMAX */
4159 case 21: /* VPMIN */
4160 case 23: /* VPADD */
4161 pairwise = 1;
2c0262af 4162 break;
9ee6e8bb
PB
4163 case 26: /* VPADD (float) */
4164 pairwise = (u && size < 2);
2c0262af 4165 break;
9ee6e8bb
PB
4166 case 30: /* VPMIN/VPMAX (float) */
4167 pairwise = u;
2c0262af 4168 break;
9ee6e8bb
PB
4169 default:
4170 pairwise = 0;
2c0262af 4171 break;
9ee6e8bb 4172 }
dd8fbd78 4173
9ee6e8bb
PB
4174 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4175
4176 if (pairwise) {
4177 /* Pairwise. */
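                /* Pairwise ops combine adjacent element pairs: the low half
                   of the result is formed from pairs in rn and the high half
                   from pairs in rm, hence the source/index selection below. */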
4178 if (q)
4179 n = (pass & 1) * 2;
2c0262af 4180 else
9ee6e8bb
PB
4181 n = 0;
4182 if (pass < q + 1) {
dd8fbd78
FN
4183 tmp = neon_load_reg(rn, n);
4184 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4185 } else {
dd8fbd78
FN
4186 tmp = neon_load_reg(rm, n);
4187 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4188 }
4189 } else {
4190 /* Elementwise. */
dd8fbd78
FN
4191 tmp = neon_load_reg(rn, pass);
4192 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4193 }
4194 switch (op) {
4195 case 0: /* VHADD */
4196 GEN_NEON_INTEGER_OP(hadd);
4197 break;
4198 case 1: /* VQADD */
ad69471c 4199 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4200 break;
9ee6e8bb
PB
4201 case 2: /* VRHADD */
4202 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4203 break;
9ee6e8bb
PB
4204 case 3: /* Logic ops. */
4205 switch ((u << 2) | size) {
4206 case 0: /* VAND */
dd8fbd78 4207 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4208 break;
4209 case 1: /* BIC */
dd8fbd78 4210 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4211 break;
4212 case 2: /* VORR */
dd8fbd78 4213 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4214 break;
4215 case 3: /* VORN */
dd8fbd78
FN
4216 tcg_gen_not_i32(tmp2, tmp2);
4217 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4218 break;
4219 case 4: /* VEOR */
dd8fbd78 4220 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4221 break;
4222 case 5: /* VBSL */
dd8fbd78
FN
4223 tmp3 = neon_load_reg(rd, pass);
4224 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4225 dead_tmp(tmp3);
9ee6e8bb
PB
4226 break;
4227 case 6: /* VBIT */
dd8fbd78
FN
4228 tmp3 = neon_load_reg(rd, pass);
4229 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4230 dead_tmp(tmp3);
9ee6e8bb
PB
4231 break;
4232 case 7: /* VBIF */
dd8fbd78
FN
4233 tmp3 = neon_load_reg(rd, pass);
4234 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4235 dead_tmp(tmp3);
9ee6e8bb 4236 break;
2c0262af
FB
4237 }
4238 break;
9ee6e8bb
PB
4239 case 4: /* VHSUB */
4240 GEN_NEON_INTEGER_OP(hsub);
4241 break;
4242 case 5: /* VQSUB */
ad69471c 4243 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4244 break;
9ee6e8bb
PB
4245 case 6: /* VCGT */
4246 GEN_NEON_INTEGER_OP(cgt);
4247 break;
4248 case 7: /* VCGE */
4249 GEN_NEON_INTEGER_OP(cge);
4250 break;
4251 case 8: /* VSHL */
ad69471c 4252 GEN_NEON_INTEGER_OP(shl);
2c0262af 4253 break;
9ee6e8bb 4254 case 9: /* VQSHL */
ad69471c 4255 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4256 break;
9ee6e8bb 4257 case 10: /* VRSHL */
ad69471c 4258 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4259 break;
9ee6e8bb 4260 case 11: /* VQRSHL */
ad69471c 4261 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4262 break;
4263 case 12: /* VMAX */
4264 GEN_NEON_INTEGER_OP(max);
4265 break;
4266 case 13: /* VMIN */
4267 GEN_NEON_INTEGER_OP(min);
4268 break;
4269 case 14: /* VABD */
4270 GEN_NEON_INTEGER_OP(abd);
4271 break;
4272 case 15: /* VABA */
4273 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4274 dead_tmp(tmp2);
4275 tmp2 = neon_load_reg(rd, pass);
4276 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4277 break;
4278 case 16:
4279 if (!u) { /* VADD */
dd8fbd78 4280 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4281 return 1;
4282 } else { /* VSUB */
4283 switch (size) {
dd8fbd78
FN
4284 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4285 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4286 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4287 default: return 1;
4288 }
4289 }
4290 break;
4291 case 17:
4292 if (!u) { /* VTST */
4293 switch (size) {
dd8fbd78
FN
4294 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4295 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4296 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4297 default: return 1;
4298 }
4299 } else { /* VCEQ */
4300 switch (size) {
dd8fbd78
FN
4301 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4302 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4303 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4304 default: return 1;
4305 }
4306 }
4307 break;
4308 case 18: /* Multiply. */
4309 switch (size) {
dd8fbd78
FN
4310 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4311 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4312 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4313 default: return 1;
4314 }
dd8fbd78
FN
4315 dead_tmp(tmp2);
4316 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4317 if (u) { /* VMLS */
dd8fbd78 4318 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4319 } else { /* VMLA */
dd8fbd78 4320 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4321 }
4322 break;
4323 case 19: /* VMUL */
4324 if (u) { /* polynomial */
dd8fbd78 4325 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4326 } else { /* Integer */
4327 switch (size) {
dd8fbd78
FN
4328 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4329 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4330 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4331 default: return 1;
4332 }
4333 }
4334 break;
4335 case 20: /* VPMAX */
4336 GEN_NEON_INTEGER_OP(pmax);
4337 break;
4338 case 21: /* VPMIN */
4339 GEN_NEON_INTEGER_OP(pmin);
4340 break;
4341 case 22: /* Multiply high. */
4342 if (!u) { /* VQDMULH */
4343 switch (size) {
dd8fbd78
FN
4344 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4345 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4346 default: return 1;
4347 }
4348 } else { /* VQRDMULH */
4349 switch (size) {
dd8fbd78
FN
4350 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4351 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4352 default: return 1;
4353 }
4354 }
4355 break;
4356 case 23: /* VPADD */
4357 if (u)
4358 return 1;
4359 switch (size) {
dd8fbd78
FN
4360 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4361 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4362 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4363 default: return 1;
4364 }
4365 break;
4366 case 26: /* Floating point arithmetic. */
4367 switch ((u << 2) | size) {
4368 case 0: /* VADD */
dd8fbd78 4369 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4370 break;
4371 case 2: /* VSUB */
dd8fbd78 4372 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4373 break;
4374 case 4: /* VPADD */
dd8fbd78 4375 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4376 break;
4377 case 6: /* VABD */
dd8fbd78 4378 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4379 break;
4380 default:
4381 return 1;
4382 }
4383 break;
4384 case 27: /* Float multiply. */
dd8fbd78 4385 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4386 if (!u) {
dd8fbd78
FN
4387 dead_tmp(tmp2);
4388 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4389 if (size == 0) {
dd8fbd78 4390 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4391 } else {
dd8fbd78 4392 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4393 }
4394 }
4395 break;
4396 case 28: /* Float compare. */
4397 if (!u) {
dd8fbd78 4398 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4399 } else {
9ee6e8bb 4400 if (size == 0)
dd8fbd78 4401 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4402 else
dd8fbd78 4403 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4404 }
2c0262af 4405 break;
9ee6e8bb
PB
4406 case 29: /* Float compare absolute. */
4407 if (!u)
4408 return 1;
4409 if (size == 0)
dd8fbd78 4410 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4411 else
dd8fbd78 4412 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4413 break;
9ee6e8bb
PB
4414 case 30: /* Float min/max. */
4415 if (size == 0)
dd8fbd78 4416 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4417 else
dd8fbd78 4418 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4419 break;
4420 case 31:
4421 if (size == 0)
dd8fbd78 4422 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4423 else
dd8fbd78 4424 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4425 break;
9ee6e8bb
PB
4426 default:
4427 abort();
2c0262af 4428 }
dd8fbd78
FN
4429 dead_tmp(tmp2);
4430
9ee6e8bb
PB
4431 /* Save the result. For elementwise operations we can put it
4432 straight into the destination register. For pairwise operations
4433 we have to be careful to avoid clobbering the source operands. */
4434 if (pairwise && rd == rm) {
dd8fbd78 4435 neon_store_scratch(pass, tmp);
9ee6e8bb 4436 } else {
dd8fbd78 4437 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4438 }
4439
4440 } /* for pass */
4441 if (pairwise && rd == rm) {
4442 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4443 tmp = neon_load_scratch(pass);
4444 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4445 }
4446 }
ad69471c 4447 /* End of 3 register same size operations. */
9ee6e8bb
PB
4448 } else if (insn & (1 << 4)) {
4449 if ((insn & 0x00380080) != 0) {
4450 /* Two registers and shift. */
4451 op = (insn >> 8) & 0xf;
4452 if (insn & (1 << 7)) {
4453 /* 64-bit shift. */
4454 size = 3;
4455 } else {
4456 size = 2;
4457 while ((insn & (1 << (size + 19))) == 0)
4458 size--;
4459 }
4460 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4461 /* To avoid excessive duplication of ops we implement shift
4462 by immediate using the variable shift operations. */
4463 if (op < 8) {
4464 /* Shift by immediate:
4465 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4466 /* Right shifts are encoded as N - shift, where N is the
4467 element size in bits. */
4468 if (op <= 4)
4469 shift = shift - (1 << (size + 3));
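                    /* e.g. for byte elements (size == 0) an encoded count of
                       1 becomes shift = 1 - 8 = -7: the negative value means
                       a right shift by 7, which is what the variable-shift
                       helpers used below expect. */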
9ee6e8bb
PB
4470 if (size == 3) {
4471 count = q + 1;
4472 } else {
4473 count = q ? 4: 2;
4474 }
4475 switch (size) {
4476 case 0:
4477 imm = (uint8_t) shift;
4478 imm |= imm << 8;
4479 imm |= imm << 16;
4480 break;
4481 case 1:
4482 imm = (uint16_t) shift;
4483 imm |= imm << 16;
4484 break;
4485 case 2:
4486 case 3:
4487 imm = shift;
4488 break;
4489 default:
4490 abort();
4491 }
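                    /* The (possibly negative) count has been replicated into
                       every element of a 32-bit word, e.g. size == 0 with
                       shift == -7 gives imm = 0xf9f9f9f9, so each byte lane
                       seen by the variable-shift helper gets the same count. */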
4492
4493 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4494 if (size == 3) {
4495 neon_load_reg64(cpu_V0, rm + pass);
4496 tcg_gen_movi_i64(cpu_V1, imm);
4497 switch (op) {
4498 case 0: /* VSHR */
4499 case 1: /* VSRA */
4500 if (u)
4501 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4502 else
ad69471c 4503 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4504 break;
ad69471c
PB
4505 case 2: /* VRSHR */
4506 case 3: /* VRSRA */
4507 if (u)
4508 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4509 else
ad69471c 4510 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4511 break;
ad69471c
PB
4512 case 4: /* VSRI */
4513 if (!u)
4514 return 1;
4515 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4516 break;
4517 case 5: /* VSHL, VSLI */
4518 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4519 break;
4520 case 6: /* VQSHL */
4521 if (u)
4522 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4523 else
ad69471c
PB
4524 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4525 break;
4526 case 7: /* VQSHLU */
4527 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4528 break;
9ee6e8bb 4529 }
ad69471c
PB
4530 if (op == 1 || op == 3) {
4531 /* Accumulate. */
4532 neon_load_reg64(cpu_V0, rd + pass);
4533 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4534 } else if (op == 4 || (op == 5 && u)) {
4535 /* Insert */
4536 cpu_abort(env, "VS[LR]I.64 not implemented");
4537 }
4538 neon_store_reg64(cpu_V0, rd + pass);
4539 } else { /* size < 3 */
4540 /* Operands in T0 and T1. */
dd8fbd78
FN
4541 tmp = neon_load_reg(rm, pass);
4542 tmp2 = new_tmp();
4543 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4544 switch (op) {
4545 case 0: /* VSHR */
4546 case 1: /* VSRA */
4547 GEN_NEON_INTEGER_OP(shl);
4548 break;
4549 case 2: /* VRSHR */
4550 case 3: /* VRSRA */
4551 GEN_NEON_INTEGER_OP(rshl);
4552 break;
4553 case 4: /* VSRI */
4554 if (!u)
4555 return 1;
4556 GEN_NEON_INTEGER_OP(shl);
4557 break;
4558 case 5: /* VSHL, VSLI */
4559 switch (size) {
dd8fbd78
FN
4560 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4561 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4562 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4563 default: return 1;
4564 }
4565 break;
4566 case 6: /* VQSHL */
4567 GEN_NEON_INTEGER_OP_ENV(qshl);
4568 break;
4569 case 7: /* VQSHLU */
4570 switch (size) {
dd8fbd78
FN
4571 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4572 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4573 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4574 default: return 1;
4575 }
4576 break;
4577 }
dd8fbd78 4578 dead_tmp(tmp2);
ad69471c
PB
4579
4580 if (op == 1 || op == 3) {
4581 /* Accumulate. */
dd8fbd78
FN
4582 tmp2 = neon_load_reg(rd, pass);
4583 gen_neon_add(size, tmp2, tmp);
4584 dead_tmp(tmp2);
ad69471c
PB
4585 } else if (op == 4 || (op == 5 && u)) {
4586 /* Insert */
4587 switch (size) {
4588 case 0:
4589 if (op == 4)
4590 imm = 0xff >> -shift;
4591 else
4592 imm = (uint8_t)(0xff << shift);
4593 imm |= imm << 8;
4594 imm |= imm << 16;
4595 break;
4596 case 1:
4597 if (op == 4)
4598 imm = 0xffff >> -shift;
4599 else
4600 imm = (uint16_t)(0xffff << shift);
4601 imm |= imm << 16;
4602 break;
4603 case 2:
4604 if (op == 4)
4605 imm = 0xffffffffu >> -shift;
4606 else
4607 imm = 0xffffffffu << shift;
4608 break;
4609 default:
4610 abort();
4611 }
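                        /* e.g. VSRI.8 with a right shift of 3 gives a per-byte
                           mask of 0xff >> 3 = 0x1f: the low five bits of each
                           byte come from the shifted value in tmp, while the
                           top three bits of the destination are preserved via
                           ~imm below. */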
dd8fbd78
FN
4612 tmp2 = neon_load_reg(rd, pass);
4613 tcg_gen_andi_i32(tmp, tmp, imm);
4614 tcg_gen_andi_i32(tmp2, tmp2, ~imm);
4615 tcg_gen_or_i32(tmp, tmp, tmp2);
4616 dead_tmp(tmp2);
ad69471c 4617 }
dd8fbd78 4618 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4619 }
4620 } /* for pass */
4621 } else if (op < 10) {
ad69471c 4622 /* Shift by immediate and narrow:
9ee6e8bb
PB
4623 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4624 shift = shift - (1 << (size + 3));
4625 size++;
9ee6e8bb
PB
4626 switch (size) {
4627 case 1:
ad69471c 4628 imm = (uint16_t)shift;
9ee6e8bb 4629 imm |= imm << 16;
ad69471c 4630 tmp2 = tcg_const_i32(imm);
a7812ae4 4631 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4632 break;
4633 case 2:
ad69471c
PB
4634 imm = (uint32_t)shift;
4635 tmp2 = tcg_const_i32(imm);
a7812ae4 4636 TCGV_UNUSED_I64(tmp64);
4cc633c3 4637 break;
9ee6e8bb 4638 case 3:
a7812ae4
PB
4639 tmp64 = tcg_const_i64(shift);
4640 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4641 break;
4642 default:
4643 abort();
4644 }
4645
ad69471c
PB
4646 for (pass = 0; pass < 2; pass++) {
4647 if (size == 3) {
4648 neon_load_reg64(cpu_V0, rm + pass);
4649 if (q) {
4650 if (u)
a7812ae4 4651 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4652 else
a7812ae4 4653 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4654 } else {
4655 if (u)
a7812ae4 4656 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4657 else
a7812ae4 4658 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4659 }
2c0262af 4660 } else {
ad69471c
PB
4661 tmp = neon_load_reg(rm + pass, 0);
4662 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4663 tmp3 = neon_load_reg(rm + pass, 1);
4664 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4665 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4666 dead_tmp(tmp);
36aa55dc 4667 dead_tmp(tmp3);
9ee6e8bb 4668 }
ad69471c
PB
4669 tmp = new_tmp();
4670 if (op == 8 && !u) {
4671 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4672 } else {
ad69471c
PB
4673 if (op == 8)
4674 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4675 else
ad69471c
PB
4676 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4677 }
4678 if (pass == 0) {
4679 tmp2 = tmp;
4680 } else {
4681 neon_store_reg(rd, 0, tmp2);
4682 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4683 }
4684 } /* for pass */
4685 } else if (op == 10) {
4686 /* VSHLL */
ad69471c 4687 if (q || size == 3)
9ee6e8bb 4688 return 1;
ad69471c
PB
4689 tmp = neon_load_reg(rm, 0);
4690 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4691 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4692 if (pass == 1)
4693 tmp = tmp2;
4694
4695 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4696
9ee6e8bb
PB
4697 if (shift != 0) {
4698 /* The shift is less than the width of the source
ad69471c
PB
4699 type, so we can just shift the whole register. */
4700 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4701 if (size < 2 || !u) {
4702 uint64_t imm64;
4703 if (size == 0) {
4704 imm = (0xffu >> (8 - shift));
4705 imm |= imm << 16;
4706 } else {
4707 imm = 0xffff >> (16 - shift);
9ee6e8bb 4708 }
ad69471c
PB
4709 imm64 = imm | (((uint64_t)imm) << 32);
4710 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
4711 }
4712 }
ad69471c 4713 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4714 }
4715 } else if (op == 15 || op == 16) {
4716 /* VCVT fixed-point. */
4717 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4718 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4719 if (op & 1) {
4720 if (u)
4373f3ce 4721 gen_vfp_ulto(0, shift);
9ee6e8bb 4722 else
4373f3ce 4723 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4724 } else {
4725 if (u)
4373f3ce 4726 gen_vfp_toul(0, shift);
9ee6e8bb 4727 else
4373f3ce 4728 gen_vfp_tosl(0, shift);
2c0262af 4729 }
4373f3ce 4730 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4731 }
4732 } else {
9ee6e8bb
PB
4733 return 1;
4734 }
4735 } else { /* (insn & 0x00380080) == 0 */
4736 int invert;
4737
4738 op = (insn >> 8) & 0xf;
4739 /* One register and immediate. */
4740 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4741 invert = (insn & (1 << 5)) != 0;
4742 switch (op) {
4743 case 0: case 1:
4744 /* no-op */
4745 break;
4746 case 2: case 3:
4747 imm <<= 8;
4748 break;
4749 case 4: case 5:
4750 imm <<= 16;
4751 break;
4752 case 6: case 7:
4753 imm <<= 24;
4754 break;
4755 case 8: case 9:
4756 imm |= imm << 16;
4757 break;
4758 case 10: case 11:
4759 imm = (imm << 8) | (imm << 24);
4760 break;
4761 case 12:
4762 imm = (imm << 8) | 0xff;
4763 break;
4764 case 13:
4765 imm = (imm << 16) | 0xffff;
4766 break;
4767 case 14:
4768 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4769 if (invert)
4770 imm = ~imm;
4771 break;
4772 case 15:
4773 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4774 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4775 break;
4776 }
4777 if (invert)
4778 imm = ~imm;
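            /* For reference, an encoded byte of 0xab expands (before any
               inversion) to 0x0000ab00 for op 2/3, 0x0000abff for op 12 and
               0x00ab00ab for op 8/9; the resulting 32-bit pattern is then
               written to every word of the destination below. */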
4779
9ee6e8bb
PB
4780 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4781 if (op & 1 && op < 12) {
ad69471c 4782 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4783 if (invert) {
4784 /* The immediate value has already been inverted, so
4785 BIC becomes AND. */
ad69471c 4786 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4787 } else {
ad69471c 4788 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4789 }
9ee6e8bb 4790 } else {
ad69471c
PB
4791 /* VMOV, VMVN. */
4792 tmp = new_tmp();
9ee6e8bb 4793 if (op == 14 && invert) {
ad69471c
PB
4794 uint32_t val;
4795 val = 0;
9ee6e8bb
PB
4796 for (n = 0; n < 4; n++) {
4797 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4798 val |= 0xff << (n * 8);
9ee6e8bb 4799 }
ad69471c
PB
4800 tcg_gen_movi_i32(tmp, val);
4801 } else {
4802 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4803 }
9ee6e8bb 4804 }
ad69471c 4805 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4806 }
4807 }
e4b3861d 4808 } else { /* (insn & 0x00800010) == 0x00800000 */
9ee6e8bb
PB
4809 if (size != 3) {
4810 op = (insn >> 8) & 0xf;
4811 if ((insn & (1 << 6)) == 0) {
4812 /* Three registers of different lengths. */
4813 int src1_wide;
4814 int src2_wide;
4815 int prewiden;
4816 /* prewiden, src1_wide, src2_wide */
4817 static const int neon_3reg_wide[16][3] = {
4818 {1, 0, 0}, /* VADDL */
4819 {1, 1, 0}, /* VADDW */
4820 {1, 0, 0}, /* VSUBL */
4821 {1, 1, 0}, /* VSUBW */
4822 {0, 1, 1}, /* VADDHN */
4823 {0, 0, 0}, /* VABAL */
4824 {0, 1, 1}, /* VSUBHN */
4825 {0, 0, 0}, /* VABDL */
4826 {0, 0, 0}, /* VMLAL */
4827 {0, 0, 0}, /* VQDMLAL */
4828 {0, 0, 0}, /* VMLSL */
4829 {0, 0, 0}, /* VQDMLSL */
4830 {0, 0, 0}, /* Integer VMULL */
4831 {0, 0, 0}, /* VQDMULL */
4832 {0, 0, 0} /* Polynomial VMULL */
4833 };
4834
4835 prewiden = neon_3reg_wide[op][0];
4836 src1_wide = neon_3reg_wide[op][1];
4837 src2_wide = neon_3reg_wide[op][2];
4838
ad69471c
PB
4839 if (size == 0 && (op == 9 || op == 11 || op == 13))
4840 return 1;
4841
9ee6e8bb
PB
4842 /* Avoid overlapping operands. Wide source operands are
4843 always aligned so will never overlap with wide
4844 destinations in problematic ways. */
8f8e3aa4 4845 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4846 tmp = neon_load_reg(rm, 1);
4847 neon_store_scratch(2, tmp);
8f8e3aa4 4848 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4849 tmp = neon_load_reg(rn, 1);
4850 neon_store_scratch(2, tmp);
9ee6e8bb 4851 }
a50f5b91 4852 TCGV_UNUSED(tmp3);
9ee6e8bb 4853 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4854 if (src1_wide) {
4855 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4856 TCGV_UNUSED(tmp);
9ee6e8bb 4857 } else {
ad69471c 4858 if (pass == 1 && rd == rn) {
dd8fbd78 4859 tmp = neon_load_scratch(2);
9ee6e8bb 4860 } else {
ad69471c
PB
4861 tmp = neon_load_reg(rn, pass);
4862 }
4863 if (prewiden) {
4864 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4865 }
4866 }
ad69471c
PB
4867 if (src2_wide) {
4868 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4869 TCGV_UNUSED(tmp2);
9ee6e8bb 4870 } else {
ad69471c 4871 if (pass == 1 && rd == rm) {
dd8fbd78 4872 tmp2 = neon_load_scratch(2);
9ee6e8bb 4873 } else {
ad69471c
PB
4874 tmp2 = neon_load_reg(rm, pass);
4875 }
4876 if (prewiden) {
4877 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4878 }
9ee6e8bb
PB
4879 }
4880 switch (op) {
4881 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4882 gen_neon_addl(size);
9ee6e8bb
PB
4883 break;
4884 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 4885 gen_neon_subl(size);
9ee6e8bb
PB
4886 break;
4887 case 5: case 7: /* VABAL, VABDL */
4888 switch ((size << 1) | u) {
ad69471c
PB
4889 case 0:
4890 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4891 break;
4892 case 1:
4893 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4894 break;
4895 case 2:
4896 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4897 break;
4898 case 3:
4899 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4900 break;
4901 case 4:
4902 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4903 break;
4904 case 5:
4905 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4906 break;
9ee6e8bb
PB
4907 default: abort();
4908 }
ad69471c
PB
4909 dead_tmp(tmp2);
4910 dead_tmp(tmp);
9ee6e8bb
PB
4911 break;
4912 case 8: case 9: case 10: case 11: case 12: case 13:
4913 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4914 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
4915 dead_tmp(tmp2);
4916 dead_tmp(tmp);
9ee6e8bb
PB
4917 break;
4918 case 14: /* Polynomial VMULL */
4919 cpu_abort(env, "Polynomial VMULL not implemented");
4920
4921 default: /* 15 is RESERVED. */
4922 return 1;
4923 }
4924 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4925 /* Accumulate. */
4926 if (op == 10 || op == 11) {
ad69471c 4927 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4928 }
4929
9ee6e8bb 4930 if (op != 13) {
ad69471c 4931 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4932 }
4933
4934 switch (op) {
4935 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4936 gen_neon_addl(size);
9ee6e8bb
PB
4937 break;
4938 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4939 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4940 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4941 break;
9ee6e8bb
PB
4942 /* Fall through. */
4943 case 13: /* VQDMULL */
ad69471c 4944 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4945 break;
4946 default:
4947 abort();
4948 }
ad69471c 4949 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4950 } else if (op == 4 || op == 6) {
4951 /* Narrowing operation. */
ad69471c 4952 tmp = new_tmp();
9ee6e8bb
PB
4953 if (u) {
4954 switch (size) {
ad69471c
PB
4955 case 0:
4956 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4957 break;
4958 case 1:
4959 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4960 break;
4961 case 2:
4962 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4963 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4964 break;
9ee6e8bb
PB
4965 default: abort();
4966 }
4967 } else {
4968 switch (size) {
ad69471c
PB
4969 case 0:
4970 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4971 break;
4972 case 1:
4973 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4974 break;
4975 case 2:
4976 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4977 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4978 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4979 break;
9ee6e8bb
PB
4980 default: abort();
4981 }
4982 }
ad69471c
PB
4983 if (pass == 0) {
4984 tmp3 = tmp;
4985 } else {
4986 neon_store_reg(rd, 0, tmp3);
4987 neon_store_reg(rd, 1, tmp);
4988 }
9ee6e8bb
PB
4989 } else {
4990 /* Write back the result. */
ad69471c 4991 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4992 }
4993 }
4994 } else {
4995 /* Two registers and a scalar. */
4996 switch (op) {
4997 case 0: /* Integer VMLA scalar */
4998 case 1: /* Float VMLA scalar */
4999 case 4: /* Integer VMLS scalar */
5000 case 5: /* Floating point VMLS scalar */
5001 case 8: /* Integer VMUL scalar */
5002 case 9: /* Floating point VMUL scalar */
5003 case 12: /* VQDMULH scalar */
5004 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5005 tmp = neon_get_scalar(size, rm);
5006 neon_store_scratch(0, tmp);
9ee6e8bb 5007 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5008 tmp = neon_load_scratch(0);
5009 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5010 if (op == 12) {
5011 if (size == 1) {
dd8fbd78 5012 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5013 } else {
dd8fbd78 5014 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5015 }
5016 } else if (op == 13) {
5017 if (size == 1) {
dd8fbd78 5018 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5019 } else {
dd8fbd78 5020 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5021 }
5022 } else if (op & 1) {
dd8fbd78 5023 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5024 } else {
5025 switch (size) {
dd8fbd78
FN
5026 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5027 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5028 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5029 default: return 1;
5030 }
5031 }
dd8fbd78 5032 dead_tmp(tmp2);
9ee6e8bb
PB
5033 if (op < 8) {
5034 /* Accumulate. */
dd8fbd78 5035 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5036 switch (op) {
5037 case 0:
dd8fbd78 5038 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5039 break;
5040 case 1:
dd8fbd78 5041 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5042 break;
5043 case 4:
dd8fbd78 5044 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5045 break;
5046 case 5:
dd8fbd78 5047 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5048 break;
5049 default:
5050 abort();
5051 }
dd8fbd78 5052 dead_tmp(tmp2);
9ee6e8bb 5053 }
dd8fbd78 5054 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5055 }
5056 break;
5057 case 2: /* VMLAL scalar */
5058 case 3: /* VQDMLAL scalar */
5059 case 6: /* VMLSL scalar */
5060 case 7: /* VQDMLSL scalar */
5061 case 10: /* VMULL scalar */
5062 case 11: /* VQDMULL scalar */
ad69471c
PB
5063 if (size == 0 && (op == 3 || op == 7 || op == 11))
5064 return 1;
5065
dd8fbd78
FN
5066 tmp2 = neon_get_scalar(size, rm);
5067 tmp3 = neon_load_reg(rn, 1);
ad69471c 5068
9ee6e8bb 5069 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5070 if (pass == 0) {
5071 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5072 } else {
dd8fbd78 5073 tmp = tmp3;
9ee6e8bb 5074 }
ad69471c 5075 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5076 dead_tmp(tmp);
9ee6e8bb 5077 if (op == 6 || op == 7) {
ad69471c
PB
5078 gen_neon_negl(cpu_V0, size);
5079 }
5080 if (op != 11) {
5081 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5082 }
9ee6e8bb
PB
5083 switch (op) {
5084 case 2: case 6:
ad69471c 5085 gen_neon_addl(size);
9ee6e8bb
PB
5086 break;
5087 case 3: case 7:
ad69471c
PB
5088 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5089 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5090 break;
5091 case 10:
5092 /* no-op */
5093 break;
5094 case 11:
ad69471c 5095 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5096 break;
5097 default:
5098 abort();
5099 }
ad69471c 5100 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5101 }
dd8fbd78
FN
5102
5103 dead_tmp(tmp2);
5104
9ee6e8bb
PB
5105 break;
5106 default: /* 14 and 15 are RESERVED */
5107 return 1;
5108 }
5109 }
5110 } else { /* size == 3 */
5111 if (!u) {
5112 /* Extract. */
9ee6e8bb 5113 imm = (insn >> 8) & 0xf;
ad69471c
PB
5114 count = q + 1;
5115
5116 if (imm > 7 && !q)
5117 return 1;
5118
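                /* VEXT extracts one register's worth of bytes from the
                   concatenation <Vm:Vn> starting at byte <imm>; e.g. in the
                   64-bit case imm == 3 yields bytes 3..7 of Vn followed by
                   bytes 0..2 of Vm, which the shift/or sequences below
                   compute. */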
5119 if (imm == 0) {
5120 neon_load_reg64(cpu_V0, rn);
5121 if (q) {
5122 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5123 }
ad69471c
PB
5124 } else if (imm == 8) {
5125 neon_load_reg64(cpu_V0, rn + 1);
5126 if (q) {
5127 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5128 }
ad69471c 5129 } else if (q) {
a7812ae4 5130 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5131 if (imm < 8) {
5132 neon_load_reg64(cpu_V0, rn);
a7812ae4 5133 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5134 } else {
5135 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5136 neon_load_reg64(tmp64, rm);
ad69471c
PB
5137 }
5138 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5139 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5140 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5141 if (imm < 8) {
5142 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5143 } else {
ad69471c
PB
5144 neon_load_reg64(cpu_V1, rm + 1);
5145 imm -= 8;
9ee6e8bb 5146 }
ad69471c 5147 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5148 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5149 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
ad69471c 5150 } else {
a7812ae4 5151 /* BUGFIX */
ad69471c 5152 neon_load_reg64(cpu_V0, rn);
a7812ae4 5153 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5154 neon_load_reg64(cpu_V1, rm);
a7812ae4 5155 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5156 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5157 }
5158 neon_store_reg64(cpu_V0, rd);
5159 if (q) {
5160 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5161 }
5162 } else if ((insn & (1 << 11)) == 0) {
5163 /* Two register misc. */
5164 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5165 size = (insn >> 18) & 3;
5166 switch (op) {
5167 case 0: /* VREV64 */
5168 if (size == 3)
5169 return 1;
5170 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5171 tmp = neon_load_reg(rm, pass * 2);
5172 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5173 switch (size) {
dd8fbd78
FN
5174 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5175 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5176 case 2: /* no-op */ break;
5177 default: abort();
5178 }
dd8fbd78 5179 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5180 if (size == 2) {
dd8fbd78 5181 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5182 } else {
9ee6e8bb 5183 switch (size) {
dd8fbd78
FN
5184 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5185 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5186 default: abort();
5187 }
dd8fbd78 5188 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5189 }
5190 }
5191 break;
5192 case 4: case 5: /* VPADDL */
5193 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5194 if (size == 3)
5195 return 1;
ad69471c
PB
5196 for (pass = 0; pass < q + 1; pass++) {
5197 tmp = neon_load_reg(rm, pass * 2);
5198 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5199 tmp = neon_load_reg(rm, pass * 2 + 1);
5200 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5201 switch (size) {
5202 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5203 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5204 case 2: tcg_gen_add_i64(CPU_V001); break;
5205 default: abort();
5206 }
9ee6e8bb
PB
5207 if (op >= 12) {
5208 /* Accumulate. */
ad69471c
PB
5209 neon_load_reg64(cpu_V1, rd + pass);
5210 gen_neon_addl(size);
9ee6e8bb 5211 }
ad69471c 5212 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5213 }
5214 break;
5215 case 33: /* VTRN */
5216 if (size == 2) {
5217 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5218 tmp = neon_load_reg(rm, n);
5219 tmp2 = neon_load_reg(rd, n + 1);
5220 neon_store_reg(rm, n, tmp2);
5221 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5222 }
5223 } else {
5224 goto elementwise;
5225 }
5226 break;
5227 case 34: /* VUZP */
5228 /* Reg Before After
5229 Rd A3 A2 A1 A0 B2 B0 A2 A0
5230 Rm B3 B2 B1 B0 B3 B1 A3 A1
5231 */
5232 if (size == 3)
5233 return 1;
5234 gen_neon_unzip(rd, q, 0, size);
5235 gen_neon_unzip(rm, q, 4, size);
5236 if (q) {
5237 static int unzip_order_q[8] =
5238 {0, 2, 4, 6, 1, 3, 5, 7};
5239 for (n = 0; n < 8; n++) {
5240 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5241 tmp = neon_load_scratch(unzip_order_q[n]);
5242 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5243 }
5244 } else {
5245 static int unzip_order[4] =
5246 {0, 4, 1, 5};
5247 for (n = 0; n < 4; n++) {
5248 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5249 tmp = neon_load_scratch(unzip_order[n]);
5250 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5251 }
5252 }
5253 break;
5254 case 35: /* VZIP */
5255 /* Reg Before After
5256 Rd A3 A2 A1 A0 B1 A1 B0 A0
5257 Rm B3 B2 B1 B0 B3 A3 B2 A2
5258 */
5259 if (size == 3)
5260 return 1;
5261 count = (q ? 4 : 2);
5262 for (n = 0; n < count; n++) {
dd8fbd78
FN
5263 tmp = neon_load_reg(rd, n);
5264 tmp2 = neon_load_reg(rm, n);
9ee6e8bb 5265 switch (size) {
dd8fbd78
FN
5266 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5267 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5268 case 2: /* no-op */; break;
5269 default: abort();
5270 }
dd8fbd78
FN
5271 neon_store_scratch(n * 2, tmp);
5272 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5273 }
5274 for (n = 0; n < count * 2; n++) {
5275 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5276 tmp = neon_load_scratch(n);
5277 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5278 }
5279 break;
5280 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5281 if (size == 3)
5282 return 1;
a50f5b91 5283 TCGV_UNUSED(tmp2);
9ee6e8bb 5284 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5285 neon_load_reg64(cpu_V0, rm + pass);
5286 tmp = new_tmp();
9ee6e8bb 5287 if (op == 36 && q == 0) {
ad69471c 5288 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5289 } else if (q) {
ad69471c 5290 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5291 } else {
ad69471c
PB
5292 gen_neon_narrow_sats(size, tmp, cpu_V0);
5293 }
5294 if (pass == 0) {
5295 tmp2 = tmp;
5296 } else {
5297 neon_store_reg(rd, 0, tmp2);
5298 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5299 }
9ee6e8bb
PB
5300 }
5301 break;
5302 case 38: /* VSHLL */
ad69471c 5303 if (q || size == 3)
9ee6e8bb 5304 return 1;
ad69471c
PB
5305 tmp = neon_load_reg(rm, 0);
5306 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5307 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5308 if (pass == 1)
5309 tmp = tmp2;
5310 gen_neon_widen(cpu_V0, tmp, size, 1);
5311 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5312 }
5313 break;
5314 default:
5315 elementwise:
5316 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5317 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5318 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5319 neon_reg_offset(rm, pass));
dd8fbd78 5320 TCGV_UNUSED(tmp);
9ee6e8bb 5321 } else {
dd8fbd78 5322 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5323 }
5324 switch (op) {
5325 case 1: /* VREV32 */
5326 switch (size) {
dd8fbd78
FN
5327 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5328 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5329 default: return 1;
5330 }
5331 break;
5332 case 2: /* VREV16 */
5333 if (size != 0)
5334 return 1;
dd8fbd78 5335 gen_rev16(tmp);
9ee6e8bb 5336 break;
9ee6e8bb
PB
5337 case 8: /* CLS */
5338 switch (size) {
dd8fbd78
FN
5339 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5340 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5341 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5342 default: return 1;
5343 }
5344 break;
5345 case 9: /* CLZ */
5346 switch (size) {
dd8fbd78
FN
5347 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5348 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5349 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5350 default: return 1;
5351 }
5352 break;
5353 case 10: /* CNT */
5354 if (size != 0)
5355 return 1;
dd8fbd78 5356 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5357 break;
5358 case 11: /* VNOT */
5359 if (size != 0)
5360 return 1;
dd8fbd78 5361 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5362 break;
5363 case 14: /* VQABS */
5364 switch (size) {
dd8fbd78
FN
5365 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5366 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5367 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5368 default: return 1;
5369 }
5370 break;
5371 case 15: /* VQNEG */
5372 switch (size) {
dd8fbd78
FN
5373 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5374 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5375 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5376 default: return 1;
5377 }
5378 break;
5379 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5380 tmp2 = tcg_const_i32(0);
9ee6e8bb 5381 switch(size) {
dd8fbd78
FN
5382 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5383 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5384 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5385 default: return 1;
5386 }
dd8fbd78 5387 tcg_temp_free(tmp2);
9ee6e8bb 5388 if (op == 19)
dd8fbd78 5389 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5390 break;
5391 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5392 tmp2 = tcg_const_i32(0);
9ee6e8bb 5393 switch(size) {
dd8fbd78
FN
5394 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5395 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5396 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5397 default: return 1;
5398 }
dd8fbd78 5399 tcg_temp_free(tmp2);
9ee6e8bb 5400 if (op == 20)
dd8fbd78 5401 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5402 break;
5403 case 18: /* VCEQ #0 */
dd8fbd78 5404 tmp2 = tcg_const_i32(0);
9ee6e8bb 5405 switch(size) {
dd8fbd78
FN
5406 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5407 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5408 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5409 default: return 1;
5410 }
dd8fbd78 5411 tcg_temp_free(tmp2);
9ee6e8bb
PB
5412 break;
5413 case 22: /* VABS */
5414 switch(size) {
dd8fbd78
FN
5415 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5416 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5417 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5418 default: return 1;
5419 }
5420 break;
5421 case 23: /* VNEG */
ad69471c
PB
5422 if (size == 3)
5423 return 1;
dd8fbd78
FN
5424 tmp2 = tcg_const_i32(0);
5425 gen_neon_rsb(size, tmp, tmp2);
5426 tcg_temp_free(tmp2);
9ee6e8bb
PB
5427 break;
5428 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5429 tmp2 = tcg_const_i32(0);
5430 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5431 tcg_temp_free(tmp2);
9ee6e8bb 5432 if (op == 27)
dd8fbd78 5433 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5434 break;
5435 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5436 tmp2 = tcg_const_i32(0);
5437 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5438 tcg_temp_free(tmp2);
9ee6e8bb 5439 if (op == 28)
dd8fbd78 5440 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5441 break;
5442 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5443 tmp2 = tcg_const_i32(0);
5444 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5445 tcg_temp_free(tmp2);
9ee6e8bb
PB
5446 break;
5447 case 30: /* Float VABS */
4373f3ce 5448 gen_vfp_abs(0);
9ee6e8bb
PB
5449 break;
5450 case 31: /* Float VNEG */
4373f3ce 5451 gen_vfp_neg(0);
9ee6e8bb
PB
5452 break;
5453 case 32: /* VSWP */
dd8fbd78
FN
5454 tmp2 = neon_load_reg(rd, pass);
5455 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5456 break;
5457 case 33: /* VTRN */
dd8fbd78 5458 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5459 switch (size) {
dd8fbd78
FN
5460 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5461 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5462 case 2: abort();
5463 default: return 1;
5464 }
dd8fbd78 5465 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5466 break;
5467 case 56: /* Integer VRECPE */
dd8fbd78 5468 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5469 break;
5470 case 57: /* Integer VRSQRTE */
dd8fbd78 5471 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5472 break;
5473 case 58: /* Float VRECPE */
4373f3ce 5474 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5475 break;
5476 case 59: /* Float VRSQRTE */
4373f3ce 5477 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5478 break;
5479 case 60: /* VCVT.F32.S32 */
4373f3ce 5480 gen_vfp_tosiz(0);
9ee6e8bb
PB
5481 break;
5482 case 61: /* VCVT.F32.U32 */
4373f3ce 5483 gen_vfp_touiz(0);
9ee6e8bb
PB
5484 break;
5485 case 62: /* VCVT.S32.F32 */
4373f3ce 5486 gen_vfp_sito(0);
9ee6e8bb
PB
5487 break;
5488 case 63: /* VCVT.U32.F32 */
4373f3ce 5489 gen_vfp_uito(0);
9ee6e8bb
PB
5490 break;
5491 default:
5492 /* Reserved: 21, 29, 39-56 */
5493 return 1;
5494 }
5495 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5496 tcg_gen_st_f32(cpu_F0s, cpu_env,
5497 neon_reg_offset(rd, pass));
9ee6e8bb 5498 } else {
dd8fbd78 5499 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5500 }
5501 }
5502 break;
5503 }
5504 } else if ((insn & (1 << 10)) == 0) {
5505 /* VTBL, VTBX. */
3018f259 5506 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5507 if (insn & (1 << 6)) {
8f8e3aa4 5508 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5509 } else {
8f8e3aa4
PB
5510 tmp = new_tmp();
5511 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5512 }
8f8e3aa4
PB
5513 tmp2 = neon_load_reg(rm, 0);
5514 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5515 tcg_const_i32(n));
3018f259 5516 dead_tmp(tmp);
9ee6e8bb 5517 if (insn & (1 << 6)) {
8f8e3aa4 5518 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5519 } else {
8f8e3aa4
PB
5520 tmp = new_tmp();
5521 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5522 }
8f8e3aa4
PB
5523 tmp3 = neon_load_reg(rm, 1);
5524 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5525 tcg_const_i32(n));
5526 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5527 neon_store_reg(rd, 1, tmp3);
5528 dead_tmp(tmp);
9ee6e8bb
PB
5529 } else if ((insn & 0x380) == 0) {
5530 /* VDUP */
5531 if (insn & (1 << 19)) {
dd8fbd78 5532 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5533 } else {
dd8fbd78 5534 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5535 }
5536 if (insn & (1 << 16)) {
dd8fbd78 5537 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5538 } else if (insn & (1 << 17)) {
5539 if ((insn >> 18) & 1)
dd8fbd78 5540 gen_neon_dup_high16(tmp);
9ee6e8bb 5541 else
dd8fbd78 5542 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5543 }
5544 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5545 tmp2 = new_tmp();
5546 tcg_gen_mov_i32(tmp2, tmp);
5547 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5548 }
dd8fbd78 5549 dead_tmp(tmp);
9ee6e8bb
PB
5550 } else {
5551 return 1;
5552 }
5553 }
5554 }
5555 return 0;
5556}
5557
fe1479c3
PB
5558static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5559{
5560 int crn = (insn >> 16) & 0xf;
5561 int crm = insn & 0xf;
5562 int op1 = (insn >> 21) & 7;
5563 int op2 = (insn >> 5) & 7;
5564 int rt = (insn >> 12) & 0xf;
5565 TCGv tmp;
5566
5567 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5568 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5569 /* TEECR */
5570 if (IS_USER(s))
5571 return 1;
5572 tmp = load_cpu_field(teecr);
5573 store_reg(s, rt, tmp);
5574 return 0;
5575 }
5576 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5577 /* TEEHBR */
5578 if (IS_USER(s) && (env->teecr & 1))
5579 return 1;
5580 tmp = load_cpu_field(teehbr);
5581 store_reg(s, rt, tmp);
5582 return 0;
5583 }
5584 }
5585 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5586 op1, crn, crm, op2);
5587 return 1;
5588}
5589
5590static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5591{
5592 int crn = (insn >> 16) & 0xf;
5593 int crm = insn & 0xf;
5594 int op1 = (insn >> 21) & 7;
5595 int op2 = (insn >> 5) & 7;
5596 int rt = (insn >> 12) & 0xf;
5597 TCGv tmp;
5598
5599 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5600 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5601 /* TEECR */
5602 if (IS_USER(s))
5603 return 1;
5604 tmp = load_reg(s, rt);
5605 gen_helper_set_teecr(cpu_env, tmp);
5606 dead_tmp(tmp);
5607 return 0;
5608 }
5609 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5610 /* TEEHBR */
5611 if (IS_USER(s) && (env->teecr & 1))
5612 return 1;
5613 tmp = load_reg(s, rt);
5614 store_cpu_field(tmp, teehbr);
5615 return 0;
5616 }
5617 }
5618 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5619 op1, crn, crm, op2);
5620 return 1;
5621}
5622
9ee6e8bb
PB
5623static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5624{
5625 int cpnum;
5626
5627 cpnum = (insn >> 8) & 0xf;
5628 if (arm_feature(env, ARM_FEATURE_XSCALE)
5629 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5630 return 1;
5631
5632 switch (cpnum) {
5633 case 0:
5634 case 1:
5635 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5636 return disas_iwmmxt_insn(env, s, insn);
5637 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5638 return disas_dsp_insn(env, s, insn);
5639 }
5640 return 1;
5641 case 10:
5642 case 11:
5643 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5644 case 14:
5645 /* Coprocessors 7-15 are architecturally reserved by ARM.
5646 Unfortunately Intel decided to ignore this. */
5647 if (arm_feature(env, ARM_FEATURE_XSCALE))
5648 goto board;
5649 if (insn & (1 << 20))
5650 return disas_cp14_read(env, s, insn);
5651 else
5652 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5653 case 15:
5654 return disas_cp15_insn (env, s, insn);
5655 default:
fe1479c3 5656 board:
9ee6e8bb
PB
5657 /* Unknown coprocessor. See if the board has hooked it. */
5658 return disas_cp_insn (env, s, insn);
5659 }
5660}
5661
5e3f878a
PB
5662
5663/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5664static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5665{
5666 TCGv tmp;
5667 tmp = new_tmp();
5668 tcg_gen_trunc_i64_i32(tmp, val);
5669 store_reg(s, rlow, tmp);
5670 tmp = new_tmp();
5671 tcg_gen_shri_i64(val, val, 32);
5672 tcg_gen_trunc_i64_i32(tmp, val);
5673 store_reg(s, rhigh, tmp);
5674}
5675
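/* Scalar sketch of the split performed above (illustrative only, not used
 * by the translator): rlow receives the low 32 bits and rhigh the high 32
 * bits of the 64-bit value.
 */
static inline void ref_split_u64(uint64_t val, uint32_t *lo, uint32_t *hi)
{
    *lo = (uint32_t)val;
    *hi = (uint32_t)(val >> 32);
}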
5676/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5677static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5678{
a7812ae4 5679 TCGv_i64 tmp;
5e3f878a
PB
5680 TCGv tmp2;
5681
36aa55dc 5682 /* Load value and extend to 64 bits. */
a7812ae4 5683 tmp = tcg_temp_new_i64();
5e3f878a
PB
5684 tmp2 = load_reg(s, rlow);
5685 tcg_gen_extu_i32_i64(tmp, tmp2);
5686 dead_tmp(tmp2);
5687 tcg_gen_add_i64(val, val, tmp);
5688}
5689
5690/* load and add a 64-bit value from a register pair. */
a7812ae4 5691static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5692{
a7812ae4 5693 TCGv_i64 tmp;
36aa55dc
PB
5694 TCGv tmpl;
5695 TCGv tmph;
5e3f878a
PB
5696
5697 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5698 tmpl = load_reg(s, rlow);
5699 tmph = load_reg(s, rhigh);
a7812ae4 5700 tmp = tcg_temp_new_i64();
36aa55dc
PB
5701 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5702 dead_tmp(tmpl);
5703 dead_tmp(tmph);
5e3f878a
PB
5704 tcg_gen_add_i64(val, val, tmp);
5705}
5706
5707/* Set N and Z flags from a 64-bit value. */
a7812ae4 5708static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5709{
5710 TCGv tmp = new_tmp();
5711 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5712 gen_logic_CC(tmp);
5713 dead_tmp(tmp);
5e3f878a
PB
5714}
5715
9ee6e8bb
PB
5716static void disas_arm_insn(CPUState * env, DisasContext *s)
5717{
5718 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5719 TCGv tmp;
3670669c 5720 TCGv tmp2;
6ddbc6e4 5721 TCGv tmp3;
b0109805 5722 TCGv addr;
a7812ae4 5723 TCGv_i64 tmp64;
9ee6e8bb
PB
5724
5725 insn = ldl_code(s->pc);
5726 s->pc += 4;
5727
5728 /* M variants do not implement ARM mode. */
5729 if (IS_M(env))
5730 goto illegal_op;
5731 cond = insn >> 28;
5732 if (cond == 0xf){
5733 /* Unconditional instructions. */
5734 if (((insn >> 25) & 7) == 1) {
5735 /* NEON Data processing. */
5736 if (!arm_feature(env, ARM_FEATURE_NEON))
5737 goto illegal_op;
5738
5739 if (disas_neon_data_insn(env, s, insn))
5740 goto illegal_op;
5741 return;
5742 }
5743 if ((insn & 0x0f100000) == 0x04000000) {
5744 /* NEON load/store. */
5745 if (!arm_feature(env, ARM_FEATURE_NEON))
5746 goto illegal_op;
5747
5748 if (disas_neon_ls_insn(env, s, insn))
5749 goto illegal_op;
5750 return;
5751 }
5752 if ((insn & 0x0d70f000) == 0x0550f000)
5753 return; /* PLD */
5754 else if ((insn & 0x0ffffdff) == 0x01010000) {
5755 ARCH(6);
5756 /* setend */
5757 if (insn & (1 << 9)) {
5758 /* BE8 mode not implemented. */
5759 goto illegal_op;
5760 }
5761 return;
5762 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5763 switch ((insn >> 4) & 0xf) {
5764 case 1: /* clrex */
5765 ARCH(6K);
8f8e3aa4 5766 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5767 return;
5768 case 4: /* dsb */
5769 case 5: /* dmb */
5770 case 6: /* isb */
5771 ARCH(7);
5772 /* We don't emulate caches so these are a no-op. */
5773 return;
5774 default:
5775 goto illegal_op;
5776 }
5777 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5778 /* srs */
c67b6b71 5779 int32_t offset;
9ee6e8bb
PB
5780 if (IS_USER(s))
5781 goto illegal_op;
5782 ARCH(6);
5783 op1 = (insn & 0x1f);
5784 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5785 addr = load_reg(s, 13);
9ee6e8bb 5786 } else {
b0109805
PB
5787 addr = new_tmp();
5788 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5789 }
5790 i = (insn >> 23) & 3;
5791 switch (i) {
5792 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5793 case 1: offset = 0; break; /* IA */
5794 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
5795 case 3: offset = 4; break; /* IB */
5796 default: abort();
5797 }
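/* Two words are stored: LR at addr and SPSR at addr + 4, so the
   pre-adjustment above places them at base-4/base (DA), base/base+4 (IA),
   base-8/base-4 (DB) or base+4/base+8 (IB). */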
5798 if (offset)
b0109805
PB
5799 tcg_gen_addi_i32(addr, addr, offset);
5800 tmp = load_reg(s, 14);
5801 gen_st32(tmp, addr, 0);
c67b6b71 5802 tmp = load_cpu_field(spsr);
b0109805
PB
5803 tcg_gen_addi_i32(addr, addr, 4);
5804 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5805 if (insn & (1 << 21)) {
5806 /* Base writeback. */
5807 switch (i) {
5808 case 0: offset = -8; break;
c67b6b71
FN
5809 case 1: offset = 4; break;
5810 case 2: offset = -4; break;
9ee6e8bb
PB
5811 case 3: offset = 0; break;
5812 default: abort();
5813 }
5814 if (offset)
c67b6b71 5815 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 5816 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 5817 store_reg(s, 13, addr);
9ee6e8bb 5818 } else {
c67b6b71
FN
5819 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
5820 dead_tmp(addr);
9ee6e8bb 5821 }
b0109805
PB
5822 } else {
5823 dead_tmp(addr);
9ee6e8bb
PB
5824 }
5825 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5826 /* rfe */
c67b6b71 5827 int32_t offset;
9ee6e8bb
PB
5828 if (IS_USER(s))
5829 goto illegal_op;
5830 ARCH(6);
5831 rn = (insn >> 16) & 0xf;
b0109805 5832 addr = load_reg(s, rn);
9ee6e8bb
PB
5833 i = (insn >> 23) & 3;
5834 switch (i) {
b0109805 5835 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5836 case 1: offset = 0; break; /* IA */
5837 case 2: offset = -8; break; /* DB */
b0109805 5838 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5839 default: abort();
5840 }
5841 if (offset)
b0109805
PB
5842 tcg_gen_addi_i32(addr, addr, offset);
5843 /* Load PC into tmp and CPSR into tmp2. */
5844 tmp = gen_ld32(addr, 0);
5845 tcg_gen_addi_i32(addr, addr, 4);
5846 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5847 if (insn & (1 << 21)) {
5848 /* Base writeback. */
5849 switch (i) {
b0109805 5850 case 0: offset = -8; break;
c67b6b71
FN
5851 case 1: offset = 4; break;
5852 case 2: offset = -4; break;
b0109805 5853 case 3: offset = 0; break;
9ee6e8bb
PB
5854 default: abort();
5855 }
5856 if (offset)
b0109805
PB
5857 tcg_gen_addi_i32(addr, addr, offset);
5858 store_reg(s, rn, addr);
5859 } else {
5860 dead_tmp(addr);
9ee6e8bb 5861 }
b0109805 5862 gen_rfe(s, tmp, tmp2);
c67b6b71 5863 return;
9ee6e8bb
PB
5864 } else if ((insn & 0x0e000000) == 0x0a000000) {
5865 /* branch link and change to thumb (blx <offset>) */
5866 int32_t offset;
5867
5868 val = (uint32_t)s->pc;
d9ba4830
PB
5869 tmp = new_tmp();
5870 tcg_gen_movi_i32(tmp, val);
5871 store_reg(s, 14, tmp);
9ee6e8bb
PB
5872 /* Sign-extend the 24-bit offset */
5873 offset = (((int32_t)insn) << 8) >> 8;
5874 /* offset * 4 + bit24 * 2 + (thumb bit) */
5875 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5876 /* pipeline offset */
5877 val += 4;
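/* i.e. target = Addr(this insn) + 8 + SignExtend(imm24) * 4 + H * 2,
   with bit 0 set so that gen_bx_im switches to Thumb state (s->pc
   already points 4 bytes past this instruction). */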
d9ba4830 5878 gen_bx_im(s, val);
9ee6e8bb
PB
5879 return;
5880 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5881 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5882 /* iWMMXt register transfer. */
5883 if (env->cp15.c15_cpar & (1 << 1))
5884 if (!disas_iwmmxt_insn(env, s, insn))
5885 return;
5886 }
5887 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5888 /* Coprocessor double register transfer. */
5889 } else if ((insn & 0x0f000010) == 0x0e000010) {
5890 /* Additional coprocessor register transfer. */
7997d92f 5891 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5892 uint32_t mask;
5893 uint32_t val;
5894 /* cps (privileged) */
5895 if (IS_USER(s))
5896 return;
5897 mask = val = 0;
5898 if (insn & (1 << 19)) {
5899 if (insn & (1 << 8))
5900 mask |= CPSR_A;
5901 if (insn & (1 << 7))
5902 mask |= CPSR_I;
5903 if (insn & (1 << 6))
5904 mask |= CPSR_F;
5905 if (insn & (1 << 18))
5906 val |= mask;
5907 }
7997d92f 5908 if (insn & (1 << 17)) {
9ee6e8bb
PB
5909 mask |= CPSR_M;
5910 val |= (insn & 0x1f);
5911 }
5912 if (mask) {
2fbac54b 5913 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
5914 }
5915 return;
5916 }
5917 goto illegal_op;
5918 }
5919 if (cond != 0xe) {
5920 /* if not always execute, we generate a conditional jump to
5921 next instruction */
5922 s->condlabel = gen_new_label();
d9ba4830 5923 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5924 s->condjmp = 1;
5925 }
5926 if ((insn & 0x0f900000) == 0x03000000) {
5927 if ((insn & (1 << 21)) == 0) {
5928 ARCH(6T2);
5929 rd = (insn >> 12) & 0xf;
5930 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5931 if ((insn & (1 << 22)) == 0) {
5932 /* MOVW */
5e3f878a
PB
5933 tmp = new_tmp();
5934 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5935 } else {
5936 /* MOVT */
5e3f878a 5937 tmp = load_reg(s, rd);
86831435 5938 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5939 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5940 }
5e3f878a 5941 store_reg(s, rd, tmp);
9ee6e8bb
PB
5942 } else {
5943 if (((insn >> 12) & 0xf) != 0xf)
5944 goto illegal_op;
5945 if (((insn >> 16) & 0xf) == 0) {
5946 gen_nop_hint(s, insn & 0xff);
5947 } else {
5948 /* CPSR = immediate */
5949 val = insn & 0xff;
5950 shift = ((insn >> 8) & 0xf) * 2;
5951 if (shift)
5952 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 5953 i = ((insn & (1 << 22)) != 0);
2fbac54b 5954 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
5955 goto illegal_op;
5956 }
5957 }
5958 } else if ((insn & 0x0f900000) == 0x01000000
5959 && (insn & 0x00000090) != 0x00000090) {
5960 /* miscellaneous instructions */
5961 op1 = (insn >> 21) & 3;
5962 sh = (insn >> 4) & 0xf;
5963 rm = insn & 0xf;
5964 switch (sh) {
5965 case 0x0: /* move program status register */
5966 if (op1 & 1) {
5967 /* PSR = reg */
2fbac54b 5968 tmp = load_reg(s, rm);
9ee6e8bb 5969 i = ((op1 & 2) != 0);
2fbac54b 5970 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
5971 goto illegal_op;
5972 } else {
5973 /* reg = PSR */
5974 rd = (insn >> 12) & 0xf;
5975 if (op1 & 2) {
5976 if (IS_USER(s))
5977 goto illegal_op;
d9ba4830 5978 tmp = load_cpu_field(spsr);
9ee6e8bb 5979 } else {
d9ba4830
PB
5980 tmp = new_tmp();
5981 gen_helper_cpsr_read(tmp);
9ee6e8bb 5982 }
d9ba4830 5983 store_reg(s, rd, tmp);
9ee6e8bb
PB
5984 }
5985 break;
5986 case 0x1:
5987 if (op1 == 1) {
5988 /* branch/exchange thumb (bx). */
d9ba4830
PB
5989 tmp = load_reg(s, rm);
5990 gen_bx(s, tmp);
9ee6e8bb
PB
5991 } else if (op1 == 3) {
5992 /* clz */
5993 rd = (insn >> 12) & 0xf;
1497c961
PB
5994 tmp = load_reg(s, rm);
5995 gen_helper_clz(tmp, tmp);
5996 store_reg(s, rd, tmp);
9ee6e8bb
PB
5997 } else {
5998 goto illegal_op;
5999 }
6000 break;
6001 case 0x2:
6002 if (op1 == 1) {
6003 ARCH(5J); /* bxj */
6004 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6005 tmp = load_reg(s, rm);
6006 gen_bx(s, tmp);
9ee6e8bb
PB
6007 } else {
6008 goto illegal_op;
6009 }
6010 break;
6011 case 0x3:
6012 if (op1 != 1)
6013 goto illegal_op;
6014
6015 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6016 tmp = load_reg(s, rm);
6017 tmp2 = new_tmp();
6018 tcg_gen_movi_i32(tmp2, s->pc);
6019 store_reg(s, 14, tmp2);
6020 gen_bx(s, tmp);
9ee6e8bb
PB
6021 break;
6022 case 0x5: /* saturating add/subtract */
6023 rd = (insn >> 12) & 0xf;
6024 rn = (insn >> 16) & 0xf;
b40d0353 6025 tmp = load_reg(s, rm);
5e3f878a 6026 tmp2 = load_reg(s, rn);
9ee6e8bb 6027 if (op1 & 2)
5e3f878a 6028 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6029 if (op1 & 1)
5e3f878a 6030 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6031 else
5e3f878a
PB
6032 gen_helper_add_saturate(tmp, tmp, tmp2);
6033 dead_tmp(tmp2);
6034 store_reg(s, rd, tmp);
9ee6e8bb
PB
6035 break;
6036 case 7: /* bkpt */
6037 gen_set_condexec(s);
5e3f878a 6038 gen_set_pc_im(s->pc - 4);
d9ba4830 6039 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6040 s->is_jmp = DISAS_JUMP;
6041 break;
6042 case 0x8: /* signed multiply */
6043 case 0xa:
6044 case 0xc:
6045 case 0xe:
6046 rs = (insn >> 8) & 0xf;
6047 rn = (insn >> 12) & 0xf;
6048 rd = (insn >> 16) & 0xf;
6049 if (op1 == 1) {
6050 /* (32 * 16) >> 16 */
5e3f878a
PB
6051 tmp = load_reg(s, rm);
6052 tmp2 = load_reg(s, rs);
9ee6e8bb 6053 if (sh & 4)
5e3f878a 6054 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6055 else
5e3f878a 6056 gen_sxth(tmp2);
a7812ae4
PB
6057 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6058 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6059 tmp = new_tmp();
a7812ae4 6060 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 6061 if ((sh & 2) == 0) {
5e3f878a
PB
6062 tmp2 = load_reg(s, rn);
6063 gen_helper_add_setq(tmp, tmp, tmp2);
6064 dead_tmp(tmp2);
9ee6e8bb 6065 }
5e3f878a 6066 store_reg(s, rd, tmp);
9ee6e8bb
PB
6067 } else {
6068 /* 16 * 16 */
5e3f878a
PB
6069 tmp = load_reg(s, rm);
6070 tmp2 = load_reg(s, rs);
6071 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6072 dead_tmp(tmp2);
9ee6e8bb 6073 if (op1 == 2) {
a7812ae4
PB
6074 tmp64 = tcg_temp_new_i64();
6075 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6076 dead_tmp(tmp);
a7812ae4
PB
6077 gen_addq(s, tmp64, rn, rd);
6078 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6079 } else {
6080 if (op1 == 0) {
5e3f878a
PB
6081 tmp2 = load_reg(s, rn);
6082 gen_helper_add_setq(tmp, tmp, tmp2);
6083 dead_tmp(tmp2);
9ee6e8bb 6084 }
5e3f878a 6085 store_reg(s, rd, tmp);
9ee6e8bb
PB
6086 }
6087 }
6088 break;
6089 default:
6090 goto illegal_op;
6091 }
6092 } else if (((insn & 0x0e000000) == 0 &&
6093 (insn & 0x00000090) != 0x90) ||
6094 ((insn & 0x0e000000) == (1 << 25))) {
6095 int set_cc, logic_cc, shiftop;
6096
6097 op1 = (insn >> 21) & 0xf;
6098 set_cc = (insn >> 20) & 1;
6099 logic_cc = table_logic_cc[op1] & set_cc;
6100
6101 /* data processing instruction */
6102 if (insn & (1 << 25)) {
6103 /* immediate operand */
6104 val = insn & 0xff;
6105 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6106 if (shift) {
9ee6e8bb 6107 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6108 }
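/* This is the standard ARM modified-immediate form: an 8-bit constant
   rotated right by twice the 4-bit rotate field in bits [11:8]. */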
6109 tmp2 = new_tmp();
6110 tcg_gen_movi_i32(tmp2, val);
6111 if (logic_cc && shift) {
6112 gen_set_CF_bit31(tmp2);
6113 }
9ee6e8bb
PB
6114 } else {
6115 /* register */
6116 rm = (insn) & 0xf;
e9bb4aa9 6117 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6118 shiftop = (insn >> 5) & 3;
6119 if (!(insn & (1 << 4))) {
6120 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6121 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6122 } else {
6123 rs = (insn >> 8) & 0xf;
8984bd2e 6124 tmp = load_reg(s, rs);
e9bb4aa9 6125 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6126 }
6127 }
6128 if (op1 != 0x0f && op1 != 0x0d) {
6129 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6130 tmp = load_reg(s, rn);
6131 } else {
6132 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6133 }
6134 rd = (insn >> 12) & 0xf;
6135 switch(op1) {
6136 case 0x00:
e9bb4aa9
JR
6137 tcg_gen_and_i32(tmp, tmp, tmp2);
6138 if (logic_cc) {
6139 gen_logic_CC(tmp);
6140 }
21aeb343 6141 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6142 break;
6143 case 0x01:
e9bb4aa9
JR
6144 tcg_gen_xor_i32(tmp, tmp, tmp2);
6145 if (logic_cc) {
6146 gen_logic_CC(tmp);
6147 }
21aeb343 6148 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6149 break;
6150 case 0x02:
6151 if (set_cc && rd == 15) {
6152 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6153 if (IS_USER(s)) {
9ee6e8bb 6154 goto illegal_op;
e9bb4aa9
JR
6155 }
6156 gen_helper_sub_cc(tmp, tmp, tmp2);
6157 gen_exception_return(s, tmp);
9ee6e8bb 6158 } else {
e9bb4aa9
JR
6159 if (set_cc) {
6160 gen_helper_sub_cc(tmp, tmp, tmp2);
6161 } else {
6162 tcg_gen_sub_i32(tmp, tmp, tmp2);
6163 }
21aeb343 6164 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6165 }
6166 break;
6167 case 0x03:
e9bb4aa9
JR
6168 if (set_cc) {
6169 gen_helper_sub_cc(tmp, tmp2, tmp);
6170 } else {
6171 tcg_gen_sub_i32(tmp, tmp2, tmp);
6172 }
21aeb343 6173 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6174 break;
6175 case 0x04:
e9bb4aa9
JR
6176 if (set_cc) {
6177 gen_helper_add_cc(tmp, tmp, tmp2);
6178 } else {
6179 tcg_gen_add_i32(tmp, tmp, tmp2);
6180 }
21aeb343 6181 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6182 break;
6183 case 0x05:
e9bb4aa9
JR
6184 if (set_cc) {
6185 gen_helper_adc_cc(tmp, tmp, tmp2);
6186 } else {
6187 gen_add_carry(tmp, tmp, tmp2);
6188 }
21aeb343 6189 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6190 break;
6191 case 0x06:
e9bb4aa9
JR
6192 if (set_cc) {
6193 gen_helper_sbc_cc(tmp, tmp, tmp2);
6194 } else {
6195 gen_sub_carry(tmp, tmp, tmp2);
6196 }
21aeb343 6197 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6198 break;
6199 case 0x07:
e9bb4aa9
JR
6200 if (set_cc) {
6201 gen_helper_sbc_cc(tmp, tmp2, tmp);
6202 } else {
6203 gen_sub_carry(tmp, tmp2, tmp);
6204 }
21aeb343 6205 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6206 break;
6207 case 0x08:
6208 if (set_cc) {
e9bb4aa9
JR
6209 tcg_gen_and_i32(tmp, tmp, tmp2);
6210 gen_logic_CC(tmp);
9ee6e8bb 6211 }
e9bb4aa9 6212 dead_tmp(tmp);
9ee6e8bb
PB
6213 break;
6214 case 0x09:
6215 if (set_cc) {
e9bb4aa9
JR
6216 tcg_gen_xor_i32(tmp, tmp, tmp2);
6217 gen_logic_CC(tmp);
9ee6e8bb 6218 }
e9bb4aa9 6219 dead_tmp(tmp);
9ee6e8bb
PB
6220 break;
6221 case 0x0a:
6222 if (set_cc) {
e9bb4aa9 6223 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6224 }
e9bb4aa9 6225 dead_tmp(tmp);
9ee6e8bb
PB
6226 break;
6227 case 0x0b:
6228 if (set_cc) {
e9bb4aa9 6229 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6230 }
e9bb4aa9 6231 dead_tmp(tmp);
9ee6e8bb
PB
6232 break;
6233 case 0x0c:
e9bb4aa9
JR
6234 tcg_gen_or_i32(tmp, tmp, tmp2);
6235 if (logic_cc) {
6236 gen_logic_CC(tmp);
6237 }
21aeb343 6238 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6239 break;
6240 case 0x0d:
6241 if (logic_cc && rd == 15) {
6242 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6243 if (IS_USER(s)) {
9ee6e8bb 6244 goto illegal_op;
e9bb4aa9
JR
6245 }
6246 gen_exception_return(s, tmp2);
9ee6e8bb 6247 } else {
e9bb4aa9
JR
6248 if (logic_cc) {
6249 gen_logic_CC(tmp2);
6250 }
21aeb343 6251 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6252 }
6253 break;
6254 case 0x0e:
e9bb4aa9
JR
6255 tcg_gen_bic_i32(tmp, tmp, tmp2);
6256 if (logic_cc) {
6257 gen_logic_CC(tmp);
6258 }
21aeb343 6259 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6260 break;
6261 default:
6262 case 0x0f:
e9bb4aa9
JR
6263 tcg_gen_not_i32(tmp2, tmp2);
6264 if (logic_cc) {
6265 gen_logic_CC(tmp2);
6266 }
21aeb343 6267 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6268 break;
6269 }
e9bb4aa9
JR
6270 if (op1 != 0x0f && op1 != 0x0d) {
6271 dead_tmp(tmp2);
6272 }
9ee6e8bb
PB
6273 } else {
6274 /* other instructions */
6275 op1 = (insn >> 24) & 0xf;
6276 switch(op1) {
6277 case 0x0:
6278 case 0x1:
6279 /* multiplies, extra load/stores */
6280 sh = (insn >> 5) & 3;
6281 if (sh == 0) {
6282 if (op1 == 0x0) {
6283 rd = (insn >> 16) & 0xf;
6284 rn = (insn >> 12) & 0xf;
6285 rs = (insn >> 8) & 0xf;
6286 rm = (insn) & 0xf;
6287 op1 = (insn >> 20) & 0xf;
6288 switch (op1) {
6289 case 0: case 1: case 2: case 3: case 6:
6290 /* 32 bit mul */
5e3f878a
PB
6291 tmp = load_reg(s, rs);
6292 tmp2 = load_reg(s, rm);
6293 tcg_gen_mul_i32(tmp, tmp, tmp2);
6294 dead_tmp(tmp2);
9ee6e8bb
PB
6295 if (insn & (1 << 22)) {
6296 /* Subtract (mls) */
6297 ARCH(6T2);
5e3f878a
PB
6298 tmp2 = load_reg(s, rn);
6299 tcg_gen_sub_i32(tmp, tmp2, tmp);
6300 dead_tmp(tmp2);
9ee6e8bb
PB
6301 } else if (insn & (1 << 21)) {
6302 /* Add */
5e3f878a
PB
6303 tmp2 = load_reg(s, rn);
6304 tcg_gen_add_i32(tmp, tmp, tmp2);
6305 dead_tmp(tmp2);
9ee6e8bb
PB
6306 }
6307 if (insn & (1 << 20))
5e3f878a
PB
6308 gen_logic_CC(tmp);
6309 store_reg(s, rd, tmp);
9ee6e8bb
PB
6310 break;
6311 default:
6312 /* 64 bit mul */
5e3f878a
PB
6313 tmp = load_reg(s, rs);
6314 tmp2 = load_reg(s, rm);
9ee6e8bb 6315 if (insn & (1 << 22))
a7812ae4 6316 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6317 else
a7812ae4 6318 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6319 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6320 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6321 if (!(insn & (1 << 23))) { /* double accumulate */
6322 ARCH(6);
a7812ae4
PB
6323 gen_addq_lo(s, tmp64, rn);
6324 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6325 }
6326 if (insn & (1 << 20))
a7812ae4
PB
6327 gen_logicq_cc(tmp64);
6328 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6329 break;
6330 }
6331 } else {
6332 rn = (insn >> 16) & 0xf;
6333 rd = (insn >> 12) & 0xf;
6334 if (insn & (1 << 23)) {
6335 /* load/store exclusive */
86753403
PB
6336 op1 = (insn >> 21) & 0x3;
6337 if (op1)
a47f43d2 6338 ARCH(6K);
86753403
PB
6339 else
6340 ARCH(6);
3174f8e9 6341 addr = tcg_temp_local_new_i32();
98a46317 6342 load_reg_var(s, addr, rn);
9ee6e8bb 6343 if (insn & (1 << 20)) {
3174f8e9 6344 gen_helper_mark_exclusive(cpu_env, addr);
86753403
PB
6345 switch (op1) {
6346 case 0: /* ldrex */
6347 tmp = gen_ld32(addr, IS_USER(s));
6348 break;
6349 case 1: /* ldrexd */
6350 tmp = gen_ld32(addr, IS_USER(s));
6351 store_reg(s, rd, tmp);
6352 tcg_gen_addi_i32(addr, addr, 4);
6353 tmp = gen_ld32(addr, IS_USER(s));
6354 rd++;
6355 break;
6356 case 2: /* ldrexb */
6357 tmp = gen_ld8u(addr, IS_USER(s));
6358 break;
6359 case 3: /* ldrexh */
6360 tmp = gen_ld16u(addr, IS_USER(s));
6361 break;
6362 default:
6363 abort();
6364 }
8f8e3aa4 6365 store_reg(s, rd, tmp);
9ee6e8bb 6366 } else {
8f8e3aa4 6367 int label = gen_new_label();
9ee6e8bb 6368 rm = insn & 0xf;
3174f8e9
FN
6369 tmp2 = tcg_temp_local_new_i32();
6370 gen_helper_test_exclusive(tmp2, cpu_env, addr);
6371 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 6372 tmp = load_reg(s,rm);
86753403
PB
6373 switch (op1) {
6374 case 0: /* strex */
6375 gen_st32(tmp, addr, IS_USER(s));
6376 break;
6377 case 1: /* strexd */
6378 gen_st32(tmp, addr, IS_USER(s));
6379 tcg_gen_addi_i32(addr, addr, 4);
6380 tmp = load_reg(s, rm + 1);
6381 gen_st32(tmp, addr, IS_USER(s));
6382 break;
6383 case 2: /* strexb */
6384 gen_st8(tmp, addr, IS_USER(s));
6385 break;
6386 case 3: /* strexh */
6387 gen_st16(tmp, addr, IS_USER(s));
6388 break;
6389 default:
6390 abort();
6391 }
2637a3be 6392 gen_set_label(label);
3174f8e9
FN
6393 tcg_gen_mov_i32(cpu_R[rd], tmp2);
6394 tcg_temp_free(tmp2);
9ee6e8bb 6395 }
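/* rd receives the result of test_exclusive: 0 when the exclusive
   monitor matched and the store was performed, nonzero when the
   store was skipped. */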
3174f8e9 6396 tcg_temp_free(addr);
9ee6e8bb
PB
6397 } else {
6398 /* SWP instruction */
6399 rm = (insn) & 0xf;
6400
8984bd2e
PB
6401 /* ??? This is not really atomic. However we know
6402 we never have multiple CPUs running in parallel,
6403 so it is good enough. */
6404 addr = load_reg(s, rn);
6405 tmp = load_reg(s, rm);
9ee6e8bb 6406 if (insn & (1 << 22)) {
8984bd2e
PB
6407 tmp2 = gen_ld8u(addr, IS_USER(s));
6408 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6409 } else {
8984bd2e
PB
6410 tmp2 = gen_ld32(addr, IS_USER(s));
6411 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6412 }
8984bd2e
PB
6413 dead_tmp(addr);
6414 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6415 }
6416 }
6417 } else {
6418 int address_offset;
6419 int load;
6420 /* Misc load/store */
6421 rn = (insn >> 16) & 0xf;
6422 rd = (insn >> 12) & 0xf;
b0109805 6423 addr = load_reg(s, rn);
9ee6e8bb 6424 if (insn & (1 << 24))
b0109805 6425 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6426 address_offset = 0;
6427 if (insn & (1 << 20)) {
6428 /* load */
6429 switch(sh) {
6430 case 1:
b0109805 6431 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6432 break;
6433 case 2:
b0109805 6434 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6435 break;
6436 default:
6437 case 3:
b0109805 6438 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6439 break;
6440 }
6441 load = 1;
6442 } else if (sh & 2) {
6443 /* doubleword */
6444 if (sh & 1) {
6445 /* store */
b0109805
PB
6446 tmp = load_reg(s, rd);
6447 gen_st32(tmp, addr, IS_USER(s));
6448 tcg_gen_addi_i32(addr, addr, 4);
6449 tmp = load_reg(s, rd + 1);
6450 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6451 load = 0;
6452 } else {
6453 /* load */
b0109805
PB
6454 tmp = gen_ld32(addr, IS_USER(s));
6455 store_reg(s, rd, tmp);
6456 tcg_gen_addi_i32(addr, addr, 4);
6457 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6458 rd++;
6459 load = 1;
6460 }
6461 address_offset = -4;
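/* addr now points 4 bytes past the first transferred word, so the
   writeback paths below compensate with address_offset. */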
6462 } else {
6463 /* store */
b0109805
PB
6464 tmp = load_reg(s, rd);
6465 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6466 load = 0;
6467 }
6468 /* Perform base writeback before storing the loaded value to
6469 ensure correct behavior with overlapping index registers.
6470 ldrd with base writeback is undefined if the
6471 destination and index registers overlap. */
6472 if (!(insn & (1 << 24))) {
b0109805
PB
6473 gen_add_datah_offset(s, insn, address_offset, addr);
6474 store_reg(s, rn, addr);
9ee6e8bb
PB
6475 } else if (insn & (1 << 21)) {
6476 if (address_offset)
b0109805
PB
6477 tcg_gen_addi_i32(addr, addr, address_offset);
6478 store_reg(s, rn, addr);
6479 } else {
6480 dead_tmp(addr);
9ee6e8bb
PB
6481 }
6482 if (load) {
6483 /* Complete the load. */
b0109805 6484 store_reg(s, rd, tmp);
9ee6e8bb
PB
6485 }
6486 }
6487 break;
6488 case 0x4:
6489 case 0x5:
6490 goto do_ldst;
6491 case 0x6:
6492 case 0x7:
6493 if (insn & (1 << 4)) {
6494 ARCH(6);
6495 /* ARMv6 Media instructions. */
6496 rm = insn & 0xf;
6497 rn = (insn >> 16) & 0xf;
2c0262af 6498 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6499 rs = (insn >> 8) & 0xf;
6500 switch ((insn >> 23) & 3) {
6501 case 0: /* Parallel add/subtract. */
6502 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6503 tmp = load_reg(s, rn);
6504 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6505 sh = (insn >> 5) & 7;
6506 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6507 goto illegal_op;
6ddbc6e4
PB
6508 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6509 dead_tmp(tmp2);
6510 store_reg(s, rd, tmp);
9ee6e8bb
PB
6511 break;
6512 case 1:
6513 if ((insn & 0x00700020) == 0) {
6c95676b 6514 /* Halfword pack. */
3670669c
PB
6515 tmp = load_reg(s, rn);
6516 tmp2 = load_reg(s, rm);
9ee6e8bb 6517 shift = (insn >> 7) & 0x1f;
3670669c
PB
6518 if (insn & (1 << 6)) {
6519 /* pkhtb */
22478e79
AZ
6520 if (shift == 0)
6521 shift = 31;
6522 tcg_gen_sari_i32(tmp2, tmp2, shift);
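/* A PKHTB shift amount of 0 encodes ASR #32; shifting by 31 gives the
   same sign-filled result while keeping the TCG shift count in range. */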
3670669c 6523 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6524 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6525 } else {
6526 /* pkhbt */
22478e79
AZ
6527 if (shift)
6528 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6529 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6530 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6531 }
6532 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6533 dead_tmp(tmp2);
3670669c 6534 store_reg(s, rd, tmp);
9ee6e8bb
PB
6535 } else if ((insn & 0x00200020) == 0x00200000) {
6536 /* [us]sat */
6ddbc6e4 6537 tmp = load_reg(s, rm);
9ee6e8bb
PB
6538 shift = (insn >> 7) & 0x1f;
6539 if (insn & (1 << 6)) {
6540 if (shift == 0)
6541 shift = 31;
6ddbc6e4 6542 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6543 } else {
6ddbc6e4 6544 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6545 }
6546 sh = (insn >> 16) & 0x1f;
6547 if (sh != 0) {
6548 if (insn & (1 << 22))
6ddbc6e4 6549 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6550 else
6ddbc6e4 6551 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6552 }
6ddbc6e4 6553 store_reg(s, rd, tmp);
9ee6e8bb
PB
6554 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6555 /* [us]sat16 */
6ddbc6e4 6556 tmp = load_reg(s, rm);
9ee6e8bb
PB
6557 sh = (insn >> 16) & 0x1f;
6558 if (sh != 0) {
6559 if (insn & (1 << 22))
6ddbc6e4 6560 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6561 else
6ddbc6e4 6562 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6563 }
6ddbc6e4 6564 store_reg(s, rd, tmp);
9ee6e8bb
PB
6565 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6566 /* Select bytes. */
6ddbc6e4
PB
6567 tmp = load_reg(s, rn);
6568 tmp2 = load_reg(s, rm);
6569 tmp3 = new_tmp();
6570 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6571 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6572 dead_tmp(tmp3);
6573 dead_tmp(tmp2);
6574 store_reg(s, rd, tmp);
9ee6e8bb 6575 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6576 tmp = load_reg(s, rm);
9ee6e8bb
PB
6577 shift = (insn >> 10) & 3;
6578 /* ??? In many cases it's not necessary to do a
6579 rotate; a shift is sufficient. */
6580 if (shift != 0)
5e3f878a 6581 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6582 op1 = (insn >> 20) & 7;
6583 switch (op1) {
5e3f878a
PB
6584 case 0: gen_sxtb16(tmp); break;
6585 case 2: gen_sxtb(tmp); break;
6586 case 3: gen_sxth(tmp); break;
6587 case 4: gen_uxtb16(tmp); break;
6588 case 6: gen_uxtb(tmp); break;
6589 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6590 default: goto illegal_op;
6591 }
6592 if (rn != 15) {
5e3f878a 6593 tmp2 = load_reg(s, rn);
9ee6e8bb 6594 if ((op1 & 3) == 0) {
5e3f878a 6595 gen_add16(tmp, tmp2);
9ee6e8bb 6596 } else {
5e3f878a
PB
6597 tcg_gen_add_i32(tmp, tmp, tmp2);
6598 dead_tmp(tmp2);
9ee6e8bb
PB
6599 }
6600 }
6c95676b 6601 store_reg(s, rd, tmp);
9ee6e8bb
PB
6602 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6603 /* rev */
b0109805 6604 tmp = load_reg(s, rm);
9ee6e8bb
PB
6605 if (insn & (1 << 22)) {
6606 if (insn & (1 << 7)) {
b0109805 6607 gen_revsh(tmp);
9ee6e8bb
PB
6608 } else {
6609 ARCH(6T2);
b0109805 6610 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6611 }
6612 } else {
6613 if (insn & (1 << 7))
b0109805 6614 gen_rev16(tmp);
9ee6e8bb 6615 else
66896cb8 6616 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6617 }
b0109805 6618 store_reg(s, rd, tmp);
9ee6e8bb
PB
6619 } else {
6620 goto illegal_op;
6621 }
6622 break;
6623 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6624 tmp = load_reg(s, rm);
6625 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6626 if (insn & (1 << 20)) {
6627 /* Signed multiply most significant [accumulate]. */
a7812ae4 6628 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6629 if (insn & (1 << 5))
a7812ae4
PB
6630 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6631 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6632 tmp = new_tmp();
a7812ae4 6633 tcg_gen_trunc_i64_i32(tmp, tmp64);
955a7dd5
AZ
6634 if (rd != 15) {
6635 tmp2 = load_reg(s, rd);
9ee6e8bb 6636 if (insn & (1 << 6)) {
5e3f878a 6637 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6638 } else {
5e3f878a 6639 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6640 }
5e3f878a 6641 dead_tmp(tmp2);
9ee6e8bb 6642 }
955a7dd5 6643 store_reg(s, rn, tmp);
9ee6e8bb
PB
6644 } else {
6645 if (insn & (1 << 5))
5e3f878a
PB
6646 gen_swap_half(tmp2);
6647 gen_smul_dual(tmp, tmp2);
6648 /* This addition cannot overflow. */
6649 if (insn & (1 << 6)) {
6650 tcg_gen_sub_i32(tmp, tmp, tmp2);
6651 } else {
6652 tcg_gen_add_i32(tmp, tmp, tmp2);
6653 }
6654 dead_tmp(tmp2);
9ee6e8bb 6655 if (insn & (1 << 22)) {
5e3f878a 6656 /* smlald, smlsld */
a7812ae4
PB
6657 tmp64 = tcg_temp_new_i64();
6658 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6659 dead_tmp(tmp);
a7812ae4
PB
6660 gen_addq(s, tmp64, rd, rn);
6661 gen_storeq_reg(s, rd, rn, tmp64);
9ee6e8bb 6662 } else {
5e3f878a 6663 /* smuad, smusd, smlad, smlsd */
22478e79 6664 if (rd != 15)
9ee6e8bb 6665 {
22478e79 6666 tmp2 = load_reg(s, rd);
5e3f878a
PB
6667 gen_helper_add_setq(tmp, tmp, tmp2);
6668 dead_tmp(tmp2);
9ee6e8bb 6669 }
22478e79 6670 store_reg(s, rn, tmp);
9ee6e8bb
PB
6671 }
6672 }
6673 break;
6674 case 3:
6675 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6676 switch (op1) {
6677 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6678 ARCH(6);
6679 tmp = load_reg(s, rm);
6680 tmp2 = load_reg(s, rs);
6681 gen_helper_usad8(tmp, tmp, tmp2);
6682 dead_tmp(tmp2);
ded9d295
AZ
6683 if (rd != 15) {
6684 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6685 tcg_gen_add_i32(tmp, tmp, tmp2);
6686 dead_tmp(tmp2);
9ee6e8bb 6687 }
ded9d295 6688 store_reg(s, rn, tmp);
9ee6e8bb
PB
6689 break;
6690 case 0x20: case 0x24: case 0x28: case 0x2c:
6691 /* Bitfield insert/clear. */
6692 ARCH(6T2);
6693 shift = (insn >> 7) & 0x1f;
6694 i = (insn >> 16) & 0x1f;
6695 i = i + 1 - shift;
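/* Bits [20:16] hold the MSB of the field, so i is now the field
   width (msb + 1 - lsb). */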
6696 if (rm == 15) {
5e3f878a
PB
6697 tmp = new_tmp();
6698 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6699 } else {
5e3f878a 6700 tmp = load_reg(s, rm);
9ee6e8bb
PB
6701 }
6702 if (i != 32) {
5e3f878a 6703 tmp2 = load_reg(s, rd);
8f8e3aa4 6704 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6705 dead_tmp(tmp2);
9ee6e8bb 6706 }
5e3f878a 6707 store_reg(s, rd, tmp);
9ee6e8bb
PB
6708 break;
6709 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6710 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6711 ARCH(6T2);
5e3f878a 6712 tmp = load_reg(s, rm);
9ee6e8bb
PB
6713 shift = (insn >> 7) & 0x1f;
6714 i = ((insn >> 16) & 0x1f) + 1;
6715 if (shift + i > 32)
6716 goto illegal_op;
6717 if (i < 32) {
6718 if (op1 & 0x20) {
5e3f878a 6719 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6720 } else {
5e3f878a 6721 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6722 }
6723 }
5e3f878a 6724 store_reg(s, rd, tmp);
9ee6e8bb
PB
6725 break;
6726 default:
6727 goto illegal_op;
6728 }
6729 break;
6730 }
6731 break;
6732 }
6733 do_ldst:
6734 /* Check for undefined extension instructions
6735 * per the ARM Bible, i.e.:
6736 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6737 */
6738 sh = (0xf << 20) | (0xf << 4);
6739 if (op1 == 0x7 && ((insn & sh) == sh))
6740 {
6741 goto illegal_op;
6742 }
6743 /* load/store byte/word */
6744 rn = (insn >> 16) & 0xf;
6745 rd = (insn >> 12) & 0xf;
b0109805 6746 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6747 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6748 if (insn & (1 << 24))
b0109805 6749 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6750 if (insn & (1 << 20)) {
6751 /* load */
9ee6e8bb 6752 if (insn & (1 << 22)) {
b0109805 6753 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6754 } else {
b0109805 6755 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6756 }
9ee6e8bb
PB
6757 } else {
6758 /* store */
b0109805 6759 tmp = load_reg(s, rd);
9ee6e8bb 6760 if (insn & (1 << 22))
b0109805 6761 gen_st8(tmp, tmp2, i);
9ee6e8bb 6762 else
b0109805 6763 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6764 }
6765 if (!(insn & (1 << 24))) {
b0109805
PB
6766 gen_add_data_offset(s, insn, tmp2);
6767 store_reg(s, rn, tmp2);
6768 } else if (insn & (1 << 21)) {
6769 store_reg(s, rn, tmp2);
6770 } else {
6771 dead_tmp(tmp2);
9ee6e8bb
PB
6772 }
6773 if (insn & (1 << 20)) {
6774 /* Complete the load. */
6775 if (rd == 15)
b0109805 6776 gen_bx(s, tmp);
9ee6e8bb 6777 else
b0109805 6778 store_reg(s, rd, tmp);
9ee6e8bb
PB
6779 }
6780 break;
6781 case 0x08:
6782 case 0x09:
6783 {
6784 int j, n, user, loaded_base;
b0109805 6785 TCGv loaded_var;
9ee6e8bb
PB
6786 /* load/store multiple words */
6787 /* XXX: store correct base if write back */
6788 user = 0;
6789 if (insn & (1 << 22)) {
6790 if (IS_USER(s))
6791 goto illegal_op; /* only usable in supervisor mode */
6792
6793 if ((insn & (1 << 15)) == 0)
6794 user = 1;
6795 }
6796 rn = (insn >> 16) & 0xf;
b0109805 6797 addr = load_reg(s, rn);
9ee6e8bb
PB
6798
6799 /* compute total size */
6800 loaded_base = 0;
a50f5b91 6801 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6802 n = 0;
6803 for(i=0;i<16;i++) {
6804 if (insn & (1 << i))
6805 n++;
6806 }
6807 /* XXX: test invalid n == 0 case ? */
6808 if (insn & (1 << 23)) {
6809 if (insn & (1 << 24)) {
6810 /* pre increment */
b0109805 6811 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6812 } else {
6813 /* post increment */
6814 }
6815 } else {
6816 if (insn & (1 << 24)) {
6817 /* pre decrement */
b0109805 6818 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6819 } else {
6820 /* post decrement */
6821 if (n != 1)
b0109805 6822 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6823 }
6824 }
6825 j = 0;
6826 for(i=0;i<16;i++) {
6827 if (insn & (1 << i)) {
6828 if (insn & (1 << 20)) {
6829 /* load */
b0109805 6830 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6831 if (i == 15) {
b0109805 6832 gen_bx(s, tmp);
9ee6e8bb 6833 } else if (user) {
b0109805
PB
6834 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6835 dead_tmp(tmp);
9ee6e8bb 6836 } else if (i == rn) {
b0109805 6837 loaded_var = tmp;
9ee6e8bb
PB
6838 loaded_base = 1;
6839 } else {
b0109805 6840 store_reg(s, i, tmp);
9ee6e8bb
PB
6841 }
6842 } else {
6843 /* store */
6844 if (i == 15) {
6845 /* special case: r15 = PC + 8 */
6846 val = (long)s->pc + 4;
b0109805
PB
6847 tmp = new_tmp();
6848 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6849 } else if (user) {
b0109805
PB
6850 tmp = new_tmp();
6851 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6852 } else {
b0109805 6853 tmp = load_reg(s, i);
9ee6e8bb 6854 }
b0109805 6855 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6856 }
6857 j++;
6858 /* no need to add after the last transfer */
6859 if (j != n)
b0109805 6860 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6861 }
6862 }
6863 if (insn & (1 << 21)) {
6864 /* write back */
6865 if (insn & (1 << 23)) {
6866 if (insn & (1 << 24)) {
6867 /* pre increment */
6868 } else {
6869 /* post increment */
b0109805 6870 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6871 }
6872 } else {
6873 if (insn & (1 << 24)) {
6874 /* pre decrement */
6875 if (n != 1)
b0109805 6876 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6877 } else {
6878 /* post decrement */
b0109805 6879 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6880 }
6881 }
b0109805
PB
6882 store_reg(s, rn, addr);
6883 } else {
6884 dead_tmp(addr);
9ee6e8bb
PB
6885 }
6886 if (loaded_base) {
b0109805 6887 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6888 }
6889 if ((insn & (1 << 22)) && !user) {
6890 /* Restore CPSR from SPSR. */
d9ba4830
PB
6891 tmp = load_cpu_field(spsr);
6892 gen_set_cpsr(tmp, 0xffffffff);
6893 dead_tmp(tmp);
9ee6e8bb
PB
6894 s->is_jmp = DISAS_UPDATE;
6895 }
6896 }
6897 break;
6898 case 0xa:
6899 case 0xb:
6900 {
6901 int32_t offset;
6902
6903 /* branch (and link) */
6904 val = (int32_t)s->pc;
6905 if (insn & (1 << 24)) {
5e3f878a
PB
6906 tmp = new_tmp();
6907 tcg_gen_movi_i32(tmp, val);
6908 store_reg(s, 14, tmp);
9ee6e8bb
PB
6909 }
6910 offset = (((int32_t)insn << 8) >> 8);
6911 val += (offset << 2) + 4;
6912 gen_jmp(s, val);
6913 }
6914 break;
6915 case 0xc:
6916 case 0xd:
6917 case 0xe:
6918 /* Coprocessor. */
6919 if (disas_coproc_insn(env, s, insn))
6920 goto illegal_op;
6921 break;
6922 case 0xf:
6923 /* swi */
5e3f878a 6924 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6925 s->is_jmp = DISAS_SWI;
6926 break;
6927 default:
6928 illegal_op:
6929 gen_set_condexec(s);
5e3f878a 6930 gen_set_pc_im(s->pc - 4);
d9ba4830 6931 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6932 s->is_jmp = DISAS_JUMP;
6933 break;
6934 }
6935 }
6936}
6937
6938/* Return true if this is a Thumb-2 logical op. */
6939static int
6940thumb2_logic_op(int op)
6941{
6942 return (op < 8);
6943}
6944
6945/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6946 then set condition code flags based on the result of the operation.
6947 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6948 to the high bit of T1.
6949 Returns zero if the opcode is valid. */
6950
6951static int
396e467c 6952gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
6953{
6954 int logic_cc;
6955
6956 logic_cc = 0;
6957 switch (op) {
6958 case 0: /* and */
396e467c 6959 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
6960 logic_cc = conds;
6961 break;
6962 case 1: /* bic */
396e467c 6963 tcg_gen_bic_i32(t0, t0, t1);
9ee6e8bb
PB
6964 logic_cc = conds;
6965 break;
6966 case 2: /* orr */
396e467c 6967 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
6968 logic_cc = conds;
6969 break;
6970 case 3: /* orn */
396e467c
FN
6971 tcg_gen_not_i32(t1, t1);
6972 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
6973 logic_cc = conds;
6974 break;
6975 case 4: /* eor */
396e467c 6976 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
6977 logic_cc = conds;
6978 break;
6979 case 8: /* add */
6980 if (conds)
396e467c 6981 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 6982 else
396e467c 6983 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
6984 break;
6985 case 10: /* adc */
6986 if (conds)
396e467c 6987 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 6988 else
396e467c 6989 gen_adc(t0, t1);
9ee6e8bb
PB
6990 break;
6991 case 11: /* sbc */
6992 if (conds)
396e467c 6993 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 6994 else
396e467c 6995 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
6996 break;
6997 case 13: /* sub */
6998 if (conds)
396e467c 6999 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7000 else
396e467c 7001 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7002 break;
7003 case 14: /* rsb */
7004 if (conds)
396e467c 7005 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7006 else
396e467c 7007 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7008 break;
7009 default: /* 5, 6, 7, 9, 12, 15. */
7010 return 1;
7011 }
7012 if (logic_cc) {
396e467c 7013 gen_logic_CC(t0);
9ee6e8bb 7014 if (shifter_out)
396e467c 7015 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7016 }
7017 return 0;
7018}
7019
7020/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7021 is not legal. */
7022static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7023{
b0109805 7024 uint32_t insn, imm, shift, offset;
9ee6e8bb 7025 uint32_t rd, rn, rm, rs;
b26eefb6 7026 TCGv tmp;
6ddbc6e4
PB
7027 TCGv tmp2;
7028 TCGv tmp3;
b0109805 7029 TCGv addr;
a7812ae4 7030 TCGv_i64 tmp64;
9ee6e8bb
PB
7031 int op;
7032 int shiftop;
7033 int conds;
7034 int logic_cc;
7035
7036 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7037 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7038 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7039 16-bit instructions to get correct prefetch abort behavior. */
7040 insn = insn_hw1;
7041 if ((insn & (1 << 12)) == 0) {
7042 /* Second half of blx. */
7043 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7044 tmp = load_reg(s, 14);
7045 tcg_gen_addi_i32(tmp, tmp, offset);
7046 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
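/* The second half of blx exchanges to ARM state, so the computed
   branch target is forced to be word aligned. */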
9ee6e8bb 7047
d9ba4830 7048 tmp2 = new_tmp();
b0109805 7049 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7050 store_reg(s, 14, tmp2);
7051 gen_bx(s, tmp);
9ee6e8bb
PB
7052 return 0;
7053 }
7054 if (insn & (1 << 11)) {
7055 /* Second half of bl. */
7056 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7057 tmp = load_reg(s, 14);
6a0d8a1d 7058 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7059
d9ba4830 7060 tmp2 = new_tmp();
b0109805 7061 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7062 store_reg(s, 14, tmp2);
7063 gen_bx(s, tmp);
9ee6e8bb
PB
7064 return 0;
7065 }
7066 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7067 /* Instruction spans a page boundary. Implement it as two
7068 16-bit instructions in case the second half causes a
7069 prefetch abort. */
7070 offset = ((int32_t)insn << 21) >> 9;
396e467c 7071 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7072 return 0;
7073 }
7074 /* Fall through to 32-bit decode. */
7075 }
7076
7077 insn = lduw_code(s->pc);
7078 s->pc += 2;
7079 insn |= (uint32_t)insn_hw1 << 16;
7080
7081 if ((insn & 0xf800e800) != 0xf000e800) {
7082 ARCH(6T2);
7083 }
7084
7085 rn = (insn >> 16) & 0xf;
7086 rs = (insn >> 12) & 0xf;
7087 rd = (insn >> 8) & 0xf;
7088 rm = insn & 0xf;
7089 switch ((insn >> 25) & 0xf) {
7090 case 0: case 1: case 2: case 3:
7091 /* 16-bit instructions. Should never happen. */
7092 abort();
7093 case 4:
7094 if (insn & (1 << 22)) {
7095 /* Other load/store, table branch. */
7096 if (insn & 0x01200000) {
7097 /* Load/store doubleword. */
7098 if (rn == 15) {
b0109805
PB
7099 addr = new_tmp();
7100 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7101 } else {
b0109805 7102 addr = load_reg(s, rn);
9ee6e8bb
PB
7103 }
7104 offset = (insn & 0xff) * 4;
7105 if ((insn & (1 << 23)) == 0)
7106 offset = -offset;
7107 if (insn & (1 << 24)) {
b0109805 7108 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7109 offset = 0;
7110 }
7111 if (insn & (1 << 20)) {
7112 /* ldrd */
b0109805
PB
7113 tmp = gen_ld32(addr, IS_USER(s));
7114 store_reg(s, rs, tmp);
7115 tcg_gen_addi_i32(addr, addr, 4);
7116 tmp = gen_ld32(addr, IS_USER(s));
7117 store_reg(s, rd, tmp);
9ee6e8bb
PB
7118 } else {
7119 /* strd */
b0109805
PB
7120 tmp = load_reg(s, rs);
7121 gen_st32(tmp, addr, IS_USER(s));
7122 tcg_gen_addi_i32(addr, addr, 4);
7123 tmp = load_reg(s, rd);
7124 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7125 }
7126 if (insn & (1 << 21)) {
7127 /* Base writeback. */
7128 if (rn == 15)
7129 goto illegal_op;
b0109805
PB
7130 tcg_gen_addi_i32(addr, addr, offset - 4);
7131 store_reg(s, rn, addr);
7132 } else {
7133 dead_tmp(addr);
9ee6e8bb
PB
7134 }
7135 } else if ((insn & (1 << 23)) == 0) {
7136 /* Load/store exclusive word. */
3174f8e9 7137 addr = tcg_temp_local_new();
98a46317 7138 load_reg_var(s, addr, rn);
2c0262af 7139 if (insn & (1 << 20)) {
3174f8e9 7140 gen_helper_mark_exclusive(cpu_env, addr);
8f8e3aa4
PB
7141 tmp = gen_ld32(addr, IS_USER(s));
7142 store_reg(s, rd, tmp);
9ee6e8bb 7143 } else {
8f8e3aa4 7144 int label = gen_new_label();
3174f8e9
FN
7145 tmp2 = tcg_temp_local_new();
7146 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7147 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7148 tmp = load_reg(s, rs);
3174f8e9 7149 gen_st32(tmp, addr, IS_USER(s));
8f8e3aa4 7150 gen_set_label(label);
3174f8e9
FN
7151 tcg_gen_mov_i32(cpu_R[rd], tmp2);
7152 tcg_temp_free(tmp2);
9ee6e8bb 7153 }
3174f8e9 7154 tcg_temp_free(addr);
9ee6e8bb
PB
7155 } else if ((insn & (1 << 6)) == 0) {
7156 /* Table Branch. */
7157 if (rn == 15) {
b0109805
PB
7158 addr = new_tmp();
7159 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7160 } else {
b0109805 7161 addr = load_reg(s, rn);
9ee6e8bb 7162 }
b26eefb6 7163 tmp = load_reg(s, rm);
b0109805 7164 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7165 if (insn & (1 << 4)) {
7166 /* tbh */
b0109805 7167 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7168 dead_tmp(tmp);
b0109805 7169 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7170 } else { /* tbb */
b26eefb6 7171 dead_tmp(tmp);
b0109805 7172 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7173 }
b0109805
PB
7174 dead_tmp(addr);
7175 tcg_gen_shli_i32(tmp, tmp, 1);
7176 tcg_gen_addi_i32(tmp, tmp, s->pc);
7177 store_reg(s, 15, tmp);
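/* The byte (tbb) or halfword (tbh) loaded from Rn + Rm (or Rn + 2*Rm)
   is a forward offset in halfwords; it is doubled and added to the PC
   of the following instruction. */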
9ee6e8bb
PB
7178 } else {
7179 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7180 /* ??? These are not really atomic. However we know
7181 we never have multiple CPUs running in parallel,
7182 so it is good enough. */
9ee6e8bb 7183 op = (insn >> 4) & 0x3;
3174f8e9 7184 addr = tcg_temp_local_new();
98a46317 7185 load_reg_var(s, addr, rn);
9ee6e8bb 7186 if (insn & (1 << 20)) {
8f8e3aa4 7187 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7188 switch (op) {
7189 case 0:
8f8e3aa4 7190 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7191 break;
2c0262af 7192 case 1:
8f8e3aa4 7193 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7194 break;
9ee6e8bb 7195 case 3:
8f8e3aa4
PB
7196 tmp = gen_ld32(addr, IS_USER(s));
7197 tcg_gen_addi_i32(addr, addr, 4);
7198 tmp2 = gen_ld32(addr, IS_USER(s));
7199 store_reg(s, rd, tmp2);
2c0262af
FB
7200 break;
7201 default:
9ee6e8bb
PB
7202 goto illegal_op;
7203 }
8f8e3aa4 7204 store_reg(s, rs, tmp);
9ee6e8bb 7205 } else {
8f8e3aa4 7206 int label = gen_new_label();
3174f8e9
FN
7207 tmp2 = tcg_temp_local_new();
7208 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7209 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7210 tmp = load_reg(s, rs);
9ee6e8bb
PB
7211 switch (op) {
7212 case 0:
8f8e3aa4 7213 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7214 break;
7215 case 1:
8f8e3aa4 7216 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7217 break;
2c0262af 7218 case 3:
8f8e3aa4
PB
7219 gen_st32(tmp, addr, IS_USER(s));
7220 tcg_gen_addi_i32(addr, addr, 4);
7221 tmp = load_reg(s, rd);
7222 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7223 break;
9ee6e8bb
PB
7224 default:
7225 goto illegal_op;
2c0262af 7226 }
8f8e3aa4 7227 gen_set_label(label);
3174f8e9
FN
7228 tcg_gen_mov_i32(cpu_R[rm], tmp2);
7229 tcg_temp_free(tmp2);
9ee6e8bb 7230 }
3174f8e9 7231 tcg_temp_free(addr);
9ee6e8bb
PB
7232 }
7233 } else {
7234 /* Load/store multiple, RFE, SRS. */
7235 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7236 /* Not available in user mode. */
b0109805 7237 if (IS_USER(s))
9ee6e8bb
PB
7238 goto illegal_op;
7239 if (insn & (1 << 20)) {
7240 /* rfe */
b0109805
PB
7241 addr = load_reg(s, rn);
7242 if ((insn & (1 << 24)) == 0)
7243 tcg_gen_addi_i32(addr, addr, -8);
7244 /* Load PC into tmp and CPSR into tmp2. */
7245 tmp = gen_ld32(addr, 0);
7246 tcg_gen_addi_i32(addr, addr, 4);
7247 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7248 if (insn & (1 << 21)) {
7249 /* Base writeback. */
b0109805
PB
7250 if (insn & (1 << 24)) {
7251 tcg_gen_addi_i32(addr, addr, 4);
7252 } else {
7253 tcg_gen_addi_i32(addr, addr, -4);
7254 }
7255 store_reg(s, rn, addr);
7256 } else {
7257 dead_tmp(addr);
9ee6e8bb 7258 }
b0109805 7259 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7260 } else {
7261 /* srs */
7262 op = (insn & 0x1f);
7263 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7264 addr = load_reg(s, 13);
9ee6e8bb 7265 } else {
b0109805
PB
7266 addr = new_tmp();
7267 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7268 }
7269 if ((insn & (1 << 24)) == 0) {
b0109805 7270 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7271 }
b0109805
PB
7272 tmp = load_reg(s, 14);
7273 gen_st32(tmp, addr, 0);
7274 tcg_gen_addi_i32(addr, addr, 4);
7275 tmp = new_tmp();
7276 gen_helper_cpsr_read(tmp);
7277 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7278 if (insn & (1 << 21)) {
7279 if ((insn & (1 << 24)) == 0) {
b0109805 7280 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7281 } else {
b0109805 7282 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7283 }
7284 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7285 store_reg(s, 13, addr);
9ee6e8bb 7286 } else {
b0109805
PB
7287 gen_helper_set_r13_banked(cpu_env,
7288 tcg_const_i32(op), addr);
9ee6e8bb 7289 }
b0109805
PB
7290 } else {
7291 dead_tmp(addr);
9ee6e8bb
PB
7292 }
7293 }
7294 } else {
7295 int i;
7296 /* Load/store multiple. */
b0109805 7297 addr = load_reg(s, rn);
9ee6e8bb
PB
7298 offset = 0;
7299 for (i = 0; i < 16; i++) {
7300 if (insn & (1 << i))
7301 offset += 4;
7302 }
7303 if (insn & (1 << 24)) {
b0109805 7304 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7305 }
7306
7307 for (i = 0; i < 16; i++) {
7308 if ((insn & (1 << i)) == 0)
7309 continue;
7310 if (insn & (1 << 20)) {
7311 /* Load. */
b0109805 7312 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7313 if (i == 15) {
b0109805 7314 gen_bx(s, tmp);
9ee6e8bb 7315 } else {
b0109805 7316 store_reg(s, i, tmp);
9ee6e8bb
PB
7317 }
7318 } else {
7319 /* Store. */
b0109805
PB
7320 tmp = load_reg(s, i);
7321 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7322 }
b0109805 7323 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7324 }
7325 if (insn & (1 << 21)) {
7326 /* Base register writeback. */
7327 if (insn & (1 << 24)) {
b0109805 7328 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7329 }
7330 /* Fault if writeback register is in register list. */
7331 if (insn & (1 << rn))
7332 goto illegal_op;
b0109805
PB
7333 store_reg(s, rn, addr);
7334 } else {
7335 dead_tmp(addr);
9ee6e8bb
PB
7336 }
7337 }
7338 }
7339 break;
7340 case 5: /* Data processing register constant shift. */
3174f8e9
FN
7341 if (rn == 15) {
7342 tmp = new_tmp();
7343 tcg_gen_movi_i32(tmp, 0);
7344 } else {
7345 tmp = load_reg(s, rn);
7346 }
7347 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7348 op = (insn >> 21) & 0xf;
7349 shiftop = (insn >> 4) & 3;
7350 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7351 conds = (insn & (1 << 20)) != 0;
7352 logic_cc = (conds && thumb2_logic_op(op));
3174f8e9
FN
7353 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7354 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9ee6e8bb 7355 goto illegal_op;
3174f8e9
FN
7356 dead_tmp(tmp2);
7357 if (rd != 15) {
7358 store_reg(s, rd, tmp);
7359 } else {
7360 dead_tmp(tmp);
7361 }
9ee6e8bb
PB
7362 break;
7363 case 13: /* Misc data processing. */
7364 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7365 if (op < 4 && (insn & 0xf000) != 0xf000)
7366 goto illegal_op;
7367 switch (op) {
7368 case 0: /* Register controlled shift. */
8984bd2e
PB
7369 tmp = load_reg(s, rn);
7370 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7371 if ((insn & 0x70) != 0)
7372 goto illegal_op;
7373 op = (insn >> 21) & 3;
8984bd2e
PB
7374 logic_cc = (insn & (1 << 20)) != 0;
7375 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7376 if (logic_cc)
7377 gen_logic_CC(tmp);
21aeb343 7378 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7379 break;
7380 case 1: /* Sign/zero extend. */
5e3f878a 7381 tmp = load_reg(s, rm);
9ee6e8bb
PB
7382 shift = (insn >> 4) & 3;
7383 /* ??? In many cases it's not necessary to do a
7384 rotate; a shift is sufficient. */
7385 if (shift != 0)
5e3f878a 7386 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7387 op = (insn >> 20) & 7;
7388 switch (op) {
5e3f878a
PB
7389 case 0: gen_sxth(tmp); break;
7390 case 1: gen_uxth(tmp); break;
7391 case 2: gen_sxtb16(tmp); break;
7392 case 3: gen_uxtb16(tmp); break;
7393 case 4: gen_sxtb(tmp); break;
7394 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7395 default: goto illegal_op;
7396 }
7397 if (rn != 15) {
5e3f878a 7398 tmp2 = load_reg(s, rn);
9ee6e8bb 7399 if ((op >> 1) == 1) {
5e3f878a 7400 gen_add16(tmp, tmp2);
9ee6e8bb 7401 } else {
5e3f878a
PB
7402 tcg_gen_add_i32(tmp, tmp, tmp2);
7403 dead_tmp(tmp2);
9ee6e8bb
PB
7404 }
7405 }
5e3f878a 7406 store_reg(s, rd, tmp);
9ee6e8bb
PB
7407 break;
7408 case 2: /* SIMD add/subtract. */
7409 op = (insn >> 20) & 7;
7410 shift = (insn >> 4) & 7;
7411 if ((op & 3) == 3 || (shift & 3) == 3)
7412 goto illegal_op;
6ddbc6e4
PB
7413 tmp = load_reg(s, rn);
7414 tmp2 = load_reg(s, rm);
7415 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7416 dead_tmp(tmp2);
7417 store_reg(s, rd, tmp);
9ee6e8bb
PB
7418 break;
7419 case 3: /* Other data processing. */
7420 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7421 if (op < 4) {
7422 /* Saturating add/subtract. */
d9ba4830
PB
7423 tmp = load_reg(s, rn);
7424 tmp2 = load_reg(s, rm);
9ee6e8bb 7425 if (op & 2)
d9ba4830 7426 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7427 if (op & 1)
d9ba4830 7428 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7429 else
d9ba4830
PB
7430 gen_helper_add_saturate(tmp, tmp, tmp2);
7431 dead_tmp(tmp2);
9ee6e8bb 7432 } else {
d9ba4830 7433 tmp = load_reg(s, rn);
9ee6e8bb
PB
7434 switch (op) {
7435 case 0x0a: /* rbit */
d9ba4830 7436 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7437 break;
7438 case 0x08: /* rev */
66896cb8 7439 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7440 break;
7441 case 0x09: /* rev16 */
d9ba4830 7442 gen_rev16(tmp);
9ee6e8bb
PB
7443 break;
7444 case 0x0b: /* revsh */
d9ba4830 7445 gen_revsh(tmp);
9ee6e8bb
PB
7446 break;
7447 case 0x10: /* sel */
d9ba4830 7448 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7449 tmp3 = new_tmp();
7450 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7451 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7452 dead_tmp(tmp3);
d9ba4830 7453 dead_tmp(tmp2);
9ee6e8bb
PB
7454 break;
7455 case 0x18: /* clz */
d9ba4830 7456 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7457 break;
7458 default:
7459 goto illegal_op;
7460 }
7461 }
d9ba4830 7462 store_reg(s, rd, tmp);
9ee6e8bb
PB
7463 break;
7464 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7465 op = (insn >> 4) & 0xf;
d9ba4830
PB
7466 tmp = load_reg(s, rn);
7467 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7468 switch ((insn >> 20) & 7) {
7469 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7470 tcg_gen_mul_i32(tmp, tmp, tmp2);
7471 dead_tmp(tmp2);
9ee6e8bb 7472 if (rs != 15) {
d9ba4830 7473 tmp2 = load_reg(s, rs);
9ee6e8bb 7474 if (op)
d9ba4830 7475 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7476 else
d9ba4830
PB
7477 tcg_gen_add_i32(tmp, tmp, tmp2);
7478 dead_tmp(tmp2);
9ee6e8bb 7479 }
9ee6e8bb
PB
7480 break;
7481 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7482 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7483 dead_tmp(tmp2);
9ee6e8bb 7484 if (rs != 15) {
d9ba4830
PB
7485 tmp2 = load_reg(s, rs);
7486 gen_helper_add_setq(tmp, tmp, tmp2);
7487 dead_tmp(tmp2);
9ee6e8bb 7488 }
9ee6e8bb
PB
7489 break;
7490 case 2: /* Dual multiply add. */
7491 case 4: /* Dual multiply subtract. */
7492 if (op)
d9ba4830
PB
7493 gen_swap_half(tmp2);
7494 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7495 /* This addition cannot overflow. */
7496 if (insn & (1 << 22)) {
d9ba4830 7497 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7498 } else {
d9ba4830 7499 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7500 }
d9ba4830 7501 dead_tmp(tmp2);
9ee6e8bb
PB
7502 if (rs != 15)
7503 {
d9ba4830
PB
7504 tmp2 = load_reg(s, rs);
7505 gen_helper_add_setq(tmp, tmp, tmp2);
7506 dead_tmp(tmp2);
9ee6e8bb 7507 }
9ee6e8bb
PB
7508 break;
7509 case 3: /* 32 * 16 -> 32msb */
7510 if (op)
d9ba4830 7511 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7512 else
d9ba4830 7513 gen_sxth(tmp2);
a7812ae4
PB
7514 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7515 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7516 tmp = new_tmp();
a7812ae4 7517 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb
PB
7518 if (rs != 15)
7519 {
d9ba4830
PB
7520 tmp2 = load_reg(s, rs);
7521 gen_helper_add_setq(tmp, tmp, tmp2);
7522 dead_tmp(tmp2);
9ee6e8bb 7523 }
9ee6e8bb
PB
7524 break;
7525 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7526 gen_imull(tmp, tmp2);
7527 if (insn & (1 << 5)) {
7528 gen_roundqd(tmp, tmp2);
7529 dead_tmp(tmp2);
7530 } else {
7531 dead_tmp(tmp);
7532 tmp = tmp2;
7533 }
9ee6e8bb 7534 if (rs != 15) {
d9ba4830 7535 tmp2 = load_reg(s, rs);
9ee6e8bb 7536 if (insn & (1 << 21)) {
d9ba4830 7537 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7538 } else {
d9ba4830 7539 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7540 }
d9ba4830 7541 dead_tmp(tmp2);
2c0262af 7542 }
9ee6e8bb
PB
7543 break;
7544 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7545 gen_helper_usad8(tmp, tmp, tmp2);
7546 dead_tmp(tmp2);
9ee6e8bb 7547 if (rs != 15) {
d9ba4830
PB
7548 tmp2 = load_reg(s, rs);
7549 tcg_gen_add_i32(tmp, tmp, tmp2);
7550 dead_tmp(tmp2);
5fd46862 7551 }
9ee6e8bb 7552 break;
2c0262af 7553 }
d9ba4830 7554 store_reg(s, rd, tmp);
2c0262af 7555 break;
9ee6e8bb
PB
7556 case 6: case 7: /* 64-bit multiply, Divide. */
7557 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7558 tmp = load_reg(s, rn);
7559 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7560 if ((op & 0x50) == 0x10) {
7561 /* sdiv, udiv */
7562 if (!arm_feature(env, ARM_FEATURE_DIV))
7563 goto illegal_op;
7564 if (op & 0x20)
5e3f878a 7565 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7566 else
5e3f878a
PB
7567 gen_helper_sdiv(tmp, tmp, tmp2);
7568 dead_tmp(tmp2);
7569 store_reg(s, rd, tmp);
9ee6e8bb
PB
7570 } else if ((op & 0xe) == 0xc) {
7571 /* Dual multiply accumulate long. */
7572 if (op & 1)
5e3f878a
PB
7573 gen_swap_half(tmp2);
7574 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7575 if (op & 0x10) {
5e3f878a 7576 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7577 } else {
5e3f878a 7578 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7579 }
5e3f878a 7580 dead_tmp(tmp2);
a7812ae4
PB
7581 /* BUGFIX */
7582 tmp64 = tcg_temp_new_i64();
7583 tcg_gen_ext_i32_i64(tmp64, tmp);
7584 dead_tmp(tmp);
7585 gen_addq(s, tmp64, rs, rd);
7586 gen_storeq_reg(s, rs, rd, tmp64);
2c0262af 7587 } else {
9ee6e8bb
PB
7588 if (op & 0x20) {
7589 /* Unsigned 64-bit multiply */
a7812ae4 7590 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7591 } else {
9ee6e8bb
PB
7592 if (op & 8) {
7593 /* smlalxy */
5e3f878a
PB
7594 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7595 dead_tmp(tmp2);
a7812ae4
PB
7596 tmp64 = tcg_temp_new_i64();
7597 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7598 dead_tmp(tmp);
9ee6e8bb
PB
7599 } else {
7600 /* Signed 64-bit multiply */
a7812ae4 7601 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7602 }
b5ff1b31 7603 }
9ee6e8bb
PB
7604 if (op & 4) {
7605 /* umaal */
a7812ae4
PB
7606 gen_addq_lo(s, tmp64, rs);
7607 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7608 } else if (op & 0x40) {
7609 /* 64-bit accumulate. */
a7812ae4 7610 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7611 }
a7812ae4 7612 gen_storeq_reg(s, rs, rd, tmp64);
5fd46862 7613 }
2c0262af 7614 break;
9ee6e8bb
PB
7615 }
7616 break;
7617 case 6: case 7: case 14: case 15:
7618 /* Coprocessor. */
7619 if (((insn >> 24) & 3) == 3) {
7620 /* Translate into the equivalent ARM encoding. */
7621 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7622 if (disas_neon_data_insn(env, s, insn))
7623 goto illegal_op;
7624 } else {
7625 if (insn & (1 << 28))
7626 goto illegal_op;
7627 if (disas_coproc_insn (env, s, insn))
7628 goto illegal_op;
7629 }
7630 break;
7631 case 8: case 9: case 10: case 11:
7632 if (insn & (1 << 15)) {
7633 /* Branches, misc control. */
7634 if (insn & 0x5000) {
7635 /* Unconditional branch. */
7636 /* signextend(hw1[10:0]) -> offset[:12]. */
7637 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7638 /* hw2[10:0] -> offset[11:1]. */
7639 offset |= (insn & 0x7ff) << 1;
7640 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7641 offset[24:22] already have the same value because of the
7642 sign extension above. */
7643 offset ^= ((~insn) & (1 << 13)) << 10;
7644 offset ^= ((~insn) & (1 << 11)) << 11;
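/* Decode note: this is the Thumb-2 BL/BLX immediate.  The branch offset is
 * SignExtend(S:I1:I2:imm10:imm11:'0'), where S = hw1[10], imm10 = hw1[9:0],
 * J1 = hw2[13], J2 = hw2[11], imm11 = hw2[10:0], I1 = NOT(J1 XOR S) and
 * I2 = NOT(J2 XOR S).  After the sign extension above, offset[23] and
 * offset[22] both hold S, so XOR-ing them with ~J1 and ~J2 (the two
 * statements above) yields I1 and I2 directly. */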
7645
9ee6e8bb
PB
7646 if (insn & (1 << 14)) {
7647 /* Branch and link. */
3174f8e9 7648 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7649 }
3b46e624 7650
b0109805 7651 offset += s->pc;
9ee6e8bb
PB
7652 if (insn & (1 << 12)) {
7653 /* b/bl */
b0109805 7654 gen_jmp(s, offset);
9ee6e8bb
PB
7655 } else {
7656 /* blx */
b0109805
PB
7657 offset &= ~(uint32_t)2;
7658 gen_bx_im(s, offset);
2c0262af 7659 }
9ee6e8bb
PB
7660 } else if (((insn >> 23) & 7) == 7) {
7661 /* Misc control */
7662 if (insn & (1 << 13))
7663 goto illegal_op;
7664
7665 if (insn & (1 << 26)) {
7666 /* Secure monitor call (v6Z) */
7667 goto illegal_op; /* not implemented. */
2c0262af 7668 } else {
9ee6e8bb
PB
7669 op = (insn >> 20) & 7;
7670 switch (op) {
7671 case 0: /* msr cpsr. */
7672 if (IS_M(env)) {
8984bd2e
PB
7673 tmp = load_reg(s, rn);
7674 addr = tcg_const_i32(insn & 0xff);
7675 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7676 gen_lookup_tb(s);
7677 break;
7678 }
7679 /* fall through */
7680 case 1: /* msr spsr. */
7681 if (IS_M(env))
7682 goto illegal_op;
2fbac54b
FN
7683 tmp = load_reg(s, rn);
7684 if (gen_set_psr(s,
9ee6e8bb 7685 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7686 op == 1, tmp))
9ee6e8bb
PB
7687 goto illegal_op;
7688 break;
7689 case 2: /* cps, nop-hint. */
7690 if (((insn >> 8) & 7) == 0) {
7691 gen_nop_hint(s, insn & 0xff);
7692 }
7693 /* Implemented as NOP in user mode. */
7694 if (IS_USER(s))
7695 break;
7696 offset = 0;
7697 imm = 0;
7698 if (insn & (1 << 10)) {
7699 if (insn & (1 << 7))
7700 offset |= CPSR_A;
7701 if (insn & (1 << 6))
7702 offset |= CPSR_I;
7703 if (insn & (1 << 5))
7704 offset |= CPSR_F;
7705 if (insn & (1 << 9))
7706 imm = CPSR_A | CPSR_I | CPSR_F;
7707 }
7708 if (insn & (1 << 8)) {
7709 offset |= 0x1f;
7710 imm |= (insn & 0x1f);
7711 }
7712 if (offset) {
2fbac54b 7713 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
7714 }
7715 break;
7716 case 3: /* Special control operations. */
7717 op = (insn >> 4) & 0xf;
7718 switch (op) {
7719 case 2: /* clrex */
8f8e3aa4 7720 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7721 break;
7722 case 4: /* dsb */
7723 case 5: /* dmb */
7724 case 6: /* isb */
7725 /* These execute as NOPs. */
7726 ARCH(7);
7727 break;
7728 default:
7729 goto illegal_op;
7730 }
7731 break;
7732 case 4: /* bxj */
7733 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7734 tmp = load_reg(s, rn);
7735 gen_bx(s, tmp);
9ee6e8bb
PB
7736 break;
7737 case 5: /* Exception return. */
7738 /* Unpredictable in user mode. */
7739 goto illegal_op;
7740 case 6: /* mrs cpsr. */
8984bd2e 7741 tmp = new_tmp();
9ee6e8bb 7742 if (IS_M(env)) {
8984bd2e
PB
7743 addr = tcg_const_i32(insn & 0xff);
7744 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7745 } else {
8984bd2e 7746 gen_helper_cpsr_read(tmp);
9ee6e8bb 7747 }
8984bd2e 7748 store_reg(s, rd, tmp);
9ee6e8bb
PB
7749 break;
7750 case 7: /* mrs spsr. */
7751 /* Not accessible in user mode. */
7752 if (IS_USER(s) || IS_M(env))
7753 goto illegal_op;
d9ba4830
PB
7754 tmp = load_cpu_field(spsr);
7755 store_reg(s, rd, tmp);
9ee6e8bb 7756 break;
2c0262af
FB
7757 }
7758 }
9ee6e8bb
PB
7759 } else {
7760 /* Conditional branch. */
7761 op = (insn >> 22) & 0xf;
7762 /* Generate a conditional jump to next instruction. */
7763 s->condlabel = gen_new_label();
d9ba4830 7764 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7765 s->condjmp = 1;
7766
7767 /* offset[11:1] = insn[10:0] */
7768 offset = (insn & 0x7ff) << 1;
7769 /* offset[17:12] = insn[21:16]. */
7770 offset |= (insn & 0x003f0000) >> 4;
7771 /* offset[31:20] = insn[26]. */
7772 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7773 /* offset[18] = insn[13]. */
7774 offset |= (insn & (1 << 13)) << 5;
7775 /* offset[19] = insn[11]. */
7776 offset |= (insn & (1 << 11)) << 8;
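/* Decode note: this is the Thumb-2 conditional branch (B<cond>.W, encoding
 * T3): S = hw1[10], imm6 = hw1[5:0], J1 = hw2[13], J2 = hw2[11],
 * imm11 = hw2[10:0], and the target offset is
 * SignExtend(S:J2:J1:imm6:imm11:'0').  Unlike BL/BLX above, J1 and J2 are
 * used directly rather than being combined with S. */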
7777
7778 /* jump to the offset */
b0109805 7779 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7780 }
7781 } else {
7782 /* Data processing immediate. */
7783 if (insn & (1 << 25)) {
7784 if (insn & (1 << 24)) {
7785 if (insn & (1 << 20))
7786 goto illegal_op;
7787 /* Bitfield/Saturate. */
7788 op = (insn >> 21) & 7;
7789 imm = insn & 0x1f;
7790 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
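/* shift is the combined imm3:imm2 field (insn[14:12]:insn[7:6]); it gives
 * the LSB position for the bitfield ops and the shift amount for the
 * saturate forms handled below. */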
6ddbc6e4
PB
7791 if (rn == 15) {
7792 tmp = new_tmp();
7793 tcg_gen_movi_i32(tmp, 0);
7794 } else {
7795 tmp = load_reg(s, rn);
7796 }
9ee6e8bb
PB
7797 switch (op) {
7798 case 2: /* Signed bitfield extract. */
7799 imm++;
7800 if (shift + imm > 32)
7801 goto illegal_op;
7802 if (imm < 32)
6ddbc6e4 7803 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7804 break;
7805 case 6: /* Unsigned bitfield extract. */
7806 imm++;
7807 if (shift + imm > 32)
7808 goto illegal_op;
7809 if (imm < 32)
6ddbc6e4 7810 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7811 break;
7812 case 3: /* Bitfield insert/clear. */
7813 if (imm < shift)
7814 goto illegal_op;
7815 imm = imm + 1 - shift;
7816 if (imm != 32) {
6ddbc6e4 7817 tmp2 = load_reg(s, rd);
8f8e3aa4 7818 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7819 dead_tmp(tmp2);
9ee6e8bb
PB
7820 }
7821 break;
7822 case 7:
7823 goto illegal_op;
7824 default: /* Saturate. */
9ee6e8bb
PB
7825 if (shift) {
7826 if (op & 1)
6ddbc6e4 7827 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7828 else
6ddbc6e4 7829 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7830 }
6ddbc6e4 7831 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7832 if (op & 4) {
7833 /* Unsigned. */
9ee6e8bb 7834 if ((op & 1) && shift == 0)
6ddbc6e4 7835 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7836 else
6ddbc6e4 7837 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7838 } else {
9ee6e8bb 7839 /* Signed. */
9ee6e8bb 7840 if ((op & 1) && shift == 0)
6ddbc6e4 7841 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7842 else
6ddbc6e4 7843 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7844 }
9ee6e8bb 7845 break;
2c0262af 7846 }
6ddbc6e4 7847 store_reg(s, rd, tmp);
9ee6e8bb
PB
7848 } else {
7849 imm = ((insn & 0x04000000) >> 15)
7850 | ((insn & 0x7000) >> 4) | (insn & 0xff);
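/* imm is the 12-bit i:imm3:imm8 field: insn[26] -> imm[11],
 * insn[14:12] -> imm[10:8], insn[7:0] -> imm[7:0].  For the 16-bit
 * movw/movt forms the imm4 field (insn[19:16]) is OR-ed in just below to
 * supply imm[15:12]. */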
7851 if (insn & (1 << 22)) {
7852 /* 16-bit immediate. */
7853 imm |= (insn >> 4) & 0xf000;
7854 if (insn & (1 << 23)) {
7855 /* movt */
5e3f878a 7856 tmp = load_reg(s, rd);
86831435 7857 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7858 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7859 } else {
9ee6e8bb 7860 /* movw */
5e3f878a
PB
7861 tmp = new_tmp();
7862 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7863 }
7864 } else {
9ee6e8bb
PB
7865 /* Add/sub 12-bit immediate. */
7866 if (rn == 15) {
b0109805 7867 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7868 if (insn & (1 << 23))
b0109805 7869 offset -= imm;
9ee6e8bb 7870 else
b0109805 7871 offset += imm;
5e3f878a
PB
7872 tmp = new_tmp();
7873 tcg_gen_movi_i32(tmp, offset);
2c0262af 7874 } else {
5e3f878a 7875 tmp = load_reg(s, rn);
9ee6e8bb 7876 if (insn & (1 << 23))
5e3f878a 7877 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7878 else
5e3f878a 7879 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7880 }
9ee6e8bb 7881 }
5e3f878a 7882 store_reg(s, rd, tmp);
191abaa2 7883 }
9ee6e8bb
PB
7884 } else {
7885 int shifter_out = 0;
7886 /* modified 12-bit immediate. */
7887 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7888 imm = (insn & 0xff);
7889 switch (shift) {
7890 case 0: /* XY */
7891 /* Nothing to do. */
7892 break;
7893 case 1: /* 00XY00XY */
7894 imm |= imm << 16;
7895 break;
7896 case 2: /* XY00XY00 */
7897 imm |= imm << 16;
7898 imm <<= 8;
7899 break;
7900 case 3: /* XYXYXYXY */
7901 imm |= imm << 16;
7902 imm |= imm << 8;
7903 break;
7904 default: /* Rotated constant. */
7905 shift = (shift << 1) | (imm >> 7);
7906 imm |= 0x80;
7907 imm = imm << (32 - shift);
7908 shifter_out = 1;
7909 break;
b5ff1b31 7910 }
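/* Example, assuming imm8 = 0xAB: shift 1 gives 0x00AB00AB, shift 2 gives
 * 0xAB00AB00 and shift 3 gives 0xABABABAB.  For the rotated-constant case
 * the 5-bit rotation is i:imm3:imm8[7] with imm8 bit 7 forced to 1; since
 * that rotation is always >= 8 and the value fits in the low 8 bits,
 * "imm << (32 - shift)" is equivalent to a rotate right by shift. */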
3174f8e9
FN
7911 tmp2 = new_tmp();
7912 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 7913 rn = (insn >> 16) & 0xf;
3174f8e9
FN
7914 if (rn == 15) {
7915 tmp = new_tmp();
7916 tcg_gen_movi_i32(tmp, 0);
7917 } else {
7918 tmp = load_reg(s, rn);
7919 }
9ee6e8bb
PB
7920 op = (insn >> 21) & 0xf;
7921 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 7922 shifter_out, tmp, tmp2))
9ee6e8bb 7923 goto illegal_op;
3174f8e9 7924 dead_tmp(tmp2);
9ee6e8bb
PB
7925 rd = (insn >> 8) & 0xf;
7926 if (rd != 15) {
3174f8e9
FN
7927 store_reg(s, rd, tmp);
7928 } else {
7929 dead_tmp(tmp);
2c0262af 7930 }
2c0262af 7931 }
9ee6e8bb
PB
7932 }
7933 break;
7934 case 12: /* Load/store single data item. */
7935 {
7936 int postinc = 0;
7937 int writeback = 0;
b0109805 7938 int user;
9ee6e8bb
PB
7939 if ((insn & 0x01100000) == 0x01000000) {
7940 if (disas_neon_ls_insn(env, s, insn))
c1713132 7941 goto illegal_op;
9ee6e8bb
PB
7942 break;
7943 }
b0109805 7944 user = IS_USER(s);
9ee6e8bb 7945 if (rn == 15) {
b0109805 7946 addr = new_tmp();
9ee6e8bb
PB
7947 /* PC relative. */
7948 /* s->pc has already been incremented by 4. */
7949 imm = s->pc & 0xfffffffc;
7950 if (insn & (1 << 23))
7951 imm += insn & 0xfff;
7952 else
7953 imm -= insn & 0xfff;
b0109805 7954 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7955 } else {
b0109805 7956 addr = load_reg(s, rn);
9ee6e8bb
PB
7957 if (insn & (1 << 23)) {
7958 /* Positive offset. */
7959 imm = insn & 0xfff;
b0109805 7960 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7961 } else {
7962 op = (insn >> 8) & 7;
7963 imm = insn & 0xff;
7964 switch (op) {
7965 case 0: case 8: /* Shifted Register. */
7966 shift = (insn >> 4) & 0xf;
7967 if (shift > 3)
18c9b560 7968 goto illegal_op;
b26eefb6 7969 tmp = load_reg(s, rm);
9ee6e8bb 7970 if (shift)
b26eefb6 7971 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7972 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7973 dead_tmp(tmp);
9ee6e8bb
PB
7974 break;
7975 case 4: /* Negative offset. */
b0109805 7976 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7977 break;
7978 case 6: /* User privilege. */
b0109805
PB
7979 tcg_gen_addi_i32(addr, addr, imm);
7980 user = 1;
9ee6e8bb
PB
7981 break;
7982 case 1: /* Post-decrement. */
7983 imm = -imm;
7984 /* Fall through. */
7985 case 3: /* Post-increment. */
9ee6e8bb
PB
7986 postinc = 1;
7987 writeback = 1;
7988 break;
7989 case 5: /* Pre-decrement. */
7990 imm = -imm;
7991 /* Fall through. */
7992 case 7: /* Pre-increment. */
b0109805 7993 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7994 writeback = 1;
7995 break;
7996 default:
b7bcbe95 7997 goto illegal_op;
9ee6e8bb
PB
7998 }
7999 }
8000 }
8001 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
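/* op[1:0] is the transfer size (0 = byte, 1 = halfword, 2 = word) and
 * op[2], taken from insn bit 24, selects sign extension for loads; this
 * gives the case values 0/1/2 and 4/5 used in the switches below. */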
8002 if (insn & (1 << 20)) {
8003 /* Load. */
8004 if (rs == 15 && op != 2) {
8005 if (op & 2)
b5ff1b31 8006 goto illegal_op;
9ee6e8bb
PB
8007 /* Memory hint. Implemented as NOP. */
8008 } else {
8009 switch (op) {
b0109805
PB
8010 case 0: tmp = gen_ld8u(addr, user); break;
8011 case 4: tmp = gen_ld8s(addr, user); break;
8012 case 1: tmp = gen_ld16u(addr, user); break;
8013 case 5: tmp = gen_ld16s(addr, user); break;
8014 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8015 default: goto illegal_op;
8016 }
8017 if (rs == 15) {
b0109805 8018 gen_bx(s, tmp);
9ee6e8bb 8019 } else {
b0109805 8020 store_reg(s, rs, tmp);
9ee6e8bb
PB
8021 }
8022 }
8023 } else {
8024 /* Store. */
8025 if (rs == 15)
b7bcbe95 8026 goto illegal_op;
b0109805 8027 tmp = load_reg(s, rs);
9ee6e8bb 8028 switch (op) {
b0109805
PB
8029 case 0: gen_st8(tmp, addr, user); break;
8030 case 1: gen_st16(tmp, addr, user); break;
8031 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8032 default: goto illegal_op;
b7bcbe95 8033 }
2c0262af 8034 }
9ee6e8bb 8035 if (postinc)
b0109805
PB
8036 tcg_gen_addi_i32(addr, addr, imm);
8037 if (writeback) {
8038 store_reg(s, rn, addr);
8039 } else {
8040 dead_tmp(addr);
8041 }
9ee6e8bb
PB
8042 }
8043 break;
8044 default:
8045 goto illegal_op;
2c0262af 8046 }
9ee6e8bb
PB
8047 return 0;
8048illegal_op:
8049 return 1;
2c0262af
FB
8050}
8051
9ee6e8bb 8052static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8053{
8054 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8055 int32_t offset;
8056 int i;
b26eefb6 8057 TCGv tmp;
d9ba4830 8058 TCGv tmp2;
b0109805 8059 TCGv addr;
99c475ab 8060
9ee6e8bb
PB
8061 if (s->condexec_mask) {
8062 cond = s->condexec_cond;
8063 s->condlabel = gen_new_label();
d9ba4830 8064 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8065 s->condjmp = 1;
8066 }
8067
b5ff1b31 8068 insn = lduw_code(s->pc);
99c475ab 8069 s->pc += 2;
b5ff1b31 8070
99c475ab
FB
8071 switch (insn >> 12) {
8072 case 0: case 1:
396e467c 8073
99c475ab
FB
8074 rd = insn & 7;
8075 op = (insn >> 11) & 3;
8076 if (op == 3) {
8077 /* add/subtract */
8078 rn = (insn >> 3) & 7;
396e467c 8079 tmp = load_reg(s, rn);
99c475ab
FB
8080 if (insn & (1 << 10)) {
8081 /* immediate */
396e467c
FN
8082 tmp2 = new_tmp();
8083 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8084 } else {
8085 /* reg */
8086 rm = (insn >> 6) & 7;
396e467c 8087 tmp2 = load_reg(s, rm);
99c475ab 8088 }
9ee6e8bb
PB
8089 if (insn & (1 << 9)) {
8090 if (s->condexec_mask)
396e467c 8091 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8092 else
396e467c 8093 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8094 } else {
8095 if (s->condexec_mask)
396e467c 8096 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8097 else
396e467c 8098 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8099 }
396e467c
FN
8100 dead_tmp(tmp2);
8101 store_reg(s, rd, tmp);
99c475ab
FB
8102 } else {
8103 /* shift immediate */
8104 rm = (insn >> 3) & 7;
8105 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8106 tmp = load_reg(s, rm);
8107 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8108 if (!s->condexec_mask)
8109 gen_logic_CC(tmp);
8110 store_reg(s, rd, tmp);
99c475ab
FB
8111 }
8112 break;
8113 case 2: case 3:
8114 /* arithmetic large immediate */
8115 op = (insn >> 11) & 3;
8116 rd = (insn >> 8) & 0x7;
396e467c
FN
8117 if (op == 0) { /* mov */
8118 tmp = new_tmp();
8119 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8120 if (!s->condexec_mask)
396e467c
FN
8121 gen_logic_CC(tmp);
8122 store_reg(s, rd, tmp);
8123 } else {
8124 tmp = load_reg(s, rd);
8125 tmp2 = new_tmp();
8126 tcg_gen_movi_i32(tmp2, insn & 0xff);
8127 switch (op) {
8128 case 1: /* cmp */
8129 gen_helper_sub_cc(tmp, tmp, tmp2);
8130 dead_tmp(tmp);
8131 dead_tmp(tmp2);
8132 break;
8133 case 2: /* add */
8134 if (s->condexec_mask)
8135 tcg_gen_add_i32(tmp, tmp, tmp2);
8136 else
8137 gen_helper_add_cc(tmp, tmp, tmp2);
8138 dead_tmp(tmp2);
8139 store_reg(s, rd, tmp);
8140 break;
8141 case 3: /* sub */
8142 if (s->condexec_mask)
8143 tcg_gen_sub_i32(tmp, tmp, tmp2);
8144 else
8145 gen_helper_sub_cc(tmp, tmp, tmp2);
8146 dead_tmp(tmp2);
8147 store_reg(s, rd, tmp);
8148 break;
8149 }
99c475ab 8150 }
99c475ab
FB
8151 break;
8152 case 4:
8153 if (insn & (1 << 11)) {
8154 rd = (insn >> 8) & 7;
5899f386
FB
8155 /* load pc-relative. Bit 1 of PC is ignored. */
8156 val = s->pc + 2 + ((insn & 0xff) * 4);
8157 val &= ~(uint32_t)2;
b0109805
PB
8158 addr = new_tmp();
8159 tcg_gen_movi_i32(addr, val);
8160 tmp = gen_ld32(addr, IS_USER(s));
8161 dead_tmp(addr);
8162 store_reg(s, rd, tmp);
99c475ab
FB
8163 break;
8164 }
8165 if (insn & (1 << 10)) {
8166 /* data processing extended or blx */
8167 rd = (insn & 7) | ((insn >> 4) & 8);
8168 rm = (insn >> 3) & 0xf;
8169 op = (insn >> 8) & 3;
8170 switch (op) {
8171 case 0: /* add */
396e467c
FN
8172 tmp = load_reg(s, rd);
8173 tmp2 = load_reg(s, rm);
8174 tcg_gen_add_i32(tmp, tmp, tmp2);
8175 dead_tmp(tmp2);
8176 store_reg(s, rd, tmp);
99c475ab
FB
8177 break;
8178 case 1: /* cmp */
396e467c
FN
8179 tmp = load_reg(s, rd);
8180 tmp2 = load_reg(s, rm);
8181 gen_helper_sub_cc(tmp, tmp, tmp2);
8182 dead_tmp(tmp2);
8183 dead_tmp(tmp);
99c475ab
FB
8184 break;
8185 case 2: /* mov/cpy */
396e467c
FN
8186 tmp = load_reg(s, rm);
8187 store_reg(s, rd, tmp);
99c475ab
FB
8188 break;
8189 case 3:/* branch [and link] exchange thumb register */
b0109805 8190 tmp = load_reg(s, rm);
99c475ab
FB
8191 if (insn & (1 << 7)) {
8192 val = (uint32_t)s->pc | 1;
b0109805
PB
8193 tmp2 = new_tmp();
8194 tcg_gen_movi_i32(tmp2, val);
8195 store_reg(s, 14, tmp2);
99c475ab 8196 }
d9ba4830 8197 gen_bx(s, tmp);
99c475ab
FB
8198 break;
8199 }
8200 break;
8201 }
8202
8203 /* data processing register */
8204 rd = insn & 7;
8205 rm = (insn >> 3) & 7;
8206 op = (insn >> 6) & 0xf;
8207 if (op == 2 || op == 3 || op == 4 || op == 7) {
8208 /* the shift/rotate ops want the operands backwards */
8209 val = rm;
8210 rm = rd;
8211 rd = val;
8212 val = 1;
8213 } else {
8214 val = 0;
8215 }
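/* Write-back convention for the code below: val == 1 means the result is
 * produced in tmp2 and stored to rm (for the shift/rotate ops that is the
 * original rd thanks to the swap above; mvn sets rm = rd itself), while
 * rd == 16 is used by tst/cmp/cmn to mean "flags only, no register
 * write-back". */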
8216
396e467c
FN
8217 if (op == 9) { /* neg */
8218 tmp = new_tmp();
8219 tcg_gen_movi_i32(tmp, 0);
8220 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8221 tmp = load_reg(s, rd);
8222 } else {
8223 TCGV_UNUSED(tmp);
8224 }
99c475ab 8225
396e467c 8226 tmp2 = load_reg(s, rm);
5899f386 8227 switch (op) {
99c475ab 8228 case 0x0: /* and */
396e467c 8229 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8230 if (!s->condexec_mask)
396e467c 8231 gen_logic_CC(tmp);
99c475ab
FB
8232 break;
8233 case 0x1: /* eor */
396e467c 8234 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8235 if (!s->condexec_mask)
396e467c 8236 gen_logic_CC(tmp);
99c475ab
FB
8237 break;
8238 case 0x2: /* lsl */
9ee6e8bb 8239 if (s->condexec_mask) {
396e467c 8240 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8241 } else {
396e467c
FN
8242 gen_helper_shl_cc(tmp2, tmp2, tmp);
8243 gen_logic_CC(tmp2);
9ee6e8bb 8244 }
99c475ab
FB
8245 break;
8246 case 0x3: /* lsr */
9ee6e8bb 8247 if (s->condexec_mask) {
396e467c 8248 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8249 } else {
396e467c
FN
8250 gen_helper_shr_cc(tmp2, tmp2, tmp);
8251 gen_logic_CC(tmp2);
9ee6e8bb 8252 }
99c475ab
FB
8253 break;
8254 case 0x4: /* asr */
9ee6e8bb 8255 if (s->condexec_mask) {
396e467c 8256 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8257 } else {
396e467c
FN
8258 gen_helper_sar_cc(tmp2, tmp2, tmp);
8259 gen_logic_CC(tmp2);
9ee6e8bb 8260 }
99c475ab
FB
8261 break;
8262 case 0x5: /* adc */
9ee6e8bb 8263 if (s->condexec_mask)
396e467c 8264 gen_adc(tmp, tmp2);
9ee6e8bb 8265 else
396e467c 8266 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8267 break;
8268 case 0x6: /* sbc */
9ee6e8bb 8269 if (s->condexec_mask)
396e467c 8270 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8271 else
396e467c 8272 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8273 break;
8274 case 0x7: /* ror */
9ee6e8bb 8275 if (s->condexec_mask) {
396e467c 8276 gen_helper_ror(tmp2, tmp2, tmp);
9ee6e8bb 8277 } else {
396e467c
FN
8278 gen_helper_ror_cc(tmp2, tmp2, tmp);
8279 gen_logic_CC(tmp2);
9ee6e8bb 8280 }
99c475ab
FB
8281 break;
8282 case 0x8: /* tst */
396e467c
FN
8283 tcg_gen_and_i32(tmp, tmp, tmp2);
8284 gen_logic_CC(tmp);
99c475ab 8285 rd = 16;
5899f386 8286 break;
99c475ab 8287 case 0x9: /* neg */
9ee6e8bb 8288 if (s->condexec_mask)
396e467c 8289 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8290 else
396e467c 8291 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8292 break;
8293 case 0xa: /* cmp */
396e467c 8294 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8295 rd = 16;
8296 break;
8297 case 0xb: /* cmn */
396e467c 8298 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8299 rd = 16;
8300 break;
8301 case 0xc: /* orr */
396e467c 8302 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8303 if (!s->condexec_mask)
396e467c 8304 gen_logic_CC(tmp);
99c475ab
FB
8305 break;
8306 case 0xd: /* mul */
396e467c 8307 gen_mull(tmp, tmp2);
9ee6e8bb 8308 if (!s->condexec_mask)
396e467c 8309 gen_logic_CC(tmp);
99c475ab
FB
8310 break;
8311 case 0xe: /* bic */
396e467c 8312 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb 8313 if (!s->condexec_mask)
396e467c 8314 gen_logic_CC(tmp);
99c475ab
FB
8315 break;
8316 case 0xf: /* mvn */
396e467c 8317 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8318 if (!s->condexec_mask)
396e467c 8319 gen_logic_CC(tmp2);
99c475ab 8320 val = 1;
5899f386 8321 rm = rd;
99c475ab
FB
8322 break;
8323 }
8324 if (rd != 16) {
396e467c
FN
8325 if (val) {
8326 store_reg(s, rm, tmp2);
8327 if (op != 0xf)
8328 dead_tmp(tmp);
8329 } else {
8330 store_reg(s, rd, tmp);
8331 dead_tmp(tmp2);
8332 }
8333 } else {
8334 dead_tmp(tmp);
8335 dead_tmp(tmp2);
99c475ab
FB
8336 }
8337 break;
8338
8339 case 5:
8340 /* load/store register offset. */
8341 rd = insn & 7;
8342 rn = (insn >> 3) & 7;
8343 rm = (insn >> 6) & 7;
8344 op = (insn >> 9) & 7;
b0109805 8345 addr = load_reg(s, rn);
b26eefb6 8346 tmp = load_reg(s, rm);
b0109805 8347 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8348 dead_tmp(tmp);
99c475ab
FB
8349
8350 if (op < 3) /* store */
b0109805 8351 tmp = load_reg(s, rd);
99c475ab
FB
8352
8353 switch (op) {
8354 case 0: /* str */
b0109805 8355 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8356 break;
8357 case 1: /* strh */
b0109805 8358 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8359 break;
8360 case 2: /* strb */
b0109805 8361 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8362 break;
8363 case 3: /* ldrsb */
b0109805 8364 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8365 break;
8366 case 4: /* ldr */
b0109805 8367 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8368 break;
8369 case 5: /* ldrh */
b0109805 8370 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8371 break;
8372 case 6: /* ldrb */
b0109805 8373 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8374 break;
8375 case 7: /* ldrsh */
b0109805 8376 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8377 break;
8378 }
8379 if (op >= 3) /* load */
b0109805
PB
8380 store_reg(s, rd, tmp);
8381 dead_tmp(addr);
99c475ab
FB
8382 break;
8383
8384 case 6:
8385 /* load/store word immediate offset */
8386 rd = insn & 7;
8387 rn = (insn >> 3) & 7;
b0109805 8388 addr = load_reg(s, rn);
99c475ab 8389 val = (insn >> 4) & 0x7c;
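/* (insn >> 4) & 0x7c is the 5-bit immediate (insn[10:6]) scaled by 4 for
 * word accesses, i.e. ((insn >> 6) & 0x1f) << 2.  The byte form below uses
 * the immediate unscaled and the halfword form scales it by 2. */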
b0109805 8390 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8391
8392 if (insn & (1 << 11)) {
8393 /* load */
b0109805
PB
8394 tmp = gen_ld32(addr, IS_USER(s));
8395 store_reg(s, rd, tmp);
99c475ab
FB
8396 } else {
8397 /* store */
b0109805
PB
8398 tmp = load_reg(s, rd);
8399 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8400 }
b0109805 8401 dead_tmp(addr);
99c475ab
FB
8402 break;
8403
8404 case 7:
8405 /* load/store byte immediate offset */
8406 rd = insn & 7;
8407 rn = (insn >> 3) & 7;
b0109805 8408 addr = load_reg(s, rn);
99c475ab 8409 val = (insn >> 6) & 0x1f;
b0109805 8410 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8411
8412 if (insn & (1 << 11)) {
8413 /* load */
b0109805
PB
8414 tmp = gen_ld8u(addr, IS_USER(s));
8415 store_reg(s, rd, tmp);
99c475ab
FB
8416 } else {
8417 /* store */
b0109805
PB
8418 tmp = load_reg(s, rd);
8419 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8420 }
b0109805 8421 dead_tmp(addr);
99c475ab
FB
8422 break;
8423
8424 case 8:
8425 /* load/store halfword immediate offset */
8426 rd = insn & 7;
8427 rn = (insn >> 3) & 7;
b0109805 8428 addr = load_reg(s, rn);
99c475ab 8429 val = (insn >> 5) & 0x3e;
b0109805 8430 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8431
8432 if (insn & (1 << 11)) {
8433 /* load */
b0109805
PB
8434 tmp = gen_ld16u(addr, IS_USER(s));
8435 store_reg(s, rd, tmp);
99c475ab
FB
8436 } else {
8437 /* store */
b0109805
PB
8438 tmp = load_reg(s, rd);
8439 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8440 }
b0109805 8441 dead_tmp(addr);
99c475ab
FB
8442 break;
8443
8444 case 9:
8445 /* load/store from stack */
8446 rd = (insn >> 8) & 7;
b0109805 8447 addr = load_reg(s, 13);
99c475ab 8448 val = (insn & 0xff) * 4;
b0109805 8449 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8450
8451 if (insn & (1 << 11)) {
8452 /* load */
b0109805
PB
8453 tmp = gen_ld32(addr, IS_USER(s));
8454 store_reg(s, rd, tmp);
99c475ab
FB
8455 } else {
8456 /* store */
b0109805
PB
8457 tmp = load_reg(s, rd);
8458 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8459 }
b0109805 8460 dead_tmp(addr);
99c475ab
FB
8461 break;
8462
8463 case 10:
8464 /* add to high reg */
8465 rd = (insn >> 8) & 7;
5899f386
FB
8466 if (insn & (1 << 11)) {
8467 /* SP */
5e3f878a 8468 tmp = load_reg(s, 13);
5899f386
FB
8469 } else {
8470 /* PC. bit 1 is ignored. */
5e3f878a
PB
8471 tmp = new_tmp();
8472 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8473 }
99c475ab 8474 val = (insn & 0xff) * 4;
5e3f878a
PB
8475 tcg_gen_addi_i32(tmp, tmp, val);
8476 store_reg(s, rd, tmp);
99c475ab
FB
8477 break;
8478
8479 case 11:
8480 /* misc */
8481 op = (insn >> 8) & 0xf;
8482 switch (op) {
8483 case 0:
8484 /* adjust stack pointer */
b26eefb6 8485 tmp = load_reg(s, 13);
99c475ab
FB
8486 val = (insn & 0x7f) * 4;
8487 if (insn & (1 << 7))
6a0d8a1d 8488 val = -(int32_t)val;
b26eefb6
PB
8489 tcg_gen_addi_i32(tmp, tmp, val);
8490 store_reg(s, 13, tmp);
99c475ab
FB
8491 break;
8492
9ee6e8bb
PB
8493 case 2: /* sign/zero extend. */
8494 ARCH(6);
8495 rd = insn & 7;
8496 rm = (insn >> 3) & 7;
b0109805 8497 tmp = load_reg(s, rm);
9ee6e8bb 8498 switch ((insn >> 6) & 3) {
b0109805
PB
8499 case 0: gen_sxth(tmp); break;
8500 case 1: gen_sxtb(tmp); break;
8501 case 2: gen_uxth(tmp); break;
8502 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8503 }
b0109805 8504 store_reg(s, rd, tmp);
9ee6e8bb 8505 break;
99c475ab
FB
8506 case 4: case 5: case 0xc: case 0xd:
8507 /* push/pop */
b0109805 8508 addr = load_reg(s, 13);
5899f386
FB
8509 if (insn & (1 << 8))
8510 offset = 4;
99c475ab 8511 else
5899f386
FB
8512 offset = 0;
8513 for (i = 0; i < 8; i++) {
8514 if (insn & (1 << i))
8515 offset += 4;
8516 }
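/* offset is the total transfer size in bytes: 4 per low register in the
 * list, plus 4 more when bit 8 (LR for push, PC for pop) is set.  For a
 * push the address is first lowered by this amount so registers are
 * stored in ascending order, and the same amount is subtracted again
 * after the loop to form the final SP. */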
8517 if ((insn & (1 << 11)) == 0) {
b0109805 8518 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8519 }
99c475ab
FB
8520 for (i = 0; i < 8; i++) {
8521 if (insn & (1 << i)) {
8522 if (insn & (1 << 11)) {
8523 /* pop */
b0109805
PB
8524 tmp = gen_ld32(addr, IS_USER(s));
8525 store_reg(s, i, tmp);
99c475ab
FB
8526 } else {
8527 /* push */
b0109805
PB
8528 tmp = load_reg(s, i);
8529 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8530 }
5899f386 8531 /* advance to the next address. */
b0109805 8532 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8533 }
8534 }
a50f5b91 8535 TCGV_UNUSED(tmp);
99c475ab
FB
8536 if (insn & (1 << 8)) {
8537 if (insn & (1 << 11)) {
8538 /* pop pc */
b0109805 8539 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8540 /* don't set the pc until the rest of the instruction
8541 has completed */
8542 } else {
8543 /* push lr */
b0109805
PB
8544 tmp = load_reg(s, 14);
8545 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8546 }
b0109805 8547 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8548 }
5899f386 8549 if ((insn & (1 << 11)) == 0) {
b0109805 8550 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8551 }
99c475ab 8552 /* write back the new stack pointer */
b0109805 8553 store_reg(s, 13, addr);
99c475ab
FB
8554 /* set the new PC value */
8555 if ((insn & 0x0900) == 0x0900)
b0109805 8556 gen_bx(s, tmp);
99c475ab
FB
8557 break;
8558
9ee6e8bb
PB
8559 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8560 rm = insn & 7;
d9ba4830 8561 tmp = load_reg(s, rm);
9ee6e8bb
PB
8562 s->condlabel = gen_new_label();
8563 s->condjmp = 1;
8564 if (insn & (1 << 11))
cb63669a 8565 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8566 else
cb63669a 8567 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8568 dead_tmp(tmp);
9ee6e8bb
PB
8569 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
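/* cbz/cbnz offset = i:imm5:'0': insn[7:3] -> offset[5:1] and
 * insn[9] -> offset[6].  The target is the address of this insn + 4 +
 * offset; s->pc has already been advanced past the insn, hence the
 * "+ 2" below. */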
8570 val = (uint32_t)s->pc + 2;
8571 val += offset;
8572 gen_jmp(s, val);
8573 break;
8574
8575 case 15: /* IT, nop-hint. */
8576 if ((insn & 0xf) == 0) {
8577 gen_nop_hint(s, (insn >> 4) & 0xf);
8578 break;
8579 }
8580 /* If Then. */
8581 s->condexec_cond = (insn >> 4) & 0xe;
8582 s->condexec_mask = insn & 0x1f;
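/* condexec_cond holds firstcond (insn[7:4]) with its LSB cleared;
 * condexec_mask is a 5-bit value whose top bit is firstcond[0] and whose
 * low four bits are the IT mask (insn[3:0]).  The per-instruction
 * condition bit and the mask shifting are handled after each insn in
 * gen_intermediate_code_internal(). */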
8583 /* No actual code generated for this insn, just set up state. */
8584 break;
8585
06c949e6 8586 case 0xe: /* bkpt */
9ee6e8bb 8587 gen_set_condexec(s);
5e3f878a 8588 gen_set_pc_im(s->pc - 2);
d9ba4830 8589 gen_exception(EXCP_BKPT);
06c949e6
PB
8590 s->is_jmp = DISAS_JUMP;
8591 break;
8592
9ee6e8bb
PB
8593 case 0xa: /* rev */
8594 ARCH(6);
8595 rn = (insn >> 3) & 0x7;
8596 rd = insn & 0x7;
b0109805 8597 tmp = load_reg(s, rn);
9ee6e8bb 8598 switch ((insn >> 6) & 3) {
66896cb8 8599 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8600 case 1: gen_rev16(tmp); break;
8601 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8602 default: goto illegal_op;
8603 }
b0109805 8604 store_reg(s, rd, tmp);
9ee6e8bb
PB
8605 break;
8606
8607 case 6: /* cps */
8608 ARCH(6);
8609 if (IS_USER(s))
8610 break;
8611 if (IS_M(env)) {
8984bd2e 8612 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8613 /* PRIMASK */
8984bd2e
PB
8614 if (insn & 1) {
8615 addr = tcg_const_i32(16);
8616 gen_helper_v7m_msr(cpu_env, addr, tmp);
8617 }
9ee6e8bb 8618 /* FAULTMASK */
8984bd2e
PB
8619 if (insn & 2) {
8620 addr = tcg_const_i32(17);
8621 gen_helper_v7m_msr(cpu_env, addr, tmp);
8622 }
9ee6e8bb
PB
8623 gen_lookup_tb(s);
8624 } else {
8625 if (insn & (1 << 4))
8626 shift = CPSR_A | CPSR_I | CPSR_F;
8627 else
8628 shift = 0;
2fbac54b 8629 gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
9ee6e8bb
PB
8630 }
8631 break;
8632
99c475ab
FB
8633 default:
8634 goto undef;
8635 }
8636 break;
8637
8638 case 12:
8639 /* load/store multiple */
8640 rn = (insn >> 8) & 0x7;
b0109805 8641 addr = load_reg(s, rn);
99c475ab
FB
8642 for (i = 0; i < 8; i++) {
8643 if (insn & (1 << i)) {
99c475ab
FB
8644 if (insn & (1 << 11)) {
8645 /* load */
b0109805
PB
8646 tmp = gen_ld32(addr, IS_USER(s));
8647 store_reg(s, i, tmp);
99c475ab
FB
8648 } else {
8649 /* store */
b0109805
PB
8650 tmp = load_reg(s, i);
8651 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8652 }
5899f386 8653 /* advance to the next address */
b0109805 8654 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8655 }
8656 }
5899f386 8657 /* Base register writeback. */
b0109805
PB
8658 if ((insn & (1 << rn)) == 0) {
8659 store_reg(s, rn, addr);
8660 } else {
8661 dead_tmp(addr);
8662 }
99c475ab
FB
8663 break;
8664
8665 case 13:
8666 /* conditional branch or swi */
8667 cond = (insn >> 8) & 0xf;
8668 if (cond == 0xe)
8669 goto undef;
8670
8671 if (cond == 0xf) {
8672 /* swi */
9ee6e8bb 8673 gen_set_condexec(s);
422ebf69 8674 gen_set_pc_im(s->pc);
9ee6e8bb 8675 s->is_jmp = DISAS_SWI;
99c475ab
FB
8676 break;
8677 }
8678 /* generate a conditional jump to next instruction */
e50e6a20 8679 s->condlabel = gen_new_label();
d9ba4830 8680 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8681 s->condjmp = 1;
99c475ab
FB
8682
8683 /* jump to the offset */
5899f386 8684 val = (uint32_t)s->pc + 2;
99c475ab 8685 offset = ((int32_t)insn << 24) >> 24;
5899f386 8686 val += offset << 1;
8aaca4c0 8687 gen_jmp(s, val);
99c475ab
FB
8688 break;
8689
8690 case 14:
358bf29e 8691 if (insn & (1 << 11)) {
9ee6e8bb
PB
8692 if (disas_thumb2_insn(env, s, insn))
8693 goto undef32;
358bf29e
PB
8694 break;
8695 }
9ee6e8bb 8696 /* unconditional branch */
99c475ab
FB
8697 val = (uint32_t)s->pc;
8698 offset = ((int32_t)insn << 21) >> 21;
8699 val += (offset << 1) + 2;
8aaca4c0 8700 gen_jmp(s, val);
99c475ab
FB
8701 break;
8702
8703 case 15:
9ee6e8bb 8704 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8705 goto undef32;
9ee6e8bb 8706 break;
99c475ab
FB
8707 }
8708 return;
9ee6e8bb
PB
8709undef32:
8710 gen_set_condexec(s);
5e3f878a 8711 gen_set_pc_im(s->pc - 4);
d9ba4830 8712 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8713 s->is_jmp = DISAS_JUMP;
8714 return;
8715illegal_op:
99c475ab 8716undef:
9ee6e8bb 8717 gen_set_condexec(s);
5e3f878a 8718 gen_set_pc_im(s->pc - 2);
d9ba4830 8719 gen_exception(EXCP_UDEF);
99c475ab
FB
8720 s->is_jmp = DISAS_JUMP;
8721}
8722
2c0262af
FB
8723/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8724 basic block 'tb'. If search_pc is TRUE, also generate PC
8725 information for each intermediate instruction. */
2cfc5f17
TS
8726static inline void gen_intermediate_code_internal(CPUState *env,
8727 TranslationBlock *tb,
8728 int search_pc)
2c0262af
FB
8729{
8730 DisasContext dc1, *dc = &dc1;
a1d1bb31 8731 CPUBreakpoint *bp;
2c0262af
FB
8732 uint16_t *gen_opc_end;
8733 int j, lj;
0fa85d43 8734 target_ulong pc_start;
b5ff1b31 8735 uint32_t next_page_start;
2e70f6ef
PB
8736 int num_insns;
8737 int max_insns;
3b46e624 8738
2c0262af 8739 /* generate intermediate code */
b26eefb6 8740 num_temps = 0;
b26eefb6 8741
0fa85d43 8742 pc_start = tb->pc;
3b46e624 8743
2c0262af
FB
8744 dc->tb = tb;
8745
2c0262af 8746 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8747
8748 dc->is_jmp = DISAS_NEXT;
8749 dc->pc = pc_start;
8aaca4c0 8750 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8751 dc->condjmp = 0;
5899f386 8752 dc->thumb = env->thumb;
9ee6e8bb
PB
8753 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8754 dc->condexec_cond = env->condexec_bits >> 4;
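/* env->condexec_bits stores the IT state with the 4-bit condition in
 * bits [7:4] and the upper four bits of the translator's 5-bit mask in
 * bits [3:0]; shifting the mask left one here restores the internal
 * format (cf. the inverse packing applied when the state is written
 * back via gen_set_condexec()). */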
b5ff1b31 8755#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8756 if (IS_M(env)) {
8757 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8758 } else {
8759 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8760 }
b5ff1b31 8761#endif
a7812ae4
PB
8762 cpu_F0s = tcg_temp_new_i32();
8763 cpu_F1s = tcg_temp_new_i32();
8764 cpu_F0d = tcg_temp_new_i64();
8765 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8766 cpu_V0 = cpu_F0d;
8767 cpu_V1 = cpu_F1d;
e677137d 8768 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8769 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8770 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8771 lj = -1;
2e70f6ef
PB
8772 num_insns = 0;
8773 max_insns = tb->cflags & CF_COUNT_MASK;
8774 if (max_insns == 0)
8775 max_insns = CF_COUNT_MASK;
8776
8777 gen_icount_start();
9ee6e8bb
PB
8778 /* Reset the conditional execution bits immediately. This avoids
8779 complications trying to do it at the end of the block. */
8780 if (env->condexec_bits)
8f01245e
PB
8781 {
8782 TCGv tmp = new_tmp();
8783 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8784 store_cpu_field(tmp, condexec_bits);
8f01245e 8785 }
2c0262af 8786 do {
fbb4a2e3
PB
8787#ifdef CONFIG_USER_ONLY
8788 /* Intercept jump to the magic kernel page. */
8789 if (dc->pc >= 0xffff0000) {
8790 /* We always get here via a jump, so we know we are not in a
8791 conditional execution block. */
8792 gen_exception(EXCP_KERNEL_TRAP);
8793 dc->is_jmp = DISAS_UPDATE;
8794 break;
8795 }
8796#else
9ee6e8bb
PB
8797 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8798 /* We always get here via a jump, so we know we are not in a
8799 conditional execution block. */
d9ba4830 8800 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8801 dc->is_jmp = DISAS_UPDATE;
8802 break;
9ee6e8bb
PB
8803 }
8804#endif
8805
72cf2d4f
BS
8806 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8807 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8808 if (bp->pc == dc->pc) {
9ee6e8bb 8809 gen_set_condexec(dc);
5e3f878a 8810 gen_set_pc_im(dc->pc);
d9ba4830 8811 gen_exception(EXCP_DEBUG);
1fddef4b 8812 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8813 /* Advance PC so that clearing the breakpoint will
8814 invalidate this TB. */
8815 dc->pc += 2;
8816 goto done_generating;
1fddef4b
FB
8817 break;
8818 }
8819 }
8820 }
2c0262af
FB
8821 if (search_pc) {
8822 j = gen_opc_ptr - gen_opc_buf;
8823 if (lj < j) {
8824 lj++;
8825 while (lj < j)
8826 gen_opc_instr_start[lj++] = 0;
8827 }
0fa85d43 8828 gen_opc_pc[lj] = dc->pc;
2c0262af 8829 gen_opc_instr_start[lj] = 1;
2e70f6ef 8830 gen_opc_icount[lj] = num_insns;
2c0262af 8831 }
e50e6a20 8832
2e70f6ef
PB
8833 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8834 gen_io_start();
8835
9ee6e8bb
PB
8836 if (env->thumb) {
8837 disas_thumb_insn(env, dc);
8838 if (dc->condexec_mask) {
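/* Advance the IT state: bit 4 of the 5-bit mask supplies the LSB of the
 * condition for the next instruction, the mask shifts left one, and once
 * it reaches zero the IT block is over and the condition is cleared. */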
8839 dc->condexec_cond = (dc->condexec_cond & 0xe)
8840 | ((dc->condexec_mask >> 4) & 1);
8841 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8842 if (dc->condexec_mask == 0) {
8843 dc->condexec_cond = 0;
8844 }
8845 }
8846 } else {
8847 disas_arm_insn(env, dc);
8848 }
b26eefb6
PB
8849 if (num_temps) {
8850 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8851 num_temps = 0;
8852 }
e50e6a20
FB
8853
8854 if (dc->condjmp && !dc->is_jmp) {
8855 gen_set_label(dc->condlabel);
8856 dc->condjmp = 0;
8857 }
aaf2d97d 8858 /* Translation stops when a conditional branch is encountered.
e50e6a20 8859 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8860 * Also stop translation when a page boundary is reached. This
bf20dc07 8861 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8862 num_insns ++;
1fddef4b
FB
8863 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8864 !env->singlestep_enabled &&
1b530a6d 8865 !singlestep &&
2e70f6ef
PB
8866 dc->pc < next_page_start &&
8867 num_insns < max_insns);
8868
8869 if (tb->cflags & CF_LAST_IO) {
8870 if (dc->condjmp) {
8871 /* FIXME: This can theoretically happen with self-modifying
8872 code. */
8873 cpu_abort(env, "IO on conditional branch instruction");
8874 }
8875 gen_io_end();
8876 }
9ee6e8bb 8877
b5ff1b31 8878 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8879 instruction was a conditional branch or trap, and the PC has
8880 already been written. */
551bd27f 8881 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8882 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8883 if (dc->condjmp) {
9ee6e8bb
PB
8884 gen_set_condexec(dc);
8885 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8886 gen_exception(EXCP_SWI);
9ee6e8bb 8887 } else {
d9ba4830 8888 gen_exception(EXCP_DEBUG);
9ee6e8bb 8889 }
e50e6a20
FB
8890 gen_set_label(dc->condlabel);
8891 }
8892 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8893 gen_set_pc_im(dc->pc);
e50e6a20 8894 dc->condjmp = 0;
8aaca4c0 8895 }
9ee6e8bb
PB
8896 gen_set_condexec(dc);
8897 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8898 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8899 } else {
8900 /* FIXME: Single stepping a WFI insn will not halt
8901 the CPU. */
d9ba4830 8902 gen_exception(EXCP_DEBUG);
9ee6e8bb 8903 }
8aaca4c0 8904 } else {
9ee6e8bb
PB
8905 /* While branches must always occur at the end of an IT block,
8906 there are a few other things that can cause us to terminate
8907 the TB in the middle of an IT block:
8908 - Exception generating instructions (bkpt, swi, undefined).
8909 - Page boundaries.
8910 - Hardware watchpoints.
8911 Hardware breakpoints have already been handled and skip this code.
8912 */
8913 gen_set_condexec(dc);
8aaca4c0 8914 switch(dc->is_jmp) {
8aaca4c0 8915 case DISAS_NEXT:
6e256c93 8916 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8917 break;
8918 default:
8919 case DISAS_JUMP:
8920 case DISAS_UPDATE:
8921 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8922 tcg_gen_exit_tb(0);
8aaca4c0
FB
8923 break;
8924 case DISAS_TB_JUMP:
8925 /* nothing more to generate */
8926 break;
9ee6e8bb 8927 case DISAS_WFI:
d9ba4830 8928 gen_helper_wfi();
9ee6e8bb
PB
8929 break;
8930 case DISAS_SWI:
d9ba4830 8931 gen_exception(EXCP_SWI);
9ee6e8bb 8932 break;
8aaca4c0 8933 }
e50e6a20
FB
8934 if (dc->condjmp) {
8935 gen_set_label(dc->condlabel);
9ee6e8bb 8936 gen_set_condexec(dc);
6e256c93 8937 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8938 dc->condjmp = 0;
8939 }
2c0262af 8940 }
2e70f6ef 8941
9ee6e8bb 8942done_generating:
2e70f6ef 8943 gen_icount_end(tb, num_insns);
2c0262af
FB
8944 *gen_opc_ptr = INDEX_op_end;
8945
8946#ifdef DEBUG_DISAS
8fec2b8c 8947 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
8948 qemu_log("----------------\n");
8949 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8950 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
8951 qemu_log("\n");
2c0262af
FB
8952 }
8953#endif
b5ff1b31
FB
8954 if (search_pc) {
8955 j = gen_opc_ptr - gen_opc_buf;
8956 lj++;
8957 while (lj <= j)
8958 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8959 } else {
2c0262af 8960 tb->size = dc->pc - pc_start;
2e70f6ef 8961 tb->icount = num_insns;
b5ff1b31 8962 }
2c0262af
FB
8963}
8964
2cfc5f17 8965void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 8966{
2cfc5f17 8967 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
8968}
8969
2cfc5f17 8970void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 8971{
2cfc5f17 8972 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
8973}
8974
b5ff1b31
FB
8975static const char *cpu_mode_names[16] = {
8976 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8977 "???", "???", "???", "und", "???", "???", "???", "sys"
8978};
9ee6e8bb 8979
5fafdf24 8980void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8981 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8982 int flags)
2c0262af
FB
8983{
8984 int i;
06e80fc9 8985#if 0
bc380d17 8986 union {
b7bcbe95
FB
8987 uint32_t i;
8988 float s;
8989 } s0, s1;
8990 CPU_DoubleU d;
a94a6abf
PB
8991 /* ??? This assumes float64 and double have the same layout.
8992 Oh well, it's only debug dumps. */
8993 union {
8994 float64 f64;
8995 double d;
8996 } d0;
06e80fc9 8997#endif
b5ff1b31 8998 uint32_t psr;
2c0262af
FB
8999
9000 for(i=0;i<16;i++) {
7fe48483 9001 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9002 if ((i % 4) == 3)
7fe48483 9003 cpu_fprintf(f, "\n");
2c0262af 9004 else
7fe48483 9005 cpu_fprintf(f, " ");
2c0262af 9006 }
b5ff1b31 9007 psr = cpsr_read(env);
687fa640
TS
9008 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9009 psr,
b5ff1b31
FB
9010 psr & (1 << 31) ? 'N' : '-',
9011 psr & (1 << 30) ? 'Z' : '-',
9012 psr & (1 << 29) ? 'C' : '-',
9013 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9014 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9015 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9016
5e3f878a 9017#if 0
b7bcbe95 9018 for (i = 0; i < 16; i++) {
8e96005d
FB
9019 d.d = env->vfp.regs[i];
9020 s0.i = d.l.lower;
9021 s1.i = d.l.upper;
a94a6abf
PB
9022 d0.f64 = d.d;
9023 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9024 i * 2, (int)s0.i, s0.s,
a94a6abf 9025 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9026 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9027 d0.d);
b7bcbe95 9028 }
40f137e1 9029 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9030#endif
2c0262af 9031}
a6b025d3 9032
d2856f1a
AJ
9033void gen_pc_load(CPUState *env, TranslationBlock *tb,
9034 unsigned long searched_pc, int pc_pos, void *puc)
9035{
9036 env->regs[15] = gen_opc_pc[pc_pos];
9037}