/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
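
/* Note: new_tmp() and dead_tmp() calls must stay balanced; the num_temps
   counter only exists so that a leaked or double-freed temporary can be
   detected, presumably by a sanity check elsewhere in the translator.  */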

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    /* Sign extend from bit 15 of the swapped halfword, not bit 7: an
       8-bit extension here would discard the byte just shifted up.  */
    tcg_gen_ext16s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
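
/* Worked example of the sign-extension trick in gen_sbfx above, for
   shift = 0, width = 4 and an input whose low bits are 1100 (0xc):
     mask with (1 << 4) - 1:  var = 0x0000000c
     xor with signbit (8):    var = 0x00000004
     subtract signbit (8):    var = 0xfffffffc   (i.e. -4, 1100 sign extended)
   For a field with the sign bit clear the xor/sub pair cancels out.  */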

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

/* Unsigned 32x32->64 multiply.  */
static void gen_mull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}
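
/* In gen_mull and gen_imull the 64-bit product is returned through the
   argument pair itself: a receives the low 32 bits and b the high 32 bits.  */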

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* T0 &= ~T1.  Clobbers T1.  */
/* FIXME: Implement bic natively.  */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME:  Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
};
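
/* The shift == 0 special cases above mirror the ARM immediate shift
   encoding: LSR #0 and ASR #0 encode shifts by 32, and ROR #0 encodes RRX
   (rotate right by one through the carry flag).  Only LSL #0 is a plain
   move and needs no special handling.  */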

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
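
/* In both parallel add/subtract decoders above, op1 selects the helper
   family (s = signed modular, q = signed saturating, sh = signed halving,
   u = unsigned modular, uq = unsigned saturating, uh = unsigned halving)
   and op2 selects the operation.  Only the modular (s/u) variants update
   the GE flags, which is why only they are passed a pointer into CPUState.  */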

static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge:  N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt:  N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
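
/* gen_test_cc relies on how this translator stores the flags: CF holds the
   carry as 0 or 1, NF and VF hold values whose bit 31 is the N and V flag
   respectively, and ZF holds a value that is zero exactly when the Z flag
   is set.  Hence "eq" is a branch on ZF == 0 and "mi" is a signed compare
   of NF against 0.  */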

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
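
/* gen_bx follows the usual ARM interworking convention: bit 0 of the
   target address selects the instruction set (1 = Thumb, 0 = ARM) and is
   cleared from the PC itself.  */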

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
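
/* The gen_ld8s ... gen_st32 wrappers tie guest memory accesses into the
   temporary accounting above: loads return a fresh temporary owned by the
   caller, stores consume (and free) the value they are given.  The "index"
   argument is the MMU index forwarded to the qemu_ld/st ops; the call
   sites in this file pass IS_USER(s).  */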

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
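
/* vfp_reg_offset encodes the VFP register banking: each double-precision
   register Dn overlays two single-precision registers, with S(2n) in the
   low word and S(2n+1) in the high word, so the single-precision case
   picks the matching half of vfp.regs[reg >> 1].  */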

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
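
/* gen_iwmmxt_address decodes the coprocessor addressing modes used by the
   iwMMXt loads and stores: bit 24 selects pre- vs post-indexing, bit 23
   the sign of the scaled 8-bit offset, and bit 21 base-register write-back.
   The remaining combination (neither pre-indexed nor write-back) is only
   accepted with bit 23 set; otherwise the function returns 1 and the
   caller treats the instruction as undefined.  */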

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {         /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                            /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {          /* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {     /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                    /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {     /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                    /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {          /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {     /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                    /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {     /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                    /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                 /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                 /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_bic_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                 /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                 /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                 /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                 /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                     /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
2034 }
da6b5335 2035 dead_tmp(tmp);
18c9b560
AZ
2036 gen_op_iwmmxt_movq_wRn_M0(wrd);
2037 gen_op_iwmmxt_set_mup();
2038 gen_op_iwmmxt_set_cup();
2039 break;
2040 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2041 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2042 if (((insn >> 22) & 3) == 0)
2043 return 1;
18c9b560
AZ
2044 wrd = (insn >> 12) & 0xf;
2045 rd0 = (insn >> 16) & 0xf;
2046 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2047 tmp = new_tmp();
18c9b560 2048 switch ((insn >> 22) & 3) {
18c9b560 2049 case 1:
da6b5335
FN
2050 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2051 dead_tmp(tmp);
18c9b560 2052 return 1;
da6b5335
FN
2053 }
2054 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2055 break;
2056 case 2:
da6b5335
FN
2057 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2058 dead_tmp(tmp);
18c9b560 2059 return 1;
da6b5335
FN
2060 }
2061 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2062 break;
2063 case 3:
da6b5335
FN
2064 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2065 dead_tmp(tmp);
18c9b560 2066 return 1;
da6b5335
FN
2067 }
2068 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2069 break;
2070 }
da6b5335 2071 dead_tmp(tmp);
18c9b560
AZ
2072 gen_op_iwmmxt_movq_wRn_M0(wrd);
2073 gen_op_iwmmxt_set_mup();
2074 gen_op_iwmmxt_set_cup();
2075 break;
2076 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2077 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2078 wrd = (insn >> 12) & 0xf;
2079 rd0 = (insn >> 16) & 0xf;
2080 rd1 = (insn >> 0) & 0xf;
2081 gen_op_iwmmxt_movq_M0_wRn(rd0);
2082 switch ((insn >> 22) & 3) {
2083 case 0:
2084 if (insn & (1 << 21))
2085 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2086 else
2087 gen_op_iwmmxt_minub_M0_wRn(rd1);
2088 break;
2089 case 1:
2090 if (insn & (1 << 21))
2091 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2092 else
2093 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2094 break;
2095 case 2:
2096 if (insn & (1 << 21))
2097 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2098 else
2099 gen_op_iwmmxt_minul_M0_wRn(rd1);
2100 break;
2101 case 3:
2102 return 1;
2103 }
2104 gen_op_iwmmxt_movq_wRn_M0(wrd);
2105 gen_op_iwmmxt_set_mup();
2106 break;
2107 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2108 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2109 wrd = (insn >> 12) & 0xf;
2110 rd0 = (insn >> 16) & 0xf;
2111 rd1 = (insn >> 0) & 0xf;
2112 gen_op_iwmmxt_movq_M0_wRn(rd0);
2113 switch ((insn >> 22) & 3) {
2114 case 0:
2115 if (insn & (1 << 21))
2116 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2117 else
2118 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2119 break;
2120 case 1:
2121 if (insn & (1 << 21))
2122 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2123 else
2124 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2125 break;
2126 case 2:
2127 if (insn & (1 << 21))
2128 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2129 else
2130 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2131 break;
2132 case 3:
2133 return 1;
2134 }
2135 gen_op_iwmmxt_movq_wRn_M0(wrd);
2136 gen_op_iwmmxt_set_mup();
2137 break;
2138 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2139 case 0x402: case 0x502: case 0x602: case 0x702:
2140 wrd = (insn >> 12) & 0xf;
2141 rd0 = (insn >> 16) & 0xf;
2142 rd1 = (insn >> 0) & 0xf;
2143 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2144 tmp = tcg_const_i32((insn >> 20) & 3);
2145 iwmmxt_load_reg(cpu_V1, rd1);
2146 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2147 tcg_temp_free(tmp);
18c9b560
AZ
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2150 break;
2151 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2152 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2153 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2154 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2155 wrd = (insn >> 12) & 0xf;
2156 rd0 = (insn >> 16) & 0xf;
2157 rd1 = (insn >> 0) & 0xf;
2158 gen_op_iwmmxt_movq_M0_wRn(rd0);
2159 switch ((insn >> 20) & 0xf) {
2160 case 0x0:
2161 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2162 break;
2163 case 0x1:
2164 gen_op_iwmmxt_subub_M0_wRn(rd1);
2165 break;
2166 case 0x3:
2167 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2168 break;
2169 case 0x4:
2170 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2171 break;
2172 case 0x5:
2173 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2174 break;
2175 case 0x7:
2176 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2177 break;
2178 case 0x8:
2179 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2180 break;
2181 case 0x9:
2182 gen_op_iwmmxt_subul_M0_wRn(rd1);
2183 break;
2184 case 0xb:
2185 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2186 break;
2187 default:
2188 return 1;
2189 }
2190 gen_op_iwmmxt_movq_wRn_M0(wrd);
2191 gen_op_iwmmxt_set_mup();
2192 gen_op_iwmmxt_set_cup();
2193 break;
2194 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2195 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2196 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2197 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2201 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2202 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2203 tcg_temp_free(tmp);
18c9b560
AZ
2204 gen_op_iwmmxt_movq_wRn_M0(wrd);
2205 gen_op_iwmmxt_set_mup();
2206 gen_op_iwmmxt_set_cup();
2207 break;
2208 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2209 case 0x418: case 0x518: case 0x618: case 0x718:
2210 case 0x818: case 0x918: case 0xa18: case 0xb18:
2211 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2212 wrd = (insn >> 12) & 0xf;
2213 rd0 = (insn >> 16) & 0xf;
2214 rd1 = (insn >> 0) & 0xf;
2215 gen_op_iwmmxt_movq_M0_wRn(rd0);
2216 switch ((insn >> 20) & 0xf) {
2217 case 0x0:
2218 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2219 break;
2220 case 0x1:
2221 gen_op_iwmmxt_addub_M0_wRn(rd1);
2222 break;
2223 case 0x3:
2224 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2225 break;
2226 case 0x4:
2227 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2228 break;
2229 case 0x5:
2230 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2231 break;
2232 case 0x7:
2233 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2234 break;
2235 case 0x8:
2236 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2237 break;
2238 case 0x9:
2239 gen_op_iwmmxt_addul_M0_wRn(rd1);
2240 break;
2241 case 0xb:
2242 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2243 break;
2244 default:
2245 return 1;
2246 }
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 gen_op_iwmmxt_set_cup();
2250 break;
2251 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2252 case 0x408: case 0x508: case 0x608: case 0x708:
2253 case 0x808: case 0x908: case 0xa08: case 0xb08:
2254 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2255 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2256 return 1;
18c9b560
AZ
2257 wrd = (insn >> 12) & 0xf;
2258 rd0 = (insn >> 16) & 0xf;
2259 rd1 = (insn >> 0) & 0xf;
2260 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2261 switch ((insn >> 22) & 3) {
18c9b560
AZ
2262 case 1:
2263 if (insn & (1 << 21))
2264 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2265 else
2266 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2267 break;
2268 case 2:
2269 if (insn & (1 << 21))
2270 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2271 else
2272 gen_op_iwmmxt_packul_M0_wRn(rd1);
2273 break;
2274 case 3:
2275 if (insn & (1 << 21))
2276 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2277 else
2278 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2279 break;
2280 }
2281 gen_op_iwmmxt_movq_wRn_M0(wrd);
2282 gen_op_iwmmxt_set_mup();
2283 gen_op_iwmmxt_set_cup();
2284 break;
2285 case 0x201: case 0x203: case 0x205: case 0x207:
2286 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2287 case 0x211: case 0x213: case 0x215: case 0x217:
2288 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2289 wrd = (insn >> 5) & 0xf;
2290 rd0 = (insn >> 12) & 0xf;
2291 rd1 = (insn >> 0) & 0xf;
2292 if (rd0 == 0xf || rd1 == 0xf)
2293 return 1;
2294 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2295 tmp = load_reg(s, rd0);
2296 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2297 switch ((insn >> 16) & 0xf) {
2298 case 0x0: /* TMIA */
da6b5335 2299 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2300 break;
2301 case 0x8: /* TMIAPH */
da6b5335 2302 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2303 break;
2304 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2305 if (insn & (1 << 16))
da6b5335 2306 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2307 if (insn & (1 << 17))
da6b5335
FN
2308 tcg_gen_shri_i32(tmp2, tmp2, 16);
2309 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2310 break;
2311 default:
da6b5335
FN
2312 dead_tmp(tmp2);
2313 dead_tmp(tmp);
18c9b560
AZ
2314 return 1;
2315 }
da6b5335
FN
2316 dead_tmp(tmp2);
2317 dead_tmp(tmp);
18c9b560
AZ
2318 gen_op_iwmmxt_movq_wRn_M0(wrd);
2319 gen_op_iwmmxt_set_mup();
2320 break;
2321 default:
2322 return 1;
2323 }
2324
2325 return 0;
2326}
2327
2328/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2329 (i.e. an undefined instruction). */
2330static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2331{
2332 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2333 TCGv tmp, tmp2;
18c9b560
AZ
2334
2335 if ((insn & 0x0ff00f10) == 0x0e200010) {
2336 /* Multiply with Internal Accumulate Format */
2337 rd0 = (insn >> 12) & 0xf;
2338 rd1 = insn & 0xf;
2339 acc = (insn >> 5) & 7;
2340
2341 if (acc != 0)
2342 return 1;
2343
3a554c0f
FN
2344 tmp = load_reg(s, rd0);
2345 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2346 switch ((insn >> 16) & 0xf) {
2347 case 0x0: /* MIA */
3a554c0f 2348 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2349 break;
2350 case 0x8: /* MIAPH */
3a554c0f 2351 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2352 break;
2353 case 0xc: /* MIABB */
2354 case 0xd: /* MIABT */
2355 case 0xe: /* MIATB */
2356 case 0xf: /* MIATT */
18c9b560 2357 if (insn & (1 << 16))
3a554c0f 2358 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2359 if (insn & (1 << 17))
3a554c0f
FN
2360 tcg_gen_shri_i32(tmp2, tmp2, 16);
2361 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2362 break;
2363 default:
2364 return 1;
2365 }
3a554c0f
FN
2366 dead_tmp(tmp2);
2367 dead_tmp(tmp);
18c9b560
AZ
2368
2369 gen_op_iwmmxt_movq_wRn_M0(acc);
2370 return 0;
2371 }
2372
2373 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2374 /* Internal Accumulator Access Format */
2375 rdhi = (insn >> 16) & 0xf;
2376 rdlo = (insn >> 12) & 0xf;
2377 acc = insn & 7;
2378
2379 if (acc != 0)
2380 return 1;
2381
2382 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2383 iwmmxt_load_reg(cpu_V0, acc);
2384 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2385 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2386 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2387 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2388 } else { /* MAR */
3a554c0f
FN
2389 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2390 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2391 }
2392 return 0;
2393 }
2394
2395 return 1;
2396}
2397
c1713132
AZ
2398/* Disassemble system coprocessor instruction. Return nonzero if
2399 instruction is not defined. */
2400static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2401{
8984bd2e 2402 TCGv tmp;
c1713132
AZ
2403 uint32_t rd = (insn >> 12) & 0xf;
2404 uint32_t cp = (insn >> 8) & 0xf;
2405 if (IS_USER(s)) {
2406 return 1;
2407 }
2408
18c9b560 2409 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2410 if (!env->cp[cp].cp_read)
2411 return 1;
8984bd2e
PB
2412 gen_set_pc_im(s->pc);
2413 tmp = new_tmp();
2414 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2415 store_reg(s, rd, tmp);
c1713132
AZ
2416 } else {
2417 if (!env->cp[cp].cp_write)
2418 return 1;
8984bd2e
PB
2419 gen_set_pc_im(s->pc);
2420 tmp = load_reg(s, rd);
2421 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
a60de947 2422 dead_tmp(tmp);
c1713132
AZ
2423 }
2424 return 0;
2425}
2426
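/* cp15 is normally inaccessible from user mode; the check below allows the
   architected exceptions: the c13 software thread ID (TLS) registers and
   the c7 barrier operations (ISB, DSB, DMB).  */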
9ee6e8bb
PB
2427static int cp15_user_ok(uint32_t insn)
2428{
2429 int cpn = (insn >> 16) & 0xf;
2430 int cpm = insn & 0xf;
2431 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2432
2433 if (cpn == 13 && cpm == 0) {
2434 /* TLS register. */
2435 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2436 return 1;
2437 }
2438 if (cpn == 7) {
2439 /* ISB, DSB, DMB. */
2440 if ((cpm == 5 && op == 4)
2441 || (cpm == 10 && (op == 4 || op == 5)))
2442 return 1;
2443 }
2444 return 0;
2445}
2446
b5ff1b31
FB
2447/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2448 instruction is not defined. */
a90b7318 2449static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2450{
2451 uint32_t rd;
8984bd2e 2452 TCGv tmp;
b5ff1b31 2453
9ee6e8bb
PB
2454 /* M profile cores use memory mapped registers instead of cp15. */
2455 if (arm_feature(env, ARM_FEATURE_M))
2456 return 1;
2457
2458 if ((insn & (1 << 25)) == 0) {
2459 if (insn & (1 << 20)) {
2460 /* mrrc */
2461 return 1;
2462 }
2463 /* mcrr. Used for block cache operations, so implement as no-op. */
2464 return 0;
2465 }
2466 if ((insn & (1 << 4)) == 0) {
2467 /* cdp */
2468 return 1;
2469 }
2470 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2471 return 1;
2472 }
9332f9da
FB
2473 if ((insn & 0x0fff0fff) == 0x0e070f90
2474 || (insn & 0x0fff0fff) == 0x0e070f58) {
2475 /* Wait for interrupt. */
8984bd2e 2476 gen_set_pc_im(s->pc);
9ee6e8bb 2477 s->is_jmp = DISAS_WFI;
9332f9da
FB
2478 return 0;
2479 }
b5ff1b31 2480 rd = (insn >> 12) & 0xf;
18c9b560 2481 if (insn & ARM_CP_RW_BIT) {
8984bd2e
PB
2482 tmp = new_tmp();
2483 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
b5ff1b31
FB
2484 /* If the destination register is r15 then set the condition codes. */
2485 if (rd != 15)
8984bd2e
PB
2486 store_reg(s, rd, tmp);
2487 else
2488 dead_tmp(tmp);
b5ff1b31 2489 } else {
8984bd2e
PB
2490 tmp = load_reg(s, rd);
2491 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2492 dead_tmp(tmp);
a90b7318
AZ
2493 /* Normally we would always end the TB here, but Linux
2494 * arch/arm/mach-pxa/sleep.S expects two instructions following
2495 * an MMU enable to execute from cache. Imitate this behaviour. */
2496 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2497 (insn & 0x0fff0fff) != 0x0e010f10)
2498 gen_lookup_tb(s);
b5ff1b31 2499 }
b5ff1b31
FB
2500 return 0;
2501}
2502
9ee6e8bb
PB
2503#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2504#define VFP_SREG(insn, bigbit, smallbit) \
2505 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2506#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2507 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2508 reg = (((insn) >> (bigbit)) & 0x0f) \
2509 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2510 } else { \
2511 if (insn & (1 << (smallbit))) \
2512 return 1; \
2513 reg = ((insn) >> (bigbit)) & 0x0f; \
2514 }} while (0)
2515
2516#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2517#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2518#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2519#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2520#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2521#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
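/* Worked example of the register decode above: for a single-precision
   destination VFP_SREG_D expands to ((insn >> 11) & 0x1e) | ((insn >> 22) & 1),
   i.e. Sd = (bits[15:12] << 1) | D, so bits[15:12] = 0101 with D = 1 gives s11.
   For double precision the D bit is only valid with VFP3 (32 D registers),
   where it becomes bit 4 of the register number (d21 in the same example);
   otherwise a set D bit makes the instruction UNDEF.  */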
2522
4373f3ce
PB
2523/* Move between integer and VFP cores. */
2524static TCGv gen_vfp_mrs(void)
2525{
2526 TCGv tmp = new_tmp();
2527 tcg_gen_mov_i32(tmp, cpu_F0s);
2528 return tmp;
2529}
2530
2531static void gen_vfp_msr(TCGv tmp)
2532{
2533 tcg_gen_mov_i32(cpu_F0s, tmp);
2534 dead_tmp(tmp);
2535}
2536
9ee6e8bb
PB
2537static inline int
2538vfp_enabled(CPUState * env)
2539{
2540 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2541}
2542
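/* The three helpers below replicate a scalar across a 32-bit word:
   gen_neon_dup_u8 copies one byte (optionally extracted with a right shift)
   into all four byte lanes, and the 16-bit variants copy the low or high
   halfword into both halves.  They are used for VDUP and for scalar
   operands of Neon instructions.  */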
ad69471c
PB
2543static void gen_neon_dup_u8(TCGv var, int shift)
2544{
2545 TCGv tmp = new_tmp();
2546 if (shift)
2547 tcg_gen_shri_i32(var, var, shift);
86831435 2548 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2549 tcg_gen_shli_i32(tmp, var, 8);
2550 tcg_gen_or_i32(var, var, tmp);
2551 tcg_gen_shli_i32(tmp, var, 16);
2552 tcg_gen_or_i32(var, var, tmp);
2553 dead_tmp(tmp);
2554}
2555
2556static void gen_neon_dup_low16(TCGv var)
2557{
2558 TCGv tmp = new_tmp();
86831435 2559 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2560 tcg_gen_shli_i32(tmp, var, 16);
2561 tcg_gen_or_i32(var, var, tmp);
2562 dead_tmp(tmp);
2563}
2564
2565static void gen_neon_dup_high16(TCGv var)
2566{
2567 TCGv tmp = new_tmp();
2568 tcg_gen_andi_i32(var, var, 0xffff0000);
2569 tcg_gen_shri_i32(tmp, var, 16);
2570 tcg_gen_or_i32(var, var, tmp);
2571 dead_tmp(tmp);
2572}
2573
b7bcbe95
FB
2574/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2575 (i.e. an undefined instruction). */
2576static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2577{
2578 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2579 int dp, veclen;
312eea9f 2580 TCGv addr;
4373f3ce 2581 TCGv tmp;
ad69471c 2582 TCGv tmp2;
b7bcbe95 2583
40f137e1
PB
2584 if (!arm_feature(env, ARM_FEATURE_VFP))
2585 return 1;
2586
9ee6e8bb
PB
2587 if (!vfp_enabled(env)) {
2588 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2589 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2590 return 1;
2591 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2592 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2593 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2594 return 1;
2595 }
b7bcbe95
FB
2596 dp = ((insn & 0xf00) == 0xb00);
2597 switch ((insn >> 24) & 0xf) {
2598 case 0xe:
2599 if (insn & (1 << 4)) {
2600 /* single register transfer */
b7bcbe95
FB
2601 rd = (insn >> 12) & 0xf;
2602 if (dp) {
9ee6e8bb
PB
2603 int size;
2604 int pass;
2605
2606 VFP_DREG_N(rn, insn);
2607 if (insn & 0xf)
b7bcbe95 2608 return 1;
9ee6e8bb
PB
2609 if (insn & 0x00c00060
2610 && !arm_feature(env, ARM_FEATURE_NEON))
2611 return 1;
2612
2613 pass = (insn >> 21) & 1;
2614 if (insn & (1 << 22)) {
2615 size = 0;
2616 offset = ((insn >> 5) & 3) * 8;
2617 } else if (insn & (1 << 5)) {
2618 size = 1;
2619 offset = (insn & (1 << 6)) ? 16 : 0;
2620 } else {
2621 size = 2;
2622 offset = 0;
2623 }
18c9b560 2624 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2625 /* vfp->arm */
ad69471c 2626 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2627 switch (size) {
2628 case 0:
9ee6e8bb 2629 if (offset)
ad69471c 2630 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2631 if (insn & (1 << 23))
ad69471c 2632 gen_uxtb(tmp);
9ee6e8bb 2633 else
ad69471c 2634 gen_sxtb(tmp);
9ee6e8bb
PB
2635 break;
2636 case 1:
9ee6e8bb
PB
2637 if (insn & (1 << 23)) {
2638 if (offset) {
ad69471c 2639 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2640 } else {
ad69471c 2641 gen_uxth(tmp);
9ee6e8bb
PB
2642 }
2643 } else {
2644 if (offset) {
ad69471c 2645 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2646 } else {
ad69471c 2647 gen_sxth(tmp);
9ee6e8bb
PB
2648 }
2649 }
2650 break;
2651 case 2:
9ee6e8bb
PB
2652 break;
2653 }
ad69471c 2654 store_reg(s, rd, tmp);
b7bcbe95
FB
2655 } else {
2656 /* arm->vfp */
ad69471c 2657 tmp = load_reg(s, rd);
9ee6e8bb
PB
2658 if (insn & (1 << 23)) {
2659 /* VDUP */
2660 if (size == 0) {
ad69471c 2661 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2662 } else if (size == 1) {
ad69471c 2663 gen_neon_dup_low16(tmp);
9ee6e8bb 2664 }
cbbccffc
PB
2665 for (n = 0; n <= pass * 2; n++) {
2666 tmp2 = new_tmp();
2667 tcg_gen_mov_i32(tmp2, tmp);
2668 neon_store_reg(rn, n, tmp2);
2669 }
2670 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2671 } else {
2672 /* VMOV */
2673 switch (size) {
2674 case 0:
ad69471c
PB
2675 tmp2 = neon_load_reg(rn, pass);
2676 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2677 dead_tmp(tmp2);
9ee6e8bb
PB
2678 break;
2679 case 1:
ad69471c
PB
2680 tmp2 = neon_load_reg(rn, pass);
2681 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2682 dead_tmp(tmp2);
9ee6e8bb
PB
2683 break;
2684 case 2:
9ee6e8bb
PB
2685 break;
2686 }
ad69471c 2687 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2688 }
b7bcbe95 2689 }
9ee6e8bb
PB
2690 } else { /* !dp */
2691 if ((insn & 0x6f) != 0x00)
2692 return 1;
2693 rn = VFP_SREG_N(insn);
18c9b560 2694 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2695 /* vfp->arm */
2696 if (insn & (1 << 21)) {
2697 /* system register */
40f137e1 2698 rn >>= 1;
9ee6e8bb 2699
b7bcbe95 2700 switch (rn) {
40f137e1 2701 case ARM_VFP_FPSID:
4373f3ce 2702 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2703 VFP3 restricts all id registers to privileged
2704 accesses. */
2705 if (IS_USER(s)
2706 && arm_feature(env, ARM_FEATURE_VFP3))
2707 return 1;
4373f3ce 2708 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2709 break;
40f137e1 2710 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2711 if (IS_USER(s))
2712 return 1;
4373f3ce 2713 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2714 break;
40f137e1
PB
2715 case ARM_VFP_FPINST:
2716 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2717 /* Not present in VFP3. */
2718 if (IS_USER(s)
2719 || arm_feature(env, ARM_FEATURE_VFP3))
2720 return 1;
4373f3ce 2721 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2722 break;
40f137e1 2723 case ARM_VFP_FPSCR:
601d70b9 2724 if (rd == 15) {
4373f3ce
PB
2725 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2726 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2727 } else {
2728 tmp = new_tmp();
2729 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2730 }
b7bcbe95 2731 break;
9ee6e8bb
PB
2732 case ARM_VFP_MVFR0:
2733 case ARM_VFP_MVFR1:
2734 if (IS_USER(s)
2735 || !arm_feature(env, ARM_FEATURE_VFP3))
2736 return 1;
4373f3ce 2737 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2738 break;
b7bcbe95
FB
2739 default:
2740 return 1;
2741 }
2742 } else {
2743 gen_mov_F0_vreg(0, rn);
4373f3ce 2744 tmp = gen_vfp_mrs();
b7bcbe95
FB
2745 }
2746 if (rd == 15) {
b5ff1b31 2747 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2748 gen_set_nzcv(tmp);
2749 dead_tmp(tmp);
2750 } else {
2751 store_reg(s, rd, tmp);
2752 }
b7bcbe95
FB
2753 } else {
2754 /* arm->vfp */
4373f3ce 2755 tmp = load_reg(s, rd);
b7bcbe95 2756 if (insn & (1 << 21)) {
40f137e1 2757 rn >>= 1;
b7bcbe95
FB
2758 /* system register */
2759 switch (rn) {
40f137e1 2760 case ARM_VFP_FPSID:
9ee6e8bb
PB
2761 case ARM_VFP_MVFR0:
2762 case ARM_VFP_MVFR1:
b7bcbe95
FB
2763 /* Writes are ignored. */
2764 break;
40f137e1 2765 case ARM_VFP_FPSCR:
4373f3ce
PB
2766 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2767 dead_tmp(tmp);
b5ff1b31 2768 gen_lookup_tb(s);
b7bcbe95 2769 break;
40f137e1 2770 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2771 if (IS_USER(s))
2772 return 1;
4373f3ce 2773 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2774 gen_lookup_tb(s);
2775 break;
2776 case ARM_VFP_FPINST:
2777 case ARM_VFP_FPINST2:
4373f3ce 2778 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2779 break;
b7bcbe95
FB
2780 default:
2781 return 1;
2782 }
2783 } else {
4373f3ce 2784 gen_vfp_msr(tmp);
b7bcbe95
FB
2785 gen_mov_vreg_F0(0, rn);
2786 }
2787 }
2788 }
2789 } else {
2790 /* data processing */
2791 /* The opcode is in bits 23, 21, 20 and 6. */
2792 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2793 if (dp) {
2794 if (op == 15) {
2795 /* rn is opcode */
2796 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2797 } else {
2798 /* rn is register number */
9ee6e8bb 2799 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2800 }
2801
2802 if (op == 15 && (rn == 15 || rn > 17)) {
2803 /* Integer or single precision destination. */
9ee6e8bb 2804 rd = VFP_SREG_D(insn);
b7bcbe95 2805 } else {
9ee6e8bb 2806 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2807 }
2808
2809 if (op == 15 && (rn == 16 || rn == 17)) {
2810 /* Integer source. */
2811 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2812 } else {
9ee6e8bb 2813 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2814 }
2815 } else {
9ee6e8bb 2816 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2817 if (op == 15 && rn == 15) {
2818 /* Double precision destination. */
9ee6e8bb
PB
2819 VFP_DREG_D(rd, insn);
2820 } else {
2821 rd = VFP_SREG_D(insn);
2822 }
2823 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2824 }
2825
2826 veclen = env->vfp.vec_len;
2827 if (op == 15 && rn > 3)
2828 veclen = 0;
2829
2830 /* Shut up compiler warnings. */
2831 delta_m = 0;
2832 delta_d = 0;
2833 bank_mask = 0;
3b46e624 2834
b7bcbe95
FB
2835 if (veclen > 0) {
2836 if (dp)
2837 bank_mask = 0xc;
2838 else
2839 bank_mask = 0x18;
2840
2841 /* Figure out what type of vector operation this is. */
2842 if ((rd & bank_mask) == 0) {
2843 /* scalar */
2844 veclen = 0;
2845 } else {
2846 if (dp)
2847 delta_d = (env->vfp.vec_stride >> 1) + 1;
2848 else
2849 delta_d = env->vfp.vec_stride + 1;
2850
2851 if ((rm & bank_mask) == 0) {
2852 /* mixed scalar/vector */
2853 delta_m = 0;
2854 } else {
2855 /* vector */
2856 delta_m = delta_d;
2857 }
2858 }
2859 }
2860
2861 /* Load the initial operands. */
2862 if (op == 15) {
2863 switch (rn) {
2864 case 16:
2865 case 17:
2866 /* Integer source */
2867 gen_mov_F0_vreg(0, rm);
2868 break;
2869 case 8:
2870 case 9:
2871 /* Compare */
2872 gen_mov_F0_vreg(dp, rd);
2873 gen_mov_F1_vreg(dp, rm);
2874 break;
2875 case 10:
2876 case 11:
2877 /* Compare with zero */
2878 gen_mov_F0_vreg(dp, rd);
2879 gen_vfp_F1_ld0(dp);
2880 break;
9ee6e8bb
PB
2881 case 20:
2882 case 21:
2883 case 22:
2884 case 23:
644ad806
PB
2885 case 28:
2886 case 29:
2887 case 30:
2888 case 31:
9ee6e8bb
PB
2889 /* Source and destination the same. */
2890 gen_mov_F0_vreg(dp, rd);
2891 break;
b7bcbe95
FB
2892 default:
2893 /* One source operand. */
2894 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2895 break;
b7bcbe95
FB
2896 }
2897 } else {
2898 /* Two source operands. */
2899 gen_mov_F0_vreg(dp, rn);
2900 gen_mov_F1_vreg(dp, rm);
2901 }
2902
2903 for (;;) {
2904 /* Perform the calculation. */
2905 switch (op) {
2906 case 0: /* mac: fd + (fn * fm) */
2907 gen_vfp_mul(dp);
2908 gen_mov_F1_vreg(dp, rd);
2909 gen_vfp_add(dp);
2910 break;
2911 case 1: /* nmac: fd - (fn * fm) */
2912 gen_vfp_mul(dp);
2913 gen_vfp_neg(dp);
2914 gen_mov_F1_vreg(dp, rd);
2915 gen_vfp_add(dp);
2916 break;
2917 case 2: /* msc: -fd + (fn * fm) */
2918 gen_vfp_mul(dp);
2919 gen_mov_F1_vreg(dp, rd);
2920 gen_vfp_sub(dp);
2921 break;
2922 case 3: /* nmsc: -fd - (fn * fm) */
2923 gen_vfp_mul(dp);
b7bcbe95 2924 gen_vfp_neg(dp);
c9fb531a
PB
2925 gen_mov_F1_vreg(dp, rd);
2926 gen_vfp_sub(dp);
b7bcbe95
FB
2927 break;
2928 case 4: /* mul: fn * fm */
2929 gen_vfp_mul(dp);
2930 break;
2931 case 5: /* nmul: -(fn * fm) */
2932 gen_vfp_mul(dp);
2933 gen_vfp_neg(dp);
2934 break;
2935 case 6: /* add: fn + fm */
2936 gen_vfp_add(dp);
2937 break;
2938 case 7: /* sub: fn - fm */
2939 gen_vfp_sub(dp);
2940 break;
2941 case 8: /* div: fn / fm */
2942 gen_vfp_div(dp);
2943 break;
9ee6e8bb
PB
2944 case 14: /* fconst */
2945 if (!arm_feature(env, ARM_FEATURE_VFP3))
2946 return 1;
2947
2948 n = (insn << 12) & 0x80000000;
2949 i = ((insn >> 12) & 0x70) | (insn & 0xf);
2950 if (dp) {
2951 if (i & 0x40)
2952 i |= 0x3f80;
2953 else
2954 i |= 0x4000;
2955 n |= i << 16;
4373f3ce 2956 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
2957 } else {
2958 if (i & 0x40)
2959 i |= 0x780;
2960 else
2961 i |= 0x800;
2962 n |= i << 19;
5b340b51 2963 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 2964 }
9ee6e8bb 2965 break;
b7bcbe95
FB
2966 case 15: /* extension space */
2967 switch (rn) {
2968 case 0: /* cpy */
2969 /* no-op */
2970 break;
2971 case 1: /* abs */
2972 gen_vfp_abs(dp);
2973 break;
2974 case 2: /* neg */
2975 gen_vfp_neg(dp);
2976 break;
2977 case 3: /* sqrt */
2978 gen_vfp_sqrt(dp);
2979 break;
2980 case 8: /* cmp */
2981 gen_vfp_cmp(dp);
2982 break;
2983 case 9: /* cmpe */
2984 gen_vfp_cmpe(dp);
2985 break;
2986 case 10: /* cmpz */
2987 gen_vfp_cmp(dp);
2988 break;
2989 case 11: /* cmpez */
2990 gen_vfp_F1_ld0(dp);
2991 gen_vfp_cmpe(dp);
2992 break;
2993 case 15: /* single<->double conversion */
2994 if (dp)
4373f3ce 2995 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 2996 else
4373f3ce 2997 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
2998 break;
2999 case 16: /* fuito */
3000 gen_vfp_uito(dp);
3001 break;
3002 case 17: /* fsito */
3003 gen_vfp_sito(dp);
3004 break;
9ee6e8bb
PB
3005 case 20: /* fshto */
3006 if (!arm_feature(env, ARM_FEATURE_VFP3))
3007 return 1;
644ad806 3008 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3009 break;
3010 case 21: /* fslto */
3011 if (!arm_feature(env, ARM_FEATURE_VFP3))
3012 return 1;
644ad806 3013 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3014 break;
3015 case 22: /* fuhto */
3016 if (!arm_feature(env, ARM_FEATURE_VFP3))
3017 return 1;
644ad806 3018 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3019 break;
3020 case 23: /* fulto */
3021 if (!arm_feature(env, ARM_FEATURE_VFP3))
3022 return 1;
644ad806 3023 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3024 break;
b7bcbe95
FB
3025 case 24: /* ftoui */
3026 gen_vfp_toui(dp);
3027 break;
3028 case 25: /* ftouiz */
3029 gen_vfp_touiz(dp);
3030 break;
3031 case 26: /* ftosi */
3032 gen_vfp_tosi(dp);
3033 break;
3034 case 27: /* ftosiz */
3035 gen_vfp_tosiz(dp);
3036 break;
9ee6e8bb
PB
3037 case 28: /* ftosh */
3038 if (!arm_feature(env, ARM_FEATURE_VFP3))
3039 return 1;
644ad806 3040 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3041 break;
3042 case 29: /* ftosl */
3043 if (!arm_feature(env, ARM_FEATURE_VFP3))
3044 return 1;
644ad806 3045 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3046 break;
3047 case 30: /* ftouh */
3048 if (!arm_feature(env, ARM_FEATURE_VFP3))
3049 return 1;
644ad806 3050 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3051 break;
3052 case 31: /* ftoul */
3053 if (!arm_feature(env, ARM_FEATURE_VFP3))
3054 return 1;
644ad806 3055 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3056 break;
b7bcbe95
FB
3057 default: /* undefined */
3058 printf ("rn:%d\n", rn);
3059 return 1;
3060 }
3061 break;
3062 default: /* undefined */
3063 printf ("op:%d\n", op);
3064 return 1;
3065 }
3066
3067 /* Write back the result. */
3068 if (op == 15 && (rn >= 8 && rn <= 11))
3069 ; /* Comparison, do nothing. */
3070 else if (op == 15 && rn > 17)
3071 /* Integer result. */
3072 gen_mov_vreg_F0(0, rd);
3073 else if (op == 15 && rn == 15)
3074 /* conversion */
3075 gen_mov_vreg_F0(!dp, rd);
3076 else
3077 gen_mov_vreg_F0(dp, rd);
3078
3079 /* break out of the loop if we have finished */
3080 if (veclen == 0)
3081 break;
3082
3083 if (op == 15 && delta_m == 0) {
3084 /* single source one-many */
3085 while (veclen--) {
3086 rd = ((rd + delta_d) & (bank_mask - 1))
3087 | (rd & bank_mask);
3088 gen_mov_vreg_F0(dp, rd);
3089 }
3090 break;
3091 }
3092 /* Set up the next operands. */
3093 veclen--;
3094 rd = ((rd + delta_d) & (bank_mask - 1))
3095 | (rd & bank_mask);
3096
3097 if (op == 15) {
3098 /* One source operand. */
3099 rm = ((rm + delta_m) & (bank_mask - 1))
3100 | (rm & bank_mask);
3101 gen_mov_F0_vreg(dp, rm);
3102 } else {
3103 /* Two source operands. */
3104 rn = ((rn + delta_d) & (bank_mask - 1))
3105 | (rn & bank_mask);
3106 gen_mov_F0_vreg(dp, rn);
3107 if (delta_m) {
3108 rm = ((rm + delta_m) & (bank_mask - 1))
3109 | (rm & bank_mask);
3110 gen_mov_F1_vreg(dp, rm);
3111 }
3112 }
3113 }
3114 }
3115 break;
3116 case 0xc:
3117 case 0xd:
9ee6e8bb 3118 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3119 /* two-register transfer */
3120 rn = (insn >> 16) & 0xf;
3121 rd = (insn >> 12) & 0xf;
3122 if (dp) {
9ee6e8bb
PB
3123 VFP_DREG_M(rm, insn);
3124 } else {
3125 rm = VFP_SREG_M(insn);
3126 }
b7bcbe95 3127
18c9b560 3128 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3129 /* vfp->arm */
3130 if (dp) {
4373f3ce
PB
3131 gen_mov_F0_vreg(0, rm * 2);
3132 tmp = gen_vfp_mrs();
3133 store_reg(s, rd, tmp);
3134 gen_mov_F0_vreg(0, rm * 2 + 1);
3135 tmp = gen_vfp_mrs();
3136 store_reg(s, rn, tmp);
b7bcbe95
FB
3137 } else {
3138 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3139 tmp = gen_vfp_mrs();
3140 store_reg(s, rn, tmp);
b7bcbe95 3141 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3142 tmp = gen_vfp_mrs();
3143 store_reg(s, rd, tmp);
b7bcbe95
FB
3144 }
3145 } else {
3146 /* arm->vfp */
3147 if (dp) {
4373f3ce
PB
3148 tmp = load_reg(s, rd);
3149 gen_vfp_msr(tmp);
3150 gen_mov_vreg_F0(0, rm * 2);
3151 tmp = load_reg(s, rn);
3152 gen_vfp_msr(tmp);
3153 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3154 } else {
4373f3ce
PB
3155 tmp = load_reg(s, rn);
3156 gen_vfp_msr(tmp);
b7bcbe95 3157 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3158 tmp = load_reg(s, rd);
3159 gen_vfp_msr(tmp);
b7bcbe95
FB
3160 gen_mov_vreg_F0(0, rm + 1);
3161 }
3162 }
3163 } else {
3164 /* Load/store */
3165 rn = (insn >> 16) & 0xf;
3166 if (dp)
9ee6e8bb 3167 VFP_DREG_D(rd, insn);
b7bcbe95 3168 else
9ee6e8bb
PB
3169 rd = VFP_SREG_D(insn);
3170 if (s->thumb && rn == 15) {
312eea9f
FN
3171 addr = new_tmp();
3172 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3173 } else {
312eea9f 3174 addr = load_reg(s, rn);
9ee6e8bb 3175 }
b7bcbe95
FB
3176 if ((insn & 0x01200000) == 0x01000000) {
3177 /* Single load/store */
3178 offset = (insn & 0xff) << 2;
3179 if ((insn & (1 << 23)) == 0)
3180 offset = -offset;
312eea9f 3181 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3182 if (insn & (1 << 20)) {
312eea9f 3183 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3184 gen_mov_vreg_F0(dp, rd);
3185 } else {
3186 gen_mov_F0_vreg(dp, rd);
312eea9f 3187 gen_vfp_st(s, dp, addr);
b7bcbe95 3188 }
312eea9f 3189 dead_tmp(addr);
b7bcbe95
FB
3190 } else {
3191 /* load/store multiple */
3192 if (dp)
3193 n = (insn >> 1) & 0x7f;
3194 else
3195 n = insn & 0xff;
3196
3197 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3198 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3199
3200 if (dp)
3201 offset = 8;
3202 else
3203 offset = 4;
3204 for (i = 0; i < n; i++) {
18c9b560 3205 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3206 /* load */
312eea9f 3207 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3208 gen_mov_vreg_F0(dp, rd + i);
3209 } else {
3210 /* store */
3211 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3212 gen_vfp_st(s, dp, addr);
b7bcbe95 3213 }
312eea9f 3214 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3215 }
3216 if (insn & (1 << 21)) {
3217 /* writeback */
3218 if (insn & (1 << 24))
3219 offset = -offset * n;
3220 else if (dp && (insn & 1))
3221 offset = 4;
3222 else
3223 offset = 0;
3224
3225 if (offset != 0)
312eea9f
FN
3226 tcg_gen_addi_i32(addr, addr, offset);
3227 store_reg(s, rn, addr);
3228 } else {
3229 dead_tmp(addr);
b7bcbe95
FB
3230 }
3231 }
3232 }
3233 break;
3234 default:
3235 /* Should never happen. */
3236 return 1;
3237 }
3238 return 0;
3239}
3240
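/* Direct block chaining: if the destination lies in the same guest page as
   the start of the current TB we emit a goto_tb so the two TBs can be linked
   and control transfers without returning to the main loop; otherwise we
   just update the PC and exit to the main loop, which looks up the target.  */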
6e256c93 3241static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3242{
6e256c93
FB
3243 TranslationBlock *tb;
3244
3245 tb = s->tb;
3246 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3247 tcg_gen_goto_tb(n);
8984bd2e 3248 gen_set_pc_im(dest);
57fec1fe 3249 tcg_gen_exit_tb((long)tb + n);
6e256c93 3250 } else {
8984bd2e 3251 gen_set_pc_im(dest);
57fec1fe 3252 tcg_gen_exit_tb(0);
6e256c93 3253 }
c53be334
FB
3254}
3255
8aaca4c0
FB
3256static inline void gen_jmp (DisasContext *s, uint32_t dest)
3257{
551bd27f 3258 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3259 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3260 if (s->thumb)
d9ba4830
PB
3261 dest |= 1;
3262 gen_bx_im(s, dest);
8aaca4c0 3263 } else {
6e256c93 3264 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3265 s->is_jmp = DISAS_TB_JUMP;
3266 }
3267}
3268
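/* Operand selection for the signed 16x16->32 multiplies (SMULxy and
   friends): for each input, x/y pick either the top halfword (arithmetic
   shift right by 16) or the sign-extended bottom halfword before the
   32-bit multiply.  */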
d9ba4830 3269static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3270{
ee097184 3271 if (x)
d9ba4830 3272 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3273 else
d9ba4830 3274 gen_sxth(t0);
ee097184 3275 if (y)
d9ba4830 3276 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3277 else
d9ba4830
PB
3278 gen_sxth(t1);
3279 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3280}
3281
3282/* Return the mask of PSR bits set by a MSR instruction. */
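/* The flags argument is the 4-bit field mask from the instruction:
   bit 0 selects PSR[7:0] (control), bit 1 PSR[15:8] (extension),
   bit 2 PSR[23:16] (status) and bit 3 PSR[31:24] (flags).  Reserved bits,
   bits not present on this CPU, execution state bits (for CPSR writes)
   and privileged bits (in user mode) are then masked out.  */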
9ee6e8bb 3283static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3284 uint32_t mask;
3285
3286 mask = 0;
3287 if (flags & (1 << 0))
3288 mask |= 0xff;
3289 if (flags & (1 << 1))
3290 mask |= 0xff00;
3291 if (flags & (1 << 2))
3292 mask |= 0xff0000;
3293 if (flags & (1 << 3))
3294 mask |= 0xff000000;
9ee6e8bb 3295
2ae23e75 3296 /* Mask out undefined bits. */
9ee6e8bb
PB
3297 mask &= ~CPSR_RESERVED;
3298 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3299 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3300 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3301 mask &= ~CPSR_IT;
9ee6e8bb 3302 /* Mask out execution state bits. */
2ae23e75 3303 if (!spsr)
e160c51c 3304 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3305 /* Mask out privileged bits. */
3306 if (IS_USER(s))
9ee6e8bb 3307 mask &= CPSR_USER;
b5ff1b31
FB
3308 return mask;
3309}
3310
2fbac54b
FN
3311/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3312static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3313{
d9ba4830 3314 TCGv tmp;
b5ff1b31
FB
3315 if (spsr) {
3316 /* ??? This is also undefined in system mode. */
3317 if (IS_USER(s))
3318 return 1;
d9ba4830
PB
3319
3320 tmp = load_cpu_field(spsr);
3321 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3322 tcg_gen_andi_i32(t0, t0, mask);
3323 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3324 store_cpu_field(tmp, spsr);
b5ff1b31 3325 } else {
2fbac54b 3326 gen_set_cpsr(t0, mask);
b5ff1b31 3327 }
2fbac54b 3328 dead_tmp(t0);
b5ff1b31
FB
3329 gen_lookup_tb(s);
3330 return 0;
3331}
3332
2fbac54b
FN
3333/* Returns nonzero if access to the PSR is not permitted. */
3334static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3335{
3336 TCGv tmp;
3337 tmp = new_tmp();
3338 tcg_gen_movi_i32(tmp, val);
3339 return gen_set_psr(s, mask, spsr, tmp);
3340}
3341
e9bb4aa9
JR
3342/* Generate an old-style exception return. Marks pc as dead. */
3343static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3344{
d9ba4830 3345 TCGv tmp;
e9bb4aa9 3346 store_reg(s, 15, pc);
d9ba4830
PB
3347 tmp = load_cpu_field(spsr);
3348 gen_set_cpsr(tmp, 0xffffffff);
3349 dead_tmp(tmp);
b5ff1b31
FB
3350 s->is_jmp = DISAS_UPDATE;
3351}
3352
b0109805
PB
3353/* Generate a v6 exception return. Marks both values as dead. */
3354static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3355{
b0109805
PB
3356 gen_set_cpsr(cpsr, 0xffffffff);
3357 dead_tmp(cpsr);
3358 store_reg(s, 15, pc);
9ee6e8bb
PB
3359 s->is_jmp = DISAS_UPDATE;
3360}
3b46e624 3361
9ee6e8bb
PB
3362static inline void
3363gen_set_condexec (DisasContext *s)
3364{
3365 if (s->condexec_mask) {
8f01245e
PB
3366 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3367 TCGv tmp = new_tmp();
3368 tcg_gen_movi_i32(tmp, val);
d9ba4830 3369 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3370 }
3371}
3b46e624 3372
9ee6e8bb
PB
3373static void gen_nop_hint(DisasContext *s, int val)
3374{
3375 switch (val) {
3376 case 3: /* wfi */
8984bd2e 3377 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3378 s->is_jmp = DISAS_WFI;
3379 break;
3380 case 2: /* wfe */
3381 case 4: /* sev */
3382 /* TODO: Implement SEV and WFE. May help SMP performance. */
3383 default: /* nop */
3384 break;
3385 }
3386}
99c475ab 3387
ad69471c 3388#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
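/* Shorthand for the common 64-bit helper argument list where the destination
   and first source are both cpu_V0 and the second source is cpu_V1,
   e.g. gen_helper_neon_addl_u16(CPU_V001).  */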
9ee6e8bb 3389
dd8fbd78 3390static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3391{
3392 switch (size) {
dd8fbd78
FN
3393 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3394 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3395 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3396 default: return 1;
3397 }
3398 return 0;
3399}
3400
dd8fbd78 3401static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3402{
3403 switch (size) {
dd8fbd78
FN
3404 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3405 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3406 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3407 default: return;
3408 }
3409}
3410
3411/* 32-bit pairwise ops end up the same as the elementwise versions. */
3412#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3413#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3414#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3415#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3416
3417/* FIXME: This is wrong. They set the wrong overflow bit. */
3418#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3419#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3420#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3421#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3422
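/* The two macros below dispatch an element-wise Neon helper on the value
   ((size << 1) | u): 0..5 select the s8, u8, s16, u16, s32 and u32 variants
   respectively; 64-bit elements are handled separately, so any other value
   is treated as an undefined instruction.  */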
3423#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3424 switch ((size << 1) | u) { \
3425 case 0: \
dd8fbd78 3426 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3427 break; \
3428 case 1: \
dd8fbd78 3429 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3430 break; \
3431 case 2: \
dd8fbd78 3432 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3433 break; \
3434 case 3: \
dd8fbd78 3435 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3436 break; \
3437 case 4: \
dd8fbd78 3438 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3439 break; \
3440 case 5: \
dd8fbd78 3441 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3442 break; \
3443 default: return 1; \
3444 }} while (0)
9ee6e8bb
PB
3445
3446#define GEN_NEON_INTEGER_OP(name) do { \
3447 switch ((size << 1) | u) { \
ad69471c 3448 case 0: \
dd8fbd78 3449 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3450 break; \
3451 case 1: \
dd8fbd78 3452 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3453 break; \
3454 case 2: \
dd8fbd78 3455 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3456 break; \
3457 case 3: \
dd8fbd78 3458 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3459 break; \
3460 case 4: \
dd8fbd78 3461 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3462 break; \
3463 case 5: \
dd8fbd78 3464 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3465 break; \
9ee6e8bb
PB
3466 default: return 1; \
3467 }} while (0)
3468
dd8fbd78 3469static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3470{
dd8fbd78
FN
3471 TCGv tmp = new_tmp();
3472 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3473 return tmp;
9ee6e8bb
PB
3474}
3475
dd8fbd78 3476static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3477{
dd8fbd78
FN
3478 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3479 dead_tmp(var);
9ee6e8bb
PB
3480}
3481
dd8fbd78 3482static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3483{
dd8fbd78 3484 TCGv tmp;
9ee6e8bb 3485 if (size == 1) {
dd8fbd78 3486 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3487 } else {
dd8fbd78
FN
3488 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3489 if (reg & 1) {
3490 gen_neon_dup_low16(tmp);
3491 } else {
3492 gen_neon_dup_high16(tmp);
3493 }
9ee6e8bb 3494 }
dd8fbd78 3495 return tmp;
9ee6e8bb
PB
3496}
3497
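/* The helpers below implement one 32-bit step of VZIP/VUZP and VTRN: given
   two words holding adjacent elements they interleave (zip), de-interleave
   (unzip) or transpose the byte or halfword lanes, returning the results in
   t0/t1.  For 16-bit elements zip and unzip are the same permutation, so
   only one helper is needed.  */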
19457615
FN
3498static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3499{
3500 TCGv rd, rm, tmp;
3501
3502 rd = new_tmp();
3503 rm = new_tmp();
3504 tmp = new_tmp();
3505
3506 tcg_gen_andi_i32(rd, t0, 0xff);
3507 tcg_gen_shri_i32(tmp, t0, 8);
3508 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3509 tcg_gen_or_i32(rd, rd, tmp);
3510 tcg_gen_shli_i32(tmp, t1, 16);
3511 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3512 tcg_gen_or_i32(rd, rd, tmp);
3513 tcg_gen_shli_i32(tmp, t1, 8);
3514 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3515 tcg_gen_or_i32(rd, rd, tmp);
3516
3517 tcg_gen_shri_i32(rm, t0, 8);
3518 tcg_gen_andi_i32(rm, rm, 0xff);
3519 tcg_gen_shri_i32(tmp, t0, 16);
3520 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3521 tcg_gen_or_i32(rm, rm, tmp);
3522 tcg_gen_shli_i32(tmp, t1, 8);
3523 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3524 tcg_gen_or_i32(rm, rm, tmp);
3525 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3526 tcg_gen_or_i32(t1, rm, tmp);
3527 tcg_gen_mov_i32(t0, rd);
3528
3529 dead_tmp(tmp);
3530 dead_tmp(rm);
3531 dead_tmp(rd);
3532}
3533
3534static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3535{
3536 TCGv rd, rm, tmp;
3537
3538 rd = new_tmp();
3539 rm = new_tmp();
3540 tmp = new_tmp();
3541
3542 tcg_gen_andi_i32(rd, t0, 0xff);
3543 tcg_gen_shli_i32(tmp, t1, 8);
3544 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3545 tcg_gen_or_i32(rd, rd, tmp);
3546 tcg_gen_shli_i32(tmp, t0, 16);
3547 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3548 tcg_gen_or_i32(rd, rd, tmp);
3549 tcg_gen_shli_i32(tmp, t1, 24);
3550 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3551 tcg_gen_or_i32(rd, rd, tmp);
3552
3553 tcg_gen_andi_i32(rm, t1, 0xff000000);
3554 tcg_gen_shri_i32(tmp, t0, 8);
3555 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3556 tcg_gen_or_i32(rm, rm, tmp);
3557 tcg_gen_shri_i32(tmp, t1, 8);
3558 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3559 tcg_gen_or_i32(rm, rm, tmp);
3560 tcg_gen_shri_i32(tmp, t0, 16);
3561 tcg_gen_andi_i32(tmp, tmp, 0xff);
3562 tcg_gen_or_i32(t1, rm, tmp);
3563 tcg_gen_mov_i32(t0, rd);
3564
3565 dead_tmp(tmp);
3566 dead_tmp(rm);
3567 dead_tmp(rd);
3568}
3569
3570static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3571{
3572 TCGv tmp, tmp2;
3573
3574 tmp = new_tmp();
3575 tmp2 = new_tmp();
3576
3577 tcg_gen_andi_i32(tmp, t0, 0xffff);
3578 tcg_gen_shli_i32(tmp2, t1, 16);
3579 tcg_gen_or_i32(tmp, tmp, tmp2);
3580 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3581 tcg_gen_shri_i32(tmp2, t0, 16);
3582 tcg_gen_or_i32(t1, t1, tmp2);
3583 tcg_gen_mov_i32(t0, tmp);
3584
3585 dead_tmp(tmp2);
3586 dead_tmp(tmp);
3587}
3588
9ee6e8bb
PB
3589static void gen_neon_unzip(int reg, int q, int tmp, int size)
3590{
3591 int n;
dd8fbd78 3592 TCGv t0, t1;
9ee6e8bb
PB
3593
3594 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3595 t0 = neon_load_reg(reg, n);
3596 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3597 switch (size) {
dd8fbd78
FN
3598 case 0: gen_neon_unzip_u8(t0, t1); break;
3599 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3600 case 2: /* no-op */; break;
3601 default: abort();
3602 }
dd8fbd78
FN
3603 neon_store_scratch(tmp + n, t0);
3604 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3605 }
3606}
3607
19457615
FN
3608static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3609{
3610 TCGv rd, tmp;
3611
3612 rd = new_tmp();
3613 tmp = new_tmp();
3614
3615 tcg_gen_shli_i32(rd, t0, 8);
3616 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3617 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3618 tcg_gen_or_i32(rd, rd, tmp);
3619
3620 tcg_gen_shri_i32(t1, t1, 8);
3621 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3622 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3623 tcg_gen_or_i32(t1, t1, tmp);
3624 tcg_gen_mov_i32(t0, rd);
3625
3626 dead_tmp(tmp);
3627 dead_tmp(rd);
3628}
3629
3630static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3631{
3632 TCGv rd, tmp;
3633
3634 rd = new_tmp();
3635 tmp = new_tmp();
3636
3637 tcg_gen_shli_i32(rd, t0, 16);
3638 tcg_gen_andi_i32(tmp, t1, 0xffff);
3639 tcg_gen_or_i32(rd, rd, tmp);
3640 tcg_gen_shri_i32(t1, t1, 16);
3641 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3642 tcg_gen_or_i32(t1, t1, tmp);
3643 tcg_gen_mov_i32(t0, rd);
3644
3645 dead_tmp(tmp);
3646 dead_tmp(rd);
3647}
3648
3649
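/* The table below describes the VLDn/VSTn "multiple structures" forms,
   indexed by the op field: nregs is the number of D registers transferred,
   interleave controls how the elements of those registers are interleaved
   in memory, and spacing is the register number increment between the
   transferred registers.  */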
9ee6e8bb
PB
3650static struct {
3651 int nregs;
3652 int interleave;
3653 int spacing;
3654} neon_ls_element_type[11] = {
3655 {4, 4, 1},
3656 {4, 4, 2},
3657 {4, 1, 1},
3658 {4, 2, 1},
3659 {3, 3, 1},
3660 {3, 3, 2},
3661 {3, 1, 1},
3662 {1, 1, 1},
3663 {2, 2, 1},
3664 {2, 2, 2},
3665 {2, 1, 1}
3666};
3667
3668/* Translate a NEON load/store element instruction. Return nonzero if the
3669 instruction is invalid. */
3670static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3671{
3672 int rd, rn, rm;
3673 int op;
3674 int nregs;
3675 int interleave;
3676 int stride;
3677 int size;
3678 int reg;
3679 int pass;
3680 int load;
3681 int shift;
9ee6e8bb 3682 int n;
1b2b1e54 3683 TCGv addr;
b0109805 3684 TCGv tmp;
8f8e3aa4 3685 TCGv tmp2;
9ee6e8bb
PB
3686
3687 if (!vfp_enabled(env))
3688 return 1;
3689 VFP_DREG_D(rd, insn);
3690 rn = (insn >> 16) & 0xf;
3691 rm = insn & 0xf;
3692 load = (insn & (1 << 21)) != 0;
1b2b1e54 3693 addr = new_tmp();
9ee6e8bb
PB
3694 if ((insn & (1 << 23)) == 0) {
3695 /* Load/store all elements. */
3696 op = (insn >> 8) & 0xf;
3697 size = (insn >> 6) & 3;
3698 if (op > 10 || size == 3)
3699 return 1;
3700 nregs = neon_ls_element_type[op].nregs;
3701 interleave = neon_ls_element_type[op].interleave;
1b2b1e54 3702 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb
PB
3703 stride = (1 << size) * interleave;
3704 for (reg = 0; reg < nregs; reg++) {
3705 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
1b2b1e54 3706 tcg_gen_addi_i32(addr, cpu_R[rn], (1 << size) * reg);
9ee6e8bb 3707 } else if (interleave == 2 && nregs == 4 && reg == 2) {
1b2b1e54 3708 tcg_gen_addi_i32(addr, cpu_R[rn], 1 << size);
9ee6e8bb
PB
3709 }
3710 for (pass = 0; pass < 2; pass++) {
3711 if (size == 2) {
3712 if (load) {
1b2b1e54 3713 tmp = gen_ld32(addr, IS_USER(s));
ad69471c 3714 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3715 } else {
ad69471c 3716 tmp = neon_load_reg(rd, pass);
1b2b1e54 3717 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3718 }
1b2b1e54 3719 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb
PB
3720 } else if (size == 1) {
3721 if (load) {
1b2b1e54
FN
3722 tmp = gen_ld16u(addr, IS_USER(s));
3723 tcg_gen_addi_i32(addr, addr, stride);
3724 tmp2 = gen_ld16u(addr, IS_USER(s));
3725 tcg_gen_addi_i32(addr, addr, stride);
8f8e3aa4
PB
3726 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3727 dead_tmp(tmp2);
3728 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3729 } else {
8f8e3aa4
PB
3730 tmp = neon_load_reg(rd, pass);
3731 tmp2 = new_tmp();
3732 tcg_gen_shri_i32(tmp2, tmp, 16);
1b2b1e54
FN
3733 gen_st16(tmp, addr, IS_USER(s));
3734 tcg_gen_addi_i32(addr, addr, stride);
3735 gen_st16(tmp2, addr, IS_USER(s));
3736 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb
PB
3737 }
3738 } else /* size == 0 */ {
3739 if (load) {
a50f5b91 3740 TCGV_UNUSED(tmp2);
9ee6e8bb 3741 for (n = 0; n < 4; n++) {
1b2b1e54
FN
3742 tmp = gen_ld8u(addr, IS_USER(s));
3743 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3744 if (n == 0) {
8f8e3aa4 3745 tmp2 = tmp;
9ee6e8bb 3746 } else {
8f8e3aa4
PB
3747 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3748 dead_tmp(tmp);
9ee6e8bb 3749 }
9ee6e8bb 3750 }
8f8e3aa4 3751 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3752 } else {
8f8e3aa4 3753 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3754 for (n = 0; n < 4; n++) {
8f8e3aa4 3755 tmp = new_tmp();
9ee6e8bb 3756 if (n == 0) {
8f8e3aa4 3757 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3758 } else {
8f8e3aa4 3759 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3760 }
1b2b1e54
FN
3761 gen_st8(tmp, addr, IS_USER(s));
3762 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3763 }
8f8e3aa4 3764 dead_tmp(tmp2);
9ee6e8bb
PB
3765 }
3766 }
3767 }
3768 rd += neon_ls_element_type[op].spacing;
3769 }
3770 stride = nregs * 8;
3771 } else {
3772 size = (insn >> 10) & 3;
3773 if (size == 3) {
3774 /* Load single element to all lanes. */
3775 if (!load)
3776 return 1;
3777 size = (insn >> 6) & 3;
3778 nregs = ((insn >> 8) & 3) + 1;
3779 stride = (insn & (1 << 5)) ? 2 : 1;
1b2b1e54 3780 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb
PB
3781 for (reg = 0; reg < nregs; reg++) {
3782 switch (size) {
3783 case 0:
1b2b1e54 3784 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3785 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3786 break;
3787 case 1:
1b2b1e54 3788 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3789 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3790 break;
3791 case 2:
1b2b1e54 3792 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3793 break;
3794 case 3:
3795 return 1;
a50f5b91
PB
3796 default: /* Avoid compiler warnings. */
3797 abort();
99c475ab 3798 }
1b2b1e54 3799 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3800 tmp2 = new_tmp();
3801 tcg_gen_mov_i32(tmp2, tmp);
3802 neon_store_reg(rd, 0, tmp2);
3018f259 3803 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3804 rd += stride;
3805 }
3806 stride = (1 << size) * nregs;
3807 } else {
3808 /* Single element. */
3809 pass = (insn >> 7) & 1;
3810 switch (size) {
3811 case 0:
3812 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3813 stride = 1;
3814 break;
3815 case 1:
3816 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3817 stride = (insn & (1 << 5)) ? 2 : 1;
3818 break;
3819 case 2:
3820 shift = 0;
9ee6e8bb
PB
3821 stride = (insn & (1 << 6)) ? 2 : 1;
3822 break;
3823 default:
3824 abort();
3825 }
3826 nregs = ((insn >> 8) & 3) + 1;
1b2b1e54 3827 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb
PB
3828 for (reg = 0; reg < nregs; reg++) {
3829 if (load) {
9ee6e8bb
PB
3830 switch (size) {
3831 case 0:
1b2b1e54 3832 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3833 break;
3834 case 1:
1b2b1e54 3835 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3836 break;
3837 case 2:
1b2b1e54 3838 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3839 break;
a50f5b91
PB
3840 default: /* Avoid compiler warnings. */
3841 abort();
9ee6e8bb
PB
3842 }
3843 if (size != 2) {
8f8e3aa4
PB
3844 tmp2 = neon_load_reg(rd, pass);
3845 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3846 dead_tmp(tmp2);
9ee6e8bb 3847 }
8f8e3aa4 3848 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3849 } else { /* Store */
8f8e3aa4
PB
3850 tmp = neon_load_reg(rd, pass);
3851 if (shift)
3852 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3853 switch (size) {
3854 case 0:
1b2b1e54 3855 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3856 break;
3857 case 1:
1b2b1e54 3858 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3859 break;
3860 case 2:
1b2b1e54 3861 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3862 break;
99c475ab 3863 }
99c475ab 3864 }
9ee6e8bb 3865 rd += stride;
1b2b1e54 3866 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 3867 }
9ee6e8bb 3868 stride = nregs * (1 << size);
99c475ab 3869 }
9ee6e8bb 3870 }
1b2b1e54 3871 dead_tmp(addr);
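    /* Base register writeback for the element load/stores: rm == 15 means
       no writeback, rm == 13 post-increments the base by the amount
       transferred, and any other rm post-increments the base by that
       register.  */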
9ee6e8bb 3872 if (rm != 15) {
b26eefb6
PB
3873 TCGv base;
3874
3875 base = load_reg(s, rn);
9ee6e8bb 3876 if (rm == 13) {
b26eefb6 3877 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3878 } else {
b26eefb6
PB
3879 TCGv index;
3880 index = load_reg(s, rm);
3881 tcg_gen_add_i32(base, base, index);
3882 dead_tmp(index);
9ee6e8bb 3883 }
b26eefb6 3884 store_reg(s, rn, base);
9ee6e8bb
PB
3885 }
3886 return 0;
3887}
3b46e624 3888
8f8e3aa4
PB
3889/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3890static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3891{
3892 tcg_gen_and_i32(t, t, c);
3893 tcg_gen_bic_i32(f, f, c);
3894 tcg_gen_or_i32(dest, t, f);
3895}
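/* The VBSL/VBIT/VBIF cases in disas_neon_data_insn() below all go through
   gen_neon_bsl(), only permuting its arguments:
     VBSL: dest = (rn & rd) | (rm & ~rd)   (rd supplies the select mask)
     VBIT: dest = (rn & rm) | (rd & ~rm)   (insert rn bits where rm is set)
     VBIF: dest = (rd & rm) | (rn & ~rm)   (insert rn bits where rm is clear) */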
3896
a7812ae4 3897static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3898{
3899 switch (size) {
3900 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3901 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3902 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3903 default: abort();
3904 }
3905}
3906
a7812ae4 3907static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3908{
3909 switch (size) {
3910 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3911 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3912 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3913 default: abort();
3914 }
3915}
3916
a7812ae4 3917static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3918{
3919 switch (size) {
3920 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3921 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3922 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3923 default: abort();
3924 }
3925}
3926
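/* Variable shift used by the shift-and-narrow instructions below: q selects
   the rounding (rshl) helpers and u the unsigned ones.  The variable-shift
   helpers treat a negative count as a right shift, which is what the
   shift-and-narrow decode relies on when it rewrites right-shift immediates
   as negative counts. */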
3927static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3928 int q, int u)
3929{
3930 if (q) {
3931 if (u) {
3932 switch (size) {
3933 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3934 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3935 default: abort();
3936 }
3937 } else {
3938 switch (size) {
3939 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3940 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3941 default: abort();
3942 }
3943 }
3944 } else {
3945 if (u) {
3946 switch (size) {
 3947 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
 3948 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3949 default: abort();
3950 }
3951 } else {
3952 switch (size) {
3953 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3954 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3955 default: abort();
3956 }
3957 }
3958 }
3959}
3960
a7812ae4 3961static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
3962{
3963 if (u) {
3964 switch (size) {
3965 case 0: gen_helper_neon_widen_u8(dest, src); break;
3966 case 1: gen_helper_neon_widen_u16(dest, src); break;
3967 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3968 default: abort();
3969 }
3970 } else {
3971 switch (size) {
3972 case 0: gen_helper_neon_widen_s8(dest, src); break;
3973 case 1: gen_helper_neon_widen_s16(dest, src); break;
3974 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3975 default: abort();
3976 }
3977 }
3978 dead_tmp(src);
3979}
3980
3981static inline void gen_neon_addl(int size)
3982{
3983 switch (size) {
3984 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3985 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3986 case 2: tcg_gen_add_i64(CPU_V001); break;
3987 default: abort();
3988 }
3989}
3990
3991static inline void gen_neon_subl(int size)
3992{
3993 switch (size) {
3994 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3995 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3996 case 2: tcg_gen_sub_i64(CPU_V001); break;
3997 default: abort();
3998 }
3999}
4000
a7812ae4 4001static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4002{
4003 switch (size) {
4004 case 0: gen_helper_neon_negl_u16(var, var); break;
4005 case 1: gen_helper_neon_negl_u32(var, var); break;
4006 case 2: gen_helper_neon_negl_u64(var, var); break;
4007 default: abort();
4008 }
4009}
4010
a7812ae4 4011static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4012{
4013 switch (size) {
4014 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4015 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4016 default: abort();
4017 }
4018}
4019
a7812ae4 4020static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4021{
a7812ae4 4022 TCGv_i64 tmp;
ad69471c
PB
4023
4024 switch ((size << 1) | u) {
4025 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4026 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4027 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4028 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4029 case 4:
4030 tmp = gen_muls_i64_i32(a, b);
4031 tcg_gen_mov_i64(dest, tmp);
4032 break;
4033 case 5:
4034 tmp = gen_mulu_i64_i32(a, b);
4035 tcg_gen_mov_i64(dest, tmp);
4036 break;
4037 default: abort();
4038 }
ad69471c
PB
4039}
4040
9ee6e8bb
PB
4041/* Translate a NEON data processing instruction. Return nonzero if the
4042 instruction is invalid.
ad69471c
PB
4043 We process data in a mixture of 32-bit and 64-bit chunks.
4044 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
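/* A "pass" below is one 32-bit chunk of a register: a D register is
   processed as two passes and a Q register as four, which is why most of
   the loops run for (q ? 4 : 2) iterations. */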
2c0262af 4045
9ee6e8bb
PB
4046static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4047{
4048 int op;
4049 int q;
4050 int rd, rn, rm;
4051 int size;
4052 int shift;
4053 int pass;
4054 int count;
4055 int pairwise;
4056 int u;
4057 int n;
4058 uint32_t imm;
8f8e3aa4
PB
4059 TCGv tmp;
4060 TCGv tmp2;
4061 TCGv tmp3;
a7812ae4 4062 TCGv_i64 tmp64;
9ee6e8bb
PB
4063
4064 if (!vfp_enabled(env))
4065 return 1;
4066 q = (insn & (1 << 6)) != 0;
4067 u = (insn >> 24) & 1;
4068 VFP_DREG_D(rd, insn);
4069 VFP_DREG_N(rn, insn);
4070 VFP_DREG_M(rm, insn);
4071 size = (insn >> 20) & 3;
4072 if ((insn & (1 << 23)) == 0) {
4073 /* Three register same length. */
4074 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4075 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4076 || op == 10 || op == 11 || op == 16)) {
4077 /* 64-bit element instructions. */
9ee6e8bb 4078 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4079 neon_load_reg64(cpu_V0, rn + pass);
4080 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4081 switch (op) {
4082 case 1: /* VQADD */
4083 if (u) {
ad69471c 4084 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4085 } else {
ad69471c 4086 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4087 }
9ee6e8bb
PB
4088 break;
4089 case 5: /* VQSUB */
4090 if (u) {
ad69471c
PB
4091 gen_helper_neon_sub_saturate_u64(CPU_V001);
4092 } else {
4093 gen_helper_neon_sub_saturate_s64(CPU_V001);
4094 }
4095 break;
4096 case 8: /* VSHL */
4097 if (u) {
4098 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4099 } else {
4100 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4101 }
4102 break;
4103 case 9: /* VQSHL */
4104 if (u) {
 4105 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
 4106 cpu_V1, cpu_V0);
 4107 } else {
 4108 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
 4109 cpu_V1, cpu_V0);
4110 }
4111 break;
4112 case 10: /* VRSHL */
4113 if (u) {
4114 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4115 } else {
ad69471c
PB
4116 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4117 }
4118 break;
4119 case 11: /* VQRSHL */
4120 if (u) {
4121 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4122 cpu_V1, cpu_V0);
4123 } else {
4124 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4125 cpu_V1, cpu_V0);
1e8d4eec 4126 }
9ee6e8bb
PB
4127 break;
4128 case 16:
4129 if (u) {
ad69471c 4130 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4131 } else {
ad69471c 4132 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4133 }
4134 break;
4135 default:
4136 abort();
2c0262af 4137 }
ad69471c 4138 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4139 }
9ee6e8bb 4140 return 0;
2c0262af 4141 }
9ee6e8bb
PB
4142 switch (op) {
4143 case 8: /* VSHL */
4144 case 9: /* VQSHL */
4145 case 10: /* VRSHL */
ad69471c 4146 case 11: /* VQRSHL */
9ee6e8bb 4147 {
ad69471c
PB
4148 int rtmp;
4149 /* Shift instruction operands are reversed. */
4150 rtmp = rn;
9ee6e8bb 4151 rn = rm;
ad69471c 4152 rm = rtmp;
9ee6e8bb
PB
4153 pairwise = 0;
4154 }
2c0262af 4155 break;
9ee6e8bb
PB
4156 case 20: /* VPMAX */
4157 case 21: /* VPMIN */
4158 case 23: /* VPADD */
4159 pairwise = 1;
2c0262af 4160 break;
9ee6e8bb
PB
4161 case 26: /* VPADD (float) */
4162 pairwise = (u && size < 2);
2c0262af 4163 break;
9ee6e8bb
PB
4164 case 30: /* VPMIN/VPMAX (float) */
4165 pairwise = u;
2c0262af 4166 break;
9ee6e8bb
PB
4167 default:
4168 pairwise = 0;
2c0262af 4169 break;
9ee6e8bb 4170 }
dd8fbd78 4171
9ee6e8bb
PB
4172 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4173
4174 if (pairwise) {
4175 /* Pairwise. */
4176 if (q)
4177 n = (pass & 1) * 2;
2c0262af 4178 else
9ee6e8bb
PB
4179 n = 0;
4180 if (pass < q + 1) {
dd8fbd78
FN
4181 tmp = neon_load_reg(rn, n);
4182 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4183 } else {
dd8fbd78
FN
4184 tmp = neon_load_reg(rm, n);
4185 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4186 }
4187 } else {
4188 /* Elementwise. */
dd8fbd78
FN
4189 tmp = neon_load_reg(rn, pass);
4190 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4191 }
4192 switch (op) {
4193 case 0: /* VHADD */
4194 GEN_NEON_INTEGER_OP(hadd);
4195 break;
4196 case 1: /* VQADD */
ad69471c 4197 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4198 break;
9ee6e8bb
PB
4199 case 2: /* VRHADD */
4200 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4201 break;
9ee6e8bb
PB
4202 case 3: /* Logic ops. */
4203 switch ((u << 2) | size) {
4204 case 0: /* VAND */
dd8fbd78 4205 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4206 break;
4207 case 1: /* BIC */
dd8fbd78 4208 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4209 break;
4210 case 2: /* VORR */
dd8fbd78 4211 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4212 break;
4213 case 3: /* VORN */
dd8fbd78
FN
4214 tcg_gen_not_i32(tmp2, tmp2);
4215 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4216 break;
4217 case 4: /* VEOR */
dd8fbd78 4218 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4219 break;
4220 case 5: /* VBSL */
dd8fbd78
FN
4221 tmp3 = neon_load_reg(rd, pass);
4222 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4223 dead_tmp(tmp3);
9ee6e8bb
PB
4224 break;
4225 case 6: /* VBIT */
dd8fbd78
FN
4226 tmp3 = neon_load_reg(rd, pass);
4227 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4228 dead_tmp(tmp3);
9ee6e8bb
PB
4229 break;
4230 case 7: /* VBIF */
dd8fbd78
FN
4231 tmp3 = neon_load_reg(rd, pass);
4232 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4233 dead_tmp(tmp3);
9ee6e8bb 4234 break;
2c0262af
FB
4235 }
4236 break;
9ee6e8bb
PB
4237 case 4: /* VHSUB */
4238 GEN_NEON_INTEGER_OP(hsub);
4239 break;
4240 case 5: /* VQSUB */
ad69471c 4241 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4242 break;
9ee6e8bb
PB
4243 case 6: /* VCGT */
4244 GEN_NEON_INTEGER_OP(cgt);
4245 break;
4246 case 7: /* VCGE */
4247 GEN_NEON_INTEGER_OP(cge);
4248 break;
4249 case 8: /* VSHL */
ad69471c 4250 GEN_NEON_INTEGER_OP(shl);
2c0262af 4251 break;
9ee6e8bb 4252 case 9: /* VQSHL */
ad69471c 4253 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4254 break;
9ee6e8bb 4255 case 10: /* VRSHL */
ad69471c 4256 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4257 break;
9ee6e8bb 4258 case 11: /* VQRSHL */
ad69471c 4259 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4260 break;
4261 case 12: /* VMAX */
4262 GEN_NEON_INTEGER_OP(max);
4263 break;
4264 case 13: /* VMIN */
4265 GEN_NEON_INTEGER_OP(min);
4266 break;
4267 case 14: /* VABD */
4268 GEN_NEON_INTEGER_OP(abd);
4269 break;
4270 case 15: /* VABA */
4271 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4272 dead_tmp(tmp2);
4273 tmp2 = neon_load_reg(rd, pass);
4274 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4275 break;
4276 case 16:
4277 if (!u) { /* VADD */
dd8fbd78 4278 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4279 return 1;
4280 } else { /* VSUB */
4281 switch (size) {
dd8fbd78
FN
4282 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4283 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4284 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4285 default: return 1;
4286 }
4287 }
4288 break;
4289 case 17:
4290 if (!u) { /* VTST */
4291 switch (size) {
dd8fbd78
FN
4292 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4293 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4294 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4295 default: return 1;
4296 }
4297 } else { /* VCEQ */
4298 switch (size) {
dd8fbd78
FN
4299 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4300 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4301 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4302 default: return 1;
4303 }
4304 }
4305 break;
4306 case 18: /* Multiply. */
4307 switch (size) {
dd8fbd78
FN
4308 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4309 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4310 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4311 default: return 1;
4312 }
dd8fbd78
FN
4313 dead_tmp(tmp2);
4314 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4315 if (u) { /* VMLS */
dd8fbd78 4316 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4317 } else { /* VMLA */
dd8fbd78 4318 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4319 }
4320 break;
4321 case 19: /* VMUL */
4322 if (u) { /* polynomial */
dd8fbd78 4323 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4324 } else { /* Integer */
4325 switch (size) {
dd8fbd78
FN
4326 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4327 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4328 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4329 default: return 1;
4330 }
4331 }
4332 break;
4333 case 20: /* VPMAX */
4334 GEN_NEON_INTEGER_OP(pmax);
4335 break;
4336 case 21: /* VPMIN */
4337 GEN_NEON_INTEGER_OP(pmin);
4338 break;
 4339 case 22: /* Multiply high. */
4340 if (!u) { /* VQDMULH */
4341 switch (size) {
dd8fbd78
FN
4342 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4343 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4344 default: return 1;
4345 }
 4346 } else { /* VQRDMULH */
4347 switch (size) {
dd8fbd78
FN
4348 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4349 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4350 default: return 1;
4351 }
4352 }
4353 break;
4354 case 23: /* VPADD */
4355 if (u)
4356 return 1;
4357 switch (size) {
dd8fbd78
FN
4358 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4359 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4360 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4361 default: return 1;
4362 }
4363 break;
 4364 case 26: /* Floating point arithmetic. */
4365 switch ((u << 2) | size) {
4366 case 0: /* VADD */
dd8fbd78 4367 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4368 break;
4369 case 2: /* VSUB */
dd8fbd78 4370 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4371 break;
4372 case 4: /* VPADD */
dd8fbd78 4373 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4374 break;
4375 case 6: /* VABD */
dd8fbd78 4376 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4377 break;
4378 default:
4379 return 1;
4380 }
4381 break;
4382 case 27: /* Float multiply. */
dd8fbd78 4383 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4384 if (!u) {
dd8fbd78
FN
4385 dead_tmp(tmp2);
4386 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4387 if (size == 0) {
dd8fbd78 4388 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4389 } else {
dd8fbd78 4390 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4391 }
4392 }
4393 break;
4394 case 28: /* Float compare. */
4395 if (!u) {
dd8fbd78 4396 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4397 } else {
9ee6e8bb 4398 if (size == 0)
dd8fbd78 4399 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4400 else
dd8fbd78 4401 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4402 }
2c0262af 4403 break;
9ee6e8bb
PB
4404 case 29: /* Float compare absolute. */
4405 if (!u)
4406 return 1;
4407 if (size == 0)
dd8fbd78 4408 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4409 else
dd8fbd78 4410 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4411 break;
9ee6e8bb
PB
4412 case 30: /* Float min/max. */
4413 if (size == 0)
dd8fbd78 4414 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4415 else
dd8fbd78 4416 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4417 break;
4418 case 31:
4419 if (size == 0)
dd8fbd78 4420 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4421 else
dd8fbd78 4422 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4423 break;
9ee6e8bb
PB
4424 default:
4425 abort();
2c0262af 4426 }
dd8fbd78
FN
4427 dead_tmp(tmp2);
4428
9ee6e8bb
PB
4429 /* Save the result. For elementwise operations we can put it
4430 straight into the destination register. For pairwise operations
4431 we have to be careful to avoid clobbering the source operands. */
4432 if (pairwise && rd == rm) {
dd8fbd78 4433 neon_store_scratch(pass, tmp);
9ee6e8bb 4434 } else {
dd8fbd78 4435 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4436 }
4437
4438 } /* for pass */
4439 if (pairwise && rd == rm) {
4440 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4441 tmp = neon_load_scratch(pass);
4442 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4443 }
4444 }
ad69471c 4445 /* End of 3 register same size operations. */
9ee6e8bb
PB
4446 } else if (insn & (1 << 4)) {
4447 if ((insn & 0x00380080) != 0) {
4448 /* Two registers and shift. */
4449 op = (insn >> 8) & 0xf;
4450 if (insn & (1 << 7)) {
4451 /* 64-bit shift. */
4452 size = 3;
4453 } else {
4454 size = 2;
4455 while ((insn & (1 << (size + 19))) == 0)
4456 size--;
4457 }
4458 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
 4459 /* To avoid excessive duplication of ops we implement shift
4460 by immediate using the variable shift operations. */
4461 if (op < 8) {
4462 /* Shift by immediate:
4463 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4464 /* Right shifts are encoded as N - shift, where N is the
4465 element size in bits. */
4466 if (op <= 4)
4467 shift = shift - (1 << (size + 3));
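                    /* Worked example: VSHR.U8 #3 puts 8 - 3 = 5 in the
                       shift field, so shift becomes 5 - 8 = -3 here, and
                       the variable-shift helper below performs the right
                       shift by 3 (negative counts shift right). */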
9ee6e8bb
PB
4468 if (size == 3) {
4469 count = q + 1;
4470 } else {
4471 count = q ? 4: 2;
4472 }
4473 switch (size) {
4474 case 0:
4475 imm = (uint8_t) shift;
4476 imm |= imm << 8;
4477 imm |= imm << 16;
4478 break;
4479 case 1:
4480 imm = (uint16_t) shift;
4481 imm |= imm << 16;
4482 break;
4483 case 2:
4484 case 3:
4485 imm = shift;
4486 break;
4487 default:
4488 abort();
4489 }
4490
4491 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4492 if (size == 3) {
4493 neon_load_reg64(cpu_V0, rm + pass);
4494 tcg_gen_movi_i64(cpu_V1, imm);
4495 switch (op) {
4496 case 0: /* VSHR */
4497 case 1: /* VSRA */
4498 if (u)
4499 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4500 else
ad69471c 4501 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4502 break;
ad69471c
PB
4503 case 2: /* VRSHR */
4504 case 3: /* VRSRA */
4505 if (u)
4506 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4507 else
ad69471c 4508 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4509 break;
ad69471c
PB
4510 case 4: /* VSRI */
4511 if (!u)
4512 return 1;
4513 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4514 break;
4515 case 5: /* VSHL, VSLI */
4516 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4517 break;
4518 case 6: /* VQSHL */
4519 if (u)
4520 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4521 else
ad69471c
PB
4522 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4523 break;
4524 case 7: /* VQSHLU */
4525 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4526 break;
9ee6e8bb 4527 }
ad69471c
PB
4528 if (op == 1 || op == 3) {
4529 /* Accumulate. */
4530 neon_load_reg64(cpu_V0, rd + pass);
4531 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4532 } else if (op == 4 || (op == 5 && u)) {
4533 /* Insert */
4534 cpu_abort(env, "VS[LR]I.64 not implemented");
4535 }
4536 neon_store_reg64(cpu_V0, rd + pass);
4537 } else { /* size < 3 */
4538 /* Operands in T0 and T1. */
dd8fbd78
FN
4539 tmp = neon_load_reg(rm, pass);
4540 tmp2 = new_tmp();
4541 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4542 switch (op) {
4543 case 0: /* VSHR */
4544 case 1: /* VSRA */
4545 GEN_NEON_INTEGER_OP(shl);
4546 break;
4547 case 2: /* VRSHR */
4548 case 3: /* VRSRA */
4549 GEN_NEON_INTEGER_OP(rshl);
4550 break;
4551 case 4: /* VSRI */
4552 if (!u)
4553 return 1;
4554 GEN_NEON_INTEGER_OP(shl);
4555 break;
4556 case 5: /* VSHL, VSLI */
4557 switch (size) {
dd8fbd78
FN
4558 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4559 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4560 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4561 default: return 1;
4562 }
4563 break;
4564 case 6: /* VQSHL */
4565 GEN_NEON_INTEGER_OP_ENV(qshl);
4566 break;
4567 case 7: /* VQSHLU */
4568 switch (size) {
dd8fbd78
FN
4569 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4570 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4571 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4572 default: return 1;
4573 }
4574 break;
4575 }
dd8fbd78 4576 dead_tmp(tmp2);
ad69471c
PB
4577
4578 if (op == 1 || op == 3) {
4579 /* Accumulate. */
dd8fbd78
FN
4580 tmp2 = neon_load_reg(rd, pass);
4581 gen_neon_add(size, tmp2, tmp);
4582 dead_tmp(tmp2);
ad69471c
PB
4583 } else if (op == 4 || (op == 5 && u)) {
4584 /* Insert */
4585 switch (size) {
4586 case 0:
4587 if (op == 4)
4588 imm = 0xff >> -shift;
4589 else
4590 imm = (uint8_t)(0xff << shift);
4591 imm |= imm << 8;
4592 imm |= imm << 16;
4593 break;
4594 case 1:
4595 if (op == 4)
4596 imm = 0xffff >> -shift;
4597 else
4598 imm = (uint16_t)(0xffff << shift);
4599 imm |= imm << 16;
4600 break;
4601 case 2:
4602 if (op == 4)
4603 imm = 0xffffffffu >> -shift;
4604 else
4605 imm = 0xffffffffu << shift;
4606 break;
4607 default:
4608 abort();
4609 }
dd8fbd78
FN
4610 tmp2 = neon_load_reg(rd, pass);
4611 tcg_gen_andi_i32(tmp, tmp, imm);
4612 tcg_gen_andi_i32(tmp2, tmp2, ~imm);
4613 tcg_gen_or_i32(tmp, tmp, tmp2);
4614 dead_tmp(tmp2);
ad69471c 4615 }
dd8fbd78 4616 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4617 }
4618 } /* for pass */
4619 } else if (op < 10) {
ad69471c 4620 /* Shift by immediate and narrow:
9ee6e8bb
PB
4621 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4622 shift = shift - (1 << (size + 3));
4623 size++;
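                /* size now describes the double-width source elements; the
                   narrowing helpers below are called with size - 1 for the
                   destination element size. */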
9ee6e8bb
PB
4624 switch (size) {
4625 case 1:
ad69471c 4626 imm = (uint16_t)shift;
9ee6e8bb 4627 imm |= imm << 16;
ad69471c 4628 tmp2 = tcg_const_i32(imm);
a7812ae4 4629 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4630 break;
4631 case 2:
ad69471c
PB
4632 imm = (uint32_t)shift;
4633 tmp2 = tcg_const_i32(imm);
a7812ae4 4634 TCGV_UNUSED_I64(tmp64);
4cc633c3 4635 break;
9ee6e8bb 4636 case 3:
a7812ae4
PB
4637 tmp64 = tcg_const_i64(shift);
4638 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4639 break;
4640 default:
4641 abort();
4642 }
4643
ad69471c
PB
4644 for (pass = 0; pass < 2; pass++) {
4645 if (size == 3) {
4646 neon_load_reg64(cpu_V0, rm + pass);
4647 if (q) {
4648 if (u)
a7812ae4 4649 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4650 else
a7812ae4 4651 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4652 } else {
4653 if (u)
a7812ae4 4654 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4655 else
a7812ae4 4656 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4657 }
2c0262af 4658 } else {
ad69471c
PB
4659 tmp = neon_load_reg(rm + pass, 0);
4660 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4661 tmp3 = neon_load_reg(rm + pass, 1);
4662 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4663 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4664 dead_tmp(tmp);
36aa55dc 4665 dead_tmp(tmp3);
9ee6e8bb 4666 }
ad69471c
PB
4667 tmp = new_tmp();
4668 if (op == 8 && !u) {
4669 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4670 } else {
ad69471c
PB
4671 if (op == 8)
4672 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4673 else
ad69471c
PB
4674 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4675 }
4676 if (pass == 0) {
4677 tmp2 = tmp;
4678 } else {
4679 neon_store_reg(rd, 0, tmp2);
4680 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4681 }
4682 } /* for pass */
4683 } else if (op == 10) {
4684 /* VSHLL */
ad69471c 4685 if (q || size == 3)
9ee6e8bb 4686 return 1;
ad69471c
PB
4687 tmp = neon_load_reg(rm, 0);
4688 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4689 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4690 if (pass == 1)
4691 tmp = tmp2;
4692
4693 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4694
9ee6e8bb
PB
4695 if (shift != 0) {
4696 /* The shift is less than the width of the source
ad69471c
PB
4697 type, so we can just shift the whole register. */
4698 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4699 if (size < 2 || !u) {
4700 uint64_t imm64;
4701 if (size == 0) {
4702 imm = (0xffu >> (8 - shift));
4703 imm |= imm << 16;
4704 } else {
4705 imm = 0xffff >> (16 - shift);
9ee6e8bb 4706 }
ad69471c
PB
4707 imm64 = imm | (((uint64_t)imm) << 32);
4708 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4709 }
4710 }
ad69471c 4711 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4712 }
4713 } else if (op == 15 || op == 16) {
4714 /* VCVT fixed-point. */
4715 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4716 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4717 if (op & 1) {
4718 if (u)
4373f3ce 4719 gen_vfp_ulto(0, shift);
9ee6e8bb 4720 else
4373f3ce 4721 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4722 } else {
4723 if (u)
4373f3ce 4724 gen_vfp_toul(0, shift);
9ee6e8bb 4725 else
4373f3ce 4726 gen_vfp_tosl(0, shift);
2c0262af 4727 }
4373f3ce 4728 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4729 }
4730 } else {
9ee6e8bb
PB
4731 return 1;
4732 }
4733 } else { /* (insn & 0x00380080) == 0 */
4734 int invert;
4735
4736 op = (insn >> 8) & 0xf;
4737 /* One register and immediate. */
4738 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4739 invert = (insn & (1 << 5)) != 0;
4740 switch (op) {
4741 case 0: case 1:
4742 /* no-op */
4743 break;
4744 case 2: case 3:
4745 imm <<= 8;
4746 break;
4747 case 4: case 5:
4748 imm <<= 16;
4749 break;
4750 case 6: case 7:
4751 imm <<= 24;
4752 break;
4753 case 8: case 9:
4754 imm |= imm << 16;
4755 break;
4756 case 10: case 11:
4757 imm = (imm << 8) | (imm << 24);
4758 break;
4759 case 12:
 4760 imm = (imm << 8) | 0xff;
4761 break;
4762 case 13:
4763 imm = (imm << 16) | 0xffff;
4764 break;
4765 case 14:
4766 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4767 if (invert)
4768 imm = ~imm;
4769 break;
4770 case 15:
4771 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4772 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4773 break;
4774 }
4775 if (invert)
4776 imm = ~imm;
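            /* Example: op (cmode) 12 with imm8 = 0x12 expands to 0x000012ff;
               the expanded value is then either written to every 32-bit pass
               (VMOV/VMVN) or used as an AND/OR mask (VBIC/VORR) below. */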
4777
9ee6e8bb
PB
4778 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4779 if (op & 1 && op < 12) {
ad69471c 4780 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4781 if (invert) {
4782 /* The immediate value has already been inverted, so
4783 BIC becomes AND. */
ad69471c 4784 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4785 } else {
ad69471c 4786 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4787 }
9ee6e8bb 4788 } else {
ad69471c
PB
4789 /* VMOV, VMVN. */
4790 tmp = new_tmp();
9ee6e8bb 4791 if (op == 14 && invert) {
ad69471c
PB
4792 uint32_t val;
4793 val = 0;
9ee6e8bb
PB
4794 for (n = 0; n < 4; n++) {
4795 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4796 val |= 0xff << (n * 8);
9ee6e8bb 4797 }
ad69471c
PB
4798 tcg_gen_movi_i32(tmp, val);
4799 } else {
4800 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4801 }
9ee6e8bb 4802 }
ad69471c 4803 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4804 }
4805 }
e4b3861d 4806 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4807 if (size != 3) {
4808 op = (insn >> 8) & 0xf;
4809 if ((insn & (1 << 6)) == 0) {
4810 /* Three registers of different lengths. */
4811 int src1_wide;
4812 int src2_wide;
4813 int prewiden;
4814 /* prewiden, src1_wide, src2_wide */
4815 static const int neon_3reg_wide[16][3] = {
4816 {1, 0, 0}, /* VADDL */
4817 {1, 1, 0}, /* VADDW */
4818 {1, 0, 0}, /* VSUBL */
4819 {1, 1, 0}, /* VSUBW */
4820 {0, 1, 1}, /* VADDHN */
4821 {0, 0, 0}, /* VABAL */
4822 {0, 1, 1}, /* VSUBHN */
4823 {0, 0, 0}, /* VABDL */
4824 {0, 0, 0}, /* VMLAL */
4825 {0, 0, 0}, /* VQDMLAL */
4826 {0, 0, 0}, /* VMLSL */
4827 {0, 0, 0}, /* VQDMLSL */
4828 {0, 0, 0}, /* Integer VMULL */
4829 {0, 0, 0}, /* VQDMULL */
4830 {0, 0, 0} /* Polynomial VMULL */
4831 };
4832
4833 prewiden = neon_3reg_wide[op][0];
4834 src1_wide = neon_3reg_wide[op][1];
4835 src2_wide = neon_3reg_wide[op][2];
4836
ad69471c
PB
4837 if (size == 0 && (op == 9 || op == 11 || op == 13))
4838 return 1;
4839
9ee6e8bb
PB
4840 /* Avoid overlapping operands. Wide source operands are
4841 always aligned so will never overlap with wide
4842 destinations in problematic ways. */
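                /* Concretely: when the second 32-bit half of a narrow,
                   overlapping source would be clobbered by pass 0, it is
                   stashed in scratch slot 2 here and read back with
                   neon_load_scratch(2) during pass 1. */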
8f8e3aa4 4843 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4844 tmp = neon_load_reg(rm, 1);
4845 neon_store_scratch(2, tmp);
8f8e3aa4 4846 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4847 tmp = neon_load_reg(rn, 1);
4848 neon_store_scratch(2, tmp);
9ee6e8bb 4849 }
a50f5b91 4850 TCGV_UNUSED(tmp3);
9ee6e8bb 4851 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4852 if (src1_wide) {
4853 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4854 TCGV_UNUSED(tmp);
9ee6e8bb 4855 } else {
ad69471c 4856 if (pass == 1 && rd == rn) {
dd8fbd78 4857 tmp = neon_load_scratch(2);
9ee6e8bb 4858 } else {
ad69471c
PB
4859 tmp = neon_load_reg(rn, pass);
4860 }
4861 if (prewiden) {
4862 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4863 }
4864 }
ad69471c
PB
4865 if (src2_wide) {
4866 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4867 TCGV_UNUSED(tmp2);
9ee6e8bb 4868 } else {
ad69471c 4869 if (pass == 1 && rd == rm) {
dd8fbd78 4870 tmp2 = neon_load_scratch(2);
9ee6e8bb 4871 } else {
ad69471c
PB
4872 tmp2 = neon_load_reg(rm, pass);
4873 }
4874 if (prewiden) {
4875 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4876 }
9ee6e8bb
PB
4877 }
4878 switch (op) {
4879 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4880 gen_neon_addl(size);
9ee6e8bb
PB
4881 break;
 4882 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 4883 gen_neon_subl(size);
9ee6e8bb
PB
4884 break;
4885 case 5: case 7: /* VABAL, VABDL */
4886 switch ((size << 1) | u) {
ad69471c
PB
4887 case 0:
4888 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4889 break;
4890 case 1:
4891 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4892 break;
4893 case 2:
4894 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4895 break;
4896 case 3:
4897 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4898 break;
4899 case 4:
4900 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4901 break;
4902 case 5:
4903 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4904 break;
9ee6e8bb
PB
4905 default: abort();
4906 }
ad69471c
PB
4907 dead_tmp(tmp2);
4908 dead_tmp(tmp);
9ee6e8bb
PB
4909 break;
4910 case 8: case 9: case 10: case 11: case 12: case 13:
4911 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4912 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
4913 dead_tmp(tmp2);
4914 dead_tmp(tmp);
9ee6e8bb
PB
4915 break;
4916 case 14: /* Polynomial VMULL */
4917 cpu_abort(env, "Polynomial VMULL not implemented");
4918
4919 default: /* 15 is RESERVED. */
4920 return 1;
4921 }
4922 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4923 /* Accumulate. */
4924 if (op == 10 || op == 11) {
ad69471c 4925 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4926 }
4927
9ee6e8bb 4928 if (op != 13) {
ad69471c 4929 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4930 }
4931
4932 switch (op) {
4933 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4934 gen_neon_addl(size);
9ee6e8bb
PB
4935 break;
4936 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4937 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4938 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4939 break;
9ee6e8bb
PB
4940 /* Fall through. */
4941 case 13: /* VQDMULL */
ad69471c 4942 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4943 break;
4944 default:
4945 abort();
4946 }
ad69471c 4947 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4948 } else if (op == 4 || op == 6) {
4949 /* Narrowing operation. */
ad69471c 4950 tmp = new_tmp();
9ee6e8bb
PB
4951 if (u) {
4952 switch (size) {
ad69471c
PB
4953 case 0:
4954 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4955 break;
4956 case 1:
4957 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4958 break;
4959 case 2:
4960 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4961 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4962 break;
9ee6e8bb
PB
4963 default: abort();
4964 }
4965 } else {
4966 switch (size) {
ad69471c
PB
4967 case 0:
4968 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4969 break;
4970 case 1:
4971 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4972 break;
4973 case 2:
4974 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4975 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4976 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4977 break;
9ee6e8bb
PB
4978 default: abort();
4979 }
4980 }
ad69471c
PB
4981 if (pass == 0) {
4982 tmp3 = tmp;
4983 } else {
4984 neon_store_reg(rd, 0, tmp3);
4985 neon_store_reg(rd, 1, tmp);
4986 }
9ee6e8bb
PB
4987 } else {
4988 /* Write back the result. */
ad69471c 4989 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4990 }
4991 }
4992 } else {
4993 /* Two registers and a scalar. */
4994 switch (op) {
4995 case 0: /* Integer VMLA scalar */
4996 case 1: /* Float VMLA scalar */
4997 case 4: /* Integer VMLS scalar */
4998 case 5: /* Floating point VMLS scalar */
4999 case 8: /* Integer VMUL scalar */
5000 case 9: /* Floating point VMUL scalar */
5001 case 12: /* VQDMULH scalar */
5002 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5003 tmp = neon_get_scalar(size, rm);
5004 neon_store_scratch(0, tmp);
9ee6e8bb 5005 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5006 tmp = neon_load_scratch(0);
5007 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5008 if (op == 12) {
5009 if (size == 1) {
dd8fbd78 5010 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5011 } else {
dd8fbd78 5012 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5013 }
5014 } else if (op == 13) {
5015 if (size == 1) {
dd8fbd78 5016 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5017 } else {
dd8fbd78 5018 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5019 }
5020 } else if (op & 1) {
dd8fbd78 5021 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5022 } else {
5023 switch (size) {
dd8fbd78
FN
5024 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5025 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5026 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5027 default: return 1;
5028 }
5029 }
dd8fbd78 5030 dead_tmp(tmp2);
9ee6e8bb
PB
5031 if (op < 8) {
5032 /* Accumulate. */
dd8fbd78 5033 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5034 switch (op) {
5035 case 0:
dd8fbd78 5036 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5037 break;
5038 case 1:
dd8fbd78 5039 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5040 break;
5041 case 4:
dd8fbd78 5042 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5043 break;
5044 case 5:
dd8fbd78 5045 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5046 break;
5047 default:
5048 abort();
5049 }
dd8fbd78 5050 dead_tmp(tmp2);
9ee6e8bb 5051 }
dd8fbd78 5052 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5053 }
5054 break;
 5055 case 2: /* VMLAL scalar */
5056 case 3: /* VQDMLAL scalar */
5057 case 6: /* VMLSL scalar */
5058 case 7: /* VQDMLSL scalar */
5059 case 10: /* VMULL scalar */
5060 case 11: /* VQDMULL scalar */
ad69471c
PB
5061 if (size == 0 && (op == 3 || op == 7 || op == 11))
5062 return 1;
5063
dd8fbd78
FN
5064 tmp2 = neon_get_scalar(size, rm);
5065 tmp3 = neon_load_reg(rn, 1);
ad69471c 5066
9ee6e8bb 5067 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5068 if (pass == 0) {
5069 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5070 } else {
dd8fbd78 5071 tmp = tmp3;
9ee6e8bb 5072 }
ad69471c 5073 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5074 dead_tmp(tmp);
9ee6e8bb 5075 if (op == 6 || op == 7) {
ad69471c
PB
5076 gen_neon_negl(cpu_V0, size);
5077 }
5078 if (op != 11) {
5079 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5080 }
9ee6e8bb
PB
5081 switch (op) {
5082 case 2: case 6:
ad69471c 5083 gen_neon_addl(size);
9ee6e8bb
PB
5084 break;
5085 case 3: case 7:
ad69471c
PB
5086 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5087 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5088 break;
5089 case 10:
5090 /* no-op */
5091 break;
5092 case 11:
ad69471c 5093 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5094 break;
5095 default:
5096 abort();
5097 }
ad69471c 5098 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5099 }
dd8fbd78
FN
5100
5101 dead_tmp(tmp2);
5102
9ee6e8bb
PB
5103 break;
5104 default: /* 14 and 15 are RESERVED */
5105 return 1;
5106 }
5107 }
5108 } else { /* size == 3 */
5109 if (!u) {
5110 /* Extract. */
9ee6e8bb 5111 imm = (insn >> 8) & 0xf;
ad69471c
PB
5112 count = q + 1;
5113
5114 if (imm > 7 && !q)
5115 return 1;
5116
5117 if (imm == 0) {
5118 neon_load_reg64(cpu_V0, rn);
5119 if (q) {
5120 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5121 }
ad69471c
PB
5122 } else if (imm == 8) {
5123 neon_load_reg64(cpu_V0, rn + 1);
5124 if (q) {
5125 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5126 }
ad69471c 5127 } else if (q) {
a7812ae4 5128 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5129 if (imm < 8) {
5130 neon_load_reg64(cpu_V0, rn);
a7812ae4 5131 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5132 } else {
5133 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5134 neon_load_reg64(tmp64, rm);
ad69471c
PB
5135 }
5136 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5137 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5138 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5139 if (imm < 8) {
5140 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5141 } else {
ad69471c
PB
5142 neon_load_reg64(cpu_V1, rm + 1);
5143 imm -= 8;
9ee6e8bb 5144 }
ad69471c 5145 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5146 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5147 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
ad69471c 5148 } else {
a7812ae4 5149 /* BUGFIX */
ad69471c 5150 neon_load_reg64(cpu_V0, rn);
a7812ae4 5151 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5152 neon_load_reg64(cpu_V1, rm);
a7812ae4 5153 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5154 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5155 }
5156 neon_store_reg64(cpu_V0, rd);
5157 if (q) {
5158 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5159 }
5160 } else if ((insn & (1 << 11)) == 0) {
5161 /* Two register misc. */
5162 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5163 size = (insn >> 18) & 3;
5164 switch (op) {
5165 case 0: /* VREV64 */
5166 if (size == 3)
5167 return 1;
5168 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5169 tmp = neon_load_reg(rm, pass * 2);
5170 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5171 switch (size) {
dd8fbd78
FN
5172 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5173 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5174 case 2: /* no-op */ break;
5175 default: abort();
5176 }
dd8fbd78 5177 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5178 if (size == 2) {
dd8fbd78 5179 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5180 } else {
9ee6e8bb 5181 switch (size) {
dd8fbd78
FN
5182 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5183 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5184 default: abort();
5185 }
dd8fbd78 5186 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5187 }
5188 }
5189 break;
5190 case 4: case 5: /* VPADDL */
5191 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5192 if (size == 3)
5193 return 1;
ad69471c
PB
5194 for (pass = 0; pass < q + 1; pass++) {
5195 tmp = neon_load_reg(rm, pass * 2);
5196 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5197 tmp = neon_load_reg(rm, pass * 2 + 1);
5198 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5199 switch (size) {
5200 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5201 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5202 case 2: tcg_gen_add_i64(CPU_V001); break;
5203 default: abort();
5204 }
9ee6e8bb
PB
5205 if (op >= 12) {
5206 /* Accumulate. */
ad69471c
PB
5207 neon_load_reg64(cpu_V1, rd + pass);
5208 gen_neon_addl(size);
9ee6e8bb 5209 }
ad69471c 5210 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5211 }
5212 break;
5213 case 33: /* VTRN */
5214 if (size == 2) {
5215 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5216 tmp = neon_load_reg(rm, n);
5217 tmp2 = neon_load_reg(rd, n + 1);
5218 neon_store_reg(rm, n, tmp2);
5219 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5220 }
5221 } else {
5222 goto elementwise;
5223 }
5224 break;
5225 case 34: /* VUZP */
5226 /* Reg Before After
5227 Rd A3 A2 A1 A0 B2 B0 A2 A0
5228 Rm B3 B2 B1 B0 B3 B1 A3 A1
5229 */
5230 if (size == 3)
5231 return 1;
5232 gen_neon_unzip(rd, q, 0, size);
5233 gen_neon_unzip(rm, q, 4, size);
5234 if (q) {
5235 static int unzip_order_q[8] =
5236 {0, 2, 4, 6, 1, 3, 5, 7};
5237 for (n = 0; n < 8; n++) {
5238 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5239 tmp = neon_load_scratch(unzip_order_q[n]);
5240 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5241 }
5242 } else {
5243 static int unzip_order[4] =
5244 {0, 4, 1, 5};
5245 for (n = 0; n < 4; n++) {
5246 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5247 tmp = neon_load_scratch(unzip_order[n]);
5248 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5249 }
5250 }
5251 break;
5252 case 35: /* VZIP */
5253 /* Reg Before After
5254 Rd A3 A2 A1 A0 B1 A1 B0 A0
5255 Rm B3 B2 B1 B0 B3 A3 B2 A2
5256 */
5257 if (size == 3)
5258 return 1;
5259 count = (q ? 4 : 2);
5260 for (n = 0; n < count; n++) {
dd8fbd78
FN
5261 tmp = neon_load_reg(rd, n);
 5262 tmp2 = neon_load_reg(rm, n);
9ee6e8bb 5263 switch (size) {
dd8fbd78
FN
5264 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5265 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5266 case 2: /* no-op */; break;
5267 default: abort();
5268 }
dd8fbd78
FN
5269 neon_store_scratch(n * 2, tmp);
5270 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5271 }
5272 for (n = 0; n < count * 2; n++) {
5273 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5274 tmp = neon_load_scratch(n);
5275 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5276 }
5277 break;
5278 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5279 if (size == 3)
5280 return 1;
a50f5b91 5281 TCGV_UNUSED(tmp2);
9ee6e8bb 5282 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5283 neon_load_reg64(cpu_V0, rm + pass);
5284 tmp = new_tmp();
9ee6e8bb 5285 if (op == 36 && q == 0) {
ad69471c 5286 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5287 } else if (q) {
ad69471c 5288 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5289 } else {
ad69471c
PB
5290 gen_neon_narrow_sats(size, tmp, cpu_V0);
5291 }
5292 if (pass == 0) {
5293 tmp2 = tmp;
5294 } else {
5295 neon_store_reg(rd, 0, tmp2);
5296 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5297 }
9ee6e8bb
PB
5298 }
5299 break;
5300 case 38: /* VSHLL */
ad69471c 5301 if (q || size == 3)
9ee6e8bb 5302 return 1;
ad69471c
PB
5303 tmp = neon_load_reg(rm, 0);
5304 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5305 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5306 if (pass == 1)
5307 tmp = tmp2;
5308 gen_neon_widen(cpu_V0, tmp, size, 1);
5309 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5310 }
5311 break;
5312 default:
5313 elementwise:
5314 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5315 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5316 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5317 neon_reg_offset(rm, pass));
dd8fbd78 5318 TCGV_UNUSED(tmp);
9ee6e8bb 5319 } else {
dd8fbd78 5320 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5321 }
5322 switch (op) {
5323 case 1: /* VREV32 */
5324 switch (size) {
dd8fbd78
FN
5325 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5326 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5327 default: return 1;
5328 }
5329 break;
5330 case 2: /* VREV16 */
5331 if (size != 0)
5332 return 1;
dd8fbd78 5333 gen_rev16(tmp);
9ee6e8bb 5334 break;
9ee6e8bb
PB
5335 case 8: /* CLS */
5336 switch (size) {
dd8fbd78
FN
5337 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5338 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5339 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5340 default: return 1;
5341 }
5342 break;
5343 case 9: /* CLZ */
5344 switch (size) {
dd8fbd78
FN
5345 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5346 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5347 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5348 default: return 1;
5349 }
5350 break;
5351 case 10: /* CNT */
5352 if (size != 0)
5353 return 1;
dd8fbd78 5354 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5355 break;
5356 case 11: /* VNOT */
5357 if (size != 0)
5358 return 1;
dd8fbd78 5359 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5360 break;
5361 case 14: /* VQABS */
5362 switch (size) {
dd8fbd78
FN
5363 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5364 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5365 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5366 default: return 1;
5367 }
5368 break;
5369 case 15: /* VQNEG */
5370 switch (size) {
dd8fbd78
FN
5371 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5372 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5373 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5374 default: return 1;
5375 }
5376 break;
5377 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5378 tmp2 = tcg_const_i32(0);
9ee6e8bb 5379 switch(size) {
dd8fbd78
FN
5380 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5381 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5382 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5383 default: return 1;
5384 }
dd8fbd78 5385 tcg_temp_free(tmp2);
9ee6e8bb 5386 if (op == 19)
dd8fbd78 5387 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5388 break;
5389 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5390 tmp2 = tcg_const_i32(0);
9ee6e8bb 5391 switch(size) {
dd8fbd78
FN
5392 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5393 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5394 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5395 default: return 1;
5396 }
dd8fbd78 5397 tcg_temp_free(tmp2);
9ee6e8bb 5398 if (op == 20)
dd8fbd78 5399 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5400 break;
5401 case 18: /* VCEQ #0 */
dd8fbd78 5402 tmp2 = tcg_const_i32(0);
9ee6e8bb 5403 switch(size) {
dd8fbd78
FN
5404 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5405 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5406 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5407 default: return 1;
5408 }
dd8fbd78 5409 tcg_temp_free(tmp2);
9ee6e8bb
PB
5410 break;
5411 case 22: /* VABS */
5412 switch(size) {
dd8fbd78
FN
5413 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5414 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5415 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5416 default: return 1;
5417 }
5418 break;
5419 case 23: /* VNEG */
ad69471c
PB
5420 if (size == 3)
5421 return 1;
dd8fbd78
FN
5422 tmp2 = tcg_const_i32(0);
5423 gen_neon_rsb(size, tmp, tmp2);
5424 tcg_temp_free(tmp2);
9ee6e8bb
PB
5425 break;
5426 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5427 tmp2 = tcg_const_i32(0);
5428 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5429 tcg_temp_free(tmp2);
9ee6e8bb 5430 if (op == 27)
dd8fbd78 5431 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5432 break;
5433 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5434 tmp2 = tcg_const_i32(0);
5435 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5436 tcg_temp_free(tmp2);
9ee6e8bb 5437 if (op == 28)
dd8fbd78 5438 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5439 break;
5440 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5441 tmp2 = tcg_const_i32(0);
5442 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5443 tcg_temp_free(tmp2);
9ee6e8bb
PB
5444 break;
5445 case 30: /* Float VABS */
4373f3ce 5446 gen_vfp_abs(0);
9ee6e8bb
PB
5447 break;
5448 case 31: /* Float VNEG */
4373f3ce 5449 gen_vfp_neg(0);
9ee6e8bb
PB
5450 break;
5451 case 32: /* VSWP */
dd8fbd78
FN
5452 tmp2 = neon_load_reg(rd, pass);
5453 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5454 break;
5455 case 33: /* VTRN */
dd8fbd78 5456 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5457 switch (size) {
dd8fbd78
FN
5458 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5459 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5460 case 2: abort();
5461 default: return 1;
5462 }
dd8fbd78 5463 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5464 break;
5465 case 56: /* Integer VRECPE */
dd8fbd78 5466 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5467 break;
5468 case 57: /* Integer VRSQRTE */
dd8fbd78 5469 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5470 break;
5471 case 58: /* Float VRECPE */
4373f3ce 5472 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5473 break;
5474 case 59: /* Float VRSQRTE */
4373f3ce 5475 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5476 break;
5477 case 60: /* VCVT.F32.S32 */
4373f3ce 5478 gen_vfp_tosiz(0);
9ee6e8bb
PB
5479 break;
5480 case 61: /* VCVT.F32.U32 */
4373f3ce 5481 gen_vfp_touiz(0);
9ee6e8bb
PB
5482 break;
5483 case 62: /* VCVT.S32.F32 */
4373f3ce 5484 gen_vfp_sito(0);
9ee6e8bb
PB
5485 break;
5486 case 63: /* VCVT.U32.F32 */
4373f3ce 5487 gen_vfp_uito(0);
9ee6e8bb
PB
5488 break;
5489 default:
5490 /* Reserved: 21, 29, 39-56 */
5491 return 1;
5492 }
5493 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5494 tcg_gen_st_f32(cpu_F0s, cpu_env,
5495 neon_reg_offset(rd, pass));
9ee6e8bb 5496 } else {
dd8fbd78 5497 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5498 }
5499 }
5500 break;
5501 }
5502 } else if ((insn & (1 << 10)) == 0) {
5503 /* VTBL, VTBX. */
3018f259 5504 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5505 if (insn & (1 << 6)) {
8f8e3aa4 5506 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5507 } else {
8f8e3aa4
PB
5508 tmp = new_tmp();
5509 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5510 }
8f8e3aa4
PB
5511 tmp2 = neon_load_reg(rm, 0);
5512 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5513 tcg_const_i32(n));
3018f259 5514 dead_tmp(tmp);
9ee6e8bb 5515 if (insn & (1 << 6)) {
8f8e3aa4 5516 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5517 } else {
8f8e3aa4
PB
5518 tmp = new_tmp();
5519 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5520 }
8f8e3aa4
PB
5521 tmp3 = neon_load_reg(rm, 1);
5522 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5523 tcg_const_i32(n));
5524 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5525 neon_store_reg(rd, 1, tmp3);
5526 dead_tmp(tmp);
9ee6e8bb
PB
5527 } else if ((insn & 0x380) == 0) {
5528 /* VDUP */
5529 if (insn & (1 << 19)) {
dd8fbd78 5530 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5531 } else {
dd8fbd78 5532 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5533 }
5534 if (insn & (1 << 16)) {
dd8fbd78 5535 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5536 } else if (insn & (1 << 17)) {
5537 if ((insn >> 18) & 1)
dd8fbd78 5538 gen_neon_dup_high16(tmp);
9ee6e8bb 5539 else
dd8fbd78 5540 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5541 }
5542 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5543 tmp2 = new_tmp();
5544 tcg_gen_mov_i32(tmp2, tmp);
5545 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5546 }
dd8fbd78 5547 dead_tmp(tmp);
9ee6e8bb
PB
5548 } else {
5549 return 1;
5550 }
5551 }
5552 }
5553 return 0;
5554}
5555
fe1479c3
PB
5556static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5557{
5558 int crn = (insn >> 16) & 0xf;
5559 int crm = insn & 0xf;
5560 int op1 = (insn >> 21) & 7;
5561 int op2 = (insn >> 5) & 7;
5562 int rt = (insn >> 12) & 0xf;
5563 TCGv tmp;
5564
5565 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5566 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5567 /* TEECR */
5568 if (IS_USER(s))
5569 return 1;
5570 tmp = load_cpu_field(teecr);
5571 store_reg(s, rt, tmp);
5572 return 0;
5573 }
5574 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5575 /* TEEHBR */
5576 if (IS_USER(s) && (env->teecr & 1))
5577 return 1;
5578 tmp = load_cpu_field(teehbr);
5579 store_reg(s, rt, tmp);
5580 return 0;
5581 }
5582 }
5583 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5584 op1, crn, crm, op2);
5585 return 1;
5586}
5587
5588static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5589{
5590 int crn = (insn >> 16) & 0xf;
5591 int crm = insn & 0xf;
5592 int op1 = (insn >> 21) & 7;
5593 int op2 = (insn >> 5) & 7;
5594 int rt = (insn >> 12) & 0xf;
5595 TCGv tmp;
5596
5597 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5598 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5599 /* TEECR */
5600 if (IS_USER(s))
5601 return 1;
5602 tmp = load_reg(s, rt);
5603 gen_helper_set_teecr(cpu_env, tmp);
5604 dead_tmp(tmp);
5605 return 0;
5606 }
5607 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5608 /* TEEHBR */
5609 if (IS_USER(s) && (env->teecr & 1))
5610 return 1;
5611 tmp = load_reg(s, rt);
5612 store_cpu_field(tmp, teehbr);
5613 return 0;
5614 }
5615 }
5616 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5617 op1, crn, crm, op2);
5618 return 1;
5619}
5620
9ee6e8bb
PB
5621static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5622{
5623 int cpnum;
5624
5625 cpnum = (insn >> 8) & 0xf;
5626 if (arm_feature(env, ARM_FEATURE_XSCALE)
5627 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5628 return 1;
5629
5630 switch (cpnum) {
5631 case 0:
5632 case 1:
5633 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5634 return disas_iwmmxt_insn(env, s, insn);
5635 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5636 return disas_dsp_insn(env, s, insn);
5637 }
5638 return 1;
5639 case 10:
5640 case 11:
5641 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5642 case 14:
5643 /* Coprocessors 7-15 are architecturally reserved by ARM.
5644 Unfortunately Intel decided to ignore this. */
5645 if (arm_feature(env, ARM_FEATURE_XSCALE))
5646 goto board;
5647 if (insn & (1 << 20))
5648 return disas_cp14_read(env, s, insn);
5649 else
5650 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5651 case 15:
5652 return disas_cp15_insn (env, s, insn);
5653 default:
fe1479c3 5654 board:
9ee6e8bb
PB
5655 /* Unknown coprocessor. See if the board has hooked it. */
5656 return disas_cp_insn (env, s, insn);
5657 }
5658}
5659
5e3f878a
PB
5660
5661/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5662static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5663{
5664 TCGv tmp;
5665 tmp = new_tmp();
5666 tcg_gen_trunc_i64_i32(tmp, val);
5667 store_reg(s, rlow, tmp);
5668 tmp = new_tmp();
5669 tcg_gen_shri_i64(val, val, 32);
5670 tcg_gen_trunc_i64_i32(tmp, val);
5671 store_reg(s, rhigh, tmp);
5672}
5673
5674/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5675static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5676{
a7812ae4 5677 TCGv_i64 tmp;
5e3f878a
PB
5678 TCGv tmp2;
5679
36aa55dc 5680 /* Load value and extend to 64 bits. */
a7812ae4 5681 tmp = tcg_temp_new_i64();
5e3f878a
PB
5682 tmp2 = load_reg(s, rlow);
5683 tcg_gen_extu_i32_i64(tmp, tmp2);
5684 dead_tmp(tmp2);
5685 tcg_gen_add_i64(val, val, tmp);
5686}
5687
5688/* load and add a 64-bit value from a register pair. */
a7812ae4 5689static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5690{
a7812ae4 5691 TCGv_i64 tmp;
36aa55dc
PB
5692 TCGv tmpl;
5693 TCGv tmph;
5e3f878a
PB
5694
5695 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5696 tmpl = load_reg(s, rlow);
5697 tmph = load_reg(s, rhigh);
a7812ae4 5698 tmp = tcg_temp_new_i64();
36aa55dc
PB
5699 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5700 dead_tmp(tmpl);
5701 dead_tmp(tmph);
5e3f878a
PB
5702 tcg_gen_add_i64(val, val, tmp);
5703}
5704
5705/* Set N and Z flags from a 64-bit value. */
a7812ae4 5706static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5707{
5708 TCGv tmp = new_tmp();
5709 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5710 gen_logic_CC(tmp);
5711 dead_tmp(tmp);
5e3f878a
PB
5712}
5713
9ee6e8bb
PB
5714static void disas_arm_insn(CPUState * env, DisasContext *s)
5715{
5716 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5717 TCGv tmp;
3670669c 5718 TCGv tmp2;
6ddbc6e4 5719 TCGv tmp3;
b0109805 5720 TCGv addr;
a7812ae4 5721 TCGv_i64 tmp64;
9ee6e8bb
PB
5722
5723 insn = ldl_code(s->pc);
5724 s->pc += 4;
5725
5726 /* M variants do not implement ARM mode. */
5727 if (IS_M(env))
5728 goto illegal_op;
5729 cond = insn >> 28;
5730 if (cond == 0xf){
5731 /* Unconditional instructions. */
5732 if (((insn >> 25) & 7) == 1) {
5733 /* NEON Data processing. */
5734 if (!arm_feature(env, ARM_FEATURE_NEON))
5735 goto illegal_op;
5736
5737 if (disas_neon_data_insn(env, s, insn))
5738 goto illegal_op;
5739 return;
5740 }
5741 if ((insn & 0x0f100000) == 0x04000000) {
5742 /* NEON load/store. */
5743 if (!arm_feature(env, ARM_FEATURE_NEON))
5744 goto illegal_op;
5745
5746 if (disas_neon_ls_insn(env, s, insn))
5747 goto illegal_op;
5748 return;
5749 }
5750 if ((insn & 0x0d70f000) == 0x0550f000)
5751 return; /* PLD */
5752 else if ((insn & 0x0ffffdff) == 0x01010000) {
5753 ARCH(6);
5754 /* setend */
5755 if (insn & (1 << 9)) {
5756 /* BE8 mode not implemented. */
5757 goto illegal_op;
5758 }
5759 return;
5760 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5761 switch ((insn >> 4) & 0xf) {
5762 case 1: /* clrex */
5763 ARCH(6K);
8f8e3aa4 5764 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5765 return;
5766 case 4: /* dsb */
5767 case 5: /* dmb */
5768 case 6: /* isb */
5769 ARCH(7);
5770 /* We don't emulate caches so these are a no-op. */
5771 return;
5772 default:
5773 goto illegal_op;
5774 }
5775 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5776 /* srs */
c67b6b71 5777 int32_t offset;
9ee6e8bb
PB
5778 if (IS_USER(s))
5779 goto illegal_op;
5780 ARCH(6);
5781 op1 = (insn & 0x1f);
5782 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5783 addr = load_reg(s, 13);
9ee6e8bb 5784 } else {
b0109805
PB
5785 addr = new_tmp();
5786 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5787 }
5788 i = (insn >> 23) & 3;
5789 switch (i) {
5790 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5791 case 1: offset = 0; break; /* IA */
5792 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
5793 case 3: offset = 4; break; /* IB */
5794 default: abort();
5795 }
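            /* The offset selects the address of the first of the two words
               that srs stores (LR, then SPSR).  For DB, for instance, the
               pair occupies [r13_banked - 8, r13_banked - 4], ending just
               below the banked stack pointer. */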
5796 if (offset)
b0109805
PB
5797 tcg_gen_addi_i32(addr, addr, offset);
5798 tmp = load_reg(s, 14);
5799 gen_st32(tmp, addr, 0);
c67b6b71 5800 tmp = load_cpu_field(spsr);
b0109805
PB
5801 tcg_gen_addi_i32(addr, addr, 4);
5802 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5803 if (insn & (1 << 21)) {
5804 /* Base writeback. */
5805 switch (i) {
5806 case 0: offset = -8; break;
c67b6b71
FN
5807 case 1: offset = 4; break;
5808 case 2: offset = -4; break;
9ee6e8bb
PB
5809 case 3: offset = 0; break;
5810 default: abort();
5811 }
5812 if (offset)
c67b6b71 5813 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 5814 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 5815 store_reg(s, 13, addr);
9ee6e8bb 5816 } else {
c67b6b71
FN
5817 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
5818 dead_tmp(addr);
9ee6e8bb 5819 }
b0109805
PB
5820 } else {
5821 dead_tmp(addr);
9ee6e8bb
PB
5822 }
5823 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5824 /* rfe */
c67b6b71 5825 int32_t offset;
9ee6e8bb
PB
5826 if (IS_USER(s))
5827 goto illegal_op;
5828 ARCH(6);
5829 rn = (insn >> 16) & 0xf;
b0109805 5830 addr = load_reg(s, rn);
9ee6e8bb
PB
5831 i = (insn >> 23) & 3;
5832 switch (i) {
b0109805 5833 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5834 case 1: offset = 0; break; /* IA */
5835 case 2: offset = -8; break; /* DB */
b0109805 5836 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5837 default: abort();
5838 }
5839 if (offset)
b0109805
PB
5840 tcg_gen_addi_i32(addr, addr, offset);
5841 /* Load PC into tmp and CPSR into tmp2. */
5842 tmp = gen_ld32(addr, 0);
5843 tcg_gen_addi_i32(addr, addr, 4);
5844 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5845 if (insn & (1 << 21)) {
5846 /* Base writeback. */
5847 switch (i) {
b0109805 5848 case 0: offset = -8; break;
c67b6b71
FN
5849 case 1: offset = 4; break;
5850 case 2: offset = -4; break;
b0109805 5851 case 3: offset = 0; break;
9ee6e8bb
PB
5852 default: abort();
5853 }
5854 if (offset)
b0109805
PB
5855 tcg_gen_addi_i32(addr, addr, offset);
5856 store_reg(s, rn, addr);
5857 } else {
5858 dead_tmp(addr);
9ee6e8bb 5859 }
b0109805 5860 gen_rfe(s, tmp, tmp2);
c67b6b71 5861 return;
9ee6e8bb
PB
5862 } else if ((insn & 0x0e000000) == 0x0a000000) {
5863 /* branch link and change to thumb (blx <offset>) */
5864 int32_t offset;
5865
5866 val = (uint32_t)s->pc;
d9ba4830
PB
5867 tmp = new_tmp();
5868 tcg_gen_movi_i32(tmp, val);
5869 store_reg(s, 14, tmp);
9ee6e8bb
PB
5870 /* Sign-extend the 24-bit offset */
5871 offset = (((int32_t)insn) << 8) >> 8;
5872 /* offset * 4 + bit24 * 2 + (thumb bit) */
5873 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5874 /* pipeline offset */
5875 val += 4;
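            /* For example, a blx at address A with imm24 == 1 and the H bit
               clear targets (A + 8) + 4 with bit 0 set, i.e. Thumb state
               at A + 12. */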
d9ba4830 5876 gen_bx_im(s, val);
9ee6e8bb
PB
5877 return;
5878 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5879 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5880 /* iWMMXt register transfer. */
5881 if (env->cp15.c15_cpar & (1 << 1))
5882 if (!disas_iwmmxt_insn(env, s, insn))
5883 return;
5884 }
5885 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5886 /* Coprocessor double register transfer. */
5887 } else if ((insn & 0x0f000010) == 0x0e000010) {
5888 /* Additional coprocessor register transfer. */
7997d92f 5889 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5890 uint32_t mask;
5891 uint32_t val;
5892 /* cps (privileged) */
5893 if (IS_USER(s))
5894 return;
5895 mask = val = 0;
5896 if (insn & (1 << 19)) {
5897 if (insn & (1 << 8))
5898 mask |= CPSR_A;
5899 if (insn & (1 << 7))
5900 mask |= CPSR_I;
5901 if (insn & (1 << 6))
5902 mask |= CPSR_F;
5903 if (insn & (1 << 18))
5904 val |= mask;
5905 }
7997d92f 5906 if (insn & (1 << 17)) {
9ee6e8bb
PB
5907 mask |= CPSR_M;
5908 val |= (insn & 0x1f);
5909 }
5910 if (mask) {
2fbac54b 5911 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
5912 }
5913 return;
5914 }
5915 goto illegal_op;
5916 }
5917 if (cond != 0xe) {
5918 /* if not always execute, we generate a conditional jump to
5919 next instruction */
5920 s->condlabel = gen_new_label();
d9ba4830 5921 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5922 s->condjmp = 1;
5923 }
5924 if ((insn & 0x0f900000) == 0x03000000) {
5925 if ((insn & (1 << 21)) == 0) {
5926 ARCH(6T2);
5927 rd = (insn >> 12) & 0xf;
5928 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5929 if ((insn & (1 << 22)) == 0) {
5930 /* MOVW */
5e3f878a
PB
5931 tmp = new_tmp();
5932 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5933 } else {
5934 /* MOVT */
5e3f878a 5935 tmp = load_reg(s, rd);
86831435 5936 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5937 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5938 }
5e3f878a 5939 store_reg(s, rd, tmp);
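            /* MOVW loads the 16-bit immediate into the low half and clears
               the top half; MOVT replaces only the top half.  For instance,
               movw rd, #0x5678 followed by movt rd, #0x1234 leaves rd
               holding 0x12345678. */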
9ee6e8bb
PB
5940 } else {
5941 if (((insn >> 12) & 0xf) != 0xf)
5942 goto illegal_op;
5943 if (((insn >> 16) & 0xf) == 0) {
5944 gen_nop_hint(s, insn & 0xff);
5945 } else {
5946 /* CPSR = immediate */
5947 val = insn & 0xff;
5948 shift = ((insn >> 8) & 0xf) * 2;
5949 if (shift)
5950 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 5951 i = ((insn & (1 << 22)) != 0);
2fbac54b 5952 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
5953 goto illegal_op;
5954 }
5955 }
5956 } else if ((insn & 0x0f900000) == 0x01000000
5957 && (insn & 0x00000090) != 0x00000090) {
5958 /* miscellaneous instructions */
5959 op1 = (insn >> 21) & 3;
5960 sh = (insn >> 4) & 0xf;
5961 rm = insn & 0xf;
5962 switch (sh) {
5963 case 0x0: /* move program status register */
5964 if (op1 & 1) {
5965 /* PSR = reg */
2fbac54b 5966 tmp = load_reg(s, rm);
9ee6e8bb 5967 i = ((op1 & 2) != 0);
2fbac54b 5968 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
5969 goto illegal_op;
5970 } else {
5971 /* reg = PSR */
5972 rd = (insn >> 12) & 0xf;
5973 if (op1 & 2) {
5974 if (IS_USER(s))
5975 goto illegal_op;
d9ba4830 5976 tmp = load_cpu_field(spsr);
9ee6e8bb 5977 } else {
d9ba4830
PB
5978 tmp = new_tmp();
5979 gen_helper_cpsr_read(tmp);
9ee6e8bb 5980 }
d9ba4830 5981 store_reg(s, rd, tmp);
9ee6e8bb
PB
5982 }
5983 break;
5984 case 0x1:
5985 if (op1 == 1) {
5986 /* branch/exchange thumb (bx). */
d9ba4830
PB
5987 tmp = load_reg(s, rm);
5988 gen_bx(s, tmp);
9ee6e8bb
PB
5989 } else if (op1 == 3) {
5990 /* clz */
5991 rd = (insn >> 12) & 0xf;
1497c961
PB
5992 tmp = load_reg(s, rm);
5993 gen_helper_clz(tmp, tmp);
5994 store_reg(s, rd, tmp);
9ee6e8bb
PB
5995 } else {
5996 goto illegal_op;
5997 }
5998 break;
5999 case 0x2:
6000 if (op1 == 1) {
6001 ARCH(5J); /* bxj */
6002 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6003 tmp = load_reg(s, rm);
6004 gen_bx(s, tmp);
9ee6e8bb
PB
6005 } else {
6006 goto illegal_op;
6007 }
6008 break;
6009 case 0x3:
6010 if (op1 != 1)
6011 goto illegal_op;
6012
6013 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6014 tmp = load_reg(s, rm);
6015 tmp2 = new_tmp();
6016 tcg_gen_movi_i32(tmp2, s->pc);
6017 store_reg(s, 14, tmp2);
6018 gen_bx(s, tmp);
9ee6e8bb
PB
6019 break;
6020 case 0x5: /* saturating add/subtract */
6021 rd = (insn >> 12) & 0xf;
6022 rn = (insn >> 16) & 0xf;
b40d0353 6023 tmp = load_reg(s, rm);
5e3f878a 6024 tmp2 = load_reg(s, rn);
9ee6e8bb 6025 if (op1 & 2)
5e3f878a 6026 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6027 if (op1 & 1)
5e3f878a 6028 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6029 else
5e3f878a
PB
6030 gen_helper_add_saturate(tmp, tmp, tmp2);
6031 dead_tmp(tmp2);
6032 store_reg(s, rd, tmp);
9ee6e8bb
PB
6033 break;
6034 case 7: /* bkpt */
6035 gen_set_condexec(s);
5e3f878a 6036 gen_set_pc_im(s->pc - 4);
d9ba4830 6037 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6038 s->is_jmp = DISAS_JUMP;
6039 break;
6040 case 0x8: /* signed multiply */
6041 case 0xa:
6042 case 0xc:
6043 case 0xe:
6044 rs = (insn >> 8) & 0xf;
6045 rn = (insn >> 12) & 0xf;
6046 rd = (insn >> 16) & 0xf;
6047 if (op1 == 1) {
6048 /* (32 * 16) >> 16 */
5e3f878a
PB
6049 tmp = load_reg(s, rm);
6050 tmp2 = load_reg(s, rs);
9ee6e8bb 6051 if (sh & 4)
5e3f878a 6052 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6053 else
5e3f878a 6054 gen_sxth(tmp2);
a7812ae4
PB
6055 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6056 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6057 tmp = new_tmp();
a7812ae4 6058 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 6059 if ((sh & 2) == 0) {
5e3f878a
PB
6060 tmp2 = load_reg(s, rn);
6061 gen_helper_add_setq(tmp, tmp, tmp2);
6062 dead_tmp(tmp2);
9ee6e8bb 6063 }
5e3f878a 6064 store_reg(s, rd, tmp);
9ee6e8bb
PB
6065 } else {
6066 /* 16 * 16 */
5e3f878a
PB
6067 tmp = load_reg(s, rm);
6068 tmp2 = load_reg(s, rs);
6069 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6070 dead_tmp(tmp2);
9ee6e8bb 6071 if (op1 == 2) {
a7812ae4
PB
6072 tmp64 = tcg_temp_new_i64();
6073 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6074 dead_tmp(tmp);
a7812ae4
PB
6075 gen_addq(s, tmp64, rn, rd);
6076 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6077 } else {
6078 if (op1 == 0) {
5e3f878a
PB
6079 tmp2 = load_reg(s, rn);
6080 gen_helper_add_setq(tmp, tmp, tmp2);
6081 dead_tmp(tmp2);
9ee6e8bb 6082 }
5e3f878a 6083 store_reg(s, rd, tmp);
9ee6e8bb
PB
6084 }
6085 }
6086 break;
6087 default:
6088 goto illegal_op;
6089 }
6090 } else if (((insn & 0x0e000000) == 0 &&
6091 (insn & 0x00000090) != 0x90) ||
6092 ((insn & 0x0e000000) == (1 << 25))) {
6093 int set_cc, logic_cc, shiftop;
6094
6095 op1 = (insn >> 21) & 0xf;
6096 set_cc = (insn >> 20) & 1;
6097 logic_cc = table_logic_cc[op1] & set_cc;
6098
6099 /* data processing instruction */
6100 if (insn & (1 << 25)) {
6101 /* immediate operand */
6102 val = insn & 0xff;
6103 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6104 if (shift) {
9ee6e8bb 6105 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6106 }
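            /* This is the usual ARM immediate encoding: an 8-bit value
               rotated right by twice the 4-bit rotate field, so e.g.
               imm8 0xff with a rotate field of 4 produces 0xff000000. */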
6107 tmp2 = new_tmp();
6108 tcg_gen_movi_i32(tmp2, val);
6109 if (logic_cc && shift) {
6110 gen_set_CF_bit31(tmp2);
6111 }
9ee6e8bb
PB
6112 } else {
6113 /* register */
6114 rm = (insn) & 0xf;
e9bb4aa9 6115 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6116 shiftop = (insn >> 5) & 3;
6117 if (!(insn & (1 << 4))) {
6118 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6119 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6120 } else {
6121 rs = (insn >> 8) & 0xf;
8984bd2e 6122 tmp = load_reg(s, rs);
e9bb4aa9 6123 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6124 }
6125 }
6126 if (op1 != 0x0f && op1 != 0x0d) {
6127 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6128 tmp = load_reg(s, rn);
6129 } else {
6130 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6131 }
6132 rd = (insn >> 12) & 0xf;
6133 switch(op1) {
6134 case 0x00:
e9bb4aa9
JR
6135 tcg_gen_and_i32(tmp, tmp, tmp2);
6136 if (logic_cc) {
6137 gen_logic_CC(tmp);
6138 }
21aeb343 6139 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6140 break;
6141 case 0x01:
e9bb4aa9
JR
6142 tcg_gen_xor_i32(tmp, tmp, tmp2);
6143 if (logic_cc) {
6144 gen_logic_CC(tmp);
6145 }
21aeb343 6146 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6147 break;
6148 case 0x02:
6149 if (set_cc && rd == 15) {
6150 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6151 if (IS_USER(s)) {
9ee6e8bb 6152 goto illegal_op;
e9bb4aa9
JR
6153 }
6154 gen_helper_sub_cc(tmp, tmp, tmp2);
6155 gen_exception_return(s, tmp);
9ee6e8bb 6156 } else {
e9bb4aa9
JR
6157 if (set_cc) {
6158 gen_helper_sub_cc(tmp, tmp, tmp2);
6159 } else {
6160 tcg_gen_sub_i32(tmp, tmp, tmp2);
6161 }
21aeb343 6162 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6163 }
6164 break;
6165 case 0x03:
e9bb4aa9
JR
6166 if (set_cc) {
6167 gen_helper_sub_cc(tmp, tmp2, tmp);
6168 } else {
6169 tcg_gen_sub_i32(tmp, tmp2, tmp);
6170 }
21aeb343 6171 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6172 break;
6173 case 0x04:
e9bb4aa9
JR
6174 if (set_cc) {
6175 gen_helper_add_cc(tmp, tmp, tmp2);
6176 } else {
6177 tcg_gen_add_i32(tmp, tmp, tmp2);
6178 }
21aeb343 6179 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6180 break;
6181 case 0x05:
e9bb4aa9
JR
6182 if (set_cc) {
6183 gen_helper_adc_cc(tmp, tmp, tmp2);
6184 } else {
6185 gen_add_carry(tmp, tmp, tmp2);
6186 }
21aeb343 6187 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6188 break;
6189 case 0x06:
e9bb4aa9
JR
6190 if (set_cc) {
6191 gen_helper_sbc_cc(tmp, tmp, tmp2);
6192 } else {
6193 gen_sub_carry(tmp, tmp, tmp2);
6194 }
21aeb343 6195 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6196 break;
6197 case 0x07:
e9bb4aa9
JR
6198 if (set_cc) {
6199 gen_helper_sbc_cc(tmp, tmp2, tmp);
6200 } else {
6201 gen_sub_carry(tmp, tmp2, tmp);
6202 }
21aeb343 6203 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6204 break;
6205 case 0x08:
6206 if (set_cc) {
e9bb4aa9
JR
6207 tcg_gen_and_i32(tmp, tmp, tmp2);
6208 gen_logic_CC(tmp);
9ee6e8bb 6209 }
e9bb4aa9 6210 dead_tmp(tmp);
9ee6e8bb
PB
6211 break;
6212 case 0x09:
6213 if (set_cc) {
e9bb4aa9
JR
6214 tcg_gen_xor_i32(tmp, tmp, tmp2);
6215 gen_logic_CC(tmp);
9ee6e8bb 6216 }
e9bb4aa9 6217 dead_tmp(tmp);
9ee6e8bb
PB
6218 break;
6219 case 0x0a:
6220 if (set_cc) {
e9bb4aa9 6221 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6222 }
e9bb4aa9 6223 dead_tmp(tmp);
9ee6e8bb
PB
6224 break;
6225 case 0x0b:
6226 if (set_cc) {
e9bb4aa9 6227 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6228 }
e9bb4aa9 6229 dead_tmp(tmp);
9ee6e8bb
PB
6230 break;
6231 case 0x0c:
e9bb4aa9
JR
6232 tcg_gen_or_i32(tmp, tmp, tmp2);
6233 if (logic_cc) {
6234 gen_logic_CC(tmp);
6235 }
21aeb343 6236 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6237 break;
6238 case 0x0d:
6239 if (logic_cc && rd == 15) {
6240 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6241 if (IS_USER(s)) {
9ee6e8bb 6242 goto illegal_op;
e9bb4aa9
JR
6243 }
6244 gen_exception_return(s, tmp2);
9ee6e8bb 6245 } else {
e9bb4aa9
JR
6246 if (logic_cc) {
6247 gen_logic_CC(tmp2);
6248 }
21aeb343 6249 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6250 }
6251 break;
6252 case 0x0e:
e9bb4aa9
JR
6253 tcg_gen_bic_i32(tmp, tmp, tmp2);
6254 if (logic_cc) {
6255 gen_logic_CC(tmp);
6256 }
21aeb343 6257 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6258 break;
6259 default:
6260 case 0x0f:
e9bb4aa9
JR
6261 tcg_gen_not_i32(tmp2, tmp2);
6262 if (logic_cc) {
6263 gen_logic_CC(tmp2);
6264 }
21aeb343 6265 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6266 break;
6267 }
e9bb4aa9
JR
6268 if (op1 != 0x0f && op1 != 0x0d) {
6269 dead_tmp(tmp2);
6270 }
9ee6e8bb
PB
6271 } else {
6272 /* other instructions */
6273 op1 = (insn >> 24) & 0xf;
6274 switch(op1) {
6275 case 0x0:
6276 case 0x1:
6277 /* multiplies, extra load/stores */
6278 sh = (insn >> 5) & 3;
6279 if (sh == 0) {
6280 if (op1 == 0x0) {
6281 rd = (insn >> 16) & 0xf;
6282 rn = (insn >> 12) & 0xf;
6283 rs = (insn >> 8) & 0xf;
6284 rm = (insn) & 0xf;
6285 op1 = (insn >> 20) & 0xf;
6286 switch (op1) {
6287 case 0: case 1: case 2: case 3: case 6:
6288 /* 32 bit mul */
5e3f878a
PB
6289 tmp = load_reg(s, rs);
6290 tmp2 = load_reg(s, rm);
6291 tcg_gen_mul_i32(tmp, tmp, tmp2);
6292 dead_tmp(tmp2);
9ee6e8bb
PB
6293 if (insn & (1 << 22)) {
6294 /* Subtract (mls) */
6295 ARCH(6T2);
5e3f878a
PB
6296 tmp2 = load_reg(s, rn);
6297 tcg_gen_sub_i32(tmp, tmp2, tmp);
6298 dead_tmp(tmp2);
9ee6e8bb
PB
6299 } else if (insn & (1 << 21)) {
6300 /* Add */
5e3f878a
PB
6301 tmp2 = load_reg(s, rn);
6302 tcg_gen_add_i32(tmp, tmp, tmp2);
6303 dead_tmp(tmp2);
9ee6e8bb
PB
6304 }
6305 if (insn & (1 << 20))
5e3f878a
PB
6306 gen_logic_CC(tmp);
6307 store_reg(s, rd, tmp);
9ee6e8bb
PB
6308 break;
6309 default:
6310 /* 64 bit mul */
5e3f878a
PB
6311 tmp = load_reg(s, rs);
6312 tmp2 = load_reg(s, rm);
9ee6e8bb 6313 if (insn & (1 << 22))
a7812ae4 6314 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6315 else
a7812ae4 6316 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6317 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6318 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6319 if (!(insn & (1 << 23))) { /* double accumulate */
6320 ARCH(6);
a7812ae4
PB
6321 gen_addq_lo(s, tmp64, rn);
6322 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6323 }
6324 if (insn & (1 << 20))
a7812ae4
PB
6325 gen_logicq_cc(tmp64);
6326 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6327 break;
6328 }
6329 } else {
6330 rn = (insn >> 16) & 0xf;
6331 rd = (insn >> 12) & 0xf;
6332 if (insn & (1 << 23)) {
6333 /* load/store exclusive */
86753403
PB
6334 op1 = (insn >> 21) & 0x3;
6335 if (op1)
a47f43d2 6336 ARCH(6K);
86753403
PB
6337 else
6338 ARCH(6);
3174f8e9
FN
6339 addr = tcg_temp_local_new_i32();
6340 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb 6341 if (insn & (1 << 20)) {
3174f8e9 6342 gen_helper_mark_exclusive(cpu_env, addr);
86753403
PB
6343 switch (op1) {
6344 case 0: /* ldrex */
6345 tmp = gen_ld32(addr, IS_USER(s));
6346 break;
6347 case 1: /* ldrexd */
6348 tmp = gen_ld32(addr, IS_USER(s));
6349 store_reg(s, rd, tmp);
6350 tcg_gen_addi_i32(addr, addr, 4);
6351 tmp = gen_ld32(addr, IS_USER(s));
6352 rd++;
6353 break;
6354 case 2: /* ldrexb */
6355 tmp = gen_ld8u(addr, IS_USER(s));
6356 break;
6357 case 3: /* ldrexh */
6358 tmp = gen_ld16u(addr, IS_USER(s));
6359 break;
6360 default:
6361 abort();
6362 }
8f8e3aa4 6363 store_reg(s, rd, tmp);
9ee6e8bb 6364 } else {
8f8e3aa4 6365 int label = gen_new_label();
9ee6e8bb 6366 rm = insn & 0xf;
3174f8e9
FN
6367 tmp2 = tcg_temp_local_new_i32();
6368 gen_helper_test_exclusive(tmp2, cpu_env, addr);
6369 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 6370 tmp = load_reg(s,rm);
86753403
PB
6371 switch (op1) {
6372 case 0: /* strex */
6373 gen_st32(tmp, addr, IS_USER(s));
6374 break;
6375 case 1: /* strexd */
6376 gen_st32(tmp, addr, IS_USER(s));
6377 tcg_gen_addi_i32(addr, addr, 4);
6378 tmp = load_reg(s, rm + 1);
6379 gen_st32(tmp, addr, IS_USER(s));
6380 break;
6381 case 2: /* strexb */
6382 gen_st8(tmp, addr, IS_USER(s));
6383 break;
6384 case 3: /* strexh */
6385 gen_st16(tmp, addr, IS_USER(s));
6386 break;
6387 default:
6388 abort();
6389 }
2637a3be 6390 gen_set_label(label);
3174f8e9
FN
6391 tcg_gen_mov_i32(cpu_R[rd], tmp2);
6392 tcg_temp_free(tmp2);
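                            /* tmp2 holds the value returned by
                               gen_helper_test_exclusive(); the store above
                               is skipped when it is non-zero, and it then
                               becomes the strex status written to rd
                               (0 meaning the store was performed). */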
9ee6e8bb 6393 }
3174f8e9 6394 tcg_temp_free(addr);
9ee6e8bb
PB
6395 } else {
6396 /* SWP instruction */
6397 rm = (insn) & 0xf;
6398
8984bd2e
PB
6399                    /* ??? This is not really atomic. However, we know
6400 we never have multiple CPUs running in parallel,
6401 so it is good enough. */
6402 addr = load_reg(s, rn);
6403 tmp = load_reg(s, rm);
9ee6e8bb 6404 if (insn & (1 << 22)) {
8984bd2e
PB
6405 tmp2 = gen_ld8u(addr, IS_USER(s));
6406 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6407 } else {
8984bd2e
PB
6408 tmp2 = gen_ld32(addr, IS_USER(s));
6409 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6410 }
8984bd2e
PB
6411 dead_tmp(addr);
6412 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6413 }
6414 }
6415 } else {
6416 int address_offset;
6417 int load;
6418 /* Misc load/store */
6419 rn = (insn >> 16) & 0xf;
6420 rd = (insn >> 12) & 0xf;
b0109805 6421 addr = load_reg(s, rn);
9ee6e8bb 6422 if (insn & (1 << 24))
b0109805 6423 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6424 address_offset = 0;
6425 if (insn & (1 << 20)) {
6426 /* load */
6427 switch(sh) {
6428 case 1:
b0109805 6429 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6430 break;
6431 case 2:
b0109805 6432 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6433 break;
6434 default:
6435 case 3:
b0109805 6436 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6437 break;
6438 }
6439 load = 1;
6440 } else if (sh & 2) {
6441 /* doubleword */
6442 if (sh & 1) {
6443 /* store */
b0109805
PB
6444 tmp = load_reg(s, rd);
6445 gen_st32(tmp, addr, IS_USER(s));
6446 tcg_gen_addi_i32(addr, addr, 4);
6447 tmp = load_reg(s, rd + 1);
6448 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6449 load = 0;
6450 } else {
6451 /* load */
b0109805
PB
6452 tmp = gen_ld32(addr, IS_USER(s));
6453 store_reg(s, rd, tmp);
6454 tcg_gen_addi_i32(addr, addr, 4);
6455 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6456 rd++;
6457 load = 1;
6458 }
6459 address_offset = -4;
6460 } else {
6461 /* store */
b0109805
PB
6462 tmp = load_reg(s, rd);
6463 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6464 load = 0;
6465 }
6466 /* Perform base writeback before the loaded value to
6467 ensure correct behavior with overlapping index registers.
6468                       ldrd with base writeback is undefined if the
6469 destination and index registers overlap. */
6470 if (!(insn & (1 << 24))) {
b0109805
PB
6471 gen_add_datah_offset(s, insn, address_offset, addr);
6472 store_reg(s, rn, addr);
9ee6e8bb
PB
6473 } else if (insn & (1 << 21)) {
6474 if (address_offset)
b0109805
PB
6475 tcg_gen_addi_i32(addr, addr, address_offset);
6476 store_reg(s, rn, addr);
6477 } else {
6478 dead_tmp(addr);
9ee6e8bb
PB
6479 }
6480 if (load) {
6481 /* Complete the load. */
b0109805 6482 store_reg(s, rd, tmp);
9ee6e8bb
PB
6483 }
6484 }
6485 break;
6486 case 0x4:
6487 case 0x5:
6488 goto do_ldst;
6489 case 0x6:
6490 case 0x7:
6491 if (insn & (1 << 4)) {
6492 ARCH(6);
6493 /* Armv6 Media instructions. */
6494 rm = insn & 0xf;
6495 rn = (insn >> 16) & 0xf;
2c0262af 6496 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6497 rs = (insn >> 8) & 0xf;
6498 switch ((insn >> 23) & 3) {
6499 case 0: /* Parallel add/subtract. */
6500 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6501 tmp = load_reg(s, rn);
6502 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6503 sh = (insn >> 5) & 7;
6504 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6505 goto illegal_op;
6ddbc6e4
PB
6506 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6507 dead_tmp(tmp2);
6508 store_reg(s, rd, tmp);
9ee6e8bb
PB
6509 break;
6510 case 1:
6511 if ((insn & 0x00700020) == 0) {
6c95676b 6512 /* Halfword pack. */
3670669c
PB
6513 tmp = load_reg(s, rn);
6514 tmp2 = load_reg(s, rm);
9ee6e8bb 6515 shift = (insn >> 7) & 0x1f;
3670669c
PB
6516 if (insn & (1 << 6)) {
6517 /* pkhtb */
22478e79
AZ
6518 if (shift == 0)
6519 shift = 31;
6520 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6521 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6522 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6523 } else {
6524 /* pkhbt */
22478e79
AZ
6525 if (shift)
6526 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6527 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6528 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6529 }
6530 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6531 dead_tmp(tmp2);
3670669c 6532 store_reg(s, rd, tmp);
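                    /* pkhbt combines the bottom halfword of rn with the top
                       halfword of rm << shift; pkhtb combines the top
                       halfword of rn with the bottom halfword of
                       rm >> shift (arithmetic). */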
9ee6e8bb
PB
6533 } else if ((insn & 0x00200020) == 0x00200000) {
6534 /* [us]sat */
6ddbc6e4 6535 tmp = load_reg(s, rm);
9ee6e8bb
PB
6536 shift = (insn >> 7) & 0x1f;
6537 if (insn & (1 << 6)) {
6538 if (shift == 0)
6539 shift = 31;
6ddbc6e4 6540 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6541 } else {
6ddbc6e4 6542 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6543 }
6544 sh = (insn >> 16) & 0x1f;
6545 if (sh != 0) {
6546 if (insn & (1 << 22))
6ddbc6e4 6547 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6548 else
6ddbc6e4 6549 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6550 }
6ddbc6e4 6551 store_reg(s, rd, tmp);
9ee6e8bb
PB
6552 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6553 /* [us]sat16 */
6ddbc6e4 6554 tmp = load_reg(s, rm);
9ee6e8bb
PB
6555 sh = (insn >> 16) & 0x1f;
6556 if (sh != 0) {
6557 if (insn & (1 << 22))
6ddbc6e4 6558 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6559 else
6ddbc6e4 6560 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6561 }
6ddbc6e4 6562 store_reg(s, rd, tmp);
9ee6e8bb
PB
6563 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6564 /* Select bytes. */
6ddbc6e4
PB
6565 tmp = load_reg(s, rn);
6566 tmp2 = load_reg(s, rm);
6567 tmp3 = new_tmp();
6568 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6569 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6570 dead_tmp(tmp3);
6571 dead_tmp(tmp2);
6572 store_reg(s, rd, tmp);
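                    /* sel picks each result byte from rn or rm according to
                       the GE flags set by a preceding parallel
                       add/subtract. */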
9ee6e8bb 6573 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6574 tmp = load_reg(s, rm);
9ee6e8bb
PB
6575 shift = (insn >> 10) & 3;
6576 /* ??? In many cases it's not neccessary to do a
6577 rotate, a shift is sufficient. */
6578 if (shift != 0)
5e3f878a 6579 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6580 op1 = (insn >> 20) & 7;
6581 switch (op1) {
5e3f878a
PB
6582 case 0: gen_sxtb16(tmp); break;
6583 case 2: gen_sxtb(tmp); break;
6584 case 3: gen_sxth(tmp); break;
6585 case 4: gen_uxtb16(tmp); break;
6586 case 6: gen_uxtb(tmp); break;
6587 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6588 default: goto illegal_op;
6589 }
6590 if (rn != 15) {
5e3f878a 6591 tmp2 = load_reg(s, rn);
9ee6e8bb 6592 if ((op1 & 3) == 0) {
5e3f878a 6593 gen_add16(tmp, tmp2);
9ee6e8bb 6594 } else {
5e3f878a
PB
6595 tcg_gen_add_i32(tmp, tmp, tmp2);
6596 dead_tmp(tmp2);
9ee6e8bb
PB
6597 }
6598 }
6c95676b 6599 store_reg(s, rd, tmp);
9ee6e8bb
PB
6600 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6601 /* rev */
b0109805 6602 tmp = load_reg(s, rm);
9ee6e8bb
PB
6603 if (insn & (1 << 22)) {
6604 if (insn & (1 << 7)) {
b0109805 6605 gen_revsh(tmp);
9ee6e8bb
PB
6606 } else {
6607 ARCH(6T2);
b0109805 6608 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6609 }
6610 } else {
6611 if (insn & (1 << 7))
b0109805 6612 gen_rev16(tmp);
9ee6e8bb 6613 else
66896cb8 6614 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6615 }
b0109805 6616 store_reg(s, rd, tmp);
9ee6e8bb
PB
6617 } else {
6618 goto illegal_op;
6619 }
6620 break;
6621 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6622 tmp = load_reg(s, rm);
6623 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6624 if (insn & (1 << 20)) {
6625 /* Signed multiply most significant [accumulate]. */
a7812ae4 6626 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6627 if (insn & (1 << 5))
a7812ae4
PB
6628 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6629 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6630 tmp = new_tmp();
a7812ae4 6631 tcg_gen_trunc_i64_i32(tmp, tmp64);
955a7dd5
AZ
6632 if (rd != 15) {
6633 tmp2 = load_reg(s, rd);
9ee6e8bb 6634 if (insn & (1 << 6)) {
5e3f878a 6635 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6636 } else {
5e3f878a 6637 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6638 }
5e3f878a 6639 dead_tmp(tmp2);
9ee6e8bb 6640 }
955a7dd5 6641 store_reg(s, rn, tmp);
9ee6e8bb
PB
6642 } else {
6643 if (insn & (1 << 5))
5e3f878a
PB
6644 gen_swap_half(tmp2);
6645 gen_smul_dual(tmp, tmp2);
6646 /* This addition cannot overflow. */
6647 if (insn & (1 << 6)) {
6648 tcg_gen_sub_i32(tmp, tmp, tmp2);
6649 } else {
6650 tcg_gen_add_i32(tmp, tmp, tmp2);
6651 }
6652 dead_tmp(tmp2);
9ee6e8bb 6653 if (insn & (1 << 22)) {
5e3f878a 6654 /* smlald, smlsld */
a7812ae4
PB
6655 tmp64 = tcg_temp_new_i64();
6656 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6657 dead_tmp(tmp);
a7812ae4
PB
6658 gen_addq(s, tmp64, rd, rn);
6659 gen_storeq_reg(s, rd, rn, tmp64);
9ee6e8bb 6660 } else {
5e3f878a 6661 /* smuad, smusd, smlad, smlsd */
22478e79 6662 if (rd != 15)
9ee6e8bb 6663 {
22478e79 6664 tmp2 = load_reg(s, rd);
5e3f878a
PB
6665 gen_helper_add_setq(tmp, tmp, tmp2);
6666 dead_tmp(tmp2);
9ee6e8bb 6667 }
22478e79 6668 store_reg(s, rn, tmp);
9ee6e8bb
PB
6669 }
6670 }
6671 break;
6672 case 3:
6673 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6674 switch (op1) {
6675 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6676 ARCH(6);
6677 tmp = load_reg(s, rm);
6678 tmp2 = load_reg(s, rs);
6679 gen_helper_usad8(tmp, tmp, tmp2);
6680 dead_tmp(tmp2);
ded9d295
AZ
6681 if (rd != 15) {
6682 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6683 tcg_gen_add_i32(tmp, tmp, tmp2);
6684 dead_tmp(tmp2);
9ee6e8bb 6685 }
ded9d295 6686 store_reg(s, rn, tmp);
9ee6e8bb
PB
6687 break;
6688 case 0x20: case 0x24: case 0x28: case 0x2c:
6689 /* Bitfield insert/clear. */
6690 ARCH(6T2);
6691 shift = (insn >> 7) & 0x1f;
6692 i = (insn >> 16) & 0x1f;
6693 i = i + 1 - shift;
6694 if (rm == 15) {
5e3f878a
PB
6695 tmp = new_tmp();
6696 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6697 } else {
5e3f878a 6698 tmp = load_reg(s, rm);
9ee6e8bb
PB
6699 }
6700 if (i != 32) {
5e3f878a 6701 tmp2 = load_reg(s, rd);
8f8e3aa4 6702 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6703 dead_tmp(tmp2);
9ee6e8bb 6704 }
5e3f878a 6705 store_reg(s, rd, tmp);
9ee6e8bb
PB
6706 break;
6707 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6708 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6709 ARCH(6T2);
5e3f878a 6710 tmp = load_reg(s, rm);
9ee6e8bb
PB
6711 shift = (insn >> 7) & 0x1f;
6712 i = ((insn >> 16) & 0x1f) + 1;
6713 if (shift + i > 32)
6714 goto illegal_op;
6715 if (i < 32) {
6716 if (op1 & 0x20) {
5e3f878a 6717 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6718 } else {
5e3f878a 6719 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6720 }
6721 }
5e3f878a 6722 store_reg(s, rd, tmp);
9ee6e8bb
PB
6723 break;
6724 default:
6725 goto illegal_op;
6726 }
6727 break;
6728 }
6729 break;
6730 }
6731 do_ldst:
6732 /* Check for undefined extension instructions
6733         * per the ARM Bible, i.e.:
6734 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6735 */
6736 sh = (0xf << 20) | (0xf << 4);
6737 if (op1 == 0x7 && ((insn & sh) == sh))
6738 {
6739 goto illegal_op;
6740 }
6741 /* load/store byte/word */
6742 rn = (insn >> 16) & 0xf;
6743 rd = (insn >> 12) & 0xf;
b0109805 6744 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6745 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6746 if (insn & (1 << 24))
b0109805 6747 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6748 if (insn & (1 << 20)) {
6749 /* load */
9ee6e8bb 6750 if (insn & (1 << 22)) {
b0109805 6751 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6752 } else {
b0109805 6753 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6754 }
9ee6e8bb
PB
6755 } else {
6756 /* store */
b0109805 6757 tmp = load_reg(s, rd);
9ee6e8bb 6758 if (insn & (1 << 22))
b0109805 6759 gen_st8(tmp, tmp2, i);
9ee6e8bb 6760 else
b0109805 6761 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6762 }
6763 if (!(insn & (1 << 24))) {
b0109805
PB
6764 gen_add_data_offset(s, insn, tmp2);
6765 store_reg(s, rn, tmp2);
6766 } else if (insn & (1 << 21)) {
6767 store_reg(s, rn, tmp2);
6768 } else {
6769 dead_tmp(tmp2);
9ee6e8bb
PB
6770 }
6771 if (insn & (1 << 20)) {
6772 /* Complete the load. */
6773 if (rd == 15)
b0109805 6774 gen_bx(s, tmp);
9ee6e8bb 6775 else
b0109805 6776 store_reg(s, rd, tmp);
9ee6e8bb
PB
6777 }
6778 break;
6779 case 0x08:
6780 case 0x09:
6781 {
6782 int j, n, user, loaded_base;
b0109805 6783 TCGv loaded_var;
9ee6e8bb
PB
6784 /* load/store multiple words */
6785 /* XXX: store correct base if write back */
6786 user = 0;
6787 if (insn & (1 << 22)) {
6788 if (IS_USER(s))
6789 goto illegal_op; /* only usable in supervisor mode */
6790
6791 if ((insn & (1 << 15)) == 0)
6792 user = 1;
6793 }
6794 rn = (insn >> 16) & 0xf;
b0109805 6795 addr = load_reg(s, rn);
9ee6e8bb
PB
6796
6797 /* compute total size */
6798 loaded_base = 0;
a50f5b91 6799 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6800 n = 0;
6801 for(i=0;i<16;i++) {
6802 if (insn & (1 << i))
6803 n++;
6804 }
6805 /* XXX: test invalid n == 0 case ? */
6806 if (insn & (1 << 23)) {
6807 if (insn & (1 << 24)) {
6808 /* pre increment */
b0109805 6809 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6810 } else {
6811 /* post increment */
6812 }
6813 } else {
6814 if (insn & (1 << 24)) {
6815 /* pre decrement */
b0109805 6816 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6817 } else {
6818 /* post decrement */
6819 if (n != 1)
b0109805 6820 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6821 }
6822 }
6823 j = 0;
6824 for(i=0;i<16;i++) {
6825 if (insn & (1 << i)) {
6826 if (insn & (1 << 20)) {
6827 /* load */
b0109805 6828 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6829 if (i == 15) {
b0109805 6830 gen_bx(s, tmp);
9ee6e8bb 6831 } else if (user) {
b0109805
PB
6832 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6833 dead_tmp(tmp);
9ee6e8bb 6834 } else if (i == rn) {
b0109805 6835 loaded_var = tmp;
9ee6e8bb
PB
6836 loaded_base = 1;
6837 } else {
b0109805 6838 store_reg(s, i, tmp);
9ee6e8bb
PB
6839 }
6840 } else {
6841 /* store */
6842 if (i == 15) {
6843 /* special case: r15 = PC + 8 */
6844 val = (long)s->pc + 4;
b0109805
PB
6845 tmp = new_tmp();
6846 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6847 } else if (user) {
b0109805
PB
6848 tmp = new_tmp();
6849 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6850 } else {
b0109805 6851 tmp = load_reg(s, i);
9ee6e8bb 6852 }
b0109805 6853 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6854 }
6855 j++;
6856 /* no need to add after the last transfer */
6857 if (j != n)
b0109805 6858 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6859 }
6860 }
6861 if (insn & (1 << 21)) {
6862 /* write back */
6863 if (insn & (1 << 23)) {
6864 if (insn & (1 << 24)) {
6865 /* pre increment */
6866 } else {
6867 /* post increment */
b0109805 6868 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6869 }
6870 } else {
6871 if (insn & (1 << 24)) {
6872 /* pre decrement */
6873 if (n != 1)
b0109805 6874 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6875 } else {
6876 /* post decrement */
b0109805 6877 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6878 }
6879 }
b0109805
PB
6880 store_reg(s, rn, addr);
6881 } else {
6882 dead_tmp(addr);
9ee6e8bb
PB
6883 }
6884 if (loaded_base) {
b0109805 6885 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6886 }
6887 if ((insn & (1 << 22)) && !user) {
6888 /* Restore CPSR from SPSR. */
d9ba4830
PB
6889 tmp = load_cpu_field(spsr);
6890 gen_set_cpsr(tmp, 0xffffffff);
6891 dead_tmp(tmp);
9ee6e8bb
PB
6892 s->is_jmp = DISAS_UPDATE;
6893 }
6894 }
6895 break;
6896 case 0xa:
6897 case 0xb:
6898 {
6899 int32_t offset;
6900
6901 /* branch (and link) */
6902 val = (int32_t)s->pc;
6903 if (insn & (1 << 24)) {
5e3f878a
PB
6904 tmp = new_tmp();
6905 tcg_gen_movi_i32(tmp, val);
6906 store_reg(s, 14, tmp);
9ee6e8bb
PB
6907 }
6908 offset = (((int32_t)insn << 8) >> 8);
6909 val += (offset << 2) + 4;
6910 gen_jmp(s, val);
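            /* The 24-bit immediate is sign-extended, scaled by 4 and added
               to the PC value of this instruction plus 8, so imm24 ==
               0xfffffe encodes a branch to the instruction itself. */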
6911 }
6912 break;
6913 case 0xc:
6914 case 0xd:
6915 case 0xe:
6916 /* Coprocessor. */
6917 if (disas_coproc_insn(env, s, insn))
6918 goto illegal_op;
6919 break;
6920 case 0xf:
6921 /* swi */
5e3f878a 6922 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6923 s->is_jmp = DISAS_SWI;
6924 break;
6925 default:
6926 illegal_op:
6927 gen_set_condexec(s);
5e3f878a 6928 gen_set_pc_im(s->pc - 4);
d9ba4830 6929 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6930 s->is_jmp = DISAS_JUMP;
6931 break;
6932 }
6933 }
6934}
6935
6936/* Return true if this is a Thumb-2 logical op. */
6937static int
6938thumb2_logic_op(int op)
6939{
6940 return (op < 8);
6941}
6942
6943/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6944 then set condition code flags based on the result of the operation.
6945 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6946 to the high bit of T1.
6947 Returns zero if the opcode is valid. */
6948
6949static int
396e467c 6950gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
6951{
6952 int logic_cc;
6953
6954 logic_cc = 0;
6955 switch (op) {
6956 case 0: /* and */
396e467c 6957 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
6958 logic_cc = conds;
6959 break;
6960 case 1: /* bic */
396e467c 6961 tcg_gen_bic_i32(t0, t0, t1);
9ee6e8bb
PB
6962 logic_cc = conds;
6963 break;
6964 case 2: /* orr */
396e467c 6965 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
6966 logic_cc = conds;
6967 break;
6968 case 3: /* orn */
396e467c
FN
6969 tcg_gen_not_i32(t1, t1);
6970 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
6971 logic_cc = conds;
6972 break;
6973 case 4: /* eor */
396e467c 6974 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
6975 logic_cc = conds;
6976 break;
6977 case 8: /* add */
6978 if (conds)
396e467c 6979 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 6980 else
396e467c 6981 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
6982 break;
6983 case 10: /* adc */
6984 if (conds)
396e467c 6985 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 6986 else
396e467c 6987 gen_adc(t0, t1);
9ee6e8bb
PB
6988 break;
6989 case 11: /* sbc */
6990 if (conds)
396e467c 6991 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 6992 else
396e467c 6993 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
6994 break;
6995 case 13: /* sub */
6996 if (conds)
396e467c 6997 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 6998 else
396e467c 6999 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7000 break;
7001 case 14: /* rsb */
7002 if (conds)
396e467c 7003 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7004 else
396e467c 7005 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7006 break;
7007 default: /* 5, 6, 7, 9, 12, 15. */
7008 return 1;
7009 }
7010 if (logic_cc) {
396e467c 7011 gen_logic_CC(t0);
9ee6e8bb 7012 if (shifter_out)
396e467c 7013 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7014 }
7015 return 0;
7016}
7017
7018/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7019 is not legal. */
7020static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7021{
b0109805 7022 uint32_t insn, imm, shift, offset;
9ee6e8bb 7023 uint32_t rd, rn, rm, rs;
b26eefb6 7024 TCGv tmp;
6ddbc6e4
PB
7025 TCGv tmp2;
7026 TCGv tmp3;
b0109805 7027 TCGv addr;
a7812ae4 7028 TCGv_i64 tmp64;
9ee6e8bb
PB
7029 int op;
7030 int shiftop;
7031 int conds;
7032 int logic_cc;
7033
7034 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7035 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7036 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7037 16-bit instructions to get correct prefetch abort behavior. */
7038 insn = insn_hw1;
7039 if ((insn & (1 << 12)) == 0) {
7040 /* Second half of blx. */
7041 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7042 tmp = load_reg(s, 14);
7043 tcg_gen_addi_i32(tmp, tmp, offset);
7044 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7045
d9ba4830 7046 tmp2 = new_tmp();
b0109805 7047 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7048 store_reg(s, 14, tmp2);
7049 gen_bx(s, tmp);
9ee6e8bb
PB
7050 return 0;
7051 }
7052 if (insn & (1 << 11)) {
7053 /* Second half of bl. */
7054 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7055 tmp = load_reg(s, 14);
6a0d8a1d 7056 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7057
d9ba4830 7058 tmp2 = new_tmp();
b0109805 7059 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7060 store_reg(s, 14, tmp2);
7061 gen_bx(s, tmp);
9ee6e8bb
PB
7062 return 0;
7063 }
7064 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7065 /* Instruction spans a page boundary. Implement it as two
7066               16-bit instructions in case the second half causes a
7067 prefetch abort. */
7068 offset = ((int32_t)insn << 21) >> 9;
396e467c 7069 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7070 return 0;
7071 }
7072 /* Fall through to 32-bit decode. */
7073 }
7074
7075 insn = lduw_code(s->pc);
7076 s->pc += 2;
7077 insn |= (uint32_t)insn_hw1 << 16;
7078
7079 if ((insn & 0xf800e800) != 0xf000e800) {
7080 ARCH(6T2);
7081 }
7082
7083 rn = (insn >> 16) & 0xf;
7084 rs = (insn >> 12) & 0xf;
7085 rd = (insn >> 8) & 0xf;
7086 rm = insn & 0xf;
7087 switch ((insn >> 25) & 0xf) {
7088 case 0: case 1: case 2: case 3:
7089 /* 16-bit instructions. Should never happen. */
7090 abort();
7091 case 4:
7092 if (insn & (1 << 22)) {
7093 /* Other load/store, table branch. */
7094 if (insn & 0x01200000) {
7095 /* Load/store doubleword. */
7096 if (rn == 15) {
b0109805
PB
7097 addr = new_tmp();
7098 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7099 } else {
b0109805 7100 addr = load_reg(s, rn);
9ee6e8bb
PB
7101 }
7102 offset = (insn & 0xff) * 4;
7103 if ((insn & (1 << 23)) == 0)
7104 offset = -offset;
7105 if (insn & (1 << 24)) {
b0109805 7106 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7107 offset = 0;
7108 }
7109 if (insn & (1 << 20)) {
7110 /* ldrd */
b0109805
PB
7111 tmp = gen_ld32(addr, IS_USER(s));
7112 store_reg(s, rs, tmp);
7113 tcg_gen_addi_i32(addr, addr, 4);
7114 tmp = gen_ld32(addr, IS_USER(s));
7115 store_reg(s, rd, tmp);
9ee6e8bb
PB
7116 } else {
7117 /* strd */
b0109805
PB
7118 tmp = load_reg(s, rs);
7119 gen_st32(tmp, addr, IS_USER(s));
7120 tcg_gen_addi_i32(addr, addr, 4);
7121 tmp = load_reg(s, rd);
7122 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7123 }
7124 if (insn & (1 << 21)) {
7125 /* Base writeback. */
7126 if (rn == 15)
7127 goto illegal_op;
b0109805
PB
7128 tcg_gen_addi_i32(addr, addr, offset - 4);
7129 store_reg(s, rn, addr);
7130 } else {
7131 dead_tmp(addr);
9ee6e8bb
PB
7132 }
7133 } else if ((insn & (1 << 23)) == 0) {
7134 /* Load/store exclusive word. */
3174f8e9
FN
7135 addr = tcg_temp_local_new();
7136 tcg_gen_mov_i32(addr, cpu_R[rn]);
2c0262af 7137 if (insn & (1 << 20)) {
3174f8e9 7138 gen_helper_mark_exclusive(cpu_env, addr);
8f8e3aa4
PB
7139 tmp = gen_ld32(addr, IS_USER(s));
7140 store_reg(s, rd, tmp);
9ee6e8bb 7141 } else {
8f8e3aa4 7142 int label = gen_new_label();
3174f8e9
FN
7143 tmp2 = tcg_temp_local_new();
7144 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7145 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7146 tmp = load_reg(s, rs);
3174f8e9 7147 gen_st32(tmp, addr, IS_USER(s));
8f8e3aa4 7148 gen_set_label(label);
3174f8e9
FN
7149 tcg_gen_mov_i32(cpu_R[rd], tmp2);
7150 tcg_temp_free(tmp2);
9ee6e8bb 7151 }
3174f8e9 7152 tcg_temp_free(addr);
9ee6e8bb
PB
7153 } else if ((insn & (1 << 6)) == 0) {
7154 /* Table Branch. */
7155 if (rn == 15) {
b0109805
PB
7156 addr = new_tmp();
7157 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7158 } else {
b0109805 7159 addr = load_reg(s, rn);
9ee6e8bb 7160 }
b26eefb6 7161 tmp = load_reg(s, rm);
b0109805 7162 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7163 if (insn & (1 << 4)) {
7164 /* tbh */
b0109805 7165 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7166 dead_tmp(tmp);
b0109805 7167 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7168 } else { /* tbb */
b26eefb6 7169 dead_tmp(tmp);
b0109805 7170 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7171 }
b0109805
PB
7172 dead_tmp(addr);
7173 tcg_gen_shli_i32(tmp, tmp, 1);
7174 tcg_gen_addi_i32(tmp, tmp, s->pc);
7175 store_reg(s, 15, tmp);
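                /* The loaded table entry is a halfword count: it is doubled
                   and added to s->pc (the address of the tbb/tbh
                   instruction plus 4) to form the branch target. */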
9ee6e8bb
PB
7176 } else {
7177 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7178                /* ??? These are not really atomic. However, we know
7179 we never have multiple CPUs running in parallel,
7180 so it is good enough. */
9ee6e8bb 7181 op = (insn >> 4) & 0x3;
3174f8e9
FN
7182 addr = tcg_temp_local_new();
7183 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb 7184 if (insn & (1 << 20)) {
8f8e3aa4 7185 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7186 switch (op) {
7187 case 0:
8f8e3aa4 7188 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7189 break;
2c0262af 7190 case 1:
8f8e3aa4 7191 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7192 break;
9ee6e8bb 7193 case 3:
8f8e3aa4
PB
7194 tmp = gen_ld32(addr, IS_USER(s));
7195 tcg_gen_addi_i32(addr, addr, 4);
7196 tmp2 = gen_ld32(addr, IS_USER(s));
7197 store_reg(s, rd, tmp2);
2c0262af
FB
7198 break;
7199 default:
9ee6e8bb
PB
7200 goto illegal_op;
7201 }
8f8e3aa4 7202 store_reg(s, rs, tmp);
9ee6e8bb 7203 } else {
8f8e3aa4 7204 int label = gen_new_label();
3174f8e9
FN
7205 tmp2 = tcg_temp_local_new();
7206 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7207 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7208 tmp = load_reg(s, rs);
9ee6e8bb
PB
7209 switch (op) {
7210 case 0:
8f8e3aa4 7211 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7212 break;
7213 case 1:
8f8e3aa4 7214 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7215 break;
2c0262af 7216 case 3:
8f8e3aa4
PB
7217 gen_st32(tmp, addr, IS_USER(s));
7218 tcg_gen_addi_i32(addr, addr, 4);
7219 tmp = load_reg(s, rd);
7220 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7221 break;
9ee6e8bb
PB
7222 default:
7223 goto illegal_op;
2c0262af 7224 }
8f8e3aa4 7225 gen_set_label(label);
3174f8e9
FN
7226 tcg_gen_mov_i32(cpu_R[rm], tmp2);
7227 tcg_temp_free(tmp2);
9ee6e8bb 7228 }
3174f8e9 7229 tcg_temp_free(addr);
9ee6e8bb
PB
7230 }
7231 } else {
7232 /* Load/store multiple, RFE, SRS. */
7233 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7234 /* Not available in user mode. */
b0109805 7235 if (IS_USER(s))
9ee6e8bb
PB
7236 goto illegal_op;
7237 if (insn & (1 << 20)) {
7238 /* rfe */
b0109805
PB
7239 addr = load_reg(s, rn);
7240 if ((insn & (1 << 24)) == 0)
7241 tcg_gen_addi_i32(addr, addr, -8);
7242 /* Load PC into tmp and CPSR into tmp2. */
7243 tmp = gen_ld32(addr, 0);
7244 tcg_gen_addi_i32(addr, addr, 4);
7245 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7246 if (insn & (1 << 21)) {
7247 /* Base writeback. */
b0109805
PB
7248 if (insn & (1 << 24)) {
7249 tcg_gen_addi_i32(addr, addr, 4);
7250 } else {
7251 tcg_gen_addi_i32(addr, addr, -4);
7252 }
7253 store_reg(s, rn, addr);
7254 } else {
7255 dead_tmp(addr);
9ee6e8bb 7256 }
b0109805 7257 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7258 } else {
7259 /* srs */
7260 op = (insn & 0x1f);
7261 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7262 addr = load_reg(s, 13);
9ee6e8bb 7263 } else {
b0109805
PB
7264 addr = new_tmp();
7265 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7266 }
7267 if ((insn & (1 << 24)) == 0) {
b0109805 7268 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7269 }
b0109805
PB
7270 tmp = load_reg(s, 14);
7271 gen_st32(tmp, addr, 0);
7272 tcg_gen_addi_i32(addr, addr, 4);
7273 tmp = new_tmp();
7274 gen_helper_cpsr_read(tmp);
7275 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7276 if (insn & (1 << 21)) {
7277 if ((insn & (1 << 24)) == 0) {
b0109805 7278 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7279 } else {
b0109805 7280 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7281 }
7282 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7283 store_reg(s, 13, addr);
9ee6e8bb 7284 } else {
b0109805
PB
7285 gen_helper_set_r13_banked(cpu_env,
7286 tcg_const_i32(op), addr);
9ee6e8bb 7287 }
b0109805
PB
7288 } else {
7289 dead_tmp(addr);
9ee6e8bb
PB
7290 }
7291 }
7292 } else {
7293 int i;
7294 /* Load/store multiple. */
b0109805 7295 addr = load_reg(s, rn);
9ee6e8bb
PB
7296 offset = 0;
7297 for (i = 0; i < 16; i++) {
7298 if (insn & (1 << i))
7299 offset += 4;
7300 }
7301 if (insn & (1 << 24)) {
b0109805 7302 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7303 }
7304
7305 for (i = 0; i < 16; i++) {
7306 if ((insn & (1 << i)) == 0)
7307 continue;
7308 if (insn & (1 << 20)) {
7309 /* Load. */
b0109805 7310 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7311 if (i == 15) {
b0109805 7312 gen_bx(s, tmp);
9ee6e8bb 7313 } else {
b0109805 7314 store_reg(s, i, tmp);
9ee6e8bb
PB
7315 }
7316 } else {
7317 /* Store. */
b0109805
PB
7318 tmp = load_reg(s, i);
7319 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7320 }
b0109805 7321 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7322 }
7323 if (insn & (1 << 21)) {
7324 /* Base register writeback. */
7325 if (insn & (1 << 24)) {
b0109805 7326 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7327 }
7328 /* Fault if writeback register is in register list. */
7329 if (insn & (1 << rn))
7330 goto illegal_op;
b0109805
PB
7331 store_reg(s, rn, addr);
7332 } else {
7333 dead_tmp(addr);
9ee6e8bb
PB
7334 }
7335 }
7336 }
7337 break;
7338 case 5: /* Data processing register constant shift. */
3174f8e9
FN
7339 if (rn == 15) {
7340 tmp = new_tmp();
7341 tcg_gen_movi_i32(tmp, 0);
7342 } else {
7343 tmp = load_reg(s, rn);
7344 }
7345 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7346 op = (insn >> 21) & 0xf;
7347 shiftop = (insn >> 4) & 3;
7348 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7349 conds = (insn & (1 << 20)) != 0;
7350 logic_cc = (conds && thumb2_logic_op(op));
3174f8e9
FN
7351 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7352 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9ee6e8bb 7353 goto illegal_op;
3174f8e9
FN
7354 dead_tmp(tmp2);
7355 if (rd != 15) {
7356 store_reg(s, rd, tmp);
7357 } else {
7358 dead_tmp(tmp);
7359 }
9ee6e8bb
PB
7360 break;
7361 case 13: /* Misc data processing. */
7362 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7363 if (op < 4 && (insn & 0xf000) != 0xf000)
7364 goto illegal_op;
7365 switch (op) {
7366 case 0: /* Register controlled shift. */
8984bd2e
PB
7367 tmp = load_reg(s, rn);
7368 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7369 if ((insn & 0x70) != 0)
7370 goto illegal_op;
7371 op = (insn >> 21) & 3;
8984bd2e
PB
7372 logic_cc = (insn & (1 << 20)) != 0;
7373 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7374 if (logic_cc)
7375 gen_logic_CC(tmp);
21aeb343 7376 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7377 break;
7378 case 1: /* Sign/zero extend. */
5e3f878a 7379 tmp = load_reg(s, rm);
9ee6e8bb
PB
7380 shift = (insn >> 4) & 3;
7381            /* ??? In many cases it's not necessary to do a
7382 rotate, a shift is sufficient. */
7383 if (shift != 0)
5e3f878a 7384 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7385 op = (insn >> 20) & 7;
7386 switch (op) {
5e3f878a
PB
7387 case 0: gen_sxth(tmp); break;
7388 case 1: gen_uxth(tmp); break;
7389 case 2: gen_sxtb16(tmp); break;
7390 case 3: gen_uxtb16(tmp); break;
7391 case 4: gen_sxtb(tmp); break;
7392 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7393 default: goto illegal_op;
7394 }
7395 if (rn != 15) {
5e3f878a 7396 tmp2 = load_reg(s, rn);
9ee6e8bb 7397 if ((op >> 1) == 1) {
5e3f878a 7398 gen_add16(tmp, tmp2);
9ee6e8bb 7399 } else {
5e3f878a
PB
7400 tcg_gen_add_i32(tmp, tmp, tmp2);
7401 dead_tmp(tmp2);
9ee6e8bb
PB
7402 }
7403 }
5e3f878a 7404 store_reg(s, rd, tmp);
9ee6e8bb
PB
7405 break;
7406 case 2: /* SIMD add/subtract. */
7407 op = (insn >> 20) & 7;
7408 shift = (insn >> 4) & 7;
7409 if ((op & 3) == 3 || (shift & 3) == 3)
7410 goto illegal_op;
6ddbc6e4
PB
7411 tmp = load_reg(s, rn);
7412 tmp2 = load_reg(s, rm);
7413 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7414 dead_tmp(tmp2);
7415 store_reg(s, rd, tmp);
9ee6e8bb
PB
7416 break;
7417 case 3: /* Other data processing. */
7418 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7419 if (op < 4) {
7420 /* Saturating add/subtract. */
d9ba4830
PB
7421 tmp = load_reg(s, rn);
7422 tmp2 = load_reg(s, rm);
9ee6e8bb 7423 if (op & 2)
d9ba4830 7424 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7425 if (op & 1)
d9ba4830 7426 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7427 else
d9ba4830
PB
7428 gen_helper_add_saturate(tmp, tmp, tmp2);
7429 dead_tmp(tmp2);
9ee6e8bb 7430 } else {
d9ba4830 7431 tmp = load_reg(s, rn);
9ee6e8bb
PB
7432 switch (op) {
7433 case 0x0a: /* rbit */
d9ba4830 7434 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7435 break;
7436 case 0x08: /* rev */
66896cb8 7437 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7438 break;
7439 case 0x09: /* rev16 */
d9ba4830 7440 gen_rev16(tmp);
9ee6e8bb
PB
7441 break;
7442 case 0x0b: /* revsh */
d9ba4830 7443 gen_revsh(tmp);
9ee6e8bb
PB
7444 break;
7445 case 0x10: /* sel */
d9ba4830 7446 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7447 tmp3 = new_tmp();
7448 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7449 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7450 dead_tmp(tmp3);
d9ba4830 7451 dead_tmp(tmp2);
9ee6e8bb
PB
7452 break;
7453 case 0x18: /* clz */
d9ba4830 7454 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7455 break;
7456 default:
7457 goto illegal_op;
7458 }
7459 }
d9ba4830 7460 store_reg(s, rd, tmp);
9ee6e8bb
PB
7461 break;
7462 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7463 op = (insn >> 4) & 0xf;
d9ba4830
PB
7464 tmp = load_reg(s, rn);
7465 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7466 switch ((insn >> 20) & 7) {
7467 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7468 tcg_gen_mul_i32(tmp, tmp, tmp2);
7469 dead_tmp(tmp2);
9ee6e8bb 7470 if (rs != 15) {
d9ba4830 7471 tmp2 = load_reg(s, rs);
9ee6e8bb 7472 if (op)
d9ba4830 7473 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7474 else
d9ba4830
PB
7475 tcg_gen_add_i32(tmp, tmp, tmp2);
7476 dead_tmp(tmp2);
9ee6e8bb 7477 }
9ee6e8bb
PB
7478 break;
7479 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7480 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7481 dead_tmp(tmp2);
9ee6e8bb 7482 if (rs != 15) {
d9ba4830
PB
7483 tmp2 = load_reg(s, rs);
7484 gen_helper_add_setq(tmp, tmp, tmp2);
7485 dead_tmp(tmp2);
9ee6e8bb 7486 }
9ee6e8bb
PB
7487 break;
7488 case 2: /* Dual multiply add. */
7489 case 4: /* Dual multiply subtract. */
7490 if (op)
d9ba4830
PB
7491 gen_swap_half(tmp2);
7492 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7493 /* This addition cannot overflow. */
7494 if (insn & (1 << 22)) {
d9ba4830 7495 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7496 } else {
d9ba4830 7497 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7498 }
d9ba4830 7499 dead_tmp(tmp2);
9ee6e8bb
PB
7500 if (rs != 15)
7501 {
d9ba4830
PB
7502 tmp2 = load_reg(s, rs);
7503 gen_helper_add_setq(tmp, tmp, tmp2);
7504 dead_tmp(tmp2);
9ee6e8bb 7505 }
9ee6e8bb
PB
7506 break;
7507 case 3: /* 32 * 16 -> 32msb */
7508 if (op)
d9ba4830 7509 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7510 else
d9ba4830 7511 gen_sxth(tmp2);
a7812ae4
PB
7512 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7513 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7514 tmp = new_tmp();
a7812ae4 7515 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb
PB
7516 if (rs != 15)
7517 {
d9ba4830
PB
7518 tmp2 = load_reg(s, rs);
7519 gen_helper_add_setq(tmp, tmp, tmp2);
7520 dead_tmp(tmp2);
9ee6e8bb 7521 }
9ee6e8bb
PB
7522 break;
7523 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7524 gen_imull(tmp, tmp2);
7525 if (insn & (1 << 5)) {
7526 gen_roundqd(tmp, tmp2);
7527 dead_tmp(tmp2);
7528 } else {
7529 dead_tmp(tmp);
7530 tmp = tmp2;
7531 }
9ee6e8bb 7532 if (rs != 15) {
d9ba4830 7533 tmp2 = load_reg(s, rs);
9ee6e8bb 7534 if (insn & (1 << 21)) {
d9ba4830 7535 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7536 } else {
d9ba4830 7537 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7538 }
d9ba4830 7539 dead_tmp(tmp2);
2c0262af 7540 }
9ee6e8bb
PB
7541 break;
7542 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7543 gen_helper_usad8(tmp, tmp, tmp2);
7544 dead_tmp(tmp2);
9ee6e8bb 7545 if (rs != 15) {
d9ba4830
PB
7546 tmp2 = load_reg(s, rs);
7547 tcg_gen_add_i32(tmp, tmp, tmp2);
7548 dead_tmp(tmp2);
5fd46862 7549 }
9ee6e8bb 7550 break;
2c0262af 7551 }
d9ba4830 7552 store_reg(s, rd, tmp);
2c0262af 7553 break;
9ee6e8bb
PB
7554 case 6: case 7: /* 64-bit multiply, Divide. */
7555 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7556 tmp = load_reg(s, rn);
7557 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7558 if ((op & 0x50) == 0x10) {
7559 /* sdiv, udiv */
7560 if (!arm_feature(env, ARM_FEATURE_DIV))
7561 goto illegal_op;
7562 if (op & 0x20)
5e3f878a 7563 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7564 else
5e3f878a
PB
7565 gen_helper_sdiv(tmp, tmp, tmp2);
7566 dead_tmp(tmp2);
7567 store_reg(s, rd, tmp);
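                /* ARM_FEATURE_DIV marks cores with the optional sdiv/udiv
                   instructions; the helpers are expected to follow the
                   architected rule that division by zero yields 0 rather
                   than a trap. */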
9ee6e8bb
PB
7568 } else if ((op & 0xe) == 0xc) {
7569 /* Dual multiply accumulate long. */
7570 if (op & 1)
5e3f878a
PB
7571 gen_swap_half(tmp2);
7572 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7573 if (op & 0x10) {
5e3f878a 7574 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7575 } else {
5e3f878a 7576 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7577 }
5e3f878a 7578 dead_tmp(tmp2);
a7812ae4
PB
7579 /* BUGFIX */
7580 tmp64 = tcg_temp_new_i64();
7581 tcg_gen_ext_i32_i64(tmp64, tmp);
7582 dead_tmp(tmp);
7583 gen_addq(s, tmp64, rs, rd);
7584 gen_storeq_reg(s, rs, rd, tmp64);
2c0262af 7585 } else {
9ee6e8bb
PB
7586 if (op & 0x20) {
7587 /* Unsigned 64-bit multiply */
a7812ae4 7588 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7589 } else {
9ee6e8bb
PB
7590 if (op & 8) {
7591 /* smlalxy */
5e3f878a
PB
7592 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7593 dead_tmp(tmp2);
a7812ae4
PB
7594 tmp64 = tcg_temp_new_i64();
7595 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7596 dead_tmp(tmp);
9ee6e8bb
PB
7597 } else {
7598 /* Signed 64-bit multiply */
a7812ae4 7599 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7600 }
b5ff1b31 7601 }
9ee6e8bb
PB
7602 if (op & 4) {
7603 /* umaal */
a7812ae4
PB
7604 gen_addq_lo(s, tmp64, rs);
7605 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7606 } else if (op & 0x40) {
7607 /* 64-bit accumulate. */
a7812ae4 7608 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7609 }
a7812ae4 7610 gen_storeq_reg(s, rs, rd, tmp64);
5fd46862 7611 }
2c0262af 7612 break;
9ee6e8bb
PB
7613 }
7614 break;
7615 case 6: case 7: case 14: case 15:
7616 /* Coprocessor. */
7617 if (((insn >> 24) & 3) == 3) {
7618 /* Translate into the equivalent ARM encoding. */
7619 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7620 if (disas_neon_data_insn(env, s, insn))
7621 goto illegal_op;
7622 } else {
7623 if (insn & (1 << 28))
7624 goto illegal_op;
7625 if (disas_coproc_insn (env, s, insn))
7626 goto illegal_op;
7627 }
7628 break;
7629 case 8: case 9: case 10: case 11:
7630 if (insn & (1 << 15)) {
7631 /* Branches, misc control. */
7632 if (insn & 0x5000) {
7633 /* Unconditional branch. */
7634 /* signextend(hw1[10:0]) -> offset[31:12]. */
7635 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7636 /* hw1[10:0] -> offset[11:1]. */
7637 offset |= (insn & 0x7ff) << 1;
7638 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7639 offset[24:22] already have the same value because of the
7640 sign extension above. */
7641 offset ^= ((~insn) & (1 << 13)) << 10;
7642 offset ^= ((~insn) & (1 << 11)) << 11;
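/* Net effect: offset = SignExtend(S:I1:I2:imm10:imm11:'0'), with S = hw1[10],
   J1 = hw2[13], J2 = hw2[11], I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).  The sign
   extension above seeded offset[23:22] with copies of S, so XOR-ing in the
   inverted J bits converts those two bits into I1 and I2.
   Example: BL with hw1 = 0xf000, hw2 = 0xf802 (S=0, J1=J2=1, imm11=2) yields
   offset = +4. */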
7643
9ee6e8bb
PB
7644 if (insn & (1 << 14)) {
7645 /* Branch and link. */
3174f8e9 7646 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7647 }
3b46e624 7648
b0109805 7649 offset += s->pc;
9ee6e8bb
PB
7650 if (insn & (1 << 12)) {
7651 /* b/bl */
b0109805 7652 gen_jmp(s, offset);
9ee6e8bb
PB
7653 } else {
7654 /* blx */
b0109805
PB
7655 offset &= ~(uint32_t)2;
7656 gen_bx_im(s, offset);
2c0262af 7657 }
9ee6e8bb
PB
7658 } else if (((insn >> 23) & 7) == 7) {
7659 /* Misc control */
7660 if (insn & (1 << 13))
7661 goto illegal_op;
7662
7663 if (insn & (1 << 26)) {
7664 /* Secure monitor call (v6Z) */
7665 goto illegal_op; /* not implemented. */
2c0262af 7666 } else {
9ee6e8bb
PB
7667 op = (insn >> 20) & 7;
7668 switch (op) {
7669 case 0: /* msr cpsr. */
7670 if (IS_M(env)) {
8984bd2e
PB
7671 tmp = load_reg(s, rn);
7672 addr = tcg_const_i32(insn & 0xff);
7673 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7674 gen_lookup_tb(s);
7675 break;
7676 }
7677 /* fall through */
7678 case 1: /* msr spsr. */
7679 if (IS_M(env))
7680 goto illegal_op;
2fbac54b
FN
7681 tmp = load_reg(s, rn);
7682 if (gen_set_psr(s,
9ee6e8bb 7683 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7684 op == 1, tmp))
9ee6e8bb
PB
7685 goto illegal_op;
7686 break;
7687 case 2: /* cps, nop-hint. */
7688 if (((insn >> 8) & 7) == 0) {
7689 gen_nop_hint(s, insn & 0xff);
7690 }
7691 /* Implemented as NOP in user mode. */
7692 if (IS_USER(s))
7693 break;
7694 offset = 0;
7695 imm = 0;
7696 if (insn & (1 << 10)) {
7697 if (insn & (1 << 7))
7698 offset |= CPSR_A;
7699 if (insn & (1 << 6))
7700 offset |= CPSR_I;
7701 if (insn & (1 << 5))
7702 offset |= CPSR_F;
7703 if (insn & (1 << 9))
7704 imm = CPSR_A | CPSR_I | CPSR_F;
7705 }
7706 if (insn & (1 << 8)) {
7707 offset |= 0x1f;
7708 imm |= (insn & 0x1f);
7709 }
7710 if (offset) {
2fbac54b 7711 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
7712 }
7713 break;
7714 case 3: /* Special control operations. */
7715 op = (insn >> 4) & 0xf;
7716 switch (op) {
7717 case 2: /* clrex */
8f8e3aa4 7718 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7719 break;
7720 case 4: /* dsb */
7721 case 5: /* dmb */
7722 case 6: /* isb */
7723 /* These execute as NOPs. */
7724 ARCH(7);
7725 break;
7726 default:
7727 goto illegal_op;
7728 }
7729 break;
7730 case 4: /* bxj */
7731 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7732 tmp = load_reg(s, rn);
7733 gen_bx(s, tmp);
9ee6e8bb
PB
7734 break;
7735 case 5: /* Exception return. */
7736 /* Unpredictable in user mode. */
7737 goto illegal_op;
7738 case 6: /* mrs cpsr. */
8984bd2e 7739 tmp = new_tmp();
9ee6e8bb 7740 if (IS_M(env)) {
8984bd2e
PB
7741 addr = tcg_const_i32(insn & 0xff);
7742 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7743 } else {
8984bd2e 7744 gen_helper_cpsr_read(tmp);
9ee6e8bb 7745 }
8984bd2e 7746 store_reg(s, rd, tmp);
9ee6e8bb
PB
7747 break;
7748 case 7: /* mrs spsr. */
7749 /* Not accessible in user mode. */
7750 if (IS_USER(s) || IS_M(env))
7751 goto illegal_op;
d9ba4830
PB
7752 tmp = load_cpu_field(spsr);
7753 store_reg(s, rd, tmp);
9ee6e8bb 7754 break;
2c0262af
FB
7755 }
7756 }
9ee6e8bb
PB
7757 } else {
7758 /* Conditional branch. */
7759 op = (insn >> 22) & 0xf;
7760 /* Generate a conditional jump to next instruction. */
7761 s->condlabel = gen_new_label();
d9ba4830 7762 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7763 s->condjmp = 1;
7764
7765 /* offset[11:1] = insn[10:0] */
7766 offset = (insn & 0x7ff) << 1;
7767 /* offset[17:12] = insn[21:16]. */
7768 offset |= (insn & 0x003f0000) >> 4;
7769 /* offset[31:20] = insn[26]. */
7770 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7771 /* offset[18] = insn[13]. */
7772 offset |= (insn & (1 << 13)) << 5;
7773 /* offset[19] = insn[11]. */
7774 offset |= (insn & (1 << 11)) << 8;
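/* i.e. offset = SignExtend(S:J2:J1:imm6:imm11:'0'), the conditional-branch
   (B<cond>.W) displacement; unlike the unconditional form above, J1/J2 are
   used directly rather than being XORed with S. */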
7775
7776 /* jump to the offset */
b0109805 7777 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7778 }
7779 } else {
7780 /* Data processing immediate. */
7781 if (insn & (1 << 25)) {
7782 if (insn & (1 << 24)) {
7783 if (insn & (1 << 20))
7784 goto illegal_op;
7785 /* Bitfield/Saturate. */
7786 op = (insn >> 21) & 7;
7787 imm = insn & 0x1f;
7788 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
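/* shift = insn[14:12]:insn[7:6] is the 5-bit lsb/shift amount; imm = insn[4:0]
   is the width/position field whose meaning depends on the op decoded below. */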
6ddbc6e4
PB
7789 if (rn == 15) {
7790 tmp = new_tmp();
7791 tcg_gen_movi_i32(tmp, 0);
7792 } else {
7793 tmp = load_reg(s, rn);
7794 }
9ee6e8bb
PB
7795 switch (op) {
7796 case 2: /* Signed bitfield extract. */
7797 imm++;
7798 if (shift + imm > 32)
7799 goto illegal_op;
7800 if (imm < 32)
6ddbc6e4 7801 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7802 break;
7803 case 6: /* Unsigned bitfield extract. */
7804 imm++;
7805 if (shift + imm > 32)
7806 goto illegal_op;
7807 if (imm < 32)
6ddbc6e4 7808 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7809 break;
7810 case 3: /* Bitfield insert/clear. */
7811 if (imm < shift)
7812 goto illegal_op;
7813 imm = imm + 1 - shift;
7814 if (imm != 32) {
6ddbc6e4 7815 tmp2 = load_reg(s, rd);
8f8e3aa4 7816 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7817 dead_tmp(tmp2);
9ee6e8bb
PB
7818 }
7819 break;
7820 case 7:
7821 goto illegal_op;
7822 default: /* Saturate. */
9ee6e8bb
PB
7823 if (shift) {
7824 if (op & 1)
6ddbc6e4 7825 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7826 else
6ddbc6e4 7827 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7828 }
6ddbc6e4 7829 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7830 if (op & 4) {
7831 /* Unsigned. */
9ee6e8bb 7832 if ((op & 1) && shift == 0)
6ddbc6e4 7833 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7834 else
6ddbc6e4 7835 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7836 } else {
9ee6e8bb 7837 /* Signed. */
9ee6e8bb 7838 if ((op & 1) && shift == 0)
6ddbc6e4 7839 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7840 else
6ddbc6e4 7841 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7842 }
9ee6e8bb 7843 break;
2c0262af 7844 }
6ddbc6e4 7845 store_reg(s, rd, tmp);
9ee6e8bb
PB
7846 } else {
7847 imm = ((insn & 0x04000000) >> 15)
7848 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7849 if (insn & (1 << 22)) {
7850 /* 16-bit immediate. */
7851 imm |= (insn >> 4) & 0xf000;
7852 if (insn & (1 << 23)) {
7853 /* movt */
5e3f878a 7854 tmp = load_reg(s, rd);
86831435 7855 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7856 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7857 } else {
9ee6e8bb 7858 /* movw */
5e3f878a
PB
7859 tmp = new_tmp();
7860 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7861 }
7862 } else {
9ee6e8bb
PB
7863 /* Add/sub 12-bit immediate. */
7864 if (rn == 15) {
b0109805 7865 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7866 if (insn & (1 << 23))
b0109805 7867 offset -= imm;
9ee6e8bb 7868 else
b0109805 7869 offset += imm;
5e3f878a
PB
7870 tmp = new_tmp();
7871 tcg_gen_movi_i32(tmp, offset);
2c0262af 7872 } else {
5e3f878a 7873 tmp = load_reg(s, rn);
9ee6e8bb 7874 if (insn & (1 << 23))
5e3f878a 7875 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7876 else
5e3f878a 7877 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7878 }
9ee6e8bb 7879 }
5e3f878a 7880 store_reg(s, rd, tmp);
191abaa2 7881 }
9ee6e8bb
PB
7882 } else {
7883 int shifter_out = 0;
7884 /* modified 12-bit immediate. */
7885 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7886 imm = (insn & 0xff);
7887 switch (shift) {
7888 case 0: /* XY */
7889 /* Nothing to do. */
7890 break;
7891 case 1: /* 00XY00XY */
7892 imm |= imm << 16;
7893 break;
7894 case 2: /* XY00XY00 */
7895 imm |= imm << 16;
7896 imm <<= 8;
7897 break;
7898 case 3: /* XYXYXYXY */
7899 imm |= imm << 16;
7900 imm |= imm << 8;
7901 break;
7902 default: /* Rotated constant. */
7903 shift = (shift << 1) | (imm >> 7);
7904 imm |= 0x80;
7905 imm = imm << (32 - shift);
7906 shifter_out = 1;
7907 break;
b5ff1b31 7908 }
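/* This is the Thumb-2 modified-immediate expansion: cases 0-3 replicate imm8
   across the word (0x000000XY, 0x00XY00XY, 0xXY00XY00, 0xXYXYXYXY); the
   default case rotates 0x80 | imm8[6:0] right by i:imm3:imm8[7] places.
   Example: i:imm3 = 0b0100 with imm8 = 0x55 becomes 0xd5000000 (0xd5 rotated
   right by 8).  A plain-C sketch of the same expansion follows
   disas_thumb2_insn() below. */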
3174f8e9
FN
7909 tmp2 = new_tmp();
7910 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 7911 rn = (insn >> 16) & 0xf;
3174f8e9
FN
7912 if (rn == 15) {
7913 tmp = new_tmp();
7914 tcg_gen_movi_i32(tmp, 0);
7915 } else {
7916 tmp = load_reg(s, rn);
7917 }
9ee6e8bb
PB
7918 op = (insn >> 21) & 0xf;
7919 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 7920 shifter_out, tmp, tmp2))
9ee6e8bb 7921 goto illegal_op;
3174f8e9 7922 dead_tmp(tmp2);
9ee6e8bb
PB
7923 rd = (insn >> 8) & 0xf;
7924 if (rd != 15) {
3174f8e9
FN
7925 store_reg(s, rd, tmp);
7926 } else {
7927 dead_tmp(tmp);
2c0262af 7928 }
2c0262af 7929 }
9ee6e8bb
PB
7930 }
7931 break;
7932 case 12: /* Load/store single data item. */
7933 {
7934 int postinc = 0;
7935 int writeback = 0;
b0109805 7936 int user;
9ee6e8bb
PB
7937 if ((insn & 0x01100000) == 0x01000000) {
7938 if (disas_neon_ls_insn(env, s, insn))
c1713132 7939 goto illegal_op;
9ee6e8bb
PB
7940 break;
7941 }
b0109805 7942 user = IS_USER(s);
9ee6e8bb 7943 if (rn == 15) {
b0109805 7944 addr = new_tmp();
9ee6e8bb
PB
7945 /* PC relative. */
7946 /* s->pc has already been incremented by 4. */
7947 imm = s->pc & 0xfffffffc;
7948 if (insn & (1 << 23))
7949 imm += insn & 0xfff;
7950 else
7951 imm -= insn & 0xfff;
b0109805 7952 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7953 } else {
b0109805 7954 addr = load_reg(s, rn);
9ee6e8bb
PB
7955 if (insn & (1 << 23)) {
7956 /* Positive offset. */
7957 imm = insn & 0xfff;
b0109805 7958 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7959 } else {
7960 op = (insn >> 8) & 7;
7961 imm = insn & 0xff;
7962 switch (op) {
7963 case 0: case 8: /* Shifted Register. */
7964 shift = (insn >> 4) & 0xf;
7965 if (shift > 3)
18c9b560 7966 goto illegal_op;
b26eefb6 7967 tmp = load_reg(s, rm);
9ee6e8bb 7968 if (shift)
b26eefb6 7969 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7970 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7971 dead_tmp(tmp);
9ee6e8bb
PB
7972 break;
7973 case 4: /* Negative offset. */
b0109805 7974 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7975 break;
7976 case 6: /* User privilege. */
b0109805
PB
7977 tcg_gen_addi_i32(addr, addr, imm);
7978 user = 1;
9ee6e8bb
PB
7979 break;
7980 case 1: /* Post-decrement. */
7981 imm = -imm;
7982 /* Fall through. */
7983 case 3: /* Post-increment. */
9ee6e8bb
PB
7984 postinc = 1;
7985 writeback = 1;
7986 break;
7987 case 5: /* Pre-decrement. */
7988 imm = -imm;
7989 /* Fall through. */
7990 case 7: /* Pre-increment. */
b0109805 7991 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7992 writeback = 1;
7993 break;
7994 default:
b7bcbe95 7995 goto illegal_op;
9ee6e8bb
PB
7996 }
7997 }
7998 }
7999 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
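/* op[1:0] is the access size (0 = byte, 1 = halfword, 2 = word) and op[2]
   requests a sign-extending load, matching the load and store cases below. */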
8000 if (insn & (1 << 20)) {
8001 /* Load. */
8002 if (rs == 15 && op != 2) {
8003 if (op & 2)
b5ff1b31 8004 goto illegal_op;
9ee6e8bb
PB
8005 /* Memory hint. Implemented as NOP. */
8006 } else {
8007 switch (op) {
b0109805
PB
8008 case 0: tmp = gen_ld8u(addr, user); break;
8009 case 4: tmp = gen_ld8s(addr, user); break;
8010 case 1: tmp = gen_ld16u(addr, user); break;
8011 case 5: tmp = gen_ld16s(addr, user); break;
8012 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8013 default: goto illegal_op;
8014 }
8015 if (rs == 15) {
b0109805 8016 gen_bx(s, tmp);
9ee6e8bb 8017 } else {
b0109805 8018 store_reg(s, rs, tmp);
9ee6e8bb
PB
8019 }
8020 }
8021 } else {
8022 /* Store. */
8023 if (rs == 15)
b7bcbe95 8024 goto illegal_op;
b0109805 8025 tmp = load_reg(s, rs);
9ee6e8bb 8026 switch (op) {
b0109805
PB
8027 case 0: gen_st8(tmp, addr, user); break;
8028 case 1: gen_st16(tmp, addr, user); break;
8029 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8030 default: goto illegal_op;
b7bcbe95 8031 }
2c0262af 8032 }
9ee6e8bb 8033 if (postinc)
b0109805
PB
8034 tcg_gen_addi_i32(addr, addr, imm);
8035 if (writeback) {
8036 store_reg(s, rn, addr);
8037 } else {
8038 dead_tmp(addr);
8039 }
9ee6e8bb
PB
8040 }
8041 break;
8042 default:
8043 goto illegal_op;
2c0262af 8044 }
9ee6e8bb
PB
8045 return 0;
8046illegal_op:
8047 return 1;
2c0262af
FB
8048}
8049
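#if 0
/* Illustrative reference only (not used by the translator): a stand-alone C
   version of the modified-immediate expansion that disas_thumb2_insn() above
   performs inline on "insn".  i_imm3 is insn[26] and insn[14:12] packed into
   four bits, imm8 is insn[7:0]; the helper name is illustrative, not QEMU's. */
static uint32_t thumb_expand_imm_ref(uint32_t i_imm3, uint32_t imm8)
{
    uint32_t rot;

    switch (i_imm3) {
    case 0: /* 000000XY */
        return imm8;
    case 1: /* 00XY00XY */
        return imm8 | (imm8 << 16);
    case 2: /* XY00XY00 */
        return (imm8 << 8) | (imm8 << 24);
    case 3: /* XYXYXYXY */
        return imm8 | (imm8 << 8) | (imm8 << 16) | (imm8 << 24);
    default: /* 1bcdefgh rotated right by i:imm3:imm8[7] places */
        rot = (i_imm3 << 1) | (imm8 >> 7);
        return (0x80 | (imm8 & 0x7f)) << (32 - rot);
    }
}
#endif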
9ee6e8bb 8050static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8051{
8052 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8053 int32_t offset;
8054 int i;
b26eefb6 8055 TCGv tmp;
d9ba4830 8056 TCGv tmp2;
b0109805 8057 TCGv addr;
99c475ab 8058
9ee6e8bb
PB
8059 if (s->condexec_mask) {
8060 cond = s->condexec_cond;
8061 s->condlabel = gen_new_label();
d9ba4830 8062 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8063 s->condjmp = 1;
8064 }
8065
b5ff1b31 8066 insn = lduw_code(s->pc);
99c475ab 8067 s->pc += 2;
b5ff1b31 8068
99c475ab
FB
8069 switch (insn >> 12) {
8070 case 0: case 1:
396e467c 8071
99c475ab
FB
8072 rd = insn & 7;
8073 op = (insn >> 11) & 3;
8074 if (op == 3) {
8075 /* add/subtract */
8076 rn = (insn >> 3) & 7;
396e467c 8077 tmp = load_reg(s, rn);
99c475ab
FB
8078 if (insn & (1 << 10)) {
8079 /* immediate */
396e467c
FN
8080 tmp2 = new_tmp();
8081 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8082 } else {
8083 /* reg */
8084 rm = (insn >> 6) & 7;
396e467c 8085 tmp2 = load_reg(s, rm);
99c475ab 8086 }
9ee6e8bb
PB
8087 if (insn & (1 << 9)) {
8088 if (s->condexec_mask)
396e467c 8089 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8090 else
396e467c 8091 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8092 } else {
8093 if (s->condexec_mask)
396e467c 8094 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8095 else
396e467c 8096 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8097 }
396e467c
FN
8098 dead_tmp(tmp2);
8099 store_reg(s, rd, tmp);
99c475ab
FB
8100 } else {
8101 /* shift immediate */
8102 rm = (insn >> 3) & 7;
8103 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8104 tmp = load_reg(s, rm);
8105 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8106 if (!s->condexec_mask)
8107 gen_logic_CC(tmp);
8108 store_reg(s, rd, tmp);
99c475ab
FB
8109 }
8110 break;
8111 case 2: case 3:
8112 /* arithmetic large immediate */
8113 op = (insn >> 11) & 3;
8114 rd = (insn >> 8) & 0x7;
396e467c
FN
8115 if (op == 0) { /* mov */
8116 tmp = new_tmp();
8117 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8118 if (!s->condexec_mask)
396e467c
FN
8119 gen_logic_CC(tmp);
8120 store_reg(s, rd, tmp);
8121 } else {
8122 tmp = load_reg(s, rd);
8123 tmp2 = new_tmp();
8124 tcg_gen_movi_i32(tmp2, insn & 0xff);
8125 switch (op) {
8126 case 1: /* cmp */
8127 gen_helper_sub_cc(tmp, tmp, tmp2);
8128 dead_tmp(tmp);
8129 dead_tmp(tmp2);
8130 break;
8131 case 2: /* add */
8132 if (s->condexec_mask)
8133 tcg_gen_add_i32(tmp, tmp, tmp2);
8134 else
8135 gen_helper_add_cc(tmp, tmp, tmp2);
8136 dead_tmp(tmp2);
8137 store_reg(s, rd, tmp);
8138 break;
8139 case 3: /* sub */
8140 if (s->condexec_mask)
8141 tcg_gen_sub_i32(tmp, tmp, tmp2);
8142 else
8143 gen_helper_sub_cc(tmp, tmp, tmp2);
8144 dead_tmp(tmp2);
8145 store_reg(s, rd, tmp);
8146 break;
8147 }
99c475ab 8148 }
99c475ab
FB
8149 break;
8150 case 4:
8151 if (insn & (1 << 11)) {
8152 rd = (insn >> 8) & 7;
5899f386
FB
8153 /* load pc-relative. Bit 1 of PC is ignored. */
8154 val = s->pc + 2 + ((insn & 0xff) * 4);
8155 val &= ~(uint32_t)2;
b0109805
PB
8156 addr = new_tmp();
8157 tcg_gen_movi_i32(addr, val);
8158 tmp = gen_ld32(addr, IS_USER(s));
8159 dead_tmp(addr);
8160 store_reg(s, rd, tmp);
99c475ab
FB
8161 break;
8162 }
8163 if (insn & (1 << 10)) {
8164 /* data processing extended or blx */
8165 rd = (insn & 7) | ((insn >> 4) & 8);
8166 rm = (insn >> 3) & 0xf;
8167 op = (insn >> 8) & 3;
8168 switch (op) {
8169 case 0: /* add */
396e467c
FN
8170 tmp = load_reg(s, rd);
8171 tmp2 = load_reg(s, rm);
8172 tcg_gen_add_i32(tmp, tmp, tmp2);
8173 dead_tmp(tmp2);
8174 store_reg(s, rd, tmp);
99c475ab
FB
8175 break;
8176 case 1: /* cmp */
396e467c
FN
8177 tmp = load_reg(s, rd);
8178 tmp2 = load_reg(s, rm);
8179 gen_helper_sub_cc(tmp, tmp, tmp2);
8180 dead_tmp(tmp2);
8181 dead_tmp(tmp);
99c475ab
FB
8182 break;
8183 case 2: /* mov/cpy */
396e467c
FN
8184 tmp = load_reg(s, rm);
8185 store_reg(s, rd, tmp);
99c475ab
FB
8186 break;
8187 case 3:/* branch [and link] exchange thumb register */
b0109805 8188 tmp = load_reg(s, rm);
99c475ab
FB
8189 if (insn & (1 << 7)) {
8190 val = (uint32_t)s->pc | 1;
b0109805
PB
8191 tmp2 = new_tmp();
8192 tcg_gen_movi_i32(tmp2, val);
8193 store_reg(s, 14, tmp2);
99c475ab 8194 }
d9ba4830 8195 gen_bx(s, tmp);
99c475ab
FB
8196 break;
8197 }
8198 break;
8199 }
8200
8201 /* data processing register */
8202 rd = insn & 7;
8203 rm = (insn >> 3) & 7;
8204 op = (insn >> 6) & 0xf;
8205 if (op == 2 || op == 3 || op == 4 || op == 7) {
8206 /* the shift/rotate ops want the operands backwards */
8207 val = rm;
8208 rm = rd;
8209 rd = val;
8210 val = 1;
8211 } else {
8212 val = 0;
8213 }
8214
396e467c
FN
8215 if (op == 9) { /* neg */
8216 tmp = new_tmp();
8217 tcg_gen_movi_i32(tmp, 0);
8218 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8219 tmp = load_reg(s, rd);
8220 } else {
8221 TCGV_UNUSED(tmp);
8222 }
99c475ab 8223
396e467c 8224 tmp2 = load_reg(s, rm);
5899f386 8225 switch (op) {
99c475ab 8226 case 0x0: /* and */
396e467c 8227 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8228 if (!s->condexec_mask)
396e467c 8229 gen_logic_CC(tmp);
99c475ab
FB
8230 break;
8231 case 0x1: /* eor */
396e467c 8232 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8233 if (!s->condexec_mask)
396e467c 8234 gen_logic_CC(tmp);
99c475ab
FB
8235 break;
8236 case 0x2: /* lsl */
9ee6e8bb 8237 if (s->condexec_mask) {
396e467c 8238 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8239 } else {
396e467c
FN
8240 gen_helper_shl_cc(tmp2, tmp2, tmp);
8241 gen_logic_CC(tmp2);
9ee6e8bb 8242 }
99c475ab
FB
8243 break;
8244 case 0x3: /* lsr */
9ee6e8bb 8245 if (s->condexec_mask) {
396e467c 8246 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8247 } else {
396e467c
FN
8248 gen_helper_shr_cc(tmp2, tmp2, tmp);
8249 gen_logic_CC(tmp2);
9ee6e8bb 8250 }
99c475ab
FB
8251 break;
8252 case 0x4: /* asr */
9ee6e8bb 8253 if (s->condexec_mask) {
396e467c 8254 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8255 } else {
396e467c
FN
8256 gen_helper_sar_cc(tmp2, tmp2, tmp);
8257 gen_logic_CC(tmp2);
9ee6e8bb 8258 }
99c475ab
FB
8259 break;
8260 case 0x5: /* adc */
9ee6e8bb 8261 if (s->condexec_mask)
396e467c 8262 gen_adc(tmp, tmp2);
9ee6e8bb 8263 else
396e467c 8264 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8265 break;
8266 case 0x6: /* sbc */
9ee6e8bb 8267 if (s->condexec_mask)
396e467c 8268 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8269 else
396e467c 8270 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8271 break;
8272 case 0x7: /* ror */
9ee6e8bb 8273 if (s->condexec_mask) {
396e467c 8274 gen_helper_ror(tmp2, tmp2, tmp);
9ee6e8bb 8275 } else {
396e467c
FN
8276 gen_helper_ror_cc(tmp2, tmp2, tmp);
8277 gen_logic_CC(tmp2);
9ee6e8bb 8278 }
99c475ab
FB
8279 break;
8280 case 0x8: /* tst */
396e467c
FN
8281 tcg_gen_and_i32(tmp, tmp, tmp2);
8282 gen_logic_CC(tmp);
99c475ab 8283 rd = 16;
5899f386 8284 break;
99c475ab 8285 case 0x9: /* neg */
9ee6e8bb 8286 if (s->condexec_mask)
396e467c 8287 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8288 else
396e467c 8289 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8290 break;
8291 case 0xa: /* cmp */
396e467c 8292 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8293 rd = 16;
8294 break;
8295 case 0xb: /* cmn */
396e467c 8296 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8297 rd = 16;
8298 break;
8299 case 0xc: /* orr */
396e467c 8300 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8301 if (!s->condexec_mask)
396e467c 8302 gen_logic_CC(tmp);
99c475ab
FB
8303 break;
8304 case 0xd: /* mul */
396e467c 8305 gen_mull(tmp, tmp2);
9ee6e8bb 8306 if (!s->condexec_mask)
396e467c 8307 gen_logic_CC(tmp);
99c475ab
FB
8308 break;
8309 case 0xe: /* bic */
396e467c 8310 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb 8311 if (!s->condexec_mask)
396e467c 8312 gen_logic_CC(tmp);
99c475ab
FB
8313 break;
8314 case 0xf: /* mvn */
396e467c 8315 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8316 if (!s->condexec_mask)
396e467c 8317 gen_logic_CC(tmp2);
99c475ab 8318 val = 1;
5899f386 8319 rm = rd;
99c475ab
FB
8320 break;
8321 }
8322 if (rd != 16) {
396e467c
FN
8323 if (val) {
8324 store_reg(s, rm, tmp2);
8325 if (op != 0xf)
8326 dead_tmp(tmp);
8327 } else {
8328 store_reg(s, rd, tmp);
8329 dead_tmp(tmp2);
8330 }
8331 } else {
8332 dead_tmp(tmp);
8333 dead_tmp(tmp2);
99c475ab
FB
8334 }
8335 break;
8336
8337 case 5:
8338 /* load/store register offset. */
8339 rd = insn & 7;
8340 rn = (insn >> 3) & 7;
8341 rm = (insn >> 6) & 7;
8342 op = (insn >> 9) & 7;
b0109805 8343 addr = load_reg(s, rn);
b26eefb6 8344 tmp = load_reg(s, rm);
b0109805 8345 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8346 dead_tmp(tmp);
99c475ab
FB
8347
8348 if (op < 3) /* store */
b0109805 8349 tmp = load_reg(s, rd);
99c475ab
FB
8350
8351 switch (op) {
8352 case 0: /* str */
b0109805 8353 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8354 break;
8355 case 1: /* strh */
b0109805 8356 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8357 break;
8358 case 2: /* strb */
b0109805 8359 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8360 break;
8361 case 3: /* ldrsb */
b0109805 8362 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8363 break;
8364 case 4: /* ldr */
b0109805 8365 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8366 break;
8367 case 5: /* ldrh */
b0109805 8368 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8369 break;
8370 case 6: /* ldrb */
b0109805 8371 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8372 break;
8373 case 7: /* ldrsh */
b0109805 8374 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8375 break;
8376 }
8377 if (op >= 3) /* load */
b0109805
PB
8378 store_reg(s, rd, tmp);
8379 dead_tmp(addr);
99c475ab
FB
8380 break;
8381
8382 case 6:
8383 /* load/store word immediate offset */
8384 rd = insn & 7;
8385 rn = (insn >> 3) & 7;
b0109805 8386 addr = load_reg(s, rn);
99c475ab 8387 val = (insn >> 4) & 0x7c;
b0109805 8388 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8389
8390 if (insn & (1 << 11)) {
8391 /* load */
b0109805
PB
8392 tmp = gen_ld32(addr, IS_USER(s));
8393 store_reg(s, rd, tmp);
99c475ab
FB
8394 } else {
8395 /* store */
b0109805
PB
8396 tmp = load_reg(s, rd);
8397 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8398 }
b0109805 8399 dead_tmp(addr);
99c475ab
FB
8400 break;
8401
8402 case 7:
8403 /* load/store byte immediate offset */
8404 rd = insn & 7;
8405 rn = (insn >> 3) & 7;
b0109805 8406 addr = load_reg(s, rn);
99c475ab 8407 val = (insn >> 6) & 0x1f;
b0109805 8408 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8409
8410 if (insn & (1 << 11)) {
8411 /* load */
b0109805
PB
8412 tmp = gen_ld8u(addr, IS_USER(s));
8413 store_reg(s, rd, tmp);
99c475ab
FB
8414 } else {
8415 /* store */
b0109805
PB
8416 tmp = load_reg(s, rd);
8417 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8418 }
b0109805 8419 dead_tmp(addr);
99c475ab
FB
8420 break;
8421
8422 case 8:
8423 /* load/store halfword immediate offset */
8424 rd = insn & 7;
8425 rn = (insn >> 3) & 7;
b0109805 8426 addr = load_reg(s, rn);
99c475ab 8427 val = (insn >> 5) & 0x3e;
b0109805 8428 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8429
8430 if (insn & (1 << 11)) {
8431 /* load */
b0109805
PB
8432 tmp = gen_ld16u(addr, IS_USER(s));
8433 store_reg(s, rd, tmp);
99c475ab
FB
8434 } else {
8435 /* store */
b0109805
PB
8436 tmp = load_reg(s, rd);
8437 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8438 }
b0109805 8439 dead_tmp(addr);
99c475ab
FB
8440 break;
8441
8442 case 9:
8443 /* load/store from stack */
8444 rd = (insn >> 8) & 7;
b0109805 8445 addr = load_reg(s, 13);
99c475ab 8446 val = (insn & 0xff) * 4;
b0109805 8447 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8448
8449 if (insn & (1 << 11)) {
8450 /* load */
b0109805
PB
8451 tmp = gen_ld32(addr, IS_USER(s));
8452 store_reg(s, rd, tmp);
99c475ab
FB
8453 } else {
8454 /* store */
b0109805
PB
8455 tmp = load_reg(s, rd);
8456 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8457 }
b0109805 8458 dead_tmp(addr);
99c475ab
FB
8459 break;
8460
8461 case 10:
8462 /* add to high reg */
8463 rd = (insn >> 8) & 7;
5899f386
FB
8464 if (insn & (1 << 11)) {
8465 /* SP */
5e3f878a 8466 tmp = load_reg(s, 13);
5899f386
FB
8467 } else {
8468 /* PC. bit 1 is ignored. */
5e3f878a
PB
8469 tmp = new_tmp();
8470 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8471 }
99c475ab 8472 val = (insn & 0xff) * 4;
5e3f878a
PB
8473 tcg_gen_addi_i32(tmp, tmp, val);
8474 store_reg(s, rd, tmp);
99c475ab
FB
8475 break;
8476
8477 case 11:
8478 /* misc */
8479 op = (insn >> 8) & 0xf;
8480 switch (op) {
8481 case 0:
8482 /* adjust stack pointer */
b26eefb6 8483 tmp = load_reg(s, 13);
99c475ab
FB
8484 val = (insn & 0x7f) * 4;
8485 if (insn & (1 << 7))
6a0d8a1d 8486 val = -(int32_t)val;
b26eefb6
PB
8487 tcg_gen_addi_i32(tmp, tmp, val);
8488 store_reg(s, 13, tmp);
99c475ab
FB
8489 break;
8490
9ee6e8bb
PB
8491 case 2: /* sign/zero extend. */
8492 ARCH(6);
8493 rd = insn & 7;
8494 rm = (insn >> 3) & 7;
b0109805 8495 tmp = load_reg(s, rm);
9ee6e8bb 8496 switch ((insn >> 6) & 3) {
b0109805
PB
8497 case 0: gen_sxth(tmp); break;
8498 case 1: gen_sxtb(tmp); break;
8499 case 2: gen_uxth(tmp); break;
8500 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8501 }
b0109805 8502 store_reg(s, rd, tmp);
9ee6e8bb 8503 break;
99c475ab
FB
8504 case 4: case 5: case 0xc: case 0xd:
8505 /* push/pop */
b0109805 8506 addr = load_reg(s, 13);
5899f386
FB
8507 if (insn & (1 << 8))
8508 offset = 4;
99c475ab 8509 else
5899f386
FB
8510 offset = 0;
8511 for (i = 0; i < 8; i++) {
8512 if (insn & (1 << i))
8513 offset += 4;
8514 }
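/* offset is the total transfer size: 4 bytes per low register in the list,
   plus 4 more for LR (push) or PC (pop) when bit 8 is set.  Push drops SP by
   the full amount up front and then stores upwards; pop loads upwards from the
   old SP.  Either way addr ends up at the new SP value and is written back
   below. */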
8515 if ((insn & (1 << 11)) == 0) {
b0109805 8516 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8517 }
99c475ab
FB
8518 for (i = 0; i < 8; i++) {
8519 if (insn & (1 << i)) {
8520 if (insn & (1 << 11)) {
8521 /* pop */
b0109805
PB
8522 tmp = gen_ld32(addr, IS_USER(s));
8523 store_reg(s, i, tmp);
99c475ab
FB
8524 } else {
8525 /* push */
b0109805
PB
8526 tmp = load_reg(s, i);
8527 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8528 }
5899f386 8529 /* advance to the next address. */
b0109805 8530 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8531 }
8532 }
a50f5b91 8533 TCGV_UNUSED(tmp);
99c475ab
FB
8534 if (insn & (1 << 8)) {
8535 if (insn & (1 << 11)) {
8536 /* pop pc */
b0109805 8537 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8538 /* don't set the pc until the rest of the instruction
8539 has completed */
8540 } else {
8541 /* push lr */
b0109805
PB
8542 tmp = load_reg(s, 14);
8543 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8544 }
b0109805 8545 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8546 }
5899f386 8547 if ((insn & (1 << 11)) == 0) {
b0109805 8548 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8549 }
99c475ab 8550 /* write back the new stack pointer */
b0109805 8551 store_reg(s, 13, addr);
99c475ab
FB
8552 /* set the new PC value */
8553 if ((insn & 0x0900) == 0x0900)
b0109805 8554 gen_bx(s, tmp);
99c475ab
FB
8555 break;
8556
9ee6e8bb
PB
8557 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8558 rm = insn & 7;
d9ba4830 8559 tmp = load_reg(s, rm);
9ee6e8bb
PB
8560 s->condlabel = gen_new_label();
8561 s->condjmp = 1;
8562 if (insn & (1 << 11))
cb63669a 8563 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8564 else
cb63669a 8565 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8566 dead_tmp(tmp);
9ee6e8bb
PB
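/* cbz/cbnz branch offset: insn[9] supplies bit 6 and insn[7:3] supply bits 5:1
   of the (always forward) displacement; the target computed below is this
   insn's address + 4 + offset. */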
8567 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8568 val = (uint32_t)s->pc + 2;
8569 val += offset;
8570 gen_jmp(s, val);
8571 break;
8572
8573 case 15: /* IT, nop-hint. */
8574 if ((insn & 0xf) == 0) {
8575 gen_nop_hint(s, (insn >> 4) & 0xf);
8576 break;
8577 }
8578 /* If Then. */
8579 s->condexec_cond = (insn >> 4) & 0xe;
8580 s->condexec_mask = insn & 0x1f;
8581 /* No actual code generated for this insn, just setup state. */
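/* condexec_mask holds firstcond[0]:mask[3:0] as a five-bit shift register; the
   IT-block advance in gen_intermediate_code_internal() shifts it left once per
   insn and feeds the bit about to fall out into bit 0 of the condition. */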
8582 break;
8583
06c949e6 8584 case 0xe: /* bkpt */
9ee6e8bb 8585 gen_set_condexec(s);
5e3f878a 8586 gen_set_pc_im(s->pc - 2);
d9ba4830 8587 gen_exception(EXCP_BKPT);
06c949e6
PB
8588 s->is_jmp = DISAS_JUMP;
8589 break;
8590
9ee6e8bb
PB
8591 case 0xa: /* rev */
8592 ARCH(6);
8593 rn = (insn >> 3) & 0x7;
8594 rd = insn & 0x7;
b0109805 8595 tmp = load_reg(s, rn);
9ee6e8bb 8596 switch ((insn >> 6) & 3) {
66896cb8 8597 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8598 case 1: gen_rev16(tmp); break;
8599 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8600 default: goto illegal_op;
8601 }
b0109805 8602 store_reg(s, rd, tmp);
9ee6e8bb
PB
8603 break;
8604
8605 case 6: /* cps */
8606 ARCH(6);
8607 if (IS_USER(s))
8608 break;
8609 if (IS_M(env)) {
8984bd2e 8610 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8611 /* PRIMASK */
8984bd2e
PB
8612 if (insn & 1) {
8613 addr = tcg_const_i32(16);
8614 gen_helper_v7m_msr(cpu_env, addr, tmp);
8615 }
9ee6e8bb 8616 /* FAULTMASK */
8984bd2e
PB
8617 if (insn & 2) {
8618 addr = tcg_const_i32(17);
8619 gen_helper_v7m_msr(cpu_env, addr, tmp);
8620 }
9ee6e8bb
PB
8621 gen_lookup_tb(s);
8622 } else {
8623 if (insn & (1 << 4))
8624 shift = CPSR_A | CPSR_I | CPSR_F;
8625 else
8626 shift = 0;
2fbac54b 8627 gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
9ee6e8bb
PB
8628 }
8629 break;
8630
99c475ab
FB
8631 default:
8632 goto undef;
8633 }
8634 break;
8635
8636 case 12:
8637 /* load/store multiple */
8638 rn = (insn >> 8) & 0x7;
b0109805 8639 addr = load_reg(s, rn);
99c475ab
FB
8640 for (i = 0; i < 8; i++) {
8641 if (insn & (1 << i)) {
99c475ab
FB
8642 if (insn & (1 << 11)) {
8643 /* load */
b0109805
PB
8644 tmp = gen_ld32(addr, IS_USER(s));
8645 store_reg(s, i, tmp);
99c475ab
FB
8646 } else {
8647 /* store */
b0109805
PB
8648 tmp = load_reg(s, i);
8649 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8650 }
5899f386 8651 /* advance to the next address */
b0109805 8652 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8653 }
8654 }
5899f386 8655 /* Base register writeback. */
b0109805
PB
8656 if ((insn & (1 << rn)) == 0) {
8657 store_reg(s, rn, addr);
8658 } else {
8659 dead_tmp(addr);
8660 }
99c475ab
FB
8661 break;
8662
8663 case 13:
8664 /* conditional branch or swi */
8665 cond = (insn >> 8) & 0xf;
8666 if (cond == 0xe)
8667 goto undef;
8668
8669 if (cond == 0xf) {
8670 /* swi */
9ee6e8bb 8671 gen_set_condexec(s);
422ebf69 8672 gen_set_pc_im(s->pc);
9ee6e8bb 8673 s->is_jmp = DISAS_SWI;
99c475ab
FB
8674 break;
8675 }
8676 /* generate a conditional jump to next instruction */
e50e6a20 8677 s->condlabel = gen_new_label();
d9ba4830 8678 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8679 s->condjmp = 1;
99c475ab
FB
8680
8681 /* jump to the offset */
5899f386 8682 val = (uint32_t)s->pc + 2;
99c475ab 8683 offset = ((int32_t)insn << 24) >> 24;
5899f386 8684 val += offset << 1;
8aaca4c0 8685 gen_jmp(s, val);
99c475ab
FB
8686 break;
8687
8688 case 14:
358bf29e 8689 if (insn & (1 << 11)) {
9ee6e8bb
PB
8690 if (disas_thumb2_insn(env, s, insn))
8691 goto undef32;
358bf29e
PB
8692 break;
8693 }
9ee6e8bb 8694 /* unconditional branch */
99c475ab
FB
8695 val = (uint32_t)s->pc;
8696 offset = ((int32_t)insn << 21) >> 21;
8697 val += (offset << 1) + 2;
8aaca4c0 8698 gen_jmp(s, val);
99c475ab
FB
8699 break;
8700
8701 case 15:
9ee6e8bb 8702 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8703 goto undef32;
9ee6e8bb 8704 break;
99c475ab
FB
8705 }
8706 return;
9ee6e8bb
PB
8707undef32:
8708 gen_set_condexec(s);
5e3f878a 8709 gen_set_pc_im(s->pc - 4);
d9ba4830 8710 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8711 s->is_jmp = DISAS_JUMP;
8712 return;
8713illegal_op:
99c475ab 8714undef:
9ee6e8bb 8715 gen_set_condexec(s);
5e3f878a 8716 gen_set_pc_im(s->pc - 2);
d9ba4830 8717 gen_exception(EXCP_UDEF);
99c475ab
FB
8718 s->is_jmp = DISAS_JUMP;
8719}
8720
2c0262af
FB
8721/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8722 basic block 'tb'. If search_pc is TRUE, also generate PC
8723 information for each intermediate instruction. */
2cfc5f17
TS
8724static inline void gen_intermediate_code_internal(CPUState *env,
8725 TranslationBlock *tb,
8726 int search_pc)
2c0262af
FB
8727{
8728 DisasContext dc1, *dc = &dc1;
a1d1bb31 8729 CPUBreakpoint *bp;
2c0262af
FB
8730 uint16_t *gen_opc_end;
8731 int j, lj;
0fa85d43 8732 target_ulong pc_start;
b5ff1b31 8733 uint32_t next_page_start;
2e70f6ef
PB
8734 int num_insns;
8735 int max_insns;
3b46e624 8736
2c0262af 8737 /* generate intermediate code */
b26eefb6 8738 num_temps = 0;
b26eefb6 8739
0fa85d43 8740 pc_start = tb->pc;
3b46e624 8741
2c0262af
FB
8742 dc->tb = tb;
8743
2c0262af 8744 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8745
8746 dc->is_jmp = DISAS_NEXT;
8747 dc->pc = pc_start;
8aaca4c0 8748 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8749 dc->condjmp = 0;
5899f386 8750 dc->thumb = env->thumb;
9ee6e8bb
PB
8751 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8752 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 8753#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8754 if (IS_M(env)) {
8755 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8756 } else {
8757 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8758 }
b5ff1b31 8759#endif
a7812ae4
PB
8760 cpu_F0s = tcg_temp_new_i32();
8761 cpu_F1s = tcg_temp_new_i32();
8762 cpu_F0d = tcg_temp_new_i64();
8763 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8764 cpu_V0 = cpu_F0d;
8765 cpu_V1 = cpu_F1d;
e677137d 8766 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8767 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8768 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8769 lj = -1;
2e70f6ef
PB
8770 num_insns = 0;
8771 max_insns = tb->cflags & CF_COUNT_MASK;
8772 if (max_insns == 0)
8773 max_insns = CF_COUNT_MASK;
8774
8775 gen_icount_start();
9ee6e8bb
PB
8776 /* Reset the conditional execution bits immediately. This avoids
8777 complications trying to do it at the end of the block. */
8778 if (env->condexec_bits)
8f01245e
PB
8779 {
8780 TCGv tmp = new_tmp();
8781 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8782 store_cpu_field(tmp, condexec_bits);
8f01245e 8783 }
2c0262af 8784 do {
fbb4a2e3
PB
8785#ifdef CONFIG_USER_ONLY
8786 /* Intercept jump to the magic kernel page. */
8787 if (dc->pc >= 0xffff0000) {
8788 /* We always get here via a jump, so we know we are not in a
8789 conditional execution block. */
8790 gen_exception(EXCP_KERNEL_TRAP);
8791 dc->is_jmp = DISAS_UPDATE;
8792 break;
8793 }
8794#else
9ee6e8bb
PB
8795 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8796 /* We always get here via a jump, so we know we are not in a
8797 conditional execution block. */
d9ba4830 8798 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8799 dc->is_jmp = DISAS_UPDATE;
8800 break;
9ee6e8bb
PB
8801 }
8802#endif
8803
72cf2d4f
BS
8804 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8805 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8806 if (bp->pc == dc->pc) {
9ee6e8bb 8807 gen_set_condexec(dc);
5e3f878a 8808 gen_set_pc_im(dc->pc);
d9ba4830 8809 gen_exception(EXCP_DEBUG);
1fddef4b 8810 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8811 /* Advance PC so that clearing the breakpoint will
8812 invalidate this TB. */
8813 dc->pc += 2;
8814 goto done_generating;
1fddef4b
FB
8815 break;
8816 }
8817 }
8818 }
2c0262af
FB
8819 if (search_pc) {
8820 j = gen_opc_ptr - gen_opc_buf;
8821 if (lj < j) {
8822 lj++;
8823 while (lj < j)
8824 gen_opc_instr_start[lj++] = 0;
8825 }
0fa85d43 8826 gen_opc_pc[lj] = dc->pc;
2c0262af 8827 gen_opc_instr_start[lj] = 1;
2e70f6ef 8828 gen_opc_icount[lj] = num_insns;
2c0262af 8829 }
e50e6a20 8830
2e70f6ef
PB
8831 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8832 gen_io_start();
8833
9ee6e8bb
PB
8834 if (env->thumb) {
8835 disas_thumb_insn(env, dc);
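/* Advance the IT-block state: bit 4 of the five-bit firstcond[0]:mask value
   becomes bit 0 of the condition for the next insn and the mask shifts left by
   one, so e.g. after "ITTE EQ" the following three insns are translated as
   EQ, EQ, NE. */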
8836 if (dc->condexec_mask) {
8837 dc->condexec_cond = (dc->condexec_cond & 0xe)
8838 | ((dc->condexec_mask >> 4) & 1);
8839 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8840 if (dc->condexec_mask == 0) {
8841 dc->condexec_cond = 0;
8842 }
8843 }
8844 } else {
8845 disas_arm_insn(env, dc);
8846 }
b26eefb6
PB
8847 if (num_temps) {
8848 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8849 num_temps = 0;
8850 }
e50e6a20
FB
8851
8852 if (dc->condjmp && !dc->is_jmp) {
8853 gen_set_label(dc->condlabel);
8854 dc->condjmp = 0;
8855 }
aaf2d97d 8856 /* Translation stops when a conditional branch is encountered.
e50e6a20 8857 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8858 * Also stop translation when a page boundary is reached. This
bf20dc07 8859 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8860 num_insns ++;
1fddef4b
FB
8861 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8862 !env->singlestep_enabled &&
1b530a6d 8863 !singlestep &&
2e70f6ef
PB
8864 dc->pc < next_page_start &&
8865 num_insns < max_insns);
8866
8867 if (tb->cflags & CF_LAST_IO) {
8868 if (dc->condjmp) {
8869 /* FIXME: This can theoretically happen with self-modifying
8870 code. */
8871 cpu_abort(env, "IO on conditional branch instruction");
8872 }
8873 gen_io_end();
8874 }
9ee6e8bb 8875
b5ff1b31 8876 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8877 instruction was a conditional branch or trap, and the PC has
8878 already been written. */
551bd27f 8879 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8880 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8881 if (dc->condjmp) {
9ee6e8bb
PB
8882 gen_set_condexec(dc);
8883 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8884 gen_exception(EXCP_SWI);
9ee6e8bb 8885 } else {
d9ba4830 8886 gen_exception(EXCP_DEBUG);
9ee6e8bb 8887 }
e50e6a20
FB
8888 gen_set_label(dc->condlabel);
8889 }
8890 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8891 gen_set_pc_im(dc->pc);
e50e6a20 8892 dc->condjmp = 0;
8aaca4c0 8893 }
9ee6e8bb
PB
8894 gen_set_condexec(dc);
8895 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8896 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8897 } else {
8898 /* FIXME: Single stepping a WFI insn will not halt
8899 the CPU. */
d9ba4830 8900 gen_exception(EXCP_DEBUG);
9ee6e8bb 8901 }
8aaca4c0 8902 } else {
9ee6e8bb
PB
8903 /* While branches must always occur at the end of an IT block,
8904 there are a few other things that can cause us to terminate
8905 the TB in the middle of an IT block:
8906 - Exception generating instructions (bkpt, swi, undefined).
8907 - Page boundaries.
8908 - Hardware watchpoints.
8909 Hardware breakpoints have already been handled and never reach this code.
8910 */
8911 gen_set_condexec(dc);
8aaca4c0 8912 switch(dc->is_jmp) {
8aaca4c0 8913 case DISAS_NEXT:
6e256c93 8914 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8915 break;
8916 default:
8917 case DISAS_JUMP:
8918 case DISAS_UPDATE:
8919 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8920 tcg_gen_exit_tb(0);
8aaca4c0
FB
8921 break;
8922 case DISAS_TB_JUMP:
8923 /* nothing more to generate */
8924 break;
9ee6e8bb 8925 case DISAS_WFI:
d9ba4830 8926 gen_helper_wfi();
9ee6e8bb
PB
8927 break;
8928 case DISAS_SWI:
d9ba4830 8929 gen_exception(EXCP_SWI);
9ee6e8bb 8930 break;
8aaca4c0 8931 }
e50e6a20
FB
8932 if (dc->condjmp) {
8933 gen_set_label(dc->condlabel);
9ee6e8bb 8934 gen_set_condexec(dc);
6e256c93 8935 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8936 dc->condjmp = 0;
8937 }
2c0262af 8938 }
2e70f6ef 8939
9ee6e8bb 8940done_generating:
2e70f6ef 8941 gen_icount_end(tb, num_insns);
2c0262af
FB
8942 *gen_opc_ptr = INDEX_op_end;
8943
8944#ifdef DEBUG_DISAS
8fec2b8c 8945 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
8946 qemu_log("----------------\n");
8947 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8948 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
8949 qemu_log("\n");
2c0262af
FB
8950 }
8951#endif
b5ff1b31
FB
8952 if (search_pc) {
8953 j = gen_opc_ptr - gen_opc_buf;
8954 lj++;
8955 while (lj <= j)
8956 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8957 } else {
2c0262af 8958 tb->size = dc->pc - pc_start;
2e70f6ef 8959 tb->icount = num_insns;
b5ff1b31 8960 }
2c0262af
FB
8961}
8962
2cfc5f17 8963void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 8964{
2cfc5f17 8965 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
8966}
8967
2cfc5f17 8968void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 8969{
2cfc5f17 8970 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
8971}
8972
b5ff1b31
FB
8973static const char *cpu_mode_names[16] = {
8974 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8975 "???", "???", "???", "und", "???", "???", "???", "sys"
8976};
9ee6e8bb 8977
5fafdf24 8978void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8979 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8980 int flags)
2c0262af
FB
8981{
8982 int i;
06e80fc9 8983#if 0
bc380d17 8984 union {
b7bcbe95
FB
8985 uint32_t i;
8986 float s;
8987 } s0, s1;
8988 CPU_DoubleU d;
a94a6abf
PB
8989 /* ??? This assumes float64 and double have the same layout.
8990 Oh well, it's only debug dumps. */
8991 union {
8992 float64 f64;
8993 double d;
8994 } d0;
06e80fc9 8995#endif
b5ff1b31 8996 uint32_t psr;
2c0262af
FB
8997
8998 for(i=0;i<16;i++) {
7fe48483 8999 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9000 if ((i % 4) == 3)
7fe48483 9001 cpu_fprintf(f, "\n");
2c0262af 9002 else
7fe48483 9003 cpu_fprintf(f, " ");
2c0262af 9004 }
b5ff1b31 9005 psr = cpsr_read(env);
687fa640
TS
9006 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9007 psr,
b5ff1b31
FB
9008 psr & (1 << 31) ? 'N' : '-',
9009 psr & (1 << 30) ? 'Z' : '-',
9010 psr & (1 << 29) ? 'C' : '-',
9011 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9012 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9013 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9014
5e3f878a 9015#if 0
b7bcbe95 9016 for (i = 0; i < 16; i++) {
8e96005d
FB
9017 d.d = env->vfp.regs[i];
9018 s0.i = d.l.lower;
9019 s1.i = d.l.upper;
a94a6abf
PB
9020 d0.f64 = d.d;
9021 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9022 i * 2, (int)s0.i, s0.s,
a94a6abf 9023 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9024 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9025 d0.d);
b7bcbe95 9026 }
40f137e1 9027 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9028#endif
2c0262af 9029}
a6b025d3 9030
d2856f1a
AJ
9031void gen_pc_load(CPUState *env, TranslationBlock *tb,
9032 unsigned long searched_pc, int pc_pos, void *puc)
9033{
9034 env->regs[15] = gen_opc_pc[pc_pos];
9035}