/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while (0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so we defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];

/* FIXME: These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* Normally s->pc has already been advanced past this insn, so we
           only need to add one more insn length to get the value the
           architecture says a PC read returns.  */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
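
/* A typical pattern in the decoders below is (sketch only):
 *     tmp = load_reg(s, rn);            // fresh temporary holding Rn
 *     tcg_gen_addi_i32(tmp, tmp, imm);  // operate on the temporary
 *     store_reg(s, rd, tmp);            // write back; tmp is marked dead
 */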

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}
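
/* Dual 16x16->32 signed multiply: on return a = lo16(a) * lo16(b) and
   b = hi16(a) * hi16(b).  Both inputs are clobbered.  */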
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
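/* The XOR/subtract pair above is the usual branch-free sign extension:
   e.g. for width == 4, the field value 0b1011 masks to 0xb, XOR with the
   sign bit 0x8 gives 0x3, and 0x3 - 0x8 = -5, which is 0b1011 read as a
   signed 4-bit value.  */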

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
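/* i.e. a holds the low word and b the high word of the 64-bit value;
   bit 31 of the low word is added into the high word, leaving the
   rounded top half in a.  */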

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
    tcg_temp_free_i64(tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
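/* Why the masking works: with bit 15 cleared in both operands the low
   halfword sum cannot carry into bit 16, so the two halfwords add
   independently; XOR-ing the saved (t0 ^ t1) & 0x8000 term back in then
   restores the correct top bit of the low halfword.  */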

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
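/* Note: T0 - T1 + CF - 1 equals T0 - T1 - (1 - CF), i.e. the ARM
   subtract-with-carry semantics in which the carry flag acts as a
   "not borrow".  */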

/* FIXME: Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

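/* Copy bit (shift) of var into the carry flag; callers use this to
   record the last bit shifted out of an immediate-shift operand.  */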
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
        break;
    }
}

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
496
6ddbc6e4
PB
497#define PAS_OP(pfx) \
498 switch (op2) { \
499 case 0: gen_pas_helper(glue(pfx,add16)); break; \
500 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
501 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
502 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
503 case 4: gen_pas_helper(glue(pfx,add8)); break; \
504 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
505 }
d9ba4830 506static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 507{
a7812ae4 508 TCGv_ptr tmp;
6ddbc6e4
PB
509
510 switch (op1) {
511#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
512 case 1:
a7812ae4 513 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
514 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
515 PAS_OP(s)
b75263d6 516 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
517 break;
518 case 5:
a7812ae4 519 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
520 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
521 PAS_OP(u)
b75263d6 522 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
523 break;
524#undef gen_pas_helper
525#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
526 case 2:
527 PAS_OP(q);
528 break;
529 case 3:
530 PAS_OP(sh);
531 break;
532 case 6:
533 PAS_OP(uq);
534 break;
535 case 7:
536 PAS_OP(uh);
537 break;
538#undef gen_pas_helper
539 }
540}
9ee6e8bb
PB
541#undef PAS_OP
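/* For example, gen_arm_parallel_addsub(1, 0, a, b) expands the PAS_OP(s)
   case above into gen_helper_sadd16(a, a, b, tmp), with tmp pointing at
   the GE flags in CPUState.  */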
542
6ddbc6e4
PB
543/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
544#define PAS_OP(pfx) \
545 switch (op2) { \
546 case 0: gen_pas_helper(glue(pfx,add8)); break; \
547 case 1: gen_pas_helper(glue(pfx,add16)); break; \
548 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
549 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
550 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
551 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
552 }
d9ba4830 553static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 554{
a7812ae4 555 TCGv_ptr tmp;
6ddbc6e4
PB
556
557 switch (op1) {
558#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
559 case 0:
a7812ae4 560 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
561 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
562 PAS_OP(s)
b75263d6 563 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
564 break;
565 case 4:
a7812ae4 566 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
567 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
568 PAS_OP(u)
b75263d6 569 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
570 break;
571#undef gen_pas_helper
572#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
573 case 1:
574 PAS_OP(q);
575 break;
576 case 2:
577 PAS_OP(sh);
578 break;
579 case 5:
580 PAS_OP(uq);
581 break;
582 case 6:
583 PAS_OP(uh);
584 break;
585#undef gen_pas_helper
586 }
587}
9ee6e8bb
PB
588#undef PAS_OP
589
d9ba4830
PB
590static void gen_test_cc(int cc, int label)
591{
592 TCGv tmp;
593 TCGv tmp2;
d9ba4830
PB
594 int inv;
595
d9ba4830
PB
596 switch (cc) {
597 case 0: /* eq: Z */
6fbe23d5 598 tmp = load_cpu_field(ZF);
cb63669a 599 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
600 break;
601 case 1: /* ne: !Z */
6fbe23d5 602 tmp = load_cpu_field(ZF);
cb63669a 603 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
604 break;
605 case 2: /* cs: C */
606 tmp = load_cpu_field(CF);
cb63669a 607 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
608 break;
609 case 3: /* cc: !C */
610 tmp = load_cpu_field(CF);
cb63669a 611 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
612 break;
613 case 4: /* mi: N */
6fbe23d5 614 tmp = load_cpu_field(NF);
cb63669a 615 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
616 break;
617 case 5: /* pl: !N */
6fbe23d5 618 tmp = load_cpu_field(NF);
cb63669a 619 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
620 break;
621 case 6: /* vs: V */
622 tmp = load_cpu_field(VF);
cb63669a 623 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
624 break;
625 case 7: /* vc: !V */
626 tmp = load_cpu_field(VF);
cb63669a 627 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
628 break;
629 case 8: /* hi: C && !Z */
630 inv = gen_new_label();
631 tmp = load_cpu_field(CF);
cb63669a 632 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 633 dead_tmp(tmp);
6fbe23d5 634 tmp = load_cpu_field(ZF);
cb63669a 635 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
636 gen_set_label(inv);
637 break;
638 case 9: /* ls: !C || Z */
639 tmp = load_cpu_field(CF);
cb63669a 640 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 641 dead_tmp(tmp);
6fbe23d5 642 tmp = load_cpu_field(ZF);
cb63669a 643 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
644 break;
645 case 10: /* ge: N == V -> N ^ V == 0 */
646 tmp = load_cpu_field(VF);
6fbe23d5 647 tmp2 = load_cpu_field(NF);
d9ba4830
PB
648 tcg_gen_xor_i32(tmp, tmp, tmp2);
649 dead_tmp(tmp2);
cb63669a 650 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
651 break;
652 case 11: /* lt: N != V -> N ^ V != 0 */
653 tmp = load_cpu_field(VF);
6fbe23d5 654 tmp2 = load_cpu_field(NF);
d9ba4830
PB
655 tcg_gen_xor_i32(tmp, tmp, tmp2);
656 dead_tmp(tmp2);
cb63669a 657 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
658 break;
659 case 12: /* gt: !Z && N == V */
660 inv = gen_new_label();
6fbe23d5 661 tmp = load_cpu_field(ZF);
cb63669a 662 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
663 dead_tmp(tmp);
664 tmp = load_cpu_field(VF);
6fbe23d5 665 tmp2 = load_cpu_field(NF);
d9ba4830
PB
666 tcg_gen_xor_i32(tmp, tmp, tmp2);
667 dead_tmp(tmp2);
cb63669a 668 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
669 gen_set_label(inv);
670 break;
671 case 13: /* le: Z || N != V */
6fbe23d5 672 tmp = load_cpu_field(ZF);
cb63669a 673 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
674 dead_tmp(tmp);
675 tmp = load_cpu_field(VF);
6fbe23d5 676 tmp2 = load_cpu_field(NF);
d9ba4830
PB
677 tcg_gen_xor_i32(tmp, tmp, tmp2);
678 dead_tmp(tmp2);
cb63669a 679 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
680 break;
681 default:
682 fprintf(stderr, "Bad condition code 0x%x\n", cc);
683 abort();
684 }
685 dead_tmp(tmp);
686}
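/* Condition 0xe (always) needs no test and 0xf selects the unconditional
   instruction space, so neither is expected to reach gen_test_cc(); the
   abort() above guards against that.  */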
2c0262af 687
b1d8e52e 688static const uint8_t table_logic_cc[16] = {
2c0262af
FB
689 1, /* and */
690 1, /* xor */
691 0, /* sub */
692 0, /* rsb */
693 0, /* add */
694 0, /* adc */
695 0, /* sbc */
696 0, /* rsc */
697 1, /* andl */
698 1, /* xorl */
699 0, /* cmp */
700 0, /* cmn */
701 1, /* orr */
702 1, /* mov */
703 1, /* bic */
704 1, /* mvn */
705};
3b46e624 706
d9ba4830
PB
707/* Set PC and Thumb state from an immediate address. */
708static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 709{
b26eefb6 710 TCGv tmp;
99c475ab 711
b26eefb6 712 s->is_jmp = DISAS_UPDATE;
d9ba4830 713 if (s->thumb != (addr & 1)) {
155c3eac 714 tmp = new_tmp();
d9ba4830
PB
715 tcg_gen_movi_i32(tmp, addr & 1);
716 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
155c3eac 717 dead_tmp(tmp);
d9ba4830 718 }
155c3eac 719 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
720}
721
722/* Set PC and Thumb state from var. var is marked as dead. */
723static inline void gen_bx(DisasContext *s, TCGv var)
724{
d9ba4830 725 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
726 tcg_gen_andi_i32(cpu_R[15], var, ~1);
727 tcg_gen_andi_i32(var, var, 1);
728 store_cpu_field(var, thumb);
d9ba4830
PB
729}
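/* Bit 0 of the branch target selects the instruction set (BX-style
   interworking): it is copied into the thumb state flag and cleared
   from the PC.  */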
730
21aeb343
JR
731/* Variant of store_reg which uses branch&exchange logic when storing
732 to r15 in ARM architecture v7 and above. The source must be a temporary
733 and will be marked as dead. */
734static inline void store_reg_bx(CPUState *env, DisasContext *s,
735 int reg, TCGv var)
736{
737 if (reg == 15 && ENABLE_ARCH_7) {
738 gen_bx(s, var);
739 } else {
740 store_reg(s, reg, var);
741 }
742}
743
b0109805
PB
744static inline TCGv gen_ld8s(TCGv addr, int index)
745{
746 TCGv tmp = new_tmp();
747 tcg_gen_qemu_ld8s(tmp, addr, index);
748 return tmp;
749}
750static inline TCGv gen_ld8u(TCGv addr, int index)
751{
752 TCGv tmp = new_tmp();
753 tcg_gen_qemu_ld8u(tmp, addr, index);
754 return tmp;
755}
756static inline TCGv gen_ld16s(TCGv addr, int index)
757{
758 TCGv tmp = new_tmp();
759 tcg_gen_qemu_ld16s(tmp, addr, index);
760 return tmp;
761}
762static inline TCGv gen_ld16u(TCGv addr, int index)
763{
764 TCGv tmp = new_tmp();
765 tcg_gen_qemu_ld16u(tmp, addr, index);
766 return tmp;
767}
768static inline TCGv gen_ld32(TCGv addr, int index)
769{
770 TCGv tmp = new_tmp();
771 tcg_gen_qemu_ld32u(tmp, addr, index);
772 return tmp;
773}
84496233
JR
774static inline TCGv_i64 gen_ld64(TCGv addr, int index)
775{
776 TCGv_i64 tmp = tcg_temp_new_i64();
777 tcg_gen_qemu_ld64(tmp, addr, index);
778 return tmp;
779}
b0109805
PB
780static inline void gen_st8(TCGv val, TCGv addr, int index)
781{
782 tcg_gen_qemu_st8(val, addr, index);
783 dead_tmp(val);
784}
785static inline void gen_st16(TCGv val, TCGv addr, int index)
786{
787 tcg_gen_qemu_st16(val, addr, index);
788 dead_tmp(val);
789}
790static inline void gen_st32(TCGv val, TCGv addr, int index)
791{
792 tcg_gen_qemu_st32(val, addr, index);
793 dead_tmp(val);
794}
84496233
JR
795static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
796{
797 tcg_gen_qemu_st64(val, addr, index);
798 tcg_temp_free_i64(val);
799}
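/* The 'index' argument of these load/store helpers is the memory access
   index handed to the tcg_gen_qemu_* ops; callers below pass IS_USER(s),
   so accesses emitted for user-mode translation get user permission
   checks.  */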
b5ff1b31 800
5e3f878a
PB
801static inline void gen_set_pc_im(uint32_t val)
802{
155c3eac 803 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
804}
805
b5ff1b31
FB
806/* Force a TB lookup after an instruction that changes the CPU state. */
807static inline void gen_lookup_tb(DisasContext *s)
808{
a6445c52 809 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
810 s->is_jmp = DISAS_UPDATE;
811}
812
b0109805
PB
813static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
814 TCGv var)
2c0262af 815{
1e8d4eec 816 int val, rm, shift, shiftop;
b26eefb6 817 TCGv offset;
2c0262af
FB
818
819 if (!(insn & (1 << 25))) {
820 /* immediate */
821 val = insn & 0xfff;
822 if (!(insn & (1 << 23)))
823 val = -val;
537730b9 824 if (val != 0)
b0109805 825 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
826 } else {
827 /* shift/register */
828 rm = (insn) & 0xf;
829 shift = (insn >> 7) & 0x1f;
1e8d4eec 830 shiftop = (insn >> 5) & 3;
b26eefb6 831 offset = load_reg(s, rm);
9a119ff6 832 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 833 if (!(insn & (1 << 23)))
b0109805 834 tcg_gen_sub_i32(var, var, offset);
2c0262af 835 else
b0109805 836 tcg_gen_add_i32(var, var, offset);
b26eefb6 837 dead_tmp(offset);
2c0262af
FB
838 }
839}
840
191f9a93 841static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 842 int extra, TCGv var)
2c0262af
FB
843{
844 int val, rm;
b26eefb6 845 TCGv offset;
3b46e624 846
2c0262af
FB
847 if (insn & (1 << 22)) {
848 /* immediate */
849 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
850 if (!(insn & (1 << 23)))
851 val = -val;
18acad92 852 val += extra;
537730b9 853 if (val != 0)
b0109805 854 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
855 } else {
856 /* register */
191f9a93 857 if (extra)
b0109805 858 tcg_gen_addi_i32(var, var, extra);
2c0262af 859 rm = (insn) & 0xf;
b26eefb6 860 offset = load_reg(s, rm);
2c0262af 861 if (!(insn & (1 << 23)))
b0109805 862 tcg_gen_sub_i32(var, var, offset);
2c0262af 863 else
b0109805 864 tcg_gen_add_i32(var, var, offset);
b26eefb6 865 dead_tmp(offset);
2c0262af
FB
866 }
867}
868
4373f3ce
PB
869#define VFP_OP2(name) \
870static inline void gen_vfp_##name(int dp) \
871{ \
872 if (dp) \
873 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
874 else \
875 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
876}
877
4373f3ce
PB
878VFP_OP2(add)
879VFP_OP2(sub)
880VFP_OP2(mul)
881VFP_OP2(div)
882
883#undef VFP_OP2
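/* For example, VFP_OP2(add) above expands to a gen_vfp_add(dp) that calls
   gen_helper_vfp_addd(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env) for double
   precision and gen_helper_vfp_adds(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env)
   for single precision.  */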
884
885static inline void gen_vfp_abs(int dp)
886{
887 if (dp)
888 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
889 else
890 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
891}
892
893static inline void gen_vfp_neg(int dp)
894{
895 if (dp)
896 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
897 else
898 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
899}
900
901static inline void gen_vfp_sqrt(int dp)
902{
903 if (dp)
904 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
905 else
906 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
907}
908
909static inline void gen_vfp_cmp(int dp)
910{
911 if (dp)
912 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
913 else
914 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
915}
916
917static inline void gen_vfp_cmpe(int dp)
918{
919 if (dp)
920 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
921 else
922 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
923}
924
925static inline void gen_vfp_F1_ld0(int dp)
926{
927 if (dp)
5b340b51 928 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 929 else
5b340b51 930 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
931}
932
933static inline void gen_vfp_uito(int dp)
934{
935 if (dp)
936 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
937 else
938 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
939}
940
941static inline void gen_vfp_sito(int dp)
942{
943 if (dp)
66230e0d 944 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 945 else
66230e0d 946 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
947}
948
949static inline void gen_vfp_toui(int dp)
950{
951 if (dp)
952 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
953 else
954 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
955}
956
957static inline void gen_vfp_touiz(int dp)
958{
959 if (dp)
960 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
961 else
962 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
963}
964
965static inline void gen_vfp_tosi(int dp)
966{
967 if (dp)
968 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
969 else
970 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
971}
972
973static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
974{
975 if (dp)
4373f3ce 976 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 977 else
4373f3ce
PB
978 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
979}
980
981#define VFP_GEN_FIX(name) \
982static inline void gen_vfp_##name(int dp, int shift) \
983{ \
b75263d6 984 TCGv tmp_shift = tcg_const_i32(shift); \
4373f3ce 985 if (dp) \
b75263d6 986 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
4373f3ce 987 else \
b75263d6
JR
988 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
989 tcg_temp_free_i32(tmp_shift); \
9ee6e8bb 990}
4373f3ce
PB
991VFP_GEN_FIX(tosh)
992VFP_GEN_FIX(tosl)
993VFP_GEN_FIX(touh)
994VFP_GEN_FIX(toul)
995VFP_GEN_FIX(shto)
996VFP_GEN_FIX(slto)
997VFP_GEN_FIX(uhto)
998VFP_GEN_FIX(ulto)
999#undef VFP_GEN_FIX
9ee6e8bb 1000
312eea9f 1001static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1002{
1003 if (dp)
312eea9f 1004 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1005 else
312eea9f 1006 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1007}
1008
312eea9f 1009static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1010{
1011 if (dp)
312eea9f 1012 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1013 else
312eea9f 1014 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1015}
1016
8e96005d
FB
1017static inline long
1018vfp_reg_offset (int dp, int reg)
1019{
1020 if (dp)
1021 return offsetof(CPUARMState, vfp.regs[reg]);
1022 else if (reg & 1) {
1023 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1024 + offsetof(CPU_DoubleU, l.upper);
1025 } else {
1026 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1027 + offsetof(CPU_DoubleU, l.lower);
1028 }
1029}
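/* Single-precision registers are stored in the low/high halves of the
   double-precision registers, so the offset of sN is derived from
   D(N >> 1) plus the appropriate CPU_DoubleU half.  */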
9ee6e8bb
PB
1030
1031/* Return the offset of a 32-bit piece of a NEON register.
1032 zero is the least significant end of the register. */
1033static inline long
1034neon_reg_offset (int reg, int n)
1035{
1036 int sreg;
1037 sreg = reg * 2 + n;
1038 return vfp_reg_offset(0, sreg);
1039}
1040
8f8e3aa4
PB
1041static TCGv neon_load_reg(int reg, int pass)
1042{
1043 TCGv tmp = new_tmp();
1044 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1045 return tmp;
1046}
1047
1048static void neon_store_reg(int reg, int pass, TCGv var)
1049{
1050 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1051 dead_tmp(var);
1052}
1053
a7812ae4 1054static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1055{
1056 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1057}
1058
a7812ae4 1059static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1060{
1061 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1062}
1063
4373f3ce
PB
1064#define tcg_gen_ld_f32 tcg_gen_ld_i32
1065#define tcg_gen_ld_f64 tcg_gen_ld_i64
1066#define tcg_gen_st_f32 tcg_gen_st_i32
1067#define tcg_gen_st_f64 tcg_gen_st_i64
1068
b7bcbe95
FB
1069static inline void gen_mov_F0_vreg(int dp, int reg)
1070{
1071 if (dp)
4373f3ce 1072 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1073 else
4373f3ce 1074 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1075}
1076
1077static inline void gen_mov_F1_vreg(int dp, int reg)
1078{
1079 if (dp)
4373f3ce 1080 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1081 else
4373f3ce 1082 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1083}
1084
1085static inline void gen_mov_vreg_F0(int dp, int reg)
1086{
1087 if (dp)
4373f3ce 1088 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1089 else
4373f3ce 1090 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1091}
1092
18c9b560
AZ
1093#define ARM_CP_RW_BIT (1 << 20)
1094
a7812ae4 1095static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1096{
1097 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1098}
1099
a7812ae4 1100static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1101{
1102 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1103}
1104
da6b5335 1105static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1106{
da6b5335
FN
1107 TCGv var = new_tmp();
1108 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1109 return var;
e677137d
PB
1110}
1111
da6b5335 1112static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1113{
da6b5335 1114 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
e677137d
PB
1115}
1116
1117static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1118{
1119 iwmmxt_store_reg(cpu_M0, rn);
1120}
1121
1122static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1123{
1124 iwmmxt_load_reg(cpu_M0, rn);
1125}
1126
1127static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1128{
1129 iwmmxt_load_reg(cpu_V1, rn);
1130 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1131}
1132
1133static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1134{
1135 iwmmxt_load_reg(cpu_V1, rn);
1136 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1137}
1138
1139static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1140{
1141 iwmmxt_load_reg(cpu_V1, rn);
1142 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1143}
1144
1145#define IWMMXT_OP(name) \
1146static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1147{ \
1148 iwmmxt_load_reg(cpu_V1, rn); \
1149 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1150}
1151
1152#define IWMMXT_OP_ENV(name) \
1153static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1154{ \
1155 iwmmxt_load_reg(cpu_V1, rn); \
1156 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1157}
1158
1159#define IWMMXT_OP_ENV_SIZE(name) \
1160IWMMXT_OP_ENV(name##b) \
1161IWMMXT_OP_ENV(name##w) \
1162IWMMXT_OP_ENV(name##l)
1163
1164#define IWMMXT_OP_ENV1(name) \
1165static inline void gen_op_iwmmxt_##name##_M0(void) \
1166{ \
1167 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1168}
1169
1170IWMMXT_OP(maddsq)
1171IWMMXT_OP(madduq)
1172IWMMXT_OP(sadb)
1173IWMMXT_OP(sadw)
1174IWMMXT_OP(mulslw)
1175IWMMXT_OP(mulshw)
1176IWMMXT_OP(mululw)
1177IWMMXT_OP(muluhw)
1178IWMMXT_OP(macsw)
1179IWMMXT_OP(macuw)
1180
1181IWMMXT_OP_ENV_SIZE(unpackl)
1182IWMMXT_OP_ENV_SIZE(unpackh)
1183
1184IWMMXT_OP_ENV1(unpacklub)
1185IWMMXT_OP_ENV1(unpackluw)
1186IWMMXT_OP_ENV1(unpacklul)
1187IWMMXT_OP_ENV1(unpackhub)
1188IWMMXT_OP_ENV1(unpackhuw)
1189IWMMXT_OP_ENV1(unpackhul)
1190IWMMXT_OP_ENV1(unpacklsb)
1191IWMMXT_OP_ENV1(unpacklsw)
1192IWMMXT_OP_ENV1(unpacklsl)
1193IWMMXT_OP_ENV1(unpackhsb)
1194IWMMXT_OP_ENV1(unpackhsw)
1195IWMMXT_OP_ENV1(unpackhsl)
1196
1197IWMMXT_OP_ENV_SIZE(cmpeq)
1198IWMMXT_OP_ENV_SIZE(cmpgtu)
1199IWMMXT_OP_ENV_SIZE(cmpgts)
1200
1201IWMMXT_OP_ENV_SIZE(mins)
1202IWMMXT_OP_ENV_SIZE(minu)
1203IWMMXT_OP_ENV_SIZE(maxs)
1204IWMMXT_OP_ENV_SIZE(maxu)
1205
1206IWMMXT_OP_ENV_SIZE(subn)
1207IWMMXT_OP_ENV_SIZE(addn)
1208IWMMXT_OP_ENV_SIZE(subu)
1209IWMMXT_OP_ENV_SIZE(addu)
1210IWMMXT_OP_ENV_SIZE(subs)
1211IWMMXT_OP_ENV_SIZE(adds)
1212
1213IWMMXT_OP_ENV(avgb0)
1214IWMMXT_OP_ENV(avgb1)
1215IWMMXT_OP_ENV(avgw0)
1216IWMMXT_OP_ENV(avgw1)
1217
1218IWMMXT_OP(msadb)
1219
1220IWMMXT_OP_ENV(packuw)
1221IWMMXT_OP_ENV(packul)
1222IWMMXT_OP_ENV(packuq)
1223IWMMXT_OP_ENV(packsw)
1224IWMMXT_OP_ENV(packsl)
1225IWMMXT_OP_ENV(packsq)
1226
e677137d
PB
1227static void gen_op_iwmmxt_set_mup(void)
1228{
1229 TCGv tmp;
1230 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1231 tcg_gen_ori_i32(tmp, tmp, 2);
1232 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1233}
1234
1235static void gen_op_iwmmxt_set_cup(void)
1236{
1237 TCGv tmp;
1238 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1239 tcg_gen_ori_i32(tmp, tmp, 1);
1240 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1241}
1242
1243static void gen_op_iwmmxt_setpsr_nz(void)
1244{
1245 TCGv tmp = new_tmp();
1246 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1247 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1248}
1249
1250static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1251{
1252 iwmmxt_load_reg(cpu_V1, rn);
86831435 1253 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1254 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1255}
1256
da6b5335 1257static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1258{
1259 int rd;
1260 uint32_t offset;
da6b5335 1261 TCGv tmp;
18c9b560
AZ
1262
1263 rd = (insn >> 16) & 0xf;
da6b5335 1264 tmp = load_reg(s, rd);
18c9b560
AZ
1265
1266 offset = (insn & 0xff) << ((insn >> 7) & 2);
1267 if (insn & (1 << 24)) {
1268 /* Pre indexed */
1269 if (insn & (1 << 23))
da6b5335 1270 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1271 else
da6b5335
FN
1272 tcg_gen_addi_i32(tmp, tmp, -offset);
1273 tcg_gen_mov_i32(dest, tmp);
18c9b560 1274 if (insn & (1 << 21))
da6b5335
FN
1275 store_reg(s, rd, tmp);
1276 else
1277 dead_tmp(tmp);
18c9b560
AZ
1278 } else if (insn & (1 << 21)) {
1279 /* Post indexed */
da6b5335 1280 tcg_gen_mov_i32(dest, tmp);
18c9b560 1281 if (insn & (1 << 23))
da6b5335 1282 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1283 else
da6b5335
FN
1284 tcg_gen_addi_i32(tmp, tmp, -offset);
1285 store_reg(s, rd, tmp);
18c9b560
AZ
1286 } else if (!(insn & (1 << 23)))
1287 return 1;
1288 return 0;
1289}
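/* gen_iwmmxt_address() computes the effective address into 'dest' for the
   pre-indexed, post-indexed and offset forms, writing back the base
   register when requested; it returns 1 for encodings that are not valid
   addressing modes.  */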
1290
da6b5335 1291static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1292{
1293 int rd = (insn >> 0) & 0xf;
da6b5335 1294 TCGv tmp;
18c9b560 1295
da6b5335
FN
1296 if (insn & (1 << 8)) {
1297 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1298 return 1;
da6b5335
FN
1299 } else {
1300 tmp = iwmmxt_load_creg(rd);
1301 }
1302 } else {
1303 tmp = new_tmp();
1304 iwmmxt_load_reg(cpu_V0, rd);
1305 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1306 }
1307 tcg_gen_andi_i32(tmp, tmp, mask);
1308 tcg_gen_mov_i32(dest, tmp);
1309 dead_tmp(tmp);
18c9b560
AZ
1310 return 0;
1311}
1312
 1313/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
 1314 (i.e. an undefined instruction). */
1315static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1316{
1317 int rd, wrd;
1318 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1319 TCGv addr;
1320 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1321
1322 if ((insn & 0x0e000e00) == 0x0c000000) {
1323 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1324 wrd = insn & 0xf;
1325 rdlo = (insn >> 12) & 0xf;
1326 rdhi = (insn >> 16) & 0xf;
1327 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1328 iwmmxt_load_reg(cpu_V0, wrd);
1329 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1330 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1331 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1332 } else { /* TMCRR */
da6b5335
FN
1333 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1334 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1335 gen_op_iwmmxt_set_mup();
1336 }
1337 return 0;
1338 }
1339
1340 wrd = (insn >> 12) & 0xf;
da6b5335
FN
1341 addr = new_tmp();
1342 if (gen_iwmmxt_address(s, insn, addr)) {
1343 dead_tmp(addr);
18c9b560 1344 return 1;
da6b5335 1345 }
18c9b560
AZ
1346 if (insn & ARM_CP_RW_BIT) {
1347 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
da6b5335
FN
1348 tmp = new_tmp();
1349 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1350 iwmmxt_store_creg(wrd, tmp);
18c9b560 1351 } else {
e677137d
PB
1352 i = 1;
1353 if (insn & (1 << 8)) {
1354 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1355 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1356 i = 0;
1357 } else { /* WLDRW wRd */
da6b5335 1358 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1359 }
1360 } else {
1361 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1362 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1363 } else { /* WLDRB */
da6b5335 1364 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1365 }
1366 }
1367 if (i) {
1368 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1369 dead_tmp(tmp);
1370 }
18c9b560
AZ
1371 gen_op_iwmmxt_movq_wRn_M0(wrd);
1372 }
1373 } else {
1374 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1375 tmp = iwmmxt_load_creg(wrd);
1376 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1377 } else {
1378 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1379 tmp = new_tmp();
1380 if (insn & (1 << 8)) {
1381 if (insn & (1 << 22)) { /* WSTRD */
1382 dead_tmp(tmp);
da6b5335 1383 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1384 } else { /* WSTRW wRd */
1385 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1386 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1387 }
1388 } else {
1389 if (insn & (1 << 22)) { /* WSTRH */
1390 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1391 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1392 } else { /* WSTRB */
1393 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1394 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1395 }
1396 }
18c9b560
AZ
1397 }
1398 }
1399 return 0;
1400 }
1401
1402 if ((insn & 0x0f000000) != 0x0e000000)
1403 return 1;
1404
1405 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1406 case 0x000: /* WOR */
1407 wrd = (insn >> 12) & 0xf;
1408 rd0 = (insn >> 0) & 0xf;
1409 rd1 = (insn >> 16) & 0xf;
1410 gen_op_iwmmxt_movq_M0_wRn(rd0);
1411 gen_op_iwmmxt_orq_M0_wRn(rd1);
1412 gen_op_iwmmxt_setpsr_nz();
1413 gen_op_iwmmxt_movq_wRn_M0(wrd);
1414 gen_op_iwmmxt_set_mup();
1415 gen_op_iwmmxt_set_cup();
1416 break;
1417 case 0x011: /* TMCR */
1418 if (insn & 0xf)
1419 return 1;
1420 rd = (insn >> 12) & 0xf;
1421 wrd = (insn >> 16) & 0xf;
1422 switch (wrd) {
1423 case ARM_IWMMXT_wCID:
1424 case ARM_IWMMXT_wCASF:
1425 break;
1426 case ARM_IWMMXT_wCon:
1427 gen_op_iwmmxt_set_cup();
1428 /* Fall through. */
1429 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1430 tmp = iwmmxt_load_creg(wrd);
1431 tmp2 = load_reg(s, rd);
f669df27 1432 tcg_gen_andc_i32(tmp, tmp, tmp2);
da6b5335
FN
1433 dead_tmp(tmp2);
1434 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1435 break;
1436 case ARM_IWMMXT_wCGR0:
1437 case ARM_IWMMXT_wCGR1:
1438 case ARM_IWMMXT_wCGR2:
1439 case ARM_IWMMXT_wCGR3:
1440 gen_op_iwmmxt_set_cup();
da6b5335
FN
1441 tmp = load_reg(s, rd);
1442 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1443 break;
1444 default:
1445 return 1;
1446 }
1447 break;
1448 case 0x100: /* WXOR */
1449 wrd = (insn >> 12) & 0xf;
1450 rd0 = (insn >> 0) & 0xf;
1451 rd1 = (insn >> 16) & 0xf;
1452 gen_op_iwmmxt_movq_M0_wRn(rd0);
1453 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1454 gen_op_iwmmxt_setpsr_nz();
1455 gen_op_iwmmxt_movq_wRn_M0(wrd);
1456 gen_op_iwmmxt_set_mup();
1457 gen_op_iwmmxt_set_cup();
1458 break;
1459 case 0x111: /* TMRC */
1460 if (insn & 0xf)
1461 return 1;
1462 rd = (insn >> 12) & 0xf;
1463 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1464 tmp = iwmmxt_load_creg(wrd);
1465 store_reg(s, rd, tmp);
18c9b560
AZ
1466 break;
1467 case 0x300: /* WANDN */
1468 wrd = (insn >> 12) & 0xf;
1469 rd0 = (insn >> 0) & 0xf;
1470 rd1 = (insn >> 16) & 0xf;
1471 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1472 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1473 gen_op_iwmmxt_andq_M0_wRn(rd1);
1474 gen_op_iwmmxt_setpsr_nz();
1475 gen_op_iwmmxt_movq_wRn_M0(wrd);
1476 gen_op_iwmmxt_set_mup();
1477 gen_op_iwmmxt_set_cup();
1478 break;
1479 case 0x200: /* WAND */
1480 wrd = (insn >> 12) & 0xf;
1481 rd0 = (insn >> 0) & 0xf;
1482 rd1 = (insn >> 16) & 0xf;
1483 gen_op_iwmmxt_movq_M0_wRn(rd0);
1484 gen_op_iwmmxt_andq_M0_wRn(rd1);
1485 gen_op_iwmmxt_setpsr_nz();
1486 gen_op_iwmmxt_movq_wRn_M0(wrd);
1487 gen_op_iwmmxt_set_mup();
1488 gen_op_iwmmxt_set_cup();
1489 break;
1490 case 0x810: case 0xa10: /* WMADD */
1491 wrd = (insn >> 12) & 0xf;
1492 rd0 = (insn >> 0) & 0xf;
1493 rd1 = (insn >> 16) & 0xf;
1494 gen_op_iwmmxt_movq_M0_wRn(rd0);
1495 if (insn & (1 << 21))
1496 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1497 else
1498 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1499 gen_op_iwmmxt_movq_wRn_M0(wrd);
1500 gen_op_iwmmxt_set_mup();
1501 break;
1502 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1503 wrd = (insn >> 12) & 0xf;
1504 rd0 = (insn >> 16) & 0xf;
1505 rd1 = (insn >> 0) & 0xf;
1506 gen_op_iwmmxt_movq_M0_wRn(rd0);
1507 switch ((insn >> 22) & 3) {
1508 case 0:
1509 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1510 break;
1511 case 1:
1512 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1513 break;
1514 case 2:
1515 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1516 break;
1517 case 3:
1518 return 1;
1519 }
1520 gen_op_iwmmxt_movq_wRn_M0(wrd);
1521 gen_op_iwmmxt_set_mup();
1522 gen_op_iwmmxt_set_cup();
1523 break;
1524 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1525 wrd = (insn >> 12) & 0xf;
1526 rd0 = (insn >> 16) & 0xf;
1527 rd1 = (insn >> 0) & 0xf;
1528 gen_op_iwmmxt_movq_M0_wRn(rd0);
1529 switch ((insn >> 22) & 3) {
1530 case 0:
1531 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1532 break;
1533 case 1:
1534 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1535 break;
1536 case 2:
1537 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1538 break;
1539 case 3:
1540 return 1;
1541 }
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1545 break;
1546 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 16) & 0xf;
1549 rd1 = (insn >> 0) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 if (insn & (1 << 22))
1552 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1553 else
1554 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1555 if (!(insn & (1 << 20)))
1556 gen_op_iwmmxt_addl_M0_wRn(wrd);
1557 gen_op_iwmmxt_movq_wRn_M0(wrd);
1558 gen_op_iwmmxt_set_mup();
1559 break;
1560 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1561 wrd = (insn >> 12) & 0xf;
1562 rd0 = (insn >> 16) & 0xf;
1563 rd1 = (insn >> 0) & 0xf;
1564 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1565 if (insn & (1 << 21)) {
1566 if (insn & (1 << 20))
1567 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1568 else
1569 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1570 } else {
1571 if (insn & (1 << 20))
1572 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1573 else
1574 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1575 }
18c9b560
AZ
1576 gen_op_iwmmxt_movq_wRn_M0(wrd);
1577 gen_op_iwmmxt_set_mup();
1578 break;
1579 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1580 wrd = (insn >> 12) & 0xf;
1581 rd0 = (insn >> 16) & 0xf;
1582 rd1 = (insn >> 0) & 0xf;
1583 gen_op_iwmmxt_movq_M0_wRn(rd0);
1584 if (insn & (1 << 21))
1585 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1586 else
1587 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1588 if (!(insn & (1 << 20))) {
e677137d
PB
1589 iwmmxt_load_reg(cpu_V1, wrd);
1590 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1591 }
1592 gen_op_iwmmxt_movq_wRn_M0(wrd);
1593 gen_op_iwmmxt_set_mup();
1594 break;
1595 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1596 wrd = (insn >> 12) & 0xf;
1597 rd0 = (insn >> 16) & 0xf;
1598 rd1 = (insn >> 0) & 0xf;
1599 gen_op_iwmmxt_movq_M0_wRn(rd0);
1600 switch ((insn >> 22) & 3) {
1601 case 0:
1602 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1603 break;
1604 case 1:
1605 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1606 break;
1607 case 2:
1608 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1609 break;
1610 case 3:
1611 return 1;
1612 }
1613 gen_op_iwmmxt_movq_wRn_M0(wrd);
1614 gen_op_iwmmxt_set_mup();
1615 gen_op_iwmmxt_set_cup();
1616 break;
1617 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1618 wrd = (insn >> 12) & 0xf;
1619 rd0 = (insn >> 16) & 0xf;
1620 rd1 = (insn >> 0) & 0xf;
1621 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1622 if (insn & (1 << 22)) {
1623 if (insn & (1 << 20))
1624 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1625 else
1626 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1627 } else {
1628 if (insn & (1 << 20))
1629 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1630 else
1631 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1632 }
18c9b560
AZ
1633 gen_op_iwmmxt_movq_wRn_M0(wrd);
1634 gen_op_iwmmxt_set_mup();
1635 gen_op_iwmmxt_set_cup();
1636 break;
1637 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1638 wrd = (insn >> 12) & 0xf;
1639 rd0 = (insn >> 16) & 0xf;
1640 rd1 = (insn >> 0) & 0xf;
1641 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1642 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1643 tcg_gen_andi_i32(tmp, tmp, 7);
1644 iwmmxt_load_reg(cpu_V1, rd1);
1645 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1646 dead_tmp(tmp);
18c9b560
AZ
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 break;
1650 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1651 if (((insn >> 6) & 3) == 3)
1652 return 1;
18c9b560
AZ
1653 rd = (insn >> 12) & 0xf;
1654 wrd = (insn >> 16) & 0xf;
da6b5335 1655 tmp = load_reg(s, rd);
18c9b560
AZ
1656 gen_op_iwmmxt_movq_M0_wRn(wrd);
1657 switch ((insn >> 6) & 3) {
1658 case 0:
da6b5335
FN
1659 tmp2 = tcg_const_i32(0xff);
1660 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1661 break;
1662 case 1:
da6b5335
FN
1663 tmp2 = tcg_const_i32(0xffff);
1664 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1665 break;
1666 case 2:
da6b5335
FN
1667 tmp2 = tcg_const_i32(0xffffffff);
1668 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1669 break;
da6b5335
FN
1670 default:
1671 TCGV_UNUSED(tmp2);
1672 TCGV_UNUSED(tmp3);
18c9b560 1673 }
da6b5335
FN
1674 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1675 tcg_temp_free(tmp3);
1676 tcg_temp_free(tmp2);
1677 dead_tmp(tmp);
18c9b560
AZ
1678 gen_op_iwmmxt_movq_wRn_M0(wrd);
1679 gen_op_iwmmxt_set_mup();
1680 break;
1681 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1682 rd = (insn >> 12) & 0xf;
1683 wrd = (insn >> 16) & 0xf;
da6b5335 1684 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1685 return 1;
1686 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335 1687 tmp = new_tmp();
18c9b560
AZ
1688 switch ((insn >> 22) & 3) {
1689 case 0:
da6b5335
FN
1690 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1691 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1692 if (insn & 8) {
1693 tcg_gen_ext8s_i32(tmp, tmp);
1694 } else {
1695 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1696 }
1697 break;
1698 case 1:
da6b5335
FN
1699 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1700 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1701 if (insn & 8) {
1702 tcg_gen_ext16s_i32(tmp, tmp);
1703 } else {
1704 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1705 }
1706 break;
1707 case 2:
da6b5335
FN
1708 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1709 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1710 break;
18c9b560 1711 }
da6b5335 1712 store_reg(s, rd, tmp);
18c9b560
AZ
1713 break;
1714 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1715 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1716 return 1;
da6b5335 1717 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1718 switch ((insn >> 22) & 3) {
1719 case 0:
da6b5335 1720 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1721 break;
1722 case 1:
da6b5335 1723 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1724 break;
1725 case 2:
da6b5335 1726 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1727 break;
18c9b560 1728 }
da6b5335
FN
1729 tcg_gen_shli_i32(tmp, tmp, 28);
1730 gen_set_nzcv(tmp);
1731 dead_tmp(tmp);
18c9b560
AZ
1732 break;
1733 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1734 if (((insn >> 6) & 3) == 3)
1735 return 1;
18c9b560
AZ
1736 rd = (insn >> 12) & 0xf;
1737 wrd = (insn >> 16) & 0xf;
da6b5335 1738 tmp = load_reg(s, rd);
18c9b560
AZ
1739 switch ((insn >> 6) & 3) {
1740 case 0:
da6b5335 1741 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1742 break;
1743 case 1:
da6b5335 1744 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1745 break;
1746 case 2:
da6b5335 1747 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1748 break;
18c9b560 1749 }
da6b5335 1750 dead_tmp(tmp);
18c9b560
AZ
1751 gen_op_iwmmxt_movq_wRn_M0(wrd);
1752 gen_op_iwmmxt_set_mup();
1753 break;
1754 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1755 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1756 return 1;
da6b5335
FN
1757 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1758 tmp2 = new_tmp();
1759 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1760 switch ((insn >> 22) & 3) {
1761 case 0:
1762 for (i = 0; i < 7; i ++) {
da6b5335
FN
1763 tcg_gen_shli_i32(tmp2, tmp2, 4);
1764 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1765 }
1766 break;
1767 case 1:
1768 for (i = 0; i < 3; i ++) {
da6b5335
FN
1769 tcg_gen_shli_i32(tmp2, tmp2, 8);
1770 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1771 }
1772 break;
1773 case 2:
da6b5335
FN
1774 tcg_gen_shli_i32(tmp2, tmp2, 16);
1775 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1776 break;
18c9b560 1777 }
da6b5335
FN
1778 gen_set_nzcv(tmp);
1779 dead_tmp(tmp2);
1780 dead_tmp(tmp);
18c9b560
AZ
1781 break;
1782 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1783 wrd = (insn >> 12) & 0xf;
1784 rd0 = (insn >> 16) & 0xf;
1785 gen_op_iwmmxt_movq_M0_wRn(rd0);
1786 switch ((insn >> 22) & 3) {
1787 case 0:
e677137d 1788 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1789 break;
1790 case 1:
e677137d 1791 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1792 break;
1793 case 2:
e677137d 1794 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1795 break;
1796 case 3:
1797 return 1;
1798 }
1799 gen_op_iwmmxt_movq_wRn_M0(wrd);
1800 gen_op_iwmmxt_set_mup();
1801 break;
1802 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1803 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1804 return 1;
da6b5335
FN
1805 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1806 tmp2 = new_tmp();
1807 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1808 switch ((insn >> 22) & 3) {
1809 case 0:
1810 for (i = 0; i < 7; i ++) {
da6b5335
FN
1811 tcg_gen_shli_i32(tmp2, tmp2, 4);
1812 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1813 }
1814 break;
1815 case 1:
1816 for (i = 0; i < 3; i ++) {
da6b5335
FN
1817 tcg_gen_shli_i32(tmp2, tmp2, 8);
1818 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1819 }
1820 break;
1821 case 2:
da6b5335
FN
1822 tcg_gen_shli_i32(tmp2, tmp2, 16);
1823 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1824 break;
18c9b560 1825 }
da6b5335
FN
1826 gen_set_nzcv(tmp);
1827 dead_tmp(tmp2);
1828 dead_tmp(tmp);
18c9b560
AZ
1829 break;
1830 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1831 rd = (insn >> 12) & 0xf;
1832 rd0 = (insn >> 16) & 0xf;
da6b5335 1833 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1834 return 1;
1835 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 1836 tmp = new_tmp();
18c9b560
AZ
1837 switch ((insn >> 22) & 3) {
1838 case 0:
da6b5335 1839 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1840 break;
1841 case 1:
da6b5335 1842 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1843 break;
1844 case 2:
da6b5335 1845 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1846 break;
18c9b560 1847 }
da6b5335 1848 store_reg(s, rd, tmp);
18c9b560
AZ
1849 break;
1850 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1851 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1852 wrd = (insn >> 12) & 0xf;
1853 rd0 = (insn >> 16) & 0xf;
1854 rd1 = (insn >> 0) & 0xf;
1855 gen_op_iwmmxt_movq_M0_wRn(rd0);
1856 switch ((insn >> 22) & 3) {
1857 case 0:
1858 if (insn & (1 << 21))
1859 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1860 else
1861 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1862 break;
1863 case 1:
1864 if (insn & (1 << 21))
1865 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1866 else
1867 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1868 break;
1869 case 2:
1870 if (insn & (1 << 21))
1871 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1872 else
1873 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1874 break;
1875 case 3:
1876 return 1;
1877 }
1878 gen_op_iwmmxt_movq_wRn_M0(wrd);
1879 gen_op_iwmmxt_set_mup();
1880 gen_op_iwmmxt_set_cup();
1881 break;
1882 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1883 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1884 wrd = (insn >> 12) & 0xf;
1885 rd0 = (insn >> 16) & 0xf;
1886 gen_op_iwmmxt_movq_M0_wRn(rd0);
1887 switch ((insn >> 22) & 3) {
1888 case 0:
1889 if (insn & (1 << 21))
1890 gen_op_iwmmxt_unpacklsb_M0();
1891 else
1892 gen_op_iwmmxt_unpacklub_M0();
1893 break;
1894 case 1:
1895 if (insn & (1 << 21))
1896 gen_op_iwmmxt_unpacklsw_M0();
1897 else
1898 gen_op_iwmmxt_unpackluw_M0();
1899 break;
1900 case 2:
1901 if (insn & (1 << 21))
1902 gen_op_iwmmxt_unpacklsl_M0();
1903 else
1904 gen_op_iwmmxt_unpacklul_M0();
1905 break;
1906 case 3:
1907 return 1;
1908 }
1909 gen_op_iwmmxt_movq_wRn_M0(wrd);
1910 gen_op_iwmmxt_set_mup();
1911 gen_op_iwmmxt_set_cup();
1912 break;
1913 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1914 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1915 wrd = (insn >> 12) & 0xf;
1916 rd0 = (insn >> 16) & 0xf;
1917 gen_op_iwmmxt_movq_M0_wRn(rd0);
1918 switch ((insn >> 22) & 3) {
1919 case 0:
1920 if (insn & (1 << 21))
1921 gen_op_iwmmxt_unpackhsb_M0();
1922 else
1923 gen_op_iwmmxt_unpackhub_M0();
1924 break;
1925 case 1:
1926 if (insn & (1 << 21))
1927 gen_op_iwmmxt_unpackhsw_M0();
1928 else
1929 gen_op_iwmmxt_unpackhuw_M0();
1930 break;
1931 case 2:
1932 if (insn & (1 << 21))
1933 gen_op_iwmmxt_unpackhsl_M0();
1934 else
1935 gen_op_iwmmxt_unpackhul_M0();
1936 break;
1937 case 3:
1938 return 1;
1939 }
1940 gen_op_iwmmxt_movq_wRn_M0(wrd);
1941 gen_op_iwmmxt_set_mup();
1942 gen_op_iwmmxt_set_cup();
1943 break;
1944 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1945 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1946 if (((insn >> 22) & 3) == 0)
1947 return 1;
18c9b560
AZ
1948 wrd = (insn >> 12) & 0xf;
1949 rd0 = (insn >> 16) & 0xf;
1950 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1951 tmp = new_tmp();
1952 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1953 dead_tmp(tmp);
18c9b560 1954 return 1;
da6b5335 1955 }
18c9b560 1956 switch ((insn >> 22) & 3) {
18c9b560 1957 case 1:
da6b5335 1958 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1959 break;
1960 case 2:
da6b5335 1961 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1962 break;
1963 case 3:
da6b5335 1964 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1965 break;
1966 }
da6b5335 1967 dead_tmp(tmp);
18c9b560
AZ
1968 gen_op_iwmmxt_movq_wRn_M0(wrd);
1969 gen_op_iwmmxt_set_mup();
1970 gen_op_iwmmxt_set_cup();
1971 break;
1972 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1973 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1974 if (((insn >> 22) & 3) == 0)
1975 return 1;
18c9b560
AZ
1976 wrd = (insn >> 12) & 0xf;
1977 rd0 = (insn >> 16) & 0xf;
1978 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1979 tmp = new_tmp();
1980 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1981 dead_tmp(tmp);
18c9b560 1982 return 1;
da6b5335 1983 }
18c9b560 1984 switch ((insn >> 22) & 3) {
18c9b560 1985 case 1:
da6b5335 1986 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1987 break;
1988 case 2:
da6b5335 1989 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1990 break;
1991 case 3:
da6b5335 1992 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1993 break;
1994 }
da6b5335 1995 dead_tmp(tmp);
18c9b560
AZ
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
1999 break;
2000 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2001 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2002 if (((insn >> 22) & 3) == 0)
2003 return 1;
18c9b560
AZ
2004 wrd = (insn >> 12) & 0xf;
2005 rd0 = (insn >> 16) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2007 tmp = new_tmp();
2008 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2009 dead_tmp(tmp);
18c9b560 2010 return 1;
da6b5335 2011 }
18c9b560 2012 switch ((insn >> 22) & 3) {
18c9b560 2013 case 1:
da6b5335 2014 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2015 break;
2016 case 2:
da6b5335 2017 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2018 break;
2019 case 3:
da6b5335 2020 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2021 break;
2022 }
da6b5335 2023 dead_tmp(tmp);
18c9b560
AZ
2024 gen_op_iwmmxt_movq_wRn_M0(wrd);
2025 gen_op_iwmmxt_set_mup();
2026 gen_op_iwmmxt_set_cup();
2027 break;
2028 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2029 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2030 if (((insn >> 22) & 3) == 0)
2031 return 1;
18c9b560
AZ
2032 wrd = (insn >> 12) & 0xf;
2033 rd0 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2035 tmp = new_tmp();
18c9b560 2036 switch ((insn >> 22) & 3) {
18c9b560 2037 case 1:
da6b5335
FN
2038 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2039 dead_tmp(tmp);
18c9b560 2040 return 1;
da6b5335
FN
2041 }
2042 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2043 break;
2044 case 2:
da6b5335
FN
2045 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2046 dead_tmp(tmp);
18c9b560 2047 return 1;
da6b5335
FN
2048 }
2049 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2050 break;
2051 case 3:
da6b5335
FN
2052 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2053 dead_tmp(tmp);
18c9b560 2054 return 1;
da6b5335
FN
2055 }
2056 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2057 break;
2058 }
da6b5335 2059 dead_tmp(tmp);
18c9b560
AZ
2060 gen_op_iwmmxt_movq_wRn_M0(wrd);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2063 break;
2064 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2065 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2066 wrd = (insn >> 12) & 0xf;
2067 rd0 = (insn >> 16) & 0xf;
2068 rd1 = (insn >> 0) & 0xf;
2069 gen_op_iwmmxt_movq_M0_wRn(rd0);
2070 switch ((insn >> 22) & 3) {
2071 case 0:
2072 if (insn & (1 << 21))
2073 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2074 else
2075 gen_op_iwmmxt_minub_M0_wRn(rd1);
2076 break;
2077 case 1:
2078 if (insn & (1 << 21))
2079 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2080 else
2081 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2082 break;
2083 case 2:
2084 if (insn & (1 << 21))
2085 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2086 else
2087 gen_op_iwmmxt_minul_M0_wRn(rd1);
2088 break;
2089 case 3:
2090 return 1;
2091 }
2092 gen_op_iwmmxt_movq_wRn_M0(wrd);
2093 gen_op_iwmmxt_set_mup();
2094 break;
2095 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2096 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2097 wrd = (insn >> 12) & 0xf;
2098 rd0 = (insn >> 16) & 0xf;
2099 rd1 = (insn >> 0) & 0xf;
2100 gen_op_iwmmxt_movq_M0_wRn(rd0);
2101 switch ((insn >> 22) & 3) {
2102 case 0:
2103 if (insn & (1 << 21))
2104 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2105 else
2106 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2107 break;
2108 case 1:
2109 if (insn & (1 << 21))
2110 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2111 else
2112 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2113 break;
2114 case 2:
2115 if (insn & (1 << 21))
2116 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2117 else
2118 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2119 break;
2120 case 3:
2121 return 1;
2122 }
2123 gen_op_iwmmxt_movq_wRn_M0(wrd);
2124 gen_op_iwmmxt_set_mup();
2125 break;
2126 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2127 case 0x402: case 0x502: case 0x602: case 0x702:
2128 wrd = (insn >> 12) & 0xf;
2129 rd0 = (insn >> 16) & 0xf;
2130 rd1 = (insn >> 0) & 0xf;
2131 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2132 tmp = tcg_const_i32((insn >> 20) & 3);
2133 iwmmxt_load_reg(cpu_V1, rd1);
2134 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2135 tcg_temp_free(tmp);
18c9b560
AZ
2136 gen_op_iwmmxt_movq_wRn_M0(wrd);
2137 gen_op_iwmmxt_set_mup();
2138 break;
2139 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2140 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2141 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2142 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2143 wrd = (insn >> 12) & 0xf;
2144 rd0 = (insn >> 16) & 0xf;
2145 rd1 = (insn >> 0) & 0xf;
2146 gen_op_iwmmxt_movq_M0_wRn(rd0);
2147 switch ((insn >> 20) & 0xf) {
2148 case 0x0:
2149 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2150 break;
2151 case 0x1:
2152 gen_op_iwmmxt_subub_M0_wRn(rd1);
2153 break;
2154 case 0x3:
2155 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2156 break;
2157 case 0x4:
2158 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2159 break;
2160 case 0x5:
2161 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2162 break;
2163 case 0x7:
2164 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2165 break;
2166 case 0x8:
2167 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2168 break;
2169 case 0x9:
2170 gen_op_iwmmxt_subul_M0_wRn(rd1);
2171 break;
2172 case 0xb:
2173 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2174 break;
2175 default:
2176 return 1;
2177 }
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 gen_op_iwmmxt_set_cup();
2181 break;
2182 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2183 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2184 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2185 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2186 wrd = (insn >> 12) & 0xf;
2187 rd0 = (insn >> 16) & 0xf;
2188 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2189 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2190 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2191 tcg_temp_free(tmp);
18c9b560
AZ
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 gen_op_iwmmxt_set_cup();
2195 break;
2196 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2197 case 0x418: case 0x518: case 0x618: case 0x718:
2198 case 0x818: case 0x918: case 0xa18: case 0xb18:
2199 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2200 wrd = (insn >> 12) & 0xf;
2201 rd0 = (insn >> 16) & 0xf;
2202 rd1 = (insn >> 0) & 0xf;
2203 gen_op_iwmmxt_movq_M0_wRn(rd0);
2204 switch ((insn >> 20) & 0xf) {
2205 case 0x0:
2206 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2207 break;
2208 case 0x1:
2209 gen_op_iwmmxt_addub_M0_wRn(rd1);
2210 break;
2211 case 0x3:
2212 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2213 break;
2214 case 0x4:
2215 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2216 break;
2217 case 0x5:
2218 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2219 break;
2220 case 0x7:
2221 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2222 break;
2223 case 0x8:
2224 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2225 break;
2226 case 0x9:
2227 gen_op_iwmmxt_addul_M0_wRn(rd1);
2228 break;
2229 case 0xb:
2230 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2231 break;
2232 default:
2233 return 1;
2234 }
2235 gen_op_iwmmxt_movq_wRn_M0(wrd);
2236 gen_op_iwmmxt_set_mup();
2237 gen_op_iwmmxt_set_cup();
2238 break;
2239 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2240 case 0x408: case 0x508: case 0x608: case 0x708:
2241 case 0x808: case 0x908: case 0xa08: case 0xb08:
2242 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2243 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2244 return 1;
18c9b560
AZ
2245 wrd = (insn >> 12) & 0xf;
2246 rd0 = (insn >> 16) & 0xf;
2247 rd1 = (insn >> 0) & 0xf;
2248 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2249 switch ((insn >> 22) & 3) {
18c9b560
AZ
2250 case 1:
2251 if (insn & (1 << 21))
2252 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2253 else
2254 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2255 break;
2256 case 2:
2257 if (insn & (1 << 21))
2258 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2259 else
2260 gen_op_iwmmxt_packul_M0_wRn(rd1);
2261 break;
2262 case 3:
2263 if (insn & (1 << 21))
2264 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2265 else
2266 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2267 break;
2268 }
2269 gen_op_iwmmxt_movq_wRn_M0(wrd);
2270 gen_op_iwmmxt_set_mup();
2271 gen_op_iwmmxt_set_cup();
2272 break;
2273 case 0x201: case 0x203: case 0x205: case 0x207:
2274 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2275 case 0x211: case 0x213: case 0x215: case 0x217:
2276 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2277 wrd = (insn >> 5) & 0xf;
2278 rd0 = (insn >> 12) & 0xf;
2279 rd1 = (insn >> 0) & 0xf;
2280 if (rd0 == 0xf || rd1 == 0xf)
2281 return 1;
2282 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2283 tmp = load_reg(s, rd0);
2284 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2285 switch ((insn >> 16) & 0xf) {
2286 case 0x0: /* TMIA */
da6b5335 2287 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2288 break;
2289 case 0x8: /* TMIAPH */
da6b5335 2290 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2291 break;
2292 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2293 if (insn & (1 << 16))
da6b5335 2294 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2295 if (insn & (1 << 17))
da6b5335
FN
2296 tcg_gen_shri_i32(tmp2, tmp2, 16);
2297 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2298 break;
2299 default:
da6b5335
FN
2300 dead_tmp(tmp2);
2301 dead_tmp(tmp);
18c9b560
AZ
2302 return 1;
2303 }
da6b5335
FN
2304 dead_tmp(tmp2);
2305 dead_tmp(tmp);
18c9b560
AZ
2306 gen_op_iwmmxt_movq_wRn_M0(wrd);
2307 gen_op_iwmmxt_set_mup();
2308 break;
2309 default:
2310 return 1;
2311 }
2312
2313 return 0;
2314}
2315
2316/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2317 (i.e. an undefined instruction). */
2318static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2319{
2320 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2321 TCGv tmp, tmp2;
18c9b560
AZ
2322
2323 if ((insn & 0x0ff00f10) == 0x0e200010) {
2324 /* Multiply with Internal Accumulate Format */
2325 rd0 = (insn >> 12) & 0xf;
2326 rd1 = insn & 0xf;
2327 acc = (insn >> 5) & 7;
2328
2329 if (acc != 0)
2330 return 1;
2331
3a554c0f
FN
2332 tmp = load_reg(s, rd0);
2333 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2334 switch ((insn >> 16) & 0xf) {
2335 case 0x0: /* MIA */
3a554c0f 2336 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2337 break;
2338 case 0x8: /* MIAPH */
3a554c0f 2339 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2340 break;
2341 case 0xc: /* MIABB */
2342 case 0xd: /* MIABT */
2343 case 0xe: /* MIATB */
2344 case 0xf: /* MIATT */
18c9b560 2345 if (insn & (1 << 16))
3a554c0f 2346 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2347 if (insn & (1 << 17))
3a554c0f
FN
2348 tcg_gen_shri_i32(tmp2, tmp2, 16);
2349 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2350 break;
2351 default:
2352 return 1;
2353 }
3a554c0f
FN
2354 dead_tmp(tmp2);
2355 dead_tmp(tmp);
18c9b560
AZ
2356
2357 gen_op_iwmmxt_movq_wRn_M0(acc);
2358 return 0;
2359 }
2360
2361 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2362 /* Internal Accumulator Access Format */
2363 rdhi = (insn >> 16) & 0xf;
2364 rdlo = (insn >> 12) & 0xf;
2365 acc = insn & 7;
2366
2367 if (acc != 0)
2368 return 1;
2369
2370 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2371 iwmmxt_load_reg(cpu_V0, acc);
2372 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2373 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2374 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2375 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
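            /* The XScale internal accumulator is 40 bits wide, so only the
               low 8 bits of the high word are kept here:
               (1 << (40 - 32)) - 1 == 0xff. */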
18c9b560 2376 } else { /* MAR */
3a554c0f
FN
2377 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2378 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2379 }
2380 return 0;
2381 }
2382
2383 return 1;
2384}
2385
c1713132
AZ
2386/* Disassemble a system coprocessor instruction. Return nonzero if the
2387 instruction is not defined. */
2388static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2389{
b75263d6 2390 TCGv tmp, tmp2;
c1713132
AZ
2391 uint32_t rd = (insn >> 12) & 0xf;
2392 uint32_t cp = (insn >> 8) & 0xf;
2393 if (IS_USER(s)) {
2394 return 1;
2395 }
2396
18c9b560 2397 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2398 if (!env->cp[cp].cp_read)
2399 return 1;
8984bd2e
PB
2400 gen_set_pc_im(s->pc);
2401 tmp = new_tmp();
b75263d6
JR
2402 tmp2 = tcg_const_i32(insn);
2403 gen_helper_get_cp(tmp, cpu_env, tmp2);
2404 tcg_temp_free(tmp2);
8984bd2e 2405 store_reg(s, rd, tmp);
c1713132
AZ
2406 } else {
2407 if (!env->cp[cp].cp_write)
2408 return 1;
8984bd2e
PB
2409 gen_set_pc_im(s->pc);
2410 tmp = load_reg(s, rd);
b75263d6
JR
2411 tmp2 = tcg_const_i32(insn);
2412 gen_helper_set_cp(cpu_env, tmp2, tmp);
2413 tcg_temp_free(tmp2);
a60de947 2414 dead_tmp(tmp);
c1713132
AZ
2415 }
2416 return 0;
2417}
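/* Generic coprocessor MRC/MCR accesses are routed through the cp_read/cp_write
   hooks registered in env->cp[]; the PC is synced with gen_set_pc_im() first,
   presumably so the helper can raise a precise exception. */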
2418
9ee6e8bb
PB
2419static int cp15_user_ok(uint32_t insn)
2420{
2421 int cpn = (insn >> 16) & 0xf;
2422 int cpm = insn & 0xf;
2423 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2424
2425 if (cpn == 13 && cpm == 0) {
2426 /* TLS register. */
2427 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2428 return 1;
2429 }
2430 if (cpn == 7) {
2431 /* ISB, DSB, DMB. */
2432 if ((cpm == 5 && op == 4)
2433 || (cpm == 10 && (op == 4 || op == 5)))
2434 return 1;
2435 }
2436 return 0;
2437}
2438
b5ff1b31
FB
2439/* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2440 instruction is not defined. */
a90b7318 2441static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2442{
2443 uint32_t rd;
b75263d6 2444 TCGv tmp, tmp2;
b5ff1b31 2445
9ee6e8bb
PB
2446 /* M profile cores use memory mapped registers instead of cp15. */
2447 if (arm_feature(env, ARM_FEATURE_M))
2448 return 1;
2449
2450 if ((insn & (1 << 25)) == 0) {
2451 if (insn & (1 << 20)) {
2452 /* mrrc */
2453 return 1;
2454 }
2455 /* mcrr. Used for block cache operations, so implement as no-op. */
2456 return 0;
2457 }
2458 if ((insn & (1 << 4)) == 0) {
2459 /* cdp */
2460 return 1;
2461 }
2462 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2463 return 1;
2464 }
9332f9da
FB
2465 if ((insn & 0x0fff0fff) == 0x0e070f90
2466 || (insn & 0x0fff0fff) == 0x0e070f58) {
2467 /* Wait for interrupt. */
8984bd2e 2468 gen_set_pc_im(s->pc);
9ee6e8bb 2469 s->is_jmp = DISAS_WFI;
9332f9da
FB
2470 return 0;
2471 }
b5ff1b31 2472 rd = (insn >> 12) & 0xf;
b75263d6 2473 tmp2 = tcg_const_i32(insn);
18c9b560 2474 if (insn & ARM_CP_RW_BIT) {
8984bd2e 2475 tmp = new_tmp();
b75263d6 2476 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2477 /* If the destination register is r15 then the value sets the condition codes. */
2478 if (rd != 15)
8984bd2e
PB
2479 store_reg(s, rd, tmp);
2480 else
2481 dead_tmp(tmp);
b5ff1b31 2482 } else {
8984bd2e 2483 tmp = load_reg(s, rd);
b75263d6 2484 gen_helper_set_cp15(cpu_env, tmp2, tmp);
8984bd2e 2485 dead_tmp(tmp);
a90b7318
AZ
2486 /* Normally we would always end the TB here, but Linux
2487 * arch/arm/mach-pxa/sleep.S expects two instructions following
2488 * an MMU enable to execute from cache. Imitate this behaviour. */
2489 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2490 (insn & 0x0fff0fff) != 0x0e010f10)
2491 gen_lookup_tb(s);
b5ff1b31 2492 }
b75263d6 2493 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2494 return 0;
2495}
2496
9ee6e8bb
PB
2497#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2498#define VFP_SREG(insn, bigbit, smallbit) \
2499 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2500#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2501 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2502 reg = (((insn) >> (bigbit)) & 0x0f) \
2503 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2504 } else { \
2505 if (insn & (1 << (smallbit))) \
2506 return 1; \
2507 reg = ((insn) >> (bigbit)) & 0x0f; \
2508 }} while (0)
2509
2510#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2511#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2512#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2513#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2514#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2515#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2516
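/* Worked example of the field extraction above: for a single-precision
   destination, VFP_SREG_D(insn) evaluates to
   ((insn >> 11) & 0x1e) | ((insn >> 22) & 1), i.e. bits [15:12] shifted up by
   one with the D bit (bit 22) as the low bit.  With VFP3, VFP_DREG_D() instead
   forms bits [15:12] | (bit 22 << 4) to address all 32 D registers; without
   VFP3 a set bit 22 makes the instruction undefined. */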
4373f3ce
PB
2517/* Move between integer and VFP cores. */
2518static TCGv gen_vfp_mrs(void)
2519{
2520 TCGv tmp = new_tmp();
2521 tcg_gen_mov_i32(tmp, cpu_F0s);
2522 return tmp;
2523}
2524
2525static void gen_vfp_msr(TCGv tmp)
2526{
2527 tcg_gen_mov_i32(cpu_F0s, tmp);
2528 dead_tmp(tmp);
2529}
2530
9ee6e8bb
PB
2531static inline int
2532vfp_enabled(CPUState * env)
2533{
2534 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2535}
2536
ad69471c
PB
2537static void gen_neon_dup_u8(TCGv var, int shift)
2538{
2539 TCGv tmp = new_tmp();
2540 if (shift)
2541 tcg_gen_shri_i32(var, var, shift);
86831435 2542 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2543 tcg_gen_shli_i32(tmp, var, 8);
2544 tcg_gen_or_i32(var, var, tmp);
2545 tcg_gen_shli_i32(tmp, var, 16);
2546 tcg_gen_or_i32(var, var, tmp);
2547 dead_tmp(tmp);
2548}
2549
2550static void gen_neon_dup_low16(TCGv var)
2551{
2552 TCGv tmp = new_tmp();
86831435 2553 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2554 tcg_gen_shli_i32(tmp, var, 16);
2555 tcg_gen_or_i32(var, var, tmp);
2556 dead_tmp(tmp);
2557}
2558
2559static void gen_neon_dup_high16(TCGv var)
2560{
2561 TCGv tmp = new_tmp();
2562 tcg_gen_andi_i32(var, var, 0xffff0000);
2563 tcg_gen_shri_i32(tmp, var, 16);
2564 tcg_gen_or_i32(var, var, tmp);
2565 dead_tmp(tmp);
2566}
2567
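/* Examples of the duplication helpers above:
   gen_neon_dup_u8(var, 0)  : 0x000000ab -> 0xabababab
   gen_neon_dup_low16(var)  : 0x0000abcd -> 0xabcdabcd
   gen_neon_dup_high16(var) : 0xabcd0000 -> 0xabcdabcd */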
b7bcbe95
FB
2568/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2569 (i.e. an undefined instruction). */
2570static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2571{
2572 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2573 int dp, veclen;
312eea9f 2574 TCGv addr;
4373f3ce 2575 TCGv tmp;
ad69471c 2576 TCGv tmp2;
b7bcbe95 2577
40f137e1
PB
2578 if (!arm_feature(env, ARM_FEATURE_VFP))
2579 return 1;
2580
9ee6e8bb
PB
2581 if (!vfp_enabled(env)) {
2582 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2583 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2584 return 1;
2585 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2586 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2587 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2588 return 1;
2589 }
b7bcbe95
FB
2590 dp = ((insn & 0xf00) == 0xb00);
2591 switch ((insn >> 24) & 0xf) {
2592 case 0xe:
2593 if (insn & (1 << 4)) {
2594 /* single register transfer */
b7bcbe95
FB
2595 rd = (insn >> 12) & 0xf;
2596 if (dp) {
9ee6e8bb
PB
2597 int size;
2598 int pass;
2599
2600 VFP_DREG_N(rn, insn);
2601 if (insn & 0xf)
b7bcbe95 2602 return 1;
9ee6e8bb
PB
2603 if (insn & 0x00c00060
2604 && !arm_feature(env, ARM_FEATURE_NEON))
2605 return 1;
2606
2607 pass = (insn >> 21) & 1;
2608 if (insn & (1 << 22)) {
2609 size = 0;
2610 offset = ((insn >> 5) & 3) * 8;
2611 } else if (insn & (1 << 5)) {
2612 size = 1;
2613 offset = (insn & (1 << 6)) ? 16 : 0;
2614 } else {
2615 size = 2;
2616 offset = 0;
2617 }
18c9b560 2618 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2619 /* vfp->arm */
ad69471c 2620 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2621 switch (size) {
2622 case 0:
9ee6e8bb 2623 if (offset)
ad69471c 2624 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2625 if (insn & (1 << 23))
ad69471c 2626 gen_uxtb(tmp);
9ee6e8bb 2627 else
ad69471c 2628 gen_sxtb(tmp);
9ee6e8bb
PB
2629 break;
2630 case 1:
9ee6e8bb
PB
2631 if (insn & (1 << 23)) {
2632 if (offset) {
ad69471c 2633 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2634 } else {
ad69471c 2635 gen_uxth(tmp);
9ee6e8bb
PB
2636 }
2637 } else {
2638 if (offset) {
ad69471c 2639 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2640 } else {
ad69471c 2641 gen_sxth(tmp);
9ee6e8bb
PB
2642 }
2643 }
2644 break;
2645 case 2:
9ee6e8bb
PB
2646 break;
2647 }
ad69471c 2648 store_reg(s, rd, tmp);
b7bcbe95
FB
2649 } else {
2650 /* arm->vfp */
ad69471c 2651 tmp = load_reg(s, rd);
9ee6e8bb
PB
2652 if (insn & (1 << 23)) {
2653 /* VDUP */
2654 if (size == 0) {
ad69471c 2655 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2656 } else if (size == 1) {
ad69471c 2657 gen_neon_dup_low16(tmp);
9ee6e8bb 2658 }
cbbccffc
PB
2659 for (n = 0; n <= pass * 2; n++) {
2660 tmp2 = new_tmp();
2661 tcg_gen_mov_i32(tmp2, tmp);
2662 neon_store_reg(rn, n, tmp2);
2663 }
2664 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2665 } else {
2666 /* VMOV */
2667 switch (size) {
2668 case 0:
ad69471c
PB
2669 tmp2 = neon_load_reg(rn, pass);
2670 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2671 dead_tmp(tmp2);
9ee6e8bb
PB
2672 break;
2673 case 1:
ad69471c
PB
2674 tmp2 = neon_load_reg(rn, pass);
2675 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2676 dead_tmp(tmp2);
9ee6e8bb
PB
2677 break;
2678 case 2:
9ee6e8bb
PB
2679 break;
2680 }
ad69471c 2681 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2682 }
b7bcbe95 2683 }
9ee6e8bb
PB
2684 } else { /* !dp */
2685 if ((insn & 0x6f) != 0x00)
2686 return 1;
2687 rn = VFP_SREG_N(insn);
18c9b560 2688 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2689 /* vfp->arm */
2690 if (insn & (1 << 21)) {
2691 /* system register */
40f137e1 2692 rn >>= 1;
9ee6e8bb 2693
b7bcbe95 2694 switch (rn) {
40f137e1 2695 case ARM_VFP_FPSID:
4373f3ce 2696 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2697 VFP3 restricts all id registers to privileged
2698 accesses. */
2699 if (IS_USER(s)
2700 && arm_feature(env, ARM_FEATURE_VFP3))
2701 return 1;
4373f3ce 2702 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2703 break;
40f137e1 2704 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2705 if (IS_USER(s))
2706 return 1;
4373f3ce 2707 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2708 break;
40f137e1
PB
2709 case ARM_VFP_FPINST:
2710 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2711 /* Not present in VFP3. */
2712 if (IS_USER(s)
2713 || arm_feature(env, ARM_FEATURE_VFP3))
2714 return 1;
4373f3ce 2715 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2716 break;
40f137e1 2717 case ARM_VFP_FPSCR:
601d70b9 2718 if (rd == 15) {
4373f3ce
PB
2719 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2720 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2721 } else {
2722 tmp = new_tmp();
2723 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2724 }
b7bcbe95 2725 break;
9ee6e8bb
PB
2726 case ARM_VFP_MVFR0:
2727 case ARM_VFP_MVFR1:
2728 if (IS_USER(s)
2729 || !arm_feature(env, ARM_FEATURE_VFP3))
2730 return 1;
4373f3ce 2731 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2732 break;
b7bcbe95
FB
2733 default:
2734 return 1;
2735 }
2736 } else {
2737 gen_mov_F0_vreg(0, rn);
4373f3ce 2738 tmp = gen_vfp_mrs();
b7bcbe95
FB
2739 }
2740 if (rd == 15) {
b5ff1b31 2741 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2742 gen_set_nzcv(tmp);
2743 dead_tmp(tmp);
2744 } else {
2745 store_reg(s, rd, tmp);
2746 }
b7bcbe95
FB
2747 } else {
2748 /* arm->vfp */
4373f3ce 2749 tmp = load_reg(s, rd);
b7bcbe95 2750 if (insn & (1 << 21)) {
40f137e1 2751 rn >>= 1;
b7bcbe95
FB
2752 /* system register */
2753 switch (rn) {
40f137e1 2754 case ARM_VFP_FPSID:
9ee6e8bb
PB
2755 case ARM_VFP_MVFR0:
2756 case ARM_VFP_MVFR1:
b7bcbe95
FB
2757 /* Writes are ignored. */
2758 break;
40f137e1 2759 case ARM_VFP_FPSCR:
4373f3ce
PB
2760 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2761 dead_tmp(tmp);
b5ff1b31 2762 gen_lookup_tb(s);
b7bcbe95 2763 break;
40f137e1 2764 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2765 if (IS_USER(s))
2766 return 1;
71b3c3de
JR
2767 /* TODO: VFP subarchitecture support.
2768 * For now, keep the EN bit only. */
2769 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2770 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2771 gen_lookup_tb(s);
2772 break;
2773 case ARM_VFP_FPINST:
2774 case ARM_VFP_FPINST2:
4373f3ce 2775 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2776 break;
b7bcbe95
FB
2777 default:
2778 return 1;
2779 }
2780 } else {
4373f3ce 2781 gen_vfp_msr(tmp);
b7bcbe95
FB
2782 gen_mov_vreg_F0(0, rn);
2783 }
2784 }
2785 }
2786 } else {
2787 /* data processing */
2788 /* The opcode is in bits 23, 21, 20 and 6. */
2789 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2790 if (dp) {
2791 if (op == 15) {
2792 /* rn is opcode */
2793 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2794 } else {
2795 /* rn is register number */
9ee6e8bb 2796 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2797 }
2798
2799 if (op == 15 && (rn == 15 || rn > 17)) {
2800 /* Integer or single precision destination. */
9ee6e8bb 2801 rd = VFP_SREG_D(insn);
b7bcbe95 2802 } else {
9ee6e8bb 2803 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2804 }
2805
2806 if (op == 15 && (rn == 16 || rn == 17)) {
2807 /* Integer source. */
2808 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2809 } else {
9ee6e8bb 2810 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2811 }
2812 } else {
9ee6e8bb 2813 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2814 if (op == 15 && rn == 15) {
2815 /* Double precision destination. */
9ee6e8bb
PB
2816 VFP_DREG_D(rd, insn);
2817 } else {
2818 rd = VFP_SREG_D(insn);
2819 }
2820 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2821 }
2822
2823 veclen = env->vfp.vec_len;
2824 if (op == 15 && rn > 3)
2825 veclen = 0;
2826
2827 /* Shut up compiler warnings. */
2828 delta_m = 0;
2829 delta_d = 0;
2830 bank_mask = 0;
3b46e624 2831
b7bcbe95
FB
2832 if (veclen > 0) {
2833 if (dp)
2834 bank_mask = 0xc;
2835 else
2836 bank_mask = 0x18;
2837
2838 /* Figure out what type of vector operation this is. */
2839 if ((rd & bank_mask) == 0) {
2840 /* scalar */
2841 veclen = 0;
2842 } else {
2843 if (dp)
2844 delta_d = (env->vfp.vec_stride >> 1) + 1;
2845 else
2846 delta_d = env->vfp.vec_stride + 1;
2847
2848 if ((rm & bank_mask) == 0) {
2849 /* mixed scalar/vector */
2850 delta_m = 0;
2851 } else {
2852 /* vector */
2853 delta_m = delta_d;
2854 }
2855 }
2856 }
2857
2858 /* Load the initial operands. */
2859 if (op == 15) {
2860 switch (rn) {
2861 case 16:
2862 case 17:
2863 /* Integer source */
2864 gen_mov_F0_vreg(0, rm);
2865 break;
2866 case 8:
2867 case 9:
2868 /* Compare */
2869 gen_mov_F0_vreg(dp, rd);
2870 gen_mov_F1_vreg(dp, rm);
2871 break;
2872 case 10:
2873 case 11:
2874 /* Compare with zero */
2875 gen_mov_F0_vreg(dp, rd);
2876 gen_vfp_F1_ld0(dp);
2877 break;
9ee6e8bb
PB
2878 case 20:
2879 case 21:
2880 case 22:
2881 case 23:
644ad806
PB
2882 case 28:
2883 case 29:
2884 case 30:
2885 case 31:
9ee6e8bb
PB
2886 /* Source and destination the same. */
2887 gen_mov_F0_vreg(dp, rd);
2888 break;
b7bcbe95
FB
2889 default:
2890 /* One source operand. */
2891 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2892 break;
b7bcbe95
FB
2893 }
2894 } else {
2895 /* Two source operands. */
2896 gen_mov_F0_vreg(dp, rn);
2897 gen_mov_F1_vreg(dp, rm);
2898 }
2899
2900 for (;;) {
2901 /* Perform the calculation. */
2902 switch (op) {
2903 case 0: /* mac: fd + (fn * fm) */
2904 gen_vfp_mul(dp);
2905 gen_mov_F1_vreg(dp, rd);
2906 gen_vfp_add(dp);
2907 break;
2908 case 1: /* nmac: fd - (fn * fm) */
2909 gen_vfp_mul(dp);
2910 gen_vfp_neg(dp);
2911 gen_mov_F1_vreg(dp, rd);
2912 gen_vfp_add(dp);
2913 break;
2914 case 2: /* msc: -fd + (fn * fm) */
2915 gen_vfp_mul(dp);
2916 gen_mov_F1_vreg(dp, rd);
2917 gen_vfp_sub(dp);
2918 break;
2919 case 3: /* nmsc: -fd - (fn * fm) */
2920 gen_vfp_mul(dp);
b7bcbe95 2921 gen_vfp_neg(dp);
c9fb531a
PB
2922 gen_mov_F1_vreg(dp, rd);
2923 gen_vfp_sub(dp);
b7bcbe95
FB
2924 break;
2925 case 4: /* mul: fn * fm */
2926 gen_vfp_mul(dp);
2927 break;
2928 case 5: /* nmul: -(fn * fm) */
2929 gen_vfp_mul(dp);
2930 gen_vfp_neg(dp);
2931 break;
2932 case 6: /* add: fn + fm */
2933 gen_vfp_add(dp);
2934 break;
2935 case 7: /* sub: fn - fm */
2936 gen_vfp_sub(dp);
2937 break;
2938 case 8: /* div: fn / fm */
2939 gen_vfp_div(dp);
2940 break;
9ee6e8bb
PB
2941 case 14: /* fconst */
2942 if (!arm_feature(env, ARM_FEATURE_VFP3))
2943 return 1;
2944
2945 n = (insn << 12) & 0x80000000;
2946 i = ((insn >> 12) & 0x70) | (insn & 0xf);
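                    /* Expand the 8-bit VFP3 immediate into a full IEEE bit
                       pattern: insn bit 19 supplies the sign (bit 31 of n),
                       bit 6 of i selects the exponent prefix and the low four
                       bits of i become the top fraction bits.  E.g. with the
                       sign clear and i == 0x70 the single-precision path below
                       yields n == 0x3f800000, i.e. 1.0f. */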
2947 if (dp) {
2948 if (i & 0x40)
2949 i |= 0x3f80;
2950 else
2951 i |= 0x4000;
2952 n |= i << 16;
4373f3ce 2953 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
2954 } else {
2955 if (i & 0x40)
2956 i |= 0x780;
2957 else
2958 i |= 0x800;
2959 n |= i << 19;
5b340b51 2960 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 2961 }
9ee6e8bb 2962 break;
b7bcbe95
FB
2963 case 15: /* extension space */
2964 switch (rn) {
2965 case 0: /* cpy */
2966 /* no-op */
2967 break;
2968 case 1: /* abs */
2969 gen_vfp_abs(dp);
2970 break;
2971 case 2: /* neg */
2972 gen_vfp_neg(dp);
2973 break;
2974 case 3: /* sqrt */
2975 gen_vfp_sqrt(dp);
2976 break;
2977 case 8: /* cmp */
2978 gen_vfp_cmp(dp);
2979 break;
2980 case 9: /* cmpe */
2981 gen_vfp_cmpe(dp);
2982 break;
2983 case 10: /* cmpz */
2984 gen_vfp_cmp(dp);
2985 break;
2986 case 11: /* cmpez */
2987 gen_vfp_F1_ld0(dp);
2988 gen_vfp_cmpe(dp);
2989 break;
2990 case 15: /* single<->double conversion */
2991 if (dp)
4373f3ce 2992 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 2993 else
4373f3ce 2994 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
2995 break;
2996 case 16: /* fuito */
2997 gen_vfp_uito(dp);
2998 break;
2999 case 17: /* fsito */
3000 gen_vfp_sito(dp);
3001 break;
9ee6e8bb
PB
3002 case 20: /* fshto */
3003 if (!arm_feature(env, ARM_FEATURE_VFP3))
3004 return 1;
644ad806 3005 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3006 break;
3007 case 21: /* fslto */
3008 if (!arm_feature(env, ARM_FEATURE_VFP3))
3009 return 1;
644ad806 3010 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3011 break;
3012 case 22: /* fuhto */
3013 if (!arm_feature(env, ARM_FEATURE_VFP3))
3014 return 1;
644ad806 3015 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3016 break;
3017 case 23: /* fulto */
3018 if (!arm_feature(env, ARM_FEATURE_VFP3))
3019 return 1;
644ad806 3020 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3021 break;
b7bcbe95
FB
3022 case 24: /* ftoui */
3023 gen_vfp_toui(dp);
3024 break;
3025 case 25: /* ftouiz */
3026 gen_vfp_touiz(dp);
3027 break;
3028 case 26: /* ftosi */
3029 gen_vfp_tosi(dp);
3030 break;
3031 case 27: /* ftosiz */
3032 gen_vfp_tosiz(dp);
3033 break;
9ee6e8bb
PB
3034 case 28: /* ftosh */
3035 if (!arm_feature(env, ARM_FEATURE_VFP3))
3036 return 1;
644ad806 3037 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3038 break;
3039 case 29: /* ftosl */
3040 if (!arm_feature(env, ARM_FEATURE_VFP3))
3041 return 1;
644ad806 3042 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3043 break;
3044 case 30: /* ftouh */
3045 if (!arm_feature(env, ARM_FEATURE_VFP3))
3046 return 1;
644ad806 3047 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3048 break;
3049 case 31: /* ftoul */
3050 if (!arm_feature(env, ARM_FEATURE_VFP3))
3051 return 1;
644ad806 3052 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3053 break;
b7bcbe95
FB
3054 default: /* undefined */
3055 printf ("rn:%d\n", rn);
3056 return 1;
3057 }
3058 break;
3059 default: /* undefined */
3060 printf ("op:%d\n", op);
3061 return 1;
3062 }
3063
3064 /* Write back the result. */
3065 if (op == 15 && (rn >= 8 && rn <= 11))
3066 ; /* Comparison, do nothing. */
3067 else if (op == 15 && rn > 17)
3068 /* Integer result. */
3069 gen_mov_vreg_F0(0, rd);
3070 else if (op == 15 && rn == 15)
3071 /* conversion */
3072 gen_mov_vreg_F0(!dp, rd);
3073 else
3074 gen_mov_vreg_F0(dp, rd);
3075
3076 /* break out of the loop if we have finished */
3077 if (veclen == 0)
3078 break;
3079
3080 if (op == 15 && delta_m == 0) {
3081 /* single source one-many */
3082 while (veclen--) {
3083 rd = ((rd + delta_d) & (bank_mask - 1))
3084 | (rd & bank_mask);
3085 gen_mov_vreg_F0(dp, rd);
3086 }
3087 break;
3088 }
3089 /* Set up the next operands. */
3090 veclen--;
3091 rd = ((rd + delta_d) & (bank_mask - 1))
3092 | (rd & bank_mask);
3093
3094 if (op == 15) {
3095 /* One source operand. */
3096 rm = ((rm + delta_m) & (bank_mask - 1))
3097 | (rm & bank_mask);
3098 gen_mov_F0_vreg(dp, rm);
3099 } else {
3100 /* Two source operands. */
3101 rn = ((rn + delta_d) & (bank_mask - 1))
3102 | (rn & bank_mask);
3103 gen_mov_F0_vreg(dp, rn);
3104 if (delta_m) {
3105 rm = ((rm + delta_m) & (bank_mask - 1))
3106 | (rm & bank_mask);
3107 gen_mov_F1_vreg(dp, rm);
3108 }
3109 }
3110 }
3111 }
3112 break;
3113 case 0xc:
3114 case 0xd:
9ee6e8bb 3115 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3116 /* two-register transfer */
3117 rn = (insn >> 16) & 0xf;
3118 rd = (insn >> 12) & 0xf;
3119 if (dp) {
9ee6e8bb
PB
3120 VFP_DREG_M(rm, insn);
3121 } else {
3122 rm = VFP_SREG_M(insn);
3123 }
b7bcbe95 3124
18c9b560 3125 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3126 /* vfp->arm */
3127 if (dp) {
4373f3ce
PB
3128 gen_mov_F0_vreg(0, rm * 2);
3129 tmp = gen_vfp_mrs();
3130 store_reg(s, rd, tmp);
3131 gen_mov_F0_vreg(0, rm * 2 + 1);
3132 tmp = gen_vfp_mrs();
3133 store_reg(s, rn, tmp);
b7bcbe95
FB
3134 } else {
3135 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3136 tmp = gen_vfp_mrs();
3137 store_reg(s, rn, tmp);
b7bcbe95 3138 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3139 tmp = gen_vfp_mrs();
3140 store_reg(s, rd, tmp);
b7bcbe95
FB
3141 }
3142 } else {
3143 /* arm->vfp */
3144 if (dp) {
4373f3ce
PB
3145 tmp = load_reg(s, rd);
3146 gen_vfp_msr(tmp);
3147 gen_mov_vreg_F0(0, rm * 2);
3148 tmp = load_reg(s, rn);
3149 gen_vfp_msr(tmp);
3150 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3151 } else {
4373f3ce
PB
3152 tmp = load_reg(s, rn);
3153 gen_vfp_msr(tmp);
b7bcbe95 3154 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3155 tmp = load_reg(s, rd);
3156 gen_vfp_msr(tmp);
b7bcbe95
FB
3157 gen_mov_vreg_F0(0, rm + 1);
3158 }
3159 }
3160 } else {
3161 /* Load/store */
3162 rn = (insn >> 16) & 0xf;
3163 if (dp)
9ee6e8bb 3164 VFP_DREG_D(rd, insn);
b7bcbe95 3165 else
9ee6e8bb
PB
3166 rd = VFP_SREG_D(insn);
3167 if (s->thumb && rn == 15) {
312eea9f
FN
3168 addr = new_tmp();
3169 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3170 } else {
312eea9f 3171 addr = load_reg(s, rn);
9ee6e8bb 3172 }
b7bcbe95
FB
3173 if ((insn & 0x01200000) == 0x01000000) {
3174 /* Single load/store */
3175 offset = (insn & 0xff) << 2;
3176 if ((insn & (1 << 23)) == 0)
3177 offset = -offset;
312eea9f 3178 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3179 if (insn & (1 << 20)) {
312eea9f 3180 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3181 gen_mov_vreg_F0(dp, rd);
3182 } else {
3183 gen_mov_F0_vreg(dp, rd);
312eea9f 3184 gen_vfp_st(s, dp, addr);
b7bcbe95 3185 }
312eea9f 3186 dead_tmp(addr);
b7bcbe95
FB
3187 } else {
3188 /* load/store multiple */
3189 if (dp)
3190 n = (insn >> 1) & 0x7f;
3191 else
3192 n = insn & 0xff;
3193
3194 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3195 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3196
3197 if (dp)
3198 offset = 8;
3199 else
3200 offset = 4;
3201 for (i = 0; i < n; i++) {
18c9b560 3202 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3203 /* load */
312eea9f 3204 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3205 gen_mov_vreg_F0(dp, rd + i);
3206 } else {
3207 /* store */
3208 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3209 gen_vfp_st(s, dp, addr);
b7bcbe95 3210 }
312eea9f 3211 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3212 }
3213 if (insn & (1 << 21)) {
3214 /* writeback */
3215 if (insn & (1 << 24))
3216 offset = -offset * n;
3217 else if (dp && (insn & 1))
3218 offset = 4;
3219 else
3220 offset = 0;
3221
3222 if (offset != 0)
312eea9f
FN
3223 tcg_gen_addi_i32(addr, addr, offset);
3224 store_reg(s, rn, addr);
3225 } else {
3226 dead_tmp(addr);
b7bcbe95
FB
3227 }
3228 }
3229 }
3230 break;
3231 default:
3232 /* Should never happen. */
3233 return 1;
3234 }
3235 return 0;
3236}
3237
6e256c93 3238static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3239{
6e256c93
FB
3240 TranslationBlock *tb;
3241
3242 tb = s->tb;
3243 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3244 tcg_gen_goto_tb(n);
8984bd2e 3245 gen_set_pc_im(dest);
57fec1fe 3246 tcg_gen_exit_tb((long)tb + n);
6e256c93 3247 } else {
8984bd2e 3248 gen_set_pc_im(dest);
57fec1fe 3249 tcg_gen_exit_tb(0);
6e256c93 3250 }
c53be334
FB
3251}
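/* Direct block chaining via goto_tb is only used when the destination lies in
   the same guest page as the current TB; otherwise we just set the PC and exit
   to the main loop, since a cross-page link could become stale when the target
   page is retranslated or unmapped. */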
3252
8aaca4c0
FB
3253static inline void gen_jmp (DisasContext *s, uint32_t dest)
3254{
551bd27f 3255 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3256 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3257 if (s->thumb)
d9ba4830
PB
3258 dest |= 1;
3259 gen_bx_im(s, dest);
8aaca4c0 3260 } else {
6e256c93 3261 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3262 s->is_jmp = DISAS_TB_JUMP;
3263 }
3264}
3265
d9ba4830 3266static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3267{
ee097184 3268 if (x)
d9ba4830 3269 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3270 else
d9ba4830 3271 gen_sxth(t0);
ee097184 3272 if (y)
d9ba4830 3273 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3274 else
d9ba4830
PB
3275 gen_sxth(t1);
3276 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3277}
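/* gen_mulxy() implements the 16x16->32 signed multiply used by the
   SMULxy/SMLAxy family: x and y pick the top or bottom halfword of each
   operand (sign-extended) before the 32-bit multiply. */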
3278
3279/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3280static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3281 uint32_t mask;
3282
3283 mask = 0;
3284 if (flags & (1 << 0))
3285 mask |= 0xff;
3286 if (flags & (1 << 1))
3287 mask |= 0xff00;
3288 if (flags & (1 << 2))
3289 mask |= 0xff0000;
3290 if (flags & (1 << 3))
3291 mask |= 0xff000000;
9ee6e8bb 3292
2ae23e75 3293 /* Mask out undefined bits. */
9ee6e8bb
PB
3294 mask &= ~CPSR_RESERVED;
3295 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3296 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3297 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3298 mask &= ~CPSR_IT;
9ee6e8bb 3299 /* Mask out execution state bits. */
2ae23e75 3300 if (!spsr)
e160c51c 3301 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3302 /* Mask out privileged bits. */
3303 if (IS_USER(s))
9ee6e8bb 3304 mask &= CPSR_USER;
b5ff1b31
FB
3305 return mask;
3306}
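/* Example: an MSR touching only the c and f fields (flags == 0x9) starts with
   mask == 0xff0000ff; the reserved, GE/E, IT, execution-state and privileged
   bits are then stripped according to the CPU features and the current mode. */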
3307
2fbac54b
FN
3308/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3309static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3310{
d9ba4830 3311 TCGv tmp;
b5ff1b31
FB
3312 if (spsr) {
3313 /* ??? This is also undefined in system mode. */
3314 if (IS_USER(s))
3315 return 1;
d9ba4830
PB
3316
3317 tmp = load_cpu_field(spsr);
3318 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3319 tcg_gen_andi_i32(t0, t0, mask);
3320 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3321 store_cpu_field(tmp, spsr);
b5ff1b31 3322 } else {
2fbac54b 3323 gen_set_cpsr(t0, mask);
b5ff1b31 3324 }
2fbac54b 3325 dead_tmp(t0);
b5ff1b31
FB
3326 gen_lookup_tb(s);
3327 return 0;
3328}
3329
2fbac54b
FN
3330/* Returns nonzero if access to the PSR is not permitted. */
3331static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3332{
3333 TCGv tmp;
3334 tmp = new_tmp();
3335 tcg_gen_movi_i32(tmp, val);
3336 return gen_set_psr(s, mask, spsr, tmp);
3337}
3338
e9bb4aa9
JR
3339/* Generate an old-style exception return. Marks pc as dead. */
3340static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3341{
d9ba4830 3342 TCGv tmp;
e9bb4aa9 3343 store_reg(s, 15, pc);
d9ba4830
PB
3344 tmp = load_cpu_field(spsr);
3345 gen_set_cpsr(tmp, 0xffffffff);
3346 dead_tmp(tmp);
b5ff1b31
FB
3347 s->is_jmp = DISAS_UPDATE;
3348}
3349
b0109805
PB
3350/* Generate a v6 exception return. Marks both values as dead. */
3351static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3352{
b0109805
PB
3353 gen_set_cpsr(cpsr, 0xffffffff);
3354 dead_tmp(cpsr);
3355 store_reg(s, 15, pc);
9ee6e8bb
PB
3356 s->is_jmp = DISAS_UPDATE;
3357}
3b46e624 3358
9ee6e8bb
PB
3359static inline void
3360gen_set_condexec (DisasContext *s)
3361{
3362 if (s->condexec_mask) {
8f01245e
PB
3363 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3364 TCGv tmp = new_tmp();
3365 tcg_gen_movi_i32(tmp, val);
d9ba4830 3366 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3367 }
3368}
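/* The Thumb-2 IT-block state tracked by the translator is re-packed as
   (cond << 4) | (mask >> 1) and written to the condexec_bits CPU field,
   presumably so that helpers and exceptions observe the state that applies to
   the instruction being executed. */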
3b46e624 3369
9ee6e8bb
PB
3370static void gen_nop_hint(DisasContext *s, int val)
3371{
3372 switch (val) {
3373 case 3: /* wfi */
8984bd2e 3374 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3375 s->is_jmp = DISAS_WFI;
3376 break;
3377 case 2: /* wfe */
3378 case 4: /* sev */
3379 /* TODO: Implement SEV and WFE. May help SMP performance. */
3380 default: /* nop */
3381 break;
3382 }
3383}
99c475ab 3384
ad69471c 3385#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3386
dd8fbd78 3387static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3388{
3389 switch (size) {
dd8fbd78
FN
3390 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3391 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3392 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3393 default: return 1;
3394 }
3395 return 0;
3396}
3397
dd8fbd78 3398static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3399{
3400 switch (size) {
dd8fbd78
FN
3401 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3402 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3403 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3404 default: return;
3405 }
3406}
3407
3408/* 32-bit pairwise ops end up the same as the elementwise versions. */
3409#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3410#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3411#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3412#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3413
3414/* FIXME: This is wrong. They set the wrong overflow bit. */
3415#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3416#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3417#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3418#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3419
3420#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3421 switch ((size << 1) | u) { \
3422 case 0: \
dd8fbd78 3423 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3424 break; \
3425 case 1: \
dd8fbd78 3426 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3427 break; \
3428 case 2: \
dd8fbd78 3429 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3430 break; \
3431 case 3: \
dd8fbd78 3432 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3433 break; \
3434 case 4: \
dd8fbd78 3435 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3436 break; \
3437 case 5: \
dd8fbd78 3438 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3439 break; \
3440 default: return 1; \
3441 }} while (0)
9ee6e8bb
PB
3442
3443#define GEN_NEON_INTEGER_OP(name) do { \
3444 switch ((size << 1) | u) { \
ad69471c 3445 case 0: \
dd8fbd78 3446 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3447 break; \
3448 case 1: \
dd8fbd78 3449 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3450 break; \
3451 case 2: \
dd8fbd78 3452 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3453 break; \
3454 case 3: \
dd8fbd78 3455 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3456 break; \
3457 case 4: \
dd8fbd78 3458 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3459 break; \
3460 case 5: \
dd8fbd78 3461 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3462 break; \
9ee6e8bb
PB
3463 default: return 1; \
3464 }} while (0)
3465
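/* GEN_NEON_INTEGER_OP(name) expands to a switch on (size << 1) | u that
   dispatches to gen_helper_neon_<name>_{s,u}{8,16,32}(tmp, tmp, tmp2); the
   _ENV variant additionally passes cpu_env for helpers that need access to
   CPU state (e.g. to set saturation flags). */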
dd8fbd78 3466static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3467{
dd8fbd78
FN
3468 TCGv tmp = new_tmp();
3469 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3470 return tmp;
9ee6e8bb
PB
3471}
3472
dd8fbd78 3473static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3474{
dd8fbd78
FN
3475 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3476 dead_tmp(var);
9ee6e8bb
PB
3477}
3478
dd8fbd78 3479static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3480{
dd8fbd78 3481 TCGv tmp;
9ee6e8bb 3482 if (size == 1) {
dd8fbd78 3483 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3484 } else {
dd8fbd78
FN
3485 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3486 if (reg & 1) {
3487 gen_neon_dup_low16(tmp);
3488 } else {
3489 gen_neon_dup_high16(tmp);
3490 }
9ee6e8bb 3491 }
dd8fbd78 3492 return tmp;
9ee6e8bb
PB
3493}
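/* neon_get_scalar() returns the selected scalar widened to a full 32-bit
   value: either the containing word is used directly, or the chosen halfword
   is broadcast into both halves with the dup helpers, so the subsequent
   element-wise operations see the scalar in every 16-bit lane. */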
3494
19457615
FN
3495static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3496{
3497 TCGv rd, rm, tmp;
3498
3499 rd = new_tmp();
3500 rm = new_tmp();
3501 tmp = new_tmp();
3502
3503 tcg_gen_andi_i32(rd, t0, 0xff);
3504 tcg_gen_shri_i32(tmp, t0, 8);
3505 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3506 tcg_gen_or_i32(rd, rd, tmp);
3507 tcg_gen_shli_i32(tmp, t1, 16);
3508 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3509 tcg_gen_or_i32(rd, rd, tmp);
3510 tcg_gen_shli_i32(tmp, t1, 8);
3511 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3512 tcg_gen_or_i32(rd, rd, tmp);
3513
3514 tcg_gen_shri_i32(rm, t0, 8);
3515 tcg_gen_andi_i32(rm, rm, 0xff);
3516 tcg_gen_shri_i32(tmp, t0, 16);
3517 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3518 tcg_gen_or_i32(rm, rm, tmp);
3519 tcg_gen_shli_i32(tmp, t1, 8);
3520 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3521 tcg_gen_or_i32(rm, rm, tmp);
3522 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3523 tcg_gen_or_i32(t1, rm, tmp);
3524 tcg_gen_mov_i32(t0, rd);
3525
3526 dead_tmp(tmp);
3527 dead_tmp(rm);
3528 dead_tmp(rd);
3529}
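/* Example: gen_neon_unzip_u8 separates the even- and odd-numbered bytes of
   the pair, so t0 = 0x33221100, t1 = 0x77665544 becomes
   t0 = 0x66442200 (even bytes) and t1 = 0x77553311 (odd bytes). */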
3530
3531static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3532{
3533 TCGv rd, rm, tmp;
3534
3535 rd = new_tmp();
3536 rm = new_tmp();
3537 tmp = new_tmp();
3538
3539 tcg_gen_andi_i32(rd, t0, 0xff);
3540 tcg_gen_shli_i32(tmp, t1, 8);
3541 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3542 tcg_gen_or_i32(rd, rd, tmp);
3543 tcg_gen_shli_i32(tmp, t0, 16);
3544 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3545 tcg_gen_or_i32(rd, rd, tmp);
3546 tcg_gen_shli_i32(tmp, t1, 24);
3547 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3548 tcg_gen_or_i32(rd, rd, tmp);
3549
3550 tcg_gen_andi_i32(rm, t1, 0xff000000);
3551 tcg_gen_shri_i32(tmp, t0, 8);
3552 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3553 tcg_gen_or_i32(rm, rm, tmp);
3554 tcg_gen_shri_i32(tmp, t1, 8);
3555 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3556 tcg_gen_or_i32(rm, rm, tmp);
3557 tcg_gen_shri_i32(tmp, t0, 16);
3558 tcg_gen_andi_i32(tmp, tmp, 0xff);
3559 tcg_gen_or_i32(t1, rm, tmp);
3560 tcg_gen_mov_i32(t0, rd);
3561
3562 dead_tmp(tmp);
3563 dead_tmp(rm);
3564 dead_tmp(rd);
3565}
3566
3567static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3568{
3569 TCGv tmp, tmp2;
3570
3571 tmp = new_tmp();
3572 tmp2 = new_tmp();
3573
3574 tcg_gen_andi_i32(tmp, t0, 0xffff);
3575 tcg_gen_shli_i32(tmp2, t1, 16);
3576 tcg_gen_or_i32(tmp, tmp, tmp2);
3577 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3578 tcg_gen_shri_i32(tmp2, t0, 16);
3579 tcg_gen_or_i32(t1, t1, tmp2);
3580 tcg_gen_mov_i32(t0, tmp);
3581
3582 dead_tmp(tmp2);
3583 dead_tmp(tmp);
3584}
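/* Example: gen_neon_zip_u16 interleaves the 16-bit elements of the pair,
   so t0 = 0xaaaa1111, t1 = 0xbbbb2222 becomes
   t0 = 0x22221111 and t1 = 0xbbbbaaaa. */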
3585
9ee6e8bb
PB
3586static void gen_neon_unzip(int reg, int q, int tmp, int size)
3587{
3588 int n;
dd8fbd78 3589 TCGv t0, t1;
9ee6e8bb
PB
3590
3591 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3592 t0 = neon_load_reg(reg, n);
3593 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3594 switch (size) {
dd8fbd78
FN
3595 case 0: gen_neon_unzip_u8(t0, t1); break;
3596 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3597 case 2: /* no-op */; break;
3598 default: abort();
3599 }
dd8fbd78
FN
3600 neon_store_scratch(tmp + n, t0);
3601 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3602 }
3603}
3604
19457615
FN
3605static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3606{
3607 TCGv rd, tmp;
3608
3609 rd = new_tmp();
3610 tmp = new_tmp();
3611
3612 tcg_gen_shli_i32(rd, t0, 8);
3613 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3614 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3615 tcg_gen_or_i32(rd, rd, tmp);
3616
3617 tcg_gen_shri_i32(t1, t1, 8);
3618 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3619 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3620 tcg_gen_or_i32(t1, t1, tmp);
3621 tcg_gen_mov_i32(t0, rd);
3622
3623 dead_tmp(tmp);
3624 dead_tmp(rd);
3625}
3626
3627static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3628{
3629 TCGv rd, tmp;
3630
3631 rd = new_tmp();
3632 tmp = new_tmp();
3633
3634 tcg_gen_shli_i32(rd, t0, 16);
3635 tcg_gen_andi_i32(tmp, t1, 0xffff);
3636 tcg_gen_or_i32(rd, rd, tmp);
3637 tcg_gen_shri_i32(t1, t1, 16);
3638 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3639 tcg_gen_or_i32(t1, t1, tmp);
3640 tcg_gen_mov_i32(t0, rd);
3641
3642 dead_tmp(tmp);
3643 dead_tmp(rd);
3644}
3645
3646
9ee6e8bb
PB
3647static struct {
3648 int nregs;
3649 int interleave;
3650 int spacing;
3651} neon_ls_element_type[11] = {
3652 {4, 4, 1},
3653 {4, 4, 2},
3654 {4, 1, 1},
3655 {4, 2, 1},
3656 {3, 3, 1},
3657 {3, 3, 2},
3658 {3, 1, 1},
3659 {1, 1, 1},
3660 {2, 2, 1},
3661 {2, 2, 2},
3662 {2, 1, 1}
3663};
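/* The table is indexed by the element-type field in insn bits [11:8] (values
   above 10 are rejected below): nregs is the number of D registers
   transferred, interleave describes how elements from those registers are
   mixed in memory, and spacing is the step between successive destination
   D registers. */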
3664
3665/* Translate a NEON load/store element instruction. Return nonzero if the
3666 instruction is invalid. */
3667static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3668{
3669 int rd, rn, rm;
3670 int op;
3671 int nregs;
3672 int interleave;
84496233 3673 int spacing;
9ee6e8bb
PB
3674 int stride;
3675 int size;
3676 int reg;
3677 int pass;
3678 int load;
3679 int shift;
9ee6e8bb 3680 int n;
1b2b1e54 3681 TCGv addr;
b0109805 3682 TCGv tmp;
8f8e3aa4 3683 TCGv tmp2;
84496233 3684 TCGv_i64 tmp64;
9ee6e8bb
PB
3685
3686 if (!vfp_enabled(env))
3687 return 1;
3688 VFP_DREG_D(rd, insn);
3689 rn = (insn >> 16) & 0xf;
3690 rm = insn & 0xf;
3691 load = (insn & (1 << 21)) != 0;
1b2b1e54 3692 addr = new_tmp();
9ee6e8bb
PB
3693 if ((insn & (1 << 23)) == 0) {
3694 /* Load/store all elements. */
3695 op = (insn >> 8) & 0xf;
3696 size = (insn >> 6) & 3;
84496233 3697 if (op > 10)
9ee6e8bb
PB
3698 return 1;
3699 nregs = neon_ls_element_type[op].nregs;
3700 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3701 spacing = neon_ls_element_type[op].spacing;
3702 if (size == 3 && (interleave | spacing) != 1)
3703 return 1;
dcc65026 3704 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3705 stride = (1 << size) * interleave;
3706 for (reg = 0; reg < nregs; reg++) {
3707 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3708 load_reg_var(s, addr, rn);
3709 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3710 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3711 load_reg_var(s, addr, rn);
3712 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3713 }
84496233
JR
3714 if (size == 3) {
3715 if (load) {
3716 tmp64 = gen_ld64(addr, IS_USER(s));
3717 neon_store_reg64(tmp64, rd);
3718 tcg_temp_free_i64(tmp64);
3719 } else {
3720 tmp64 = tcg_temp_new_i64();
3721 neon_load_reg64(tmp64, rd);
3722 gen_st64(tmp64, addr, IS_USER(s));
3723 }
3724 tcg_gen_addi_i32(addr, addr, stride);
3725 } else {
3726 for (pass = 0; pass < 2; pass++) {
3727 if (size == 2) {
3728 if (load) {
3729 tmp = gen_ld32(addr, IS_USER(s));
3730 neon_store_reg(rd, pass, tmp);
3731 } else {
3732 tmp = neon_load_reg(rd, pass);
3733 gen_st32(tmp, addr, IS_USER(s));
3734 }
1b2b1e54 3735 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3736 } else if (size == 1) {
3737 if (load) {
3738 tmp = gen_ld16u(addr, IS_USER(s));
3739 tcg_gen_addi_i32(addr, addr, stride);
3740 tmp2 = gen_ld16u(addr, IS_USER(s));
3741 tcg_gen_addi_i32(addr, addr, stride);
3742 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3743 dead_tmp(tmp2);
3744 neon_store_reg(rd, pass, tmp);
3745 } else {
3746 tmp = neon_load_reg(rd, pass);
3747 tmp2 = new_tmp();
3748 tcg_gen_shri_i32(tmp2, tmp, 16);
3749 gen_st16(tmp, addr, IS_USER(s));
3750 tcg_gen_addi_i32(addr, addr, stride);
3751 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3752 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3753 }
84496233
JR
3754 } else /* size == 0 */ {
3755 if (load) {
3756 TCGV_UNUSED(tmp2);
3757 for (n = 0; n < 4; n++) {
3758 tmp = gen_ld8u(addr, IS_USER(s));
3759 tcg_gen_addi_i32(addr, addr, stride);
3760 if (n == 0) {
3761 tmp2 = tmp;
3762 } else {
3763 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3764 dead_tmp(tmp);
3765 }
9ee6e8bb 3766 }
84496233
JR
3767 neon_store_reg(rd, pass, tmp2);
3768 } else {
3769 tmp2 = neon_load_reg(rd, pass);
3770 for (n = 0; n < 4; n++) {
3771 tmp = new_tmp();
3772 if (n == 0) {
3773 tcg_gen_mov_i32(tmp, tmp2);
3774 } else {
3775 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3776 }
3777 gen_st8(tmp, addr, IS_USER(s));
3778 tcg_gen_addi_i32(addr, addr, stride);
3779 }
3780 dead_tmp(tmp2);
9ee6e8bb
PB
3781 }
3782 }
3783 }
3784 }
84496233 3785 rd += spacing;
9ee6e8bb
PB
3786 }
3787 stride = nregs * 8;
3788 } else {
3789 size = (insn >> 10) & 3;
3790 if (size == 3) {
3791 /* Load single element to all lanes. */
3792 if (!load)
3793 return 1;
3794 size = (insn >> 6) & 3;
3795 nregs = ((insn >> 8) & 3) + 1;
3796 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3797 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3798 for (reg = 0; reg < nregs; reg++) {
3799 switch (size) {
3800 case 0:
1b2b1e54 3801 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3802 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3803 break;
3804 case 1:
1b2b1e54 3805 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3806 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3807 break;
3808 case 2:
1b2b1e54 3809 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3810 break;
3811 case 3:
3812 return 1;
a50f5b91
PB
3813 default: /* Avoid compiler warnings. */
3814 abort();
99c475ab 3815 }
1b2b1e54 3816 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3817 tmp2 = new_tmp();
3818 tcg_gen_mov_i32(tmp2, tmp);
3819 neon_store_reg(rd, 0, tmp2);
3018f259 3820 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3821 rd += stride;
3822 }
3823 stride = (1 << size) * nregs;
3824 } else {
3825 /* Single element. */
3826 pass = (insn >> 7) & 1;
3827 switch (size) {
3828 case 0:
3829 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3830 stride = 1;
3831 break;
3832 case 1:
3833 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3834 stride = (insn & (1 << 5)) ? 2 : 1;
3835 break;
3836 case 2:
3837 shift = 0;
9ee6e8bb
PB
3838 stride = (insn & (1 << 6)) ? 2 : 1;
3839 break;
3840 default:
3841 abort();
3842 }
3843 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3844 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3845 for (reg = 0; reg < nregs; reg++) {
3846 if (load) {
9ee6e8bb
PB
3847 switch (size) {
3848 case 0:
1b2b1e54 3849 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3850 break;
3851 case 1:
1b2b1e54 3852 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3853 break;
3854 case 2:
1b2b1e54 3855 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3856 break;
a50f5b91
PB
3857 default: /* Avoid compiler warnings. */
3858 abort();
9ee6e8bb
PB
3859 }
3860 if (size != 2) {
8f8e3aa4
PB
3861 tmp2 = neon_load_reg(rd, pass);
3862 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3863 dead_tmp(tmp2);
9ee6e8bb 3864 }
8f8e3aa4 3865 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3866 } else { /* Store */
8f8e3aa4
PB
3867 tmp = neon_load_reg(rd, pass);
3868 if (shift)
3869 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3870 switch (size) {
3871 case 0:
1b2b1e54 3872 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3873 break;
3874 case 1:
1b2b1e54 3875 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3876 break;
3877 case 2:
1b2b1e54 3878 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3879 break;
99c475ab 3880 }
99c475ab 3881 }
9ee6e8bb 3882 rd += stride;
1b2b1e54 3883 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 3884 }
9ee6e8bb 3885 stride = nregs * (1 << size);
99c475ab 3886 }
9ee6e8bb 3887 }
1b2b1e54 3888 dead_tmp(addr);
9ee6e8bb 3889 if (rm != 15) {
b26eefb6
PB
3890 TCGv base;
3891
3892 base = load_reg(s, rn);
9ee6e8bb 3893 if (rm == 13) {
b26eefb6 3894 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3895 } else {
b26eefb6
PB
3896 TCGv index;
3897 index = load_reg(s, rm);
3898 tcg_gen_add_i32(base, base, index);
3899 dead_tmp(index);
9ee6e8bb 3900 }
b26eefb6 3901 store_reg(s, rn, base);
9ee6e8bb
PB
3902 }
3903 return 0;
3904}
3b46e624 3905
8f8e3aa4
PB
3906/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3907static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3908{
3909 tcg_gen_and_i32(t, t, c);
f669df27 3910 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
3911 tcg_gen_or_i32(dest, t, f);
3912}
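/* Reference only: the same bitwise-select computed on plain host
   integers, to make the semantics of the TCG sequence above explicit.
   This helper is illustrative, not part of the translator, and the
   name is made up for the example. */
static inline uint32_t neon_bsl_ref(uint32_t c, uint32_t t, uint32_t f)
{
    /* For every bit position: result = c ? t : f. */
    return (t & c) | (f & ~c);
}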
3913
a7812ae4 3914static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3915{
3916 switch (size) {
3917 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3918 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3919 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3920 default: abort();
3921 }
3922}
3923
a7812ae4 3924static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3925{
3926 switch (size) {
3927 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3928 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3929 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3930 default: abort();
3931 }
3932}
3933
a7812ae4 3934static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3935{
3936 switch (size) {
3937 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3938 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3939 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3940 default: abort();
3941 }
3942}
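/* Reference only: unsigned saturating narrow of a single 64-bit lane to
   32 bits, mirroring what the *_narrow_sat_u32 helper is asked to do for
   size == 2.  The in-tree helper additionally records saturation in the
   cumulative QC flag, which this illustrative sketch omits. */
static inline uint32_t neon_narrow_sat_u32_ref(uint64_t x)
{
    /* Values that do not fit in 32 bits clamp to the maximum. */
    return x > 0xffffffffull ? 0xffffffffu : (uint32_t)x;
}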
3943
3944static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3945 int q, int u)
3946{
3947 if (q) {
3948 if (u) {
3949 switch (size) {
3950 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3951 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3952 default: abort();
3953 }
3954 } else {
3955 switch (size) {
3956 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3957 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3958 default: abort();
3959 }
3960 }
3961 } else {
3962 if (u) {
3963 switch (size) {
 3964 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
 3965 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3966 default: abort();
3967 }
3968 } else {
3969 switch (size) {
3970 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3971 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3972 default: abort();
3973 }
3974 }
3975 }
3976}
3977
a7812ae4 3978static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
3979{
3980 if (u) {
3981 switch (size) {
3982 case 0: gen_helper_neon_widen_u8(dest, src); break;
3983 case 1: gen_helper_neon_widen_u16(dest, src); break;
3984 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3985 default: abort();
3986 }
3987 } else {
3988 switch (size) {
3989 case 0: gen_helper_neon_widen_s8(dest, src); break;
3990 case 1: gen_helper_neon_widen_s16(dest, src); break;
3991 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3992 default: abort();
3993 }
3994 }
3995 dead_tmp(src);
3996}
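/* Worked example for gen_neon_widen with size == 0: the four packed 8-bit
   lanes 0x01, 0x80, 0x7f, 0xff of the 32-bit source widen to the 16-bit
   lanes 0x0001, 0xff80, 0x007f, 0xffff when signed (s8), and to 0x0001,
   0x0080, 0x007f, 0x00ff when unsigned (u8). */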
3997
3998static inline void gen_neon_addl(int size)
3999{
4000 switch (size) {
4001 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4002 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4003 case 2: tcg_gen_add_i64(CPU_V001); break;
4004 default: abort();
4005 }
4006}
4007
4008static inline void gen_neon_subl(int size)
4009{
4010 switch (size) {
4011 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4012 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4013 case 2: tcg_gen_sub_i64(CPU_V001); break;
4014 default: abort();
4015 }
4016}
4017
a7812ae4 4018static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4019{
4020 switch (size) {
4021 case 0: gen_helper_neon_negl_u16(var, var); break;
4022 case 1: gen_helper_neon_negl_u32(var, var); break;
4023 case 2: gen_helper_neon_negl_u64(var, var); break;
4024 default: abort();
4025 }
4026}
4027
a7812ae4 4028static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4029{
4030 switch (size) {
4031 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4032 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4033 default: abort();
4034 }
4035}
4036
a7812ae4 4037static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4038{
a7812ae4 4039 TCGv_i64 tmp;
ad69471c
PB
4040
4041 switch ((size << 1) | u) {
4042 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4043 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4044 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4045 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4046 case 4:
4047 tmp = gen_muls_i64_i32(a, b);
4048 tcg_gen_mov_i64(dest, tmp);
4049 break;
4050 case 5:
4051 tmp = gen_mulu_i64_i32(a, b);
4052 tcg_gen_mov_i64(dest, tmp);
4053 break;
4054 default: abort();
4055 }
ad69471c
PB
4056}
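/* The (size << 1) | u switch above decodes to: 0 = s8, 1 = u8, 2 = s16,
   3 = u16, 4 = s32, 5 = u32.  Only the 32-bit inputs (cases 4 and 5) can
   use the generic 32x32->64 multiply; the narrower packed lanes need the
   dedicated helpers. */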
4057
9ee6e8bb
PB
4058/* Translate a NEON data processing instruction. Return nonzero if the
4059 instruction is invalid.
ad69471c
PB
4060 We process data in a mixture of 32-bit and 64-bit chunks.
4061 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
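/* In practice this means a D register is handled as two 32-bit passes and
   a Q register as four (the "q ? 4 : 2" loops below), while the genuinely
   64-bit element operations instead iterate over one or two 64-bit lanes
   ("q ? 2 : 1") using cpu_V0/cpu_V1. */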
2c0262af 4062
9ee6e8bb
PB
4063static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4064{
4065 int op;
4066 int q;
4067 int rd, rn, rm;
4068 int size;
4069 int shift;
4070 int pass;
4071 int count;
4072 int pairwise;
4073 int u;
4074 int n;
ca9a32e4 4075 uint32_t imm, mask;
b75263d6 4076 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4077 TCGv_i64 tmp64;
9ee6e8bb
PB
4078
4079 if (!vfp_enabled(env))
4080 return 1;
4081 q = (insn & (1 << 6)) != 0;
4082 u = (insn >> 24) & 1;
4083 VFP_DREG_D(rd, insn);
4084 VFP_DREG_N(rn, insn);
4085 VFP_DREG_M(rm, insn);
4086 size = (insn >> 20) & 3;
4087 if ((insn & (1 << 23)) == 0) {
4088 /* Three register same length. */
4089 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4090 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4091 || op == 10 || op == 11 || op == 16)) {
4092 /* 64-bit element instructions. */
9ee6e8bb 4093 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4094 neon_load_reg64(cpu_V0, rn + pass);
4095 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4096 switch (op) {
4097 case 1: /* VQADD */
4098 if (u) {
ad69471c 4099 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4100 } else {
ad69471c 4101 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4102 }
9ee6e8bb
PB
4103 break;
4104 case 5: /* VQSUB */
4105 if (u) {
ad69471c
PB
4106 gen_helper_neon_sub_saturate_u64(CPU_V001);
4107 } else {
4108 gen_helper_neon_sub_saturate_s64(CPU_V001);
4109 }
4110 break;
4111 case 8: /* VSHL */
4112 if (u) {
4113 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4114 } else {
4115 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4116 }
4117 break;
4118 case 9: /* VQSHL */
4119 if (u) {
4120 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4121 cpu_V0, cpu_V0);
4122 } else {
4123 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4124 cpu_V1, cpu_V0);
4125 }
4126 break;
4127 case 10: /* VRSHL */
4128 if (u) {
4129 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4130 } else {
ad69471c
PB
4131 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4132 }
4133 break;
4134 case 11: /* VQRSHL */
4135 if (u) {
4136 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4137 cpu_V1, cpu_V0);
4138 } else {
4139 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4140 cpu_V1, cpu_V0);
1e8d4eec 4141 }
9ee6e8bb
PB
4142 break;
4143 case 16:
4144 if (u) {
ad69471c 4145 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4146 } else {
ad69471c 4147 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4148 }
4149 break;
4150 default:
4151 abort();
2c0262af 4152 }
ad69471c 4153 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4154 }
9ee6e8bb 4155 return 0;
2c0262af 4156 }
9ee6e8bb
PB
4157 switch (op) {
4158 case 8: /* VSHL */
4159 case 9: /* VQSHL */
4160 case 10: /* VRSHL */
ad69471c 4161 case 11: /* VQRSHL */
9ee6e8bb 4162 {
ad69471c
PB
4163 int rtmp;
4164 /* Shift instruction operands are reversed. */
4165 rtmp = rn;
9ee6e8bb 4166 rn = rm;
ad69471c 4167 rm = rtmp;
9ee6e8bb
PB
4168 pairwise = 0;
4169 }
2c0262af 4170 break;
9ee6e8bb
PB
4171 case 20: /* VPMAX */
4172 case 21: /* VPMIN */
4173 case 23: /* VPADD */
4174 pairwise = 1;
2c0262af 4175 break;
9ee6e8bb
PB
4176 case 26: /* VPADD (float) */
4177 pairwise = (u && size < 2);
2c0262af 4178 break;
9ee6e8bb
PB
4179 case 30: /* VPMIN/VPMAX (float) */
4180 pairwise = u;
2c0262af 4181 break;
9ee6e8bb
PB
4182 default:
4183 pairwise = 0;
2c0262af 4184 break;
9ee6e8bb 4185 }
dd8fbd78 4186
9ee6e8bb
PB
4187 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4188
4189 if (pairwise) {
4190 /* Pairwise. */
4191 if (q)
4192 n = (pass & 1) * 2;
2c0262af 4193 else
9ee6e8bb
PB
4194 n = 0;
4195 if (pass < q + 1) {
dd8fbd78
FN
4196 tmp = neon_load_reg(rn, n);
4197 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4198 } else {
dd8fbd78
FN
4199 tmp = neon_load_reg(rm, n);
4200 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4201 }
4202 } else {
4203 /* Elementwise. */
dd8fbd78
FN
4204 tmp = neon_load_reg(rn, pass);
4205 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4206 }
4207 switch (op) {
4208 case 0: /* VHADD */
4209 GEN_NEON_INTEGER_OP(hadd);
4210 break;
4211 case 1: /* VQADD */
ad69471c 4212 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4213 break;
9ee6e8bb
PB
4214 case 2: /* VRHADD */
4215 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4216 break;
9ee6e8bb
PB
4217 case 3: /* Logic ops. */
4218 switch ((u << 2) | size) {
4219 case 0: /* VAND */
dd8fbd78 4220 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4221 break;
4222 case 1: /* BIC */
f669df27 4223 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4224 break;
4225 case 2: /* VORR */
dd8fbd78 4226 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4227 break;
4228 case 3: /* VORN */
f669df27 4229 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4230 break;
4231 case 4: /* VEOR */
dd8fbd78 4232 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4233 break;
4234 case 5: /* VBSL */
dd8fbd78
FN
4235 tmp3 = neon_load_reg(rd, pass);
4236 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4237 dead_tmp(tmp3);
9ee6e8bb
PB
4238 break;
4239 case 6: /* VBIT */
dd8fbd78
FN
4240 tmp3 = neon_load_reg(rd, pass);
4241 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4242 dead_tmp(tmp3);
9ee6e8bb
PB
4243 break;
4244 case 7: /* VBIF */
dd8fbd78
FN
4245 tmp3 = neon_load_reg(rd, pass);
4246 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4247 dead_tmp(tmp3);
9ee6e8bb 4248 break;
2c0262af
FB
4249 }
4250 break;
9ee6e8bb
PB
4251 case 4: /* VHSUB */
4252 GEN_NEON_INTEGER_OP(hsub);
4253 break;
4254 case 5: /* VQSUB */
ad69471c 4255 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4256 break;
9ee6e8bb
PB
4257 case 6: /* VCGT */
4258 GEN_NEON_INTEGER_OP(cgt);
4259 break;
4260 case 7: /* VCGE */
4261 GEN_NEON_INTEGER_OP(cge);
4262 break;
4263 case 8: /* VSHL */
ad69471c 4264 GEN_NEON_INTEGER_OP(shl);
2c0262af 4265 break;
9ee6e8bb 4266 case 9: /* VQSHL */
ad69471c 4267 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4268 break;
9ee6e8bb 4269 case 10: /* VRSHL */
ad69471c 4270 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4271 break;
9ee6e8bb 4272 case 11: /* VQRSHL */
ad69471c 4273 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4274 break;
4275 case 12: /* VMAX */
4276 GEN_NEON_INTEGER_OP(max);
4277 break;
4278 case 13: /* VMIN */
4279 GEN_NEON_INTEGER_OP(min);
4280 break;
4281 case 14: /* VABD */
4282 GEN_NEON_INTEGER_OP(abd);
4283 break;
4284 case 15: /* VABA */
4285 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4286 dead_tmp(tmp2);
4287 tmp2 = neon_load_reg(rd, pass);
4288 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4289 break;
4290 case 16:
4291 if (!u) { /* VADD */
dd8fbd78 4292 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4293 return 1;
4294 } else { /* VSUB */
4295 switch (size) {
dd8fbd78
FN
4296 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4297 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4298 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4299 default: return 1;
4300 }
4301 }
4302 break;
4303 case 17:
4304 if (!u) { /* VTST */
4305 switch (size) {
dd8fbd78
FN
4306 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4307 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4308 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4309 default: return 1;
4310 }
4311 } else { /* VCEQ */
4312 switch (size) {
dd8fbd78
FN
4313 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4314 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4315 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4316 default: return 1;
4317 }
4318 }
4319 break;
4320 case 18: /* Multiply. */
4321 switch (size) {
dd8fbd78
FN
4322 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4323 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4324 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4325 default: return 1;
4326 }
dd8fbd78
FN
4327 dead_tmp(tmp2);
4328 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4329 if (u) { /* VMLS */
dd8fbd78 4330 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4331 } else { /* VMLA */
dd8fbd78 4332 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4333 }
4334 break;
4335 case 19: /* VMUL */
4336 if (u) { /* polynomial */
dd8fbd78 4337 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4338 } else { /* Integer */
4339 switch (size) {
dd8fbd78
FN
4340 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4341 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4342 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4343 default: return 1;
4344 }
4345 }
4346 break;
4347 case 20: /* VPMAX */
4348 GEN_NEON_INTEGER_OP(pmax);
4349 break;
4350 case 21: /* VPMIN */
4351 GEN_NEON_INTEGER_OP(pmin);
4352 break;
 4353 case 22: /* Multiply high. */
4354 if (!u) { /* VQDMULH */
4355 switch (size) {
dd8fbd78
FN
4356 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4357 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4358 default: return 1;
4359 }
 4360 } else { /* VQRDMULH */
4361 switch (size) {
dd8fbd78
FN
4362 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4363 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4364 default: return 1;
4365 }
4366 }
4367 break;
4368 case 23: /* VPADD */
4369 if (u)
4370 return 1;
4371 switch (size) {
dd8fbd78
FN
4372 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4373 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4374 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4375 default: return 1;
4376 }
4377 break;
 4378 case 26: /* Floating point arithmetic. */
4379 switch ((u << 2) | size) {
4380 case 0: /* VADD */
dd8fbd78 4381 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4382 break;
4383 case 2: /* VSUB */
dd8fbd78 4384 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4385 break;
4386 case 4: /* VPADD */
dd8fbd78 4387 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4388 break;
4389 case 6: /* VABD */
dd8fbd78 4390 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4391 break;
4392 default:
4393 return 1;
4394 }
4395 break;
4396 case 27: /* Float multiply. */
dd8fbd78 4397 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4398 if (!u) {
dd8fbd78
FN
4399 dead_tmp(tmp2);
4400 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4401 if (size == 0) {
dd8fbd78 4402 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4403 } else {
dd8fbd78 4404 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4405 }
4406 }
4407 break;
4408 case 28: /* Float compare. */
4409 if (!u) {
dd8fbd78 4410 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4411 } else {
9ee6e8bb 4412 if (size == 0)
dd8fbd78 4413 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4414 else
dd8fbd78 4415 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4416 }
2c0262af 4417 break;
9ee6e8bb
PB
4418 case 29: /* Float compare absolute. */
4419 if (!u)
4420 return 1;
4421 if (size == 0)
dd8fbd78 4422 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4423 else
dd8fbd78 4424 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4425 break;
9ee6e8bb
PB
4426 case 30: /* Float min/max. */
4427 if (size == 0)
dd8fbd78 4428 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4429 else
dd8fbd78 4430 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4431 break;
4432 case 31:
4433 if (size == 0)
dd8fbd78 4434 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4435 else
dd8fbd78 4436 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4437 break;
9ee6e8bb
PB
4438 default:
4439 abort();
2c0262af 4440 }
dd8fbd78
FN
4441 dead_tmp(tmp2);
4442
9ee6e8bb
PB
4443 /* Save the result. For elementwise operations we can put it
4444 straight into the destination register. For pairwise operations
4445 we have to be careful to avoid clobbering the source operands. */
4446 if (pairwise && rd == rm) {
dd8fbd78 4447 neon_store_scratch(pass, tmp);
9ee6e8bb 4448 } else {
dd8fbd78 4449 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4450 }
4451
4452 } /* for pass */
4453 if (pairwise && rd == rm) {
4454 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4455 tmp = neon_load_scratch(pass);
4456 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4457 }
4458 }
ad69471c 4459 /* End of 3 register same size operations. */
9ee6e8bb
PB
4460 } else if (insn & (1 << 4)) {
4461 if ((insn & 0x00380080) != 0) {
4462 /* Two registers and shift. */
4463 op = (insn >> 8) & 0xf;
4464 if (insn & (1 << 7)) {
4465 /* 64-bit shift. */
4466 size = 3;
4467 } else {
4468 size = 2;
4469 while ((insn & (1 << (size + 19))) == 0)
4470 size--;
4471 }
4472 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
 4473 /* To avoid excessive duplication of ops we implement shift
4474 by immediate using the variable shift operations. */
4475 if (op < 8) {
4476 /* Shift by immediate:
4477 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4478 /* Right shifts are encoded as N - shift, where N is the
4479 element size in bits. */
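/* Worked example: with 8-bit elements (size == 0) an encoded shift field
   of 5 becomes 5 - 8 = -3 below, and the variable-shift helpers treat a
   negative left shift as a right shift, here by 3 bits. */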
4480 if (op <= 4)
4481 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4482 if (size == 3) {
4483 count = q + 1;
4484 } else {
4485 count = q ? 4: 2;
4486 }
4487 switch (size) {
4488 case 0:
4489 imm = (uint8_t) shift;
4490 imm |= imm << 8;
4491 imm |= imm << 16;
4492 break;
4493 case 1:
4494 imm = (uint16_t) shift;
4495 imm |= imm << 16;
4496 break;
4497 case 2:
4498 case 3:
4499 imm = shift;
4500 break;
4501 default:
4502 abort();
4503 }
4504
4505 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4506 if (size == 3) {
4507 neon_load_reg64(cpu_V0, rm + pass);
4508 tcg_gen_movi_i64(cpu_V1, imm);
4509 switch (op) {
4510 case 0: /* VSHR */
4511 case 1: /* VSRA */
4512 if (u)
4513 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4514 else
ad69471c 4515 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4516 break;
ad69471c
PB
4517 case 2: /* VRSHR */
4518 case 3: /* VRSRA */
4519 if (u)
4520 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4521 else
ad69471c 4522 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4523 break;
ad69471c
PB
4524 case 4: /* VSRI */
4525 if (!u)
4526 return 1;
4527 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4528 break;
4529 case 5: /* VSHL, VSLI */
4530 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4531 break;
4532 case 6: /* VQSHL */
4533 if (u)
4534 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4535 else
ad69471c
PB
4536 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4537 break;
4538 case 7: /* VQSHLU */
4539 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4540 break;
9ee6e8bb 4541 }
ad69471c
PB
4542 if (op == 1 || op == 3) {
4543 /* Accumulate. */
4544 neon_load_reg64(cpu_V0, rd + pass);
4545 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4546 } else if (op == 4 || (op == 5 && u)) {
4547 /* Insert */
4548 cpu_abort(env, "VS[LR]I.64 not implemented");
4549 }
4550 neon_store_reg64(cpu_V0, rd + pass);
4551 } else { /* size < 3 */
4552 /* Operands in T0 and T1. */
dd8fbd78
FN
4553 tmp = neon_load_reg(rm, pass);
4554 tmp2 = new_tmp();
4555 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4556 switch (op) {
4557 case 0: /* VSHR */
4558 case 1: /* VSRA */
4559 GEN_NEON_INTEGER_OP(shl);
4560 break;
4561 case 2: /* VRSHR */
4562 case 3: /* VRSRA */
4563 GEN_NEON_INTEGER_OP(rshl);
4564 break;
4565 case 4: /* VSRI */
4566 if (!u)
4567 return 1;
4568 GEN_NEON_INTEGER_OP(shl);
4569 break;
4570 case 5: /* VSHL, VSLI */
4571 switch (size) {
dd8fbd78
FN
4572 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4573 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4574 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4575 default: return 1;
4576 }
4577 break;
4578 case 6: /* VQSHL */
4579 GEN_NEON_INTEGER_OP_ENV(qshl);
4580 break;
4581 case 7: /* VQSHLU */
4582 switch (size) {
dd8fbd78
FN
4583 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4584 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4585 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4586 default: return 1;
4587 }
4588 break;
4589 }
dd8fbd78 4590 dead_tmp(tmp2);
ad69471c
PB
4591
4592 if (op == 1 || op == 3) {
4593 /* Accumulate. */
dd8fbd78
FN
4594 tmp2 = neon_load_reg(rd, pass);
4595 gen_neon_add(size, tmp2, tmp);
4596 dead_tmp(tmp2);
ad69471c
PB
4597 } else if (op == 4 || (op == 5 && u)) {
4598 /* Insert */
4599 switch (size) {
4600 case 0:
4601 if (op == 4)
ca9a32e4 4602 mask = 0xff >> -shift;
ad69471c 4603 else
ca9a32e4
JR
4604 mask = (uint8_t)(0xff << shift);
4605 mask |= mask << 8;
4606 mask |= mask << 16;
ad69471c
PB
4607 break;
4608 case 1:
4609 if (op == 4)
ca9a32e4 4610 mask = 0xffff >> -shift;
ad69471c 4611 else
ca9a32e4
JR
4612 mask = (uint16_t)(0xffff << shift);
4613 mask |= mask << 16;
ad69471c
PB
4614 break;
4615 case 2:
ca9a32e4
JR
4616 if (shift < -31 || shift > 31) {
4617 mask = 0;
4618 } else {
4619 if (op == 4)
4620 mask = 0xffffffffu >> -shift;
4621 else
4622 mask = 0xffffffffu << shift;
4623 }
ad69471c
PB
4624 break;
4625 default:
4626 abort();
4627 }
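/* Worked example for the masks above with 8-bit elements: VSLI #3 gives
   mask = 0xf8f8f8f8, so bits [2:0] of each destination byte survive;
   VSRI #3 (shift == -3) gives mask = 0x1f1f1f1f, so the top three bits
   of each destination byte are preserved. */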
dd8fbd78 4628 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4629 tcg_gen_andi_i32(tmp, tmp, mask);
4630 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78
FN
4631 tcg_gen_or_i32(tmp, tmp, tmp2);
4632 dead_tmp(tmp2);
ad69471c 4633 }
dd8fbd78 4634 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4635 }
4636 } /* for pass */
4637 } else if (op < 10) {
ad69471c 4638 /* Shift by immediate and narrow:
9ee6e8bb
PB
4639 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4640 shift = shift - (1 << (size + 3));
4641 size++;
9ee6e8bb
PB
4642 switch (size) {
4643 case 1:
ad69471c 4644 imm = (uint16_t)shift;
9ee6e8bb 4645 imm |= imm << 16;
ad69471c 4646 tmp2 = tcg_const_i32(imm);
a7812ae4 4647 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4648 break;
4649 case 2:
ad69471c
PB
4650 imm = (uint32_t)shift;
4651 tmp2 = tcg_const_i32(imm);
a7812ae4 4652 TCGV_UNUSED_I64(tmp64);
4cc633c3 4653 break;
9ee6e8bb 4654 case 3:
a7812ae4
PB
4655 tmp64 = tcg_const_i64(shift);
4656 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4657 break;
4658 default:
4659 abort();
4660 }
4661
ad69471c
PB
4662 for (pass = 0; pass < 2; pass++) {
4663 if (size == 3) {
4664 neon_load_reg64(cpu_V0, rm + pass);
4665 if (q) {
4666 if (u)
a7812ae4 4667 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4668 else
a7812ae4 4669 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4670 } else {
4671 if (u)
a7812ae4 4672 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4673 else
a7812ae4 4674 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4675 }
2c0262af 4676 } else {
ad69471c
PB
4677 tmp = neon_load_reg(rm + pass, 0);
4678 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4679 tmp3 = neon_load_reg(rm + pass, 1);
4680 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4681 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4682 dead_tmp(tmp);
36aa55dc 4683 dead_tmp(tmp3);
9ee6e8bb 4684 }
ad69471c
PB
4685 tmp = new_tmp();
4686 if (op == 8 && !u) {
4687 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4688 } else {
ad69471c
PB
4689 if (op == 8)
4690 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4691 else
ad69471c
PB
4692 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4693 }
2301db49 4694 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4695 } /* for pass */
b75263d6
JR
4696 if (size == 3) {
4697 tcg_temp_free_i64(tmp64);
2301db49
JR
4698 } else {
4699 dead_tmp(tmp2);
b75263d6 4700 }
9ee6e8bb
PB
4701 } else if (op == 10) {
4702 /* VSHLL */
ad69471c 4703 if (q || size == 3)
9ee6e8bb 4704 return 1;
ad69471c
PB
4705 tmp = neon_load_reg(rm, 0);
4706 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4707 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4708 if (pass == 1)
4709 tmp = tmp2;
4710
4711 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4712
9ee6e8bb
PB
4713 if (shift != 0) {
4714 /* The shift is less than the width of the source
ad69471c
PB
4715 type, so we can just shift the whole register. */
4716 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4717 if (size < 2 || !u) {
4718 uint64_t imm64;
4719 if (size == 0) {
4720 imm = (0xffu >> (8 - shift));
4721 imm |= imm << 16;
4722 } else {
4723 imm = 0xffff >> (16 - shift);
9ee6e8bb 4724 }
ad69471c
PB
4725 imm64 = imm | (((uint64_t)imm) << 32);
4726 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4727 }
4728 }
ad69471c 4729 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4730 }
4731 } else if (op == 15 || op == 16) {
4732 /* VCVT fixed-point. */
4733 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4734 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4735 if (op & 1) {
4736 if (u)
4373f3ce 4737 gen_vfp_ulto(0, shift);
9ee6e8bb 4738 else
4373f3ce 4739 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4740 } else {
4741 if (u)
4373f3ce 4742 gen_vfp_toul(0, shift);
9ee6e8bb 4743 else
4373f3ce 4744 gen_vfp_tosl(0, shift);
2c0262af 4745 }
4373f3ce 4746 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4747 }
4748 } else {
9ee6e8bb
PB
4749 return 1;
4750 }
4751 } else { /* (insn & 0x00380080) == 0 */
4752 int invert;
4753
4754 op = (insn >> 8) & 0xf;
4755 /* One register and immediate. */
4756 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4757 invert = (insn & (1 << 5)) != 0;
4758 switch (op) {
4759 case 0: case 1:
4760 /* no-op */
4761 break;
4762 case 2: case 3:
4763 imm <<= 8;
4764 break;
4765 case 4: case 5:
4766 imm <<= 16;
4767 break;
4768 case 6: case 7:
4769 imm <<= 24;
4770 break;
4771 case 8: case 9:
4772 imm |= imm << 16;
4773 break;
4774 case 10: case 11:
4775 imm = (imm << 8) | (imm << 24);
4776 break;
4777 case 12:
 4778 imm = (imm << 8) | 0xff;
4779 break;
4780 case 13:
4781 imm = (imm << 16) | 0xffff;
4782 break;
4783 case 14:
4784 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4785 if (invert)
4786 imm = ~imm;
4787 break;
4788 case 15:
4789 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4790 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4791 break;
4792 }
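/* Example expansions for imm = 0x42: ops 2/3 give 0x00004200, ops 10/11
   give 0x42004200, op 12 gives 0x000042ff and op 13 gives 0x0042ffff,
   each value then used for every 32-bit element (and inverted first for
   the inverted forms handled below). */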
4793 if (invert)
4794 imm = ~imm;
4795
9ee6e8bb
PB
4796 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4797 if (op & 1 && op < 12) {
ad69471c 4798 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4799 if (invert) {
4800 /* The immediate value has already been inverted, so
4801 BIC becomes AND. */
ad69471c 4802 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4803 } else {
ad69471c 4804 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4805 }
9ee6e8bb 4806 } else {
ad69471c
PB
4807 /* VMOV, VMVN. */
4808 tmp = new_tmp();
9ee6e8bb 4809 if (op == 14 && invert) {
ad69471c
PB
4810 uint32_t val;
4811 val = 0;
9ee6e8bb
PB
4812 for (n = 0; n < 4; n++) {
4813 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4814 val |= 0xff << (n * 8);
9ee6e8bb 4815 }
ad69471c
PB
4816 tcg_gen_movi_i32(tmp, val);
4817 } else {
4818 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4819 }
9ee6e8bb 4820 }
ad69471c 4821 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4822 }
4823 }
e4b3861d 4824 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4825 if (size != 3) {
4826 op = (insn >> 8) & 0xf;
4827 if ((insn & (1 << 6)) == 0) {
4828 /* Three registers of different lengths. */
4829 int src1_wide;
4830 int src2_wide;
4831 int prewiden;
4832 /* prewiden, src1_wide, src2_wide */
4833 static const int neon_3reg_wide[16][3] = {
4834 {1, 0, 0}, /* VADDL */
4835 {1, 1, 0}, /* VADDW */
4836 {1, 0, 0}, /* VSUBL */
4837 {1, 1, 0}, /* VSUBW */
4838 {0, 1, 1}, /* VADDHN */
4839 {0, 0, 0}, /* VABAL */
4840 {0, 1, 1}, /* VSUBHN */
4841 {0, 0, 0}, /* VABDL */
4842 {0, 0, 0}, /* VMLAL */
4843 {0, 0, 0}, /* VQDMLAL */
4844 {0, 0, 0}, /* VMLSL */
4845 {0, 0, 0}, /* VQDMLSL */
4846 {0, 0, 0}, /* Integer VMULL */
4847 {0, 0, 0}, /* VQDMULL */
4848 {0, 0, 0} /* Polynomial VMULL */
4849 };
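/* For example VADDW is {1, 1, 0}: the first source operand is already
   wide and is read as 64-bit lanes (src1_wide), while the second source
   is a narrow vector that is widened element by element (prewiden)
   before the 64-bit add. */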
4850
4851 prewiden = neon_3reg_wide[op][0];
4852 src1_wide = neon_3reg_wide[op][1];
4853 src2_wide = neon_3reg_wide[op][2];
4854
ad69471c
PB
4855 if (size == 0 && (op == 9 || op == 11 || op == 13))
4856 return 1;
4857
9ee6e8bb
PB
4858 /* Avoid overlapping operands. Wide source operands are
4859 always aligned so will never overlap with wide
4860 destinations in problematic ways. */
8f8e3aa4 4861 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4862 tmp = neon_load_reg(rm, 1);
4863 neon_store_scratch(2, tmp);
8f8e3aa4 4864 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4865 tmp = neon_load_reg(rn, 1);
4866 neon_store_scratch(2, tmp);
9ee6e8bb 4867 }
a50f5b91 4868 TCGV_UNUSED(tmp3);
9ee6e8bb 4869 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4870 if (src1_wide) {
4871 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4872 TCGV_UNUSED(tmp);
9ee6e8bb 4873 } else {
ad69471c 4874 if (pass == 1 && rd == rn) {
dd8fbd78 4875 tmp = neon_load_scratch(2);
9ee6e8bb 4876 } else {
ad69471c
PB
4877 tmp = neon_load_reg(rn, pass);
4878 }
4879 if (prewiden) {
4880 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4881 }
4882 }
ad69471c
PB
4883 if (src2_wide) {
4884 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4885 TCGV_UNUSED(tmp2);
9ee6e8bb 4886 } else {
ad69471c 4887 if (pass == 1 && rd == rm) {
dd8fbd78 4888 tmp2 = neon_load_scratch(2);
9ee6e8bb 4889 } else {
ad69471c
PB
4890 tmp2 = neon_load_reg(rm, pass);
4891 }
4892 if (prewiden) {
4893 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4894 }
9ee6e8bb
PB
4895 }
4896 switch (op) {
4897 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4898 gen_neon_addl(size);
9ee6e8bb
PB
4899 break;
 4900 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 4901 gen_neon_subl(size);
9ee6e8bb
PB
4902 break;
4903 case 5: case 7: /* VABAL, VABDL */
4904 switch ((size << 1) | u) {
ad69471c
PB
4905 case 0:
4906 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4907 break;
4908 case 1:
4909 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4910 break;
4911 case 2:
4912 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4913 break;
4914 case 3:
4915 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4916 break;
4917 case 4:
4918 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4919 break;
4920 case 5:
4921 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4922 break;
9ee6e8bb
PB
4923 default: abort();
4924 }
ad69471c
PB
4925 dead_tmp(tmp2);
4926 dead_tmp(tmp);
9ee6e8bb
PB
4927 break;
4928 case 8: case 9: case 10: case 11: case 12: case 13:
4929 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4930 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
4931 dead_tmp(tmp2);
4932 dead_tmp(tmp);
9ee6e8bb
PB
4933 break;
4934 case 14: /* Polynomial VMULL */
4935 cpu_abort(env, "Polynomial VMULL not implemented");
4936
4937 default: /* 15 is RESERVED. */
4938 return 1;
4939 }
4940 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4941 /* Accumulate. */
4942 if (op == 10 || op == 11) {
ad69471c 4943 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4944 }
4945
9ee6e8bb 4946 if (op != 13) {
ad69471c 4947 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4948 }
4949
4950 switch (op) {
4951 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4952 gen_neon_addl(size);
9ee6e8bb
PB
4953 break;
4954 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4955 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4956 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4957 break;
9ee6e8bb
PB
4958 /* Fall through. */
4959 case 13: /* VQDMULL */
ad69471c 4960 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4961 break;
4962 default:
4963 abort();
4964 }
ad69471c 4965 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4966 } else if (op == 4 || op == 6) {
4967 /* Narrowing operation. */
ad69471c 4968 tmp = new_tmp();
9ee6e8bb
PB
4969 if (u) {
4970 switch (size) {
ad69471c
PB
4971 case 0:
4972 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4973 break;
4974 case 1:
4975 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4976 break;
4977 case 2:
4978 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4979 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4980 break;
9ee6e8bb
PB
4981 default: abort();
4982 }
4983 } else {
4984 switch (size) {
ad69471c
PB
4985 case 0:
4986 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4987 break;
4988 case 1:
4989 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4990 break;
4991 case 2:
4992 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4993 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4994 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4995 break;
9ee6e8bb
PB
4996 default: abort();
4997 }
4998 }
ad69471c
PB
4999 if (pass == 0) {
5000 tmp3 = tmp;
5001 } else {
5002 neon_store_reg(rd, 0, tmp3);
5003 neon_store_reg(rd, 1, tmp);
5004 }
9ee6e8bb
PB
5005 } else {
5006 /* Write back the result. */
ad69471c 5007 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5008 }
5009 }
5010 } else {
5011 /* Two registers and a scalar. */
5012 switch (op) {
5013 case 0: /* Integer VMLA scalar */
5014 case 1: /* Float VMLA scalar */
5015 case 4: /* Integer VMLS scalar */
5016 case 5: /* Floating point VMLS scalar */
5017 case 8: /* Integer VMUL scalar */
5018 case 9: /* Floating point VMUL scalar */
5019 case 12: /* VQDMULH scalar */
5020 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5021 tmp = neon_get_scalar(size, rm);
5022 neon_store_scratch(0, tmp);
9ee6e8bb 5023 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5024 tmp = neon_load_scratch(0);
5025 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5026 if (op == 12) {
5027 if (size == 1) {
dd8fbd78 5028 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5029 } else {
dd8fbd78 5030 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5031 }
5032 } else if (op == 13) {
5033 if (size == 1) {
dd8fbd78 5034 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5035 } else {
dd8fbd78 5036 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5037 }
5038 } else if (op & 1) {
dd8fbd78 5039 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5040 } else {
5041 switch (size) {
dd8fbd78
FN
5042 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5043 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5044 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5045 default: return 1;
5046 }
5047 }
dd8fbd78 5048 dead_tmp(tmp2);
9ee6e8bb
PB
5049 if (op < 8) {
5050 /* Accumulate. */
dd8fbd78 5051 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5052 switch (op) {
5053 case 0:
dd8fbd78 5054 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5055 break;
5056 case 1:
dd8fbd78 5057 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5058 break;
5059 case 4:
dd8fbd78 5060 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5061 break;
5062 case 5:
dd8fbd78 5063 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5064 break;
5065 default:
5066 abort();
5067 }
dd8fbd78 5068 dead_tmp(tmp2);
9ee6e8bb 5069 }
dd8fbd78 5070 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5071 }
5072 break;
 5073 case 2: /* VMLAL scalar */
5074 case 3: /* VQDMLAL scalar */
5075 case 6: /* VMLSL scalar */
5076 case 7: /* VQDMLSL scalar */
5077 case 10: /* VMULL scalar */
5078 case 11: /* VQDMULL scalar */
ad69471c
PB
5079 if (size == 0 && (op == 3 || op == 7 || op == 11))
5080 return 1;
5081
dd8fbd78
FN
5082 tmp2 = neon_get_scalar(size, rm);
5083 tmp3 = neon_load_reg(rn, 1);
ad69471c 5084
9ee6e8bb 5085 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5086 if (pass == 0) {
5087 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5088 } else {
dd8fbd78 5089 tmp = tmp3;
9ee6e8bb 5090 }
ad69471c 5091 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5092 dead_tmp(tmp);
9ee6e8bb 5093 if (op == 6 || op == 7) {
ad69471c
PB
5094 gen_neon_negl(cpu_V0, size);
5095 }
5096 if (op != 11) {
5097 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5098 }
9ee6e8bb
PB
5099 switch (op) {
5100 case 2: case 6:
ad69471c 5101 gen_neon_addl(size);
9ee6e8bb
PB
5102 break;
5103 case 3: case 7:
ad69471c
PB
5104 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5105 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5106 break;
5107 case 10:
5108 /* no-op */
5109 break;
5110 case 11:
ad69471c 5111 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5112 break;
5113 default:
5114 abort();
5115 }
ad69471c 5116 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5117 }
dd8fbd78
FN
5118
5119 dead_tmp(tmp2);
5120
9ee6e8bb
PB
5121 break;
5122 default: /* 14 and 15 are RESERVED */
5123 return 1;
5124 }
5125 }
5126 } else { /* size == 3 */
5127 if (!u) {
5128 /* Extract. */
9ee6e8bb 5129 imm = (insn >> 8) & 0xf;
ad69471c
PB
5130 count = q + 1;
5131
5132 if (imm > 7 && !q)
5133 return 1;
5134
5135 if (imm == 0) {
5136 neon_load_reg64(cpu_V0, rn);
5137 if (q) {
5138 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5139 }
ad69471c
PB
5140 } else if (imm == 8) {
5141 neon_load_reg64(cpu_V0, rn + 1);
5142 if (q) {
5143 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5144 }
ad69471c 5145 } else if (q) {
a7812ae4 5146 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5147 if (imm < 8) {
5148 neon_load_reg64(cpu_V0, rn);
a7812ae4 5149 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5150 } else {
5151 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5152 neon_load_reg64(tmp64, rm);
ad69471c
PB
5153 }
5154 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5155 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5156 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5157 if (imm < 8) {
5158 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5159 } else {
ad69471c
PB
5160 neon_load_reg64(cpu_V1, rm + 1);
5161 imm -= 8;
9ee6e8bb 5162 }
ad69471c 5163 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5164 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5165 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5166 tcg_temp_free_i64(tmp64);
ad69471c 5167 } else {
a7812ae4 5168 /* BUGFIX */
ad69471c 5169 neon_load_reg64(cpu_V0, rn);
a7812ae4 5170 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5171 neon_load_reg64(cpu_V1, rm);
a7812ae4 5172 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5173 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5174 }
5175 neon_store_reg64(cpu_V0, rd);
5176 if (q) {
5177 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5178 }
5179 } else if ((insn & (1 << 11)) == 0) {
5180 /* Two register misc. */
5181 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5182 size = (insn >> 18) & 3;
5183 switch (op) {
5184 case 0: /* VREV64 */
5185 if (size == 3)
5186 return 1;
5187 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5188 tmp = neon_load_reg(rm, pass * 2);
5189 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5190 switch (size) {
dd8fbd78
FN
5191 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5192 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5193 case 2: /* no-op */ break;
5194 default: abort();
5195 }
dd8fbd78 5196 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5197 if (size == 2) {
dd8fbd78 5198 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5199 } else {
9ee6e8bb 5200 switch (size) {
dd8fbd78
FN
5201 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5202 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5203 default: abort();
5204 }
dd8fbd78 5205 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5206 }
5207 }
5208 break;
5209 case 4: case 5: /* VPADDL */
5210 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5211 if (size == 3)
5212 return 1;
ad69471c
PB
5213 for (pass = 0; pass < q + 1; pass++) {
5214 tmp = neon_load_reg(rm, pass * 2);
5215 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5216 tmp = neon_load_reg(rm, pass * 2 + 1);
5217 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5218 switch (size) {
5219 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5220 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5221 case 2: tcg_gen_add_i64(CPU_V001); break;
5222 default: abort();
5223 }
9ee6e8bb
PB
5224 if (op >= 12) {
5225 /* Accumulate. */
ad69471c
PB
5226 neon_load_reg64(cpu_V1, rd + pass);
5227 gen_neon_addl(size);
9ee6e8bb 5228 }
ad69471c 5229 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5230 }
5231 break;
5232 case 33: /* VTRN */
5233 if (size == 2) {
5234 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5235 tmp = neon_load_reg(rm, n);
5236 tmp2 = neon_load_reg(rd, n + 1);
5237 neon_store_reg(rm, n, tmp2);
5238 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5239 }
5240 } else {
5241 goto elementwise;
5242 }
5243 break;
5244 case 34: /* VUZP */
5245 /* Reg Before After
5246 Rd A3 A2 A1 A0 B2 B0 A2 A0
5247 Rm B3 B2 B1 B0 B3 B1 A3 A1
5248 */
5249 if (size == 3)
5250 return 1;
5251 gen_neon_unzip(rd, q, 0, size);
5252 gen_neon_unzip(rm, q, 4, size);
5253 if (q) {
5254 static int unzip_order_q[8] =
5255 {0, 2, 4, 6, 1, 3, 5, 7};
5256 for (n = 0; n < 8; n++) {
5257 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5258 tmp = neon_load_scratch(unzip_order_q[n]);
5259 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5260 }
5261 } else {
5262 static int unzip_order[4] =
5263 {0, 4, 1, 5};
5264 for (n = 0; n < 4; n++) {
5265 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5266 tmp = neon_load_scratch(unzip_order[n]);
5267 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5268 }
5269 }
5270 break;
5271 case 35: /* VZIP */
5272 /* Reg Before After
5273 Rd A3 A2 A1 A0 B1 A1 B0 A0
5274 Rm B3 B2 B1 B0 B3 A3 B2 A2
5275 */
5276 if (size == 3)
5277 return 1;
5278 count = (q ? 4 : 2);
5279 for (n = 0; n < count; n++) {
dd8fbd78
FN
5280 tmp = neon_load_reg(rd, n);
 5281 tmp2 = neon_load_reg(rm, n);
9ee6e8bb 5282 switch (size) {
dd8fbd78
FN
5283 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5284 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5285 case 2: /* no-op */; break;
5286 default: abort();
5287 }
dd8fbd78
FN
5288 neon_store_scratch(n * 2, tmp);
5289 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5290 }
5291 for (n = 0; n < count * 2; n++) {
5292 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5293 tmp = neon_load_scratch(n);
5294 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5295 }
5296 break;
5297 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5298 if (size == 3)
5299 return 1;
a50f5b91 5300 TCGV_UNUSED(tmp2);
9ee6e8bb 5301 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5302 neon_load_reg64(cpu_V0, rm + pass);
5303 tmp = new_tmp();
9ee6e8bb 5304 if (op == 36 && q == 0) {
ad69471c 5305 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5306 } else if (q) {
ad69471c 5307 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5308 } else {
ad69471c
PB
5309 gen_neon_narrow_sats(size, tmp, cpu_V0);
5310 }
5311 if (pass == 0) {
5312 tmp2 = tmp;
5313 } else {
5314 neon_store_reg(rd, 0, tmp2);
5315 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5316 }
9ee6e8bb
PB
5317 }
5318 break;
5319 case 38: /* VSHLL */
ad69471c 5320 if (q || size == 3)
9ee6e8bb 5321 return 1;
ad69471c
PB
5322 tmp = neon_load_reg(rm, 0);
5323 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5324 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5325 if (pass == 1)
5326 tmp = tmp2;
5327 gen_neon_widen(cpu_V0, tmp, size, 1);
5328 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5329 }
5330 break;
5331 default:
5332 elementwise:
5333 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5334 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5335 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5336 neon_reg_offset(rm, pass));
dd8fbd78 5337 TCGV_UNUSED(tmp);
9ee6e8bb 5338 } else {
dd8fbd78 5339 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5340 }
5341 switch (op) {
5342 case 1: /* VREV32 */
5343 switch (size) {
dd8fbd78
FN
5344 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5345 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5346 default: return 1;
5347 }
5348 break;
5349 case 2: /* VREV16 */
5350 if (size != 0)
5351 return 1;
dd8fbd78 5352 gen_rev16(tmp);
9ee6e8bb 5353 break;
9ee6e8bb
PB
5354 case 8: /* CLS */
5355 switch (size) {
dd8fbd78
FN
5356 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5357 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5358 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5359 default: return 1;
5360 }
5361 break;
5362 case 9: /* CLZ */
5363 switch (size) {
dd8fbd78
FN
5364 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5365 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5366 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5367 default: return 1;
5368 }
5369 break;
5370 case 10: /* CNT */
5371 if (size != 0)
5372 return 1;
dd8fbd78 5373 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5374 break;
5375 case 11: /* VNOT */
5376 if (size != 0)
5377 return 1;
dd8fbd78 5378 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5379 break;
5380 case 14: /* VQABS */
5381 switch (size) {
dd8fbd78
FN
5382 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5383 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5384 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5385 default: return 1;
5386 }
5387 break;
5388 case 15: /* VQNEG */
5389 switch (size) {
dd8fbd78
FN
5390 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5391 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5392 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5393 default: return 1;
5394 }
5395 break;
5396 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5397 tmp2 = tcg_const_i32(0);
9ee6e8bb 5398 switch(size) {
dd8fbd78
FN
5399 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5400 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5401 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5402 default: return 1;
5403 }
dd8fbd78 5404 tcg_temp_free(tmp2);
9ee6e8bb 5405 if (op == 19)
dd8fbd78 5406 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5407 break;
5408 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5409 tmp2 = tcg_const_i32(0);
9ee6e8bb 5410 switch(size) {
dd8fbd78
FN
5411 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5412 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5413 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5414 default: return 1;
5415 }
dd8fbd78 5416 tcg_temp_free(tmp2);
9ee6e8bb 5417 if (op == 20)
dd8fbd78 5418 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5419 break;
5420 case 18: /* VCEQ #0 */
dd8fbd78 5421 tmp2 = tcg_const_i32(0);
9ee6e8bb 5422 switch(size) {
dd8fbd78
FN
5423 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5424 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5425 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5426 default: return 1;
5427 }
dd8fbd78 5428 tcg_temp_free(tmp2);
9ee6e8bb
PB
5429 break;
5430 case 22: /* VABS */
5431 switch(size) {
dd8fbd78
FN
5432 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5433 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5434 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5435 default: return 1;
5436 }
5437 break;
5438 case 23: /* VNEG */
ad69471c
PB
5439 if (size == 3)
5440 return 1;
dd8fbd78
FN
5441 tmp2 = tcg_const_i32(0);
5442 gen_neon_rsb(size, tmp, tmp2);
5443 tcg_temp_free(tmp2);
9ee6e8bb
PB
5444 break;
5445 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5446 tmp2 = tcg_const_i32(0);
5447 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5448 tcg_temp_free(tmp2);
9ee6e8bb 5449 if (op == 27)
dd8fbd78 5450 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5451 break;
5452 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5453 tmp2 = tcg_const_i32(0);
5454 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5455 tcg_temp_free(tmp2);
9ee6e8bb 5456 if (op == 28)
dd8fbd78 5457 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5458 break;
5459 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5460 tmp2 = tcg_const_i32(0);
5461 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5462 tcg_temp_free(tmp2);
9ee6e8bb
PB
5463 break;
5464 case 30: /* Float VABS */
4373f3ce 5465 gen_vfp_abs(0);
9ee6e8bb
PB
5466 break;
5467 case 31: /* Float VNEG */
4373f3ce 5468 gen_vfp_neg(0);
9ee6e8bb
PB
5469 break;
5470 case 32: /* VSWP */
dd8fbd78
FN
5471 tmp2 = neon_load_reg(rd, pass);
5472 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5473 break;
5474 case 33: /* VTRN */
dd8fbd78 5475 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5476 switch (size) {
dd8fbd78
FN
5477 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5478 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5479 case 2: abort();
5480 default: return 1;
5481 }
dd8fbd78 5482 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5483 break;
5484 case 56: /* Integer VRECPE */
dd8fbd78 5485 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5486 break;
5487 case 57: /* Integer VRSQRTE */
dd8fbd78 5488 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5489 break;
5490 case 58: /* Float VRECPE */
4373f3ce 5491 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5492 break;
5493 case 59: /* Float VRSQRTE */
4373f3ce 5494 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5495 break;
5496 case 60: /* VCVT.F32.S32 */
4373f3ce 5497 gen_vfp_tosiz(0);
9ee6e8bb
PB
5498 break;
5499 case 61: /* VCVT.F32.U32 */
4373f3ce 5500 gen_vfp_touiz(0);
9ee6e8bb
PB
5501 break;
5502 case 62: /* VCVT.S32.F32 */
4373f3ce 5503 gen_vfp_sito(0);
9ee6e8bb
PB
5504 break;
5505 case 63: /* VCVT.U32.F32 */
4373f3ce 5506 gen_vfp_uito(0);
9ee6e8bb
PB
5507 break;
5508 default:
5509 /* Reserved: 21, 29, 39-56 */
5510 return 1;
5511 }
5512 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5513 tcg_gen_st_f32(cpu_F0s, cpu_env,
5514 neon_reg_offset(rd, pass));
9ee6e8bb 5515 } else {
dd8fbd78 5516 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5517 }
5518 }
5519 break;
5520 }
5521 } else if ((insn & (1 << 10)) == 0) {
5522 /* VTBL, VTBX. */
3018f259 5523 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5524 if (insn & (1 << 6)) {
8f8e3aa4 5525 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5526 } else {
8f8e3aa4
PB
5527 tmp = new_tmp();
5528 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5529 }
8f8e3aa4 5530 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5531 tmp4 = tcg_const_i32(rn);
5532 tmp5 = tcg_const_i32(n);
5533 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
3018f259 5534 dead_tmp(tmp);
9ee6e8bb 5535 if (insn & (1 << 6)) {
8f8e3aa4 5536 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5537 } else {
8f8e3aa4
PB
5538 tmp = new_tmp();
5539 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5540 }
8f8e3aa4 5541 tmp3 = neon_load_reg(rm, 1);
b75263d6 5542 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5543 tcg_temp_free_i32(tmp5);
5544 tcg_temp_free_i32(tmp4);
8f8e3aa4 5545 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5546 neon_store_reg(rd, 1, tmp3);
5547 dead_tmp(tmp);
9ee6e8bb
PB
5548 } else if ((insn & 0x380) == 0) {
5549 /* VDUP */
5550 if (insn & (1 << 19)) {
dd8fbd78 5551 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5552 } else {
dd8fbd78 5553 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5554 }
5555 if (insn & (1 << 16)) {
dd8fbd78 5556 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5557 } else if (insn & (1 << 17)) {
5558 if ((insn >> 18) & 1)
dd8fbd78 5559 gen_neon_dup_high16(tmp);
9ee6e8bb 5560 else
dd8fbd78 5561 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5562 }
5563 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5564 tmp2 = new_tmp();
5565 tcg_gen_mov_i32(tmp2, tmp);
5566 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5567 }
dd8fbd78 5568 dead_tmp(tmp);
9ee6e8bb
PB
5569 } else {
5570 return 1;
5571 }
5572 }
5573 }
5574 return 0;
5575}
5576
fe1479c3
PB
5577static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5578{
5579 int crn = (insn >> 16) & 0xf;
5580 int crm = insn & 0xf;
5581 int op1 = (insn >> 21) & 7;
5582 int op2 = (insn >> 5) & 7;
5583 int rt = (insn >> 12) & 0xf;
5584 TCGv tmp;
5585
5586 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5587 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5588 /* TEECR */
5589 if (IS_USER(s))
5590 return 1;
5591 tmp = load_cpu_field(teecr);
5592 store_reg(s, rt, tmp);
5593 return 0;
5594 }
5595 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5596 /* TEEHBR */
5597 if (IS_USER(s) && (env->teecr & 1))
5598 return 1;
5599 tmp = load_cpu_field(teehbr);
5600 store_reg(s, rt, tmp);
5601 return 0;
5602 }
5603 }
5604 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5605 op1, crn, crm, op2);
5606 return 1;
5607}
5608
5609static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5610{
5611 int crn = (insn >> 16) & 0xf;
5612 int crm = insn & 0xf;
5613 int op1 = (insn >> 21) & 7;
5614 int op2 = (insn >> 5) & 7;
5615 int rt = (insn >> 12) & 0xf;
5616 TCGv tmp;
5617
5618 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5619 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5620 /* TEECR */
5621 if (IS_USER(s))
5622 return 1;
5623 tmp = load_reg(s, rt);
5624 gen_helper_set_teecr(cpu_env, tmp);
5625 dead_tmp(tmp);
5626 return 0;
5627 }
5628 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5629 /* TEEHBR */
5630 if (IS_USER(s) && (env->teecr & 1))
5631 return 1;
5632 tmp = load_reg(s, rt);
5633 store_cpu_field(tmp, teehbr);
5634 return 0;
5635 }
5636 }
5637 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5638 op1, crn, crm, op2);
5639 return 1;
5640}
5641
9ee6e8bb
PB
5642static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5643{
5644 int cpnum;
5645
5646 cpnum = (insn >> 8) & 0xf;
5647 if (arm_feature(env, ARM_FEATURE_XSCALE)
5648 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5649 return 1;
5650
5651 switch (cpnum) {
5652 case 0:
5653 case 1:
5654 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5655 return disas_iwmmxt_insn(env, s, insn);
5656 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5657 return disas_dsp_insn(env, s, insn);
5658 }
5659 return 1;
5660 case 10:
5661 case 11:
5662 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5663 case 14:
5664 /* Coprocessors 7-15 are architecturally reserved by ARM.
5665 Unfortunately Intel decided to ignore this. */
5666 if (arm_feature(env, ARM_FEATURE_XSCALE))
5667 goto board;
5668 if (insn & (1 << 20))
5669 return disas_cp14_read(env, s, insn);
5670 else
5671 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5672 case 15:
5673 return disas_cp15_insn (env, s, insn);
5674 default:
fe1479c3 5675 board:
9ee6e8bb
PB
5676 /* Unknown coprocessor. See if the board has hooked it. */
5677 return disas_cp_insn (env, s, insn);
5678 }
5679}
5680
5e3f878a
PB
5681
5682/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5683static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5684{
5685 TCGv tmp;
5686 tmp = new_tmp();
5687 tcg_gen_trunc_i64_i32(tmp, val);
5688 store_reg(s, rlow, tmp);
5689 tmp = new_tmp();
5690 tcg_gen_shri_i64(val, val, 32);
5691 tcg_gen_trunc_i64_i32(tmp, val);
5692 store_reg(s, rhigh, tmp);
5693}
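/* For example, val = 0x1122334455667788 stores 0x55667788 into rlow and
   0x11223344 into rhigh; gen_addq() below rebuilds a 64-bit value from
   the same register pair when it concatenates the low and high halves
   back into the accumulator. */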
5694
5695/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5696static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5697{
a7812ae4 5698 TCGv_i64 tmp;
5e3f878a
PB
5699 TCGv tmp2;
5700
36aa55dc 5701 /* Load value and extend to 64 bits. */
a7812ae4 5702 tmp = tcg_temp_new_i64();
5e3f878a
PB
5703 tmp2 = load_reg(s, rlow);
5704 tcg_gen_extu_i32_i64(tmp, tmp2);
5705 dead_tmp(tmp2);
5706 tcg_gen_add_i64(val, val, tmp);
b75263d6 5707 tcg_temp_free_i64(tmp);
5e3f878a
PB
5708}
5709
5710/* load and add a 64-bit value from a register pair. */
a7812ae4 5711static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5712{
a7812ae4 5713 TCGv_i64 tmp;
36aa55dc
PB
5714 TCGv tmpl;
5715 TCGv tmph;
5e3f878a
PB
5716
5717 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5718 tmpl = load_reg(s, rlow);
5719 tmph = load_reg(s, rhigh);
a7812ae4 5720 tmp = tcg_temp_new_i64();
36aa55dc
PB
5721 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5722 dead_tmp(tmpl);
5723 dead_tmp(tmph);
5e3f878a 5724 tcg_gen_add_i64(val, val, tmp);
b75263d6 5725 tcg_temp_free_i64(tmp);
5e3f878a
PB
5726}
5727
5728/* Set N and Z flags from a 64-bit value. */
a7812ae4 5729static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5730{
5731 TCGv tmp = new_tmp();
5732 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5733 gen_logic_CC(tmp);
5734 dead_tmp(tmp);
5e3f878a
PB
5735}
5736
9ee6e8bb
PB
5737static void disas_arm_insn(CPUState * env, DisasContext *s)
5738{
5739 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5740 TCGv tmp;
3670669c 5741 TCGv tmp2;
6ddbc6e4 5742 TCGv tmp3;
b0109805 5743 TCGv addr;
a7812ae4 5744 TCGv_i64 tmp64;
9ee6e8bb
PB
5745
5746 insn = ldl_code(s->pc);
5747 s->pc += 4;
5748
5749 /* M variants do not implement ARM mode. */
5750 if (IS_M(env))
5751 goto illegal_op;
5752 cond = insn >> 28;
5753 if (cond == 0xf){
5754 /* Unconditional instructions. */
5755 if (((insn >> 25) & 7) == 1) {
5756 /* NEON Data processing. */
5757 if (!arm_feature(env, ARM_FEATURE_NEON))
5758 goto illegal_op;
5759
5760 if (disas_neon_data_insn(env, s, insn))
5761 goto illegal_op;
5762 return;
5763 }
5764 if ((insn & 0x0f100000) == 0x04000000) {
5765 /* NEON load/store. */
5766 if (!arm_feature(env, ARM_FEATURE_NEON))
5767 goto illegal_op;
5768
5769 if (disas_neon_ls_insn(env, s, insn))
5770 goto illegal_op;
5771 return;
5772 }
5773 if ((insn & 0x0d70f000) == 0x0550f000)
5774 return; /* PLD */
5775 else if ((insn & 0x0ffffdff) == 0x01010000) {
5776 ARCH(6);
5777 /* setend */
5778 if (insn & (1 << 9)) {
5779 /* BE8 mode not implemented. */
5780 goto illegal_op;
5781 }
5782 return;
5783 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5784 switch ((insn >> 4) & 0xf) {
5785 case 1: /* clrex */
5786 ARCH(6K);
8f8e3aa4 5787 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5788 return;
5789 case 4: /* dsb */
5790 case 5: /* dmb */
5791 case 6: /* isb */
5792 ARCH(7);
5793 /* We don't emulate caches, so these are no-ops. */
5794 return;
5795 default:
5796 goto illegal_op;
5797 }
5798 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5799 /* srs */
c67b6b71 5800 int32_t offset;
9ee6e8bb
PB
5801 if (IS_USER(s))
5802 goto illegal_op;
5803 ARCH(6);
5804 op1 = (insn & 0x1f);
5805 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5806 addr = load_reg(s, 13);
9ee6e8bb 5807 } else {
b0109805 5808 addr = new_tmp();
b75263d6
JR
5809 tmp = tcg_const_i32(op1);
5810 gen_helper_get_r13_banked(addr, cpu_env, tmp);
5811 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5812 }
5813 i = (insn >> 23) & 3;
5814 switch (i) {
5815 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5816 case 1: offset = 0; break; /* IA */
5817 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
5818 case 3: offset = 4; break; /* IB */
5819 default: abort();
5820 }
5821 if (offset)
b0109805
PB
5822 tcg_gen_addi_i32(addr, addr, offset);
5823 tmp = load_reg(s, 14);
5824 gen_st32(tmp, addr, 0);
c67b6b71 5825 tmp = load_cpu_field(spsr);
b0109805
PB
5826 tcg_gen_addi_i32(addr, addr, 4);
5827 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5828 if (insn & (1 << 21)) {
5829 /* Base writeback. */
5830 switch (i) {
5831 case 0: offset = -8; break;
c67b6b71
FN
5832 case 1: offset = 4; break;
5833 case 2: offset = -4; break;
9ee6e8bb
PB
5834 case 3: offset = 0; break;
5835 default: abort();
5836 }
5837 if (offset)
c67b6b71 5838 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 5839 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 5840 store_reg(s, 13, addr);
9ee6e8bb 5841 } else {
b75263d6
JR
5842 tmp = tcg_const_i32(op1);
5843 gen_helper_set_r13_banked(cpu_env, tmp, addr);
5844 tcg_temp_free_i32(tmp);
c67b6b71 5845 dead_tmp(addr);
9ee6e8bb 5846 }
b0109805
PB
5847 } else {
5848 dead_tmp(addr);
9ee6e8bb
PB
5849 }
5850 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5851 /* rfe */
c67b6b71 5852 int32_t offset;
9ee6e8bb
PB
5853 if (IS_USER(s))
5854 goto illegal_op;
5855 ARCH(6);
5856 rn = (insn >> 16) & 0xf;
b0109805 5857 addr = load_reg(s, rn);
9ee6e8bb
PB
5858 i = (insn >> 23) & 3;
5859 switch (i) {
b0109805 5860 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5861 case 1: offset = 0; break; /* IA */
5862 case 2: offset = -8; break; /* DB */
b0109805 5863 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5864 default: abort();
5865 }
5866 if (offset)
b0109805
PB
5867 tcg_gen_addi_i32(addr, addr, offset);
5868 /* Load PC into tmp and CPSR into tmp2. */
5869 tmp = gen_ld32(addr, 0);
5870 tcg_gen_addi_i32(addr, addr, 4);
5871 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5872 if (insn & (1 << 21)) {
5873 /* Base writeback. */
5874 switch (i) {
b0109805 5875 case 0: offset = -8; break;
c67b6b71
FN
5876 case 1: offset = 4; break;
5877 case 2: offset = -4; break;
b0109805 5878 case 3: offset = 0; break;
9ee6e8bb
PB
5879 default: abort();
5880 }
5881 if (offset)
b0109805
PB
5882 tcg_gen_addi_i32(addr, addr, offset);
5883 store_reg(s, rn, addr);
5884 } else {
5885 dead_tmp(addr);
9ee6e8bb 5886 }
b0109805 5887 gen_rfe(s, tmp, tmp2);
c67b6b71 5888 return;
9ee6e8bb
PB
5889 } else if ((insn & 0x0e000000) == 0x0a000000) {
5890 /* branch link and change to thumb (blx <offset>) */
5891 int32_t offset;
5892
5893 val = (uint32_t)s->pc;
d9ba4830
PB
5894 tmp = new_tmp();
5895 tcg_gen_movi_i32(tmp, val);
5896 store_reg(s, 14, tmp);
9ee6e8bb
PB
5897 /* Sign-extend the 24-bit offset */
5898 offset = (((int32_t)insn) << 8) >> 8;
5899 /* offset * 4 + bit24 * 2 + (thumb bit) */
5900 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5901 /* pipeline offset */
5902 val += 4;
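            /* val is now the BLX target: address of this insn + 8 + (imm24 * 4)
               + (H bit * 2), with bit 0 set so gen_bx_im enters Thumb state. */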
d9ba4830 5903 gen_bx_im(s, val);
9ee6e8bb
PB
5904 return;
5905 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5906 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5907 /* iWMMXt register transfer. */
5908 if (env->cp15.c15_cpar & (1 << 1))
5909 if (!disas_iwmmxt_insn(env, s, insn))
5910 return;
5911 }
5912 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5913 /* Coprocessor double register transfer. */
5914 } else if ((insn & 0x0f000010) == 0x0e000010) {
5915 /* Additional coprocessor register transfer. */
7997d92f 5916 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5917 uint32_t mask;
5918 uint32_t val;
5919 /* cps (privileged) */
5920 if (IS_USER(s))
5921 return;
5922 mask = val = 0;
5923 if (insn & (1 << 19)) {
5924 if (insn & (1 << 8))
5925 mask |= CPSR_A;
5926 if (insn & (1 << 7))
5927 mask |= CPSR_I;
5928 if (insn & (1 << 6))
5929 mask |= CPSR_F;
5930 if (insn & (1 << 18))
5931 val |= mask;
5932 }
7997d92f 5933 if (insn & (1 << 17)) {
9ee6e8bb
PB
5934 mask |= CPSR_M;
5935 val |= (insn & 0x1f);
5936 }
5937 if (mask) {
2fbac54b 5938 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
5939 }
5940 return;
5941 }
5942 goto illegal_op;
5943 }
5944 if (cond != 0xe) {
5945 /* if not always execute, we generate a conditional jump to
5946 next instruction */
5947 s->condlabel = gen_new_label();
d9ba4830 5948 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5949 s->condjmp = 1;
5950 }
5951 if ((insn & 0x0f900000) == 0x03000000) {
5952 if ((insn & (1 << 21)) == 0) {
5953 ARCH(6T2);
5954 rd = (insn >> 12) & 0xf;
5955 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5956 if ((insn & (1 << 22)) == 0) {
5957 /* MOVW */
5e3f878a
PB
5958 tmp = new_tmp();
5959 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5960 } else {
5961 /* MOVT */
5e3f878a 5962 tmp = load_reg(s, rd);
86831435 5963 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5964 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5965 }
5e3f878a 5966 store_reg(s, rd, tmp);
9ee6e8bb
PB
5967 } else {
5968 if (((insn >> 12) & 0xf) != 0xf)
5969 goto illegal_op;
5970 if (((insn >> 16) & 0xf) == 0) {
5971 gen_nop_hint(s, insn & 0xff);
5972 } else {
5973 /* CPSR = immediate */
5974 val = insn & 0xff;
5975 shift = ((insn >> 8) & 0xf) * 2;
5976 if (shift)
5977 val = (val >> shift) | (val << (32 - shift));
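                /* val now holds the standard ARM modified immediate:
                   imm8 rotated right by 2 * rotate_imm. */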
9ee6e8bb 5978 i = ((insn & (1 << 22)) != 0);
2fbac54b 5979 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
5980 goto illegal_op;
5981 }
5982 }
5983 } else if ((insn & 0x0f900000) == 0x01000000
5984 && (insn & 0x00000090) != 0x00000090) {
5985 /* miscellaneous instructions */
5986 op1 = (insn >> 21) & 3;
5987 sh = (insn >> 4) & 0xf;
5988 rm = insn & 0xf;
5989 switch (sh) {
5990 case 0x0: /* move program status register */
5991 if (op1 & 1) {
5992 /* PSR = reg */
2fbac54b 5993 tmp = load_reg(s, rm);
9ee6e8bb 5994 i = ((op1 & 2) != 0);
2fbac54b 5995 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
5996 goto illegal_op;
5997 } else {
5998 /* reg = PSR */
5999 rd = (insn >> 12) & 0xf;
6000 if (op1 & 2) {
6001 if (IS_USER(s))
6002 goto illegal_op;
d9ba4830 6003 tmp = load_cpu_field(spsr);
9ee6e8bb 6004 } else {
d9ba4830
PB
6005 tmp = new_tmp();
6006 gen_helper_cpsr_read(tmp);
9ee6e8bb 6007 }
d9ba4830 6008 store_reg(s, rd, tmp);
9ee6e8bb
PB
6009 }
6010 break;
6011 case 0x1:
6012 if (op1 == 1) {
6013 /* branch/exchange thumb (bx). */
d9ba4830
PB
6014 tmp = load_reg(s, rm);
6015 gen_bx(s, tmp);
9ee6e8bb
PB
6016 } else if (op1 == 3) {
6017 /* clz */
6018 rd = (insn >> 12) & 0xf;
1497c961
PB
6019 tmp = load_reg(s, rm);
6020 gen_helper_clz(tmp, tmp);
6021 store_reg(s, rd, tmp);
9ee6e8bb
PB
6022 } else {
6023 goto illegal_op;
6024 }
6025 break;
6026 case 0x2:
6027 if (op1 == 1) {
6028 ARCH(5J); /* bxj */
6029 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6030 tmp = load_reg(s, rm);
6031 gen_bx(s, tmp);
9ee6e8bb
PB
6032 } else {
6033 goto illegal_op;
6034 }
6035 break;
6036 case 0x3:
6037 if (op1 != 1)
6038 goto illegal_op;
6039
6040 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6041 tmp = load_reg(s, rm);
6042 tmp2 = new_tmp();
6043 tcg_gen_movi_i32(tmp2, s->pc);
6044 store_reg(s, 14, tmp2);
6045 gen_bx(s, tmp);
9ee6e8bb
PB
6046 break;
6047 case 0x5: /* saturating add/subtract */
6048 rd = (insn >> 12) & 0xf;
6049 rn = (insn >> 16) & 0xf;
b40d0353 6050 tmp = load_reg(s, rm);
5e3f878a 6051 tmp2 = load_reg(s, rn);
9ee6e8bb 6052 if (op1 & 2)
5e3f878a 6053 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6054 if (op1 & 1)
5e3f878a 6055 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6056 else
5e3f878a
PB
6057 gen_helper_add_saturate(tmp, tmp, tmp2);
6058 dead_tmp(tmp2);
6059 store_reg(s, rd, tmp);
9ee6e8bb
PB
6060 break;
6061 case 7: /* bkpt */
6062 gen_set_condexec(s);
5e3f878a 6063 gen_set_pc_im(s->pc - 4);
d9ba4830 6064 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6065 s->is_jmp = DISAS_JUMP;
6066 break;
6067 case 0x8: /* signed multiply */
6068 case 0xa:
6069 case 0xc:
6070 case 0xe:
6071 rs = (insn >> 8) & 0xf;
6072 rn = (insn >> 12) & 0xf;
6073 rd = (insn >> 16) & 0xf;
6074 if (op1 == 1) {
6075 /* (32 * 16) >> 16 */
5e3f878a
PB
6076 tmp = load_reg(s, rm);
6077 tmp2 = load_reg(s, rs);
9ee6e8bb 6078 if (sh & 4)
5e3f878a 6079 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6080 else
5e3f878a 6081 gen_sxth(tmp2);
a7812ae4
PB
6082 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6083 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6084 tmp = new_tmp();
a7812ae4 6085 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6086 tcg_temp_free_i64(tmp64);
9ee6e8bb 6087 if ((sh & 2) == 0) {
5e3f878a
PB
6088 tmp2 = load_reg(s, rn);
6089 gen_helper_add_setq(tmp, tmp, tmp2);
6090 dead_tmp(tmp2);
9ee6e8bb 6091 }
5e3f878a 6092 store_reg(s, rd, tmp);
9ee6e8bb
PB
6093 } else {
6094 /* 16 * 16 */
5e3f878a
PB
6095 tmp = load_reg(s, rm);
6096 tmp2 = load_reg(s, rs);
6097 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6098 dead_tmp(tmp2);
9ee6e8bb 6099 if (op1 == 2) {
a7812ae4
PB
6100 tmp64 = tcg_temp_new_i64();
6101 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6102 dead_tmp(tmp);
a7812ae4
PB
6103 gen_addq(s, tmp64, rn, rd);
6104 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6105 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6106 } else {
6107 if (op1 == 0) {
5e3f878a
PB
6108 tmp2 = load_reg(s, rn);
6109 gen_helper_add_setq(tmp, tmp, tmp2);
6110 dead_tmp(tmp2);
9ee6e8bb 6111 }
5e3f878a 6112 store_reg(s, rd, tmp);
9ee6e8bb
PB
6113 }
6114 }
6115 break;
6116 default:
6117 goto illegal_op;
6118 }
6119 } else if (((insn & 0x0e000000) == 0 &&
6120 (insn & 0x00000090) != 0x90) ||
6121 ((insn & 0x0e000000) == (1 << 25))) {
6122 int set_cc, logic_cc, shiftop;
6123
6124 op1 = (insn >> 21) & 0xf;
6125 set_cc = (insn >> 20) & 1;
6126 logic_cc = table_logic_cc[op1] & set_cc;
6127
6128 /* data processing instruction */
6129 if (insn & (1 << 25)) {
6130 /* immediate operand */
6131 val = insn & 0xff;
6132 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6133 if (shift) {
9ee6e8bb 6134 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6135 }
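            /* Example: imm12 = 0x2ff gives imm8 = 0xff and shift = 2 * 2 = 4,
               so val = ror(0xff, 4) = 0xf000000f. */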
6136 tmp2 = new_tmp();
6137 tcg_gen_movi_i32(tmp2, val);
6138 if (logic_cc && shift) {
6139 gen_set_CF_bit31(tmp2);
6140 }
9ee6e8bb
PB
6141 } else {
6142 /* register */
6143 rm = (insn) & 0xf;
e9bb4aa9 6144 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6145 shiftop = (insn >> 5) & 3;
6146 if (!(insn & (1 << 4))) {
6147 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6148 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6149 } else {
6150 rs = (insn >> 8) & 0xf;
8984bd2e 6151 tmp = load_reg(s, rs);
e9bb4aa9 6152 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6153 }
6154 }
6155 if (op1 != 0x0f && op1 != 0x0d) {
6156 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6157 tmp = load_reg(s, rn);
6158 } else {
6159 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6160 }
6161 rd = (insn >> 12) & 0xf;
6162 switch(op1) {
6163 case 0x00:
e9bb4aa9
JR
6164 tcg_gen_and_i32(tmp, tmp, tmp2);
6165 if (logic_cc) {
6166 gen_logic_CC(tmp);
6167 }
21aeb343 6168 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6169 break;
6170 case 0x01:
e9bb4aa9
JR
6171 tcg_gen_xor_i32(tmp, tmp, tmp2);
6172 if (logic_cc) {
6173 gen_logic_CC(tmp);
6174 }
21aeb343 6175 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6176 break;
6177 case 0x02:
6178 if (set_cc && rd == 15) {
6179 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6180 if (IS_USER(s)) {
9ee6e8bb 6181 goto illegal_op;
e9bb4aa9
JR
6182 }
6183 gen_helper_sub_cc(tmp, tmp, tmp2);
6184 gen_exception_return(s, tmp);
9ee6e8bb 6185 } else {
e9bb4aa9
JR
6186 if (set_cc) {
6187 gen_helper_sub_cc(tmp, tmp, tmp2);
6188 } else {
6189 tcg_gen_sub_i32(tmp, tmp, tmp2);
6190 }
21aeb343 6191 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6192 }
6193 break;
6194 case 0x03:
e9bb4aa9
JR
6195 if (set_cc) {
6196 gen_helper_sub_cc(tmp, tmp2, tmp);
6197 } else {
6198 tcg_gen_sub_i32(tmp, tmp2, tmp);
6199 }
21aeb343 6200 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6201 break;
6202 case 0x04:
e9bb4aa9
JR
6203 if (set_cc) {
6204 gen_helper_add_cc(tmp, tmp, tmp2);
6205 } else {
6206 tcg_gen_add_i32(tmp, tmp, tmp2);
6207 }
21aeb343 6208 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6209 break;
6210 case 0x05:
e9bb4aa9
JR
6211 if (set_cc) {
6212 gen_helper_adc_cc(tmp, tmp, tmp2);
6213 } else {
6214 gen_add_carry(tmp, tmp, tmp2);
6215 }
21aeb343 6216 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6217 break;
6218 case 0x06:
e9bb4aa9
JR
6219 if (set_cc) {
6220 gen_helper_sbc_cc(tmp, tmp, tmp2);
6221 } else {
6222 gen_sub_carry(tmp, tmp, tmp2);
6223 }
21aeb343 6224 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6225 break;
6226 case 0x07:
e9bb4aa9
JR
6227 if (set_cc) {
6228 gen_helper_sbc_cc(tmp, tmp2, tmp);
6229 } else {
6230 gen_sub_carry(tmp, tmp2, tmp);
6231 }
21aeb343 6232 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6233 break;
6234 case 0x08:
6235 if (set_cc) {
e9bb4aa9
JR
6236 tcg_gen_and_i32(tmp, tmp, tmp2);
6237 gen_logic_CC(tmp);
9ee6e8bb 6238 }
e9bb4aa9 6239 dead_tmp(tmp);
9ee6e8bb
PB
6240 break;
6241 case 0x09:
6242 if (set_cc) {
e9bb4aa9
JR
6243 tcg_gen_xor_i32(tmp, tmp, tmp2);
6244 gen_logic_CC(tmp);
9ee6e8bb 6245 }
e9bb4aa9 6246 dead_tmp(tmp);
9ee6e8bb
PB
6247 break;
6248 case 0x0a:
6249 if (set_cc) {
e9bb4aa9 6250 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6251 }
e9bb4aa9 6252 dead_tmp(tmp);
9ee6e8bb
PB
6253 break;
6254 case 0x0b:
6255 if (set_cc) {
e9bb4aa9 6256 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6257 }
e9bb4aa9 6258 dead_tmp(tmp);
9ee6e8bb
PB
6259 break;
6260 case 0x0c:
e9bb4aa9
JR
6261 tcg_gen_or_i32(tmp, tmp, tmp2);
6262 if (logic_cc) {
6263 gen_logic_CC(tmp);
6264 }
21aeb343 6265 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6266 break;
6267 case 0x0d:
6268 if (logic_cc && rd == 15) {
6269 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6270 if (IS_USER(s)) {
9ee6e8bb 6271 goto illegal_op;
e9bb4aa9
JR
6272 }
6273 gen_exception_return(s, tmp2);
9ee6e8bb 6274 } else {
e9bb4aa9
JR
6275 if (logic_cc) {
6276 gen_logic_CC(tmp2);
6277 }
21aeb343 6278 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6279 }
6280 break;
6281 case 0x0e:
f669df27 6282 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6283 if (logic_cc) {
6284 gen_logic_CC(tmp);
6285 }
21aeb343 6286 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6287 break;
6288 default:
6289 case 0x0f:
e9bb4aa9
JR
6290 tcg_gen_not_i32(tmp2, tmp2);
6291 if (logic_cc) {
6292 gen_logic_CC(tmp2);
6293 }
21aeb343 6294 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6295 break;
6296 }
e9bb4aa9
JR
6297 if (op1 != 0x0f && op1 != 0x0d) {
6298 dead_tmp(tmp2);
6299 }
9ee6e8bb
PB
6300 } else {
6301 /* other instructions */
6302 op1 = (insn >> 24) & 0xf;
6303 switch(op1) {
6304 case 0x0:
6305 case 0x1:
6306 /* multiplies, extra load/stores */
6307 sh = (insn >> 5) & 3;
6308 if (sh == 0) {
6309 if (op1 == 0x0) {
6310 rd = (insn >> 16) & 0xf;
6311 rn = (insn >> 12) & 0xf;
6312 rs = (insn >> 8) & 0xf;
6313 rm = (insn) & 0xf;
6314 op1 = (insn >> 20) & 0xf;
6315 switch (op1) {
6316 case 0: case 1: case 2: case 3: case 6:
6317 /* 32 bit mul */
5e3f878a
PB
6318 tmp = load_reg(s, rs);
6319 tmp2 = load_reg(s, rm);
6320 tcg_gen_mul_i32(tmp, tmp, tmp2);
6321 dead_tmp(tmp2);
9ee6e8bb
PB
6322 if (insn & (1 << 22)) {
6323 /* Subtract (mls) */
6324 ARCH(6T2);
5e3f878a
PB
6325 tmp2 = load_reg(s, rn);
6326 tcg_gen_sub_i32(tmp, tmp2, tmp);
6327 dead_tmp(tmp2);
9ee6e8bb
PB
6328 } else if (insn & (1 << 21)) {
6329 /* Add */
5e3f878a
PB
6330 tmp2 = load_reg(s, rn);
6331 tcg_gen_add_i32(tmp, tmp, tmp2);
6332 dead_tmp(tmp2);
9ee6e8bb
PB
6333 }
6334 if (insn & (1 << 20))
5e3f878a
PB
6335 gen_logic_CC(tmp);
6336 store_reg(s, rd, tmp);
9ee6e8bb
PB
6337 break;
6338 default:
6339 /* 64 bit mul */
5e3f878a
PB
6340 tmp = load_reg(s, rs);
6341 tmp2 = load_reg(s, rm);
9ee6e8bb 6342 if (insn & (1 << 22))
a7812ae4 6343 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6344 else
a7812ae4 6345 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6346 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6347 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6348 if (!(insn & (1 << 23))) { /* double accumulate */
6349 ARCH(6);
a7812ae4
PB
6350 gen_addq_lo(s, tmp64, rn);
6351 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6352 }
6353 if (insn & (1 << 20))
a7812ae4
PB
6354 gen_logicq_cc(tmp64);
6355 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6356 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6357 break;
6358 }
6359 } else {
6360 rn = (insn >> 16) & 0xf;
6361 rd = (insn >> 12) & 0xf;
6362 if (insn & (1 << 23)) {
6363 /* load/store exclusive */
86753403
PB
6364 op1 = (insn >> 21) & 0x3;
6365 if (op1)
a47f43d2 6366 ARCH(6K);
86753403
PB
6367 else
6368 ARCH(6);
3174f8e9 6369 addr = tcg_temp_local_new_i32();
98a46317 6370 load_reg_var(s, addr, rn);
9ee6e8bb 6371 if (insn & (1 << 20)) {
3174f8e9 6372 gen_helper_mark_exclusive(cpu_env, addr);
86753403
PB
6373 switch (op1) {
6374 case 0: /* ldrex */
6375 tmp = gen_ld32(addr, IS_USER(s));
6376 break;
6377 case 1: /* ldrexd */
6378 tmp = gen_ld32(addr, IS_USER(s));
6379 store_reg(s, rd, tmp);
6380 tcg_gen_addi_i32(addr, addr, 4);
6381 tmp = gen_ld32(addr, IS_USER(s));
6382 rd++;
6383 break;
6384 case 2: /* ldrexb */
6385 tmp = gen_ld8u(addr, IS_USER(s));
6386 break;
6387 case 3: /* ldrexh */
6388 tmp = gen_ld16u(addr, IS_USER(s));
6389 break;
6390 default:
6391 abort();
6392 }
8f8e3aa4 6393 store_reg(s, rd, tmp);
9ee6e8bb 6394 } else {
8f8e3aa4 6395 int label = gen_new_label();
9ee6e8bb 6396 rm = insn & 0xf;
3174f8e9
FN
6397 tmp2 = tcg_temp_local_new_i32();
6398 gen_helper_test_exclusive(tmp2, cpu_env, addr);
6399 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 6400 tmp = load_reg(s,rm);
86753403
PB
6401 switch (op1) {
6402 case 0: /* strex */
6403 gen_st32(tmp, addr, IS_USER(s));
6404 break;
6405 case 1: /* strexd */
6406 gen_st32(tmp, addr, IS_USER(s));
6407 tcg_gen_addi_i32(addr, addr, 4);
6408 tmp = load_reg(s, rm + 1);
6409 gen_st32(tmp, addr, IS_USER(s));
6410 break;
6411 case 2: /* strexb */
6412 gen_st8(tmp, addr, IS_USER(s));
6413 break;
6414 case 3: /* strexh */
6415 gen_st16(tmp, addr, IS_USER(s));
6416 break;
6417 default:
6418 abort();
6419 }
2637a3be 6420 gen_set_label(label);
3174f8e9
FN
6421 tcg_gen_mov_i32(cpu_R[rd], tmp2);
6422 tcg_temp_free(tmp2);
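                        /* rd now holds the strex status: 0 if the store above
                           was performed, nonzero if the exclusive monitor
                           check failed and the store was skipped. */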
9ee6e8bb 6423 }
3174f8e9 6424 tcg_temp_free(addr);
9ee6e8bb
PB
6425 } else {
6426 /* SWP instruction */
6427 rm = (insn) & 0xf;
6428
8984bd2e
PB
6429 /* ??? This is not really atomic. However, we know
6430 we never have multiple CPUs running in parallel,
6431 so it is good enough. */
6432 addr = load_reg(s, rn);
6433 tmp = load_reg(s, rm);
9ee6e8bb 6434 if (insn & (1 << 22)) {
8984bd2e
PB
6435 tmp2 = gen_ld8u(addr, IS_USER(s));
6436 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6437 } else {
8984bd2e
PB
6438 tmp2 = gen_ld32(addr, IS_USER(s));
6439 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6440 }
8984bd2e
PB
6441 dead_tmp(addr);
6442 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6443 }
6444 }
6445 } else {
6446 int address_offset;
6447 int load;
6448 /* Misc load/store */
6449 rn = (insn >> 16) & 0xf;
6450 rd = (insn >> 12) & 0xf;
b0109805 6451 addr = load_reg(s, rn);
9ee6e8bb 6452 if (insn & (1 << 24))
b0109805 6453 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6454 address_offset = 0;
6455 if (insn & (1 << 20)) {
6456 /* load */
6457 switch(sh) {
6458 case 1:
b0109805 6459 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6460 break;
6461 case 2:
b0109805 6462 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6463 break;
6464 default:
6465 case 3:
b0109805 6466 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6467 break;
6468 }
6469 load = 1;
6470 } else if (sh & 2) {
6471 /* doubleword */
6472 if (sh & 1) {
6473 /* store */
b0109805
PB
6474 tmp = load_reg(s, rd);
6475 gen_st32(tmp, addr, IS_USER(s));
6476 tcg_gen_addi_i32(addr, addr, 4);
6477 tmp = load_reg(s, rd + 1);
6478 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6479 load = 0;
6480 } else {
6481 /* load */
b0109805
PB
6482 tmp = gen_ld32(addr, IS_USER(s));
6483 store_reg(s, rd, tmp);
6484 tcg_gen_addi_i32(addr, addr, 4);
6485 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6486 rd++;
6487 load = 1;
6488 }
6489 address_offset = -4;
6490 } else {
6491 /* store */
b0109805
PB
6492 tmp = load_reg(s, rd);
6493 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6494 load = 0;
6495 }
6496 /* Perform base writeback before the loaded value to
6497 ensure correct behavior with overlapping index registers.
6498 ldrd with base writeback is undefined if the
6499 destination and index registers overlap. */
6500 if (!(insn & (1 << 24))) {
b0109805
PB
6501 gen_add_datah_offset(s, insn, address_offset, addr);
6502 store_reg(s, rn, addr);
9ee6e8bb
PB
6503 } else if (insn & (1 << 21)) {
6504 if (address_offset)
b0109805
PB
6505 tcg_gen_addi_i32(addr, addr, address_offset);
6506 store_reg(s, rn, addr);
6507 } else {
6508 dead_tmp(addr);
9ee6e8bb
PB
6509 }
6510 if (load) {
6511 /* Complete the load. */
b0109805 6512 store_reg(s, rd, tmp);
9ee6e8bb
PB
6513 }
6514 }
6515 break;
6516 case 0x4:
6517 case 0x5:
6518 goto do_ldst;
6519 case 0x6:
6520 case 0x7:
6521 if (insn & (1 << 4)) {
6522 ARCH(6);
6523 /* Armv6 Media instructions. */
6524 rm = insn & 0xf;
6525 rn = (insn >> 16) & 0xf;
2c0262af 6526 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6527 rs = (insn >> 8) & 0xf;
6528 switch ((insn >> 23) & 3) {
6529 case 0: /* Parallel add/subtract. */
6530 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6531 tmp = load_reg(s, rn);
6532 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6533 sh = (insn >> 5) & 7;
6534 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6535 goto illegal_op;
6ddbc6e4
PB
6536 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6537 dead_tmp(tmp2);
6538 store_reg(s, rd, tmp);
9ee6e8bb
PB
6539 break;
6540 case 1:
6541 if ((insn & 0x00700020) == 0) {
6c95676b 6542 /* Halfword pack. */
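                        /* pkhbt: result = Rn[15:0] | (Rm LSL shift)[31:16];
                           pkhtb: result = (Rm ASR shift)[15:0] | Rn[31:16]. */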
3670669c
PB
6543 tmp = load_reg(s, rn);
6544 tmp2 = load_reg(s, rm);
9ee6e8bb 6545 shift = (insn >> 7) & 0x1f;
3670669c
PB
6546 if (insn & (1 << 6)) {
6547 /* pkhtb */
22478e79
AZ
6548 if (shift == 0)
6549 shift = 31;
6550 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6551 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6552 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6553 } else {
6554 /* pkhbt */
22478e79
AZ
6555 if (shift)
6556 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6557 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6558 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6559 }
6560 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6561 dead_tmp(tmp2);
3670669c 6562 store_reg(s, rd, tmp);
9ee6e8bb
PB
6563 } else if ((insn & 0x00200020) == 0x00200000) {
6564 /* [us]sat */
6ddbc6e4 6565 tmp = load_reg(s, rm);
9ee6e8bb
PB
6566 shift = (insn >> 7) & 0x1f;
6567 if (insn & (1 << 6)) {
6568 if (shift == 0)
6569 shift = 31;
6ddbc6e4 6570 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6571 } else {
6ddbc6e4 6572 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6573 }
6574 sh = (insn >> 16) & 0x1f;
6575 if (sh != 0) {
b75263d6 6576 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6577 if (insn & (1 << 22))
b75263d6 6578 gen_helper_usat(tmp, tmp, tmp2);
9ee6e8bb 6579 else
b75263d6
JR
6580 gen_helper_ssat(tmp, tmp, tmp2);
6581 tcg_temp_free_i32(tmp2);
9ee6e8bb 6582 }
6ddbc6e4 6583 store_reg(s, rd, tmp);
9ee6e8bb
PB
6584 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6585 /* [us]sat16 */
6ddbc6e4 6586 tmp = load_reg(s, rm);
9ee6e8bb
PB
6587 sh = (insn >> 16) & 0x1f;
6588 if (sh != 0) {
b75263d6 6589 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6590 if (insn & (1 << 22))
b75263d6 6591 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 6592 else
b75263d6
JR
6593 gen_helper_ssat16(tmp, tmp, tmp2);
6594 tcg_temp_free_i32(tmp2);
9ee6e8bb 6595 }
6ddbc6e4 6596 store_reg(s, rd, tmp);
9ee6e8bb
PB
6597 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6598 /* Select bytes. */
6ddbc6e4
PB
6599 tmp = load_reg(s, rn);
6600 tmp2 = load_reg(s, rm);
6601 tmp3 = new_tmp();
6602 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6603 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6604 dead_tmp(tmp3);
6605 dead_tmp(tmp2);
6606 store_reg(s, rd, tmp);
9ee6e8bb 6607 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6608 tmp = load_reg(s, rm);
9ee6e8bb
PB
6609 shift = (insn >> 10) & 3;
6610 /* ??? In many cases it's not necessary to do a
6611 rotate, a shift is sufficient. */
6612 if (shift != 0)
f669df27 6613 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6614 op1 = (insn >> 20) & 7;
6615 switch (op1) {
5e3f878a
PB
6616 case 0: gen_sxtb16(tmp); break;
6617 case 2: gen_sxtb(tmp); break;
6618 case 3: gen_sxth(tmp); break;
6619 case 4: gen_uxtb16(tmp); break;
6620 case 6: gen_uxtb(tmp); break;
6621 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6622 default: goto illegal_op;
6623 }
6624 if (rn != 15) {
5e3f878a 6625 tmp2 = load_reg(s, rn);
9ee6e8bb 6626 if ((op1 & 3) == 0) {
5e3f878a 6627 gen_add16(tmp, tmp2);
9ee6e8bb 6628 } else {
5e3f878a
PB
6629 tcg_gen_add_i32(tmp, tmp, tmp2);
6630 dead_tmp(tmp2);
9ee6e8bb
PB
6631 }
6632 }
6c95676b 6633 store_reg(s, rd, tmp);
9ee6e8bb
PB
6634 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6635 /* rev */
b0109805 6636 tmp = load_reg(s, rm);
9ee6e8bb
PB
6637 if (insn & (1 << 22)) {
6638 if (insn & (1 << 7)) {
b0109805 6639 gen_revsh(tmp);
9ee6e8bb
PB
6640 } else {
6641 ARCH(6T2);
b0109805 6642 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6643 }
6644 } else {
6645 if (insn & (1 << 7))
b0109805 6646 gen_rev16(tmp);
9ee6e8bb 6647 else
66896cb8 6648 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6649 }
b0109805 6650 store_reg(s, rd, tmp);
9ee6e8bb
PB
6651 } else {
6652 goto illegal_op;
6653 }
6654 break;
6655 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6656 tmp = load_reg(s, rm);
6657 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6658 if (insn & (1 << 20)) {
6659 /* Signed multiply most significant [accumulate]. */
a7812ae4 6660 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6661 if (insn & (1 << 5))
a7812ae4
PB
6662 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6663 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6664 tmp = new_tmp();
a7812ae4 6665 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6666 tcg_temp_free_i64(tmp64);
955a7dd5
AZ
6667 if (rd != 15) {
6668 tmp2 = load_reg(s, rd);
9ee6e8bb 6669 if (insn & (1 << 6)) {
5e3f878a 6670 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6671 } else {
5e3f878a 6672 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6673 }
5e3f878a 6674 dead_tmp(tmp2);
9ee6e8bb 6675 }
955a7dd5 6676 store_reg(s, rn, tmp);
9ee6e8bb
PB
6677 } else {
6678 if (insn & (1 << 5))
5e3f878a
PB
6679 gen_swap_half(tmp2);
6680 gen_smul_dual(tmp, tmp2);
6681 /* This addition cannot overflow. */
6682 if (insn & (1 << 6)) {
6683 tcg_gen_sub_i32(tmp, tmp, tmp2);
6684 } else {
6685 tcg_gen_add_i32(tmp, tmp, tmp2);
6686 }
6687 dead_tmp(tmp2);
9ee6e8bb 6688 if (insn & (1 << 22)) {
5e3f878a 6689 /* smlald, smlsld */
a7812ae4
PB
6690 tmp64 = tcg_temp_new_i64();
6691 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6692 dead_tmp(tmp);
a7812ae4
PB
6693 gen_addq(s, tmp64, rd, rn);
6694 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 6695 tcg_temp_free_i64(tmp64);
9ee6e8bb 6696 } else {
5e3f878a 6697 /* smuad, smusd, smlad, smlsd */
22478e79 6698 if (rd != 15)
9ee6e8bb 6699 {
22478e79 6700 tmp2 = load_reg(s, rd);
5e3f878a
PB
6701 gen_helper_add_setq(tmp, tmp, tmp2);
6702 dead_tmp(tmp2);
9ee6e8bb 6703 }
22478e79 6704 store_reg(s, rn, tmp);
9ee6e8bb
PB
6705 }
6706 }
6707 break;
6708 case 3:
6709 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6710 switch (op1) {
6711 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6712 ARCH(6);
6713 tmp = load_reg(s, rm);
6714 tmp2 = load_reg(s, rs);
6715 gen_helper_usad8(tmp, tmp, tmp2);
6716 dead_tmp(tmp2);
ded9d295
AZ
6717 if (rd != 15) {
6718 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6719 tcg_gen_add_i32(tmp, tmp, tmp2);
6720 dead_tmp(tmp2);
9ee6e8bb 6721 }
ded9d295 6722 store_reg(s, rn, tmp);
9ee6e8bb
PB
6723 break;
6724 case 0x20: case 0x24: case 0x28: case 0x2c:
6725 /* Bitfield insert/clear. */
6726 ARCH(6T2);
6727 shift = (insn >> 7) & 0x1f;
6728 i = (insn >> 16) & 0x1f;
6729 i = i + 1 - shift;
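                    /* shift = lsb, i = field width (msb + 1 - lsb);
                       rm == 15 encodes bfc (insert zeros). */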
6730 if (rm == 15) {
5e3f878a
PB
6731 tmp = new_tmp();
6732 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6733 } else {
5e3f878a 6734 tmp = load_reg(s, rm);
9ee6e8bb
PB
6735 }
6736 if (i != 32) {
5e3f878a 6737 tmp2 = load_reg(s, rd);
8f8e3aa4 6738 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6739 dead_tmp(tmp2);
9ee6e8bb 6740 }
5e3f878a 6741 store_reg(s, rd, tmp);
9ee6e8bb
PB
6742 break;
6743 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6744 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6745 ARCH(6T2);
5e3f878a 6746 tmp = load_reg(s, rm);
9ee6e8bb
PB
6747 shift = (insn >> 7) & 0x1f;
6748 i = ((insn >> 16) & 0x1f) + 1;
6749 if (shift + i > 32)
6750 goto illegal_op;
6751 if (i < 32) {
6752 if (op1 & 0x20) {
5e3f878a 6753 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6754 } else {
5e3f878a 6755 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6756 }
6757 }
5e3f878a 6758 store_reg(s, rd, tmp);
9ee6e8bb
PB
6759 break;
6760 default:
6761 goto illegal_op;
6762 }
6763 break;
6764 }
6765 break;
6766 }
6767 do_ldst:
6768 /* Check for undefined extension instructions
6769 * per the ARM Bible, i.e.:
6770 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6771 */
6772 sh = (0xf << 20) | (0xf << 4);
6773 if (op1 == 0x7 && ((insn & sh) == sh))
6774 {
6775 goto illegal_op;
6776 }
6777 /* load/store byte/word */
6778 rn = (insn >> 16) & 0xf;
6779 rd = (insn >> 12) & 0xf;
b0109805 6780 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6781 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6782 if (insn & (1 << 24))
b0109805 6783 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6784 if (insn & (1 << 20)) {
6785 /* load */
9ee6e8bb 6786 if (insn & (1 << 22)) {
b0109805 6787 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6788 } else {
b0109805 6789 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6790 }
9ee6e8bb
PB
6791 } else {
6792 /* store */
b0109805 6793 tmp = load_reg(s, rd);
9ee6e8bb 6794 if (insn & (1 << 22))
b0109805 6795 gen_st8(tmp, tmp2, i);
9ee6e8bb 6796 else
b0109805 6797 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6798 }
6799 if (!(insn & (1 << 24))) {
b0109805
PB
6800 gen_add_data_offset(s, insn, tmp2);
6801 store_reg(s, rn, tmp2);
6802 } else if (insn & (1 << 21)) {
6803 store_reg(s, rn, tmp2);
6804 } else {
6805 dead_tmp(tmp2);
9ee6e8bb
PB
6806 }
6807 if (insn & (1 << 20)) {
6808 /* Complete the load. */
6809 if (rd == 15)
b0109805 6810 gen_bx(s, tmp);
9ee6e8bb 6811 else
b0109805 6812 store_reg(s, rd, tmp);
9ee6e8bb
PB
6813 }
6814 break;
6815 case 0x08:
6816 case 0x09:
6817 {
6818 int j, n, user, loaded_base;
b0109805 6819 TCGv loaded_var;
9ee6e8bb
PB
6820 /* load/store multiple words */
6821 /* XXX: store correct base if write back */
6822 user = 0;
6823 if (insn & (1 << 22)) {
6824 if (IS_USER(s))
6825 goto illegal_op; /* only usable in supervisor mode */
6826
6827 if ((insn & (1 << 15)) == 0)
6828 user = 1;
6829 }
6830 rn = (insn >> 16) & 0xf;
b0109805 6831 addr = load_reg(s, rn);
9ee6e8bb
PB
6832
6833 /* compute total size */
6834 loaded_base = 0;
a50f5b91 6835 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6836 n = 0;
6837 for(i=0;i<16;i++) {
6838 if (insn & (1 << i))
6839 n++;
6840 }
6841 /* XXX: test invalid n == 0 case ? */
6842 if (insn & (1 << 23)) {
6843 if (insn & (1 << 24)) {
6844 /* pre increment */
b0109805 6845 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6846 } else {
6847 /* post increment */
6848 }
6849 } else {
6850 if (insn & (1 << 24)) {
6851 /* pre decrement */
b0109805 6852 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6853 } else {
6854 /* post decrement */
6855 if (n != 1)
b0109805 6856 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6857 }
6858 }
6859 j = 0;
6860 for(i=0;i<16;i++) {
6861 if (insn & (1 << i)) {
6862 if (insn & (1 << 20)) {
6863 /* load */
b0109805 6864 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6865 if (i == 15) {
b0109805 6866 gen_bx(s, tmp);
9ee6e8bb 6867 } else if (user) {
b75263d6
JR
6868 tmp2 = tcg_const_i32(i);
6869 gen_helper_set_user_reg(tmp2, tmp);
6870 tcg_temp_free_i32(tmp2);
b0109805 6871 dead_tmp(tmp);
9ee6e8bb 6872 } else if (i == rn) {
b0109805 6873 loaded_var = tmp;
9ee6e8bb
PB
6874 loaded_base = 1;
6875 } else {
b0109805 6876 store_reg(s, i, tmp);
9ee6e8bb
PB
6877 }
6878 } else {
6879 /* store */
6880 if (i == 15) {
6881 /* special case: r15 = PC + 8 */
6882 val = (long)s->pc + 4;
b0109805
PB
6883 tmp = new_tmp();
6884 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6885 } else if (user) {
b0109805 6886 tmp = new_tmp();
b75263d6
JR
6887 tmp2 = tcg_const_i32(i);
6888 gen_helper_get_user_reg(tmp, tmp2);
6889 tcg_temp_free_i32(tmp2);
9ee6e8bb 6890 } else {
b0109805 6891 tmp = load_reg(s, i);
9ee6e8bb 6892 }
b0109805 6893 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6894 }
6895 j++;
6896 /* no need to add after the last transfer */
6897 if (j != n)
b0109805 6898 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6899 }
6900 }
6901 if (insn & (1 << 21)) {
6902 /* write back */
6903 if (insn & (1 << 23)) {
6904 if (insn & (1 << 24)) {
6905 /* pre increment */
6906 } else {
6907 /* post increment */
b0109805 6908 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6909 }
6910 } else {
6911 if (insn & (1 << 24)) {
6912 /* pre decrement */
6913 if (n != 1)
b0109805 6914 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6915 } else {
6916 /* post decrement */
b0109805 6917 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6918 }
6919 }
b0109805
PB
6920 store_reg(s, rn, addr);
6921 } else {
6922 dead_tmp(addr);
9ee6e8bb
PB
6923 }
6924 if (loaded_base) {
b0109805 6925 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6926 }
6927 if ((insn & (1 << 22)) && !user) {
6928 /* Restore CPSR from SPSR. */
d9ba4830
PB
6929 tmp = load_cpu_field(spsr);
6930 gen_set_cpsr(tmp, 0xffffffff);
6931 dead_tmp(tmp);
9ee6e8bb
PB
6932 s->is_jmp = DISAS_UPDATE;
6933 }
6934 }
6935 break;
6936 case 0xa:
6937 case 0xb:
6938 {
6939 int32_t offset;
6940
6941 /* branch (and link) */
6942 val = (int32_t)s->pc;
6943 if (insn & (1 << 24)) {
5e3f878a
PB
6944 tmp = new_tmp();
6945 tcg_gen_movi_i32(tmp, val);
6946 store_reg(s, 14, tmp);
9ee6e8bb
PB
6947 }
6948 offset = (((int32_t)insn << 8) >> 8);
6949 val += (offset << 2) + 4;
6950 gen_jmp(s, val);
6951 }
6952 break;
6953 case 0xc:
6954 case 0xd:
6955 case 0xe:
6956 /* Coprocessor. */
6957 if (disas_coproc_insn(env, s, insn))
6958 goto illegal_op;
6959 break;
6960 case 0xf:
6961 /* swi */
5e3f878a 6962 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6963 s->is_jmp = DISAS_SWI;
6964 break;
6965 default:
6966 illegal_op:
6967 gen_set_condexec(s);
5e3f878a 6968 gen_set_pc_im(s->pc - 4);
d9ba4830 6969 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6970 s->is_jmp = DISAS_JUMP;
6971 break;
6972 }
6973 }
6974}
6975
6976/* Return true if this is a Thumb-2 logical op. */
6977static int
6978thumb2_logic_op(int op)
6979{
6980 return (op < 8);
6981}
6982
6983/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6984 then set condition code flags based on the result of the operation.
6985 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6986 to the high bit of T1.
6987 Returns zero if the opcode is valid. */
6988
6989static int
396e467c 6990gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
6991{
6992 int logic_cc;
6993
6994 logic_cc = 0;
6995 switch (op) {
6996 case 0: /* and */
396e467c 6997 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
6998 logic_cc = conds;
6999 break;
7000 case 1: /* bic */
f669df27 7001 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7002 logic_cc = conds;
7003 break;
7004 case 2: /* orr */
396e467c 7005 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7006 logic_cc = conds;
7007 break;
7008 case 3: /* orn */
396e467c
FN
7009 tcg_gen_not_i32(t1, t1);
7010 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7011 logic_cc = conds;
7012 break;
7013 case 4: /* eor */
396e467c 7014 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7015 logic_cc = conds;
7016 break;
7017 case 8: /* add */
7018 if (conds)
396e467c 7019 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7020 else
396e467c 7021 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7022 break;
7023 case 10: /* adc */
7024 if (conds)
396e467c 7025 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7026 else
396e467c 7027 gen_adc(t0, t1);
9ee6e8bb
PB
7028 break;
7029 case 11: /* sbc */
7030 if (conds)
396e467c 7031 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7032 else
396e467c 7033 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7034 break;
7035 case 13: /* sub */
7036 if (conds)
396e467c 7037 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7038 else
396e467c 7039 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7040 break;
7041 case 14: /* rsb */
7042 if (conds)
396e467c 7043 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7044 else
396e467c 7045 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7046 break;
7047 default: /* 5, 6, 7, 9, 12, 15. */
7048 return 1;
7049 }
7050 if (logic_cc) {
396e467c 7051 gen_logic_CC(t0);
9ee6e8bb 7052 if (shifter_out)
396e467c 7053 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7054 }
7055 return 0;
7056}
7057
7058/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7059 is not legal. */
7060static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7061{
b0109805 7062 uint32_t insn, imm, shift, offset;
9ee6e8bb 7063 uint32_t rd, rn, rm, rs;
b26eefb6 7064 TCGv tmp;
6ddbc6e4
PB
7065 TCGv tmp2;
7066 TCGv tmp3;
b0109805 7067 TCGv addr;
a7812ae4 7068 TCGv_i64 tmp64;
9ee6e8bb
PB
7069 int op;
7070 int shiftop;
7071 int conds;
7072 int logic_cc;
7073
7074 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7075 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7076 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7077 16-bit instructions to get correct prefetch abort behavior. */
7078 insn = insn_hw1;
7079 if ((insn & (1 << 12)) == 0) {
7080 /* Second half of blx. */
7081 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7082 tmp = load_reg(s, 14);
7083 tcg_gen_addi_i32(tmp, tmp, offset);
7084 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7085
d9ba4830 7086 tmp2 = new_tmp();
b0109805 7087 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7088 store_reg(s, 14, tmp2);
7089 gen_bx(s, tmp);
9ee6e8bb
PB
7090 return 0;
7091 }
7092 if (insn & (1 << 11)) {
7093 /* Second half of bl. */
7094 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7095 tmp = load_reg(s, 14);
6a0d8a1d 7096 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7097
d9ba4830 7098 tmp2 = new_tmp();
b0109805 7099 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7100 store_reg(s, 14, tmp2);
7101 gen_bx(s, tmp);
9ee6e8bb
PB
7102 return 0;
7103 }
7104 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7105 /* Instruction spans a page boundary. Implement it as two
7106 16-bit instructions in case the second half causes a
7107 prefetch abort. */
7108 offset = ((int32_t)insn << 21) >> 9;
396e467c 7109 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
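            /* First half of bl/blx: LR temporarily holds PC + (sign-extended
               imm11 << 12), where PC is the address of this halfword + 4.
               The second-half handling above adds its own offset and then
               rewrites LR with the real return address. */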
9ee6e8bb
PB
7110 return 0;
7111 }
7112 /* Fall through to 32-bit decode. */
7113 }
7114
7115 insn = lduw_code(s->pc);
7116 s->pc += 2;
7117 insn |= (uint32_t)insn_hw1 << 16;
7118
7119 if ((insn & 0xf800e800) != 0xf000e800) {
7120 ARCH(6T2);
7121 }
7122
7123 rn = (insn >> 16) & 0xf;
7124 rs = (insn >> 12) & 0xf;
7125 rd = (insn >> 8) & 0xf;
7126 rm = insn & 0xf;
7127 switch ((insn >> 25) & 0xf) {
7128 case 0: case 1: case 2: case 3:
7129 /* 16-bit instructions. Should never happen. */
7130 abort();
7131 case 4:
7132 if (insn & (1 << 22)) {
7133 /* Other load/store, table branch. */
7134 if (insn & 0x01200000) {
7135 /* Load/store doubleword. */
7136 if (rn == 15) {
b0109805
PB
7137 addr = new_tmp();
7138 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7139 } else {
b0109805 7140 addr = load_reg(s, rn);
9ee6e8bb
PB
7141 }
7142 offset = (insn & 0xff) * 4;
7143 if ((insn & (1 << 23)) == 0)
7144 offset = -offset;
7145 if (insn & (1 << 24)) {
b0109805 7146 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7147 offset = 0;
7148 }
7149 if (insn & (1 << 20)) {
7150 /* ldrd */
b0109805
PB
7151 tmp = gen_ld32(addr, IS_USER(s));
7152 store_reg(s, rs, tmp);
7153 tcg_gen_addi_i32(addr, addr, 4);
7154 tmp = gen_ld32(addr, IS_USER(s));
7155 store_reg(s, rd, tmp);
9ee6e8bb
PB
7156 } else {
7157 /* strd */
b0109805
PB
7158 tmp = load_reg(s, rs);
7159 gen_st32(tmp, addr, IS_USER(s));
7160 tcg_gen_addi_i32(addr, addr, 4);
7161 tmp = load_reg(s, rd);
7162 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7163 }
7164 if (insn & (1 << 21)) {
7165 /* Base writeback. */
7166 if (rn == 15)
7167 goto illegal_op;
b0109805
PB
7168 tcg_gen_addi_i32(addr, addr, offset - 4);
7169 store_reg(s, rn, addr);
7170 } else {
7171 dead_tmp(addr);
9ee6e8bb
PB
7172 }
7173 } else if ((insn & (1 << 23)) == 0) {
7174 /* Load/store exclusive word. */
3174f8e9 7175 addr = tcg_temp_local_new();
98a46317 7176 load_reg_var(s, addr, rn);
2c0262af 7177 if (insn & (1 << 20)) {
3174f8e9 7178 gen_helper_mark_exclusive(cpu_env, addr);
8f8e3aa4
PB
7179 tmp = gen_ld32(addr, IS_USER(s));
7180 store_reg(s, rd, tmp);
9ee6e8bb 7181 } else {
8f8e3aa4 7182 int label = gen_new_label();
3174f8e9
FN
7183 tmp2 = tcg_temp_local_new();
7184 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7185 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7186 tmp = load_reg(s, rs);
3174f8e9 7187 gen_st32(tmp, addr, IS_USER(s));
8f8e3aa4 7188 gen_set_label(label);
3174f8e9
FN
7189 tcg_gen_mov_i32(cpu_R[rd], tmp2);
7190 tcg_temp_free(tmp2);
9ee6e8bb 7191 }
3174f8e9 7192 tcg_temp_free(addr);
9ee6e8bb
PB
7193 } else if ((insn & (1 << 6)) == 0) {
7194 /* Table Branch. */
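                    /* tbb/tbh: branch to PC + 2 * zero-extended table entry,
                       loaded from Rn + Rm (tbb) or Rn + 2 * Rm (tbh). */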
7195 if (rn == 15) {
b0109805
PB
7196 addr = new_tmp();
7197 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7198 } else {
b0109805 7199 addr = load_reg(s, rn);
9ee6e8bb 7200 }
b26eefb6 7201 tmp = load_reg(s, rm);
b0109805 7202 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7203 if (insn & (1 << 4)) {
7204 /* tbh */
b0109805 7205 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7206 dead_tmp(tmp);
b0109805 7207 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7208 } else { /* tbb */
b26eefb6 7209 dead_tmp(tmp);
b0109805 7210 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7211 }
b0109805
PB
7212 dead_tmp(addr);
7213 tcg_gen_shli_i32(tmp, tmp, 1);
7214 tcg_gen_addi_i32(tmp, tmp, s->pc);
7215 store_reg(s, 15, tmp);
9ee6e8bb
PB
7216 } else {
7217 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7218 /* ??? These are not really atomic. However, we know
7219 we never have multiple CPUs running in parallel,
7220 so it is good enough. */
9ee6e8bb 7221 op = (insn >> 4) & 0x3;
3174f8e9 7222 addr = tcg_temp_local_new();
98a46317 7223 load_reg_var(s, addr, rn);
9ee6e8bb 7224 if (insn & (1 << 20)) {
8f8e3aa4 7225 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7226 switch (op) {
7227 case 0:
8f8e3aa4 7228 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7229 break;
2c0262af 7230 case 1:
8f8e3aa4 7231 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7232 break;
9ee6e8bb 7233 case 3:
8f8e3aa4
PB
7234 tmp = gen_ld32(addr, IS_USER(s));
7235 tcg_gen_addi_i32(addr, addr, 4);
7236 tmp2 = gen_ld32(addr, IS_USER(s));
7237 store_reg(s, rd, tmp2);
2c0262af
FB
7238 break;
7239 default:
9ee6e8bb
PB
7240 goto illegal_op;
7241 }
8f8e3aa4 7242 store_reg(s, rs, tmp);
9ee6e8bb 7243 } else {
8f8e3aa4 7244 int label = gen_new_label();
3174f8e9
FN
7245 tmp2 = tcg_temp_local_new();
7246 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7247 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7248 tmp = load_reg(s, rs);
9ee6e8bb
PB
7249 switch (op) {
7250 case 0:
8f8e3aa4 7251 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7252 break;
7253 case 1:
8f8e3aa4 7254 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7255 break;
2c0262af 7256 case 3:
8f8e3aa4
PB
7257 gen_st32(tmp, addr, IS_USER(s));
7258 tcg_gen_addi_i32(addr, addr, 4);
7259 tmp = load_reg(s, rd);
7260 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7261 break;
9ee6e8bb
PB
7262 default:
7263 goto illegal_op;
2c0262af 7264 }
8f8e3aa4 7265 gen_set_label(label);
3174f8e9
FN
7266 tcg_gen_mov_i32(cpu_R[rm], tmp2);
7267 tcg_temp_free(tmp2);
9ee6e8bb 7268 }
3174f8e9 7269 tcg_temp_free(addr);
9ee6e8bb
PB
7270 }
7271 } else {
7272 /* Load/store multiple, RFE, SRS. */
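            /* Bits [24:23] equal (00 or 11) selects the RFE/SRS forms below;
               01 and 10 are the normal LDM/STM forms. */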
7273 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7274 /* Not available in user mode. */
b0109805 7275 if (IS_USER(s))
9ee6e8bb
PB
7276 goto illegal_op;
7277 if (insn & (1 << 20)) {
7278 /* rfe */
b0109805
PB
7279 addr = load_reg(s, rn);
7280 if ((insn & (1 << 24)) == 0)
7281 tcg_gen_addi_i32(addr, addr, -8);
7282 /* Load PC into tmp and CPSR into tmp2. */
7283 tmp = gen_ld32(addr, 0);
7284 tcg_gen_addi_i32(addr, addr, 4);
7285 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7286 if (insn & (1 << 21)) {
7287 /* Base writeback. */
b0109805
PB
7288 if (insn & (1 << 24)) {
7289 tcg_gen_addi_i32(addr, addr, 4);
7290 } else {
7291 tcg_gen_addi_i32(addr, addr, -4);
7292 }
7293 store_reg(s, rn, addr);
7294 } else {
7295 dead_tmp(addr);
9ee6e8bb 7296 }
b0109805 7297 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7298 } else {
7299 /* srs */
7300 op = (insn & 0x1f);
7301 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7302 addr = load_reg(s, 13);
9ee6e8bb 7303 } else {
b0109805 7304 addr = new_tmp();
b75263d6
JR
7305 tmp = tcg_const_i32(op);
7306 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7307 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7308 }
7309 if ((insn & (1 << 24)) == 0) {
b0109805 7310 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7311 }
b0109805
PB
7312 tmp = load_reg(s, 14);
7313 gen_st32(tmp, addr, 0);
7314 tcg_gen_addi_i32(addr, addr, 4);
7315 tmp = new_tmp();
7316 gen_helper_cpsr_read(tmp);
7317 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7318 if (insn & (1 << 21)) {
7319 if ((insn & (1 << 24)) == 0) {
b0109805 7320 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7321 } else {
b0109805 7322 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7323 }
7324 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7325 store_reg(s, 13, addr);
9ee6e8bb 7326 } else {
b75263d6
JR
7327 tmp = tcg_const_i32(op);
7328 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7329 tcg_temp_free_i32(tmp);
9ee6e8bb 7330 }
b0109805
PB
7331 } else {
7332 dead_tmp(addr);
9ee6e8bb
PB
7333 }
7334 }
7335 } else {
7336 int i;
7337 /* Load/store multiple. */
b0109805 7338 addr = load_reg(s, rn);
9ee6e8bb
PB
7339 offset = 0;
7340 for (i = 0; i < 16; i++) {
7341 if (insn & (1 << i))
7342 offset += 4;
7343 }
7344 if (insn & (1 << 24)) {
b0109805 7345 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7346 }
7347
7348 for (i = 0; i < 16; i++) {
7349 if ((insn & (1 << i)) == 0)
7350 continue;
7351 if (insn & (1 << 20)) {
7352 /* Load. */
b0109805 7353 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7354 if (i == 15) {
b0109805 7355 gen_bx(s, tmp);
9ee6e8bb 7356 } else {
b0109805 7357 store_reg(s, i, tmp);
9ee6e8bb
PB
7358 }
7359 } else {
7360 /* Store. */
b0109805
PB
7361 tmp = load_reg(s, i);
7362 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7363 }
b0109805 7364 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7365 }
7366 if (insn & (1 << 21)) {
7367 /* Base register writeback. */
7368 if (insn & (1 << 24)) {
b0109805 7369 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7370 }
7371 /* Fault if writeback register is in register list. */
7372 if (insn & (1 << rn))
7373 goto illegal_op;
b0109805
PB
7374 store_reg(s, rn, addr);
7375 } else {
7376 dead_tmp(addr);
9ee6e8bb
PB
7377 }
7378 }
7379 }
7380 break;
7381 case 5: /* Data processing register constant shift. */
3174f8e9
FN
7382 if (rn == 15) {
7383 tmp = new_tmp();
7384 tcg_gen_movi_i32(tmp, 0);
7385 } else {
7386 tmp = load_reg(s, rn);
7387 }
7388 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7389 op = (insn >> 21) & 0xf;
7390 shiftop = (insn >> 4) & 3;
7391 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
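        /* Immediate shift amount is imm3:imm2, i.e. insn[14:12]:insn[7:6]. */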
7392 conds = (insn & (1 << 20)) != 0;
7393 logic_cc = (conds && thumb2_logic_op(op));
3174f8e9
FN
7394 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7395 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9ee6e8bb 7396 goto illegal_op;
3174f8e9
FN
7397 dead_tmp(tmp2);
7398 if (rd != 15) {
7399 store_reg(s, rd, tmp);
7400 } else {
7401 dead_tmp(tmp);
7402 }
9ee6e8bb
PB
7403 break;
7404 case 13: /* Misc data processing. */
7405 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7406 if (op < 4 && (insn & 0xf000) != 0xf000)
7407 goto illegal_op;
7408 switch (op) {
7409 case 0: /* Register controlled shift. */
8984bd2e
PB
7410 tmp = load_reg(s, rn);
7411 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7412 if ((insn & 0x70) != 0)
7413 goto illegal_op;
7414 op = (insn >> 21) & 3;
8984bd2e
PB
7415 logic_cc = (insn & (1 << 20)) != 0;
7416 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7417 if (logic_cc)
7418 gen_logic_CC(tmp);
21aeb343 7419 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7420 break;
7421 case 1: /* Sign/zero extend. */
5e3f878a 7422 tmp = load_reg(s, rm);
9ee6e8bb
PB
7423 shift = (insn >> 4) & 3;
7424 /* ??? In many cases it's not necessary to do a
7425 rotate, a shift is sufficient. */
7426 if (shift != 0)
f669df27 7427 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7428 op = (insn >> 20) & 7;
7429 switch (op) {
5e3f878a
PB
7430 case 0: gen_sxth(tmp); break;
7431 case 1: gen_uxth(tmp); break;
7432 case 2: gen_sxtb16(tmp); break;
7433 case 3: gen_uxtb16(tmp); break;
7434 case 4: gen_sxtb(tmp); break;
7435 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7436 default: goto illegal_op;
7437 }
7438 if (rn != 15) {
5e3f878a 7439 tmp2 = load_reg(s, rn);
9ee6e8bb 7440 if ((op >> 1) == 1) {
5e3f878a 7441 gen_add16(tmp, tmp2);
9ee6e8bb 7442 } else {
5e3f878a
PB
7443 tcg_gen_add_i32(tmp, tmp, tmp2);
7444 dead_tmp(tmp2);
9ee6e8bb
PB
7445 }
7446 }
5e3f878a 7447 store_reg(s, rd, tmp);
9ee6e8bb
PB
7448 break;
7449 case 2: /* SIMD add/subtract. */
7450 op = (insn >> 20) & 7;
7451 shift = (insn >> 4) & 7;
7452 if ((op & 3) == 3 || (shift & 3) == 3)
7453 goto illegal_op;
6ddbc6e4
PB
7454 tmp = load_reg(s, rn);
7455 tmp2 = load_reg(s, rm);
7456 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7457 dead_tmp(tmp2);
7458 store_reg(s, rd, tmp);
9ee6e8bb
PB
7459 break;
7460 case 3: /* Other data processing. */
7461 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7462 if (op < 4) {
7463 /* Saturating add/subtract. */
d9ba4830
PB
7464 tmp = load_reg(s, rn);
7465 tmp2 = load_reg(s, rm);
9ee6e8bb 7466 if (op & 2)
d9ba4830 7467 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7468 if (op & 1)
d9ba4830 7469 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7470 else
d9ba4830
PB
7471 gen_helper_add_saturate(tmp, tmp, tmp2);
7472 dead_tmp(tmp2);
9ee6e8bb 7473 } else {
d9ba4830 7474 tmp = load_reg(s, rn);
9ee6e8bb
PB
7475 switch (op) {
7476 case 0x0a: /* rbit */
d9ba4830 7477 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7478 break;
7479 case 0x08: /* rev */
66896cb8 7480 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7481 break;
7482 case 0x09: /* rev16 */
d9ba4830 7483 gen_rev16(tmp);
9ee6e8bb
PB
7484 break;
7485 case 0x0b: /* revsh */
d9ba4830 7486 gen_revsh(tmp);
9ee6e8bb
PB
7487 break;
7488 case 0x10: /* sel */
d9ba4830 7489 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7490 tmp3 = new_tmp();
7491 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7492 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7493 dead_tmp(tmp3);
d9ba4830 7494 dead_tmp(tmp2);
9ee6e8bb
PB
7495 break;
7496 case 0x18: /* clz */
d9ba4830 7497 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7498 break;
7499 default:
7500 goto illegal_op;
7501 }
7502 }
d9ba4830 7503 store_reg(s, rd, tmp);
9ee6e8bb
PB
7504 break;
7505 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7506 op = (insn >> 4) & 0xf;
d9ba4830
PB
7507 tmp = load_reg(s, rn);
7508 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7509 switch ((insn >> 20) & 7) {
7510 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7511 tcg_gen_mul_i32(tmp, tmp, tmp2);
7512 dead_tmp(tmp2);
9ee6e8bb 7513 if (rs != 15) {
d9ba4830 7514 tmp2 = load_reg(s, rs);
9ee6e8bb 7515 if (op)
d9ba4830 7516 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7517 else
d9ba4830
PB
7518 tcg_gen_add_i32(tmp, tmp, tmp2);
7519 dead_tmp(tmp2);
9ee6e8bb 7520 }
9ee6e8bb
PB
7521 break;
7522 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7523 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7524 dead_tmp(tmp2);
9ee6e8bb 7525 if (rs != 15) {
d9ba4830
PB
7526 tmp2 = load_reg(s, rs);
7527 gen_helper_add_setq(tmp, tmp, tmp2);
7528 dead_tmp(tmp2);
9ee6e8bb 7529 }
9ee6e8bb
PB
7530 break;
7531 case 2: /* Dual multiply add. */
7532 case 4: /* Dual multiply subtract. */
7533 if (op)
d9ba4830
PB
7534 gen_swap_half(tmp2);
7535 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7536 /* This addition cannot overflow. */
7537 if (insn & (1 << 22)) {
d9ba4830 7538 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7539 } else {
d9ba4830 7540 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7541 }
d9ba4830 7542 dead_tmp(tmp2);
9ee6e8bb
PB
7543 if (rs != 15)
7544 {
d9ba4830
PB
7545 tmp2 = load_reg(s, rs);
7546 gen_helper_add_setq(tmp, tmp, tmp2);
7547 dead_tmp(tmp2);
9ee6e8bb 7548 }
9ee6e8bb
PB
7549 break;
7550 case 3: /* 32 * 16 -> 32msb */
7551 if (op)
d9ba4830 7552 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7553 else
d9ba4830 7554 gen_sxth(tmp2);
a7812ae4
PB
7555 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7556 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7557 tmp = new_tmp();
a7812ae4 7558 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7559 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7560 if (rs != 15)
7561 {
d9ba4830
PB
7562 tmp2 = load_reg(s, rs);
7563 gen_helper_add_setq(tmp, tmp, tmp2);
7564 dead_tmp(tmp2);
9ee6e8bb 7565 }
9ee6e8bb
PB
7566 break;
7567 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7568 gen_imull(tmp, tmp2);
7569 if (insn & (1 << 5)) {
7570 gen_roundqd(tmp, tmp2);
7571 dead_tmp(tmp2);
7572 } else {
7573 dead_tmp(tmp);
7574 tmp = tmp2;
7575 }
9ee6e8bb 7576 if (rs != 15) {
d9ba4830 7577 tmp2 = load_reg(s, rs);
9ee6e8bb 7578 if (insn & (1 << 21)) {
d9ba4830 7579 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7580 } else {
d9ba4830 7581 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7582 }
d9ba4830 7583 dead_tmp(tmp2);
2c0262af 7584 }
9ee6e8bb
PB
7585 break;
7586 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7587 gen_helper_usad8(tmp, tmp, tmp2);
7588 dead_tmp(tmp2);
9ee6e8bb 7589 if (rs != 15) {
d9ba4830
PB
7590 tmp2 = load_reg(s, rs);
7591 tcg_gen_add_i32(tmp, tmp, tmp2);
7592 dead_tmp(tmp2);
5fd46862 7593 }
9ee6e8bb 7594 break;
2c0262af 7595 }
d9ba4830 7596 store_reg(s, rd, tmp);
2c0262af 7597 break;
9ee6e8bb
PB
7598 case 6: case 7: /* 64-bit multiply, Divide. */
7599 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7600 tmp = load_reg(s, rn);
7601 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7602 if ((op & 0x50) == 0x10) {
7603 /* sdiv, udiv */
7604 if (!arm_feature(env, ARM_FEATURE_DIV))
7605 goto illegal_op;
7606 if (op & 0x20)
5e3f878a 7607 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7608 else
5e3f878a
PB
7609 gen_helper_sdiv(tmp, tmp, tmp2);
7610 dead_tmp(tmp2);
7611 store_reg(s, rd, tmp);
9ee6e8bb
PB
7612 } else if ((op & 0xe) == 0xc) {
7613 /* Dual multiply accumulate long. */
7614 if (op & 1)
5e3f878a
PB
7615 gen_swap_half(tmp2);
7616 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7617 if (op & 0x10) {
5e3f878a 7618 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7619 } else {
5e3f878a 7620 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7621 }
5e3f878a 7622 dead_tmp(tmp2);
a7812ae4
PB
7623 /* BUGFIX: sign-extend the 32-bit result to 64 bits before accumulating. */
7624 tmp64 = tcg_temp_new_i64();
7625 tcg_gen_ext_i32_i64(tmp64, tmp);
7626 dead_tmp(tmp);
7627 gen_addq(s, tmp64, rs, rd);
7628 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7629 tcg_temp_free_i64(tmp64);
2c0262af 7630 } else {
9ee6e8bb
PB
7631 if (op & 0x20) {
7632 /* Unsigned 64-bit multiply */
a7812ae4 7633 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7634 } else {
9ee6e8bb
PB
7635 if (op & 8) {
7636 /* smlalxy */
5e3f878a
PB
7637 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7638 dead_tmp(tmp2);
a7812ae4
PB
7639 tmp64 = tcg_temp_new_i64();
7640 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7641 dead_tmp(tmp);
9ee6e8bb
PB
7642 } else {
7643 /* Signed 64-bit multiply */
a7812ae4 7644 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7645 }
b5ff1b31 7646 }
9ee6e8bb
PB
7647 if (op & 4) {
7648 /* umaal */
a7812ae4
PB
7649 gen_addq_lo(s, tmp64, rs);
7650 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7651 } else if (op & 0x40) {
7652 /* 64-bit accumulate. */
a7812ae4 7653 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7654 }
a7812ae4 7655 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7656 tcg_temp_free_i64(tmp64);
5fd46862 7657 }
2c0262af 7658 break;
9ee6e8bb
PB
7659 }
7660 break;
7661 case 6: case 7: case 14: case 15:
7662 /* Coprocessor. */
7663 if (((insn >> 24) & 3) == 3) {
7664 /* Translate into the equivalent ARM encoding. */
7665 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
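/* insn[28] (the U bit in the Thumb encoding) is moved down to insn[24],
   where the ARM Neon data-processing encoding keeps it; bits 28:26 are
   cleared. */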
7666 if (disas_neon_data_insn(env, s, insn))
7667 goto illegal_op;
7668 } else {
7669 if (insn & (1 << 28))
7670 goto illegal_op;
7671 if (disas_coproc_insn (env, s, insn))
7672 goto illegal_op;
7673 }
7674 break;
7675 case 8: case 9: case 10: case 11:
7676 if (insn & (1 << 15)) {
7677 /* Branches, misc control. */
7678 if (insn & 0x5000) {
7679 /* Unconditional branch. */
7680 /* signextend(hw1[10:0]) -> offset[:12]. */
7681 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7682 /* hw2[10:0] -> offset[11:1]. */
7683 offset |= (insn & 0x7ff) << 1;
7684 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7685 offset[24:22] already have the same value because of the
7686 sign extension above. */
7687 offset ^= ((~insn) & (1 << 13)) << 10;
7688 offset ^= ((~insn) & (1 << 11)) << 11;
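/* hw2[13] is J1 and hw2[11] is J2 in the BL/BLX encoding: offset[23] must be
   ~(J1 ^ S) and offset[22] must be ~(J2 ^ S). After the sign extension above
   both bits already hold S, so XORing them with the inverted J bits yields
   exactly those values. */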
7689
9ee6e8bb
PB
7690 if (insn & (1 << 14)) {
7691 /* Branch and link. */
3174f8e9 7692 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7693 }
3b46e624 7694
b0109805 7695 offset += s->pc;
9ee6e8bb
PB
7696 if (insn & (1 << 12)) {
7697 /* b/bl */
b0109805 7698 gen_jmp(s, offset);
9ee6e8bb
PB
7699 } else {
7700 /* blx */
b0109805
PB
7701 offset &= ~(uint32_t)2;
7702 gen_bx_im(s, offset);
2c0262af 7703 }
9ee6e8bb
PB
7704 } else if (((insn >> 23) & 7) == 7) {
7705 /* Misc control */
7706 if (insn & (1 << 13))
7707 goto illegal_op;
7708
7709 if (insn & (1 << 26)) {
7710 /* Secure monitor call (v6Z) */
7711 goto illegal_op; /* not implemented. */
2c0262af 7712 } else {
9ee6e8bb
PB
7713 op = (insn >> 20) & 7;
7714 switch (op) {
7715 case 0: /* msr cpsr. */
7716 if (IS_M(env)) {
8984bd2e
PB
7717 tmp = load_reg(s, rn);
7718 addr = tcg_const_i32(insn & 0xff);
7719 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6
JR
7720 tcg_temp_free_i32(addr);
7721 dead_tmp(tmp);
9ee6e8bb
PB
7722 gen_lookup_tb(s);
7723 break;
7724 }
7725 /* fall through */
7726 case 1: /* msr spsr. */
7727 if (IS_M(env))
7728 goto illegal_op;
2fbac54b
FN
7729 tmp = load_reg(s, rn);
7730 if (gen_set_psr(s,
9ee6e8bb 7731 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7732 op == 1, tmp))
9ee6e8bb
PB
7733 goto illegal_op;
7734 break;
7735 case 2: /* cps, nop-hint. */
7736 if (((insn >> 8) & 7) == 0) {
7737 gen_nop_hint(s, insn & 0xff);
7738 }
7739 /* Implemented as NOP in user mode. */
7740 if (IS_USER(s))
7741 break;
7742 offset = 0;
7743 imm = 0;
7744 if (insn & (1 << 10)) {
7745 if (insn & (1 << 7))
7746 offset |= CPSR_A;
7747 if (insn & (1 << 6))
7748 offset |= CPSR_I;
7749 if (insn & (1 << 5))
7750 offset |= CPSR_F;
7751 if (insn & (1 << 9))
7752 imm = CPSR_A | CPSR_I | CPSR_F;
7753 }
7754 if (insn & (1 << 8)) {
7755 offset |= 0x1f;
7756 imm |= (insn & 0x1f);
7757 }
7758 if (offset) {
2fbac54b 7759 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
7760 }
7761 break;
7762 case 3: /* Special control operations. */
7763 op = (insn >> 4) & 0xf;
7764 switch (op) {
7765 case 2: /* clrex */
8f8e3aa4 7766 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7767 break;
7768 case 4: /* dsb */
7769 case 5: /* dmb */
7770 case 6: /* isb */
7771 /* These execute as NOPs. */
7772 ARCH(7);
7773 break;
7774 default:
7775 goto illegal_op;
7776 }
7777 break;
7778 case 4: /* bxj */
7779 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7780 tmp = load_reg(s, rn);
7781 gen_bx(s, tmp);
9ee6e8bb
PB
7782 break;
7783 case 5: /* Exception return. */
7784 /* Unpredictable in user mode. */
7785 goto illegal_op;
7786 case 6: /* mrs cpsr. */
8984bd2e 7787 tmp = new_tmp();
9ee6e8bb 7788 if (IS_M(env)) {
8984bd2e
PB
7789 addr = tcg_const_i32(insn & 0xff);
7790 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 7791 tcg_temp_free_i32(addr);
9ee6e8bb 7792 } else {
8984bd2e 7793 gen_helper_cpsr_read(tmp);
9ee6e8bb 7794 }
8984bd2e 7795 store_reg(s, rd, tmp);
9ee6e8bb
PB
7796 break;
7797 case 7: /* mrs spsr. */
7798 /* Not accessible in user mode. */
7799 if (IS_USER(s) || IS_M(env))
7800 goto illegal_op;
d9ba4830
PB
7801 tmp = load_cpu_field(spsr);
7802 store_reg(s, rd, tmp);
9ee6e8bb 7803 break;
2c0262af
FB
7804 }
7805 }
9ee6e8bb
PB
7806 } else {
7807 /* Conditional branch. */
7808 op = (insn >> 22) & 0xf;
7809 /* Generate a conditional jump to next instruction. */
7810 s->condlabel = gen_new_label();
d9ba4830 7811 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7812 s->condjmp = 1;
7813
7814 /* offset[11:1] = insn[10:0] */
7815 offset = (insn & 0x7ff) << 1;
7816 /* offset[17:12] = insn[21:16]. */
7817 offset |= (insn & 0x003f0000) >> 4;
7818 /* offset[31:20] = insn[26]. */
7819 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7820 /* offset[18] = insn[13]. */
7821 offset |= (insn & (1 << 13)) << 5;
7822 /* offset[19] = insn[11]. */
7823 offset |= (insn & (1 << 11)) << 8;
7824
7825 /* jump to the offset */
b0109805 7826 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7827 }
7828 } else {
7829 /* Data processing immediate. */
7830 if (insn & (1 << 25)) {
7831 if (insn & (1 << 24)) {
7832 if (insn & (1 << 20))
7833 goto illegal_op;
7834 /* Bitfield/Saturate. */
7835 op = (insn >> 21) & 7;
7836 imm = insn & 0x1f;
7837 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7838 if (rn == 15) {
7839 tmp = new_tmp();
7840 tcg_gen_movi_i32(tmp, 0);
7841 } else {
7842 tmp = load_reg(s, rn);
7843 }
9ee6e8bb
PB
7844 switch (op) {
7845 case 2: /* Signed bitfield extract. */
7846 imm++;
7847 if (shift + imm > 32)
7848 goto illegal_op;
7849 if (imm < 32)
6ddbc6e4 7850 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7851 break;
7852 case 6: /* Unsigned bitfield extract. */
7853 imm++;
7854 if (shift + imm > 32)
7855 goto illegal_op;
7856 if (imm < 32)
6ddbc6e4 7857 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7858 break;
7859 case 3: /* Bitfield insert/clear. */
7860 if (imm < shift)
7861 goto illegal_op;
7862 imm = imm + 1 - shift;
7863 if (imm != 32) {
6ddbc6e4 7864 tmp2 = load_reg(s, rd);
8f8e3aa4 7865 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7866 dead_tmp(tmp2);
9ee6e8bb
PB
7867 }
7868 break;
7869 case 7:
7870 goto illegal_op;
7871 default: /* Saturate. */
9ee6e8bb
PB
7872 if (shift) {
7873 if (op & 1)
6ddbc6e4 7874 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7875 else
6ddbc6e4 7876 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7877 }
6ddbc6e4 7878 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7879 if (op & 4) {
7880 /* Unsigned. */
9ee6e8bb 7881 if ((op & 1) && shift == 0)
6ddbc6e4 7882 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7883 else
6ddbc6e4 7884 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7885 } else {
9ee6e8bb 7886 /* Signed. */
9ee6e8bb 7887 if ((op & 1) && shift == 0)
6ddbc6e4 7888 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7889 else
6ddbc6e4 7890 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7891 }
b75263d6 7892 tcg_temp_free_i32(tmp2);
9ee6e8bb 7893 break;
2c0262af 7894 }
6ddbc6e4 7895 store_reg(s, rd, tmp);
9ee6e8bb
PB
7896 } else {
7897 imm = ((insn & 0x04000000) >> 15)
7898 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7899 if (insn & (1 << 22)) {
7900 /* 16-bit immediate. */
7901 imm |= (insn >> 4) & 0xf000;
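/* Full value: imm4 (insn[19:16]) : i (insn[26]) : imm3 (insn[14:12]) : imm8 (insn[7:0]). */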
7902 if (insn & (1 << 23)) {
7903 /* movt */
5e3f878a 7904 tmp = load_reg(s, rd);
86831435 7905 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7906 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7907 } else {
9ee6e8bb 7908 /* movw */
5e3f878a
PB
7909 tmp = new_tmp();
7910 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7911 }
7912 } else {
9ee6e8bb
PB
7913 /* Add/sub 12-bit immediate. */
7914 if (rn == 15) {
b0109805 7915 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7916 if (insn & (1 << 23))
b0109805 7917 offset -= imm;
9ee6e8bb 7918 else
b0109805 7919 offset += imm;
5e3f878a
PB
7920 tmp = new_tmp();
7921 tcg_gen_movi_i32(tmp, offset);
2c0262af 7922 } else {
5e3f878a 7923 tmp = load_reg(s, rn);
9ee6e8bb 7924 if (insn & (1 << 23))
5e3f878a 7925 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7926 else
5e3f878a 7927 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7928 }
9ee6e8bb 7929 }
5e3f878a 7930 store_reg(s, rd, tmp);
191abaa2 7931 }
9ee6e8bb
PB
7932 } else {
7933 int shifter_out = 0;
7934 /* modified 12-bit immediate. */
7935 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
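/* shift = i:imm3, i.e. insn[26] and insn[14:12]; values 0-3 select the
   replicated byte forms below, larger values a rotated 8-bit constant. */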
7936 imm = (insn & 0xff);
7937 switch (shift) {
7938 case 0: /* XY */
7939 /* Nothing to do. */
7940 break;
7941 case 1: /* 00XY00XY */
7942 imm |= imm << 16;
7943 break;
7944 case 2: /* XY00XY00 */
7945 imm |= imm << 16;
7946 imm <<= 8;
7947 break;
7948 case 3: /* XYXYXYXY */
7949 imm |= imm << 16;
7950 imm |= imm << 8;
7951 break;
7952 default: /* Rotated constant. */
7953 shift = (shift << 1) | (imm >> 7);
7954 imm |= 0x80;
7955 imm = imm << (32 - shift);
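/* ROR(imm, shift) == (imm >> shift) | (imm << (32 - shift)); shift is at
   least 8 here and imm fits in 8 bits, so the first term is zero and the
   left shift alone implements the rotate. */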
7956 shifter_out = 1;
7957 break;
b5ff1b31 7958 }
3174f8e9
FN
7959 tmp2 = new_tmp();
7960 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 7961 rn = (insn >> 16) & 0xf;
3174f8e9
FN
7962 if (rn == 15) {
7963 tmp = new_tmp();
7964 tcg_gen_movi_i32(tmp, 0);
7965 } else {
7966 tmp = load_reg(s, rn);
7967 }
9ee6e8bb
PB
7968 op = (insn >> 21) & 0xf;
7969 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 7970 shifter_out, tmp, tmp2))
9ee6e8bb 7971 goto illegal_op;
3174f8e9 7972 dead_tmp(tmp2);
9ee6e8bb
PB
7973 rd = (insn >> 8) & 0xf;
7974 if (rd != 15) {
3174f8e9
FN
7975 store_reg(s, rd, tmp);
7976 } else {
7977 dead_tmp(tmp);
2c0262af 7978 }
2c0262af 7979 }
9ee6e8bb
PB
7980 }
7981 break;
7982 case 12: /* Load/store single data item. */
7983 {
7984 int postinc = 0;
7985 int writeback = 0;
b0109805 7986 int user;
9ee6e8bb
PB
7987 if ((insn & 0x01100000) == 0x01000000) {
7988 if (disas_neon_ls_insn(env, s, insn))
c1713132 7989 goto illegal_op;
9ee6e8bb
PB
7990 break;
7991 }
b0109805 7992 user = IS_USER(s);
9ee6e8bb 7993 if (rn == 15) {
b0109805 7994 addr = new_tmp();
9ee6e8bb
PB
7995 /* PC relative. */
7996 /* s->pc has already been incremented by 4. */
7997 imm = s->pc & 0xfffffffc;
7998 if (insn & (1 << 23))
7999 imm += insn & 0xfff;
8000 else
8001 imm -= insn & 0xfff;
b0109805 8002 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8003 } else {
b0109805 8004 addr = load_reg(s, rn);
9ee6e8bb
PB
8005 if (insn & (1 << 23)) {
8006 /* Positive offset. */
8007 imm = insn & 0xfff;
b0109805 8008 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8009 } else {
8010 op = (insn >> 8) & 7;
8011 imm = insn & 0xff;
8012 switch (op) {
8013 case 0: case 8: /* Shifted Register. */
8014 shift = (insn >> 4) & 0xf;
8015 if (shift > 3)
18c9b560 8016 goto illegal_op;
b26eefb6 8017 tmp = load_reg(s, rm);
9ee6e8bb 8018 if (shift)
b26eefb6 8019 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8020 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8021 dead_tmp(tmp);
9ee6e8bb
PB
8022 break;
8023 case 4: /* Negative offset. */
b0109805 8024 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8025 break;
8026 case 6: /* User privilege. */
b0109805
PB
8027 tcg_gen_addi_i32(addr, addr, imm);
8028 user = 1;
9ee6e8bb
PB
8029 break;
8030 case 1: /* Post-decrement. */
8031 imm = -imm;
8032 /* Fall through. */
8033 case 3: /* Post-increment. */
9ee6e8bb
PB
8034 postinc = 1;
8035 writeback = 1;
8036 break;
8037 case 5: /* Pre-decrement. */
8038 imm = -imm;
8039 /* Fall through. */
8040 case 7: /* Pre-increment. */
b0109805 8041 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8042 writeback = 1;
8043 break;
8044 default:
b7bcbe95 8045 goto illegal_op;
9ee6e8bb
PB
8046 }
8047 }
8048 }
8049 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
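/* As used below: op[1:0] is the access size (0 = byte, 1 = halfword,
   2 = word) and op[2] requests sign extension on loads. */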
8050 if (insn & (1 << 20)) {
8051 /* Load. */
8052 if (rs == 15 && op != 2) {
8053 if (op & 2)
b5ff1b31 8054 goto illegal_op;
9ee6e8bb
PB
8055 /* Memory hint. Implemented as NOP. */
8056 } else {
8057 switch (op) {
b0109805
PB
8058 case 0: tmp = gen_ld8u(addr, user); break;
8059 case 4: tmp = gen_ld8s(addr, user); break;
8060 case 1: tmp = gen_ld16u(addr, user); break;
8061 case 5: tmp = gen_ld16s(addr, user); break;
8062 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8063 default: goto illegal_op;
8064 }
8065 if (rs == 15) {
b0109805 8066 gen_bx(s, tmp);
9ee6e8bb 8067 } else {
b0109805 8068 store_reg(s, rs, tmp);
9ee6e8bb
PB
8069 }
8070 }
8071 } else {
8072 /* Store. */
8073 if (rs == 15)
b7bcbe95 8074 goto illegal_op;
b0109805 8075 tmp = load_reg(s, rs);
9ee6e8bb 8076 switch (op) {
b0109805
PB
8077 case 0: gen_st8(tmp, addr, user); break;
8078 case 1: gen_st16(tmp, addr, user); break;
8079 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8080 default: goto illegal_op;
b7bcbe95 8081 }
2c0262af 8082 }
9ee6e8bb 8083 if (postinc)
b0109805
PB
8084 tcg_gen_addi_i32(addr, addr, imm);
8085 if (writeback) {
8086 store_reg(s, rn, addr);
8087 } else {
8088 dead_tmp(addr);
8089 }
9ee6e8bb
PB
8090 }
8091 break;
8092 default:
8093 goto illegal_op;
2c0262af 8094 }
9ee6e8bb
PB
8095 return 0;
8096illegal_op:
8097 return 1;
2c0262af
FB
8098}
8099
9ee6e8bb 8100static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8101{
8102 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8103 int32_t offset;
8104 int i;
b26eefb6 8105 TCGv tmp;
d9ba4830 8106 TCGv tmp2;
b0109805 8107 TCGv addr;
99c475ab 8108
9ee6e8bb
PB
8109 if (s->condexec_mask) {
8110 cond = s->condexec_cond;
8111 s->condlabel = gen_new_label();
d9ba4830 8112 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8113 s->condjmp = 1;
8114 }
8115
b5ff1b31 8116 insn = lduw_code(s->pc);
99c475ab 8117 s->pc += 2;
b5ff1b31 8118
99c475ab
FB
8119 switch (insn >> 12) {
8120 case 0: case 1:
396e467c 8121
99c475ab
FB
8122 rd = insn & 7;
8123 op = (insn >> 11) & 3;
8124 if (op == 3) {
8125 /* add/subtract */
8126 rn = (insn >> 3) & 7;
396e467c 8127 tmp = load_reg(s, rn);
99c475ab
FB
8128 if (insn & (1 << 10)) {
8129 /* immediate */
396e467c
FN
8130 tmp2 = new_tmp();
8131 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8132 } else {
8133 /* reg */
8134 rm = (insn >> 6) & 7;
396e467c 8135 tmp2 = load_reg(s, rm);
99c475ab 8136 }
9ee6e8bb
PB
8137 if (insn & (1 << 9)) {
8138 if (s->condexec_mask)
396e467c 8139 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8140 else
396e467c 8141 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8142 } else {
8143 if (s->condexec_mask)
396e467c 8144 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8145 else
396e467c 8146 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8147 }
396e467c
FN
8148 dead_tmp(tmp2);
8149 store_reg(s, rd, tmp);
99c475ab
FB
8150 } else {
8151 /* shift immediate */
8152 rm = (insn >> 3) & 7;
8153 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8154 tmp = load_reg(s, rm);
8155 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8156 if (!s->condexec_mask)
8157 gen_logic_CC(tmp);
8158 store_reg(s, rd, tmp);
99c475ab
FB
8159 }
8160 break;
8161 case 2: case 3:
8162 /* arithmetic large immediate */
8163 op = (insn >> 11) & 3;
8164 rd = (insn >> 8) & 0x7;
396e467c
FN
8165 if (op == 0) { /* mov */
8166 tmp = new_tmp();
8167 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8168 if (!s->condexec_mask)
396e467c
FN
8169 gen_logic_CC(tmp);
8170 store_reg(s, rd, tmp);
8171 } else {
8172 tmp = load_reg(s, rd);
8173 tmp2 = new_tmp();
8174 tcg_gen_movi_i32(tmp2, insn & 0xff);
8175 switch (op) {
8176 case 1: /* cmp */
8177 gen_helper_sub_cc(tmp, tmp, tmp2);
8178 dead_tmp(tmp);
8179 dead_tmp(tmp2);
8180 break;
8181 case 2: /* add */
8182 if (s->condexec_mask)
8183 tcg_gen_add_i32(tmp, tmp, tmp2);
8184 else
8185 gen_helper_add_cc(tmp, tmp, tmp2);
8186 dead_tmp(tmp2);
8187 store_reg(s, rd, tmp);
8188 break;
8189 case 3: /* sub */
8190 if (s->condexec_mask)
8191 tcg_gen_sub_i32(tmp, tmp, tmp2);
8192 else
8193 gen_helper_sub_cc(tmp, tmp, tmp2);
8194 dead_tmp(tmp2);
8195 store_reg(s, rd, tmp);
8196 break;
8197 }
99c475ab 8198 }
99c475ab
FB
8199 break;
8200 case 4:
8201 if (insn & (1 << 11)) {
8202 rd = (insn >> 8) & 7;
5899f386
FB
8203 /* load pc-relative. Bit 1 of PC is ignored. */
8204 val = s->pc + 2 + ((insn & 0xff) * 4);
8205 val &= ~(uint32_t)2;
b0109805
PB
8206 addr = new_tmp();
8207 tcg_gen_movi_i32(addr, val);
8208 tmp = gen_ld32(addr, IS_USER(s));
8209 dead_tmp(addr);
8210 store_reg(s, rd, tmp);
99c475ab
FB
8211 break;
8212 }
8213 if (insn & (1 << 10)) {
8214 /* data processing extended or blx */
8215 rd = (insn & 7) | ((insn >> 4) & 8);
8216 rm = (insn >> 3) & 0xf;
8217 op = (insn >> 8) & 3;
8218 switch (op) {
8219 case 0: /* add */
396e467c
FN
8220 tmp = load_reg(s, rd);
8221 tmp2 = load_reg(s, rm);
8222 tcg_gen_add_i32(tmp, tmp, tmp2);
8223 dead_tmp(tmp2);
8224 store_reg(s, rd, tmp);
99c475ab
FB
8225 break;
8226 case 1: /* cmp */
396e467c
FN
8227 tmp = load_reg(s, rd);
8228 tmp2 = load_reg(s, rm);
8229 gen_helper_sub_cc(tmp, tmp, tmp2);
8230 dead_tmp(tmp2);
8231 dead_tmp(tmp);
99c475ab
FB
8232 break;
8233 case 2: /* mov/cpy */
396e467c
FN
8234 tmp = load_reg(s, rm);
8235 store_reg(s, rd, tmp);
99c475ab
FB
8236 break;
8237 case 3:/* branch [and link] exchange thumb register */
b0109805 8238 tmp = load_reg(s, rm);
99c475ab
FB
8239 if (insn & (1 << 7)) {
8240 val = (uint32_t)s->pc | 1;
b0109805
PB
8241 tmp2 = new_tmp();
8242 tcg_gen_movi_i32(tmp2, val);
8243 store_reg(s, 14, tmp2);
99c475ab 8244 }
d9ba4830 8245 gen_bx(s, tmp);
99c475ab
FB
8246 break;
8247 }
8248 break;
8249 }
8250
8251 /* data processing register */
8252 rd = insn & 7;
8253 rm = (insn >> 3) & 7;
8254 op = (insn >> 6) & 0xf;
8255 if (op == 2 || op == 3 || op == 4 || op == 7) {
8256 /* the shift/rotate ops want the operands backwards */
8257 val = rm;
8258 rm = rd;
8259 rd = val;
8260 val = 1;
8261 } else {
8262 val = 0;
8263 }
8264
396e467c
FN
8265 if (op == 9) { /* neg */
8266 tmp = new_tmp();
8267 tcg_gen_movi_i32(tmp, 0);
8268 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8269 tmp = load_reg(s, rd);
8270 } else {
8271 TCGV_UNUSED(tmp);
8272 }
99c475ab 8273
396e467c 8274 tmp2 = load_reg(s, rm);
5899f386 8275 switch (op) {
99c475ab 8276 case 0x0: /* and */
396e467c 8277 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8278 if (!s->condexec_mask)
396e467c 8279 gen_logic_CC(tmp);
99c475ab
FB
8280 break;
8281 case 0x1: /* eor */
396e467c 8282 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8283 if (!s->condexec_mask)
396e467c 8284 gen_logic_CC(tmp);
99c475ab
FB
8285 break;
8286 case 0x2: /* lsl */
9ee6e8bb 8287 if (s->condexec_mask) {
396e467c 8288 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8289 } else {
396e467c
FN
8290 gen_helper_shl_cc(tmp2, tmp2, tmp);
8291 gen_logic_CC(tmp2);
9ee6e8bb 8292 }
99c475ab
FB
8293 break;
8294 case 0x3: /* lsr */
9ee6e8bb 8295 if (s->condexec_mask) {
396e467c 8296 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8297 } else {
396e467c
FN
8298 gen_helper_shr_cc(tmp2, tmp2, tmp);
8299 gen_logic_CC(tmp2);
9ee6e8bb 8300 }
99c475ab
FB
8301 break;
8302 case 0x4: /* asr */
9ee6e8bb 8303 if (s->condexec_mask) {
396e467c 8304 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8305 } else {
396e467c
FN
8306 gen_helper_sar_cc(tmp2, tmp2, tmp);
8307 gen_logic_CC(tmp2);
9ee6e8bb 8308 }
99c475ab
FB
8309 break;
8310 case 0x5: /* adc */
9ee6e8bb 8311 if (s->condexec_mask)
396e467c 8312 gen_adc(tmp, tmp2);
9ee6e8bb 8313 else
396e467c 8314 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8315 break;
8316 case 0x6: /* sbc */
9ee6e8bb 8317 if (s->condexec_mask)
396e467c 8318 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8319 else
396e467c 8320 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8321 break;
8322 case 0x7: /* ror */
9ee6e8bb 8323 if (s->condexec_mask) {
f669df27
AJ
8324 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8325 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8326 } else {
396e467c
FN
8327 gen_helper_ror_cc(tmp2, tmp2, tmp);
8328 gen_logic_CC(tmp2);
9ee6e8bb 8329 }
99c475ab
FB
8330 break;
8331 case 0x8: /* tst */
396e467c
FN
8332 tcg_gen_and_i32(tmp, tmp, tmp2);
8333 gen_logic_CC(tmp);
99c475ab 8334 rd = 16;
5899f386 8335 break;
99c475ab 8336 case 0x9: /* neg */
9ee6e8bb 8337 if (s->condexec_mask)
396e467c 8338 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8339 else
396e467c 8340 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8341 break;
8342 case 0xa: /* cmp */
396e467c 8343 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8344 rd = 16;
8345 break;
8346 case 0xb: /* cmn */
396e467c 8347 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8348 rd = 16;
8349 break;
8350 case 0xc: /* orr */
396e467c 8351 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8352 if (!s->condexec_mask)
396e467c 8353 gen_logic_CC(tmp);
99c475ab
FB
8354 break;
8355 case 0xd: /* mul */
7b2919a0 8356 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8357 if (!s->condexec_mask)
396e467c 8358 gen_logic_CC(tmp);
99c475ab
FB
8359 break;
8360 case 0xe: /* bic */
f669df27 8361 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8362 if (!s->condexec_mask)
396e467c 8363 gen_logic_CC(tmp);
99c475ab
FB
8364 break;
8365 case 0xf: /* mvn */
396e467c 8366 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8367 if (!s->condexec_mask)
396e467c 8368 gen_logic_CC(tmp2);
99c475ab 8369 val = 1;
5899f386 8370 rm = rd;
99c475ab
FB
8371 break;
8372 }
8373 if (rd != 16) {
396e467c
FN
8374 if (val) {
8375 store_reg(s, rm, tmp2);
8376 if (op != 0xf)
8377 dead_tmp(tmp);
8378 } else {
8379 store_reg(s, rd, tmp);
8380 dead_tmp(tmp2);
8381 }
8382 } else {
8383 dead_tmp(tmp);
8384 dead_tmp(tmp2);
99c475ab
FB
8385 }
8386 break;
8387
8388 case 5:
8389 /* load/store register offset. */
8390 rd = insn & 7;
8391 rn = (insn >> 3) & 7;
8392 rm = (insn >> 6) & 7;
8393 op = (insn >> 9) & 7;
b0109805 8394 addr = load_reg(s, rn);
b26eefb6 8395 tmp = load_reg(s, rm);
b0109805 8396 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8397 dead_tmp(tmp);
99c475ab
FB
8398
8399 if (op < 3) /* store */
b0109805 8400 tmp = load_reg(s, rd);
99c475ab
FB
8401
8402 switch (op) {
8403 case 0: /* str */
b0109805 8404 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8405 break;
8406 case 1: /* strh */
b0109805 8407 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8408 break;
8409 case 2: /* strb */
b0109805 8410 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8411 break;
8412 case 3: /* ldrsb */
b0109805 8413 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8414 break;
8415 case 4: /* ldr */
b0109805 8416 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8417 break;
8418 case 5: /* ldrh */
b0109805 8419 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8420 break;
8421 case 6: /* ldrb */
b0109805 8422 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8423 break;
8424 case 7: /* ldrsh */
b0109805 8425 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8426 break;
8427 }
8428 if (op >= 3) /* load */
b0109805
PB
8429 store_reg(s, rd, tmp);
8430 dead_tmp(addr);
99c475ab
FB
8431 break;
8432
8433 case 6:
8434 /* load/store word immediate offset */
8435 rd = insn & 7;
8436 rn = (insn >> 3) & 7;
b0109805 8437 addr = load_reg(s, rn);
99c475ab 8438 val = (insn >> 4) & 0x7c;
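/* imm5 (insn[10:6]) scaled by 4 for word accesses. */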
b0109805 8439 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8440
8441 if (insn & (1 << 11)) {
8442 /* load */
b0109805
PB
8443 tmp = gen_ld32(addr, IS_USER(s));
8444 store_reg(s, rd, tmp);
99c475ab
FB
8445 } else {
8446 /* store */
b0109805
PB
8447 tmp = load_reg(s, rd);
8448 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8449 }
b0109805 8450 dead_tmp(addr);
99c475ab
FB
8451 break;
8452
8453 case 7:
8454 /* load/store byte immediate offset */
8455 rd = insn & 7;
8456 rn = (insn >> 3) & 7;
b0109805 8457 addr = load_reg(s, rn);
99c475ab 8458 val = (insn >> 6) & 0x1f;
b0109805 8459 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8460
8461 if (insn & (1 << 11)) {
8462 /* load */
b0109805
PB
8463 tmp = gen_ld8u(addr, IS_USER(s));
8464 store_reg(s, rd, tmp);
99c475ab
FB
8465 } else {
8466 /* store */
b0109805
PB
8467 tmp = load_reg(s, rd);
8468 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8469 }
b0109805 8470 dead_tmp(addr);
99c475ab
FB
8471 break;
8472
8473 case 8:
8474 /* load/store halfword immediate offset */
8475 rd = insn & 7;
8476 rn = (insn >> 3) & 7;
b0109805 8477 addr = load_reg(s, rn);
99c475ab 8478 val = (insn >> 5) & 0x3e;
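/* imm5 (insn[10:6]) scaled by 2 for halfword accesses. */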
b0109805 8479 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8480
8481 if (insn & (1 << 11)) {
8482 /* load */
b0109805
PB
8483 tmp = gen_ld16u(addr, IS_USER(s));
8484 store_reg(s, rd, tmp);
99c475ab
FB
8485 } else {
8486 /* store */
b0109805
PB
8487 tmp = load_reg(s, rd);
8488 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8489 }
b0109805 8490 dead_tmp(addr);
99c475ab
FB
8491 break;
8492
8493 case 9:
8494 /* load/store from stack */
8495 rd = (insn >> 8) & 7;
b0109805 8496 addr = load_reg(s, 13);
99c475ab 8497 val = (insn & 0xff) * 4;
b0109805 8498 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8499
8500 if (insn & (1 << 11)) {
8501 /* load */
b0109805
PB
8502 tmp = gen_ld32(addr, IS_USER(s));
8503 store_reg(s, rd, tmp);
99c475ab
FB
8504 } else {
8505 /* store */
b0109805
PB
8506 tmp = load_reg(s, rd);
8507 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8508 }
b0109805 8509 dead_tmp(addr);
99c475ab
FB
8510 break;
8511
8512 case 10:
8513 /* add to high reg */
8514 rd = (insn >> 8) & 7;
5899f386
FB
8515 if (insn & (1 << 11)) {
8516 /* SP */
5e3f878a 8517 tmp = load_reg(s, 13);
5899f386
FB
8518 } else {
8519 /* PC. bit 1 is ignored. */
5e3f878a
PB
8520 tmp = new_tmp();
8521 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8522 }
99c475ab 8523 val = (insn & 0xff) * 4;
5e3f878a
PB
8524 tcg_gen_addi_i32(tmp, tmp, val);
8525 store_reg(s, rd, tmp);
99c475ab
FB
8526 break;
8527
8528 case 11:
8529 /* misc */
8530 op = (insn >> 8) & 0xf;
8531 switch (op) {
8532 case 0:
8533 /* adjust stack pointer */
b26eefb6 8534 tmp = load_reg(s, 13);
99c475ab
FB
8535 val = (insn & 0x7f) * 4;
8536 if (insn & (1 << 7))
6a0d8a1d 8537 val = -(int32_t)val;
b26eefb6
PB
8538 tcg_gen_addi_i32(tmp, tmp, val);
8539 store_reg(s, 13, tmp);
99c475ab
FB
8540 break;
8541
9ee6e8bb
PB
8542 case 2: /* sign/zero extend. */
8543 ARCH(6);
8544 rd = insn & 7;
8545 rm = (insn >> 3) & 7;
b0109805 8546 tmp = load_reg(s, rm);
9ee6e8bb 8547 switch ((insn >> 6) & 3) {
b0109805
PB
8548 case 0: gen_sxth(tmp); break;
8549 case 1: gen_sxtb(tmp); break;
8550 case 2: gen_uxth(tmp); break;
8551 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8552 }
b0109805 8553 store_reg(s, rd, tmp);
9ee6e8bb 8554 break;
99c475ab
FB
8555 case 4: case 5: case 0xc: case 0xd:
8556 /* push/pop */
b0109805 8557 addr = load_reg(s, 13);
5899f386
FB
8558 if (insn & (1 << 8))
8559 offset = 4;
99c475ab 8560 else
5899f386
FB
8561 offset = 0;
8562 for (i = 0; i < 8; i++) {
8563 if (insn & (1 << i))
8564 offset += 4;
8565 }
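/* For push (bit 11 clear) the address is pre-decremented by the whole
   transfer size, so the loop below stores into the newly allocated stack
   space in ascending register order. */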
8566 if ((insn & (1 << 11)) == 0) {
b0109805 8567 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8568 }
99c475ab
FB
8569 for (i = 0; i < 8; i++) {
8570 if (insn & (1 << i)) {
8571 if (insn & (1 << 11)) {
8572 /* pop */
b0109805
PB
8573 tmp = gen_ld32(addr, IS_USER(s));
8574 store_reg(s, i, tmp);
99c475ab
FB
8575 } else {
8576 /* push */
b0109805
PB
8577 tmp = load_reg(s, i);
8578 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8579 }
5899f386 8580 /* advance to the next address. */
b0109805 8581 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8582 }
8583 }
a50f5b91 8584 TCGV_UNUSED(tmp);
99c475ab
FB
8585 if (insn & (1 << 8)) {
8586 if (insn & (1 << 11)) {
8587 /* pop pc */
b0109805 8588 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8589 /* don't set the pc until the rest of the instruction
8590 has completed */
8591 } else {
8592 /* push lr */
b0109805
PB
8593 tmp = load_reg(s, 14);
8594 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8595 }
b0109805 8596 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8597 }
5899f386 8598 if ((insn & (1 << 11)) == 0) {
b0109805 8599 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8600 }
99c475ab 8601 /* write back the new stack pointer */
b0109805 8602 store_reg(s, 13, addr);
99c475ab
FB
8603 /* set the new PC value */
8604 if ((insn & 0x0900) == 0x0900)
b0109805 8605 gen_bx(s, tmp);
99c475ab
FB
8606 break;
8607
9ee6e8bb
PB
8608 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8609 rm = insn & 7;
d9ba4830 8610 tmp = load_reg(s, rm);
9ee6e8bb
PB
8611 s->condlabel = gen_new_label();
8612 s->condjmp = 1;
8613 if (insn & (1 << 11))
cb63669a 8614 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8615 else
cb63669a 8616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8617 dead_tmp(tmp);
9ee6e8bb
PB
8618 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
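/* offset[5:1] = insn[7:3] and offset[6] = insn[9], zero-extended, so the
   branch target is always forward. */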
8619 val = (uint32_t)s->pc + 2;
8620 val += offset;
8621 gen_jmp(s, val);
8622 break;
8623
8624 case 15: /* IT, nop-hint. */
8625 if ((insn & 0xf) == 0) {
8626 gen_nop_hint(s, (insn >> 4) & 0xf);
8627 break;
8628 }
8629 /* If Then. */
8630 s->condexec_cond = (insn >> 4) & 0xe;
8631 s->condexec_mask = insn & 0x1f;
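/* Only cond[3:1] is kept in condexec_cond; the low condition bit for each
   instruction in the block comes from the top bit of the 5-bit mask as the
   IT state is advanced in gen_intermediate_code_internal(). */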
8632 /* No actual code generated for this insn, just set up state. */
8633 break;
8634
06c949e6 8635 case 0xe: /* bkpt */
9ee6e8bb 8636 gen_set_condexec(s);
5e3f878a 8637 gen_set_pc_im(s->pc - 2);
d9ba4830 8638 gen_exception(EXCP_BKPT);
06c949e6
PB
8639 s->is_jmp = DISAS_JUMP;
8640 break;
8641
9ee6e8bb
PB
8642 case 0xa: /* rev */
8643 ARCH(6);
8644 rn = (insn >> 3) & 0x7;
8645 rd = insn & 0x7;
b0109805 8646 tmp = load_reg(s, rn);
9ee6e8bb 8647 switch ((insn >> 6) & 3) {
66896cb8 8648 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8649 case 1: gen_rev16(tmp); break;
8650 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8651 default: goto illegal_op;
8652 }
b0109805 8653 store_reg(s, rd, tmp);
9ee6e8bb
PB
8654 break;
8655
8656 case 6: /* cps */
8657 ARCH(6);
8658 if (IS_USER(s))
8659 break;
8660 if (IS_M(env)) {
8984bd2e 8661 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8662 /* PRIMASK */
8984bd2e
PB
8663 if (insn & 1) {
8664 addr = tcg_const_i32(16);
8665 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8666 tcg_temp_free_i32(addr);
8984bd2e 8667 }
9ee6e8bb 8668 /* FAULTMASK */
8984bd2e
PB
8669 if (insn & 2) {
8670 addr = tcg_const_i32(17);
8671 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8672 tcg_temp_free_i32(addr);
8984bd2e 8673 }
b75263d6 8674 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8675 gen_lookup_tb(s);
8676 } else {
8677 if (insn & (1 << 4))
8678 shift = CPSR_A | CPSR_I | CPSR_F;
8679 else
8680 shift = 0;
2fbac54b 8681 gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
9ee6e8bb
PB
8682 }
8683 break;
8684
99c475ab
FB
8685 default:
8686 goto undef;
8687 }
8688 break;
8689
8690 case 12:
8691 /* load/store multiple */
8692 rn = (insn >> 8) & 0x7;
b0109805 8693 addr = load_reg(s, rn);
99c475ab
FB
8694 for (i = 0; i < 8; i++) {
8695 if (insn & (1 << i)) {
99c475ab
FB
8696 if (insn & (1 << 11)) {
8697 /* load */
b0109805
PB
8698 tmp = gen_ld32(addr, IS_USER(s));
8699 store_reg(s, i, tmp);
99c475ab
FB
8700 } else {
8701 /* store */
b0109805
PB
8702 tmp = load_reg(s, i);
8703 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8704 }
5899f386 8705 /* advance to the next address */
b0109805 8706 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8707 }
8708 }
5899f386 8709 /* Base register writeback, skipped when the base is in the register list. */
b0109805
PB
8710 if ((insn & (1 << rn)) == 0) {
8711 store_reg(s, rn, addr);
8712 } else {
8713 dead_tmp(addr);
8714 }
99c475ab
FB
8715 break;
8716
8717 case 13:
8718 /* conditional branch or swi */
8719 cond = (insn >> 8) & 0xf;
8720 if (cond == 0xe)
8721 goto undef;
8722
8723 if (cond == 0xf) {
8724 /* swi */
9ee6e8bb 8725 gen_set_condexec(s);
422ebf69 8726 gen_set_pc_im(s->pc);
9ee6e8bb 8727 s->is_jmp = DISAS_SWI;
99c475ab
FB
8728 break;
8729 }
8730 /* generate a conditional jump to next instruction */
e50e6a20 8731 s->condlabel = gen_new_label();
d9ba4830 8732 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8733 s->condjmp = 1;
99c475ab
FB
8734
8735 /* jump to the offset */
5899f386 8736 val = (uint32_t)s->pc + 2;
99c475ab 8737 offset = ((int32_t)insn << 24) >> 24;
5899f386 8738 val += offset << 1;
8aaca4c0 8739 gen_jmp(s, val);
99c475ab
FB
8740 break;
8741
8742 case 14:
358bf29e 8743 if (insn & (1 << 11)) {
9ee6e8bb
PB
8744 if (disas_thumb2_insn(env, s, insn))
8745 goto undef32;
358bf29e
PB
8746 break;
8747 }
9ee6e8bb 8748 /* unconditional branch */
99c475ab
FB
8749 val = (uint32_t)s->pc;
8750 offset = ((int32_t)insn << 21) >> 21;
8751 val += (offset << 1) + 2;
8aaca4c0 8752 gen_jmp(s, val);
99c475ab
FB
8753 break;
8754
8755 case 15:
9ee6e8bb 8756 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8757 goto undef32;
9ee6e8bb 8758 break;
99c475ab
FB
8759 }
8760 return;
9ee6e8bb
PB
8761undef32:
8762 gen_set_condexec(s);
5e3f878a 8763 gen_set_pc_im(s->pc - 4);
d9ba4830 8764 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8765 s->is_jmp = DISAS_JUMP;
8766 return;
8767illegal_op:
99c475ab 8768undef:
9ee6e8bb 8769 gen_set_condexec(s);
5e3f878a 8770 gen_set_pc_im(s->pc - 2);
d9ba4830 8771 gen_exception(EXCP_UDEF);
99c475ab
FB
8772 s->is_jmp = DISAS_JUMP;
8773}
8774
2c0262af
FB
8775/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8776 basic block 'tb'. If search_pc is TRUE, also generate PC
8777 information for each intermediate instruction. */
2cfc5f17
TS
8778static inline void gen_intermediate_code_internal(CPUState *env,
8779 TranslationBlock *tb,
8780 int search_pc)
2c0262af
FB
8781{
8782 DisasContext dc1, *dc = &dc1;
a1d1bb31 8783 CPUBreakpoint *bp;
2c0262af
FB
8784 uint16_t *gen_opc_end;
8785 int j, lj;
0fa85d43 8786 target_ulong pc_start;
b5ff1b31 8787 uint32_t next_page_start;
2e70f6ef
PB
8788 int num_insns;
8789 int max_insns;
3b46e624 8790
2c0262af 8791 /* generate intermediate code */
b26eefb6 8792 num_temps = 0;
b26eefb6 8793
0fa85d43 8794 pc_start = tb->pc;
3b46e624 8795
2c0262af
FB
8796 dc->tb = tb;
8797
2c0262af 8798 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8799
8800 dc->is_jmp = DISAS_NEXT;
8801 dc->pc = pc_start;
8aaca4c0 8802 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8803 dc->condjmp = 0;
5899f386 8804 dc->thumb = env->thumb;
9ee6e8bb
PB
8805 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8806 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 8807#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8808 if (IS_M(env)) {
8809 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
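/* v7-M is unprivileged only in thread mode (no active exception) with
   CONTROL.nPRIV set. */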
8810 } else {
8811 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8812 }
b5ff1b31 8813#endif
a7812ae4
PB
8814 cpu_F0s = tcg_temp_new_i32();
8815 cpu_F1s = tcg_temp_new_i32();
8816 cpu_F0d = tcg_temp_new_i64();
8817 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8818 cpu_V0 = cpu_F0d;
8819 cpu_V1 = cpu_F1d;
e677137d 8820 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8821 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8822 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8823 lj = -1;
2e70f6ef
PB
8824 num_insns = 0;
8825 max_insns = tb->cflags & CF_COUNT_MASK;
8826 if (max_insns == 0)
8827 max_insns = CF_COUNT_MASK;
8828
8829 gen_icount_start();
9ee6e8bb
PB
8830 /* Reset the conditional execution bits immediately. This avoids
8831 complications trying to do it at the end of the block. */
8832 if (env->condexec_bits)
8f01245e
PB
8833 {
8834 TCGv tmp = new_tmp();
8835 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8836 store_cpu_field(tmp, condexec_bits);
8f01245e 8837 }
2c0262af 8838 do {
fbb4a2e3
PB
8839#ifdef CONFIG_USER_ONLY
8840 /* Intercept jump to the magic kernel page. */
8841 if (dc->pc >= 0xffff0000) {
8842 /* We always get here via a jump, so we know we are not in a
8843 conditional execution block. */
8844 gen_exception(EXCP_KERNEL_TRAP);
8845 dc->is_jmp = DISAS_UPDATE;
8846 break;
8847 }
8848#else
9ee6e8bb
PB
8849 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8850 /* We always get here via a jump, so we know we are not in a
8851 conditional execution block. */
d9ba4830 8852 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8853 dc->is_jmp = DISAS_UPDATE;
8854 break;
9ee6e8bb
PB
8855 }
8856#endif
8857
72cf2d4f
BS
8858 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8859 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8860 if (bp->pc == dc->pc) {
9ee6e8bb 8861 gen_set_condexec(dc);
5e3f878a 8862 gen_set_pc_im(dc->pc);
d9ba4830 8863 gen_exception(EXCP_DEBUG);
1fddef4b 8864 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8865 /* Advance PC so that clearing the breakpoint will
8866 invalidate this TB. */
8867 dc->pc += 2;
8868 goto done_generating;
1fddef4b
FB
8869 break;
8870 }
8871 }
8872 }
2c0262af
FB
8873 if (search_pc) {
8874 j = gen_opc_ptr - gen_opc_buf;
8875 if (lj < j) {
8876 lj++;
8877 while (lj < j)
8878 gen_opc_instr_start[lj++] = 0;
8879 }
0fa85d43 8880 gen_opc_pc[lj] = dc->pc;
2c0262af 8881 gen_opc_instr_start[lj] = 1;
2e70f6ef 8882 gen_opc_icount[lj] = num_insns;
2c0262af 8883 }
e50e6a20 8884
2e70f6ef
PB
8885 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8886 gen_io_start();
8887
9ee6e8bb
PB
8888 if (env->thumb) {
8889 disas_thumb_insn(env, dc);
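/* Advance the IT state: cond[0] for the next instruction is taken from the
   top bit of the mask, the mask shifts up one place, and an empty mask ends
   the IT block. */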
8890 if (dc->condexec_mask) {
8891 dc->condexec_cond = (dc->condexec_cond & 0xe)
8892 | ((dc->condexec_mask >> 4) & 1);
8893 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8894 if (dc->condexec_mask == 0) {
8895 dc->condexec_cond = 0;
8896 }
8897 }
8898 } else {
8899 disas_arm_insn(env, dc);
8900 }
b26eefb6
PB
8901 if (num_temps) {
8902 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8903 num_temps = 0;
8904 }
e50e6a20
FB
8905
8906 if (dc->condjmp && !dc->is_jmp) {
8907 gen_set_label(dc->condlabel);
8908 dc->condjmp = 0;
8909 }
aaf2d97d 8910 /* Translation stops when a conditional branch is encountered.
e50e6a20 8911 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8912 * Also stop translation when a page boundary is reached. This
bf20dc07 8913 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8914 num_insns ++;
1fddef4b
FB
8915 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8916 !env->singlestep_enabled &&
1b530a6d 8917 !singlestep &&
2e70f6ef
PB
8918 dc->pc < next_page_start &&
8919 num_insns < max_insns);
8920
8921 if (tb->cflags & CF_LAST_IO) {
8922 if (dc->condjmp) {
8923 /* FIXME: This can theoretically happen with self-modifying
8924 code. */
8925 cpu_abort(env, "IO on conditional branch instruction");
8926 }
8927 gen_io_end();
8928 }
9ee6e8bb 8929
b5ff1b31 8930 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8931 instruction was a conditional branch or trap, and the PC has
8932 already been written. */
551bd27f 8933 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8934 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8935 if (dc->condjmp) {
9ee6e8bb
PB
8936 gen_set_condexec(dc);
8937 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8938 gen_exception(EXCP_SWI);
9ee6e8bb 8939 } else {
d9ba4830 8940 gen_exception(EXCP_DEBUG);
9ee6e8bb 8941 }
e50e6a20
FB
8942 gen_set_label(dc->condlabel);
8943 }
8944 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8945 gen_set_pc_im(dc->pc);
e50e6a20 8946 dc->condjmp = 0;
8aaca4c0 8947 }
9ee6e8bb
PB
8948 gen_set_condexec(dc);
8949 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8950 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8951 } else {
8952 /* FIXME: Single stepping a WFI insn will not halt
8953 the CPU. */
d9ba4830 8954 gen_exception(EXCP_DEBUG);
9ee6e8bb 8955 }
8aaca4c0 8956 } else {
9ee6e8bb
PB
8957 /* While branches must always occur at the end of an IT block,
8958 there are a few other things that can cause us to terminate
8959 the TB in the middle of an IT block:
8960 - Exception generating instructions (bkpt, swi, undefined).
8961 - Page boundaries.
8962 - Hardware watchpoints.
8963 Hardware breakpoints have already been handled and skip this code.
8964 */
8965 gen_set_condexec(dc);
8aaca4c0 8966 switch(dc->is_jmp) {
8aaca4c0 8967 case DISAS_NEXT:
6e256c93 8968 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8969 break;
8970 default:
8971 case DISAS_JUMP:
8972 case DISAS_UPDATE:
8973 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8974 tcg_gen_exit_tb(0);
8aaca4c0
FB
8975 break;
8976 case DISAS_TB_JUMP:
8977 /* nothing more to generate */
8978 break;
9ee6e8bb 8979 case DISAS_WFI:
d9ba4830 8980 gen_helper_wfi();
9ee6e8bb
PB
8981 break;
8982 case DISAS_SWI:
d9ba4830 8983 gen_exception(EXCP_SWI);
9ee6e8bb 8984 break;
8aaca4c0 8985 }
e50e6a20
FB
8986 if (dc->condjmp) {
8987 gen_set_label(dc->condlabel);
9ee6e8bb 8988 gen_set_condexec(dc);
6e256c93 8989 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8990 dc->condjmp = 0;
8991 }
2c0262af 8992 }
2e70f6ef 8993
9ee6e8bb 8994done_generating:
2e70f6ef 8995 gen_icount_end(tb, num_insns);
2c0262af
FB
8996 *gen_opc_ptr = INDEX_op_end;
8997
8998#ifdef DEBUG_DISAS
8fec2b8c 8999 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9000 qemu_log("----------------\n");
9001 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9002 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
9003 qemu_log("\n");
2c0262af
FB
9004 }
9005#endif
b5ff1b31
FB
9006 if (search_pc) {
9007 j = gen_opc_ptr - gen_opc_buf;
9008 lj++;
9009 while (lj <= j)
9010 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9011 } else {
2c0262af 9012 tb->size = dc->pc - pc_start;
2e70f6ef 9013 tb->icount = num_insns;
b5ff1b31 9014 }
2c0262af
FB
9015}
9016
2cfc5f17 9017void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9018{
2cfc5f17 9019 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9020}
9021
2cfc5f17 9022void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9023{
2cfc5f17 9024 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9025}
9026
b5ff1b31
FB
9027static const char *cpu_mode_names[16] = {
9028 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9029 "???", "???", "???", "und", "???", "???", "???", "sys"
9030};
9ee6e8bb 9031
5fafdf24 9032void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
9033 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
9034 int flags)
2c0262af
FB
9035{
9036 int i;
06e80fc9 9037#if 0
bc380d17 9038 union {
b7bcbe95
FB
9039 uint32_t i;
9040 float s;
9041 } s0, s1;
9042 CPU_DoubleU d;
a94a6abf
PB
9043 /* ??? This assumes float64 and double have the same layout.
9044 Oh well, it's only debug dumps. */
9045 union {
9046 float64 f64;
9047 double d;
9048 } d0;
06e80fc9 9049#endif
b5ff1b31 9050 uint32_t psr;
2c0262af
FB
9051
9052 for(i=0;i<16;i++) {
7fe48483 9053 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9054 if ((i % 4) == 3)
7fe48483 9055 cpu_fprintf(f, "\n");
2c0262af 9056 else
7fe48483 9057 cpu_fprintf(f, " ");
2c0262af 9058 }
b5ff1b31 9059 psr = cpsr_read(env);
687fa640
TS
9060 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9061 psr,
b5ff1b31
FB
9062 psr & (1 << 31) ? 'N' : '-',
9063 psr & (1 << 30) ? 'Z' : '-',
9064 psr & (1 << 29) ? 'C' : '-',
9065 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9066 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9067 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9068
5e3f878a 9069#if 0
b7bcbe95 9070 for (i = 0; i < 16; i++) {
8e96005d
FB
9071 d.d = env->vfp.regs[i];
9072 s0.i = d.l.lower;
9073 s1.i = d.l.upper;
a94a6abf
PB
9074 d0.f64 = d.d;
9075 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9076 i * 2, (int)s0.i, s0.s,
a94a6abf 9077 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9078 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9079 d0.d);
b7bcbe95 9080 }
40f137e1 9081 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9082#endif
2c0262af 9083}
a6b025d3 9084
d2856f1a
AJ
9085void gen_pc_load(CPUState *env, TranslationBlock *tb,
9086 unsigned long searched_pc, int pc_pos, void *puc)
9087{
9088 env->regs[15] = gen_opc_pc[pc_pos];
9089}