/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
    tcg_temp_free_i64(tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* T0 &= ~T1.  Clobbers T1.  */
/* FIXME: Implement bic natively.  */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME:  Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}

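/* Copy the bit that is shifted out into the carry flag: bit SHIFT of VAR,
   or bit 0 when SHIFT is 0.  */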
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
};

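/* Shift by register.  With FLAGS set, the _cc helpers also update the carry
   flag.  The shift amount temporary is marked as dead.  */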
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

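/* ARM-encoding parallel add/subtract.  op1 selects the variant (signed,
   unsigned, saturating or halving) and op2 the element operation; the signed
   and unsigned variants also pass a pointer to the GE flags.  */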
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

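/* Generate a branch to LABEL when condition code CC holds, testing the
   cached NF/ZF/CF/VF flag fields.  */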
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

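/* 1 for data-processing opcodes that set the flags as a logical operation
   (via gen_logic_CC), 0 for the arithmetic ones.  */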
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

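/* Typed wrappers around the TCG qemu_ld/st ops.  The loads allocate the
   result temporary; the stores mark the value temporary as dead.  INDEX is
   the memory access mode (user or privileged).  */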
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

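/* Add the addressing-mode offset of a load/store instruction (immediate or
   shifted register, with bit 23 selecting add or subtract) to VAR.  */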
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

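/* Two-operand VFP arithmetic on the cpu_F0/cpu_F1 scratch registers, in
   double or single precision depending on dp.  */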
#define VFP_OP2(name)                                 \
static inline void gen_vfp_##name(int dp)             \
{                                                     \
    if (dp)                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

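/* Fixed-point conversions: gen_vfp_<name>(dp, shift) invokes the matching
   VFP helper with SHIFT passed as an immediate operand.  */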
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env); \
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

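/* Return the offset into CPUARMState of VFP register REG: a whole double
   register when DP is set, otherwise the 32-bit half of the double register
   holding the single-precision value.  */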
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

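/* Move one 32-bit pass of a NEON register, or a whole 64-bit register,
   between CPUARMState and a TCG temporary.  */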
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

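/* Generate gen_op_iwmmxt_<name>_M0_wRn() wrappers around the iwMMXt helpers.
   The _ENV variants also pass cpu_env, _ENV_SIZE expands the b/w/l element
   sizes, and _ENV1 generates the unary gen_op_iwmmxt_<name>_M0() form.  */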
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

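/* Record that the iwMMXt data registers (MUP bit) or control registers
   (CUP bit) have been updated by setting the corresponding bits in wCon.  */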
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

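/* Compute the effective address of an iwMMXt load/store (pre- or post-indexed,
   with an optionally scaled immediate offset) into DEST, writing back the base
   register when requested.  Returns nonzero for an invalid addressing mode.  */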
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

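/* Fetch the shift amount for an iwMMXt shift instruction into DEST: either a
   wCGR control register or the low 32 bits of a wR register, masked with
   MASK.  Returns nonzero for an invalid register.  */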
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_bic_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
da6b5335 2033 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2034 break;
2035 }
da6b5335 2036 dead_tmp(tmp);
18c9b560
AZ
2037 gen_op_iwmmxt_movq_wRn_M0(wrd);
2038 gen_op_iwmmxt_set_mup();
2039 gen_op_iwmmxt_set_cup();
2040 break;
2041 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2042 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2043 if (((insn >> 22) & 3) == 0)
2044 return 1;
18c9b560
AZ
2045 wrd = (insn >> 12) & 0xf;
2046 rd0 = (insn >> 16) & 0xf;
2047 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2048 tmp = new_tmp();
18c9b560 2049 switch ((insn >> 22) & 3) {
18c9b560 2050 case 1:
da6b5335
FN
2051 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2052 dead_tmp(tmp);
18c9b560 2053 return 1;
da6b5335
FN
2054 }
2055 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2056 break;
2057 case 2:
da6b5335
FN
2058 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2059 dead_tmp(tmp);
18c9b560 2060 return 1;
da6b5335
FN
2061 }
2062 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2063 break;
2064 case 3:
da6b5335
FN
2065 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2066 dead_tmp(tmp);
18c9b560 2067 return 1;
da6b5335
FN
2068 }
2069 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2070 break;
2071 }
da6b5335 2072 dead_tmp(tmp);
18c9b560
AZ
2073 gen_op_iwmmxt_movq_wRn_M0(wrd);
2074 gen_op_iwmmxt_set_mup();
2075 gen_op_iwmmxt_set_cup();
2076 break;
2077 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2078 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2079 wrd = (insn >> 12) & 0xf;
2080 rd0 = (insn >> 16) & 0xf;
2081 rd1 = (insn >> 0) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0);
2083 switch ((insn >> 22) & 3) {
2084 case 0:
2085 if (insn & (1 << 21))
2086 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2087 else
2088 gen_op_iwmmxt_minub_M0_wRn(rd1);
2089 break;
2090 case 1:
2091 if (insn & (1 << 21))
2092 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2093 else
2094 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2095 break;
2096 case 2:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2099 else
2100 gen_op_iwmmxt_minul_M0_wRn(rd1);
2101 break;
2102 case 3:
2103 return 1;
2104 }
2105 gen_op_iwmmxt_movq_wRn_M0(wrd);
2106 gen_op_iwmmxt_set_mup();
2107 break;
2108 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2109 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2110 wrd = (insn >> 12) & 0xf;
2111 rd0 = (insn >> 16) & 0xf;
2112 rd1 = (insn >> 0) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
2114 switch ((insn >> 22) & 3) {
2115 case 0:
2116 if (insn & (1 << 21))
2117 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2118 else
2119 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2120 break;
2121 case 1:
2122 if (insn & (1 << 21))
2123 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2124 else
2125 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2126 break;
2127 case 2:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2132 break;
2133 case 3:
2134 return 1;
2135 }
2136 gen_op_iwmmxt_movq_wRn_M0(wrd);
2137 gen_op_iwmmxt_set_mup();
2138 break;
2139 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2140 case 0x402: case 0x502: case 0x602: case 0x702:
2141 wrd = (insn >> 12) & 0xf;
2142 rd0 = (insn >> 16) & 0xf;
2143 rd1 = (insn >> 0) & 0xf;
2144 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2145 tmp = tcg_const_i32((insn >> 20) & 3);
2146 iwmmxt_load_reg(cpu_V1, rd1);
2147 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2148 tcg_temp_free(tmp);
18c9b560
AZ
2149 gen_op_iwmmxt_movq_wRn_M0(wrd);
2150 gen_op_iwmmxt_set_mup();
2151 break;
2152 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2153 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2154 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2155 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2156 wrd = (insn >> 12) & 0xf;
2157 rd0 = (insn >> 16) & 0xf;
2158 rd1 = (insn >> 0) & 0xf;
2159 gen_op_iwmmxt_movq_M0_wRn(rd0);
2160 switch ((insn >> 20) & 0xf) {
2161 case 0x0:
2162 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2163 break;
2164 case 0x1:
2165 gen_op_iwmmxt_subub_M0_wRn(rd1);
2166 break;
2167 case 0x3:
2168 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2169 break;
2170 case 0x4:
2171 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2172 break;
2173 case 0x5:
2174 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2175 break;
2176 case 0x7:
2177 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2178 break;
2179 case 0x8:
2180 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2181 break;
2182 case 0x9:
2183 gen_op_iwmmxt_subul_M0_wRn(rd1);
2184 break;
2185 case 0xb:
2186 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2187 break;
2188 default:
2189 return 1;
2190 }
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 gen_op_iwmmxt_set_cup();
2194 break;
2195 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2196 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2197 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2198 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2199 wrd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2202 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2203 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2204 tcg_temp_free(tmp);
18c9b560
AZ
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 gen_op_iwmmxt_set_cup();
2208 break;
2209 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2210 case 0x418: case 0x518: case 0x618: case 0x718:
2211 case 0x818: case 0x918: case 0xa18: case 0xb18:
2212 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2213 wrd = (insn >> 12) & 0xf;
2214 rd0 = (insn >> 16) & 0xf;
2215 rd1 = (insn >> 0) & 0xf;
2216 gen_op_iwmmxt_movq_M0_wRn(rd0);
2217 switch ((insn >> 20) & 0xf) {
2218 case 0x0:
2219 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2220 break;
2221 case 0x1:
2222 gen_op_iwmmxt_addub_M0_wRn(rd1);
2223 break;
2224 case 0x3:
2225 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2226 break;
2227 case 0x4:
2228 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2229 break;
2230 case 0x5:
2231 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2232 break;
2233 case 0x7:
2234 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2235 break;
2236 case 0x8:
2237 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2238 break;
2239 case 0x9:
2240 gen_op_iwmmxt_addul_M0_wRn(rd1);
2241 break;
2242 case 0xb:
2243 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2244 break;
2245 default:
2246 return 1;
2247 }
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2251 break;
2252 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2253 case 0x408: case 0x508: case 0x608: case 0x708:
2254 case 0x808: case 0x908: case 0xa08: case 0xb08:
2255 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2256 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2257 return 1;
18c9b560
AZ
2258 wrd = (insn >> 12) & 0xf;
2259 rd0 = (insn >> 16) & 0xf;
2260 rd1 = (insn >> 0) & 0xf;
2261 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2262 switch ((insn >> 22) & 3) {
18c9b560
AZ
2263 case 1:
2264 if (insn & (1 << 21))
2265 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2266 else
2267 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2268 break;
2269 case 2:
2270 if (insn & (1 << 21))
2271 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2272 else
2273 gen_op_iwmmxt_packul_M0_wRn(rd1);
2274 break;
2275 case 3:
2276 if (insn & (1 << 21))
2277 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2278 else
2279 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2280 break;
2281 }
2282 gen_op_iwmmxt_movq_wRn_M0(wrd);
2283 gen_op_iwmmxt_set_mup();
2284 gen_op_iwmmxt_set_cup();
2285 break;
2286 case 0x201: case 0x203: case 0x205: case 0x207:
2287 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2288 case 0x211: case 0x213: case 0x215: case 0x217:
2289 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2290 wrd = (insn >> 5) & 0xf;
2291 rd0 = (insn >> 12) & 0xf;
2292 rd1 = (insn >> 0) & 0xf;
2293 if (rd0 == 0xf || rd1 == 0xf)
2294 return 1;
2295 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2296 tmp = load_reg(s, rd0);
2297 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2298 switch ((insn >> 16) & 0xf) {
2299 case 0x0: /* TMIA */
da6b5335 2300 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2301 break;
2302 case 0x8: /* TMIAPH */
da6b5335 2303 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2304 break;
2305 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2306 if (insn & (1 << 16))
da6b5335 2307 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2308 if (insn & (1 << 17))
da6b5335
FN
2309 tcg_gen_shri_i32(tmp2, tmp2, 16);
2310 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2311 break;
2312 default:
da6b5335
FN
2313 dead_tmp(tmp2);
2314 dead_tmp(tmp);
18c9b560
AZ
2315 return 1;
2316 }
da6b5335
FN
2317 dead_tmp(tmp2);
2318 dead_tmp(tmp);
18c9b560
AZ
2319 gen_op_iwmmxt_movq_wRn_M0(wrd);
2320 gen_op_iwmmxt_set_mup();
2321 break;
2322 default:
2323 return 1;
2324 }
2325
2326 return 0;
2327}
2328
2329/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
 2330 (i.e. an undefined instruction). */
2331static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2332{
2333 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2334 TCGv tmp, tmp2;
18c9b560
AZ
2335
2336 if ((insn & 0x0ff00f10) == 0x0e200010) {
2337 /* Multiply with Internal Accumulate Format */
2338 rd0 = (insn >> 12) & 0xf;
2339 rd1 = insn & 0xf;
2340 acc = (insn >> 5) & 7;
2341
2342 if (acc != 0)
2343 return 1;
2344
3a554c0f
FN
2345 tmp = load_reg(s, rd0);
2346 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2347 switch ((insn >> 16) & 0xf) {
2348 case 0x0: /* MIA */
3a554c0f 2349 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2350 break;
2351 case 0x8: /* MIAPH */
3a554c0f 2352 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2353 break;
2354 case 0xc: /* MIABB */
2355 case 0xd: /* MIABT */
2356 case 0xe: /* MIATB */
2357 case 0xf: /* MIATT */
18c9b560 2358 if (insn & (1 << 16))
3a554c0f 2359 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2360 if (insn & (1 << 17))
3a554c0f
FN
2361 tcg_gen_shri_i32(tmp2, tmp2, 16);
2362 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2363 break;
2364 default:
2365 return 1;
2366 }
3a554c0f
FN
2367 dead_tmp(tmp2);
2368 dead_tmp(tmp);
18c9b560
AZ
2369
2370 gen_op_iwmmxt_movq_wRn_M0(acc);
2371 return 0;
2372 }
2373
2374 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2375 /* Internal Accumulator Access Format */
2376 rdhi = (insn >> 16) & 0xf;
2377 rdlo = (insn >> 12) & 0xf;
2378 acc = insn & 7;
2379
2380 if (acc != 0)
2381 return 1;
2382
2383 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2384 iwmmxt_load_reg(cpu_V0, acc);
2385 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2386 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2387 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
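            /* The accumulator (acc0) is 40 bits wide; keep only bits [39:32] in rdhi. */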
2388 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2389 } else { /* MAR */
3a554c0f
FN
2390 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2391 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2392 }
2393 return 0;
2394 }
2395
2396 return 1;
2397}
2398
c1713132
AZ
2399/* Disassemble a system coprocessor instruction. Return nonzero if
 2400 the instruction is not defined. */
2401static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2402{
b75263d6 2403 TCGv tmp, tmp2;
c1713132
AZ
2404 uint32_t rd = (insn >> 12) & 0xf;
2405 uint32_t cp = (insn >> 8) & 0xf;
2406 if (IS_USER(s)) {
2407 return 1;
2408 }
2409
18c9b560 2410 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2411 if (!env->cp[cp].cp_read)
2412 return 1;
8984bd2e
PB
2413 gen_set_pc_im(s->pc);
2414 tmp = new_tmp();
b75263d6
JR
2415 tmp2 = tcg_const_i32(insn);
2416 gen_helper_get_cp(tmp, cpu_env, tmp2);
2417 tcg_temp_free(tmp2);
8984bd2e 2418 store_reg(s, rd, tmp);
c1713132
AZ
2419 } else {
2420 if (!env->cp[cp].cp_write)
2421 return 1;
8984bd2e
PB
2422 gen_set_pc_im(s->pc);
2423 tmp = load_reg(s, rd);
b75263d6
JR
2424 tmp2 = tcg_const_i32(insn);
2425 gen_helper_set_cp(cpu_env, tmp2, tmp);
2426 tcg_temp_free(tmp2);
a60de947 2427 dead_tmp(tmp);
c1713132
AZ
2428 }
2429 return 0;
2430}
2431
9ee6e8bb
PB
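/* Return nonzero if this cp15 instruction is permitted in user mode. */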
2432static int cp15_user_ok(uint32_t insn)
2433{
2434 int cpn = (insn >> 16) & 0xf;
2435 int cpm = insn & 0xf;
2436 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2437
2438 if (cpn == 13 && cpm == 0) {
2439 /* TLS register. */
2440 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2441 return 1;
2442 }
2443 if (cpn == 7) {
2444 /* ISB, DSB, DMB. */
2445 if ((cpm == 5 && op == 4)
2446 || (cpm == 10 && (op == 4 || op == 5)))
2447 return 1;
2448 }
2449 return 0;
2450}
2451
b5ff1b31
FB
2452/* Disassemble a system coprocessor (cp15) instruction. Return nonzero if
 2453 the instruction is not defined. */
a90b7318 2454static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2455{
2456 uint32_t rd;
b75263d6 2457 TCGv tmp, tmp2;
b5ff1b31 2458
9ee6e8bb
PB
2459 /* M profile cores use memory-mapped registers instead of cp15. */
2460 if (arm_feature(env, ARM_FEATURE_M))
2461 return 1;
2462
2463 if ((insn & (1 << 25)) == 0) {
2464 if (insn & (1 << 20)) {
2465 /* mrrc */
2466 return 1;
2467 }
2468 /* mcrr. Used for block cache operations, so implement as no-op. */
2469 return 0;
2470 }
2471 if ((insn & (1 << 4)) == 0) {
2472 /* cdp */
2473 return 1;
2474 }
2475 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2476 return 1;
2477 }
9332f9da
FB
2478 if ((insn & 0x0fff0fff) == 0x0e070f90
2479 || (insn & 0x0fff0fff) == 0x0e070f58) {
2480 /* Wait for interrupt. */
8984bd2e 2481 gen_set_pc_im(s->pc);
9ee6e8bb 2482 s->is_jmp = DISAS_WFI;
9332f9da
FB
2483 return 0;
2484 }
b5ff1b31 2485 rd = (insn >> 12) & 0xf;
b75263d6 2486 tmp2 = tcg_const_i32(insn);
18c9b560 2487 if (insn & ARM_CP_RW_BIT) {
8984bd2e 2488 tmp = new_tmp();
b75263d6 2489 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2490 /* If the destination register is r15 then the condition codes are set. */
2491 if (rd != 15)
8984bd2e
PB
2492 store_reg(s, rd, tmp);
2493 else
2494 dead_tmp(tmp);
b5ff1b31 2495 } else {
8984bd2e 2496 tmp = load_reg(s, rd);
b75263d6 2497 gen_helper_set_cp15(cpu_env, tmp2, tmp);
8984bd2e 2498 dead_tmp(tmp);
a90b7318
AZ
2499 /* Normally we would always end the TB here, but Linux
2500 * arch/arm/mach-pxa/sleep.S expects two instructions following
2501 * an MMU enable to execute from cache. Imitate this behaviour. */
2502 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2503 (insn & 0x0fff0fff) != 0x0e010f10)
2504 gen_lookup_tb(s);
b5ff1b31 2505 }
b75263d6 2506 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2507 return 0;
2508}
2509
9ee6e8bb
PB
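/* Extract VFP register numbers from an instruction. With VFP3 the 'smallbit'
   supplies bit 4 of a double-precision register number (32 D registers);
   without VFP3 that bit must be zero. For single-precision registers the
   small bit is the low bit of the register number. */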
2510#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2511#define VFP_SREG(insn, bigbit, smallbit) \
2512 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2513#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2514 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2515 reg = (((insn) >> (bigbit)) & 0x0f) \
2516 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2517 } else { \
2518 if (insn & (1 << (smallbit))) \
2519 return 1; \
2520 reg = ((insn) >> (bigbit)) & 0x0f; \
2521 }} while (0)
2522
2523#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2524#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2525#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2526#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2527#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2528#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2529
4373f3ce
PB
2530/* Move between integer and VFP cores. */
2531static TCGv gen_vfp_mrs(void)
2532{
2533 TCGv tmp = new_tmp();
2534 tcg_gen_mov_i32(tmp, cpu_F0s);
2535 return tmp;
2536}
2537
2538static void gen_vfp_msr(TCGv tmp)
2539{
2540 tcg_gen_mov_i32(cpu_F0s, tmp);
2541 dead_tmp(tmp);
2542}
2543
9ee6e8bb
PB
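/* The VFP unit is enabled when bit 30 (EN) of FPEXC is set. */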
2544static inline int
2545vfp_enabled(CPUState * env)
2546{
2547 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2548}
2549
ad69471c
PB
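/* Duplicate the byte selected by 'shift' across all four byte lanes of var. */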
2550static void gen_neon_dup_u8(TCGv var, int shift)
2551{
2552 TCGv tmp = new_tmp();
2553 if (shift)
2554 tcg_gen_shri_i32(var, var, shift);
86831435 2555 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2556 tcg_gen_shli_i32(tmp, var, 8);
2557 tcg_gen_or_i32(var, var, tmp);
2558 tcg_gen_shli_i32(tmp, var, 16);
2559 tcg_gen_or_i32(var, var, tmp);
2560 dead_tmp(tmp);
2561}
2562
2563static void gen_neon_dup_low16(TCGv var)
2564{
2565 TCGv tmp = new_tmp();
86831435 2566 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2567 tcg_gen_shli_i32(tmp, var, 16);
2568 tcg_gen_or_i32(var, var, tmp);
2569 dead_tmp(tmp);
2570}
2571
2572static void gen_neon_dup_high16(TCGv var)
2573{
2574 TCGv tmp = new_tmp();
2575 tcg_gen_andi_i32(var, var, 0xffff0000);
2576 tcg_gen_shri_i32(tmp, var, 16);
2577 tcg_gen_or_i32(var, var, tmp);
2578 dead_tmp(tmp);
2579}
2580
b7bcbe95
FB
2581/* Disassemble a VFP instruction. Returns nonzero if an error occurred
 2582 (i.e. an undefined instruction). */
2583static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2584{
2585 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2586 int dp, veclen;
312eea9f 2587 TCGv addr;
4373f3ce 2588 TCGv tmp;
ad69471c 2589 TCGv tmp2;
b7bcbe95 2590
40f137e1
PB
2591 if (!arm_feature(env, ARM_FEATURE_VFP))
2592 return 1;
2593
9ee6e8bb
PB
2594 if (!vfp_enabled(env)) {
2595 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2596 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2597 return 1;
2598 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2599 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2600 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2601 return 1;
2602 }
b7bcbe95
FB
2603 dp = ((insn & 0xf00) == 0xb00);
2604 switch ((insn >> 24) & 0xf) {
2605 case 0xe:
2606 if (insn & (1 << 4)) {
2607 /* single register transfer */
b7bcbe95
FB
2608 rd = (insn >> 12) & 0xf;
2609 if (dp) {
9ee6e8bb
PB
2610 int size;
2611 int pass;
2612
2613 VFP_DREG_N(rn, insn);
2614 if (insn & 0xf)
b7bcbe95 2615 return 1;
9ee6e8bb
PB
2616 if (insn & 0x00c00060
2617 && !arm_feature(env, ARM_FEATURE_NEON))
2618 return 1;
2619
2620 pass = (insn >> 21) & 1;
2621 if (insn & (1 << 22)) {
2622 size = 0;
2623 offset = ((insn >> 5) & 3) * 8;
2624 } else if (insn & (1 << 5)) {
2625 size = 1;
2626 offset = (insn & (1 << 6)) ? 16 : 0;
2627 } else {
2628 size = 2;
2629 offset = 0;
2630 }
18c9b560 2631 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2632 /* vfp->arm */
ad69471c 2633 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2634 switch (size) {
2635 case 0:
9ee6e8bb 2636 if (offset)
ad69471c 2637 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2638 if (insn & (1 << 23))
ad69471c 2639 gen_uxtb(tmp);
9ee6e8bb 2640 else
ad69471c 2641 gen_sxtb(tmp);
9ee6e8bb
PB
2642 break;
2643 case 1:
9ee6e8bb
PB
2644 if (insn & (1 << 23)) {
2645 if (offset) {
ad69471c 2646 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2647 } else {
ad69471c 2648 gen_uxth(tmp);
9ee6e8bb
PB
2649 }
2650 } else {
2651 if (offset) {
ad69471c 2652 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2653 } else {
ad69471c 2654 gen_sxth(tmp);
9ee6e8bb
PB
2655 }
2656 }
2657 break;
2658 case 2:
9ee6e8bb
PB
2659 break;
2660 }
ad69471c 2661 store_reg(s, rd, tmp);
b7bcbe95
FB
2662 } else {
2663 /* arm->vfp */
ad69471c 2664 tmp = load_reg(s, rd);
9ee6e8bb
PB
2665 if (insn & (1 << 23)) {
2666 /* VDUP */
2667 if (size == 0) {
ad69471c 2668 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2669 } else if (size == 1) {
ad69471c 2670 gen_neon_dup_low16(tmp);
9ee6e8bb 2671 }
cbbccffc
PB
2672 for (n = 0; n <= pass * 2; n++) {
2673 tmp2 = new_tmp();
2674 tcg_gen_mov_i32(tmp2, tmp);
2675 neon_store_reg(rn, n, tmp2);
2676 }
2677 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2678 } else {
2679 /* VMOV */
2680 switch (size) {
2681 case 0:
ad69471c
PB
2682 tmp2 = neon_load_reg(rn, pass);
2683 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2684 dead_tmp(tmp2);
9ee6e8bb
PB
2685 break;
2686 case 1:
ad69471c
PB
2687 tmp2 = neon_load_reg(rn, pass);
2688 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2689 dead_tmp(tmp2);
9ee6e8bb
PB
2690 break;
2691 case 2:
9ee6e8bb
PB
2692 break;
2693 }
ad69471c 2694 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2695 }
b7bcbe95 2696 }
9ee6e8bb
PB
2697 } else { /* !dp */
2698 if ((insn & 0x6f) != 0x00)
2699 return 1;
2700 rn = VFP_SREG_N(insn);
18c9b560 2701 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2702 /* vfp->arm */
2703 if (insn & (1 << 21)) {
2704 /* system register */
40f137e1 2705 rn >>= 1;
9ee6e8bb 2706
b7bcbe95 2707 switch (rn) {
40f137e1 2708 case ARM_VFP_FPSID:
4373f3ce 2709 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2710 VFP3 restricts all id registers to privileged
2711 accesses. */
2712 if (IS_USER(s)
2713 && arm_feature(env, ARM_FEATURE_VFP3))
2714 return 1;
4373f3ce 2715 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2716 break;
40f137e1 2717 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2718 if (IS_USER(s))
2719 return 1;
4373f3ce 2720 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2721 break;
40f137e1
PB
2722 case ARM_VFP_FPINST:
2723 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2724 /* Not present in VFP3. */
2725 if (IS_USER(s)
2726 || arm_feature(env, ARM_FEATURE_VFP3))
2727 return 1;
4373f3ce 2728 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2729 break;
40f137e1 2730 case ARM_VFP_FPSCR:
601d70b9 2731 if (rd == 15) {
4373f3ce
PB
2732 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2733 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2734 } else {
2735 tmp = new_tmp();
2736 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2737 }
b7bcbe95 2738 break;
9ee6e8bb
PB
2739 case ARM_VFP_MVFR0:
2740 case ARM_VFP_MVFR1:
2741 if (IS_USER(s)
2742 || !arm_feature(env, ARM_FEATURE_VFP3))
2743 return 1;
4373f3ce 2744 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2745 break;
b7bcbe95
FB
2746 default:
2747 return 1;
2748 }
2749 } else {
2750 gen_mov_F0_vreg(0, rn);
4373f3ce 2751 tmp = gen_vfp_mrs();
b7bcbe95
FB
2752 }
2753 if (rd == 15) {
b5ff1b31 2754 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2755 gen_set_nzcv(tmp);
2756 dead_tmp(tmp);
2757 } else {
2758 store_reg(s, rd, tmp);
2759 }
b7bcbe95
FB
2760 } else {
2761 /* arm->vfp */
4373f3ce 2762 tmp = load_reg(s, rd);
b7bcbe95 2763 if (insn & (1 << 21)) {
40f137e1 2764 rn >>= 1;
b7bcbe95
FB
2765 /* system register */
2766 switch (rn) {
40f137e1 2767 case ARM_VFP_FPSID:
9ee6e8bb
PB
2768 case ARM_VFP_MVFR0:
2769 case ARM_VFP_MVFR1:
b7bcbe95
FB
2770 /* Writes are ignored. */
2771 break;
40f137e1 2772 case ARM_VFP_FPSCR:
4373f3ce
PB
2773 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2774 dead_tmp(tmp);
b5ff1b31 2775 gen_lookup_tb(s);
b7bcbe95 2776 break;
40f137e1 2777 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2778 if (IS_USER(s))
2779 return 1;
4373f3ce 2780 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2781 gen_lookup_tb(s);
2782 break;
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
4373f3ce 2785 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2786 break;
b7bcbe95
FB
2787 default:
2788 return 1;
2789 }
2790 } else {
4373f3ce 2791 gen_vfp_msr(tmp);
b7bcbe95
FB
2792 gen_mov_vreg_F0(0, rn);
2793 }
2794 }
2795 }
2796 } else {
2797 /* data processing */
2798 /* The opcode is in bits 23, 21, 20 and 6. */
2799 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2800 if (dp) {
2801 if (op == 15) {
2802 /* rn is opcode */
2803 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2804 } else {
2805 /* rn is register number */
9ee6e8bb 2806 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2807 }
2808
2809 if (op == 15 && (rn == 15 || rn > 17)) {
2810 /* Integer or single precision destination. */
9ee6e8bb 2811 rd = VFP_SREG_D(insn);
b7bcbe95 2812 } else {
9ee6e8bb 2813 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2814 }
2815
2816 if (op == 15 && (rn == 16 || rn == 17)) {
2817 /* Integer source. */
2818 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2819 } else {
9ee6e8bb 2820 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2821 }
2822 } else {
9ee6e8bb 2823 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2824 if (op == 15 && rn == 15) {
2825 /* Double precision destination. */
9ee6e8bb
PB
2826 VFP_DREG_D(rd, insn);
2827 } else {
2828 rd = VFP_SREG_D(insn);
2829 }
2830 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2831 }
2832
2833 veclen = env->vfp.vec_len;
2834 if (op == 15 && rn > 3)
2835 veclen = 0;
2836
2837 /* Shut up compiler warnings. */
2838 delta_m = 0;
2839 delta_d = 0;
2840 bank_mask = 0;
3b46e624 2841
b7bcbe95
FB
2842 if (veclen > 0) {
2843 if (dp)
2844 bank_mask = 0xc;
2845 else
2846 bank_mask = 0x18;
2847
2848 /* Figure out what type of vector operation this is. */
2849 if ((rd & bank_mask) == 0) {
2850 /* scalar */
2851 veclen = 0;
2852 } else {
2853 if (dp)
2854 delta_d = (env->vfp.vec_stride >> 1) + 1;
2855 else
2856 delta_d = env->vfp.vec_stride + 1;
2857
2858 if ((rm & bank_mask) == 0) {
2859 /* mixed scalar/vector */
2860 delta_m = 0;
2861 } else {
2862 /* vector */
2863 delta_m = delta_d;
2864 }
2865 }
2866 }
2867
2868 /* Load the initial operands. */
2869 if (op == 15) {
2870 switch (rn) {
2871 case 16:
2872 case 17:
2873 /* Integer source */
2874 gen_mov_F0_vreg(0, rm);
2875 break;
2876 case 8:
2877 case 9:
2878 /* Compare */
2879 gen_mov_F0_vreg(dp, rd);
2880 gen_mov_F1_vreg(dp, rm);
2881 break;
2882 case 10:
2883 case 11:
2884 /* Compare with zero */
2885 gen_mov_F0_vreg(dp, rd);
2886 gen_vfp_F1_ld0(dp);
2887 break;
9ee6e8bb
PB
2888 case 20:
2889 case 21:
2890 case 22:
2891 case 23:
644ad806
PB
2892 case 28:
2893 case 29:
2894 case 30:
2895 case 31:
9ee6e8bb
PB
2896 /* Source and destination the same. */
2897 gen_mov_F0_vreg(dp, rd);
2898 break;
b7bcbe95
FB
2899 default:
2900 /* One source operand. */
2901 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2902 break;
b7bcbe95
FB
2903 }
2904 } else {
2905 /* Two source operands. */
2906 gen_mov_F0_vreg(dp, rn);
2907 gen_mov_F1_vreg(dp, rm);
2908 }
2909
2910 for (;;) {
2911 /* Perform the calculation. */
2912 switch (op) {
2913 case 0: /* mac: fd + (fn * fm) */
2914 gen_vfp_mul(dp);
2915 gen_mov_F1_vreg(dp, rd);
2916 gen_vfp_add(dp);
2917 break;
2918 case 1: /* nmac: fd - (fn * fm) */
2919 gen_vfp_mul(dp);
2920 gen_vfp_neg(dp);
2921 gen_mov_F1_vreg(dp, rd);
2922 gen_vfp_add(dp);
2923 break;
2924 case 2: /* msc: -fd + (fn * fm) */
2925 gen_vfp_mul(dp);
2926 gen_mov_F1_vreg(dp, rd);
2927 gen_vfp_sub(dp);
2928 break;
2929 case 3: /* nmsc: -fd - (fn * fm) */
2930 gen_vfp_mul(dp);
b7bcbe95 2931 gen_vfp_neg(dp);
c9fb531a
PB
2932 gen_mov_F1_vreg(dp, rd);
2933 gen_vfp_sub(dp);
b7bcbe95
FB
2934 break;
2935 case 4: /* mul: fn * fm */
2936 gen_vfp_mul(dp);
2937 break;
2938 case 5: /* nmul: -(fn * fm) */
2939 gen_vfp_mul(dp);
2940 gen_vfp_neg(dp);
2941 break;
2942 case 6: /* add: fn + fm */
2943 gen_vfp_add(dp);
2944 break;
2945 case 7: /* sub: fn - fm */
2946 gen_vfp_sub(dp);
2947 break;
2948 case 8: /* div: fn / fm */
2949 gen_vfp_div(dp);
2950 break;
9ee6e8bb
PB
2951 case 14: /* fconst */
2952 if (!arm_feature(env, ARM_FEATURE_VFP3))
2953 return 1;
2954
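                /* Expand the 8-bit VFP3 immediate: insn bit 19 gives the sign,
                   bits [18:16] and [3:0] supply the exponent and top fraction
                   bits; all remaining fraction bits are zero. */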
2955 n = (insn << 12) & 0x80000000;
2956 i = ((insn >> 12) & 0x70) | (insn & 0xf);
2957 if (dp) {
2958 if (i & 0x40)
2959 i |= 0x3f80;
2960 else
2961 i |= 0x4000;
2962 n |= i << 16;
4373f3ce 2963 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
2964 } else {
2965 if (i & 0x40)
2966 i |= 0x780;
2967 else
2968 i |= 0x800;
2969 n |= i << 19;
5b340b51 2970 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 2971 }
9ee6e8bb 2972 break;
b7bcbe95
FB
2973 case 15: /* extension space */
2974 switch (rn) {
2975 case 0: /* cpy */
2976 /* no-op */
2977 break;
2978 case 1: /* abs */
2979 gen_vfp_abs(dp);
2980 break;
2981 case 2: /* neg */
2982 gen_vfp_neg(dp);
2983 break;
2984 case 3: /* sqrt */
2985 gen_vfp_sqrt(dp);
2986 break;
2987 case 8: /* cmp */
2988 gen_vfp_cmp(dp);
2989 break;
2990 case 9: /* cmpe */
2991 gen_vfp_cmpe(dp);
2992 break;
2993 case 10: /* cmpz */
2994 gen_vfp_cmp(dp);
2995 break;
2996 case 11: /* cmpez */
2997 gen_vfp_F1_ld0(dp);
2998 gen_vfp_cmpe(dp);
2999 break;
3000 case 15: /* single<->double conversion */
3001 if (dp)
4373f3ce 3002 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3003 else
4373f3ce 3004 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3005 break;
3006 case 16: /* fuito */
3007 gen_vfp_uito(dp);
3008 break;
3009 case 17: /* fsito */
3010 gen_vfp_sito(dp);
3011 break;
9ee6e8bb
PB
3012 case 20: /* fshto */
3013 if (!arm_feature(env, ARM_FEATURE_VFP3))
3014 return 1;
644ad806 3015 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3016 break;
3017 case 21: /* fslto */
3018 if (!arm_feature(env, ARM_FEATURE_VFP3))
3019 return 1;
644ad806 3020 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3021 break;
3022 case 22: /* fuhto */
3023 if (!arm_feature(env, ARM_FEATURE_VFP3))
3024 return 1;
644ad806 3025 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3026 break;
3027 case 23: /* fulto */
3028 if (!arm_feature(env, ARM_FEATURE_VFP3))
3029 return 1;
644ad806 3030 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3031 break;
b7bcbe95
FB
3032 case 24: /* ftoui */
3033 gen_vfp_toui(dp);
3034 break;
3035 case 25: /* ftouiz */
3036 gen_vfp_touiz(dp);
3037 break;
3038 case 26: /* ftosi */
3039 gen_vfp_tosi(dp);
3040 break;
3041 case 27: /* ftosiz */
3042 gen_vfp_tosiz(dp);
3043 break;
9ee6e8bb
PB
3044 case 28: /* ftosh */
3045 if (!arm_feature(env, ARM_FEATURE_VFP3))
3046 return 1;
644ad806 3047 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3048 break;
3049 case 29: /* ftosl */
3050 if (!arm_feature(env, ARM_FEATURE_VFP3))
3051 return 1;
644ad806 3052 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3053 break;
3054 case 30: /* ftouh */
3055 if (!arm_feature(env, ARM_FEATURE_VFP3))
3056 return 1;
644ad806 3057 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3058 break;
3059 case 31: /* ftoul */
3060 if (!arm_feature(env, ARM_FEATURE_VFP3))
3061 return 1;
644ad806 3062 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3063 break;
b7bcbe95
FB
3064 default: /* undefined */
3065 printf ("rn:%d\n", rn);
3066 return 1;
3067 }
3068 break;
3069 default: /* undefined */
3070 printf ("op:%d\n", op);
3071 return 1;
3072 }
3073
3074 /* Write back the result. */
3075 if (op == 15 && (rn >= 8 && rn <= 11))
3076 ; /* Comparison, do nothing. */
3077 else if (op == 15 && rn > 17)
3078 /* Integer result. */
3079 gen_mov_vreg_F0(0, rd);
3080 else if (op == 15 && rn == 15)
3081 /* conversion */
3082 gen_mov_vreg_F0(!dp, rd);
3083 else
3084 gen_mov_vreg_F0(dp, rd);
3085
3086 /* break out of the loop if we have finished */
3087 if (veclen == 0)
3088 break;
3089
3090 if (op == 15 && delta_m == 0) {
3091 /* single source one-many */
3092 while (veclen--) {
3093 rd = ((rd + delta_d) & (bank_mask - 1))
3094 | (rd & bank_mask);
3095 gen_mov_vreg_F0(dp, rd);
3096 }
3097 break;
3098 }
3099 /* Set up the next operands. */
3100 veclen--;
3101 rd = ((rd + delta_d) & (bank_mask - 1))
3102 | (rd & bank_mask);
3103
3104 if (op == 15) {
3105 /* One source operand. */
3106 rm = ((rm + delta_m) & (bank_mask - 1))
3107 | (rm & bank_mask);
3108 gen_mov_F0_vreg(dp, rm);
3109 } else {
3110 /* Two source operands. */
3111 rn = ((rn + delta_d) & (bank_mask - 1))
3112 | (rn & bank_mask);
3113 gen_mov_F0_vreg(dp, rn);
3114 if (delta_m) {
3115 rm = ((rm + delta_m) & (bank_mask - 1))
3116 | (rm & bank_mask);
3117 gen_mov_F1_vreg(dp, rm);
3118 }
3119 }
3120 }
3121 }
3122 break;
3123 case 0xc:
3124 case 0xd:
9ee6e8bb 3125 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3126 /* two-register transfer */
3127 rn = (insn >> 16) & 0xf;
3128 rd = (insn >> 12) & 0xf;
3129 if (dp) {
9ee6e8bb
PB
3130 VFP_DREG_M(rm, insn);
3131 } else {
3132 rm = VFP_SREG_M(insn);
3133 }
b7bcbe95 3134
18c9b560 3135 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3136 /* vfp->arm */
3137 if (dp) {
4373f3ce
PB
3138 gen_mov_F0_vreg(0, rm * 2);
3139 tmp = gen_vfp_mrs();
3140 store_reg(s, rd, tmp);
3141 gen_mov_F0_vreg(0, rm * 2 + 1);
3142 tmp = gen_vfp_mrs();
3143 store_reg(s, rn, tmp);
b7bcbe95
FB
3144 } else {
3145 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3146 tmp = gen_vfp_mrs();
3147 store_reg(s, rn, tmp);
b7bcbe95 3148 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3149 tmp = gen_vfp_mrs();
3150 store_reg(s, rd, tmp);
b7bcbe95
FB
3151 }
3152 } else {
3153 /* arm->vfp */
3154 if (dp) {
4373f3ce
PB
3155 tmp = load_reg(s, rd);
3156 gen_vfp_msr(tmp);
3157 gen_mov_vreg_F0(0, rm * 2);
3158 tmp = load_reg(s, rn);
3159 gen_vfp_msr(tmp);
3160 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3161 } else {
4373f3ce
PB
3162 tmp = load_reg(s, rn);
3163 gen_vfp_msr(tmp);
b7bcbe95 3164 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3165 tmp = load_reg(s, rd);
3166 gen_vfp_msr(tmp);
b7bcbe95
FB
3167 gen_mov_vreg_F0(0, rm + 1);
3168 }
3169 }
3170 } else {
3171 /* Load/store */
3172 rn = (insn >> 16) & 0xf;
3173 if (dp)
9ee6e8bb 3174 VFP_DREG_D(rd, insn);
b7bcbe95 3175 else
9ee6e8bb
PB
3176 rd = VFP_SREG_D(insn);
3177 if (s->thumb && rn == 15) {
312eea9f
FN
3178 addr = new_tmp();
3179 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3180 } else {
312eea9f 3181 addr = load_reg(s, rn);
9ee6e8bb 3182 }
b7bcbe95
FB
3183 if ((insn & 0x01200000) == 0x01000000) {
3184 /* Single load/store */
3185 offset = (insn & 0xff) << 2;
3186 if ((insn & (1 << 23)) == 0)
3187 offset = -offset;
312eea9f 3188 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3189 if (insn & (1 << 20)) {
312eea9f 3190 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3191 gen_mov_vreg_F0(dp, rd);
3192 } else {
3193 gen_mov_F0_vreg(dp, rd);
312eea9f 3194 gen_vfp_st(s, dp, addr);
b7bcbe95 3195 }
312eea9f 3196 dead_tmp(addr);
b7bcbe95
FB
3197 } else {
3198 /* load/store multiple */
3199 if (dp)
3200 n = (insn >> 1) & 0x7f;
3201 else
3202 n = insn & 0xff;
3203
3204 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3205 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3206
3207 if (dp)
3208 offset = 8;
3209 else
3210 offset = 4;
3211 for (i = 0; i < n; i++) {
18c9b560 3212 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3213 /* load */
312eea9f 3214 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3215 gen_mov_vreg_F0(dp, rd + i);
3216 } else {
3217 /* store */
3218 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3219 gen_vfp_st(s, dp, addr);
b7bcbe95 3220 }
312eea9f 3221 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3222 }
3223 if (insn & (1 << 21)) {
3224 /* writeback */
3225 if (insn & (1 << 24))
3226 offset = -offset * n;
3227 else if (dp && (insn & 1))
3228 offset = 4;
3229 else
3230 offset = 0;
3231
3232 if (offset != 0)
312eea9f
FN
3233 tcg_gen_addi_i32(addr, addr, offset);
3234 store_reg(s, rn, addr);
3235 } else {
3236 dead_tmp(addr);
b7bcbe95
FB
3237 }
3238 }
3239 }
3240 break;
3241 default:
3242 /* Should never happen. */
3243 return 1;
3244 }
3245 return 0;
3246}
3247
6e256c93 3248static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3249{
6e256c93
FB
3250 TranslationBlock *tb;
3251
3252 tb = s->tb;
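    /* Direct block chaining is only valid when the destination lies on the
       same guest page as this TB; otherwise just update the PC and exit the
       TB without chaining. */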
3253 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3254 tcg_gen_goto_tb(n);
8984bd2e 3255 gen_set_pc_im(dest);
57fec1fe 3256 tcg_gen_exit_tb((long)tb + n);
6e256c93 3257 } else {
8984bd2e 3258 gen_set_pc_im(dest);
57fec1fe 3259 tcg_gen_exit_tb(0);
6e256c93 3260 }
c53be334
FB
3261}
3262
8aaca4c0
FB
3263static inline void gen_jmp (DisasContext *s, uint32_t dest)
3264{
551bd27f 3265 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3266 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3267 if (s->thumb)
d9ba4830
PB
3268 dest |= 1;
3269 gen_bx_im(s, dest);
8aaca4c0 3270 } else {
6e256c93 3271 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3272 s->is_jmp = DISAS_TB_JUMP;
3273 }
3274}
3275
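/* Signed 16x16->32 multiply: x and y select the top (1) or bottom (0)
   halfword of t0 and t1 respectively. */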
d9ba4830 3276static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3277{
ee097184 3278 if (x)
d9ba4830 3279 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3280 else
d9ba4830 3281 gen_sxth(t0);
ee097184 3282 if (y)
d9ba4830 3283 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3284 else
d9ba4830
PB
3285 gen_sxth(t1);
3286 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3287}
3288
3289/* Return the mask of PSR bits set by an MSR instruction. */
9ee6e8bb 3290static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3291 uint32_t mask;
3292
3293 mask = 0;
3294 if (flags & (1 << 0))
3295 mask |= 0xff;
3296 if (flags & (1 << 1))
3297 mask |= 0xff00;
3298 if (flags & (1 << 2))
3299 mask |= 0xff0000;
3300 if (flags & (1 << 3))
3301 mask |= 0xff000000;
9ee6e8bb 3302
2ae23e75 3303 /* Mask out undefined bits. */
9ee6e8bb
PB
3304 mask &= ~CPSR_RESERVED;
3305 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3306 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3307 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3308 mask &= ~CPSR_IT;
9ee6e8bb 3309 /* Mask out execution state bits. */
2ae23e75 3310 if (!spsr)
e160c51c 3311 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3312 /* Mask out privileged bits. */
3313 if (IS_USER(s))
9ee6e8bb 3314 mask &= CPSR_USER;
b5ff1b31
FB
3315 return mask;
3316}
3317
2fbac54b
FN
3318/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3319static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3320{
d9ba4830 3321 TCGv tmp;
b5ff1b31
FB
3322 if (spsr) {
3323 /* ??? This is also undefined in system mode. */
3324 if (IS_USER(s))
3325 return 1;
d9ba4830
PB
3326
3327 tmp = load_cpu_field(spsr);
3328 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3329 tcg_gen_andi_i32(t0, t0, mask);
3330 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3331 store_cpu_field(tmp, spsr);
b5ff1b31 3332 } else {
2fbac54b 3333 gen_set_cpsr(t0, mask);
b5ff1b31 3334 }
2fbac54b 3335 dead_tmp(t0);
b5ff1b31
FB
3336 gen_lookup_tb(s);
3337 return 0;
3338}
3339
2fbac54b
FN
3340/* Returns nonzero if access to the PSR is not permitted. */
3341static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3342{
3343 TCGv tmp;
3344 tmp = new_tmp();
3345 tcg_gen_movi_i32(tmp, val);
3346 return gen_set_psr(s, mask, spsr, tmp);
3347}
3348
e9bb4aa9
JR
3349/* Generate an old-style exception return. Marks pc as dead. */
3350static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3351{
d9ba4830 3352 TCGv tmp;
e9bb4aa9 3353 store_reg(s, 15, pc);
d9ba4830
PB
3354 tmp = load_cpu_field(spsr);
3355 gen_set_cpsr(tmp, 0xffffffff);
3356 dead_tmp(tmp);
b5ff1b31
FB
3357 s->is_jmp = DISAS_UPDATE;
3358}
3359
b0109805
PB
3360/* Generate a v6 exception return. Marks both values as dead. */
3361static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3362{
b0109805
PB
3363 gen_set_cpsr(cpsr, 0xffffffff);
3364 dead_tmp(cpsr);
3365 store_reg(s, 15, pc);
9ee6e8bb
PB
3366 s->is_jmp = DISAS_UPDATE;
3367}
3b46e624 3368
9ee6e8bb
PB
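/* Write the current conditional-execution state back to the condexec_bits
   field of the CPU state. */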
3369static inline void
3370gen_set_condexec (DisasContext *s)
3371{
3372 if (s->condexec_mask) {
8f01245e
PB
3373 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3374 TCGv tmp = new_tmp();
3375 tcg_gen_movi_i32(tmp, val);
d9ba4830 3376 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3377 }
3378}
3b46e624 3379
9ee6e8bb
PB
3380static void gen_nop_hint(DisasContext *s, int val)
3381{
3382 switch (val) {
3383 case 3: /* wfi */
8984bd2e 3384 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3385 s->is_jmp = DISAS_WFI;
3386 break;
3387 case 2: /* wfe */
3388 case 4: /* sev */
3389 /* TODO: Implement SEV and WFE. May help SMP performance. */
3390 default: /* nop */
3391 break;
3392 }
3393}
99c475ab 3394
ad69471c 3395#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3396
dd8fbd78 3397static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3398{
3399 switch (size) {
dd8fbd78
FN
3400 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3401 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3402 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3403 default: return 1;
3404 }
3405 return 0;
3406}
3407
dd8fbd78 3408static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3409{
3410 switch (size) {
dd8fbd78
FN
3411 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3412 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3413 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3414 default: return;
3415 }
3416}
3417
3418/* 32-bit pairwise ops end up the same as the elementwise versions. */
3419#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3420#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3421#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3422#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3423
3424/* FIXME: This is wrong. They set the wrong overflow bit. */
3425#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3426#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3427#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3428#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3429
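/* Emit the Neon integer helper variant selected by (size << 1) | u
   (s8/u8/s16/u16/s32/u32); the _ENV form additionally passes cpu_env to the
   helper. */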
3430#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3431 switch ((size << 1) | u) { \
3432 case 0: \
dd8fbd78 3433 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3434 break; \
3435 case 1: \
dd8fbd78 3436 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3437 break; \
3438 case 2: \
dd8fbd78 3439 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3440 break; \
3441 case 3: \
dd8fbd78 3442 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3443 break; \
3444 case 4: \
dd8fbd78 3445 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3446 break; \
3447 case 5: \
dd8fbd78 3448 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3449 break; \
3450 default: return 1; \
3451 }} while (0)
9ee6e8bb
PB
3452
3453#define GEN_NEON_INTEGER_OP(name) do { \
3454 switch ((size << 1) | u) { \
ad69471c 3455 case 0: \
dd8fbd78 3456 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3457 break; \
3458 case 1: \
dd8fbd78 3459 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3460 break; \
3461 case 2: \
dd8fbd78 3462 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3463 break; \
3464 case 3: \
dd8fbd78 3465 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3466 break; \
3467 case 4: \
dd8fbd78 3468 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3469 break; \
3470 case 5: \
dd8fbd78 3471 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3472 break; \
9ee6e8bb
PB
3473 default: return 1; \
3474 }} while (0)
3475
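/* Spill and reload 32-bit temporaries via the vfp.scratch[] slots in
   CPUARMState. */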
dd8fbd78 3476static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3477{
dd8fbd78
FN
3478 TCGv tmp = new_tmp();
3479 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3480 return tmp;
9ee6e8bb
PB
3481}
3482
dd8fbd78 3483static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3484{
dd8fbd78
FN
3485 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3486 dead_tmp(var);
9ee6e8bb
PB
3487}
3488
dd8fbd78 3489static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3490{
dd8fbd78 3491 TCGv tmp;
9ee6e8bb 3492 if (size == 1) {
dd8fbd78 3493 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3494 } else {
dd8fbd78
FN
3495 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3496 if (reg & 1) {
3497 gen_neon_dup_low16(tmp);
3498 } else {
3499 gen_neon_dup_high16(tmp);
3500 }
9ee6e8bb 3501 }
dd8fbd78 3502 return tmp;
9ee6e8bb
PB
3503}
3504
19457615
FN
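/* Interleave (zip) or de-interleave (unzip) the byte lanes of two 32-bit
   values. */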
3505static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3506{
3507 TCGv rd, rm, tmp;
3508
3509 rd = new_tmp();
3510 rm = new_tmp();
3511 tmp = new_tmp();
3512
3513 tcg_gen_andi_i32(rd, t0, 0xff);
3514 tcg_gen_shri_i32(tmp, t0, 8);
3515 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3516 tcg_gen_or_i32(rd, rd, tmp);
3517 tcg_gen_shli_i32(tmp, t1, 16);
3518 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3519 tcg_gen_or_i32(rd, rd, tmp);
3520 tcg_gen_shli_i32(tmp, t1, 8);
3521 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3522 tcg_gen_or_i32(rd, rd, tmp);
3523
3524 tcg_gen_shri_i32(rm, t0, 8);
3525 tcg_gen_andi_i32(rm, rm, 0xff);
3526 tcg_gen_shri_i32(tmp, t0, 16);
3527 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3528 tcg_gen_or_i32(rm, rm, tmp);
3529 tcg_gen_shli_i32(tmp, t1, 8);
3530 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3531 tcg_gen_or_i32(rm, rm, tmp);
3532 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3533 tcg_gen_or_i32(t1, rm, tmp);
3534 tcg_gen_mov_i32(t0, rd);
3535
3536 dead_tmp(tmp);
3537 dead_tmp(rm);
3538 dead_tmp(rd);
3539}
3540
3541static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3542{
3543 TCGv rd, rm, tmp;
3544
3545 rd = new_tmp();
3546 rm = new_tmp();
3547 tmp = new_tmp();
3548
3549 tcg_gen_andi_i32(rd, t0, 0xff);
3550 tcg_gen_shli_i32(tmp, t1, 8);
3551 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3552 tcg_gen_or_i32(rd, rd, tmp);
3553 tcg_gen_shli_i32(tmp, t0, 16);
3554 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3555 tcg_gen_or_i32(rd, rd, tmp);
3556 tcg_gen_shli_i32(tmp, t1, 24);
3557 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3558 tcg_gen_or_i32(rd, rd, tmp);
3559
3560 tcg_gen_andi_i32(rm, t1, 0xff000000);
3561 tcg_gen_shri_i32(tmp, t0, 8);
3562 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3563 tcg_gen_or_i32(rm, rm, tmp);
3564 tcg_gen_shri_i32(tmp, t1, 8);
3565 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3566 tcg_gen_or_i32(rm, rm, tmp);
3567 tcg_gen_shri_i32(tmp, t0, 16);
3568 tcg_gen_andi_i32(tmp, tmp, 0xff);
3569 tcg_gen_or_i32(t1, rm, tmp);
3570 tcg_gen_mov_i32(t0, rd);
3571
3572 dead_tmp(tmp);
3573 dead_tmp(rm);
3574 dead_tmp(rd);
3575}
3576
3577static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3578{
3579 TCGv tmp, tmp2;
3580
3581 tmp = new_tmp();
3582 tmp2 = new_tmp();
3583
3584 tcg_gen_andi_i32(tmp, t0, 0xffff);
3585 tcg_gen_shli_i32(tmp2, t1, 16);
3586 tcg_gen_or_i32(tmp, tmp, tmp2);
3587 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3588 tcg_gen_shri_i32(tmp2, t0, 16);
3589 tcg_gen_or_i32(t1, t1, tmp2);
3590 tcg_gen_mov_i32(t0, tmp);
3591
3592 dead_tmp(tmp2);
3593 dead_tmp(tmp);
3594}
3595
9ee6e8bb
PB
3596static void gen_neon_unzip(int reg, int q, int tmp, int size)
3597{
3598 int n;
dd8fbd78 3599 TCGv t0, t1;
9ee6e8bb
PB
3600
3601 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3602 t0 = neon_load_reg(reg, n);
3603 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3604 switch (size) {
dd8fbd78
FN
3605 case 0: gen_neon_unzip_u8(t0, t1); break;
3606 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3607 case 2: /* no-op */; break;
3608 default: abort();
3609 }
dd8fbd78
FN
3610 neon_store_scratch(tmp + n, t0);
3611 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3612 }
3613}
3614
19457615
FN
3615static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3616{
3617 TCGv rd, tmp;
3618
3619 rd = new_tmp();
3620 tmp = new_tmp();
3621
3622 tcg_gen_shli_i32(rd, t0, 8);
3623 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3624 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3625 tcg_gen_or_i32(rd, rd, tmp);
3626
3627 tcg_gen_shri_i32(t1, t1, 8);
3628 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3629 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3630 tcg_gen_or_i32(t1, t1, tmp);
3631 tcg_gen_mov_i32(t0, rd);
3632
3633 dead_tmp(tmp);
3634 dead_tmp(rd);
3635}
3636
3637static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3638{
3639 TCGv rd, tmp;
3640
3641 rd = new_tmp();
3642 tmp = new_tmp();
3643
3644 tcg_gen_shli_i32(rd, t0, 16);
3645 tcg_gen_andi_i32(tmp, t1, 0xffff);
3646 tcg_gen_or_i32(rd, rd, tmp);
3647 tcg_gen_shri_i32(t1, t1, 16);
3648 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3649 tcg_gen_or_i32(t1, t1, tmp);
3650 tcg_gen_mov_i32(t0, rd);
3651
3652 dead_tmp(tmp);
3653 dead_tmp(rd);
3654}
3655
3656
9ee6e8bb
PB
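/* Layout of the Neon load/store-multiple-structures forms, indexed by the
   'op' field (insn bits [11:8]): number of registers transferred, element
   interleave factor, and register spacing. */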
3657static struct {
3658 int nregs;
3659 int interleave;
3660 int spacing;
3661} neon_ls_element_type[11] = {
3662 {4, 4, 1},
3663 {4, 4, 2},
3664 {4, 1, 1},
3665 {4, 2, 1},
3666 {3, 3, 1},
3667 {3, 3, 2},
3668 {3, 1, 1},
3669 {1, 1, 1},
3670 {2, 2, 1},
3671 {2, 2, 2},
3672 {2, 1, 1}
3673};
3674
3675/* Translate a NEON load/store element instruction. Return nonzero if the
3676 instruction is invalid. */
3677static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3678{
3679 int rd, rn, rm;
3680 int op;
3681 int nregs;
3682 int interleave;
3683 int stride;
3684 int size;
3685 int reg;
3686 int pass;
3687 int load;
3688 int shift;
9ee6e8bb 3689 int n;
1b2b1e54 3690 TCGv addr;
b0109805 3691 TCGv tmp;
8f8e3aa4 3692 TCGv tmp2;
9ee6e8bb
PB
3693
3694 if (!vfp_enabled(env))
3695 return 1;
3696 VFP_DREG_D(rd, insn);
3697 rn = (insn >> 16) & 0xf;
3698 rm = insn & 0xf;
3699 load = (insn & (1 << 21)) != 0;
1b2b1e54 3700 addr = new_tmp();
9ee6e8bb
PB
3701 if ((insn & (1 << 23)) == 0) {
3702 /* Load/store all elements. */
3703 op = (insn >> 8) & 0xf;
3704 size = (insn >> 6) & 3;
3705 if (op > 10 || size == 3)
3706 return 1;
3707 nregs = neon_ls_element_type[op].nregs;
3708 interleave = neon_ls_element_type[op].interleave;
dcc65026 3709 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3710 stride = (1 << size) * interleave;
3711 for (reg = 0; reg < nregs; reg++) {
3712 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3713 load_reg_var(s, addr, rn);
3714 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3715 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3716 load_reg_var(s, addr, rn);
3717 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb
PB
3718 }
3719 for (pass = 0; pass < 2; pass++) {
3720 if (size == 2) {
3721 if (load) {
1b2b1e54 3722 tmp = gen_ld32(addr, IS_USER(s));
ad69471c 3723 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3724 } else {
ad69471c 3725 tmp = neon_load_reg(rd, pass);
1b2b1e54 3726 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3727 }
1b2b1e54 3728 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb
PB
3729 } else if (size == 1) {
3730 if (load) {
1b2b1e54
FN
3731 tmp = gen_ld16u(addr, IS_USER(s));
3732 tcg_gen_addi_i32(addr, addr, stride);
3733 tmp2 = gen_ld16u(addr, IS_USER(s));
3734 tcg_gen_addi_i32(addr, addr, stride);
8f8e3aa4
PB
3735 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3736 dead_tmp(tmp2);
3737 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3738 } else {
8f8e3aa4
PB
3739 tmp = neon_load_reg(rd, pass);
3740 tmp2 = new_tmp();
3741 tcg_gen_shri_i32(tmp2, tmp, 16);
1b2b1e54
FN
3742 gen_st16(tmp, addr, IS_USER(s));
3743 tcg_gen_addi_i32(addr, addr, stride);
3744 gen_st16(tmp2, addr, IS_USER(s));
3745 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb
PB
3746 }
3747 } else /* size == 0 */ {
3748 if (load) {
a50f5b91 3749 TCGV_UNUSED(tmp2);
9ee6e8bb 3750 for (n = 0; n < 4; n++) {
1b2b1e54
FN
3751 tmp = gen_ld8u(addr, IS_USER(s));
3752 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3753 if (n == 0) {
8f8e3aa4 3754 tmp2 = tmp;
9ee6e8bb 3755 } else {
8f8e3aa4
PB
3756 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3757 dead_tmp(tmp);
9ee6e8bb 3758 }
9ee6e8bb 3759 }
8f8e3aa4 3760 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3761 } else {
8f8e3aa4 3762 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3763 for (n = 0; n < 4; n++) {
8f8e3aa4 3764 tmp = new_tmp();
9ee6e8bb 3765 if (n == 0) {
8f8e3aa4 3766 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3767 } else {
8f8e3aa4 3768 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3769 }
1b2b1e54
FN
3770 gen_st8(tmp, addr, IS_USER(s));
3771 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3772 }
8f8e3aa4 3773 dead_tmp(tmp2);
9ee6e8bb
PB
3774 }
3775 }
3776 }
3777 rd += neon_ls_element_type[op].spacing;
3778 }
3779 stride = nregs * 8;
3780 } else {
3781 size = (insn >> 10) & 3;
3782 if (size == 3) {
3783 /* Load single element to all lanes. */
3784 if (!load)
3785 return 1;
3786 size = (insn >> 6) & 3;
3787 nregs = ((insn >> 8) & 3) + 1;
3788 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3789 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3790 for (reg = 0; reg < nregs; reg++) {
3791 switch (size) {
3792 case 0:
1b2b1e54 3793 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3794 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3795 break;
3796 case 1:
1b2b1e54 3797 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3798 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3799 break;
3800 case 2:
1b2b1e54 3801 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3802 break;
3803 case 3:
3804 return 1;
a50f5b91
PB
3805 default: /* Avoid compiler warnings. */
3806 abort();
99c475ab 3807 }
1b2b1e54 3808 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3809 tmp2 = new_tmp();
3810 tcg_gen_mov_i32(tmp2, tmp);
3811 neon_store_reg(rd, 0, tmp2);
3018f259 3812 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3813 rd += stride;
3814 }
3815 stride = (1 << size) * nregs;
3816 } else {
3817 /* Single element. */
3818 pass = (insn >> 7) & 1;
3819 switch (size) {
3820 case 0:
3821 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3822 stride = 1;
3823 break;
3824 case 1:
3825 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3826 stride = (insn & (1 << 5)) ? 2 : 1;
3827 break;
3828 case 2:
3829 shift = 0;
9ee6e8bb
PB
3830 stride = (insn & (1 << 6)) ? 2 : 1;
3831 break;
3832 default:
3833 abort();
3834 }
3835 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3836 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3837 for (reg = 0; reg < nregs; reg++) {
3838 if (load) {
9ee6e8bb
PB
3839 switch (size) {
3840 case 0:
1b2b1e54 3841 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3842 break;
3843 case 1:
1b2b1e54 3844 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3845 break;
3846 case 2:
1b2b1e54 3847 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3848 break;
a50f5b91
PB
3849 default: /* Avoid compiler warnings. */
3850 abort();
9ee6e8bb
PB
3851 }
3852 if (size != 2) {
8f8e3aa4
PB
3853 tmp2 = neon_load_reg(rd, pass);
3854 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3855 dead_tmp(tmp2);
9ee6e8bb 3856 }
8f8e3aa4 3857 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3858 } else { /* Store */
8f8e3aa4
PB
3859 tmp = neon_load_reg(rd, pass);
3860 if (shift)
3861 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3862 switch (size) {
3863 case 0:
1b2b1e54 3864 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3865 break;
3866 case 1:
1b2b1e54 3867 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3868 break;
3869 case 2:
1b2b1e54 3870 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3871 break;
99c475ab 3872 }
99c475ab 3873 }
9ee6e8bb 3874 rd += stride;
1b2b1e54 3875 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 3876 }
9ee6e8bb 3877 stride = nregs * (1 << size);
99c475ab 3878 }
9ee6e8bb 3879 }
1b2b1e54 3880 dead_tmp(addr);
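/* Post-indexed writeback, selected by the Rm field of the instruction:
   Rm == 15 means no writeback, Rm == 13 advances Rn by the transfer
   size ("stride") computed above, and any other Rm adds that register
   to Rn. */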
9ee6e8bb 3881 if (rm != 15) {
b26eefb6
PB
3882 TCGv base;
3883
3884 base = load_reg(s, rn);
9ee6e8bb 3885 if (rm == 13) {
b26eefb6 3886 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3887 } else {
b26eefb6
PB
3888 TCGv index;
3889 index = load_reg(s, rm);
3890 tcg_gen_add_i32(base, base, index);
3891 dead_tmp(index);
9ee6e8bb 3892 }
b26eefb6 3893 store_reg(s, rn, base);
9ee6e8bb
PB
3894 }
3895 return 0;
3896}
3b46e624 3897
8f8e3aa4
PB
3898/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3899static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3900{
3901 tcg_gen_and_i32(t, t, c);
3902 tcg_gen_bic_i32(f, f, c);
3903 tcg_gen_or_i32(dest, t, f);
3904}
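/* A small worked example of the select identity above, purely for
   illustration: with c = 0xf0, t = 0xab and f = 0x12 in an 8-bit lane,
   dest = (t & c) | (f & ~c) = 0xa0 | 0x02 = 0xa2, i.e. each result bit
   comes from t where the control bit is set and from f where it is
   clear. */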
3905
a7812ae4 3906static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3907{
3908 switch (size) {
3909 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3910 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3911 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3912 default: abort();
3913 }
3914}
3915
a7812ae4 3916static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3917{
3918 switch (size) {
3919 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3920 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3921 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3922 default: abort();
3923 }
3924}
3925
a7812ae4 3926static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3927{
3928 switch (size) {
3929 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3930 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3931 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3932 default: abort();
3933 }
3934}
3935
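/* Helper for the narrowing shifts (VSHRN/VRSHRN/VQSHRN/VQRSHRN): 'q'
   selects the rounding forms of the shift helpers, 'u' the unsigned
   forms, and negative shift counts shift right, following the NEON
   variable-shift convention used by these helpers. */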
3936static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3937 int q, int u)
3938{
3939 if (q) {
3940 if (u) {
3941 switch (size) {
3942 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3943 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3944 default: abort();
3945 }
3946 } else {
3947 switch (size) {
3948 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3949 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3950 default: abort();
3951 }
3952 }
3953 } else {
3954 if (u) {
3955 switch (size) {
 3956             case 1: gen_helper_neon_shl_u16(var, var, shift); break;
 3957             case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3958 default: abort();
3959 }
3960 } else {
3961 switch (size) {
3962 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3963 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3964 default: abort();
3965 }
3966 }
3967 }
3968}
3969
a7812ae4 3970static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
3971{
3972 if (u) {
3973 switch (size) {
3974 case 0: gen_helper_neon_widen_u8(dest, src); break;
3975 case 1: gen_helper_neon_widen_u16(dest, src); break;
3976 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3977 default: abort();
3978 }
3979 } else {
3980 switch (size) {
3981 case 0: gen_helper_neon_widen_s8(dest, src); break;
3982 case 1: gen_helper_neon_widen_s16(dest, src); break;
3983 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3984 default: abort();
3985 }
3986 }
3987 dead_tmp(src);
3988}
3989
3990static inline void gen_neon_addl(int size)
3991{
3992 switch (size) {
3993 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3994 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3995 case 2: tcg_gen_add_i64(CPU_V001); break;
3996 default: abort();
3997 }
3998}
3999
4000static inline void gen_neon_subl(int size)
4001{
4002 switch (size) {
4003 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4004 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4005 case 2: tcg_gen_sub_i64(CPU_V001); break;
4006 default: abort();
4007 }
4008}
4009
a7812ae4 4010static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4011{
4012 switch (size) {
4013 case 0: gen_helper_neon_negl_u16(var, var); break;
4014 case 1: gen_helper_neon_negl_u32(var, var); break;
4015 case 2: gen_helper_neon_negl_u64(var, var); break;
4016 default: abort();
4017 }
4018}
4019
a7812ae4 4020static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4021{
4022 switch (size) {
4023 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4024 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4025 default: abort();
4026 }
4027}
4028
a7812ae4 4029static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4030{
a7812ae4 4031 TCGv_i64 tmp;
ad69471c
PB
4032
4033 switch ((size << 1) | u) {
4034 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4035 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4036 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4037 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4038 case 4:
4039 tmp = gen_muls_i64_i32(a, b);
4040 tcg_gen_mov_i64(dest, tmp);
4041 break;
4042 case 5:
4043 tmp = gen_mulu_i64_i32(a, b);
4044 tcg_gen_mov_i64(dest, tmp);
4045 break;
4046 default: abort();
4047 }
ad69471c
PB
4048}
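/* The case index above is (size << 1) | u, so for example a signed
   16x16->32 VMULL dispatches to case 2 and an unsigned 32x32->64 one
   to case 5, which is why only the 32-bit inputs go through the i64
   multiply helpers. */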
4049
9ee6e8bb
PB
4050/* Translate a NEON data processing instruction. Return nonzero if the
4051 instruction is invalid.
ad69471c
PB
4052 We process data in a mixture of 32-bit and 64-bit chunks.
4053 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
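/* An orientation note (illustrative, not part of the decoder): each
   doubleword register is handled as two 32-bit "passes", so a quad
   operation simply runs the same per-pass code four times; e.g. an
   integer VADD.I32 on Q registers becomes four independent 32-bit adds
   over passes 0..3. */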
2c0262af 4054
9ee6e8bb
PB
4055static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4056{
4057 int op;
4058 int q;
4059 int rd, rn, rm;
4060 int size;
4061 int shift;
4062 int pass;
4063 int count;
4064 int pairwise;
4065 int u;
4066 int n;
4067 uint32_t imm;
b75263d6 4068 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4069 TCGv_i64 tmp64;
9ee6e8bb
PB
4070
4071 if (!vfp_enabled(env))
4072 return 1;
4073 q = (insn & (1 << 6)) != 0;
4074 u = (insn >> 24) & 1;
4075 VFP_DREG_D(rd, insn);
4076 VFP_DREG_N(rn, insn);
4077 VFP_DREG_M(rm, insn);
4078 size = (insn >> 20) & 3;
4079 if ((insn & (1 << 23)) == 0) {
4080 /* Three register same length. */
4081 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4082 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4083 || op == 10 || op == 11 || op == 16)) {
4084 /* 64-bit element instructions. */
9ee6e8bb 4085 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4086 neon_load_reg64(cpu_V0, rn + pass);
4087 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4088 switch (op) {
4089 case 1: /* VQADD */
4090 if (u) {
ad69471c 4091 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4092 } else {
ad69471c 4093 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4094 }
9ee6e8bb
PB
4095 break;
4096 case 5: /* VQSUB */
4097 if (u) {
ad69471c
PB
4098 gen_helper_neon_sub_saturate_u64(CPU_V001);
4099 } else {
4100 gen_helper_neon_sub_saturate_s64(CPU_V001);
4101 }
4102 break;
4103 case 8: /* VSHL */
4104 if (u) {
4105 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4106 } else {
4107 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4108 }
4109 break;
4110 case 9: /* VQSHL */
4111 if (u) {
 4112                         gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
 4113                                                  cpu_V1, cpu_V0);
 4114                     } else {
 4115                         gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
 4116                                                  cpu_V1, cpu_V0);
4117 }
4118 break;
4119 case 10: /* VRSHL */
4120 if (u) {
4121 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4122 } else {
ad69471c
PB
4123 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4124 }
4125 break;
4126 case 11: /* VQRSHL */
4127 if (u) {
4128 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4129 cpu_V1, cpu_V0);
4130 } else {
4131 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4132 cpu_V1, cpu_V0);
1e8d4eec 4133 }
9ee6e8bb
PB
4134 break;
4135 case 16:
4136 if (u) {
ad69471c 4137 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4138 } else {
ad69471c 4139 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4140 }
4141 break;
4142 default:
4143 abort();
2c0262af 4144 }
ad69471c 4145 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4146 }
9ee6e8bb 4147 return 0;
2c0262af 4148 }
9ee6e8bb
PB
4149 switch (op) {
4150 case 8: /* VSHL */
4151 case 9: /* VQSHL */
4152 case 10: /* VRSHL */
ad69471c 4153 case 11: /* VQRSHL */
9ee6e8bb 4154 {
ad69471c
PB
4155 int rtmp;
4156 /* Shift instruction operands are reversed. */
4157 rtmp = rn;
9ee6e8bb 4158 rn = rm;
ad69471c 4159 rm = rtmp;
9ee6e8bb
PB
4160 pairwise = 0;
4161 }
2c0262af 4162 break;
9ee6e8bb
PB
4163 case 20: /* VPMAX */
4164 case 21: /* VPMIN */
4165 case 23: /* VPADD */
4166 pairwise = 1;
2c0262af 4167 break;
9ee6e8bb
PB
4168 case 26: /* VPADD (float) */
4169 pairwise = (u && size < 2);
2c0262af 4170 break;
9ee6e8bb
PB
4171 case 30: /* VPMIN/VPMAX (float) */
4172 pairwise = u;
2c0262af 4173 break;
9ee6e8bb
PB
4174 default:
4175 pairwise = 0;
2c0262af 4176 break;
9ee6e8bb 4177 }
dd8fbd78 4178
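/* Worked example of the pairwise operand selection below (illustrative
   only): for a doubleword VPADD.I32 d0, d1, d2, pass 0 reads elements
   0 and 1 of d1 and pass 1 reads elements 0 and 1 of d2, which is what
   the "pass < q + 1" test arranges. */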
9ee6e8bb
PB
4179 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4180
4181 if (pairwise) {
4182 /* Pairwise. */
4183 if (q)
4184 n = (pass & 1) * 2;
2c0262af 4185 else
9ee6e8bb
PB
4186 n = 0;
4187 if (pass < q + 1) {
dd8fbd78
FN
4188 tmp = neon_load_reg(rn, n);
4189 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4190 } else {
dd8fbd78
FN
4191 tmp = neon_load_reg(rm, n);
4192 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4193 }
4194 } else {
4195 /* Elementwise. */
dd8fbd78
FN
4196 tmp = neon_load_reg(rn, pass);
4197 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4198 }
4199 switch (op) {
4200 case 0: /* VHADD */
4201 GEN_NEON_INTEGER_OP(hadd);
4202 break;
4203 case 1: /* VQADD */
ad69471c 4204 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4205 break;
9ee6e8bb
PB
4206 case 2: /* VRHADD */
4207 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4208 break;
9ee6e8bb
PB
4209 case 3: /* Logic ops. */
4210 switch ((u << 2) | size) {
4211 case 0: /* VAND */
dd8fbd78 4212 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4213 break;
4214 case 1: /* BIC */
dd8fbd78 4215 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4216 break;
4217 case 2: /* VORR */
dd8fbd78 4218 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4219 break;
4220 case 3: /* VORN */
dd8fbd78
FN
4221 tcg_gen_not_i32(tmp2, tmp2);
4222 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4223 break;
4224 case 4: /* VEOR */
dd8fbd78 4225 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4226 break;
4227 case 5: /* VBSL */
dd8fbd78
FN
4228 tmp3 = neon_load_reg(rd, pass);
4229 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4230 dead_tmp(tmp3);
9ee6e8bb
PB
4231 break;
4232 case 6: /* VBIT */
dd8fbd78
FN
4233 tmp3 = neon_load_reg(rd, pass);
4234 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4235 dead_tmp(tmp3);
9ee6e8bb
PB
4236 break;
4237 case 7: /* VBIF */
dd8fbd78
FN
4238 tmp3 = neon_load_reg(rd, pass);
4239 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4240 dead_tmp(tmp3);
9ee6e8bb 4241 break;
2c0262af
FB
4242 }
4243 break;
9ee6e8bb
PB
4244 case 4: /* VHSUB */
4245 GEN_NEON_INTEGER_OP(hsub);
4246 break;
4247 case 5: /* VQSUB */
ad69471c 4248 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4249 break;
9ee6e8bb
PB
4250 case 6: /* VCGT */
4251 GEN_NEON_INTEGER_OP(cgt);
4252 break;
4253 case 7: /* VCGE */
4254 GEN_NEON_INTEGER_OP(cge);
4255 break;
4256 case 8: /* VSHL */
ad69471c 4257 GEN_NEON_INTEGER_OP(shl);
2c0262af 4258 break;
9ee6e8bb 4259 case 9: /* VQSHL */
ad69471c 4260 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4261 break;
9ee6e8bb 4262 case 10: /* VRSHL */
ad69471c 4263 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4264 break;
9ee6e8bb 4265 case 11: /* VQRSHL */
ad69471c 4266 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4267 break;
4268 case 12: /* VMAX */
4269 GEN_NEON_INTEGER_OP(max);
4270 break;
4271 case 13: /* VMIN */
4272 GEN_NEON_INTEGER_OP(min);
4273 break;
4274 case 14: /* VABD */
4275 GEN_NEON_INTEGER_OP(abd);
4276 break;
4277 case 15: /* VABA */
4278 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4279 dead_tmp(tmp2);
4280 tmp2 = neon_load_reg(rd, pass);
4281 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4282 break;
4283 case 16:
4284 if (!u) { /* VADD */
dd8fbd78 4285 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4286 return 1;
4287 } else { /* VSUB */
4288 switch (size) {
dd8fbd78
FN
4289 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4290 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4291 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4292 default: return 1;
4293 }
4294 }
4295 break;
4296 case 17:
4297 if (!u) { /* VTST */
4298 switch (size) {
dd8fbd78
FN
4299 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4300 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4301 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4302 default: return 1;
4303 }
4304 } else { /* VCEQ */
4305 switch (size) {
dd8fbd78
FN
4306 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4307 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4308 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4309 default: return 1;
4310 }
4311 }
4312 break;
4313 case 18: /* Multiply. */
4314 switch (size) {
dd8fbd78
FN
4315 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4316 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4317 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4318 default: return 1;
4319 }
dd8fbd78
FN
4320 dead_tmp(tmp2);
4321 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4322 if (u) { /* VMLS */
dd8fbd78 4323 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4324 } else { /* VMLA */
dd8fbd78 4325 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4326 }
4327 break;
4328 case 19: /* VMUL */
4329 if (u) { /* polynomial */
dd8fbd78 4330 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4331 } else { /* Integer */
4332 switch (size) {
dd8fbd78
FN
4333 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4334 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4335 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4336 default: return 1;
4337 }
4338 }
4339 break;
4340 case 20: /* VPMAX */
4341 GEN_NEON_INTEGER_OP(pmax);
4342 break;
4343 case 21: /* VPMIN */
4344 GEN_NEON_INTEGER_OP(pmin);
4345 break;
 4346             case 22: /* Multiply high. */
4347 if (!u) { /* VQDMULH */
4348 switch (size) {
dd8fbd78
FN
4349 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4350 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4351 default: return 1;
4352 }
 4353             } else { /* VQRDMULH */
4354 switch (size) {
dd8fbd78
FN
4355 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4356 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4357 default: return 1;
4358 }
4359 }
4360 break;
4361 case 23: /* VPADD */
4362 if (u)
4363 return 1;
4364 switch (size) {
dd8fbd78
FN
4365 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4366 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4367 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4368 default: return 1;
4369 }
4370 break;
 4371             case 26: /* Floating point arithmetic. */
4372 switch ((u << 2) | size) {
4373 case 0: /* VADD */
dd8fbd78 4374 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4375 break;
4376 case 2: /* VSUB */
dd8fbd78 4377 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4378 break;
4379 case 4: /* VPADD */
dd8fbd78 4380 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4381 break;
4382 case 6: /* VABD */
dd8fbd78 4383 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4384 break;
4385 default:
4386 return 1;
4387 }
4388 break;
4389 case 27: /* Float multiply. */
dd8fbd78 4390 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4391 if (!u) {
dd8fbd78
FN
4392 dead_tmp(tmp2);
4393 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4394 if (size == 0) {
dd8fbd78 4395 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4396 } else {
dd8fbd78 4397 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4398 }
4399 }
4400 break;
4401 case 28: /* Float compare. */
4402 if (!u) {
dd8fbd78 4403 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4404 } else {
9ee6e8bb 4405 if (size == 0)
dd8fbd78 4406 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4407 else
dd8fbd78 4408 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4409 }
2c0262af 4410 break;
9ee6e8bb
PB
4411 case 29: /* Float compare absolute. */
4412 if (!u)
4413 return 1;
4414 if (size == 0)
dd8fbd78 4415 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4416 else
dd8fbd78 4417 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4418 break;
9ee6e8bb
PB
4419 case 30: /* Float min/max. */
4420 if (size == 0)
dd8fbd78 4421 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4422 else
dd8fbd78 4423 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4424 break;
4425 case 31:
4426 if (size == 0)
dd8fbd78 4427 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4428 else
dd8fbd78 4429 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4430 break;
9ee6e8bb
PB
4431 default:
4432 abort();
2c0262af 4433 }
dd8fbd78
FN
4434 dead_tmp(tmp2);
4435
9ee6e8bb
PB
4436 /* Save the result. For elementwise operations we can put it
4437 straight into the destination register. For pairwise operations
4438 we have to be careful to avoid clobbering the source operands. */
4439 if (pairwise && rd == rm) {
dd8fbd78 4440 neon_store_scratch(pass, tmp);
9ee6e8bb 4441 } else {
dd8fbd78 4442 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4443 }
4444
4445 } /* for pass */
4446 if (pairwise && rd == rm) {
4447 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4448 tmp = neon_load_scratch(pass);
4449 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4450 }
4451 }
ad69471c 4452 /* End of 3 register same size operations. */
9ee6e8bb
PB
4453 } else if (insn & (1 << 4)) {
4454 if ((insn & 0x00380080) != 0) {
4455 /* Two registers and shift. */
4456 op = (insn >> 8) & 0xf;
4457 if (insn & (1 << 7)) {
4458 /* 64-bit shift. */
4459 size = 3;
4460 } else {
4461 size = 2;
4462 while ((insn & (1 << (size + 19))) == 0)
4463 size--;
4464 }
4465 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
 4466                 /* To avoid excessive duplication of ops we implement shift
4467 by immediate using the variable shift operations. */
4468 if (op < 8) {
4469 /* Shift by immediate:
4470 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4471 /* Right shifts are encoded as N - shift, where N is the
4472 element size in bits. */
4473 if (op <= 4)
4474 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4475 if (size == 3) {
4476 count = q + 1;
4477 } else {
4478 count = q ? 4: 2;
4479 }
4480 switch (size) {
4481 case 0:
4482 imm = (uint8_t) shift;
4483 imm |= imm << 8;
4484 imm |= imm << 16;
4485 break;
4486 case 1:
4487 imm = (uint16_t) shift;
4488 imm |= imm << 16;
4489 break;
4490 case 2:
4491 case 3:
4492 imm = shift;
4493 break;
4494 default:
4495 abort();
4496 }
4497
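/* Illustrative example of the shift encoding handled above: VSHR.S8 #3
   is encoded with 8 - 3 = 5 in the shift field, the adjustment above
   turns that into -3, and for size == 0 the byte 0xfd is replicated so
   imm = 0xfdfdfdfd and every 8-bit lane sees the same negative (i.e.
   rightward) shift count. */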
4498 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4499 if (size == 3) {
4500 neon_load_reg64(cpu_V0, rm + pass);
4501 tcg_gen_movi_i64(cpu_V1, imm);
4502 switch (op) {
4503 case 0: /* VSHR */
4504 case 1: /* VSRA */
4505 if (u)
4506 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4507 else
ad69471c 4508 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4509 break;
ad69471c
PB
4510 case 2: /* VRSHR */
4511 case 3: /* VRSRA */
4512 if (u)
4513 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4514 else
ad69471c 4515 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4516 break;
ad69471c
PB
4517 case 4: /* VSRI */
4518 if (!u)
4519 return 1;
4520 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4521 break;
4522 case 5: /* VSHL, VSLI */
4523 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4524 break;
4525 case 6: /* VQSHL */
4526 if (u)
4527 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4528 else
ad69471c
PB
4529 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4530 break;
4531 case 7: /* VQSHLU */
4532 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4533 break;
9ee6e8bb 4534 }
ad69471c
PB
4535 if (op == 1 || op == 3) {
4536 /* Accumulate. */
4537 neon_load_reg64(cpu_V0, rd + pass);
4538 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4539 } else if (op == 4 || (op == 5 && u)) {
4540 /* Insert */
4541 cpu_abort(env, "VS[LR]I.64 not implemented");
4542 }
4543 neon_store_reg64(cpu_V0, rd + pass);
4544 } else { /* size < 3 */
 4545                     /* Operands in tmp and tmp2. */
dd8fbd78
FN
4546 tmp = neon_load_reg(rm, pass);
4547 tmp2 = new_tmp();
4548 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4549 switch (op) {
4550 case 0: /* VSHR */
4551 case 1: /* VSRA */
4552 GEN_NEON_INTEGER_OP(shl);
4553 break;
4554 case 2: /* VRSHR */
4555 case 3: /* VRSRA */
4556 GEN_NEON_INTEGER_OP(rshl);
4557 break;
4558 case 4: /* VSRI */
4559 if (!u)
4560 return 1;
4561 GEN_NEON_INTEGER_OP(shl);
4562 break;
4563 case 5: /* VSHL, VSLI */
4564 switch (size) {
dd8fbd78
FN
4565 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4566 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4567 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4568 default: return 1;
4569 }
4570 break;
4571 case 6: /* VQSHL */
4572 GEN_NEON_INTEGER_OP_ENV(qshl);
4573 break;
4574 case 7: /* VQSHLU */
4575 switch (size) {
dd8fbd78
FN
4576 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4577 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4578 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4579 default: return 1;
4580 }
4581 break;
4582 }
dd8fbd78 4583 dead_tmp(tmp2);
ad69471c
PB
4584
4585 if (op == 1 || op == 3) {
4586 /* Accumulate. */
dd8fbd78
FN
4587 tmp2 = neon_load_reg(rd, pass);
4588 gen_neon_add(size, tmp2, tmp);
4589 dead_tmp(tmp2);
ad69471c
PB
4590 } else if (op == 4 || (op == 5 && u)) {
4591 /* Insert */
4592 switch (size) {
4593 case 0:
4594 if (op == 4)
4595 imm = 0xff >> -shift;
4596 else
4597 imm = (uint8_t)(0xff << shift);
4598 imm |= imm << 8;
4599 imm |= imm << 16;
4600 break;
4601 case 1:
4602 if (op == 4)
4603 imm = 0xffff >> -shift;
4604 else
4605 imm = (uint16_t)(0xffff << shift);
4606 imm |= imm << 16;
4607 break;
4608 case 2:
4609 if (op == 4)
4610 imm = 0xffffffffu >> -shift;
4611 else
4612 imm = 0xffffffffu << shift;
4613 break;
4614 default:
4615 abort();
4616 }
dd8fbd78
FN
4617 tmp2 = neon_load_reg(rd, pass);
4618 tcg_gen_andi_i32(tmp, tmp, imm);
4619 tcg_gen_andi_i32(tmp2, tmp2, ~imm);
4620 tcg_gen_or_i32(tmp, tmp, tmp2);
4621 dead_tmp(tmp2);
ad69471c 4622 }
dd8fbd78 4623 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4624 }
4625 } /* for pass */
4626 } else if (op < 10) {
ad69471c 4627 /* Shift by immediate and narrow:
9ee6e8bb
PB
4628 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4629 shift = shift - (1 << (size + 3));
4630 size++;
9ee6e8bb
PB
4631 switch (size) {
4632 case 1:
ad69471c 4633 imm = (uint16_t)shift;
9ee6e8bb 4634 imm |= imm << 16;
ad69471c 4635 tmp2 = tcg_const_i32(imm);
a7812ae4 4636 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4637 break;
4638 case 2:
ad69471c
PB
4639 imm = (uint32_t)shift;
4640 tmp2 = tcg_const_i32(imm);
a7812ae4 4641 TCGV_UNUSED_I64(tmp64);
4cc633c3 4642 break;
9ee6e8bb 4643 case 3:
a7812ae4
PB
4644 tmp64 = tcg_const_i64(shift);
4645 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4646 break;
4647 default:
4648 abort();
4649 }
4650
ad69471c
PB
4651 for (pass = 0; pass < 2; pass++) {
4652 if (size == 3) {
4653 neon_load_reg64(cpu_V0, rm + pass);
4654 if (q) {
4655 if (u)
a7812ae4 4656 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4657 else
a7812ae4 4658 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4659 } else {
4660 if (u)
a7812ae4 4661 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4662 else
a7812ae4 4663 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4664 }
2c0262af 4665 } else {
ad69471c
PB
4666 tmp = neon_load_reg(rm + pass, 0);
4667 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4668 tmp3 = neon_load_reg(rm + pass, 1);
4669 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4670 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4671 dead_tmp(tmp);
36aa55dc 4672 dead_tmp(tmp3);
9ee6e8bb 4673 }
ad69471c
PB
4674 tmp = new_tmp();
4675 if (op == 8 && !u) {
4676 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4677 } else {
ad69471c
PB
4678 if (op == 8)
4679 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4680 else
ad69471c
PB
4681 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4682 }
4683 if (pass == 0) {
b75263d6
JR
4684 if (size != 3) {
4685 dead_tmp(tmp2);
4686 }
ad69471c
PB
4687 tmp2 = tmp;
4688 } else {
4689 neon_store_reg(rd, 0, tmp2);
4690 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4691 }
4692 } /* for pass */
b75263d6
JR
4693 if (size == 3) {
4694 tcg_temp_free_i64(tmp64);
4695 }
9ee6e8bb
PB
4696 } else if (op == 10) {
4697 /* VSHLL */
ad69471c 4698 if (q || size == 3)
9ee6e8bb 4699 return 1;
ad69471c
PB
4700 tmp = neon_load_reg(rm, 0);
4701 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4702 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4703 if (pass == 1)
4704 tmp = tmp2;
4705
4706 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4707
9ee6e8bb
PB
4708 if (shift != 0) {
4709 /* The shift is less than the width of the source
ad69471c
PB
4710 type, so we can just shift the whole register. */
4711 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4712 if (size < 2 || !u) {
4713 uint64_t imm64;
4714 if (size == 0) {
4715 imm = (0xffu >> (8 - shift));
4716 imm |= imm << 16;
4717 } else {
4718 imm = 0xffff >> (16 - shift);
9ee6e8bb 4719 }
ad69471c
PB
4720 imm64 = imm | (((uint64_t)imm) << 32);
 4721                             tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
4722 }
4723 }
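/* Note on the masking above (a sketch of the intent): after widening,
   each narrow element sits in a field twice its width, so shifting the
   whole 64-bit value left lets high bits of one element (for example
   sign-extension bits) spill into the low 'shift' bits of the element
   above it; imm64 marks exactly those low bits so they can be cleared
   again. */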
ad69471c 4724 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4725 }
4726 } else if (op == 15 || op == 16) {
4727 /* VCVT fixed-point. */
4728 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4729 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4730 if (op & 1) {
4731 if (u)
4373f3ce 4732 gen_vfp_ulto(0, shift);
9ee6e8bb 4733 else
4373f3ce 4734 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4735 } else {
4736 if (u)
4373f3ce 4737 gen_vfp_toul(0, shift);
9ee6e8bb 4738 else
4373f3ce 4739 gen_vfp_tosl(0, shift);
2c0262af 4740 }
4373f3ce 4741 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4742 }
4743 } else {
9ee6e8bb
PB
4744 return 1;
4745 }
4746 } else { /* (insn & 0x00380080) == 0 */
4747 int invert;
4748
4749 op = (insn >> 8) & 0xf;
4750 /* One register and immediate. */
4751 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4752 invert = (insn & (1 << 5)) != 0;
4753 switch (op) {
4754 case 0: case 1:
4755 /* no-op */
4756 break;
4757 case 2: case 3:
4758 imm <<= 8;
4759 break;
4760 case 4: case 5:
4761 imm <<= 16;
4762 break;
4763 case 6: case 7:
4764 imm <<= 24;
4765 break;
4766 case 8: case 9:
4767 imm |= imm << 16;
4768 break;
4769 case 10: case 11:
4770 imm = (imm << 8) | (imm << 24);
4771 break;
4772 case 12:
 4773             imm = (imm << 8) | 0xff;
4774 break;
4775 case 13:
4776 imm = (imm << 16) | 0xffff;
4777 break;
4778 case 14:
4779 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4780 if (invert)
4781 imm = ~imm;
4782 break;
4783 case 15:
4784 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4785 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4786 break;
4787 }
4788 if (invert)
4789 imm = ~imm;
4790
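/* Worked example of the immediate expansion above (illustrative): a
   VMOV.I32 with imm8 = 0x42 and op == 2 or 3 gives imm = 0x00004200,
   and with the invert bit set (the VMVN/BIC forms) the per-lane value
   becomes ~0x4200 = 0xffffbdff. */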
9ee6e8bb
PB
4791 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4792 if (op & 1 && op < 12) {
ad69471c 4793 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4794 if (invert) {
4795 /* The immediate value has already been inverted, so
4796 BIC becomes AND. */
ad69471c 4797 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4798 } else {
ad69471c 4799 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4800 }
9ee6e8bb 4801 } else {
ad69471c
PB
4802 /* VMOV, VMVN. */
4803 tmp = new_tmp();
9ee6e8bb 4804 if (op == 14 && invert) {
ad69471c
PB
4805 uint32_t val;
4806 val = 0;
9ee6e8bb
PB
4807 for (n = 0; n < 4; n++) {
4808 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4809 val |= 0xff << (n * 8);
9ee6e8bb 4810 }
ad69471c
PB
4811 tcg_gen_movi_i32(tmp, val);
4812 } else {
4813 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4814 }
9ee6e8bb 4815 }
ad69471c 4816 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4817 }
4818 }
e4b3861d 4819 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4820 if (size != 3) {
4821 op = (insn >> 8) & 0xf;
4822 if ((insn & (1 << 6)) == 0) {
4823 /* Three registers of different lengths. */
4824 int src1_wide;
4825 int src2_wide;
4826 int prewiden;
4827 /* prewiden, src1_wide, src2_wide */
4828 static const int neon_3reg_wide[16][3] = {
4829 {1, 0, 0}, /* VADDL */
4830 {1, 1, 0}, /* VADDW */
4831 {1, 0, 0}, /* VSUBL */
4832 {1, 1, 0}, /* VSUBW */
4833 {0, 1, 1}, /* VADDHN */
4834 {0, 0, 0}, /* VABAL */
4835 {0, 1, 1}, /* VSUBHN */
4836 {0, 0, 0}, /* VABDL */
4837 {0, 0, 0}, /* VMLAL */
4838 {0, 0, 0}, /* VQDMLAL */
4839 {0, 0, 0}, /* VMLSL */
4840 {0, 0, 0}, /* VQDMLSL */
4841 {0, 0, 0}, /* Integer VMULL */
4842 {0, 0, 0}, /* VQDMULL */
4843 {0, 0, 0} /* Polynomial VMULL */
4844 };
4845
4846 prewiden = neon_3reg_wide[op][0];
4847 src1_wide = neon_3reg_wide[op][1];
4848 src2_wide = neon_3reg_wide[op][2];
4849
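/* Example of how the table is used (illustrative): VADDW (op == 1) has
   prewiden = 1 and src1_wide = 1, so the first source is consumed 64
   bits per pass as a wide Q-register operand while the second source
   is widened from 32 bits before the 64-bit add. */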
ad69471c
PB
4850 if (size == 0 && (op == 9 || op == 11 || op == 13))
4851 return 1;
4852
9ee6e8bb
PB
4853 /* Avoid overlapping operands. Wide source operands are
4854 always aligned so will never overlap with wide
4855 destinations in problematic ways. */
8f8e3aa4 4856 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4857 tmp = neon_load_reg(rm, 1);
4858 neon_store_scratch(2, tmp);
8f8e3aa4 4859 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4860 tmp = neon_load_reg(rn, 1);
4861 neon_store_scratch(2, tmp);
9ee6e8bb 4862 }
a50f5b91 4863 TCGV_UNUSED(tmp3);
9ee6e8bb 4864 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4865 if (src1_wide) {
4866 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4867 TCGV_UNUSED(tmp);
9ee6e8bb 4868 } else {
ad69471c 4869 if (pass == 1 && rd == rn) {
dd8fbd78 4870 tmp = neon_load_scratch(2);
9ee6e8bb 4871 } else {
ad69471c
PB
4872 tmp = neon_load_reg(rn, pass);
4873 }
4874 if (prewiden) {
4875 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4876 }
4877 }
ad69471c
PB
4878 if (src2_wide) {
4879 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4880 TCGV_UNUSED(tmp2);
9ee6e8bb 4881 } else {
ad69471c 4882 if (pass == 1 && rd == rm) {
dd8fbd78 4883 tmp2 = neon_load_scratch(2);
9ee6e8bb 4884 } else {
ad69471c
PB
4885 tmp2 = neon_load_reg(rm, pass);
4886 }
4887 if (prewiden) {
4888 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4889 }
9ee6e8bb
PB
4890 }
4891 switch (op) {
4892 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4893 gen_neon_addl(size);
9ee6e8bb
PB
4894 break;
 4895                     case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 4896 gen_neon_subl(size);
9ee6e8bb
PB
4897 break;
4898 case 5: case 7: /* VABAL, VABDL */
4899 switch ((size << 1) | u) {
ad69471c
PB
4900 case 0:
4901 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4902 break;
4903 case 1:
4904 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4905 break;
4906 case 2:
4907 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4908 break;
4909 case 3:
4910 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4911 break;
4912 case 4:
4913 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4914 break;
4915 case 5:
4916 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4917 break;
9ee6e8bb
PB
4918 default: abort();
4919 }
ad69471c
PB
4920 dead_tmp(tmp2);
4921 dead_tmp(tmp);
9ee6e8bb
PB
4922 break;
4923 case 8: case 9: case 10: case 11: case 12: case 13:
4924 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4925 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
4926 dead_tmp(tmp2);
4927 dead_tmp(tmp);
9ee6e8bb
PB
4928 break;
4929 case 14: /* Polynomial VMULL */
4930 cpu_abort(env, "Polynomial VMULL not implemented");
4931
4932 default: /* 15 is RESERVED. */
4933 return 1;
4934 }
4935 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4936 /* Accumulate. */
4937 if (op == 10 || op == 11) {
ad69471c 4938 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4939 }
4940
9ee6e8bb 4941 if (op != 13) {
ad69471c 4942 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4943 }
4944
4945 switch (op) {
4946 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4947 gen_neon_addl(size);
9ee6e8bb
PB
4948 break;
4949 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4950 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4951 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4952 break;
9ee6e8bb
PB
4953 /* Fall through. */
4954 case 13: /* VQDMULL */
ad69471c 4955 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4956 break;
4957 default:
4958 abort();
4959 }
ad69471c 4960 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4961 } else if (op == 4 || op == 6) {
4962 /* Narrowing operation. */
ad69471c 4963 tmp = new_tmp();
9ee6e8bb
PB
4964 if (u) {
4965 switch (size) {
ad69471c
PB
4966 case 0:
4967 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4968 break;
4969 case 1:
4970 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4971 break;
4972 case 2:
4973 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4974 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4975 break;
9ee6e8bb
PB
4976 default: abort();
4977 }
4978 } else {
4979 switch (size) {
ad69471c
PB
4980 case 0:
4981 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4982 break;
4983 case 1:
4984 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4985 break;
4986 case 2:
4987 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4988 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4989 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4990 break;
9ee6e8bb
PB
4991 default: abort();
4992 }
4993 }
ad69471c
PB
4994 if (pass == 0) {
4995 tmp3 = tmp;
4996 } else {
4997 neon_store_reg(rd, 0, tmp3);
4998 neon_store_reg(rd, 1, tmp);
4999 }
9ee6e8bb
PB
5000 } else {
5001 /* Write back the result. */
ad69471c 5002 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5003 }
5004 }
5005 } else {
5006 /* Two registers and a scalar. */
5007 switch (op) {
5008 case 0: /* Integer VMLA scalar */
5009 case 1: /* Float VMLA scalar */
5010 case 4: /* Integer VMLS scalar */
5011 case 5: /* Floating point VMLS scalar */
5012 case 8: /* Integer VMUL scalar */
5013 case 9: /* Floating point VMUL scalar */
5014 case 12: /* VQDMULH scalar */
5015 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5016 tmp = neon_get_scalar(size, rm);
5017 neon_store_scratch(0, tmp);
9ee6e8bb 5018 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5019 tmp = neon_load_scratch(0);
5020 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5021 if (op == 12) {
5022 if (size == 1) {
dd8fbd78 5023 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5024 } else {
dd8fbd78 5025 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5026 }
5027 } else if (op == 13) {
5028 if (size == 1) {
dd8fbd78 5029 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5030 } else {
dd8fbd78 5031 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5032 }
5033 } else if (op & 1) {
dd8fbd78 5034 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5035 } else {
5036 switch (size) {
dd8fbd78
FN
5037 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5038 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5039 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5040 default: return 1;
5041 }
5042 }
dd8fbd78 5043 dead_tmp(tmp2);
9ee6e8bb
PB
5044 if (op < 8) {
5045 /* Accumulate. */
dd8fbd78 5046 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5047 switch (op) {
5048 case 0:
dd8fbd78 5049 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5050 break;
5051 case 1:
dd8fbd78 5052 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5053 break;
5054 case 4:
dd8fbd78 5055 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5056 break;
5057 case 5:
dd8fbd78 5058 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5059 break;
5060 default:
5061 abort();
5062 }
dd8fbd78 5063 dead_tmp(tmp2);
9ee6e8bb 5064 }
dd8fbd78 5065 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5066 }
5067 break;
 5068             case 2: /* VMLAL scalar */
5069 case 3: /* VQDMLAL scalar */
5070 case 6: /* VMLSL scalar */
5071 case 7: /* VQDMLSL scalar */
5072 case 10: /* VMULL scalar */
5073 case 11: /* VQDMULL scalar */
ad69471c
PB
5074 if (size == 0 && (op == 3 || op == 7 || op == 11))
5075 return 1;
5076
dd8fbd78
FN
5077 tmp2 = neon_get_scalar(size, rm);
5078 tmp3 = neon_load_reg(rn, 1);
ad69471c 5079
9ee6e8bb 5080 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5081 if (pass == 0) {
5082 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5083 } else {
dd8fbd78 5084 tmp = tmp3;
9ee6e8bb 5085 }
ad69471c 5086 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5087 dead_tmp(tmp);
9ee6e8bb 5088 if (op == 6 || op == 7) {
ad69471c
PB
5089 gen_neon_negl(cpu_V0, size);
5090 }
5091 if (op != 11) {
5092 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5093 }
9ee6e8bb
PB
5094 switch (op) {
5095 case 2: case 6:
ad69471c 5096 gen_neon_addl(size);
9ee6e8bb
PB
5097 break;
5098 case 3: case 7:
ad69471c
PB
5099 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5100 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5101 break;
5102 case 10:
5103 /* no-op */
5104 break;
5105 case 11:
ad69471c 5106 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5107 break;
5108 default:
5109 abort();
5110 }
ad69471c 5111 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5112 }
dd8fbd78
FN
5113
5114 dead_tmp(tmp2);
5115
9ee6e8bb
PB
5116 break;
5117 default: /* 14 and 15 are RESERVED */
5118 return 1;
5119 }
5120 }
5121 } else { /* size == 3 */
5122 if (!u) {
5123 /* Extract. */
9ee6e8bb 5124 imm = (insn >> 8) & 0xf;
ad69471c
PB
5125 count = q + 1;
5126
5127 if (imm > 7 && !q)
5128 return 1;
5129
5130 if (imm == 0) {
5131 neon_load_reg64(cpu_V0, rn);
5132 if (q) {
5133 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5134 }
ad69471c
PB
5135 } else if (imm == 8) {
5136 neon_load_reg64(cpu_V0, rn + 1);
5137 if (q) {
5138 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5139 }
ad69471c 5140 } else if (q) {
a7812ae4 5141 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5142 if (imm < 8) {
5143 neon_load_reg64(cpu_V0, rn);
a7812ae4 5144 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5145 } else {
5146 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5147 neon_load_reg64(tmp64, rm);
ad69471c
PB
5148 }
5149 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5150 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5151 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5152 if (imm < 8) {
5153 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5154 } else {
ad69471c
PB
5155 neon_load_reg64(cpu_V1, rm + 1);
5156 imm -= 8;
9ee6e8bb 5157 }
ad69471c 5158 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5159 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5160 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5161 tcg_temp_free_i64(tmp64);
ad69471c 5162 } else {
a7812ae4 5163 /* BUGFIX */
ad69471c 5164 neon_load_reg64(cpu_V0, rn);
a7812ae4 5165 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5166 neon_load_reg64(cpu_V1, rm);
a7812ae4 5167 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5168 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5169 }
5170 neon_store_reg64(cpu_V0, rd);
5171 if (q) {
5172 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5173 }
5174 } else if ((insn & (1 << 11)) == 0) {
5175 /* Two register misc. */
5176 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5177 size = (insn >> 18) & 3;
5178 switch (op) {
5179 case 0: /* VREV64 */
5180 if (size == 3)
5181 return 1;
5182 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5183 tmp = neon_load_reg(rm, pass * 2);
5184 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5185 switch (size) {
dd8fbd78
FN
5186 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5187 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5188 case 2: /* no-op */ break;
5189 default: abort();
5190 }
dd8fbd78 5191 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5192 if (size == 2) {
dd8fbd78 5193 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5194 } else {
9ee6e8bb 5195 switch (size) {
dd8fbd78
FN
5196 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5197 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5198 default: abort();
5199 }
dd8fbd78 5200 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5201 }
5202 }
5203 break;
5204 case 4: case 5: /* VPADDL */
5205 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5206 if (size == 3)
5207 return 1;
ad69471c
PB
5208 for (pass = 0; pass < q + 1; pass++) {
5209 tmp = neon_load_reg(rm, pass * 2);
5210 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5211 tmp = neon_load_reg(rm, pass * 2 + 1);
5212 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5213 switch (size) {
5214 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5215 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5216 case 2: tcg_gen_add_i64(CPU_V001); break;
5217 default: abort();
5218 }
9ee6e8bb
PB
5219 if (op >= 12) {
5220 /* Accumulate. */
ad69471c
PB
5221 neon_load_reg64(cpu_V1, rd + pass);
5222 gen_neon_addl(size);
9ee6e8bb 5223 }
ad69471c 5224 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5225 }
5226 break;
5227 case 33: /* VTRN */
5228 if (size == 2) {
5229 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5230 tmp = neon_load_reg(rm, n);
5231 tmp2 = neon_load_reg(rd, n + 1);
5232 neon_store_reg(rm, n, tmp2);
5233 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5234 }
5235 } else {
5236 goto elementwise;
5237 }
5238 break;
5239 case 34: /* VUZP */
5240 /* Reg Before After
5241 Rd A3 A2 A1 A0 B2 B0 A2 A0
5242 Rm B3 B2 B1 B0 B3 B1 A3 A1
5243 */
5244 if (size == 3)
5245 return 1;
5246 gen_neon_unzip(rd, q, 0, size);
5247 gen_neon_unzip(rm, q, 4, size);
5248 if (q) {
5249 static int unzip_order_q[8] =
5250 {0, 2, 4, 6, 1, 3, 5, 7};
5251 for (n = 0; n < 8; n++) {
5252 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5253 tmp = neon_load_scratch(unzip_order_q[n]);
5254 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5255 }
5256 } else {
5257 static int unzip_order[4] =
5258 {0, 4, 1, 5};
5259 for (n = 0; n < 4; n++) {
5260 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5261 tmp = neon_load_scratch(unzip_order[n]);
5262 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5263 }
5264 }
5265 break;
5266 case 35: /* VZIP */
5267 /* Reg Before After
5268 Rd A3 A2 A1 A0 B1 A1 B0 A0
5269 Rm B3 B2 B1 B0 B3 A3 B2 A2
5270 */
5271 if (size == 3)
5272 return 1;
5273 count = (q ? 4 : 2);
5274 for (n = 0; n < count; n++) {
dd8fbd78
FN
5275 tmp = neon_load_reg(rd, n);
 5276                     tmp2 = neon_load_reg(rm, n);
9ee6e8bb 5277 switch (size) {
dd8fbd78
FN
5278 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5279 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5280 case 2: /* no-op */; break;
5281 default: abort();
5282 }
dd8fbd78
FN
5283 neon_store_scratch(n * 2, tmp);
5284 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5285 }
5286 for (n = 0; n < count * 2; n++) {
5287 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5288 tmp = neon_load_scratch(n);
5289 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5290 }
5291 break;
5292 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5293 if (size == 3)
5294 return 1;
a50f5b91 5295 TCGV_UNUSED(tmp2);
9ee6e8bb 5296 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5297 neon_load_reg64(cpu_V0, rm + pass);
5298 tmp = new_tmp();
9ee6e8bb 5299 if (op == 36 && q == 0) {
ad69471c 5300 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5301 } else if (q) {
ad69471c 5302 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5303 } else {
ad69471c
PB
5304 gen_neon_narrow_sats(size, tmp, cpu_V0);
5305 }
5306 if (pass == 0) {
5307 tmp2 = tmp;
5308 } else {
5309 neon_store_reg(rd, 0, tmp2);
5310 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5311 }
9ee6e8bb
PB
5312 }
5313 break;
5314 case 38: /* VSHLL */
ad69471c 5315 if (q || size == 3)
9ee6e8bb 5316 return 1;
ad69471c
PB
5317 tmp = neon_load_reg(rm, 0);
5318 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5319 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5320 if (pass == 1)
5321 tmp = tmp2;
5322 gen_neon_widen(cpu_V0, tmp, size, 1);
5323 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5324 }
5325 break;
5326 default:
5327 elementwise:
5328 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5329 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5330 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5331 neon_reg_offset(rm, pass));
dd8fbd78 5332 TCGV_UNUSED(tmp);
9ee6e8bb 5333 } else {
dd8fbd78 5334 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5335 }
5336 switch (op) {
5337 case 1: /* VREV32 */
5338 switch (size) {
dd8fbd78
FN
5339 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5340 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5341 default: return 1;
5342 }
5343 break;
5344 case 2: /* VREV16 */
5345 if (size != 0)
5346 return 1;
dd8fbd78 5347 gen_rev16(tmp);
9ee6e8bb 5348 break;
9ee6e8bb
PB
5349 case 8: /* CLS */
5350 switch (size) {
dd8fbd78
FN
5351 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5352 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5353 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5354 default: return 1;
5355 }
5356 break;
5357 case 9: /* CLZ */
5358 switch (size) {
dd8fbd78
FN
5359 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5360 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5361 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5362 default: return 1;
5363 }
5364 break;
5365 case 10: /* CNT */
5366 if (size != 0)
5367 return 1;
dd8fbd78 5368 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5369 break;
5370 case 11: /* VNOT */
5371 if (size != 0)
5372 return 1;
dd8fbd78 5373 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5374 break;
5375 case 14: /* VQABS */
5376 switch (size) {
dd8fbd78
FN
5377 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5378 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5379 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5380 default: return 1;
5381 }
5382 break;
5383 case 15: /* VQNEG */
5384 switch (size) {
dd8fbd78
FN
5385 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5386 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5387 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5388 default: return 1;
5389 }
5390 break;
5391 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5392 tmp2 = tcg_const_i32(0);
9ee6e8bb 5393 switch(size) {
dd8fbd78
FN
5394 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5395 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5396 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5397 default: return 1;
5398 }
dd8fbd78 5399 tcg_temp_free(tmp2);
9ee6e8bb 5400 if (op == 19)
dd8fbd78 5401 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5402 break;
5403 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5404 tmp2 = tcg_const_i32(0);
9ee6e8bb 5405 switch(size) {
dd8fbd78
FN
5406 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5407 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5408 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5409 default: return 1;
5410 }
dd8fbd78 5411 tcg_temp_free(tmp2);
9ee6e8bb 5412 if (op == 20)
dd8fbd78 5413 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5414 break;
5415 case 18: /* VCEQ #0 */
dd8fbd78 5416 tmp2 = tcg_const_i32(0);
9ee6e8bb 5417 switch(size) {
dd8fbd78
FN
5418 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5419 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5420 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5421 default: return 1;
5422 }
dd8fbd78 5423 tcg_temp_free(tmp2);
9ee6e8bb
PB
5424 break;
5425 case 22: /* VABS */
5426 switch(size) {
dd8fbd78
FN
5427 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5428 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5429 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5430 default: return 1;
5431 }
5432 break;
5433 case 23: /* VNEG */
ad69471c
PB
5434 if (size == 3)
5435 return 1;
dd8fbd78
FN
5436 tmp2 = tcg_const_i32(0);
5437 gen_neon_rsb(size, tmp, tmp2);
5438 tcg_temp_free(tmp2);
9ee6e8bb
PB
5439 break;
5440 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5441 tmp2 = tcg_const_i32(0);
5442 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5443 tcg_temp_free(tmp2);
9ee6e8bb 5444 if (op == 27)
dd8fbd78 5445 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5446 break;
5447 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5448 tmp2 = tcg_const_i32(0);
5449 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5450 tcg_temp_free(tmp2);
9ee6e8bb 5451 if (op == 28)
dd8fbd78 5452 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5453 break;
5454 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5455 tmp2 = tcg_const_i32(0);
5456 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5457 tcg_temp_free(tmp2);
9ee6e8bb
PB
5458 break;
5459 case 30: /* Float VABS */
4373f3ce 5460 gen_vfp_abs(0);
9ee6e8bb
PB
5461 break;
5462 case 31: /* Float VNEG */
4373f3ce 5463 gen_vfp_neg(0);
9ee6e8bb
PB
5464 break;
5465 case 32: /* VSWP */
dd8fbd78
FN
5466 tmp2 = neon_load_reg(rd, pass);
5467 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5468 break;
5469 case 33: /* VTRN */
dd8fbd78 5470 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5471 switch (size) {
dd8fbd78
FN
5472 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5473 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5474 case 2: abort();
5475 default: return 1;
5476 }
dd8fbd78 5477 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5478 break;
5479 case 56: /* Integer VRECPE */
dd8fbd78 5480 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5481 break;
5482 case 57: /* Integer VRSQRTE */
dd8fbd78 5483 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5484 break;
5485 case 58: /* Float VRECPE */
4373f3ce 5486 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5487 break;
5488 case 59: /* Float VRSQRTE */
4373f3ce 5489 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5490 break;
5491 case 60: /* VCVT.F32.S32 */
4373f3ce 5492 gen_vfp_tosiz(0);
9ee6e8bb
PB
5493 break;
5494 case 61: /* VCVT.F32.U32 */
4373f3ce 5495 gen_vfp_touiz(0);
9ee6e8bb
PB
5496 break;
5497 case 62: /* VCVT.S32.F32 */
4373f3ce 5498 gen_vfp_sito(0);
9ee6e8bb
PB
5499 break;
5500 case 63: /* VCVT.U32.F32 */
4373f3ce 5501 gen_vfp_uito(0);
9ee6e8bb
PB
5502 break;
5503 default:
5504 /* Reserved: 21, 29, 39-56 */
5505 return 1;
5506 }
5507 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5508 tcg_gen_st_f32(cpu_F0s, cpu_env,
5509 neon_reg_offset(rd, pass));
9ee6e8bb 5510 } else {
dd8fbd78 5511 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5512 }
5513 }
5514 break;
5515 }
5516 } else if ((insn & (1 << 10)) == 0) {
5517 /* VTBL, VTBX. */
3018f259 5518 n = ((insn >> 5) & 0x18) + 8;
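/* n is the table length in bytes (one to four 8-byte registers). The
   helper calls below look up each byte of the Rm index vector in the
   table that starts at register rn; out-of-range indices read as zero
   for VTBL, or keep the old destination byte for VTBX, which is why
   the default value loaded just below differs between the two forms. */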
9ee6e8bb 5519 if (insn & (1 << 6)) {
8f8e3aa4 5520 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5521 } else {
8f8e3aa4
PB
5522 tmp = new_tmp();
5523 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5524 }
8f8e3aa4 5525 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5526 tmp4 = tcg_const_i32(rn);
5527 tmp5 = tcg_const_i32(n);
5528 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
3018f259 5529 dead_tmp(tmp);
9ee6e8bb 5530 if (insn & (1 << 6)) {
8f8e3aa4 5531 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5532 } else {
8f8e3aa4
PB
5533 tmp = new_tmp();
5534 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5535 }
8f8e3aa4 5536 tmp3 = neon_load_reg(rm, 1);
b75263d6
JR
5537 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5538 dead_tmp(tmp5);
5539 dead_tmp(tmp4);
8f8e3aa4 5540 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5541 neon_store_reg(rd, 1, tmp3);
5542 dead_tmp(tmp);
9ee6e8bb
PB
5543 } else if ((insn & 0x380) == 0) {
5544 /* VDUP */
5545 if (insn & (1 << 19)) {
dd8fbd78 5546 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5547 } else {
dd8fbd78 5548 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5549 }
5550 if (insn & (1 << 16)) {
dd8fbd78 5551 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5552 } else if (insn & (1 << 17)) {
5553 if ((insn >> 18) & 1)
dd8fbd78 5554 gen_neon_dup_high16(tmp);
9ee6e8bb 5555 else
dd8fbd78 5556 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5557 }
5558 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5559 tmp2 = new_tmp();
5560 tcg_gen_mov_i32(tmp2, tmp);
5561 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5562 }
dd8fbd78 5563 dead_tmp(tmp);
9ee6e8bb
PB
5564 } else {
5565 return 1;
5566 }
5567 }
5568 }
5569 return 0;
5570}
5571
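/* cp14 is only modelled here for the ThumbEE registers: TEECR is
   privileged-only, and TEECR bit 0 (the XED bit) additionally blocks
   user-mode access to TEEHBR, which is what the (env->teecr & 1)
   checks below implement. */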
fe1479c3
PB
5572static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5573{
5574 int crn = (insn >> 16) & 0xf;
5575 int crm = insn & 0xf;
5576 int op1 = (insn >> 21) & 7;
5577 int op2 = (insn >> 5) & 7;
5578 int rt = (insn >> 12) & 0xf;
5579 TCGv tmp;
5580
5581 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5582 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5583 /* TEECR */
5584 if (IS_USER(s))
5585 return 1;
5586 tmp = load_cpu_field(teecr);
5587 store_reg(s, rt, tmp);
5588 return 0;
5589 }
5590 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5591 /* TEEHBR */
5592 if (IS_USER(s) && (env->teecr & 1))
5593 return 1;
5594 tmp = load_cpu_field(teehbr);
5595 store_reg(s, rt, tmp);
5596 return 0;
5597 }
5598 }
5599 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5600 op1, crn, crm, op2);
5601 return 1;
5602}
5603
5604static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5605{
5606 int crn = (insn >> 16) & 0xf;
5607 int crm = insn & 0xf;
5608 int op1 = (insn >> 21) & 7;
5609 int op2 = (insn >> 5) & 7;
5610 int rt = (insn >> 12) & 0xf;
5611 TCGv tmp;
5612
5613 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5614 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5615 /* TEECR */
5616 if (IS_USER(s))
5617 return 1;
5618 tmp = load_reg(s, rt);
5619 gen_helper_set_teecr(cpu_env, tmp);
5620 dead_tmp(tmp);
5621 return 0;
5622 }
5623 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5624 /* TEEHBR */
5625 if (IS_USER(s) && (env->teecr & 1))
5626 return 1;
5627 tmp = load_reg(s, rt);
5628 store_cpu_field(tmp, teehbr);
5629 return 0;
5630 }
5631 }
5632 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5633 op1, crn, crm, op2);
5634 return 1;
5635}
5636
9ee6e8bb
PB
5637static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5638{
5639 int cpnum;
5640
5641 cpnum = (insn >> 8) & 0xf;
5642 if (arm_feature(env, ARM_FEATURE_XSCALE)
5643 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5644 return 1;
5645
5646 switch (cpnum) {
5647 case 0:
5648 case 1:
5649 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5650 return disas_iwmmxt_insn(env, s, insn);
5651 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5652 return disas_dsp_insn(env, s, insn);
5653 }
5654 return 1;
5655 case 10:
5656 case 11:
5657 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5658 case 14:
5659 /* Coprocessors 7-15 are architecturally reserved by ARM.
5660 Unfortunately Intel decided to ignore this. */
5661 if (arm_feature(env, ARM_FEATURE_XSCALE))
5662 goto board;
5663 if (insn & (1 << 20))
5664 return disas_cp14_read(env, s, insn);
5665 else
5666 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5667 case 15:
5668 return disas_cp15_insn (env, s, insn);
5669 default:
fe1479c3 5670 board:
9ee6e8bb
PB
5671 /* Unknown coprocessor. See if the board has hooked it. */
5672 return disas_cp_insn (env, s, insn);
5673 }
5674}
5675
5e3f878a
PB
5676
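/* The helpers below are the pattern used by the long multiply forms
   (UMULL/UMLAL/SMULL/SMLAL and friends): the 64-bit product is built
   in a TCGv_i64, gen_addq or gen_addq_lo folds in the previous
   accumulator, gen_logicq_cc sets N and Z from the 64-bit result for
   the flag-setting variants, and gen_storeq_reg writes the halves back
   to the destination register pair. */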
5677/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5678static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5679{
5680 TCGv tmp;
5681 tmp = new_tmp();
5682 tcg_gen_trunc_i64_i32(tmp, val);
5683 store_reg(s, rlow, tmp);
5684 tmp = new_tmp();
5685 tcg_gen_shri_i64(val, val, 32);
5686 tcg_gen_trunc_i64_i32(tmp, val);
5687 store_reg(s, rhigh, tmp);
5688}
5689
5690/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5691static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5692{
a7812ae4 5693 TCGv_i64 tmp;
5e3f878a
PB
5694 TCGv tmp2;
5695
36aa55dc 5696 /* Load value and extend to 64 bits. */
a7812ae4 5697 tmp = tcg_temp_new_i64();
5e3f878a
PB
5698 tmp2 = load_reg(s, rlow);
5699 tcg_gen_extu_i32_i64(tmp, tmp2);
5700 dead_tmp(tmp2);
5701 tcg_gen_add_i64(val, val, tmp);
b75263d6 5702 tcg_temp_free_i64(tmp);
5e3f878a
PB
5703}
5704
5705/* load and add a 64-bit value from a register pair. */
a7812ae4 5706static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5707{
a7812ae4 5708 TCGv_i64 tmp;
36aa55dc
PB
5709 TCGv tmpl;
5710 TCGv tmph;
5e3f878a
PB
5711
5712 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5713 tmpl = load_reg(s, rlow);
5714 tmph = load_reg(s, rhigh);
a7812ae4 5715 tmp = tcg_temp_new_i64();
36aa55dc
PB
5716 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5717 dead_tmp(tmpl);
5718 dead_tmp(tmph);
5e3f878a 5719 tcg_gen_add_i64(val, val, tmp);
b75263d6 5720 tcg_temp_free_i64(tmp);
5e3f878a
PB
5721}
5722
5723/* Set N and Z flags from a 64-bit value. */
a7812ae4 5724static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5725{
5726 TCGv tmp = new_tmp();
5727 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5728 gen_logic_CC(tmp);
5729 dead_tmp(tmp);
5e3f878a
PB
5730}
5731
9ee6e8bb
PB
5732static void disas_arm_insn(CPUState * env, DisasContext *s)
5733{
5734 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5735 TCGv tmp;
3670669c 5736 TCGv tmp2;
6ddbc6e4 5737 TCGv tmp3;
b0109805 5738 TCGv addr;
a7812ae4 5739 TCGv_i64 tmp64;
9ee6e8bb
PB
5740
5741 insn = ldl_code(s->pc);
5742 s->pc += 4;
5743
5744 /* M variants do not implement ARM mode. */
5745 if (IS_M(env))
5746 goto illegal_op;
5747 cond = insn >> 28;
5748 if (cond == 0xf){
5749 /* Unconditional instructions. */
5750 if (((insn >> 25) & 7) == 1) {
5751 /* NEON Data processing. */
5752 if (!arm_feature(env, ARM_FEATURE_NEON))
5753 goto illegal_op;
5754
5755 if (disas_neon_data_insn(env, s, insn))
5756 goto illegal_op;
5757 return;
5758 }
5759 if ((insn & 0x0f100000) == 0x04000000) {
5760 /* NEON load/store. */
5761 if (!arm_feature(env, ARM_FEATURE_NEON))
5762 goto illegal_op;
5763
5764 if (disas_neon_ls_insn(env, s, insn))
5765 goto illegal_op;
5766 return;
5767 }
5768 if ((insn & 0x0d70f000) == 0x0550f000)
5769 return; /* PLD */
5770 else if ((insn & 0x0ffffdff) == 0x01010000) {
5771 ARCH(6);
5772 /* setend */
5773 if (insn & (1 << 9)) {
5774 /* BE8 mode not implemented. */
5775 goto illegal_op;
5776 }
5777 return;
5778 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5779 switch ((insn >> 4) & 0xf) {
5780 case 1: /* clrex */
5781 ARCH(6K);
8f8e3aa4 5782 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5783 return;
5784 case 4: /* dsb */
5785 case 5: /* dmb */
5786 case 6: /* isb */
5787 ARCH(7);
5788 /* We don't emulate caches, so these are no-ops. */
5789 return;
5790 default:
5791 goto illegal_op;
5792 }
5793 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5794 /* srs */
c67b6b71 5795 int32_t offset;
9ee6e8bb
PB
5796 if (IS_USER(s))
5797 goto illegal_op;
5798 ARCH(6);
5799 op1 = (insn & 0x1f);
5800 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5801 addr = load_reg(s, 13);
9ee6e8bb 5802 } else {
b0109805 5803 addr = new_tmp();
b75263d6
JR
5804 tmp = tcg_const_i32(op1);
5805 gen_helper_get_r13_banked(addr, cpu_env, tmp);
5806 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5807 }
5808 i = (insn >> 23) & 3;
5809 switch (i) {
5810 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5811 case 1: offset = 0; break; /* IA */
5812 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
5813 case 3: offset = 4; break; /* IB */
5814 default: abort();
5815 }
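            /* Note: srs stores two words (LR, then SPSR below), so the
               offset chosen here is the lower of the two store addresses
               for the DA/IA/DB/IB addressing mode; e.g. DB starts the pair
               at base - 8 and IB at base + 4. */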
5816 if (offset)
b0109805
PB
5817 tcg_gen_addi_i32(addr, addr, offset);
5818 tmp = load_reg(s, 14);
5819 gen_st32(tmp, addr, 0);
c67b6b71 5820 tmp = load_cpu_field(spsr);
b0109805
PB
5821 tcg_gen_addi_i32(addr, addr, 4);
5822 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5823 if (insn & (1 << 21)) {
5824 /* Base writeback. */
5825 switch (i) {
5826 case 0: offset = -8; break;
c67b6b71
FN
5827 case 1: offset = 4; break;
5828 case 2: offset = -4; break;
9ee6e8bb
PB
5829 case 3: offset = 0; break;
5830 default: abort();
5831 }
5832 if (offset)
c67b6b71 5833 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 5834 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 5835 store_reg(s, 13, addr);
9ee6e8bb 5836 } else {
b75263d6
JR
5837 tmp = tcg_const_i32(op1);
5838 gen_helper_set_r13_banked(cpu_env, tmp, addr);
5839 tcg_temp_free_i32(tmp);
c67b6b71 5840 dead_tmp(addr);
9ee6e8bb 5841 }
b0109805
PB
5842 } else {
5843 dead_tmp(addr);
9ee6e8bb
PB
5844 }
5845 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5846 /* rfe */
c67b6b71 5847 int32_t offset;
9ee6e8bb
PB
5848 if (IS_USER(s))
5849 goto illegal_op;
5850 ARCH(6);
5851 rn = (insn >> 16) & 0xf;
b0109805 5852 addr = load_reg(s, rn);
9ee6e8bb
PB
5853 i = (insn >> 23) & 3;
5854 switch (i) {
b0109805 5855 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5856 case 1: offset = 0; break; /* IA */
5857 case 2: offset = -8; break; /* DB */
b0109805 5858 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5859 default: abort();
5860 }
5861 if (offset)
b0109805
PB
5862 tcg_gen_addi_i32(addr, addr, offset);
5863 /* Load PC into tmp and CPSR into tmp2. */
5864 tmp = gen_ld32(addr, 0);
5865 tcg_gen_addi_i32(addr, addr, 4);
5866 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5867 if (insn & (1 << 21)) {
5868 /* Base writeback. */
5869 switch (i) {
b0109805 5870 case 0: offset = -8; break;
c67b6b71
FN
5871 case 1: offset = 4; break;
5872 case 2: offset = -4; break;
b0109805 5873 case 3: offset = 0; break;
9ee6e8bb
PB
5874 default: abort();
5875 }
5876 if (offset)
b0109805
PB
5877 tcg_gen_addi_i32(addr, addr, offset);
5878 store_reg(s, rn, addr);
5879 } else {
5880 dead_tmp(addr);
9ee6e8bb 5881 }
b0109805 5882 gen_rfe(s, tmp, tmp2);
c67b6b71 5883 return;
9ee6e8bb
PB
5884 } else if ((insn & 0x0e000000) == 0x0a000000) {
5885 /* branch link and change to thumb (blx <offset>) */
5886 int32_t offset;
5887
5888 val = (uint32_t)s->pc;
d9ba4830
PB
5889 tmp = new_tmp();
5890 tcg_gen_movi_i32(tmp, val);
5891 store_reg(s, 14, tmp);
9ee6e8bb
PB
5892 /* Sign-extend the 24-bit offset */
5893 offset = (((int32_t)insn) << 8) >> 8;
5894 /* offset * 4 + bit24 * 2 + (thumb bit) */
5895 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5896 /* pipeline offset */
5897 val += 4;
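            /* Illustrative arithmetic: for a blx <offset> at address A,
               val starts as A + 4 (s->pc already points past this insn),
               the extra 4 yields the architectural PC of A + 8, and the
               target becomes A + 8 + SignExtend(imm24) * 4 + (bit24 << 1),
               with bit 0 set so gen_bx_im switches to Thumb state. */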
d9ba4830 5898 gen_bx_im(s, val);
9ee6e8bb
PB
5899 return;
5900 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5901 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5902 /* iWMMXt register transfer. */
5903 if (env->cp15.c15_cpar & (1 << 1))
5904 if (!disas_iwmmxt_insn(env, s, insn))
5905 return;
5906 }
5907 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5908 /* Coprocessor double register transfer. */
5909 } else if ((insn & 0x0f000010) == 0x0e000010) {
5910 /* Additional coprocessor register transfer. */
7997d92f 5911 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5912 uint32_t mask;
5913 uint32_t val;
5914 /* cps (privileged) */
5915 if (IS_USER(s))
5916 return;
5917 mask = val = 0;
5918 if (insn & (1 << 19)) {
5919 if (insn & (1 << 8))
5920 mask |= CPSR_A;
5921 if (insn & (1 << 7))
5922 mask |= CPSR_I;
5923 if (insn & (1 << 6))
5924 mask |= CPSR_F;
5925 if (insn & (1 << 18))
5926 val |= mask;
5927 }
7997d92f 5928 if (insn & (1 << 17)) {
9ee6e8bb
PB
5929 mask |= CPSR_M;
5930 val |= (insn & 0x1f);
5931 }
5932 if (mask) {
2fbac54b 5933 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
5934 }
5935 return;
5936 }
5937 goto illegal_op;
5938 }
5939 if (cond != 0xe) {
5940 /* if not always execute, we generate a conditional jump to
5941 next instruction */
5942 s->condlabel = gen_new_label();
d9ba4830 5943 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5944 s->condjmp = 1;
5945 }
5946 if ((insn & 0x0f900000) == 0x03000000) {
5947 if ((insn & (1 << 21)) == 0) {
5948 ARCH(6T2);
5949 rd = (insn >> 12) & 0xf;
5950 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
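            /* The 16-bit immediate is encoded as imm4:imm12, i.e. insn
               bits [19:16] supply the top nibble and bits [11:0] the low
               twelve bits. */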
5951 if ((insn & (1 << 22)) == 0) {
5952 /* MOVW */
5e3f878a
PB
5953 tmp = new_tmp();
5954 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5955 } else {
5956 /* MOVT */
5e3f878a 5957 tmp = load_reg(s, rd);
86831435 5958 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5959 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5960 }
5e3f878a 5961 store_reg(s, rd, tmp);
9ee6e8bb
PB
5962 } else {
5963 if (((insn >> 12) & 0xf) != 0xf)
5964 goto illegal_op;
5965 if (((insn >> 16) & 0xf) == 0) {
5966 gen_nop_hint(s, insn & 0xff);
5967 } else {
5968 /* CPSR = immediate */
5969 val = insn & 0xff;
5970 shift = ((insn >> 8) & 0xf) * 2;
5971 if (shift)
5972 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 5973 i = ((insn & (1 << 22)) != 0);
2fbac54b 5974 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
5975 goto illegal_op;
5976 }
5977 }
5978 } else if ((insn & 0x0f900000) == 0x01000000
5979 && (insn & 0x00000090) != 0x00000090) {
5980 /* miscellaneous instructions */
5981 op1 = (insn >> 21) & 3;
5982 sh = (insn >> 4) & 0xf;
5983 rm = insn & 0xf;
5984 switch (sh) {
5985 case 0x0: /* move program status register */
5986 if (op1 & 1) {
5987 /* PSR = reg */
2fbac54b 5988 tmp = load_reg(s, rm);
9ee6e8bb 5989 i = ((op1 & 2) != 0);
2fbac54b 5990 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
5991 goto illegal_op;
5992 } else {
5993 /* reg = PSR */
5994 rd = (insn >> 12) & 0xf;
5995 if (op1 & 2) {
5996 if (IS_USER(s))
5997 goto illegal_op;
d9ba4830 5998 tmp = load_cpu_field(spsr);
9ee6e8bb 5999 } else {
d9ba4830
PB
6000 tmp = new_tmp();
6001 gen_helper_cpsr_read(tmp);
9ee6e8bb 6002 }
d9ba4830 6003 store_reg(s, rd, tmp);
9ee6e8bb
PB
6004 }
6005 break;
6006 case 0x1:
6007 if (op1 == 1) {
6008 /* branch/exchange thumb (bx). */
d9ba4830
PB
6009 tmp = load_reg(s, rm);
6010 gen_bx(s, tmp);
9ee6e8bb
PB
6011 } else if (op1 == 3) {
6012 /* clz */
6013 rd = (insn >> 12) & 0xf;
1497c961
PB
6014 tmp = load_reg(s, rm);
6015 gen_helper_clz(tmp, tmp);
6016 store_reg(s, rd, tmp);
9ee6e8bb
PB
6017 } else {
6018 goto illegal_op;
6019 }
6020 break;
6021 case 0x2:
6022 if (op1 == 1) {
6023 ARCH(5J); /* bxj */
6024 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6025 tmp = load_reg(s, rm);
6026 gen_bx(s, tmp);
9ee6e8bb
PB
6027 } else {
6028 goto illegal_op;
6029 }
6030 break;
6031 case 0x3:
6032 if (op1 != 1)
6033 goto illegal_op;
6034
6035 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6036 tmp = load_reg(s, rm);
6037 tmp2 = new_tmp();
6038 tcg_gen_movi_i32(tmp2, s->pc);
6039 store_reg(s, 14, tmp2);
6040 gen_bx(s, tmp);
9ee6e8bb
PB
6041 break;
6042 case 0x5: /* saturating add/subtract */
6043 rd = (insn >> 12) & 0xf;
6044 rn = (insn >> 16) & 0xf;
b40d0353 6045 tmp = load_reg(s, rm);
5e3f878a 6046 tmp2 = load_reg(s, rn);
9ee6e8bb 6047 if (op1 & 2)
5e3f878a 6048 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6049 if (op1 & 1)
5e3f878a 6050 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6051 else
5e3f878a
PB
6052 gen_helper_add_saturate(tmp, tmp, tmp2);
6053 dead_tmp(tmp2);
6054 store_reg(s, rd, tmp);
9ee6e8bb
PB
6055 break;
6056 case 7: /* bkpt */
6057 gen_set_condexec(s);
5e3f878a 6058 gen_set_pc_im(s->pc - 4);
d9ba4830 6059 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6060 s->is_jmp = DISAS_JUMP;
6061 break;
6062 case 0x8: /* signed multiply */
6063 case 0xa:
6064 case 0xc:
6065 case 0xe:
6066 rs = (insn >> 8) & 0xf;
6067 rn = (insn >> 12) & 0xf;
6068 rd = (insn >> 16) & 0xf;
6069 if (op1 == 1) {
6070 /* (32 * 16) >> 16 */
5e3f878a
PB
6071 tmp = load_reg(s, rm);
6072 tmp2 = load_reg(s, rs);
9ee6e8bb 6073 if (sh & 4)
5e3f878a 6074 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6075 else
5e3f878a 6076 gen_sxth(tmp2);
a7812ae4
PB
6077 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6078 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6079 tmp = new_tmp();
a7812ae4 6080 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6081 tcg_temp_free_i64(tmp64);
9ee6e8bb 6082 if ((sh & 2) == 0) {
5e3f878a
PB
6083 tmp2 = load_reg(s, rn);
6084 gen_helper_add_setq(tmp, tmp, tmp2);
6085 dead_tmp(tmp2);
9ee6e8bb 6086 }
5e3f878a 6087 store_reg(s, rd, tmp);
9ee6e8bb
PB
6088 } else {
6089 /* 16 * 16 */
5e3f878a
PB
6090 tmp = load_reg(s, rm);
6091 tmp2 = load_reg(s, rs);
6092 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6093 dead_tmp(tmp2);
9ee6e8bb 6094 if (op1 == 2) {
a7812ae4
PB
6095 tmp64 = tcg_temp_new_i64();
6096 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6097 dead_tmp(tmp);
a7812ae4
PB
6098 gen_addq(s, tmp64, rn, rd);
6099 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6100 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6101 } else {
6102 if (op1 == 0) {
5e3f878a
PB
6103 tmp2 = load_reg(s, rn);
6104 gen_helper_add_setq(tmp, tmp, tmp2);
6105 dead_tmp(tmp2);
9ee6e8bb 6106 }
5e3f878a 6107 store_reg(s, rd, tmp);
9ee6e8bb
PB
6108 }
6109 }
6110 break;
6111 default:
6112 goto illegal_op;
6113 }
6114 } else if (((insn & 0x0e000000) == 0 &&
6115 (insn & 0x00000090) != 0x90) ||
6116 ((insn & 0x0e000000) == (1 << 25))) {
6117 int set_cc, logic_cc, shiftop;
6118
6119 op1 = (insn >> 21) & 0xf;
6120 set_cc = (insn >> 20) & 1;
6121 logic_cc = table_logic_cc[op1] & set_cc;
6122
6123 /* data processing instruction */
6124 if (insn & (1 << 25)) {
6125 /* immediate operand */
6126 val = insn & 0xff;
6127 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6128 if (shift) {
9ee6e8bb 6129 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6130 }
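            /* Worked example: an immediate field of 0x4ff gives imm8 = 0xff
               and rot = 4, so shift = 8 and val becomes ror(0xff, 8), i.e.
               0xff000000. */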
6131 tmp2 = new_tmp();
6132 tcg_gen_movi_i32(tmp2, val);
6133 if (logic_cc && shift) {
6134 gen_set_CF_bit31(tmp2);
6135 }
9ee6e8bb
PB
6136 } else {
6137 /* register */
6138 rm = (insn) & 0xf;
e9bb4aa9 6139 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6140 shiftop = (insn >> 5) & 3;
6141 if (!(insn & (1 << 4))) {
6142 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6143 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6144 } else {
6145 rs = (insn >> 8) & 0xf;
8984bd2e 6146 tmp = load_reg(s, rs);
e9bb4aa9 6147 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6148 }
6149 }
6150 if (op1 != 0x0f && op1 != 0x0d) {
6151 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6152 tmp = load_reg(s, rn);
6153 } else {
6154 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6155 }
6156 rd = (insn >> 12) & 0xf;
6157 switch(op1) {
6158 case 0x00:
e9bb4aa9
JR
6159 tcg_gen_and_i32(tmp, tmp, tmp2);
6160 if (logic_cc) {
6161 gen_logic_CC(tmp);
6162 }
21aeb343 6163 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6164 break;
6165 case 0x01:
e9bb4aa9
JR
6166 tcg_gen_xor_i32(tmp, tmp, tmp2);
6167 if (logic_cc) {
6168 gen_logic_CC(tmp);
6169 }
21aeb343 6170 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6171 break;
6172 case 0x02:
6173 if (set_cc && rd == 15) {
6174 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6175 if (IS_USER(s)) {
9ee6e8bb 6176 goto illegal_op;
e9bb4aa9
JR
6177 }
6178 gen_helper_sub_cc(tmp, tmp, tmp2);
6179 gen_exception_return(s, tmp);
9ee6e8bb 6180 } else {
e9bb4aa9
JR
6181 if (set_cc) {
6182 gen_helper_sub_cc(tmp, tmp, tmp2);
6183 } else {
6184 tcg_gen_sub_i32(tmp, tmp, tmp2);
6185 }
21aeb343 6186 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6187 }
6188 break;
6189 case 0x03:
e9bb4aa9
JR
6190 if (set_cc) {
6191 gen_helper_sub_cc(tmp, tmp2, tmp);
6192 } else {
6193 tcg_gen_sub_i32(tmp, tmp2, tmp);
6194 }
21aeb343 6195 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6196 break;
6197 case 0x04:
e9bb4aa9
JR
6198 if (set_cc) {
6199 gen_helper_add_cc(tmp, tmp, tmp2);
6200 } else {
6201 tcg_gen_add_i32(tmp, tmp, tmp2);
6202 }
21aeb343 6203 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6204 break;
6205 case 0x05:
e9bb4aa9
JR
6206 if (set_cc) {
6207 gen_helper_adc_cc(tmp, tmp, tmp2);
6208 } else {
6209 gen_add_carry(tmp, tmp, tmp2);
6210 }
21aeb343 6211 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6212 break;
6213 case 0x06:
e9bb4aa9
JR
6214 if (set_cc) {
6215 gen_helper_sbc_cc(tmp, tmp, tmp2);
6216 } else {
6217 gen_sub_carry(tmp, tmp, tmp2);
6218 }
21aeb343 6219 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6220 break;
6221 case 0x07:
e9bb4aa9
JR
6222 if (set_cc) {
6223 gen_helper_sbc_cc(tmp, tmp2, tmp);
6224 } else {
6225 gen_sub_carry(tmp, tmp2, tmp);
6226 }
21aeb343 6227 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6228 break;
6229 case 0x08:
6230 if (set_cc) {
e9bb4aa9
JR
6231 tcg_gen_and_i32(tmp, tmp, tmp2);
6232 gen_logic_CC(tmp);
9ee6e8bb 6233 }
e9bb4aa9 6234 dead_tmp(tmp);
9ee6e8bb
PB
6235 break;
6236 case 0x09:
6237 if (set_cc) {
e9bb4aa9
JR
6238 tcg_gen_xor_i32(tmp, tmp, tmp2);
6239 gen_logic_CC(tmp);
9ee6e8bb 6240 }
e9bb4aa9 6241 dead_tmp(tmp);
9ee6e8bb
PB
6242 break;
6243 case 0x0a:
6244 if (set_cc) {
e9bb4aa9 6245 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6246 }
e9bb4aa9 6247 dead_tmp(tmp);
9ee6e8bb
PB
6248 break;
6249 case 0x0b:
6250 if (set_cc) {
e9bb4aa9 6251 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6252 }
e9bb4aa9 6253 dead_tmp(tmp);
9ee6e8bb
PB
6254 break;
6255 case 0x0c:
e9bb4aa9
JR
6256 tcg_gen_or_i32(tmp, tmp, tmp2);
6257 if (logic_cc) {
6258 gen_logic_CC(tmp);
6259 }
21aeb343 6260 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6261 break;
6262 case 0x0d:
6263 if (logic_cc && rd == 15) {
6264 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6265 if (IS_USER(s)) {
9ee6e8bb 6266 goto illegal_op;
e9bb4aa9
JR
6267 }
6268 gen_exception_return(s, tmp2);
9ee6e8bb 6269 } else {
e9bb4aa9
JR
6270 if (logic_cc) {
6271 gen_logic_CC(tmp2);
6272 }
21aeb343 6273 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6274 }
6275 break;
6276 case 0x0e:
e9bb4aa9
JR
6277 tcg_gen_bic_i32(tmp, tmp, tmp2);
6278 if (logic_cc) {
6279 gen_logic_CC(tmp);
6280 }
21aeb343 6281 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6282 break;
6283 default:
6284 case 0x0f:
e9bb4aa9
JR
6285 tcg_gen_not_i32(tmp2, tmp2);
6286 if (logic_cc) {
6287 gen_logic_CC(tmp2);
6288 }
21aeb343 6289 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6290 break;
6291 }
e9bb4aa9
JR
6292 if (op1 != 0x0f && op1 != 0x0d) {
6293 dead_tmp(tmp2);
6294 }
9ee6e8bb
PB
6295 } else {
6296 /* other instructions */
6297 op1 = (insn >> 24) & 0xf;
6298 switch(op1) {
6299 case 0x0:
6300 case 0x1:
6301 /* multiplies, extra load/stores */
6302 sh = (insn >> 5) & 3;
6303 if (sh == 0) {
6304 if (op1 == 0x0) {
6305 rd = (insn >> 16) & 0xf;
6306 rn = (insn >> 12) & 0xf;
6307 rs = (insn >> 8) & 0xf;
6308 rm = (insn) & 0xf;
6309 op1 = (insn >> 20) & 0xf;
6310 switch (op1) {
6311 case 0: case 1: case 2: case 3: case 6:
6312 /* 32 bit mul */
5e3f878a
PB
6313 tmp = load_reg(s, rs);
6314 tmp2 = load_reg(s, rm);
6315 tcg_gen_mul_i32(tmp, tmp, tmp2);
6316 dead_tmp(tmp2);
9ee6e8bb
PB
6317 if (insn & (1 << 22)) {
6318 /* Subtract (mls) */
6319 ARCH(6T2);
5e3f878a
PB
6320 tmp2 = load_reg(s, rn);
6321 tcg_gen_sub_i32(tmp, tmp2, tmp);
6322 dead_tmp(tmp2);
9ee6e8bb
PB
6323 } else if (insn & (1 << 21)) {
6324 /* Add */
5e3f878a
PB
6325 tmp2 = load_reg(s, rn);
6326 tcg_gen_add_i32(tmp, tmp, tmp2);
6327 dead_tmp(tmp2);
9ee6e8bb
PB
6328 }
6329 if (insn & (1 << 20))
5e3f878a
PB
6330 gen_logic_CC(tmp);
6331 store_reg(s, rd, tmp);
9ee6e8bb
PB
6332 break;
6333 default:
6334 /* 64 bit mul */
5e3f878a
PB
6335 tmp = load_reg(s, rs);
6336 tmp2 = load_reg(s, rm);
9ee6e8bb 6337 if (insn & (1 << 22))
a7812ae4 6338 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6339 else
a7812ae4 6340 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6341 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6342 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6343 if (!(insn & (1 << 23))) { /* double accumulate */
6344 ARCH(6);
a7812ae4
PB
6345 gen_addq_lo(s, tmp64, rn);
6346 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6347 }
6348 if (insn & (1 << 20))
a7812ae4
PB
6349 gen_logicq_cc(tmp64);
6350 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6351 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6352 break;
6353 }
6354 } else {
6355 rn = (insn >> 16) & 0xf;
6356 rd = (insn >> 12) & 0xf;
6357 if (insn & (1 << 23)) {
6358 /* load/store exclusive */
86753403
PB
6359 op1 = (insn >> 21) & 0x3;
6360 if (op1)
a47f43d2 6361 ARCH(6K);
86753403
PB
6362 else
6363 ARCH(6);
3174f8e9 6364 addr = tcg_temp_local_new_i32();
98a46317 6365 load_reg_var(s, addr, rn);
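                    /* The exclusive monitor is modelled by helpers:
                       gen_helper_mark_exclusive records the address on the
                       load side, and gen_helper_test_exclusive returns 0 on
                       the store side if the access may proceed; a non-zero
                       result makes the brcond below skip the store and is
                       written to rd as the strex status. */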
9ee6e8bb 6366 if (insn & (1 << 20)) {
3174f8e9 6367 gen_helper_mark_exclusive(cpu_env, addr);
86753403
PB
6368 switch (op1) {
6369 case 0: /* ldrex */
6370 tmp = gen_ld32(addr, IS_USER(s));
6371 break;
6372 case 1: /* ldrexd */
6373 tmp = gen_ld32(addr, IS_USER(s));
6374 store_reg(s, rd, tmp);
6375 tcg_gen_addi_i32(addr, addr, 4);
6376 tmp = gen_ld32(addr, IS_USER(s));
6377 rd++;
6378 break;
6379 case 2: /* ldrexb */
6380 tmp = gen_ld8u(addr, IS_USER(s));
6381 break;
6382 case 3: /* ldrexh */
6383 tmp = gen_ld16u(addr, IS_USER(s));
6384 break;
6385 default:
6386 abort();
6387 }
8f8e3aa4 6388 store_reg(s, rd, tmp);
9ee6e8bb 6389 } else {
8f8e3aa4 6390 int label = gen_new_label();
9ee6e8bb 6391 rm = insn & 0xf;
3174f8e9
FN
6392 tmp2 = tcg_temp_local_new_i32();
6393 gen_helper_test_exclusive(tmp2, cpu_env, addr);
6394 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 6395 tmp = load_reg(s,rm);
86753403
PB
6396 switch (op1) {
6397 case 0: /* strex */
6398 gen_st32(tmp, addr, IS_USER(s));
6399 break;
6400 case 1: /* strexd */
6401 gen_st32(tmp, addr, IS_USER(s));
6402 tcg_gen_addi_i32(addr, addr, 4);
6403 tmp = load_reg(s, rm + 1);
6404 gen_st32(tmp, addr, IS_USER(s));
6405 break;
6406 case 2: /* strexb */
6407 gen_st8(tmp, addr, IS_USER(s));
6408 break;
6409 case 3: /* strexh */
6410 gen_st16(tmp, addr, IS_USER(s));
6411 break;
6412 default:
6413 abort();
6414 }
2637a3be 6415 gen_set_label(label);
3174f8e9
FN
6416 tcg_gen_mov_i32(cpu_R[rd], tmp2);
6417 tcg_temp_free(tmp2);
9ee6e8bb 6418 }
3174f8e9 6419 tcg_temp_free(addr);
9ee6e8bb
PB
6420 } else {
6421 /* SWP instruction */
6422 rm = (insn) & 0xf;
6423
8984bd2e
PB
6424 /* ??? This is not really atomic. However we know
6425 we never have multiple CPUs running in parallel,
6426 so it is good enough. */
6427 addr = load_reg(s, rn);
6428 tmp = load_reg(s, rm);
9ee6e8bb 6429 if (insn & (1 << 22)) {
8984bd2e
PB
6430 tmp2 = gen_ld8u(addr, IS_USER(s));
6431 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6432 } else {
8984bd2e
PB
6433 tmp2 = gen_ld32(addr, IS_USER(s));
6434 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6435 }
8984bd2e
PB
6436 dead_tmp(addr);
6437 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6438 }
6439 }
6440 } else {
6441 int address_offset;
6442 int load;
6443 /* Misc load/store */
6444 rn = (insn >> 16) & 0xf;
6445 rd = (insn >> 12) & 0xf;
b0109805 6446 addr = load_reg(s, rn);
9ee6e8bb 6447 if (insn & (1 << 24))
b0109805 6448 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6449 address_offset = 0;
6450 if (insn & (1 << 20)) {
6451 /* load */
6452 switch(sh) {
6453 case 1:
b0109805 6454 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6455 break;
6456 case 2:
b0109805 6457 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6458 break;
6459 default:
6460 case 3:
b0109805 6461 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6462 break;
6463 }
6464 load = 1;
6465 } else if (sh & 2) {
6466 /* doubleword */
6467 if (sh & 1) {
6468 /* store */
b0109805
PB
6469 tmp = load_reg(s, rd);
6470 gen_st32(tmp, addr, IS_USER(s));
6471 tcg_gen_addi_i32(addr, addr, 4);
6472 tmp = load_reg(s, rd + 1);
6473 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6474 load = 0;
6475 } else {
6476 /* load */
b0109805
PB
6477 tmp = gen_ld32(addr, IS_USER(s));
6478 store_reg(s, rd, tmp);
6479 tcg_gen_addi_i32(addr, addr, 4);
6480 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6481 rd++;
6482 load = 1;
6483 }
6484 address_offset = -4;
6485 } else {
6486 /* store */
b0109805
PB
6487 tmp = load_reg(s, rd);
6488 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6489 load = 0;
6490 }
6491 /* Perform base writeback before writing back the loaded value, to
6492 ensure correct behavior with overlapping index registers.
6493 ldrd with base writeback is undefined if the
6494 destination and index registers overlap. */
6495 if (!(insn & (1 << 24))) {
b0109805
PB
6496 gen_add_datah_offset(s, insn, address_offset, addr);
6497 store_reg(s, rn, addr);
9ee6e8bb
PB
6498 } else if (insn & (1 << 21)) {
6499 if (address_offset)
b0109805
PB
6500 tcg_gen_addi_i32(addr, addr, address_offset);
6501 store_reg(s, rn, addr);
6502 } else {
6503 dead_tmp(addr);
9ee6e8bb
PB
6504 }
6505 if (load) {
6506 /* Complete the load. */
b0109805 6507 store_reg(s, rd, tmp);
9ee6e8bb
PB
6508 }
6509 }
6510 break;
6511 case 0x4:
6512 case 0x5:
6513 goto do_ldst;
6514 case 0x6:
6515 case 0x7:
6516 if (insn & (1 << 4)) {
6517 ARCH(6);
6518 /* Armv6 Media instructions. */
6519 rm = insn & 0xf;
6520 rn = (insn >> 16) & 0xf;
2c0262af 6521 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6522 rs = (insn >> 8) & 0xf;
6523 switch ((insn >> 23) & 3) {
6524 case 0: /* Parallel add/subtract. */
6525 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6526 tmp = load_reg(s, rn);
6527 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6528 sh = (insn >> 5) & 7;
6529 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6530 goto illegal_op;
6ddbc6e4
PB
6531 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6532 dead_tmp(tmp2);
6533 store_reg(s, rd, tmp);
9ee6e8bb
PB
6534 break;
6535 case 1:
6536 if ((insn & 0x00700020) == 0) {
6c95676b 6537 /* Halfword pack. */
3670669c
PB
6538 tmp = load_reg(s, rn);
6539 tmp2 = load_reg(s, rm);
9ee6e8bb 6540 shift = (insn >> 7) & 0x1f;
3670669c
PB
6541 if (insn & (1 << 6)) {
6542 /* pkhtb */
22478e79
AZ
6543 if (shift == 0)
6544 shift = 31;
6545 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6546 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6547 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6548 } else {
6549 /* pkhbt */
22478e79
AZ
6550 if (shift)
6551 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6552 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6553 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6554 }
6555 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6556 dead_tmp(tmp2);
3670669c 6557 store_reg(s, rd, tmp);
9ee6e8bb
PB
6558 } else if ((insn & 0x00200020) == 0x00200000) {
6559 /* [us]sat */
6ddbc6e4 6560 tmp = load_reg(s, rm);
9ee6e8bb
PB
6561 shift = (insn >> 7) & 0x1f;
6562 if (insn & (1 << 6)) {
6563 if (shift == 0)
6564 shift = 31;
6ddbc6e4 6565 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6566 } else {
6ddbc6e4 6567 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6568 }
6569 sh = (insn >> 16) & 0x1f;
6570 if (sh != 0) {
b75263d6 6571 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6572 if (insn & (1 << 22))
b75263d6 6573 gen_helper_usat(tmp, tmp, tmp2);
9ee6e8bb 6574 else
b75263d6
JR
6575 gen_helper_ssat(tmp, tmp, tmp2);
6576 tcg_temp_free_i32(tmp2);
9ee6e8bb 6577 }
6ddbc6e4 6578 store_reg(s, rd, tmp);
9ee6e8bb
PB
6579 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6580 /* [us]sat16 */
6ddbc6e4 6581 tmp = load_reg(s, rm);
9ee6e8bb
PB
6582 sh = (insn >> 16) & 0x1f;
6583 if (sh != 0) {
b75263d6 6584 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6585 if (insn & (1 << 22))
b75263d6 6586 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 6587 else
b75263d6
JR
6588 gen_helper_ssat16(tmp, tmp, tmp2);
6589 tcg_temp_free_i32(tmp2);
9ee6e8bb 6590 }
6ddbc6e4 6591 store_reg(s, rd, tmp);
9ee6e8bb
PB
6592 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6593 /* Select bytes. */
6ddbc6e4
PB
6594 tmp = load_reg(s, rn);
6595 tmp2 = load_reg(s, rm);
6596 tmp3 = new_tmp();
6597 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6598 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6599 dead_tmp(tmp3);
6600 dead_tmp(tmp2);
6601 store_reg(s, rd, tmp);
9ee6e8bb 6602 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6603 tmp = load_reg(s, rm);
9ee6e8bb
PB
6604 shift = (insn >> 10) & 3;
6605 /* ??? In many cases it's not necessary to do a
6606 rotate, a shift is sufficient. */
6607 if (shift != 0)
5e3f878a 6608 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6609 op1 = (insn >> 20) & 7;
6610 switch (op1) {
5e3f878a
PB
6611 case 0: gen_sxtb16(tmp); break;
6612 case 2: gen_sxtb(tmp); break;
6613 case 3: gen_sxth(tmp); break;
6614 case 4: gen_uxtb16(tmp); break;
6615 case 6: gen_uxtb(tmp); break;
6616 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6617 default: goto illegal_op;
6618 }
6619 if (rn != 15) {
5e3f878a 6620 tmp2 = load_reg(s, rn);
9ee6e8bb 6621 if ((op1 & 3) == 0) {
5e3f878a 6622 gen_add16(tmp, tmp2);
9ee6e8bb 6623 } else {
5e3f878a
PB
6624 tcg_gen_add_i32(tmp, tmp, tmp2);
6625 dead_tmp(tmp2);
9ee6e8bb
PB
6626 }
6627 }
6c95676b 6628 store_reg(s, rd, tmp);
9ee6e8bb
PB
6629 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6630 /* rev */
b0109805 6631 tmp = load_reg(s, rm);
9ee6e8bb
PB
6632 if (insn & (1 << 22)) {
6633 if (insn & (1 << 7)) {
b0109805 6634 gen_revsh(tmp);
9ee6e8bb
PB
6635 } else {
6636 ARCH(6T2);
b0109805 6637 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6638 }
6639 } else {
6640 if (insn & (1 << 7))
b0109805 6641 gen_rev16(tmp);
9ee6e8bb 6642 else
66896cb8 6643 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6644 }
b0109805 6645 store_reg(s, rd, tmp);
9ee6e8bb
PB
6646 } else {
6647 goto illegal_op;
6648 }
6649 break;
6650 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6651 tmp = load_reg(s, rm);
6652 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6653 if (insn & (1 << 20)) {
6654 /* Signed multiply most significant [accumulate]. */
a7812ae4 6655 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6656 if (insn & (1 << 5))
a7812ae4
PB
6657 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6658 tcg_gen_shri_i64(tmp64, tmp64, 32);
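                    /* Adding 0x80000000 before taking the top 32 bits
                       implements the rounding (R) form: the 64-bit product
                       is rounded to nearest instead of truncated. */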
5e3f878a 6659 tmp = new_tmp();
a7812ae4 6660 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6661 tcg_temp_free_i64(tmp64);
955a7dd5
AZ
6662 if (rd != 15) {
6663 tmp2 = load_reg(s, rd);
9ee6e8bb 6664 if (insn & (1 << 6)) {
5e3f878a 6665 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6666 } else {
5e3f878a 6667 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6668 }
5e3f878a 6669 dead_tmp(tmp2);
9ee6e8bb 6670 }
955a7dd5 6671 store_reg(s, rn, tmp);
9ee6e8bb
PB
6672 } else {
6673 if (insn & (1 << 5))
5e3f878a
PB
6674 gen_swap_half(tmp2);
6675 gen_smul_dual(tmp, tmp2);
6676 /* This addition cannot overflow. */
6677 if (insn & (1 << 6)) {
6678 tcg_gen_sub_i32(tmp, tmp, tmp2);
6679 } else {
6680 tcg_gen_add_i32(tmp, tmp, tmp2);
6681 }
6682 dead_tmp(tmp2);
9ee6e8bb 6683 if (insn & (1 << 22)) {
5e3f878a 6684 /* smlald, smlsld */
a7812ae4
PB
6685 tmp64 = tcg_temp_new_i64();
6686 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6687 dead_tmp(tmp);
a7812ae4
PB
6688 gen_addq(s, tmp64, rd, rn);
6689 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 6690 tcg_temp_free_i64(tmp64);
9ee6e8bb 6691 } else {
5e3f878a 6692 /* smuad, smusd, smlad, smlsd */
22478e79 6693 if (rd != 15)
9ee6e8bb 6694 {
22478e79 6695 tmp2 = load_reg(s, rd);
5e3f878a
PB
6696 gen_helper_add_setq(tmp, tmp, tmp2);
6697 dead_tmp(tmp2);
9ee6e8bb 6698 }
22478e79 6699 store_reg(s, rn, tmp);
9ee6e8bb
PB
6700 }
6701 }
6702 break;
6703 case 3:
6704 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6705 switch (op1) {
6706 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6707 ARCH(6);
6708 tmp = load_reg(s, rm);
6709 tmp2 = load_reg(s, rs);
6710 gen_helper_usad8(tmp, tmp, tmp2);
6711 dead_tmp(tmp2);
ded9d295
AZ
6712 if (rd != 15) {
6713 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6714 tcg_gen_add_i32(tmp, tmp, tmp2);
6715 dead_tmp(tmp2);
9ee6e8bb 6716 }
ded9d295 6717 store_reg(s, rn, tmp);
9ee6e8bb
PB
6718 break;
6719 case 0x20: case 0x24: case 0x28: case 0x2c:
6720 /* Bitfield insert/clear. */
6721 ARCH(6T2);
6722 shift = (insn >> 7) & 0x1f;
6723 i = (insn >> 16) & 0x1f;
6724 i = i + 1 - shift;
6725 if (rm == 15) {
5e3f878a
PB
6726 tmp = new_tmp();
6727 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6728 } else {
5e3f878a 6729 tmp = load_reg(s, rm);
9ee6e8bb
PB
6730 }
6731 if (i != 32) {
5e3f878a 6732 tmp2 = load_reg(s, rd);
8f8e3aa4 6733 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6734 dead_tmp(tmp2);
9ee6e8bb 6735 }
5e3f878a 6736 store_reg(s, rd, tmp);
9ee6e8bb
PB
6737 break;
6738 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6739 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6740 ARCH(6T2);
5e3f878a 6741 tmp = load_reg(s, rm);
9ee6e8bb
PB
6742 shift = (insn >> 7) & 0x1f;
6743 i = ((insn >> 16) & 0x1f) + 1;
6744 if (shift + i > 32)
6745 goto illegal_op;
6746 if (i < 32) {
6747 if (op1 & 0x20) {
5e3f878a 6748 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6749 } else {
5e3f878a 6750 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6751 }
6752 }
5e3f878a 6753 store_reg(s, rd, tmp);
9ee6e8bb
PB
6754 break;
6755 default:
6756 goto illegal_op;
6757 }
6758 break;
6759 }
6760 break;
6761 }
6762 do_ldst:
6763 /* Check for undefined extension instructions
6764 * per the ARM Architecture Reference Manual, i.e.:
6765 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6766 */
6767 sh = (0xf << 20) | (0xf << 4);
6768 if (op1 == 0x7 && ((insn & sh) == sh))
6769 {
6770 goto illegal_op;
6771 }
6772 /* load/store byte/word */
6773 rn = (insn >> 16) & 0xf;
6774 rd = (insn >> 12) & 0xf;
b0109805 6775 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6776 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6777 if (insn & (1 << 24))
b0109805 6778 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6779 if (insn & (1 << 20)) {
6780 /* load */
9ee6e8bb 6781 if (insn & (1 << 22)) {
b0109805 6782 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6783 } else {
b0109805 6784 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6785 }
9ee6e8bb
PB
6786 } else {
6787 /* store */
b0109805 6788 tmp = load_reg(s, rd);
9ee6e8bb 6789 if (insn & (1 << 22))
b0109805 6790 gen_st8(tmp, tmp2, i);
9ee6e8bb 6791 else
b0109805 6792 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6793 }
6794 if (!(insn & (1 << 24))) {
b0109805
PB
6795 gen_add_data_offset(s, insn, tmp2);
6796 store_reg(s, rn, tmp2);
6797 } else if (insn & (1 << 21)) {
6798 store_reg(s, rn, tmp2);
6799 } else {
6800 dead_tmp(tmp2);
9ee6e8bb
PB
6801 }
6802 if (insn & (1 << 20)) {
6803 /* Complete the load. */
6804 if (rd == 15)
b0109805 6805 gen_bx(s, tmp);
9ee6e8bb 6806 else
b0109805 6807 store_reg(s, rd, tmp);
9ee6e8bb
PB
6808 }
6809 break;
6810 case 0x08:
6811 case 0x09:
6812 {
6813 int j, n, user, loaded_base;
b0109805 6814 TCGv loaded_var;
9ee6e8bb
PB
6815 /* load/store multiple words */
6816 /* XXX: store correct base if write back */
6817 user = 0;
6818 if (insn & (1 << 22)) {
6819 if (IS_USER(s))
6820 goto illegal_op; /* only usable in supervisor mode */
6821
6822 if ((insn & (1 << 15)) == 0)
6823 user = 1;
6824 }
6825 rn = (insn >> 16) & 0xf;
b0109805 6826 addr = load_reg(s, rn);
9ee6e8bb
PB
6827
6828 /* compute total size */
6829 loaded_base = 0;
a50f5b91 6830 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6831 n = 0;
6832 for(i=0;i<16;i++) {
6833 if (insn & (1 << i))
6834 n++;
6835 }
6836 /* XXX: test invalid n == 0 case ? */
6837 if (insn & (1 << 23)) {
6838 if (insn & (1 << 24)) {
6839 /* pre increment */
b0109805 6840 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6841 } else {
6842 /* post increment */
6843 }
6844 } else {
6845 if (insn & (1 << 24)) {
6846 /* pre decrement */
b0109805 6847 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6848 } else {
6849 /* post decrement */
6850 if (n != 1)
b0109805 6851 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6852 }
6853 }
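                /* After this switch addr is the lowest address of the block
                   transfer in every mode; e.g. stmdb of n registers starts
                   at base - 4 * n, ldmda at base - 4 * (n - 1), and ldmia
                   at base itself. */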
6854 j = 0;
6855 for(i=0;i<16;i++) {
6856 if (insn & (1 << i)) {
6857 if (insn & (1 << 20)) {
6858 /* load */
b0109805 6859 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6860 if (i == 15) {
b0109805 6861 gen_bx(s, tmp);
9ee6e8bb 6862 } else if (user) {
b75263d6
JR
6863 tmp2 = tcg_const_i32(i);
6864 gen_helper_set_user_reg(tmp2, tmp);
6865 tcg_temp_free_i32(tmp2);
b0109805 6866 dead_tmp(tmp);
9ee6e8bb 6867 } else if (i == rn) {
b0109805 6868 loaded_var = tmp;
9ee6e8bb
PB
6869 loaded_base = 1;
6870 } else {
b0109805 6871 store_reg(s, i, tmp);
9ee6e8bb
PB
6872 }
6873 } else {
6874 /* store */
6875 if (i == 15) {
6876 /* special case: r15 = PC + 8 */
6877 val = (long)s->pc + 4;
b0109805
PB
6878 tmp = new_tmp();
6879 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6880 } else if (user) {
b0109805 6881 tmp = new_tmp();
b75263d6
JR
6882 tmp2 = tcg_const_i32(i);
6883 gen_helper_get_user_reg(tmp, tmp2);
6884 tcg_temp_free_i32(tmp2);
9ee6e8bb 6885 } else {
b0109805 6886 tmp = load_reg(s, i);
9ee6e8bb 6887 }
b0109805 6888 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6889 }
6890 j++;
6891 /* no need to add after the last transfer */
6892 if (j != n)
b0109805 6893 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6894 }
6895 }
6896 if (insn & (1 << 21)) {
6897 /* write back */
6898 if (insn & (1 << 23)) {
6899 if (insn & (1 << 24)) {
6900 /* pre increment */
6901 } else {
6902 /* post increment */
b0109805 6903 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6904 }
6905 } else {
6906 if (insn & (1 << 24)) {
6907 /* pre decrement */
6908 if (n != 1)
b0109805 6909 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6910 } else {
6911 /* post decrement */
b0109805 6912 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6913 }
6914 }
b0109805
PB
6915 store_reg(s, rn, addr);
6916 } else {
6917 dead_tmp(addr);
9ee6e8bb
PB
6918 }
6919 if (loaded_base) {
b0109805 6920 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6921 }
6922 if ((insn & (1 << 22)) && !user) {
6923 /* Restore CPSR from SPSR. */
d9ba4830
PB
6924 tmp = load_cpu_field(spsr);
6925 gen_set_cpsr(tmp, 0xffffffff);
6926 dead_tmp(tmp);
9ee6e8bb
PB
6927 s->is_jmp = DISAS_UPDATE;
6928 }
6929 }
6930 break;
6931 case 0xa:
6932 case 0xb:
6933 {
6934 int32_t offset;
6935
6936 /* branch (and link) */
6937 val = (int32_t)s->pc;
6938 if (insn & (1 << 24)) {
5e3f878a
PB
6939 tmp = new_tmp();
6940 tcg_gen_movi_i32(tmp, val);
6941 store_reg(s, 14, tmp);
9ee6e8bb
PB
6942 }
6943 offset = (((int32_t)insn << 8) >> 8);
6944 val += (offset << 2) + 4;
6945 gen_jmp(s, val);
6946 }
6947 break;
6948 case 0xc:
6949 case 0xd:
6950 case 0xe:
6951 /* Coprocessor. */
6952 if (disas_coproc_insn(env, s, insn))
6953 goto illegal_op;
6954 break;
6955 case 0xf:
6956 /* swi */
5e3f878a 6957 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6958 s->is_jmp = DISAS_SWI;
6959 break;
6960 default:
6961 illegal_op:
6962 gen_set_condexec(s);
5e3f878a 6963 gen_set_pc_im(s->pc - 4);
d9ba4830 6964 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6965 s->is_jmp = DISAS_JUMP;
6966 break;
6967 }
6968 }
6969}
6970
6971/* Return true if this is a Thumb-2 logical op. */
6972static int
6973thumb2_logic_op(int op)
6974{
6975 return (op < 8);
6976}
6977
6978/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6979 then set condition code flags based on the result of the operation.
6980 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6981 to the high bit of T1.
6982 Returns zero if the opcode is valid. */
6983
6984static int
396e467c 6985gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
6986{
6987 int logic_cc;
6988
6989 logic_cc = 0;
6990 switch (op) {
6991 case 0: /* and */
396e467c 6992 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
6993 logic_cc = conds;
6994 break;
6995 case 1: /* bic */
396e467c 6996 tcg_gen_bic_i32(t0, t0, t1);
9ee6e8bb
PB
6997 logic_cc = conds;
6998 break;
6999 case 2: /* orr */
396e467c 7000 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7001 logic_cc = conds;
7002 break;
7003 case 3: /* orn */
396e467c
FN
7004 tcg_gen_not_i32(t1, t1);
7005 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7006 logic_cc = conds;
7007 break;
7008 case 4: /* eor */
396e467c 7009 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7010 logic_cc = conds;
7011 break;
7012 case 8: /* add */
7013 if (conds)
396e467c 7014 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7015 else
396e467c 7016 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7017 break;
7018 case 10: /* adc */
7019 if (conds)
396e467c 7020 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7021 else
396e467c 7022 gen_adc(t0, t1);
9ee6e8bb
PB
7023 break;
7024 case 11: /* sbc */
7025 if (conds)
396e467c 7026 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7027 else
396e467c 7028 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7029 break;
7030 case 13: /* sub */
7031 if (conds)
396e467c 7032 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7033 else
396e467c 7034 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7035 break;
7036 case 14: /* rsb */
7037 if (conds)
396e467c 7038 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7039 else
396e467c 7040 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7041 break;
7042 default: /* 5, 6, 7, 9, 12, 15. */
7043 return 1;
7044 }
7045 if (logic_cc) {
396e467c 7046 gen_logic_CC(t0);
9ee6e8bb 7047 if (shifter_out)
396e467c 7048 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7049 }
7050 return 0;
7051}
7052
7053/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7054 is not legal. */
7055static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7056{
b0109805 7057 uint32_t insn, imm, shift, offset;
9ee6e8bb 7058 uint32_t rd, rn, rm, rs;
b26eefb6 7059 TCGv tmp;
6ddbc6e4
PB
7060 TCGv tmp2;
7061 TCGv tmp3;
b0109805 7062 TCGv addr;
a7812ae4 7063 TCGv_i64 tmp64;
9ee6e8bb
PB
7064 int op;
7065 int shiftop;
7066 int conds;
7067 int logic_cc;
7068
7069 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7070 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7071 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7072 16-bit instructions to get correct prefetch abort behavior. */
7073 insn = insn_hw1;
7074 if ((insn & (1 << 12)) == 0) {
7075 /* Second half of blx. */
7076 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7077 tmp = load_reg(s, 14);
7078 tcg_gen_addi_i32(tmp, tmp, offset);
7079 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7080
d9ba4830 7081 tmp2 = new_tmp();
b0109805 7082 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7083 store_reg(s, 14, tmp2);
7084 gen_bx(s, tmp);
9ee6e8bb
PB
7085 return 0;
7086 }
7087 if (insn & (1 << 11)) {
7088 /* Second half of bl. */
7089 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7090 tmp = load_reg(s, 14);
6a0d8a1d 7091 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7092
d9ba4830 7093 tmp2 = new_tmp();
b0109805 7094 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7095 store_reg(s, 14, tmp2);
7096 gen_bx(s, tmp);
9ee6e8bb
PB
7097 return 0;
7098 }
7099 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7100 /* Instruction spans a page boundary. Implement it as two
7101 16-bit instructions in case the second half causes a
7102 prefetch abort. */
7103 offset = ((int32_t)insn << 21) >> 9;
396e467c 7104 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
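            /* First half of bl/blx on a Thumb-1 core: r14 temporarily holds
               PC plus the sign-extended upper offset bits (hw1[10:0] << 12);
               the second half, handled separately above, adds the low bits
               and then writes the real return address. */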
9ee6e8bb
PB
7105 return 0;
7106 }
7107 /* Fall through to 32-bit decode. */
7108 }
7109
7110 insn = lduw_code(s->pc);
7111 s->pc += 2;
7112 insn |= (uint32_t)insn_hw1 << 16;
7113
7114 if ((insn & 0xf800e800) != 0xf000e800) {
7115 ARCH(6T2);
7116 }
7117
7118 rn = (insn >> 16) & 0xf;
7119 rs = (insn >> 12) & 0xf;
7120 rd = (insn >> 8) & 0xf;
7121 rm = insn & 0xf;
7122 switch ((insn >> 25) & 0xf) {
7123 case 0: case 1: case 2: case 3:
7124 /* 16-bit instructions. Should never happen. */
7125 abort();
7126 case 4:
7127 if (insn & (1 << 22)) {
7128 /* Other load/store, table branch. */
7129 if (insn & 0x01200000) {
7130 /* Load/store doubleword. */
7131 if (rn == 15) {
b0109805
PB
7132 addr = new_tmp();
7133 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7134 } else {
b0109805 7135 addr = load_reg(s, rn);
9ee6e8bb
PB
7136 }
7137 offset = (insn & 0xff) * 4;
7138 if ((insn & (1 << 23)) == 0)
7139 offset = -offset;
7140 if (insn & (1 << 24)) {
b0109805 7141 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7142 offset = 0;
7143 }
7144 if (insn & (1 << 20)) {
7145 /* ldrd */
b0109805
PB
7146 tmp = gen_ld32(addr, IS_USER(s));
7147 store_reg(s, rs, tmp);
7148 tcg_gen_addi_i32(addr, addr, 4);
7149 tmp = gen_ld32(addr, IS_USER(s));
7150 store_reg(s, rd, tmp);
9ee6e8bb
PB
7151 } else {
7152 /* strd */
b0109805
PB
7153 tmp = load_reg(s, rs);
7154 gen_st32(tmp, addr, IS_USER(s));
7155 tcg_gen_addi_i32(addr, addr, 4);
7156 tmp = load_reg(s, rd);
7157 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7158 }
7159 if (insn & (1 << 21)) {
7160 /* Base writeback. */
7161 if (rn == 15)
7162 goto illegal_op;
b0109805
PB
7163 tcg_gen_addi_i32(addr, addr, offset - 4);
7164 store_reg(s, rn, addr);
7165 } else {
7166 dead_tmp(addr);
9ee6e8bb
PB
7167 }
7168 } else if ((insn & (1 << 23)) == 0) {
7169 /* Load/store exclusive word. */
3174f8e9 7170 addr = tcg_temp_local_new();
98a46317 7171 load_reg_var(s, addr, rn);
2c0262af 7172 if (insn & (1 << 20)) {
3174f8e9 7173 gen_helper_mark_exclusive(cpu_env, addr);
8f8e3aa4
PB
7174 tmp = gen_ld32(addr, IS_USER(s));
7175 store_reg(s, rd, tmp);
9ee6e8bb 7176 } else {
8f8e3aa4 7177 int label = gen_new_label();
3174f8e9
FN
7178 tmp2 = tcg_temp_local_new();
7179 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7180 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7181 tmp = load_reg(s, rs);
3174f8e9 7182 gen_st32(tmp, addr, IS_USER(s));
8f8e3aa4 7183 gen_set_label(label);
3174f8e9
FN
7184 tcg_gen_mov_i32(cpu_R[rd], tmp2);
7185 tcg_temp_free(tmp2);
9ee6e8bb 7186 }
3174f8e9 7187 tcg_temp_free(addr);
9ee6e8bb
PB
7188 } else if ((insn & (1 << 6)) == 0) {
7189 /* Table Branch. */
7190 if (rn == 15) {
b0109805
PB
7191 addr = new_tmp();
7192 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7193 } else {
b0109805 7194 addr = load_reg(s, rn);
9ee6e8bb 7195 }
b26eefb6 7196 tmp = load_reg(s, rm);
b0109805 7197 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7198 if (insn & (1 << 4)) {
7199 /* tbh */
b0109805 7200 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7201 dead_tmp(tmp);
b0109805 7202 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7203 } else { /* tbb */
b26eefb6 7204 dead_tmp(tmp);
b0109805 7205 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7206 }
b0109805
PB
7207 dead_tmp(addr);
7208 tcg_gen_shli_i32(tmp, tmp, 1);
7209 tcg_gen_addi_i32(tmp, tmp, s->pc);
7210 store_reg(s, 15, tmp);
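                /* tbb/tbh: the byte or halfword fetched from the table is an
                   unsigned forward offset in halfwords, so the branch target
                   is PC + 2 * entry (s->pc already holds the Thumb PC here). */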
9ee6e8bb
PB
7211 } else {
7212 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7213 /* ??? These are not really atomic. However we know
7214 we never have multiple CPUs running in parallel,
7215 so it is good enough. */
9ee6e8bb 7216 op = (insn >> 4) & 0x3;
3174f8e9 7217 addr = tcg_temp_local_new();
98a46317 7218 load_reg_var(s, addr, rn);
9ee6e8bb 7219 if (insn & (1 << 20)) {
8f8e3aa4 7220 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7221 switch (op) {
7222 case 0:
8f8e3aa4 7223 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7224 break;
2c0262af 7225 case 1:
8f8e3aa4 7226 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7227 break;
9ee6e8bb 7228 case 3:
8f8e3aa4
PB
7229 tmp = gen_ld32(addr, IS_USER(s));
7230 tcg_gen_addi_i32(addr, addr, 4);
7231 tmp2 = gen_ld32(addr, IS_USER(s));
7232 store_reg(s, rd, tmp2);
2c0262af
FB
7233 break;
7234 default:
9ee6e8bb
PB
7235 goto illegal_op;
7236 }
8f8e3aa4 7237 store_reg(s, rs, tmp);
9ee6e8bb 7238 } else {
8f8e3aa4 7239 int label = gen_new_label();
3174f8e9
FN
7240 tmp2 = tcg_temp_local_new();
7241 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7242 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7243 tmp = load_reg(s, rs);
9ee6e8bb
PB
7244 switch (op) {
7245 case 0:
8f8e3aa4 7246 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7247 break;
7248 case 1:
8f8e3aa4 7249 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7250 break;
2c0262af 7251 case 3:
8f8e3aa4
PB
7252 gen_st32(tmp, addr, IS_USER(s));
7253 tcg_gen_addi_i32(addr, addr, 4);
7254 tmp = load_reg(s, rd);
7255 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7256 break;
9ee6e8bb
PB
7257 default:
7258 goto illegal_op;
2c0262af 7259 }
8f8e3aa4 7260 gen_set_label(label);
3174f8e9
FN
7261 tcg_gen_mov_i32(cpu_R[rm], tmp2);
7262 tcg_temp_free(tmp2);
9ee6e8bb 7263 }
3174f8e9 7264 tcg_temp_free(addr);
9ee6e8bb
PB
7265 }
7266 } else {
7267 /* Load/store multiple, RFE, SRS. */
7268 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7269 /* Not available in user mode. */
b0109805 7270 if (IS_USER(s))
9ee6e8bb
PB
7271 goto illegal_op;
7272 if (insn & (1 << 20)) {
7273 /* rfe */
b0109805
PB
7274 addr = load_reg(s, rn);
7275 if ((insn & (1 << 24)) == 0)
7276 tcg_gen_addi_i32(addr, addr, -8);
7277 /* Load PC into tmp and CPSR into tmp2. */
7278 tmp = gen_ld32(addr, 0);
7279 tcg_gen_addi_i32(addr, addr, 4);
7280 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7281 if (insn & (1 << 21)) {
7282 /* Base writeback. */
b0109805
PB
7283 if (insn & (1 << 24)) {
7284 tcg_gen_addi_i32(addr, addr, 4);
7285 } else {
7286 tcg_gen_addi_i32(addr, addr, -4);
7287 }
7288 store_reg(s, rn, addr);
7289 } else {
7290 dead_tmp(addr);
9ee6e8bb 7291 }
b0109805 7292 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7293 } else {
7294 /* srs */
7295 op = (insn & 0x1f);
7296 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7297 addr = load_reg(s, 13);
9ee6e8bb 7298 } else {
b0109805 7299 addr = new_tmp();
b75263d6
JR
7300 tmp = tcg_const_i32(op);
7301 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7302 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7303 }
7304 if ((insn & (1 << 24)) == 0) {
b0109805 7305 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7306 }
b0109805
PB
7307 tmp = load_reg(s, 14);
7308 gen_st32(tmp, addr, 0);
7309 tcg_gen_addi_i32(addr, addr, 4);
7310 tmp = new_tmp();
7311 gen_helper_cpsr_read(tmp);
7312 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7313 if (insn & (1 << 21)) {
7314 if ((insn & (1 << 24)) == 0) {
b0109805 7315 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7316 } else {
b0109805 7317 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7318 }
7319 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7320 store_reg(s, 13, addr);
9ee6e8bb 7321 } else {
b75263d6
JR
7322 tmp = tcg_const_i32(op);
7323 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7324 tcg_temp_free_i32(tmp);
9ee6e8bb 7325 }
b0109805
PB
7326 } else {
7327 dead_tmp(addr);
9ee6e8bb
PB
7328 }
7329 }
7330 } else {
7331 int i;
7332 /* Load/store multiple. */
b0109805 7333 addr = load_reg(s, rn);
9ee6e8bb
PB
7334 offset = 0;
7335 for (i = 0; i < 16; i++) {
7336 if (insn & (1 << i))
7337 offset += 4;
7338 }
7339 if (insn & (1 << 24)) {
b0109805 7340 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7341 }
7342
7343 for (i = 0; i < 16; i++) {
7344 if ((insn & (1 << i)) == 0)
7345 continue;
7346 if (insn & (1 << 20)) {
7347 /* Load. */
b0109805 7348 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7349 if (i == 15) {
b0109805 7350 gen_bx(s, tmp);
9ee6e8bb 7351 } else {
b0109805 7352 store_reg(s, i, tmp);
9ee6e8bb
PB
7353 }
7354 } else {
7355 /* Store. */
b0109805
PB
7356 tmp = load_reg(s, i);
7357 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7358 }
b0109805 7359 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7360 }
7361 if (insn & (1 << 21)) {
7362 /* Base register writeback. */
7363 if (insn & (1 << 24)) {
b0109805 7364 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7365 }
7366 /* Fault if writeback register is in register list. */
7367 if (insn & (1 << rn))
7368 goto illegal_op;
b0109805
PB
7369 store_reg(s, rn, addr);
7370 } else {
7371 dead_tmp(addr);
9ee6e8bb
PB
7372 }
7373 }
7374 }
7375 break;
7376 case 5: /* Data processing register constant shift. */
3174f8e9
FN
7377 if (rn == 15) {
7378 tmp = new_tmp();
7379 tcg_gen_movi_i32(tmp, 0);
7380 } else {
7381 tmp = load_reg(s, rn);
7382 }
7383 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7384 op = (insn >> 21) & 0xf;
7385 shiftop = (insn >> 4) & 3;
7386 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7387 conds = (insn & (1 << 20)) != 0;
7388 logic_cc = (conds && thumb2_logic_op(op));
3174f8e9
FN
7389 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7390 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9ee6e8bb 7391 goto illegal_op;
3174f8e9
FN
7392 dead_tmp(tmp2);
7393 if (rd != 15) {
7394 store_reg(s, rd, tmp);
7395 } else {
7396 dead_tmp(tmp);
7397 }
9ee6e8bb
PB
7398 break;
7399 case 13: /* Misc data processing. */
7400 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7401 if (op < 4 && (insn & 0xf000) != 0xf000)
7402 goto illegal_op;
7403 switch (op) {
7404 case 0: /* Register controlled shift. */
8984bd2e
PB
7405 tmp = load_reg(s, rn);
7406 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7407 if ((insn & 0x70) != 0)
7408 goto illegal_op;
7409 op = (insn >> 21) & 3;
8984bd2e
PB
7410 logic_cc = (insn & (1 << 20)) != 0;
7411 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7412 if (logic_cc)
7413 gen_logic_CC(tmp);
21aeb343 7414 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7415 break;
7416 case 1: /* Sign/zero extend. */
5e3f878a 7417 tmp = load_reg(s, rm);
9ee6e8bb
PB
7418 shift = (insn >> 4) & 3;
7419 /* ??? In many cases it's not necessary to do a
7420 rotate, a shift is sufficient. */
7421 if (shift != 0)
5e3f878a 7422 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7423 op = (insn >> 20) & 7;
7424 switch (op) {
5e3f878a
PB
7425 case 0: gen_sxth(tmp); break;
7426 case 1: gen_uxth(tmp); break;
7427 case 2: gen_sxtb16(tmp); break;
7428 case 3: gen_uxtb16(tmp); break;
7429 case 4: gen_sxtb(tmp); break;
7430 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7431 default: goto illegal_op;
7432 }
7433 if (rn != 15) {
5e3f878a 7434 tmp2 = load_reg(s, rn);
9ee6e8bb 7435 if ((op >> 1) == 1) {
5e3f878a 7436 gen_add16(tmp, tmp2);
9ee6e8bb 7437 } else {
5e3f878a
PB
7438 tcg_gen_add_i32(tmp, tmp, tmp2);
7439 dead_tmp(tmp2);
9ee6e8bb
PB
7440 }
7441 }
5e3f878a 7442 store_reg(s, rd, tmp);
9ee6e8bb
PB
7443 break;
7444 case 2: /* SIMD add/subtract. */
7445 op = (insn >> 20) & 7;
7446 shift = (insn >> 4) & 7;
7447 if ((op & 3) == 3 || (shift & 3) == 3)
7448 goto illegal_op;
6ddbc6e4
PB
7449 tmp = load_reg(s, rn);
7450 tmp2 = load_reg(s, rm);
7451 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7452 dead_tmp(tmp2);
7453 store_reg(s, rd, tmp);
9ee6e8bb
PB
7454 break;
7455 case 3: /* Other data processing. */
7456 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7457 if (op < 4) {
7458 /* Saturating add/subtract. */
d9ba4830
PB
7459 tmp = load_reg(s, rn);
7460 tmp2 = load_reg(s, rm);
9ee6e8bb 7461 if (op & 2)
d9ba4830 7462 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7463 if (op & 1)
d9ba4830 7464 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7465 else
d9ba4830
PB
7466 gen_helper_add_saturate(tmp, tmp, tmp2);
7467 dead_tmp(tmp2);
9ee6e8bb 7468 } else {
d9ba4830 7469 tmp = load_reg(s, rn);
9ee6e8bb
PB
7470 switch (op) {
7471 case 0x0a: /* rbit */
d9ba4830 7472 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7473 break;
7474 case 0x08: /* rev */
66896cb8 7475 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7476 break;
7477 case 0x09: /* rev16 */
d9ba4830 7478 gen_rev16(tmp);
9ee6e8bb
PB
7479 break;
7480 case 0x0b: /* revsh */
d9ba4830 7481 gen_revsh(tmp);
9ee6e8bb
PB
7482 break;
7483 case 0x10: /* sel */
d9ba4830 7484 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7485 tmp3 = new_tmp();
7486 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7487 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7488 dead_tmp(tmp3);
d9ba4830 7489 dead_tmp(tmp2);
9ee6e8bb
PB
7490 break;
7491 case 0x18: /* clz */
d9ba4830 7492 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7493 break;
7494 default:
7495 goto illegal_op;
7496 }
7497 }
d9ba4830 7498 store_reg(s, rd, tmp);
9ee6e8bb
PB
7499 break;
7500 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7501 op = (insn >> 4) & 0xf;
d9ba4830
PB
7502 tmp = load_reg(s, rn);
7503 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7504 switch ((insn >> 20) & 7) {
7505 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7506 tcg_gen_mul_i32(tmp, tmp, tmp2);
7507 dead_tmp(tmp2);
9ee6e8bb 7508 if (rs != 15) {
d9ba4830 7509 tmp2 = load_reg(s, rs);
9ee6e8bb 7510 if (op)
d9ba4830 7511 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7512 else
d9ba4830
PB
7513 tcg_gen_add_i32(tmp, tmp, tmp2);
7514 dead_tmp(tmp2);
9ee6e8bb 7515 }
9ee6e8bb
PB
7516 break;
7517 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7518 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7519 dead_tmp(tmp2);
9ee6e8bb 7520 if (rs != 15) {
d9ba4830
PB
7521 tmp2 = load_reg(s, rs);
7522 gen_helper_add_setq(tmp, tmp, tmp2);
7523 dead_tmp(tmp2);
9ee6e8bb 7524 }
9ee6e8bb
PB
7525 break;
7526 case 2: /* Dual multiply add. */
7527 case 4: /* Dual multiply subtract. */
7528 if (op)
d9ba4830
PB
7529 gen_swap_half(tmp2);
7530 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7531 /* This addition cannot overflow. */
7532 if (insn & (1 << 22)) {
d9ba4830 7533 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7534 } else {
d9ba4830 7535 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7536 }
d9ba4830 7537 dead_tmp(tmp2);
9ee6e8bb
PB
7538 if (rs != 15)
7539 {
d9ba4830
PB
7540 tmp2 = load_reg(s, rs);
7541 gen_helper_add_setq(tmp, tmp, tmp2);
7542 dead_tmp(tmp2);
9ee6e8bb 7543 }
9ee6e8bb
PB
7544 break;
7545 case 3: /* 32 * 16 -> 32msb */
7546 if (op)
d9ba4830 7547 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7548 else
d9ba4830 7549 gen_sxth(tmp2);
a7812ae4
PB
7550 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7551 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7552 tmp = new_tmp();
a7812ae4 7553 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7554 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7555 if (rs != 15)
7556 {
d9ba4830
PB
7557 tmp2 = load_reg(s, rs);
7558 gen_helper_add_setq(tmp, tmp, tmp2);
7559 dead_tmp(tmp2);
9ee6e8bb 7560 }
9ee6e8bb
PB
7561 break;
7562 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7563 gen_imull(tmp, tmp2);
7564 if (insn & (1 << 5)) {
7565 gen_roundqd(tmp, tmp2);
7566 dead_tmp(tmp2);
7567 } else {
7568 dead_tmp(tmp);
7569 tmp = tmp2;
7570 }
9ee6e8bb 7571 if (rs != 15) {
d9ba4830 7572 tmp2 = load_reg(s, rs);
9ee6e8bb 7573 if (insn & (1 << 21)) {
d9ba4830 7574 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7575 } else {
d9ba4830 7576 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7577 }
d9ba4830 7578 dead_tmp(tmp2);
2c0262af 7579 }
9ee6e8bb
PB
7580 break;
7581 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7582 gen_helper_usad8(tmp, tmp, tmp2);
7583 dead_tmp(tmp2);
9ee6e8bb 7584 if (rs != 15) {
d9ba4830
PB
7585 tmp2 = load_reg(s, rs);
7586 tcg_gen_add_i32(tmp, tmp, tmp2);
7587 dead_tmp(tmp2);
5fd46862 7588 }
9ee6e8bb 7589 break;
2c0262af 7590 }
d9ba4830 7591 store_reg(s, rd, tmp);
2c0262af 7592 break;
9ee6e8bb
PB
7593 case 6: case 7: /* 64-bit multiply, Divide. */
7594 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7595 tmp = load_reg(s, rn);
7596 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7597 if ((op & 0x50) == 0x10) {
7598 /* sdiv, udiv */
7599 if (!arm_feature(env, ARM_FEATURE_DIV))
7600 goto illegal_op;
7601 if (op & 0x20)
5e3f878a 7602 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7603 else
5e3f878a
PB
7604 gen_helper_sdiv(tmp, tmp, tmp2);
7605 dead_tmp(tmp2);
7606 store_reg(s, rd, tmp);
9ee6e8bb
PB
7607 } else if ((op & 0xe) == 0xc) {
7608 /* Dual multiply accumulate long. */
7609 if (op & 1)
5e3f878a
PB
7610 gen_swap_half(tmp2);
7611 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7612 if (op & 0x10) {
5e3f878a 7613 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7614 } else {
5e3f878a 7615 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7616 }
5e3f878a 7617 dead_tmp(tmp2);
a7812ae4
PB
7618 /* BUGFIX */
7619 tmp64 = tcg_temp_new_i64();
7620 tcg_gen_ext_i32_i64(tmp64, tmp);
7621 dead_tmp(tmp);
7622 gen_addq(s, tmp64, rs, rd);
7623 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7624 tcg_temp_free_i64(tmp64);
2c0262af 7625 } else {
9ee6e8bb
PB
7626 if (op & 0x20) {
7627 /* Unsigned 64-bit multiply */
a7812ae4 7628 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7629 } else {
9ee6e8bb
PB
7630 if (op & 8) {
7631 /* smlalxy */
5e3f878a
PB
7632 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7633 dead_tmp(tmp2);
a7812ae4
PB
7634 tmp64 = tcg_temp_new_i64();
7635 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7636 dead_tmp(tmp);
9ee6e8bb
PB
7637 } else {
7638 /* Signed 64-bit multiply */
a7812ae4 7639 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7640 }
b5ff1b31 7641 }
9ee6e8bb
PB
7642 if (op & 4) {
7643 /* umaal */
a7812ae4
PB
7644 gen_addq_lo(s, tmp64, rs);
7645 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7646 } else if (op & 0x40) {
7647 /* 64-bit accumulate. */
a7812ae4 7648 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7649 }
a7812ae4 7650 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7651 tcg_temp_free_i64(tmp64);
5fd46862 7652 }
2c0262af 7653 break;
9ee6e8bb
PB
7654 }
7655 break;
7656 case 6: case 7: case 14: case 15:
7657 /* Coprocessor. */
7658 if (((insn >> 24) & 3) == 3) {
7659 /* Translate into the equivalent ARM encoding. */
7660 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7661 if (disas_neon_data_insn(env, s, insn))
7662 goto illegal_op;
7663 } else {
7664 if (insn & (1 << 28))
7665 goto illegal_op;
7666 if (disas_coproc_insn (env, s, insn))
7667 goto illegal_op;
7668 }
7669 break;
7670 case 8: case 9: case 10: case 11:
7671 if (insn & (1 << 15)) {
7672 /* Branches, misc control. */
7673 if (insn & 0x5000) {
7674 /* Unconditional branch. */
7675 /* signextend(hw1[10:0]) -> offset[:12]. */
7676 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7677 /* hw2[10:0] -> offset[11:1]. */
7678 offset |= (insn & 0x7ff) << 1;
7679 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7680 offset[24:22] already have the same value because of the
7681 sign extension above. */
7682 offset ^= ((~insn) & (1 << 13)) << 10;
7683 offset ^= ((~insn) & (1 << 11)) << 11;
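/* Editor's note: illustrative worked example, not part of the original
 * source.  Assuming insn holds the first halfword (hw1) in bits [31:16]
 * and the second (hw2) in bits [15:0], the BL encoding hw1 = 0xf001,
 * hw2 = 0xf802 (S = 0, imm10 = 1, J1 = J2 = 1, imm11 = 2) gives
 * insn = 0xf001f802:
 *   - the shift/mask line above yields offset = 0x1000 (S:imm10
 *     sign-extended into offset[22:12] and up);
 *   - ORing in (insn & 0x7ff) << 1 adds imm11 << 1 = 0x004;
 *   - both XOR lines are no-ops because bits 13 and 11 of insn are set;
 * so offset = 0x1004 and the branch target below is s->pc + 0x1004. */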
7684
9ee6e8bb
PB
7685 if (insn & (1 << 14)) {
7686 /* Branch and link. */
3174f8e9 7687 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7688 }
3b46e624 7689
b0109805 7690 offset += s->pc;
9ee6e8bb
PB
7691 if (insn & (1 << 12)) {
7692 /* b/bl */
b0109805 7693 gen_jmp(s, offset);
9ee6e8bb
PB
7694 } else {
7695 /* blx */
b0109805
PB
7696 offset &= ~(uint32_t)2;
7697 gen_bx_im(s, offset);
2c0262af 7698 }
9ee6e8bb
PB
7699 } else if (((insn >> 23) & 7) == 7) {
7700 /* Misc control */
7701 if (insn & (1 << 13))
7702 goto illegal_op;
7703
7704 if (insn & (1 << 26)) {
7705 /* Secure monitor call (v6Z) */
7706 goto illegal_op; /* not implemented. */
2c0262af 7707 } else {
9ee6e8bb
PB
7708 op = (insn >> 20) & 7;
7709 switch (op) {
7710 case 0: /* msr cpsr. */
7711 if (IS_M(env)) {
8984bd2e
PB
7712 tmp = load_reg(s, rn);
7713 addr = tcg_const_i32(insn & 0xff);
7714 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6
JR
7715 tcg_temp_free_i32(addr);
7716 dead_tmp(tmp);
9ee6e8bb
PB
7717 gen_lookup_tb(s);
7718 break;
7719 }
7720 /* fall through */
7721 case 1: /* msr spsr. */
7722 if (IS_M(env))
7723 goto illegal_op;
2fbac54b
FN
7724 tmp = load_reg(s, rn);
7725 if (gen_set_psr(s,
9ee6e8bb 7726 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7727 op == 1, tmp))
9ee6e8bb
PB
7728 goto illegal_op;
7729 break;
7730 case 2: /* cps, nop-hint. */
7731 if (((insn >> 8) & 7) == 0) {
7732 gen_nop_hint(s, insn & 0xff);
7733 }
7734 /* Implemented as NOP in user mode. */
7735 if (IS_USER(s))
7736 break;
7737 offset = 0;
7738 imm = 0;
7739 if (insn & (1 << 10)) {
7740 if (insn & (1 << 7))
7741 offset |= CPSR_A;
7742 if (insn & (1 << 6))
7743 offset |= CPSR_I;
7744 if (insn & (1 << 5))
7745 offset |= CPSR_F;
7746 if (insn & (1 << 9))
7747 imm = CPSR_A | CPSR_I | CPSR_F;
7748 }
7749 if (insn & (1 << 8)) {
7750 offset |= 0x1f;
7751 imm |= (insn & 0x1f);
7752 }
7753 if (offset) {
2fbac54b 7754 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
7755 }
7756 break;
7757 case 3: /* Special control operations. */
7758 op = (insn >> 4) & 0xf;
7759 switch (op) {
7760 case 2: /* clrex */
8f8e3aa4 7761 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7762 break;
7763 case 4: /* dsb */
7764 case 5: /* dmb */
7765 case 6: /* isb */
7766 /* These execute as NOPs. */
7767 ARCH(7);
7768 break;
7769 default:
7770 goto illegal_op;
7771 }
7772 break;
7773 case 4: /* bxj */
7774 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7775 tmp = load_reg(s, rn);
7776 gen_bx(s, tmp);
9ee6e8bb
PB
7777 break;
7778 case 5: /* Exception return. */
7779 /* Unpredictable in user mode. */
7780 goto illegal_op;
7781 case 6: /* mrs cpsr. */
8984bd2e 7782 tmp = new_tmp();
9ee6e8bb 7783 if (IS_M(env)) {
8984bd2e
PB
7784 addr = tcg_const_i32(insn & 0xff);
7785 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 7786 tcg_temp_free_i32(addr);
9ee6e8bb 7787 } else {
8984bd2e 7788 gen_helper_cpsr_read(tmp);
9ee6e8bb 7789 }
8984bd2e 7790 store_reg(s, rd, tmp);
9ee6e8bb
PB
7791 break;
7792 case 7: /* mrs spsr. */
7793 /* Not accessible in user mode. */
7794 if (IS_USER(s) || IS_M(env))
7795 goto illegal_op;
d9ba4830
PB
7796 tmp = load_cpu_field(spsr);
7797 store_reg(s, rd, tmp);
9ee6e8bb 7798 break;
2c0262af
FB
7799 }
7800 }
9ee6e8bb
PB
7801 } else {
7802 /* Conditional branch. */
7803 op = (insn >> 22) & 0xf;
7804 /* Generate a conditional jump to next instruction. */
7805 s->condlabel = gen_new_label();
d9ba4830 7806 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7807 s->condjmp = 1;
7808
7809 /* offset[11:1] = insn[10:0] */
7810 offset = (insn & 0x7ff) << 1;
7811 /* offset[17:12] = insn[21:16]. */
7812 offset |= (insn & 0x003f0000) >> 4;
7813 /* offset[31:20] = insn[26]. */
7814 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7815 /* offset[18] = insn[13]. */
7816 offset |= (insn & (1 << 13)) << 5;
7817 /* offset[19] = insn[11]. */
7818 offset |= (insn & (1 << 11)) << 8;
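/* Editor's note: illustrative worked example, not part of the original
 * source.  For a conditional branch with S = 0, cond = EQ, imm6 = 1,
 * J1 = J2 = 0 and imm11 = 2 (insn = 0xf0018002), only the first two
 * lines contribute: offset = (2 << 1) | (1 << 12) = 0x1004, so the
 * jump below goes to s->pc + 0x1004. */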
7819
7820 /* jump to the offset */
b0109805 7821 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7822 }
7823 } else {
7824 /* Data processing immediate. */
7825 if (insn & (1 << 25)) {
7826 if (insn & (1 << 24)) {
7827 if (insn & (1 << 20))
7828 goto illegal_op;
7829 /* Bitfield/Saturate. */
7830 op = (insn >> 21) & 7;
7831 imm = insn & 0x1f;
7832 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7833 if (rn == 15) {
7834 tmp = new_tmp();
7835 tcg_gen_movi_i32(tmp, 0);
7836 } else {
7837 tmp = load_reg(s, rn);
7838 }
9ee6e8bb
PB
7839 switch (op) {
7840 case 2: /* Signed bitfield extract. */
7841 imm++;
7842 if (shift + imm > 32)
7843 goto illegal_op;
7844 if (imm < 32)
6ddbc6e4 7845 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7846 break;
7847 case 6: /* Unsigned bitfield extract. */
7848 imm++;
7849 if (shift + imm > 32)
7850 goto illegal_op;
7851 if (imm < 32)
6ddbc6e4 7852 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7853 break;
7854 case 3: /* Bitfield insert/clear. */
7855 if (imm < shift)
7856 goto illegal_op;
7857 imm = imm + 1 - shift;
7858 if (imm != 32) {
6ddbc6e4 7859 tmp2 = load_reg(s, rd);
8f8e3aa4 7860 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7861 dead_tmp(tmp2);
9ee6e8bb
PB
7862 }
7863 break;
7864 case 7:
7865 goto illegal_op;
7866 default: /* Saturate. */
9ee6e8bb
PB
7867 if (shift) {
7868 if (op & 1)
6ddbc6e4 7869 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7870 else
6ddbc6e4 7871 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7872 }
6ddbc6e4 7873 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7874 if (op & 4) {
7875 /* Unsigned. */
9ee6e8bb 7876 if ((op & 1) && shift == 0)
6ddbc6e4 7877 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7878 else
6ddbc6e4 7879 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7880 } else {
9ee6e8bb 7881 /* Signed. */
9ee6e8bb 7882 if ((op & 1) && shift == 0)
6ddbc6e4 7883 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7884 else
6ddbc6e4 7885 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7886 }
b75263d6 7887 tcg_temp_free_i32(tmp2);
9ee6e8bb 7888 break;
2c0262af 7889 }
6ddbc6e4 7890 store_reg(s, rd, tmp);
9ee6e8bb
PB
7891 } else {
7892 imm = ((insn & 0x04000000) >> 15)
7893 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7894 if (insn & (1 << 22)) {
7895 /* 16-bit immediate. */
7896 imm |= (insn >> 4) & 0xf000;
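/* Editor's note: illustrative example, not part of the original source.
 * The 16-bit immediate is assembled as imm4:i:imm3:imm8 from insn bits
 * [19:16], [26], [14:12] and [7:0]; e.g. imm4 = 0x1, i = 0, imm3 = 0x2,
 * imm8 = 0x34 produces imm = 0x1234. */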
7897 if (insn & (1 << 23)) {
7898 /* movt */
5e3f878a 7899 tmp = load_reg(s, rd);
86831435 7900 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7901 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7902 } else {
9ee6e8bb 7903 /* movw */
5e3f878a
PB
7904 tmp = new_tmp();
7905 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7906 }
7907 } else {
9ee6e8bb
PB
7908 /* Add/sub 12-bit immediate. */
7909 if (rn == 15) {
b0109805 7910 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7911 if (insn & (1 << 23))
b0109805 7912 offset -= imm;
9ee6e8bb 7913 else
b0109805 7914 offset += imm;
5e3f878a
PB
7915 tmp = new_tmp();
7916 tcg_gen_movi_i32(tmp, offset);
2c0262af 7917 } else {
5e3f878a 7918 tmp = load_reg(s, rn);
9ee6e8bb 7919 if (insn & (1 << 23))
5e3f878a 7920 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7921 else
5e3f878a 7922 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7923 }
9ee6e8bb 7924 }
5e3f878a 7925 store_reg(s, rd, tmp);
191abaa2 7926 }
9ee6e8bb
PB
7927 } else {
7928 int shifter_out = 0;
7929 /* modified 12-bit immediate. */
7930 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7931 imm = (insn & 0xff);
7932 switch (shift) {
7933 case 0: /* XY */
7934 /* Nothing to do. */
7935 break;
7936 case 1: /* 00XY00XY */
7937 imm |= imm << 16;
7938 break;
7939 case 2: /* XY00XY00 */
7940 imm |= imm << 16;
7941 imm <<= 8;
7942 break;
7943 case 3: /* XYXYXYXY */
7944 imm |= imm << 16;
7945 imm |= imm << 8;
7946 break;
7947 default: /* Rotated constant. */
7948 shift = (shift << 1) | (imm >> 7);
7949 imm |= 0x80;
7950 imm = imm << (32 - shift);
7951 shifter_out = 1;
7952 break;
b5ff1b31 7953 }
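/* Editor's note: illustrative example, not part of the original source.
 * The rotated-constant case places (0x80 | imm8[6:0]) rotated right by
 * i:imm3:imm8[7]; the left shift by (32 - shift) is equivalent here
 * because the value fits in the low 8 bits.  E.g. i:imm3 = 0b0101,
 * imm8 = 0x55: shift becomes 10, imm becomes 0xd5, and
 * 0xd5 << 22 = 0x35400000.  The replicating cases are simpler, e.g.
 * shift = 1 with imm8 = 0x12 yields 0x00120012. */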
3174f8e9
FN
7954 tmp2 = new_tmp();
7955 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 7956 rn = (insn >> 16) & 0xf;
3174f8e9
FN
7957 if (rn == 15) {
7958 tmp = new_tmp();
7959 tcg_gen_movi_i32(tmp, 0);
7960 } else {
7961 tmp = load_reg(s, rn);
7962 }
9ee6e8bb
PB
7963 op = (insn >> 21) & 0xf;
7964 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 7965 shifter_out, tmp, tmp2))
9ee6e8bb 7966 goto illegal_op;
3174f8e9 7967 dead_tmp(tmp2);
9ee6e8bb
PB
7968 rd = (insn >> 8) & 0xf;
7969 if (rd != 15) {
3174f8e9
FN
7970 store_reg(s, rd, tmp);
7971 } else {
7972 dead_tmp(tmp);
2c0262af 7973 }
2c0262af 7974 }
9ee6e8bb
PB
7975 }
7976 break;
7977 case 12: /* Load/store single data item. */
7978 {
7979 int postinc = 0;
7980 int writeback = 0;
b0109805 7981 int user;
9ee6e8bb
PB
7982 if ((insn & 0x01100000) == 0x01000000) {
7983 if (disas_neon_ls_insn(env, s, insn))
c1713132 7984 goto illegal_op;
9ee6e8bb
PB
7985 break;
7986 }
b0109805 7987 user = IS_USER(s);
9ee6e8bb 7988 if (rn == 15) {
b0109805 7989 addr = new_tmp();
9ee6e8bb
PB
7990 /* PC relative. */
7991 /* s->pc has already been incremented by 4. */
7992 imm = s->pc & 0xfffffffc;
7993 if (insn & (1 << 23))
7994 imm += insn & 0xfff;
7995 else
7996 imm -= insn & 0xfff;
b0109805 7997 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7998 } else {
b0109805 7999 addr = load_reg(s, rn);
9ee6e8bb
PB
8000 if (insn & (1 << 23)) {
8001 /* Positive offset. */
8002 imm = insn & 0xfff;
b0109805 8003 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8004 } else {
8005 op = (insn >> 8) & 7;
8006 imm = insn & 0xff;
8007 switch (op) {
8008 case 0: case 8: /* Shifted Register. */
8009 shift = (insn >> 4) & 0xf;
8010 if (shift > 3)
18c9b560 8011 goto illegal_op;
b26eefb6 8012 tmp = load_reg(s, rm);
9ee6e8bb 8013 if (shift)
b26eefb6 8014 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8015 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8016 dead_tmp(tmp);
9ee6e8bb
PB
8017 break;
8018 case 4: /* Negative offset. */
b0109805 8019 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8020 break;
8021 case 6: /* User privilege. */
b0109805
PB
8022 tcg_gen_addi_i32(addr, addr, imm);
8023 user = 1;
9ee6e8bb
PB
8024 break;
8025 case 1: /* Post-decrement. */
8026 imm = -imm;
8027 /* Fall through. */
8028 case 3: /* Post-increment. */
9ee6e8bb
PB
8029 postinc = 1;
8030 writeback = 1;
8031 break;
8032 case 5: /* Pre-decrement. */
8033 imm = -imm;
8034 /* Fall through. */
8035 case 7: /* Pre-increment. */
b0109805 8036 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8037 writeback = 1;
8038 break;
8039 default:
b7bcbe95 8040 goto illegal_op;
9ee6e8bb
PB
8041 }
8042 }
8043 }
8044 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
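/* Editor's note: clarifying comment, not part of the original source.
 * op holds the size field (insn[22:21]: 0 = byte, 1 = halfword,
 * 2 = word) in bits [1:0] and the sign bit (insn[24]) in bit 2, which
 * is why the load switch below maps 0/1/2 to unsigned byte/half/word
 * and 4/5 to signed byte/half. */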
8045 if (insn & (1 << 20)) {
8046 /* Load. */
8047 if (rs == 15 && op != 2) {
8048 if (op & 2)
b5ff1b31 8049 goto illegal_op;
9ee6e8bb
PB
8050 /* Memory hint. Implemented as NOP. */
8051 } else {
8052 switch (op) {
b0109805
PB
8053 case 0: tmp = gen_ld8u(addr, user); break;
8054 case 4: tmp = gen_ld8s(addr, user); break;
8055 case 1: tmp = gen_ld16u(addr, user); break;
8056 case 5: tmp = gen_ld16s(addr, user); break;
8057 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8058 default: goto illegal_op;
8059 }
8060 if (rs == 15) {
b0109805 8061 gen_bx(s, tmp);
9ee6e8bb 8062 } else {
b0109805 8063 store_reg(s, rs, tmp);
9ee6e8bb
PB
8064 }
8065 }
8066 } else {
8067 /* Store. */
8068 if (rs == 15)
b7bcbe95 8069 goto illegal_op;
b0109805 8070 tmp = load_reg(s, rs);
9ee6e8bb 8071 switch (op) {
b0109805
PB
8072 case 0: gen_st8(tmp, addr, user); break;
8073 case 1: gen_st16(tmp, addr, user); break;
8074 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8075 default: goto illegal_op;
b7bcbe95 8076 }
2c0262af 8077 }
9ee6e8bb 8078 if (postinc)
b0109805
PB
8079 tcg_gen_addi_i32(addr, addr, imm);
8080 if (writeback) {
8081 store_reg(s, rn, addr);
8082 } else {
8083 dead_tmp(addr);
8084 }
9ee6e8bb
PB
8085 }
8086 break;
8087 default:
8088 goto illegal_op;
2c0262af 8089 }
9ee6e8bb
PB
8090 return 0;
8091illegal_op:
8092 return 1;
2c0262af
FB
8093}
8094
9ee6e8bb 8095static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8096{
8097 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8098 int32_t offset;
8099 int i;
b26eefb6 8100 TCGv tmp;
d9ba4830 8101 TCGv tmp2;
b0109805 8102 TCGv addr;
99c475ab 8103
9ee6e8bb
PB
8104 if (s->condexec_mask) {
8105 cond = s->condexec_cond;
8106 s->condlabel = gen_new_label();
d9ba4830 8107 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8108 s->condjmp = 1;
8109 }
8110
b5ff1b31 8111 insn = lduw_code(s->pc);
99c475ab 8112 s->pc += 2;
b5ff1b31 8113
99c475ab
FB
8114 switch (insn >> 12) {
8115 case 0: case 1:
396e467c 8116
99c475ab
FB
8117 rd = insn & 7;
8118 op = (insn >> 11) & 3;
8119 if (op == 3) {
8120 /* add/subtract */
8121 rn = (insn >> 3) & 7;
396e467c 8122 tmp = load_reg(s, rn);
99c475ab
FB
8123 if (insn & (1 << 10)) {
8124 /* immediate */
396e467c
FN
8125 tmp2 = new_tmp();
8126 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8127 } else {
8128 /* reg */
8129 rm = (insn >> 6) & 7;
396e467c 8130 tmp2 = load_reg(s, rm);
99c475ab 8131 }
9ee6e8bb
PB
8132 if (insn & (1 << 9)) {
8133 if (s->condexec_mask)
396e467c 8134 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8135 else
396e467c 8136 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8137 } else {
8138 if (s->condexec_mask)
396e467c 8139 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8140 else
396e467c 8141 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8142 }
396e467c
FN
8143 dead_tmp(tmp2);
8144 store_reg(s, rd, tmp);
99c475ab
FB
8145 } else {
8146 /* shift immediate */
8147 rm = (insn >> 3) & 7;
8148 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8149 tmp = load_reg(s, rm);
8150 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8151 if (!s->condexec_mask)
8152 gen_logic_CC(tmp);
8153 store_reg(s, rd, tmp);
99c475ab
FB
8154 }
8155 break;
8156 case 2: case 3:
8157 /* arithmetic large immediate */
8158 op = (insn >> 11) & 3;
8159 rd = (insn >> 8) & 0x7;
396e467c
FN
8160 if (op == 0) { /* mov */
8161 tmp = new_tmp();
8162 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8163 if (!s->condexec_mask)
396e467c
FN
8164 gen_logic_CC(tmp);
8165 store_reg(s, rd, tmp);
8166 } else {
8167 tmp = load_reg(s, rd);
8168 tmp2 = new_tmp();
8169 tcg_gen_movi_i32(tmp2, insn & 0xff);
8170 switch (op) {
8171 case 1: /* cmp */
8172 gen_helper_sub_cc(tmp, tmp, tmp2);
8173 dead_tmp(tmp);
8174 dead_tmp(tmp2);
8175 break;
8176 case 2: /* add */
8177 if (s->condexec_mask)
8178 tcg_gen_add_i32(tmp, tmp, tmp2);
8179 else
8180 gen_helper_add_cc(tmp, tmp, tmp2);
8181 dead_tmp(tmp2);
8182 store_reg(s, rd, tmp);
8183 break;
8184 case 3: /* sub */
8185 if (s->condexec_mask)
8186 tcg_gen_sub_i32(tmp, tmp, tmp2);
8187 else
8188 gen_helper_sub_cc(tmp, tmp, tmp2);
8189 dead_tmp(tmp2);
8190 store_reg(s, rd, tmp);
8191 break;
8192 }
99c475ab 8193 }
99c475ab
FB
8194 break;
8195 case 4:
8196 if (insn & (1 << 11)) {
8197 rd = (insn >> 8) & 7;
5899f386
FB
8198 /* load pc-relative. Bit 1 of PC is ignored. */
8199 val = s->pc + 2 + ((insn & 0xff) * 4);
8200 val &= ~(uint32_t)2;
b0109805
PB
8201 addr = new_tmp();
8202 tcg_gen_movi_i32(addr, val);
8203 tmp = gen_ld32(addr, IS_USER(s));
8204 dead_tmp(addr);
8205 store_reg(s, rd, tmp);
99c475ab
FB
8206 break;
8207 }
8208 if (insn & (1 << 10)) {
8209 /* data processing extended or blx */
8210 rd = (insn & 7) | ((insn >> 4) & 8);
8211 rm = (insn >> 3) & 0xf;
8212 op = (insn >> 8) & 3;
8213 switch (op) {
8214 case 0: /* add */
396e467c
FN
8215 tmp = load_reg(s, rd);
8216 tmp2 = load_reg(s, rm);
8217 tcg_gen_add_i32(tmp, tmp, tmp2);
8218 dead_tmp(tmp2);
8219 store_reg(s, rd, tmp);
99c475ab
FB
8220 break;
8221 case 1: /* cmp */
396e467c
FN
8222 tmp = load_reg(s, rd);
8223 tmp2 = load_reg(s, rm);
8224 gen_helper_sub_cc(tmp, tmp, tmp2);
8225 dead_tmp(tmp2);
8226 dead_tmp(tmp);
99c475ab
FB
8227 break;
8228 case 2: /* mov/cpy */
396e467c
FN
8229 tmp = load_reg(s, rm);
8230 store_reg(s, rd, tmp);
99c475ab
FB
8231 break;
8232 case 3:/* branch [and link] exchange thumb register */
b0109805 8233 tmp = load_reg(s, rm);
99c475ab
FB
8234 if (insn & (1 << 7)) {
8235 val = (uint32_t)s->pc | 1;
b0109805
PB
8236 tmp2 = new_tmp();
8237 tcg_gen_movi_i32(tmp2, val);
8238 store_reg(s, 14, tmp2);
99c475ab 8239 }
d9ba4830 8240 gen_bx(s, tmp);
99c475ab
FB
8241 break;
8242 }
8243 break;
8244 }
8245
8246 /* data processing register */
8247 rd = insn & 7;
8248 rm = (insn >> 3) & 7;
8249 op = (insn >> 6) & 0xf;
8250 if (op == 2 || op == 3 || op == 4 || op == 7) {
8251 /* the shift/rotate ops want the operands backwards */
8252 val = rm;
8253 rm = rd;
8254 rd = val;
8255 val = 1;
8256 } else {
8257 val = 0;
8258 }
8259
396e467c
FN
8260 if (op == 9) { /* neg */
8261 tmp = new_tmp();
8262 tcg_gen_movi_i32(tmp, 0);
8263 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8264 tmp = load_reg(s, rd);
8265 } else {
8266 TCGV_UNUSED(tmp);
8267 }
99c475ab 8268
396e467c 8269 tmp2 = load_reg(s, rm);
5899f386 8270 switch (op) {
99c475ab 8271 case 0x0: /* and */
396e467c 8272 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8273 if (!s->condexec_mask)
396e467c 8274 gen_logic_CC(tmp);
99c475ab
FB
8275 break;
8276 case 0x1: /* eor */
396e467c 8277 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8278 if (!s->condexec_mask)
396e467c 8279 gen_logic_CC(tmp);
99c475ab
FB
8280 break;
8281 case 0x2: /* lsl */
9ee6e8bb 8282 if (s->condexec_mask) {
396e467c 8283 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8284 } else {
396e467c
FN
8285 gen_helper_shl_cc(tmp2, tmp2, tmp);
8286 gen_logic_CC(tmp2);
9ee6e8bb 8287 }
99c475ab
FB
8288 break;
8289 case 0x3: /* lsr */
9ee6e8bb 8290 if (s->condexec_mask) {
396e467c 8291 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8292 } else {
396e467c
FN
8293 gen_helper_shr_cc(tmp2, tmp2, tmp);
8294 gen_logic_CC(tmp2);
9ee6e8bb 8295 }
99c475ab
FB
8296 break;
8297 case 0x4: /* asr */
9ee6e8bb 8298 if (s->condexec_mask) {
396e467c 8299 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8300 } else {
396e467c
FN
8301 gen_helper_sar_cc(tmp2, tmp2, tmp);
8302 gen_logic_CC(tmp2);
9ee6e8bb 8303 }
99c475ab
FB
8304 break;
8305 case 0x5: /* adc */
9ee6e8bb 8306 if (s->condexec_mask)
396e467c 8307 gen_adc(tmp, tmp2);
9ee6e8bb 8308 else
396e467c 8309 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8310 break;
8311 case 0x6: /* sbc */
9ee6e8bb 8312 if (s->condexec_mask)
396e467c 8313 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8314 else
396e467c 8315 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8316 break;
8317 case 0x7: /* ror */
9ee6e8bb 8318 if (s->condexec_mask) {
396e467c 8319 gen_helper_ror(tmp2, tmp2, tmp);
9ee6e8bb 8320 } else {
396e467c
FN
8321 gen_helper_ror_cc(tmp2, tmp2, tmp);
8322 gen_logic_CC(tmp2);
9ee6e8bb 8323 }
99c475ab
FB
8324 break;
8325 case 0x8: /* tst */
396e467c
FN
8326 tcg_gen_and_i32(tmp, tmp, tmp2);
8327 gen_logic_CC(tmp);
99c475ab 8328 rd = 16;
5899f386 8329 break;
99c475ab 8330 case 0x9: /* neg */
9ee6e8bb 8331 if (s->condexec_mask)
396e467c 8332 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8333 else
396e467c 8334 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8335 break;
8336 case 0xa: /* cmp */
396e467c 8337 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8338 rd = 16;
8339 break;
8340 case 0xb: /* cmn */
396e467c 8341 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8342 rd = 16;
8343 break;
8344 case 0xc: /* orr */
396e467c 8345 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8346 if (!s->condexec_mask)
396e467c 8347 gen_logic_CC(tmp);
99c475ab
FB
8348 break;
8349 case 0xd: /* mul */
7b2919a0 8350 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8351 if (!s->condexec_mask)
396e467c 8352 gen_logic_CC(tmp);
99c475ab
FB
8353 break;
8354 case 0xe: /* bic */
396e467c 8355 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb 8356 if (!s->condexec_mask)
396e467c 8357 gen_logic_CC(tmp);
99c475ab
FB
8358 break;
8359 case 0xf: /* mvn */
396e467c 8360 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8361 if (!s->condexec_mask)
396e467c 8362 gen_logic_CC(tmp2);
99c475ab 8363 val = 1;
5899f386 8364 rm = rd;
99c475ab
FB
8365 break;
8366 }
8367 if (rd != 16) {
396e467c
FN
8368 if (val) {
8369 store_reg(s, rm, tmp2);
8370 if (op != 0xf)
8371 dead_tmp(tmp);
8372 } else {
8373 store_reg(s, rd, tmp);
8374 dead_tmp(tmp2);
8375 }
8376 } else {
8377 dead_tmp(tmp);
8378 dead_tmp(tmp2);
99c475ab
FB
8379 }
8380 break;
8381
8382 case 5:
8383 /* load/store register offset. */
8384 rd = insn & 7;
8385 rn = (insn >> 3) & 7;
8386 rm = (insn >> 6) & 7;
8387 op = (insn >> 9) & 7;
b0109805 8388 addr = load_reg(s, rn);
b26eefb6 8389 tmp = load_reg(s, rm);
b0109805 8390 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8391 dead_tmp(tmp);
99c475ab
FB
8392
8393 if (op < 3) /* store */
b0109805 8394 tmp = load_reg(s, rd);
99c475ab
FB
8395
8396 switch (op) {
8397 case 0: /* str */
b0109805 8398 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8399 break;
8400 case 1: /* strh */
b0109805 8401 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8402 break;
8403 case 2: /* strb */
b0109805 8404 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8405 break;
8406 case 3: /* ldrsb */
b0109805 8407 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8408 break;
8409 case 4: /* ldr */
b0109805 8410 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8411 break;
8412 case 5: /* ldrh */
b0109805 8413 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8414 break;
8415 case 6: /* ldrb */
b0109805 8416 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8417 break;
8418 case 7: /* ldrsh */
b0109805 8419 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8420 break;
8421 }
8422 if (op >= 3) /* load */
b0109805
PB
8423 store_reg(s, rd, tmp);
8424 dead_tmp(addr);
99c475ab
FB
8425 break;
8426
8427 case 6:
8428 /* load/store word immediate offset */
8429 rd = insn & 7;
8430 rn = (insn >> 3) & 7;
b0109805 8431 addr = load_reg(s, rn);
99c475ab 8432 val = (insn >> 4) & 0x7c;
b0109805 8433 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8434
8435 if (insn & (1 << 11)) {
8436 /* load */
b0109805
PB
8437 tmp = gen_ld32(addr, IS_USER(s));
8438 store_reg(s, rd, tmp);
99c475ab
FB
8439 } else {
8440 /* store */
b0109805
PB
8441 tmp = load_reg(s, rd);
8442 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8443 }
b0109805 8444 dead_tmp(addr);
99c475ab
FB
8445 break;
8446
8447 case 7:
8448 /* load/store byte immediate offset */
8449 rd = insn & 7;
8450 rn = (insn >> 3) & 7;
b0109805 8451 addr = load_reg(s, rn);
99c475ab 8452 val = (insn >> 6) & 0x1f;
b0109805 8453 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8454
8455 if (insn & (1 << 11)) {
8456 /* load */
b0109805
PB
8457 tmp = gen_ld8u(addr, IS_USER(s));
8458 store_reg(s, rd, tmp);
99c475ab
FB
8459 } else {
8460 /* store */
b0109805
PB
8461 tmp = load_reg(s, rd);
8462 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8463 }
b0109805 8464 dead_tmp(addr);
99c475ab
FB
8465 break;
8466
8467 case 8:
8468 /* load/store halfword immediate offset */
8469 rd = insn & 7;
8470 rn = (insn >> 3) & 7;
b0109805 8471 addr = load_reg(s, rn);
99c475ab 8472 val = (insn >> 5) & 0x3e;
b0109805 8473 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8474
8475 if (insn & (1 << 11)) {
8476 /* load */
b0109805
PB
8477 tmp = gen_ld16u(addr, IS_USER(s));
8478 store_reg(s, rd, tmp);
99c475ab
FB
8479 } else {
8480 /* store */
b0109805
PB
8481 tmp = load_reg(s, rd);
8482 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8483 }
b0109805 8484 dead_tmp(addr);
99c475ab
FB
8485 break;
8486
8487 case 9:
8488 /* load/store from stack */
8489 rd = (insn >> 8) & 7;
b0109805 8490 addr = load_reg(s, 13);
99c475ab 8491 val = (insn & 0xff) * 4;
b0109805 8492 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8493
8494 if (insn & (1 << 11)) {
8495 /* load */
b0109805
PB
8496 tmp = gen_ld32(addr, IS_USER(s));
8497 store_reg(s, rd, tmp);
99c475ab
FB
8498 } else {
8499 /* store */
b0109805
PB
8500 tmp = load_reg(s, rd);
8501 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8502 }
b0109805 8503 dead_tmp(addr);
99c475ab
FB
8504 break;
8505
8506 case 10:
8507 /* add to high reg */
8508 rd = (insn >> 8) & 7;
5899f386
FB
8509 if (insn & (1 << 11)) {
8510 /* SP */
5e3f878a 8511 tmp = load_reg(s, 13);
5899f386
FB
8512 } else {
8513 /* PC. bit 1 is ignored. */
5e3f878a
PB
8514 tmp = new_tmp();
8515 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8516 }
99c475ab 8517 val = (insn & 0xff) * 4;
5e3f878a
PB
8518 tcg_gen_addi_i32(tmp, tmp, val);
8519 store_reg(s, rd, tmp);
99c475ab
FB
8520 break;
8521
8522 case 11:
8523 /* misc */
8524 op = (insn >> 8) & 0xf;
8525 switch (op) {
8526 case 0:
8527 /* adjust stack pointer */
b26eefb6 8528 tmp = load_reg(s, 13);
99c475ab
FB
8529 val = (insn & 0x7f) * 4;
8530 if (insn & (1 << 7))
6a0d8a1d 8531 val = -(int32_t)val;
b26eefb6
PB
8532 tcg_gen_addi_i32(tmp, tmp, val);
8533 store_reg(s, 13, tmp);
99c475ab
FB
8534 break;
8535
9ee6e8bb
PB
8536 case 2: /* sign/zero extend. */
8537 ARCH(6);
8538 rd = insn & 7;
8539 rm = (insn >> 3) & 7;
b0109805 8540 tmp = load_reg(s, rm);
9ee6e8bb 8541 switch ((insn >> 6) & 3) {
b0109805
PB
8542 case 0: gen_sxth(tmp); break;
8543 case 1: gen_sxtb(tmp); break;
8544 case 2: gen_uxth(tmp); break;
8545 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8546 }
b0109805 8547 store_reg(s, rd, tmp);
9ee6e8bb 8548 break;
99c475ab
FB
8549 case 4: case 5: case 0xc: case 0xd:
8550 /* push/pop */
b0109805 8551 addr = load_reg(s, 13);
5899f386
FB
8552 if (insn & (1 << 8))
8553 offset = 4;
99c475ab 8554 else
5899f386
FB
8555 offset = 0;
8556 for (i = 0; i < 8; i++) {
8557 if (insn & (1 << i))
8558 offset += 4;
8559 }
8560 if ((insn & (1 << 11)) == 0) {
b0109805 8561 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8562 }
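/* Editor's note: illustrative example, not part of the original source.
 * For push {r0, r4, lr} (insn = 0xb511): offset = 4 + 4 + 4 = 12 and
 * addr is now sp - 12; the loop below stores r0 and r4 at ascending
 * addresses, the lr store follows, and sp is finally written back as
 * sp - 12. */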
99c475ab
FB
8563 for (i = 0; i < 8; i++) {
8564 if (insn & (1 << i)) {
8565 if (insn & (1 << 11)) {
8566 /* pop */
b0109805
PB
8567 tmp = gen_ld32(addr, IS_USER(s));
8568 store_reg(s, i, tmp);
99c475ab
FB
8569 } else {
8570 /* push */
b0109805
PB
8571 tmp = load_reg(s, i);
8572 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8573 }
5899f386 8574 /* advance to the next address. */
b0109805 8575 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8576 }
8577 }
a50f5b91 8578 TCGV_UNUSED(tmp);
99c475ab
FB
8579 if (insn & (1 << 8)) {
8580 if (insn & (1 << 11)) {
8581 /* pop pc */
b0109805 8582 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8583 /* don't set the pc until the rest of the instruction
8584 has completed */
8585 } else {
8586 /* push lr */
b0109805
PB
8587 tmp = load_reg(s, 14);
8588 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8589 }
b0109805 8590 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8591 }
5899f386 8592 if ((insn & (1 << 11)) == 0) {
b0109805 8593 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8594 }
99c475ab 8595 /* write back the new stack pointer */
b0109805 8596 store_reg(s, 13, addr);
99c475ab
FB
8597 /* set the new PC value */
8598 if ((insn & 0x0900) == 0x0900)
b0109805 8599 gen_bx(s, tmp);
99c475ab
FB
8600 break;
8601
9ee6e8bb
PB
8602 case 1: case 3: case 9: case 11: /* cbz */
8603 rm = insn & 7;
d9ba4830 8604 tmp = load_reg(s, rm);
9ee6e8bb
PB
8605 s->condlabel = gen_new_label();
8606 s->condjmp = 1;
8607 if (insn & (1 << 11))
cb63669a 8608 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8609 else
cb63669a 8610 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8611 dead_tmp(tmp);
9ee6e8bb
PB
8612 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
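/* Editor's note: clarifying comment, not part of the original source.
 * This reassembles the branch offset as i:imm5:'0' (insn bit 9 is i,
 * bits [7:3] are imm5); e.g. insn = 0xb10b (cbz r3 with imm5 = 1)
 * gives offset = 2, so the target below is s->pc + 2 + 2. */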
8613 val = (uint32_t)s->pc + 2;
8614 val += offset;
8615 gen_jmp(s, val);
8616 break;
8617
8618 case 15: /* IT, nop-hint. */
8619 if ((insn & 0xf) == 0) {
8620 gen_nop_hint(s, (insn >> 4) & 0xf);
8621 break;
8622 }
8623 /* If Then. */
8624 s->condexec_cond = (insn >> 4) & 0xe;
8625 s->condexec_mask = insn & 0x1f;
8626 /* No actual code generated for this insn, just setup state. */
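/* Editor's note: illustrative example, not part of the original source.
 * Assuming the usual IT encoding, ITTE EQ (firstcond = 0000,
 * mask = 0110, insn = 0xbf06) stores condexec_cond = 0 and
 * condexec_mask = 0b00110; the low bit of each successive condition
 * travels in the top bit of this 5-bit mask and is shifted into
 * condexec_cond by the per-insn update in
 * gen_intermediate_code_internal(). */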
8627 break;
8628
06c949e6 8629 case 0xe: /* bkpt */
9ee6e8bb 8630 gen_set_condexec(s);
5e3f878a 8631 gen_set_pc_im(s->pc - 2);
d9ba4830 8632 gen_exception(EXCP_BKPT);
06c949e6
PB
8633 s->is_jmp = DISAS_JUMP;
8634 break;
8635
9ee6e8bb
PB
8636 case 0xa: /* rev */
8637 ARCH(6);
8638 rn = (insn >> 3) & 0x7;
8639 rd = insn & 0x7;
b0109805 8640 tmp = load_reg(s, rn);
9ee6e8bb 8641 switch ((insn >> 6) & 3) {
66896cb8 8642 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8643 case 1: gen_rev16(tmp); break;
8644 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8645 default: goto illegal_op;
8646 }
b0109805 8647 store_reg(s, rd, tmp);
9ee6e8bb
PB
8648 break;
8649
8650 case 6: /* cps */
8651 ARCH(6);
8652 if (IS_USER(s))
8653 break;
8654 if (IS_M(env)) {
8984bd2e 8655 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8656 /* PRIMASK */
8984bd2e
PB
8657 if (insn & 1) {
8658 addr = tcg_const_i32(16);
8659 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8660 tcg_temp_free_i32(addr);
8984bd2e 8661 }
9ee6e8bb 8662 /* FAULTMASK */
8984bd2e
PB
8663 if (insn & 2) {
8664 addr = tcg_const_i32(17);
8665 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8666 tcg_temp_free_i32(addr);
8984bd2e 8667 }
b75263d6 8668 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8669 gen_lookup_tb(s);
8670 } else {
8671 if (insn & (1 << 4))
8672 shift = CPSR_A | CPSR_I | CPSR_F;
8673 else
8674 shift = 0;
2fbac54b 8675 gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
9ee6e8bb
PB
8676 }
8677 break;
8678
99c475ab
FB
8679 default:
8680 goto undef;
8681 }
8682 break;
8683
8684 case 12:
8685 /* load/store multiple */
8686 rn = (insn >> 8) & 0x7;
b0109805 8687 addr = load_reg(s, rn);
99c475ab
FB
8688 for (i = 0; i < 8; i++) {
8689 if (insn & (1 << i)) {
99c475ab
FB
8690 if (insn & (1 << 11)) {
8691 /* load */
b0109805
PB
8692 tmp = gen_ld32(addr, IS_USER(s));
8693 store_reg(s, i, tmp);
99c475ab
FB
8694 } else {
8695 /* store */
b0109805
PB
8696 tmp = load_reg(s, i);
8697 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8698 }
5899f386 8699 /* advance to the next address */
b0109805 8700 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8701 }
8702 }
5899f386 8703 /* Base register writeback. */
b0109805
PB
8704 if ((insn & (1 << rn)) == 0) {
8705 store_reg(s, rn, addr);
8706 } else {
8707 dead_tmp(addr);
8708 }
99c475ab
FB
8709 break;
8710
8711 case 13:
8712 /* conditional branch or swi */
8713 cond = (insn >> 8) & 0xf;
8714 if (cond == 0xe)
8715 goto undef;
8716
8717 if (cond == 0xf) {
8718 /* swi */
9ee6e8bb 8719 gen_set_condexec(s);
422ebf69 8720 gen_set_pc_im(s->pc);
9ee6e8bb 8721 s->is_jmp = DISAS_SWI;
99c475ab
FB
8722 break;
8723 }
8724 /* generate a conditional jump to next instruction */
e50e6a20 8725 s->condlabel = gen_new_label();
d9ba4830 8726 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8727 s->condjmp = 1;
99c475ab
FB
8728
8729 /* jump to the offset */
5899f386 8730 val = (uint32_t)s->pc + 2;
99c475ab 8731 offset = ((int32_t)insn << 24) >> 24;
5899f386 8732 val += offset << 1;
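/* Editor's note: illustrative example, not part of the original source.
 * The shift pair above sign-extends imm8; e.g. beq with imm8 = 0xfe
 * (insn = 0xd0fe) gives offset = -2, so val = s->pc + 2 - 4, i.e. the
 * address of the beq itself (a branch-to-self). */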
8aaca4c0 8733 gen_jmp(s, val);
99c475ab
FB
8734 break;
8735
8736 case 14:
358bf29e 8737 if (insn & (1 << 11)) {
9ee6e8bb
PB
8738 if (disas_thumb2_insn(env, s, insn))
8739 goto undef32;
358bf29e
PB
8740 break;
8741 }
9ee6e8bb 8742 /* unconditional branch */
99c475ab
FB
8743 val = (uint32_t)s->pc;
8744 offset = ((int32_t)insn << 21) >> 21;
8745 val += (offset << 1) + 2;
8aaca4c0 8746 gen_jmp(s, val);
99c475ab
FB
8747 break;
8748
8749 case 15:
9ee6e8bb 8750 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8751 goto undef32;
9ee6e8bb 8752 break;
99c475ab
FB
8753 }
8754 return;
9ee6e8bb
PB
8755undef32:
8756 gen_set_condexec(s);
5e3f878a 8757 gen_set_pc_im(s->pc - 4);
d9ba4830 8758 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8759 s->is_jmp = DISAS_JUMP;
8760 return;
8761illegal_op:
99c475ab 8762undef:
9ee6e8bb 8763 gen_set_condexec(s);
5e3f878a 8764 gen_set_pc_im(s->pc - 2);
d9ba4830 8765 gen_exception(EXCP_UDEF);
99c475ab
FB
8766 s->is_jmp = DISAS_JUMP;
8767}
8768
2c0262af
FB
8769/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8770 basic block 'tb'. If search_pc is TRUE, also generate PC
8771 information for each intermediate instruction. */
2cfc5f17
TS
8772static inline void gen_intermediate_code_internal(CPUState *env,
8773 TranslationBlock *tb,
8774 int search_pc)
2c0262af
FB
8775{
8776 DisasContext dc1, *dc = &dc1;
a1d1bb31 8777 CPUBreakpoint *bp;
2c0262af
FB
8778 uint16_t *gen_opc_end;
8779 int j, lj;
0fa85d43 8780 target_ulong pc_start;
b5ff1b31 8781 uint32_t next_page_start;
2e70f6ef
PB
8782 int num_insns;
8783 int max_insns;
3b46e624 8784
2c0262af 8785 /* generate intermediate code */
b26eefb6 8786 num_temps = 0;
b26eefb6 8787
0fa85d43 8788 pc_start = tb->pc;
3b46e624 8789
2c0262af
FB
8790 dc->tb = tb;
8791
2c0262af 8792 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8793
8794 dc->is_jmp = DISAS_NEXT;
8795 dc->pc = pc_start;
8aaca4c0 8796 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8797 dc->condjmp = 0;
5899f386 8798 dc->thumb = env->thumb;
9ee6e8bb
PB
8799 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8800 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 8801#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8802 if (IS_M(env)) {
8803 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8804 } else {
8805 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8806 }
b5ff1b31 8807#endif
a7812ae4
PB
8808 cpu_F0s = tcg_temp_new_i32();
8809 cpu_F1s = tcg_temp_new_i32();
8810 cpu_F0d = tcg_temp_new_i64();
8811 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8812 cpu_V0 = cpu_F0d;
8813 cpu_V1 = cpu_F1d;
e677137d 8814 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8815 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8816 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8817 lj = -1;
2e70f6ef
PB
8818 num_insns = 0;
8819 max_insns = tb->cflags & CF_COUNT_MASK;
8820 if (max_insns == 0)
8821 max_insns = CF_COUNT_MASK;
8822
8823 gen_icount_start();
9ee6e8bb
PB
8824 /* Reset the conditional execution bits immediately. This avoids
8825 complications trying to do it at the end of the block. */
8826 if (env->condexec_bits)
8f01245e
PB
8827 {
8828 TCGv tmp = new_tmp();
8829 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8830 store_cpu_field(tmp, condexec_bits);
8f01245e 8831 }
2c0262af 8832 do {
fbb4a2e3
PB
8833#ifdef CONFIG_USER_ONLY
8834 /* Intercept jump to the magic kernel page. */
8835 if (dc->pc >= 0xffff0000) {
8836 /* We always get here via a jump, so we know we are not in a
8837 conditional execution block. */
8838 gen_exception(EXCP_KERNEL_TRAP);
8839 dc->is_jmp = DISAS_UPDATE;
8840 break;
8841 }
8842#else
9ee6e8bb
PB
8843 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8844 /* We always get here via a jump, so we know we are not in a
8845 conditional execution block. */
d9ba4830 8846 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8847 dc->is_jmp = DISAS_UPDATE;
8848 break;
9ee6e8bb
PB
8849 }
8850#endif
8851
72cf2d4f
BS
8852 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8853 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8854 if (bp->pc == dc->pc) {
9ee6e8bb 8855 gen_set_condexec(dc);
5e3f878a 8856 gen_set_pc_im(dc->pc);
d9ba4830 8857 gen_exception(EXCP_DEBUG);
1fddef4b 8858 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8859 /* Advance PC so that clearing the breakpoint will
8860 invalidate this TB. */
8861 dc->pc += 2;
8862 goto done_generating;
1fddef4b
FB
8863 break;
8864 }
8865 }
8866 }
2c0262af
FB
8867 if (search_pc) {
8868 j = gen_opc_ptr - gen_opc_buf;
8869 if (lj < j) {
8870 lj++;
8871 while (lj < j)
8872 gen_opc_instr_start[lj++] = 0;
8873 }
0fa85d43 8874 gen_opc_pc[lj] = dc->pc;
2c0262af 8875 gen_opc_instr_start[lj] = 1;
2e70f6ef 8876 gen_opc_icount[lj] = num_insns;
2c0262af 8877 }
e50e6a20 8878
2e70f6ef
PB
8879 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8880 gen_io_start();
8881
9ee6e8bb
PB
8882 if (env->thumb) {
8883 disas_thumb_insn(env, dc);
8884 if (dc->condexec_mask) {
8885 dc->condexec_cond = (dc->condexec_cond & 0xe)
8886 | ((dc->condexec_mask >> 4) & 1);
8887 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8888 if (dc->condexec_mask == 0) {
8889 dc->condexec_cond = 0;
8890 }
8891 }
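/* Editor's note: illustrative trace, not part of the original source.
 * Continuing the ITTE EQ example from disas_thumb_insn (condexec_cond
 * = 0, condexec_mask = 0b00110 after the IT insn itself), successive
 * passes through this update give:
 *   (EQ, 0b01100) -> conditions the 1st insn of the block
 *   (EQ, 0b11000) -> conditions the 2nd insn (T)
 *   (NE, 0b10000) -> conditions the 3rd insn (E)
 * and the pass after the 3rd insn shifts the mask to 0, ending the
 * block and clearing condexec_cond. */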
8892 } else {
8893 disas_arm_insn(env, dc);
8894 }
b26eefb6
PB
8895 if (num_temps) {
8896 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8897 num_temps = 0;
8898 }
e50e6a20
FB
8899
8900 if (dc->condjmp && !dc->is_jmp) {
8901 gen_set_label(dc->condlabel);
8902 dc->condjmp = 0;
8903 }
aaf2d97d 8904 /* Translation stops when a conditional branch is encountered.
e50e6a20 8905 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8906 * Also stop translation when a page boundary is reached. This
bf20dc07 8907 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8908 num_insns ++;
1fddef4b
FB
8909 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8910 !env->singlestep_enabled &&
1b530a6d 8911 !singlestep &&
2e70f6ef
PB
8912 dc->pc < next_page_start &&
8913 num_insns < max_insns);
8914
8915 if (tb->cflags & CF_LAST_IO) {
8916 if (dc->condjmp) {
8917 /* FIXME: This can theoretically happen with self-modifying
8918 code. */
8919 cpu_abort(env, "IO on conditional branch instruction");
8920 }
8921 gen_io_end();
8922 }
9ee6e8bb 8923
b5ff1b31 8924 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8925 instruction was a conditional branch or trap, and the PC has
8926 already been written. */
551bd27f 8927 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8928 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8929 if (dc->condjmp) {
9ee6e8bb
PB
8930 gen_set_condexec(dc);
8931 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8932 gen_exception(EXCP_SWI);
9ee6e8bb 8933 } else {
d9ba4830 8934 gen_exception(EXCP_DEBUG);
9ee6e8bb 8935 }
e50e6a20
FB
8936 gen_set_label(dc->condlabel);
8937 }
8938 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8939 gen_set_pc_im(dc->pc);
e50e6a20 8940 dc->condjmp = 0;
8aaca4c0 8941 }
9ee6e8bb
PB
8942 gen_set_condexec(dc);
8943 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8944 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8945 } else {
8946 /* FIXME: Single stepping a WFI insn will not halt
8947 the CPU. */
d9ba4830 8948 gen_exception(EXCP_DEBUG);
9ee6e8bb 8949 }
8aaca4c0 8950 } else {
9ee6e8bb
PB
8951 /* While branches must always occur at the end of an IT block,
8952 there are a few other things that can cause us to terminate
8953 the TB in the middle of an IT block:
8954 - Exception generating instructions (bkpt, swi, undefined).
8955 - Page boundaries.
8956 - Hardware watchpoints.
8957 Hardware breakpoints have already been handled and skip this code.
8958 */
8959 gen_set_condexec(dc);
8aaca4c0 8960 switch(dc->is_jmp) {
8aaca4c0 8961 case DISAS_NEXT:
6e256c93 8962 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8963 break;
8964 default:
8965 case DISAS_JUMP:
8966 case DISAS_UPDATE:
8967 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8968 tcg_gen_exit_tb(0);
8aaca4c0
FB
8969 break;
8970 case DISAS_TB_JUMP:
8971 /* nothing more to generate */
8972 break;
9ee6e8bb 8973 case DISAS_WFI:
d9ba4830 8974 gen_helper_wfi();
9ee6e8bb
PB
8975 break;
8976 case DISAS_SWI:
d9ba4830 8977 gen_exception(EXCP_SWI);
9ee6e8bb 8978 break;
8aaca4c0 8979 }
e50e6a20
FB
8980 if (dc->condjmp) {
8981 gen_set_label(dc->condlabel);
9ee6e8bb 8982 gen_set_condexec(dc);
6e256c93 8983 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8984 dc->condjmp = 0;
8985 }
2c0262af 8986 }
2e70f6ef 8987
9ee6e8bb 8988done_generating:
2e70f6ef 8989 gen_icount_end(tb, num_insns);
2c0262af
FB
8990 *gen_opc_ptr = INDEX_op_end;
8991
8992#ifdef DEBUG_DISAS
8fec2b8c 8993 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
8994 qemu_log("----------------\n");
8995 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8996 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
8997 qemu_log("\n");
2c0262af
FB
8998 }
8999#endif
b5ff1b31
FB
9000 if (search_pc) {
9001 j = gen_opc_ptr - gen_opc_buf;
9002 lj++;
9003 while (lj <= j)
9004 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9005 } else {
2c0262af 9006 tb->size = dc->pc - pc_start;
2e70f6ef 9007 tb->icount = num_insns;
b5ff1b31 9008 }
2c0262af
FB
9009}
9010
2cfc5f17 9011void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9012{
2cfc5f17 9013 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9014}
9015
2cfc5f17 9016void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9017{
2cfc5f17 9018 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9019}
9020
b5ff1b31
FB
9021static const char *cpu_mode_names[16] = {
9022 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9023 "???", "???", "???", "und", "???", "???", "???", "sys"
9024};
9ee6e8bb 9025
5fafdf24 9026void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
9027 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
9028 int flags)
2c0262af
FB
9029{
9030 int i;
06e80fc9 9031#if 0
bc380d17 9032 union {
b7bcbe95
FB
9033 uint32_t i;
9034 float s;
9035 } s0, s1;
9036 CPU_DoubleU d;
a94a6abf
PB
9037 /* ??? This assumes float64 and double have the same layout.
9038 Oh well, it's only debug dumps. */
9039 union {
9040 float64 f64;
9041 double d;
9042 } d0;
06e80fc9 9043#endif
b5ff1b31 9044 uint32_t psr;
2c0262af
FB
9045
9046 for(i=0;i<16;i++) {
7fe48483 9047 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9048 if ((i % 4) == 3)
7fe48483 9049 cpu_fprintf(f, "\n");
2c0262af 9050 else
7fe48483 9051 cpu_fprintf(f, " ");
2c0262af 9052 }
b5ff1b31 9053 psr = cpsr_read(env);
687fa640
TS
9054 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9055 psr,
b5ff1b31
FB
9056 psr & (1 << 31) ? 'N' : '-',
9057 psr & (1 << 30) ? 'Z' : '-',
9058 psr & (1 << 29) ? 'C' : '-',
9059 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9060 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9061 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9062
5e3f878a 9063#if 0
b7bcbe95 9064 for (i = 0; i < 16; i++) {
8e96005d
FB
9065 d.d = env->vfp.regs[i];
9066 s0.i = d.l.lower;
9067 s1.i = d.l.upper;
a94a6abf
PB
9068 d0.f64 = d.d;
9069 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9070 i * 2, (int)s0.i, s0.s,
a94a6abf 9071 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9072 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9073 d0.d);
b7bcbe95 9074 }
40f137e1 9075 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9076#endif
2c0262af 9077}
a6b025d3 9078
d2856f1a
AJ
9079void gen_pc_load(CPUState *env, TranslationBlock *tb,
9080 unsigned long searched_pc, int pc_pos, void *puc)
9081{
9082 env->regs[15] = gen_opc_pc[pc_pos];
9083}