]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
block: delete a write-only variable
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
28#include "exec-all.h"
29#include "disas.h"
57fec1fe 30#include "tcg-op.h"
79383c9c 31#include "qemu-log.h"
1497c961 32
a7812ae4 33#include "helpers.h"
1497c961 34#define GEN_HELPER 1
b26eefb6 35#include "helpers.h"
2c0262af 36
/* Architecture feature tests.  Each expands to a predicate on the local
   "env" of the decoder that uses it; ENABLE_ARCH_5J is permanently off.  */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

/* Jump to the decoder's "illegal_op" label when the required
   architecture feature is absent.  */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
2c0262af
FB
45/* internal defines */
46typedef struct DisasContext {
0fa85d43 47 target_ulong pc;
2c0262af 48 int is_jmp;
e50e6a20
FB
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
9ee6e8bb
PB
53 /* Thumb-2 condtional execution bits. */
54 int condexec_mask;
55 int condexec_cond;
2c0262af 56 struct TranslationBlock *tb;
8aaca4c0 57 int singlestep_enabled;
5899f386 58 int thumb;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
2c0262af
FB
62} DisasContext;
63
/* User-mode emulation always runs unprivileged; system emulation
   consults the per-block "user" flag.  */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
2c0262af 74
a7812ae4 75static TCGv_ptr cpu_env;
ad69471c 76/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 77static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 78static TCGv_i32 cpu_R[16];
426f5abc
PB
79static TCGv_i32 cpu_exclusive_addr;
80static TCGv_i32 cpu_exclusive_val;
81static TCGv_i32 cpu_exclusive_high;
82#ifdef CONFIG_USER_ONLY
83static TCGv_i32 cpu_exclusive_test;
84static TCGv_i32 cpu_exclusive_info;
85#endif
ad69471c 86
b26eefb6 87/* FIXME: These should be removed. */
a7812ae4
PB
88static TCGv cpu_F0s, cpu_F1s;
89static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 90
2e70f6ef
PB
91#include "gen-icount.h"
92
155c3eac
FN
93static const char *regnames[] =
94 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
95 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
96
b26eefb6
PB
97/* initialize TCG globals. */
98void arm_translate_init(void)
99{
155c3eac
FN
100 int i;
101
a7812ae4
PB
102 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
103
155c3eac
FN
104 for (i = 0; i < 16; i++) {
105 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
106 offsetof(CPUState, regs[i]),
107 regnames[i]);
108 }
426f5abc
PB
109 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
110 offsetof(CPUState, exclusive_addr), "exclusive_addr");
111 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
112 offsetof(CPUState, exclusive_val), "exclusive_val");
113 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUState, exclusive_high), "exclusive_high");
115#ifdef CONFIG_USER_ONLY
116 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, exclusive_test), "exclusive_test");
118 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_info), "exclusive_info");
120#endif
155c3eac 121
a7812ae4
PB
122#define GEN_HELPER 2
123#include "helpers.h"
b26eefb6
PB
124}
125
b26eefb6 126static int num_temps;
b26eefb6
PB
127
128/* Allocate a temporary variable. */
a7812ae4 129static TCGv_i32 new_tmp(void)
b26eefb6 130{
12edd4f2
FN
131 num_temps++;
132 return tcg_temp_new_i32();
b26eefb6
PB
133}
134
135/* Release a temporary variable. */
136static void dead_tmp(TCGv tmp)
137{
12edd4f2 138 tcg_temp_free(tmp);
b26eefb6 139 num_temps--;
b26eefb6
PB
140}
141
d9ba4830
PB
142static inline TCGv load_cpu_offset(int offset)
143{
144 TCGv tmp = new_tmp();
145 tcg_gen_ld_i32(tmp, cpu_env, offset);
146 return tmp;
147}
148
149#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
150
151static inline void store_cpu_offset(TCGv var, int offset)
152{
153 tcg_gen_st_i32(var, cpu_env, offset);
154 dead_tmp(var);
155}
156
157#define store_cpu_field(var, name) \
158 store_cpu_offset(var, offsetof(CPUState, name))
159
b26eefb6
PB
160/* Set a variable to the value of a CPU register. */
161static void load_reg_var(DisasContext *s, TCGv var, int reg)
162{
163 if (reg == 15) {
164 uint32_t addr;
165 /* normaly, since we updated PC, we need only to add one insn */
166 if (s->thumb)
167 addr = (long)s->pc + 2;
168 else
169 addr = (long)s->pc + 4;
170 tcg_gen_movi_i32(var, addr);
171 } else {
155c3eac 172 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
173 }
174}
175
176/* Create a new temporary and set it to the value of a CPU register. */
177static inline TCGv load_reg(DisasContext *s, int reg)
178{
179 TCGv tmp = new_tmp();
180 load_reg_var(s, tmp, reg);
181 return tmp;
182}
183
184/* Set a CPU register. The source must be a temporary and will be
185 marked as dead. */
186static void store_reg(DisasContext *s, int reg, TCGv var)
187{
188 if (reg == 15) {
189 tcg_gen_andi_i32(var, var, ~1);
190 s->is_jmp = DISAS_JUMP;
191 }
155c3eac 192 tcg_gen_mov_i32(cpu_R[reg], var);
b26eefb6
PB
193 dead_tmp(var);
194}
195
b26eefb6 196/* Value extensions. */
86831435
PB
197#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
198#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
199#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
200#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
201
1497c961
PB
202#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
203#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 204
b26eefb6 205
b75263d6
JR
206static inline void gen_set_cpsr(TCGv var, uint32_t mask)
207{
208 TCGv tmp_mask = tcg_const_i32(mask);
209 gen_helper_cpsr_write(var, tmp_mask);
210 tcg_temp_free_i32(tmp_mask);
211}
d9ba4830
PB
212/* Set NZCV flags from the high 4 bits of var. */
213#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
214
215static void gen_exception(int excp)
216{
217 TCGv tmp = new_tmp();
218 tcg_gen_movi_i32(tmp, excp);
219 gen_helper_exception(tmp);
220 dead_tmp(tmp);
221}
222
3670669c
PB
223static void gen_smul_dual(TCGv a, TCGv b)
224{
225 TCGv tmp1 = new_tmp();
226 TCGv tmp2 = new_tmp();
22478e79
AZ
227 tcg_gen_ext16s_i32(tmp1, a);
228 tcg_gen_ext16s_i32(tmp2, b);
3670669c
PB
229 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
230 dead_tmp(tmp2);
231 tcg_gen_sari_i32(a, a, 16);
232 tcg_gen_sari_i32(b, b, 16);
233 tcg_gen_mul_i32(b, b, a);
234 tcg_gen_mov_i32(a, tmp1);
235 dead_tmp(tmp1);
236}
237
238/* Byteswap each halfword. */
239static void gen_rev16(TCGv var)
240{
241 TCGv tmp = new_tmp();
242 tcg_gen_shri_i32(tmp, var, 8);
243 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
244 tcg_gen_shli_i32(var, var, 8);
245 tcg_gen_andi_i32(var, var, 0xff00ff00);
246 tcg_gen_or_i32(var, var, tmp);
247 dead_tmp(tmp);
248}
249
250/* Byteswap low halfword and sign extend. */
251static void gen_revsh(TCGv var)
252{
1a855029
AJ
253 tcg_gen_ext16u_i32(var, var);
254 tcg_gen_bswap16_i32(var, var);
255 tcg_gen_ext16s_i32(var, var);
3670669c
PB
256}
257
258/* Unsigned bitfield extract. */
259static void gen_ubfx(TCGv var, int shift, uint32_t mask)
260{
261 if (shift)
262 tcg_gen_shri_i32(var, var, shift);
263 tcg_gen_andi_i32(var, var, mask);
264}
265
266/* Signed bitfield extract. */
267static void gen_sbfx(TCGv var, int shift, int width)
268{
269 uint32_t signbit;
270
271 if (shift)
272 tcg_gen_sari_i32(var, var, shift);
273 if (shift + width < 32) {
274 signbit = 1u << (width - 1);
275 tcg_gen_andi_i32(var, var, (1u << width) - 1);
276 tcg_gen_xori_i32(var, var, signbit);
277 tcg_gen_subi_i32(var, var, signbit);
278 }
279}
280
281/* Bitfield insertion. Insert val into base. Clobbers base and val. */
282static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
283{
3670669c 284 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
285 tcg_gen_shli_i32(val, val, shift);
286 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
287 tcg_gen_or_i32(dest, base, val);
288}
289
d9ba4830
PB
290/* Round the top 32 bits of a 64-bit value. */
291static void gen_roundqd(TCGv a, TCGv b)
3670669c 292{
d9ba4830
PB
293 tcg_gen_shri_i32(a, a, 31);
294 tcg_gen_add_i32(a, a, b);
3670669c
PB
295}
296
8f01245e
PB
297/* FIXME: Most targets have native widening multiplication.
298 It would be good to use that instead of a full wide multiply. */
5e3f878a 299/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 300static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 301{
a7812ae4
PB
302 TCGv_i64 tmp1 = tcg_temp_new_i64();
303 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
304
305 tcg_gen_extu_i32_i64(tmp1, a);
306 dead_tmp(a);
307 tcg_gen_extu_i32_i64(tmp2, b);
308 dead_tmp(b);
309 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 310 tcg_temp_free_i64(tmp2);
5e3f878a
PB
311 return tmp1;
312}
313
a7812ae4 314static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 315{
a7812ae4
PB
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
318
319 tcg_gen_ext_i32_i64(tmp1, a);
320 dead_tmp(a);
321 tcg_gen_ext_i32_i64(tmp2, b);
322 dead_tmp(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 324 tcg_temp_free_i64(tmp2);
5e3f878a
PB
325 return tmp1;
326}
327
8f01245e 328/* Signed 32x32->64 multiply. */
d9ba4830 329static void gen_imull(TCGv a, TCGv b)
8f01245e 330{
a7812ae4
PB
331 TCGv_i64 tmp1 = tcg_temp_new_i64();
332 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e 333
d9ba4830
PB
334 tcg_gen_ext_i32_i64(tmp1, a);
335 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 337 tcg_temp_free_i64(tmp2);
d9ba4830 338 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 339 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830 340 tcg_gen_trunc_i64_i32(b, tmp1);
b75263d6 341 tcg_temp_free_i64(tmp1);
d9ba4830 342}
d9ba4830 343
8f01245e
PB
344/* Swap low and high halfwords. */
345static void gen_swap_half(TCGv var)
346{
347 TCGv tmp = new_tmp();
348 tcg_gen_shri_i32(tmp, var, 16);
349 tcg_gen_shli_i32(var, var, 16);
350 tcg_gen_or_i32(var, var, tmp);
3670669c 351 dead_tmp(tmp);
8f01245e
PB
352}
353
b26eefb6
PB
354/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
355 tmp = (t0 ^ t1) & 0x8000;
356 t0 &= ~0x8000;
357 t1 &= ~0x8000;
358 t0 = (t0 + t1) ^ tmp;
359 */
360
361static void gen_add16(TCGv t0, TCGv t1)
362{
363 TCGv tmp = new_tmp();
364 tcg_gen_xor_i32(tmp, t0, t1);
365 tcg_gen_andi_i32(tmp, tmp, 0x8000);
366 tcg_gen_andi_i32(t0, t0, ~0x8000);
367 tcg_gen_andi_i32(t1, t1, ~0x8000);
368 tcg_gen_add_i32(t0, t0, t1);
369 tcg_gen_xor_i32(t0, t0, tmp);
370 dead_tmp(tmp);
371 dead_tmp(t1);
372}
373
9a119ff6
PB
374#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
375
b26eefb6
PB
376/* Set CF to the top bit of var. */
377static void gen_set_CF_bit31(TCGv var)
378{
379 TCGv tmp = new_tmp();
380 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 381 gen_set_CF(tmp);
b26eefb6
PB
382 dead_tmp(tmp);
383}
384
385/* Set N and Z flags from var. */
386static inline void gen_logic_CC(TCGv var)
387{
6fbe23d5
PB
388 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
389 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
390}
391
392/* T0 += T1 + CF. */
396e467c 393static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 394{
d9ba4830 395 TCGv tmp;
396e467c 396 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 397 tmp = load_cpu_field(CF);
396e467c 398 tcg_gen_add_i32(t0, t0, tmp);
b26eefb6
PB
399 dead_tmp(tmp);
400}
401
e9bb4aa9
JR
402/* dest = T0 + T1 + CF. */
403static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
404{
405 TCGv tmp;
406 tcg_gen_add_i32(dest, t0, t1);
407 tmp = load_cpu_field(CF);
408 tcg_gen_add_i32(dest, dest, tmp);
409 dead_tmp(tmp);
410}
411
3670669c
PB
412/* dest = T0 - T1 + CF - 1. */
413static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
414{
d9ba4830 415 TCGv tmp;
3670669c 416 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 417 tmp = load_cpu_field(CF);
3670669c
PB
418 tcg_gen_add_i32(dest, dest, tmp);
419 tcg_gen_subi_i32(dest, dest, 1);
420 dead_tmp(tmp);
421}
422
ad69471c
PB
423/* FIXME: Implement this natively. */
424#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
425
9a119ff6 426static void shifter_out_im(TCGv var, int shift)
b26eefb6 427{
9a119ff6
PB
428 TCGv tmp = new_tmp();
429 if (shift == 0) {
430 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 431 } else {
9a119ff6 432 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 433 if (shift != 31)
9a119ff6
PB
434 tcg_gen_andi_i32(tmp, tmp, 1);
435 }
436 gen_set_CF(tmp);
437 dead_tmp(tmp);
438}
b26eefb6 439
9a119ff6
PB
440/* Shift by immediate. Includes special handling for shift == 0. */
441static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
442{
443 switch (shiftop) {
444 case 0: /* LSL */
445 if (shift != 0) {
446 if (flags)
447 shifter_out_im(var, 32 - shift);
448 tcg_gen_shli_i32(var, var, shift);
449 }
450 break;
451 case 1: /* LSR */
452 if (shift == 0) {
453 if (flags) {
454 tcg_gen_shri_i32(var, var, 31);
455 gen_set_CF(var);
456 }
457 tcg_gen_movi_i32(var, 0);
458 } else {
459 if (flags)
460 shifter_out_im(var, shift - 1);
461 tcg_gen_shri_i32(var, var, shift);
462 }
463 break;
464 case 2: /* ASR */
465 if (shift == 0)
466 shift = 32;
467 if (flags)
468 shifter_out_im(var, shift - 1);
469 if (shift == 32)
470 shift = 31;
471 tcg_gen_sari_i32(var, var, shift);
472 break;
473 case 3: /* ROR/RRX */
474 if (shift != 0) {
475 if (flags)
476 shifter_out_im(var, shift - 1);
f669df27 477 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 478 } else {
d9ba4830 479 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
480 if (flags)
481 shifter_out_im(var, 0);
482 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
483 tcg_gen_shli_i32(tmp, tmp, 31);
484 tcg_gen_or_i32(var, var, tmp);
485 dead_tmp(tmp);
b26eefb6
PB
486 }
487 }
488};
489
8984bd2e
PB
490static inline void gen_arm_shift_reg(TCGv var, int shiftop,
491 TCGv shift, int flags)
492{
493 if (flags) {
494 switch (shiftop) {
495 case 0: gen_helper_shl_cc(var, var, shift); break;
496 case 1: gen_helper_shr_cc(var, var, shift); break;
497 case 2: gen_helper_sar_cc(var, var, shift); break;
498 case 3: gen_helper_ror_cc(var, var, shift); break;
499 }
500 } else {
501 switch (shiftop) {
502 case 0: gen_helper_shl(var, var, shift); break;
503 case 1: gen_helper_shr(var, var, shift); break;
504 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
505 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
506 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
507 }
508 }
509 dead_tmp(shift);
510}
511
6ddbc6e4
PB
512#define PAS_OP(pfx) \
513 switch (op2) { \
514 case 0: gen_pas_helper(glue(pfx,add16)); break; \
515 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
516 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
517 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
518 case 4: gen_pas_helper(glue(pfx,add8)); break; \
519 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
520 }
d9ba4830 521static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 522{
a7812ae4 523 TCGv_ptr tmp;
6ddbc6e4
PB
524
525 switch (op1) {
526#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
527 case 1:
a7812ae4 528 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
529 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
530 PAS_OP(s)
b75263d6 531 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
532 break;
533 case 5:
a7812ae4 534 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
535 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
536 PAS_OP(u)
b75263d6 537 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
538 break;
539#undef gen_pas_helper
540#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
541 case 2:
542 PAS_OP(q);
543 break;
544 case 3:
545 PAS_OP(sh);
546 break;
547 case 6:
548 PAS_OP(uq);
549 break;
550 case 7:
551 PAS_OP(uh);
552 break;
553#undef gen_pas_helper
554 }
555}
9ee6e8bb
PB
556#undef PAS_OP
557
6ddbc6e4
PB
558/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
559#define PAS_OP(pfx) \
ed89a2f1 560 switch (op1) { \
6ddbc6e4
PB
561 case 0: gen_pas_helper(glue(pfx,add8)); break; \
562 case 1: gen_pas_helper(glue(pfx,add16)); break; \
563 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
564 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
565 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
566 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
567 }
d9ba4830 568static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 569{
a7812ae4 570 TCGv_ptr tmp;
6ddbc6e4 571
ed89a2f1 572 switch (op2) {
6ddbc6e4
PB
573#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
574 case 0:
a7812ae4 575 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
576 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
577 PAS_OP(s)
b75263d6 578 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
579 break;
580 case 4:
a7812ae4 581 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
582 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
583 PAS_OP(u)
b75263d6 584 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
585 break;
586#undef gen_pas_helper
587#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
588 case 1:
589 PAS_OP(q);
590 break;
591 case 2:
592 PAS_OP(sh);
593 break;
594 case 5:
595 PAS_OP(uq);
596 break;
597 case 6:
598 PAS_OP(uh);
599 break;
600#undef gen_pas_helper
601 }
602}
9ee6e8bb
PB
603#undef PAS_OP
604
d9ba4830
PB
605static void gen_test_cc(int cc, int label)
606{
607 TCGv tmp;
608 TCGv tmp2;
d9ba4830
PB
609 int inv;
610
d9ba4830
PB
611 switch (cc) {
612 case 0: /* eq: Z */
6fbe23d5 613 tmp = load_cpu_field(ZF);
cb63669a 614 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
615 break;
616 case 1: /* ne: !Z */
6fbe23d5 617 tmp = load_cpu_field(ZF);
cb63669a 618 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
619 break;
620 case 2: /* cs: C */
621 tmp = load_cpu_field(CF);
cb63669a 622 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
623 break;
624 case 3: /* cc: !C */
625 tmp = load_cpu_field(CF);
cb63669a 626 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
627 break;
628 case 4: /* mi: N */
6fbe23d5 629 tmp = load_cpu_field(NF);
cb63669a 630 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
631 break;
632 case 5: /* pl: !N */
6fbe23d5 633 tmp = load_cpu_field(NF);
cb63669a 634 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
635 break;
636 case 6: /* vs: V */
637 tmp = load_cpu_field(VF);
cb63669a 638 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
639 break;
640 case 7: /* vc: !V */
641 tmp = load_cpu_field(VF);
cb63669a 642 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
643 break;
644 case 8: /* hi: C && !Z */
645 inv = gen_new_label();
646 tmp = load_cpu_field(CF);
cb63669a 647 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 648 dead_tmp(tmp);
6fbe23d5 649 tmp = load_cpu_field(ZF);
cb63669a 650 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
651 gen_set_label(inv);
652 break;
653 case 9: /* ls: !C || Z */
654 tmp = load_cpu_field(CF);
cb63669a 655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 656 dead_tmp(tmp);
6fbe23d5 657 tmp = load_cpu_field(ZF);
cb63669a 658 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
659 break;
660 case 10: /* ge: N == V -> N ^ V == 0 */
661 tmp = load_cpu_field(VF);
6fbe23d5 662 tmp2 = load_cpu_field(NF);
d9ba4830
PB
663 tcg_gen_xor_i32(tmp, tmp, tmp2);
664 dead_tmp(tmp2);
cb63669a 665 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
666 break;
667 case 11: /* lt: N != V -> N ^ V != 0 */
668 tmp = load_cpu_field(VF);
6fbe23d5 669 tmp2 = load_cpu_field(NF);
d9ba4830
PB
670 tcg_gen_xor_i32(tmp, tmp, tmp2);
671 dead_tmp(tmp2);
cb63669a 672 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
673 break;
674 case 12: /* gt: !Z && N == V */
675 inv = gen_new_label();
6fbe23d5 676 tmp = load_cpu_field(ZF);
cb63669a 677 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
678 dead_tmp(tmp);
679 tmp = load_cpu_field(VF);
6fbe23d5 680 tmp2 = load_cpu_field(NF);
d9ba4830
PB
681 tcg_gen_xor_i32(tmp, tmp, tmp2);
682 dead_tmp(tmp2);
cb63669a 683 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
684 gen_set_label(inv);
685 break;
686 case 13: /* le: Z || N != V */
6fbe23d5 687 tmp = load_cpu_field(ZF);
cb63669a 688 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
689 dead_tmp(tmp);
690 tmp = load_cpu_field(VF);
6fbe23d5 691 tmp2 = load_cpu_field(NF);
d9ba4830
PB
692 tcg_gen_xor_i32(tmp, tmp, tmp2);
693 dead_tmp(tmp2);
cb63669a 694 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
695 break;
696 default:
697 fprintf(stderr, "Bad condition code 0x%x\n", cc);
698 abort();
699 }
700 dead_tmp(tmp);
701}
2c0262af 702
b1d8e52e 703static const uint8_t table_logic_cc[16] = {
2c0262af
FB
704 1, /* and */
705 1, /* xor */
706 0, /* sub */
707 0, /* rsb */
708 0, /* add */
709 0, /* adc */
710 0, /* sbc */
711 0, /* rsc */
712 1, /* andl */
713 1, /* xorl */
714 0, /* cmp */
715 0, /* cmn */
716 1, /* orr */
717 1, /* mov */
718 1, /* bic */
719 1, /* mvn */
720};
3b46e624 721
d9ba4830
PB
722/* Set PC and Thumb state from an immediate address. */
723static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 724{
b26eefb6 725 TCGv tmp;
99c475ab 726
b26eefb6 727 s->is_jmp = DISAS_UPDATE;
d9ba4830 728 if (s->thumb != (addr & 1)) {
155c3eac 729 tmp = new_tmp();
d9ba4830
PB
730 tcg_gen_movi_i32(tmp, addr & 1);
731 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
155c3eac 732 dead_tmp(tmp);
d9ba4830 733 }
155c3eac 734 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
735}
736
737/* Set PC and Thumb state from var. var is marked as dead. */
738static inline void gen_bx(DisasContext *s, TCGv var)
739{
d9ba4830 740 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
741 tcg_gen_andi_i32(cpu_R[15], var, ~1);
742 tcg_gen_andi_i32(var, var, 1);
743 store_cpu_field(var, thumb);
d9ba4830
PB
744}
745
21aeb343
JR
746/* Variant of store_reg which uses branch&exchange logic when storing
747 to r15 in ARM architecture v7 and above. The source must be a temporary
748 and will be marked as dead. */
749static inline void store_reg_bx(CPUState *env, DisasContext *s,
750 int reg, TCGv var)
751{
752 if (reg == 15 && ENABLE_ARCH_7) {
753 gen_bx(s, var);
754 } else {
755 store_reg(s, reg, var);
756 }
757}
758
b0109805
PB
759static inline TCGv gen_ld8s(TCGv addr, int index)
760{
761 TCGv tmp = new_tmp();
762 tcg_gen_qemu_ld8s(tmp, addr, index);
763 return tmp;
764}
765static inline TCGv gen_ld8u(TCGv addr, int index)
766{
767 TCGv tmp = new_tmp();
768 tcg_gen_qemu_ld8u(tmp, addr, index);
769 return tmp;
770}
771static inline TCGv gen_ld16s(TCGv addr, int index)
772{
773 TCGv tmp = new_tmp();
774 tcg_gen_qemu_ld16s(tmp, addr, index);
775 return tmp;
776}
777static inline TCGv gen_ld16u(TCGv addr, int index)
778{
779 TCGv tmp = new_tmp();
780 tcg_gen_qemu_ld16u(tmp, addr, index);
781 return tmp;
782}
783static inline TCGv gen_ld32(TCGv addr, int index)
784{
785 TCGv tmp = new_tmp();
786 tcg_gen_qemu_ld32u(tmp, addr, index);
787 return tmp;
788}
84496233
JR
789static inline TCGv_i64 gen_ld64(TCGv addr, int index)
790{
791 TCGv_i64 tmp = tcg_temp_new_i64();
792 tcg_gen_qemu_ld64(tmp, addr, index);
793 return tmp;
794}
b0109805
PB
795static inline void gen_st8(TCGv val, TCGv addr, int index)
796{
797 tcg_gen_qemu_st8(val, addr, index);
798 dead_tmp(val);
799}
800static inline void gen_st16(TCGv val, TCGv addr, int index)
801{
802 tcg_gen_qemu_st16(val, addr, index);
803 dead_tmp(val);
804}
805static inline void gen_st32(TCGv val, TCGv addr, int index)
806{
807 tcg_gen_qemu_st32(val, addr, index);
808 dead_tmp(val);
809}
84496233
JR
810static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
811{
812 tcg_gen_qemu_st64(val, addr, index);
813 tcg_temp_free_i64(val);
814}
b5ff1b31 815
5e3f878a
PB
816static inline void gen_set_pc_im(uint32_t val)
817{
155c3eac 818 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
819}
820
b5ff1b31
FB
821/* Force a TB lookup after an instruction that changes the CPU state. */
822static inline void gen_lookup_tb(DisasContext *s)
823{
a6445c52 824 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
825 s->is_jmp = DISAS_UPDATE;
826}
827
b0109805
PB
828static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
829 TCGv var)
2c0262af 830{
1e8d4eec 831 int val, rm, shift, shiftop;
b26eefb6 832 TCGv offset;
2c0262af
FB
833
834 if (!(insn & (1 << 25))) {
835 /* immediate */
836 val = insn & 0xfff;
837 if (!(insn & (1 << 23)))
838 val = -val;
537730b9 839 if (val != 0)
b0109805 840 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
841 } else {
842 /* shift/register */
843 rm = (insn) & 0xf;
844 shift = (insn >> 7) & 0x1f;
1e8d4eec 845 shiftop = (insn >> 5) & 3;
b26eefb6 846 offset = load_reg(s, rm);
9a119ff6 847 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 848 if (!(insn & (1 << 23)))
b0109805 849 tcg_gen_sub_i32(var, var, offset);
2c0262af 850 else
b0109805 851 tcg_gen_add_i32(var, var, offset);
b26eefb6 852 dead_tmp(offset);
2c0262af
FB
853 }
854}
855
191f9a93 856static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 857 int extra, TCGv var)
2c0262af
FB
858{
859 int val, rm;
b26eefb6 860 TCGv offset;
3b46e624 861
2c0262af
FB
862 if (insn & (1 << 22)) {
863 /* immediate */
864 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
865 if (!(insn & (1 << 23)))
866 val = -val;
18acad92 867 val += extra;
537730b9 868 if (val != 0)
b0109805 869 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
870 } else {
871 /* register */
191f9a93 872 if (extra)
b0109805 873 tcg_gen_addi_i32(var, var, extra);
2c0262af 874 rm = (insn) & 0xf;
b26eefb6 875 offset = load_reg(s, rm);
2c0262af 876 if (!(insn & (1 << 23)))
b0109805 877 tcg_gen_sub_i32(var, var, offset);
2c0262af 878 else
b0109805 879 tcg_gen_add_i32(var, var, offset);
b26eefb6 880 dead_tmp(offset);
2c0262af
FB
881 }
882}
883
4373f3ce
PB
884#define VFP_OP2(name) \
885static inline void gen_vfp_##name(int dp) \
886{ \
887 if (dp) \
888 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
889 else \
890 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
891}
892
4373f3ce
PB
893VFP_OP2(add)
894VFP_OP2(sub)
895VFP_OP2(mul)
896VFP_OP2(div)
897
898#undef VFP_OP2
899
900static inline void gen_vfp_abs(int dp)
901{
902 if (dp)
903 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
904 else
905 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
906}
907
908static inline void gen_vfp_neg(int dp)
909{
910 if (dp)
911 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
912 else
913 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
914}
915
916static inline void gen_vfp_sqrt(int dp)
917{
918 if (dp)
919 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
920 else
921 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
922}
923
924static inline void gen_vfp_cmp(int dp)
925{
926 if (dp)
927 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
928 else
929 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
930}
931
932static inline void gen_vfp_cmpe(int dp)
933{
934 if (dp)
935 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
936 else
937 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
938}
939
940static inline void gen_vfp_F1_ld0(int dp)
941{
942 if (dp)
5b340b51 943 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 944 else
5b340b51 945 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
946}
947
948static inline void gen_vfp_uito(int dp)
949{
950 if (dp)
951 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
952 else
953 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
954}
955
956static inline void gen_vfp_sito(int dp)
957{
958 if (dp)
66230e0d 959 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 960 else
66230e0d 961 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
962}
963
964static inline void gen_vfp_toui(int dp)
965{
966 if (dp)
967 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
968 else
969 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
970}
971
972static inline void gen_vfp_touiz(int dp)
973{
974 if (dp)
975 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
976 else
977 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
978}
979
980static inline void gen_vfp_tosi(int dp)
981{
982 if (dp)
983 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
984 else
985 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
986}
987
988static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
989{
990 if (dp)
4373f3ce 991 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 992 else
4373f3ce
PB
993 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
994}
995
996#define VFP_GEN_FIX(name) \
997static inline void gen_vfp_##name(int dp, int shift) \
998{ \
b75263d6 999 TCGv tmp_shift = tcg_const_i32(shift); \
4373f3ce 1000 if (dp) \
b75263d6 1001 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
4373f3ce 1002 else \
b75263d6
JR
1003 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1004 tcg_temp_free_i32(tmp_shift); \
9ee6e8bb 1005}
4373f3ce
PB
1006VFP_GEN_FIX(tosh)
1007VFP_GEN_FIX(tosl)
1008VFP_GEN_FIX(touh)
1009VFP_GEN_FIX(toul)
1010VFP_GEN_FIX(shto)
1011VFP_GEN_FIX(slto)
1012VFP_GEN_FIX(uhto)
1013VFP_GEN_FIX(ulto)
1014#undef VFP_GEN_FIX
9ee6e8bb 1015
312eea9f 1016static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1017{
1018 if (dp)
312eea9f 1019 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1020 else
312eea9f 1021 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1022}
1023
312eea9f 1024static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1025{
1026 if (dp)
312eea9f 1027 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1028 else
312eea9f 1029 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1030}
1031
8e96005d
FB
1032static inline long
1033vfp_reg_offset (int dp, int reg)
1034{
1035 if (dp)
1036 return offsetof(CPUARMState, vfp.regs[reg]);
1037 else if (reg & 1) {
1038 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1039 + offsetof(CPU_DoubleU, l.upper);
1040 } else {
1041 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1042 + offsetof(CPU_DoubleU, l.lower);
1043 }
1044}
9ee6e8bb
PB
1045
1046/* Return the offset of a 32-bit piece of a NEON register.
1047 zero is the least significant end of the register. */
1048static inline long
1049neon_reg_offset (int reg, int n)
1050{
1051 int sreg;
1052 sreg = reg * 2 + n;
1053 return vfp_reg_offset(0, sreg);
1054}
1055
8f8e3aa4
PB
1056static TCGv neon_load_reg(int reg, int pass)
1057{
1058 TCGv tmp = new_tmp();
1059 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1060 return tmp;
1061}
1062
1063static void neon_store_reg(int reg, int pass, TCGv var)
1064{
1065 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1066 dead_tmp(var);
1067}
1068
/* Load the 64-bit NEON/VFP d-register REG into temporary VAR. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1073
/* Store temporary VAR into the 64-bit NEON/VFP d-register REG.
   Does not free VAR. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1078
4373f3ce
PB
/* Float values live in ordinary i32/i64 TCG registers; these aliases
   only document that the data being moved is floating point. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1083
b7bcbe95
FB
1084static inline void gen_mov_F0_vreg(int dp, int reg)
1085{
1086 if (dp)
4373f3ce 1087 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1088 else
4373f3ce 1089 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1090}
1091
1092static inline void gen_mov_F1_vreg(int dp, int reg)
1093{
1094 if (dp)
4373f3ce 1095 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1096 else
4373f3ce 1097 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1098}
1099
1100static inline void gen_mov_vreg_F0(int dp, int reg)
1101{
1102 if (dp)
4373f3ce 1103 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1104 else
4373f3ce 1105 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1106}
1107
18c9b560
AZ
/* Bit 20 of a coprocessor insn: set for transfers from the coprocessor
   to ARM registers / memory loads (see the TMRRC/TMCRR and load/store
   decode below), clear for the opposite direction. */
#define ARM_CP_RW_BIT (1 << 20)
1109
/* Load 64-bit iwMMXt data register wRn into temporary VAR. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1114
/* Store temporary VAR into 64-bit iwMMXt data register wRn.
   Does not free VAR. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1119
da6b5335 1120static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1121{
da6b5335
FN
1122 TCGv var = new_tmp();
1123 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1124 return var;
e677137d
PB
1125}
1126
/* Store VAR into 32-bit iwMMXt control register wCx.
   Consumes VAR (frees the temporary). */
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    dead_tmp(var);
}
1132
/* wRn = M0 (write the 64-bit working value back to a data register). */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1137
/* M0 = wRn (fetch a data register into the 64-bit working value). */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1142
/* M0 |= wRn. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1148
/* M0 &= wRn. */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1154
/* M0 ^= wRn. */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1160
/* IWMMXT_OP(name): define gen_op_iwmmxt_<name>_M0_wRn(rn), emitting
   M0 = helper(M0, wRn) for a helper with no CPU-state access. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* IWMMXT_OP_ENV(name): as IWMMXT_OP, but the helper additionally takes
   cpu_env because it reads/updates CPU state. */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env-taking binary op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* IWMMXT_OP_ENV1(name): unary variant, M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1185
/* Binary ops whose helpers take no cpu_env. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Unpack/interleave, all element sizes. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Unary unpack-and-extend (u/s naming suggests zero/sign extension). */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise compares. */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max. */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Element-wise add/subtract variants. */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averages (used by the WAVG2 decode below). */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Pack with saturation. */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
1242
e677137d
PB
1243static void gen_op_iwmmxt_set_mup(void)
1244{
1245 TCGv tmp;
1246 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1247 tcg_gen_ori_i32(tmp, tmp, 2);
1248 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1249}
1250
1251static void gen_op_iwmmxt_set_cup(void)
1252{
1253 TCGv tmp;
1254 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1255 tcg_gen_ori_i32(tmp, tmp, 1);
1256 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1257}
1258
/* Recompute the N/Z flags in wCASF from the 64-bit value in M0.
   store_cpu_field consumes the temporary. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1265
/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1272
da6b5335 1273static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1274{
1275 int rd;
1276 uint32_t offset;
da6b5335 1277 TCGv tmp;
18c9b560
AZ
1278
1279 rd = (insn >> 16) & 0xf;
da6b5335 1280 tmp = load_reg(s, rd);
18c9b560
AZ
1281
1282 offset = (insn & 0xff) << ((insn >> 7) & 2);
1283 if (insn & (1 << 24)) {
1284 /* Pre indexed */
1285 if (insn & (1 << 23))
da6b5335 1286 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1287 else
da6b5335
FN
1288 tcg_gen_addi_i32(tmp, tmp, -offset);
1289 tcg_gen_mov_i32(dest, tmp);
18c9b560 1290 if (insn & (1 << 21))
da6b5335
FN
1291 store_reg(s, rd, tmp);
1292 else
1293 dead_tmp(tmp);
18c9b560
AZ
1294 } else if (insn & (1 << 21)) {
1295 /* Post indexed */
da6b5335 1296 tcg_gen_mov_i32(dest, tmp);
18c9b560 1297 if (insn & (1 << 23))
da6b5335 1298 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1299 else
da6b5335
FN
1300 tcg_gen_addi_i32(tmp, tmp, -offset);
1301 store_reg(s, rd, tmp);
18c9b560
AZ
1302 } else if (!(insn & (1 << 23)))
1303 return 1;
1304 return 0;
1305}
1306
da6b5335 1307static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1308{
1309 int rd = (insn >> 0) & 0xf;
da6b5335 1310 TCGv tmp;
18c9b560 1311
da6b5335
FN
1312 if (insn & (1 << 8)) {
1313 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1314 return 1;
da6b5335
FN
1315 } else {
1316 tmp = iwmmxt_load_creg(rd);
1317 }
1318 } else {
1319 tmp = new_tmp();
1320 iwmmxt_load_reg(cpu_V0, rd);
1321 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1322 }
1323 tcg_gen_andi_i32(tmp, tmp, mask);
1324 tcg_gen_mov_i32(dest, tmp);
1325 dead_tmp(tmp);
18c9b560
AZ
1326 return 0;
1327}
1328
1329/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1330 (ie. an undefined instruction). */
1331static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1332{
1333 int rd, wrd;
1334 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1335 TCGv addr;
1336 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1337
1338 if ((insn & 0x0e000e00) == 0x0c000000) {
1339 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1340 wrd = insn & 0xf;
1341 rdlo = (insn >> 12) & 0xf;
1342 rdhi = (insn >> 16) & 0xf;
1343 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1344 iwmmxt_load_reg(cpu_V0, wrd);
1345 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1346 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1347 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1348 } else { /* TMCRR */
da6b5335
FN
1349 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1350 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1351 gen_op_iwmmxt_set_mup();
1352 }
1353 return 0;
1354 }
1355
1356 wrd = (insn >> 12) & 0xf;
da6b5335
FN
1357 addr = new_tmp();
1358 if (gen_iwmmxt_address(s, insn, addr)) {
1359 dead_tmp(addr);
18c9b560 1360 return 1;
da6b5335 1361 }
18c9b560
AZ
1362 if (insn & ARM_CP_RW_BIT) {
1363 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
da6b5335
FN
1364 tmp = new_tmp();
1365 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1366 iwmmxt_store_creg(wrd, tmp);
18c9b560 1367 } else {
e677137d
PB
1368 i = 1;
1369 if (insn & (1 << 8)) {
1370 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1371 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1372 i = 0;
1373 } else { /* WLDRW wRd */
da6b5335 1374 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1375 }
1376 } else {
1377 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1378 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1379 } else { /* WLDRB */
da6b5335 1380 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1381 }
1382 }
1383 if (i) {
1384 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1385 dead_tmp(tmp);
1386 }
18c9b560
AZ
1387 gen_op_iwmmxt_movq_wRn_M0(wrd);
1388 }
1389 } else {
1390 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1391 tmp = iwmmxt_load_creg(wrd);
1392 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1393 } else {
1394 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1395 tmp = new_tmp();
1396 if (insn & (1 << 8)) {
1397 if (insn & (1 << 22)) { /* WSTRD */
1398 dead_tmp(tmp);
da6b5335 1399 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1400 } else { /* WSTRW wRd */
1401 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1402 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1403 }
1404 } else {
1405 if (insn & (1 << 22)) { /* WSTRH */
1406 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1407 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1408 } else { /* WSTRB */
1409 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1410 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1411 }
1412 }
18c9b560
AZ
1413 }
1414 }
d9968827 1415 dead_tmp(addr);
18c9b560
AZ
1416 return 0;
1417 }
1418
1419 if ((insn & 0x0f000000) != 0x0e000000)
1420 return 1;
1421
1422 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1423 case 0x000: /* WOR */
1424 wrd = (insn >> 12) & 0xf;
1425 rd0 = (insn >> 0) & 0xf;
1426 rd1 = (insn >> 16) & 0xf;
1427 gen_op_iwmmxt_movq_M0_wRn(rd0);
1428 gen_op_iwmmxt_orq_M0_wRn(rd1);
1429 gen_op_iwmmxt_setpsr_nz();
1430 gen_op_iwmmxt_movq_wRn_M0(wrd);
1431 gen_op_iwmmxt_set_mup();
1432 gen_op_iwmmxt_set_cup();
1433 break;
1434 case 0x011: /* TMCR */
1435 if (insn & 0xf)
1436 return 1;
1437 rd = (insn >> 12) & 0xf;
1438 wrd = (insn >> 16) & 0xf;
1439 switch (wrd) {
1440 case ARM_IWMMXT_wCID:
1441 case ARM_IWMMXT_wCASF:
1442 break;
1443 case ARM_IWMMXT_wCon:
1444 gen_op_iwmmxt_set_cup();
1445 /* Fall through. */
1446 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1447 tmp = iwmmxt_load_creg(wrd);
1448 tmp2 = load_reg(s, rd);
f669df27 1449 tcg_gen_andc_i32(tmp, tmp, tmp2);
da6b5335
FN
1450 dead_tmp(tmp2);
1451 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1452 break;
1453 case ARM_IWMMXT_wCGR0:
1454 case ARM_IWMMXT_wCGR1:
1455 case ARM_IWMMXT_wCGR2:
1456 case ARM_IWMMXT_wCGR3:
1457 gen_op_iwmmxt_set_cup();
da6b5335
FN
1458 tmp = load_reg(s, rd);
1459 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1460 break;
1461 default:
1462 return 1;
1463 }
1464 break;
1465 case 0x100: /* WXOR */
1466 wrd = (insn >> 12) & 0xf;
1467 rd0 = (insn >> 0) & 0xf;
1468 rd1 = (insn >> 16) & 0xf;
1469 gen_op_iwmmxt_movq_M0_wRn(rd0);
1470 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1471 gen_op_iwmmxt_setpsr_nz();
1472 gen_op_iwmmxt_movq_wRn_M0(wrd);
1473 gen_op_iwmmxt_set_mup();
1474 gen_op_iwmmxt_set_cup();
1475 break;
1476 case 0x111: /* TMRC */
1477 if (insn & 0xf)
1478 return 1;
1479 rd = (insn >> 12) & 0xf;
1480 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1481 tmp = iwmmxt_load_creg(wrd);
1482 store_reg(s, rd, tmp);
18c9b560
AZ
1483 break;
1484 case 0x300: /* WANDN */
1485 wrd = (insn >> 12) & 0xf;
1486 rd0 = (insn >> 0) & 0xf;
1487 rd1 = (insn >> 16) & 0xf;
1488 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1489 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1490 gen_op_iwmmxt_andq_M0_wRn(rd1);
1491 gen_op_iwmmxt_setpsr_nz();
1492 gen_op_iwmmxt_movq_wRn_M0(wrd);
1493 gen_op_iwmmxt_set_mup();
1494 gen_op_iwmmxt_set_cup();
1495 break;
1496 case 0x200: /* WAND */
1497 wrd = (insn >> 12) & 0xf;
1498 rd0 = (insn >> 0) & 0xf;
1499 rd1 = (insn >> 16) & 0xf;
1500 gen_op_iwmmxt_movq_M0_wRn(rd0);
1501 gen_op_iwmmxt_andq_M0_wRn(rd1);
1502 gen_op_iwmmxt_setpsr_nz();
1503 gen_op_iwmmxt_movq_wRn_M0(wrd);
1504 gen_op_iwmmxt_set_mup();
1505 gen_op_iwmmxt_set_cup();
1506 break;
1507 case 0x810: case 0xa10: /* WMADD */
1508 wrd = (insn >> 12) & 0xf;
1509 rd0 = (insn >> 0) & 0xf;
1510 rd1 = (insn >> 16) & 0xf;
1511 gen_op_iwmmxt_movq_M0_wRn(rd0);
1512 if (insn & (1 << 21))
1513 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1514 else
1515 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1516 gen_op_iwmmxt_movq_wRn_M0(wrd);
1517 gen_op_iwmmxt_set_mup();
1518 break;
1519 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1520 wrd = (insn >> 12) & 0xf;
1521 rd0 = (insn >> 16) & 0xf;
1522 rd1 = (insn >> 0) & 0xf;
1523 gen_op_iwmmxt_movq_M0_wRn(rd0);
1524 switch ((insn >> 22) & 3) {
1525 case 0:
1526 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1527 break;
1528 case 1:
1529 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1530 break;
1531 case 2:
1532 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1533 break;
1534 case 3:
1535 return 1;
1536 }
1537 gen_op_iwmmxt_movq_wRn_M0(wrd);
1538 gen_op_iwmmxt_set_mup();
1539 gen_op_iwmmxt_set_cup();
1540 break;
1541 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1542 wrd = (insn >> 12) & 0xf;
1543 rd0 = (insn >> 16) & 0xf;
1544 rd1 = (insn >> 0) & 0xf;
1545 gen_op_iwmmxt_movq_M0_wRn(rd0);
1546 switch ((insn >> 22) & 3) {
1547 case 0:
1548 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1549 break;
1550 case 1:
1551 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1552 break;
1553 case 2:
1554 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1555 break;
1556 case 3:
1557 return 1;
1558 }
1559 gen_op_iwmmxt_movq_wRn_M0(wrd);
1560 gen_op_iwmmxt_set_mup();
1561 gen_op_iwmmxt_set_cup();
1562 break;
1563 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1564 wrd = (insn >> 12) & 0xf;
1565 rd0 = (insn >> 16) & 0xf;
1566 rd1 = (insn >> 0) & 0xf;
1567 gen_op_iwmmxt_movq_M0_wRn(rd0);
1568 if (insn & (1 << 22))
1569 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1570 else
1571 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1572 if (!(insn & (1 << 20)))
1573 gen_op_iwmmxt_addl_M0_wRn(wrd);
1574 gen_op_iwmmxt_movq_wRn_M0(wrd);
1575 gen_op_iwmmxt_set_mup();
1576 break;
1577 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1578 wrd = (insn >> 12) & 0xf;
1579 rd0 = (insn >> 16) & 0xf;
1580 rd1 = (insn >> 0) & 0xf;
1581 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1582 if (insn & (1 << 21)) {
1583 if (insn & (1 << 20))
1584 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1585 else
1586 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1587 } else {
1588 if (insn & (1 << 20))
1589 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1590 else
1591 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1592 }
18c9b560
AZ
1593 gen_op_iwmmxt_movq_wRn_M0(wrd);
1594 gen_op_iwmmxt_set_mup();
1595 break;
1596 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1597 wrd = (insn >> 12) & 0xf;
1598 rd0 = (insn >> 16) & 0xf;
1599 rd1 = (insn >> 0) & 0xf;
1600 gen_op_iwmmxt_movq_M0_wRn(rd0);
1601 if (insn & (1 << 21))
1602 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1603 else
1604 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1605 if (!(insn & (1 << 20))) {
e677137d
PB
1606 iwmmxt_load_reg(cpu_V1, wrd);
1607 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1608 }
1609 gen_op_iwmmxt_movq_wRn_M0(wrd);
1610 gen_op_iwmmxt_set_mup();
1611 break;
1612 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1613 wrd = (insn >> 12) & 0xf;
1614 rd0 = (insn >> 16) & 0xf;
1615 rd1 = (insn >> 0) & 0xf;
1616 gen_op_iwmmxt_movq_M0_wRn(rd0);
1617 switch ((insn >> 22) & 3) {
1618 case 0:
1619 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1620 break;
1621 case 1:
1622 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1623 break;
1624 case 2:
1625 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1626 break;
1627 case 3:
1628 return 1;
1629 }
1630 gen_op_iwmmxt_movq_wRn_M0(wrd);
1631 gen_op_iwmmxt_set_mup();
1632 gen_op_iwmmxt_set_cup();
1633 break;
1634 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 16) & 0xf;
1637 rd1 = (insn >> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1639 if (insn & (1 << 22)) {
1640 if (insn & (1 << 20))
1641 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1642 else
1643 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1644 } else {
1645 if (insn & (1 << 20))
1646 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1647 else
1648 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1649 }
18c9b560
AZ
1650 gen_op_iwmmxt_movq_wRn_M0(wrd);
1651 gen_op_iwmmxt_set_mup();
1652 gen_op_iwmmxt_set_cup();
1653 break;
1654 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1655 wrd = (insn >> 12) & 0xf;
1656 rd0 = (insn >> 16) & 0xf;
1657 rd1 = (insn >> 0) & 0xf;
1658 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1659 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1660 tcg_gen_andi_i32(tmp, tmp, 7);
1661 iwmmxt_load_reg(cpu_V1, rd1);
1662 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1663 dead_tmp(tmp);
18c9b560
AZ
1664 gen_op_iwmmxt_movq_wRn_M0(wrd);
1665 gen_op_iwmmxt_set_mup();
1666 break;
1667 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1668 if (((insn >> 6) & 3) == 3)
1669 return 1;
18c9b560
AZ
1670 rd = (insn >> 12) & 0xf;
1671 wrd = (insn >> 16) & 0xf;
da6b5335 1672 tmp = load_reg(s, rd);
18c9b560
AZ
1673 gen_op_iwmmxt_movq_M0_wRn(wrd);
1674 switch ((insn >> 6) & 3) {
1675 case 0:
da6b5335
FN
1676 tmp2 = tcg_const_i32(0xff);
1677 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1678 break;
1679 case 1:
da6b5335
FN
1680 tmp2 = tcg_const_i32(0xffff);
1681 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1682 break;
1683 case 2:
da6b5335
FN
1684 tmp2 = tcg_const_i32(0xffffffff);
1685 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1686 break;
da6b5335
FN
1687 default:
1688 TCGV_UNUSED(tmp2);
1689 TCGV_UNUSED(tmp3);
18c9b560 1690 }
da6b5335
FN
1691 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1692 tcg_temp_free(tmp3);
1693 tcg_temp_free(tmp2);
1694 dead_tmp(tmp);
18c9b560
AZ
1695 gen_op_iwmmxt_movq_wRn_M0(wrd);
1696 gen_op_iwmmxt_set_mup();
1697 break;
1698 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1699 rd = (insn >> 12) & 0xf;
1700 wrd = (insn >> 16) & 0xf;
da6b5335 1701 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1702 return 1;
1703 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335 1704 tmp = new_tmp();
18c9b560
AZ
1705 switch ((insn >> 22) & 3) {
1706 case 0:
da6b5335
FN
1707 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1708 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1709 if (insn & 8) {
1710 tcg_gen_ext8s_i32(tmp, tmp);
1711 } else {
1712 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1713 }
1714 break;
1715 case 1:
da6b5335
FN
1716 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1717 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1718 if (insn & 8) {
1719 tcg_gen_ext16s_i32(tmp, tmp);
1720 } else {
1721 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1722 }
1723 break;
1724 case 2:
da6b5335
FN
1725 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1726 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1727 break;
18c9b560 1728 }
da6b5335 1729 store_reg(s, rd, tmp);
18c9b560
AZ
1730 break;
1731 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1732 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1733 return 1;
da6b5335 1734 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1735 switch ((insn >> 22) & 3) {
1736 case 0:
da6b5335 1737 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1738 break;
1739 case 1:
da6b5335 1740 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1741 break;
1742 case 2:
da6b5335 1743 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1744 break;
18c9b560 1745 }
da6b5335
FN
1746 tcg_gen_shli_i32(tmp, tmp, 28);
1747 gen_set_nzcv(tmp);
1748 dead_tmp(tmp);
18c9b560
AZ
1749 break;
1750 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1751 if (((insn >> 6) & 3) == 3)
1752 return 1;
18c9b560
AZ
1753 rd = (insn >> 12) & 0xf;
1754 wrd = (insn >> 16) & 0xf;
da6b5335 1755 tmp = load_reg(s, rd);
18c9b560
AZ
1756 switch ((insn >> 6) & 3) {
1757 case 0:
da6b5335 1758 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1759 break;
1760 case 1:
da6b5335 1761 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1762 break;
1763 case 2:
da6b5335 1764 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1765 break;
18c9b560 1766 }
da6b5335 1767 dead_tmp(tmp);
18c9b560
AZ
1768 gen_op_iwmmxt_movq_wRn_M0(wrd);
1769 gen_op_iwmmxt_set_mup();
1770 break;
1771 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1772 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1773 return 1;
da6b5335
FN
1774 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1775 tmp2 = new_tmp();
1776 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1777 switch ((insn >> 22) & 3) {
1778 case 0:
1779 for (i = 0; i < 7; i ++) {
da6b5335
FN
1780 tcg_gen_shli_i32(tmp2, tmp2, 4);
1781 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1782 }
1783 break;
1784 case 1:
1785 for (i = 0; i < 3; i ++) {
da6b5335
FN
1786 tcg_gen_shli_i32(tmp2, tmp2, 8);
1787 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1788 }
1789 break;
1790 case 2:
da6b5335
FN
1791 tcg_gen_shli_i32(tmp2, tmp2, 16);
1792 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1793 break;
18c9b560 1794 }
da6b5335
FN
1795 gen_set_nzcv(tmp);
1796 dead_tmp(tmp2);
1797 dead_tmp(tmp);
18c9b560
AZ
1798 break;
1799 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1800 wrd = (insn >> 12) & 0xf;
1801 rd0 = (insn >> 16) & 0xf;
1802 gen_op_iwmmxt_movq_M0_wRn(rd0);
1803 switch ((insn >> 22) & 3) {
1804 case 0:
e677137d 1805 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1806 break;
1807 case 1:
e677137d 1808 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1809 break;
1810 case 2:
e677137d 1811 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1812 break;
1813 case 3:
1814 return 1;
1815 }
1816 gen_op_iwmmxt_movq_wRn_M0(wrd);
1817 gen_op_iwmmxt_set_mup();
1818 break;
1819 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1820 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1821 return 1;
da6b5335
FN
1822 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1823 tmp2 = new_tmp();
1824 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1825 switch ((insn >> 22) & 3) {
1826 case 0:
1827 for (i = 0; i < 7; i ++) {
da6b5335
FN
1828 tcg_gen_shli_i32(tmp2, tmp2, 4);
1829 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1830 }
1831 break;
1832 case 1:
1833 for (i = 0; i < 3; i ++) {
da6b5335
FN
1834 tcg_gen_shli_i32(tmp2, tmp2, 8);
1835 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1836 }
1837 break;
1838 case 2:
da6b5335
FN
1839 tcg_gen_shli_i32(tmp2, tmp2, 16);
1840 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1841 break;
18c9b560 1842 }
da6b5335
FN
1843 gen_set_nzcv(tmp);
1844 dead_tmp(tmp2);
1845 dead_tmp(tmp);
18c9b560
AZ
1846 break;
1847 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1848 rd = (insn >> 12) & 0xf;
1849 rd0 = (insn >> 16) & 0xf;
da6b5335 1850 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1851 return 1;
1852 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 1853 tmp = new_tmp();
18c9b560
AZ
1854 switch ((insn >> 22) & 3) {
1855 case 0:
da6b5335 1856 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1857 break;
1858 case 1:
da6b5335 1859 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1860 break;
1861 case 2:
da6b5335 1862 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1863 break;
18c9b560 1864 }
da6b5335 1865 store_reg(s, rd, tmp);
18c9b560
AZ
1866 break;
1867 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1868 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1869 wrd = (insn >> 12) & 0xf;
1870 rd0 = (insn >> 16) & 0xf;
1871 rd1 = (insn >> 0) & 0xf;
1872 gen_op_iwmmxt_movq_M0_wRn(rd0);
1873 switch ((insn >> 22) & 3) {
1874 case 0:
1875 if (insn & (1 << 21))
1876 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1877 else
1878 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1879 break;
1880 case 1:
1881 if (insn & (1 << 21))
1882 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1883 else
1884 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1885 break;
1886 case 2:
1887 if (insn & (1 << 21))
1888 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1889 else
1890 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1891 break;
1892 case 3:
1893 return 1;
1894 }
1895 gen_op_iwmmxt_movq_wRn_M0(wrd);
1896 gen_op_iwmmxt_set_mup();
1897 gen_op_iwmmxt_set_cup();
1898 break;
1899 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1900 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1901 wrd = (insn >> 12) & 0xf;
1902 rd0 = (insn >> 16) & 0xf;
1903 gen_op_iwmmxt_movq_M0_wRn(rd0);
1904 switch ((insn >> 22) & 3) {
1905 case 0:
1906 if (insn & (1 << 21))
1907 gen_op_iwmmxt_unpacklsb_M0();
1908 else
1909 gen_op_iwmmxt_unpacklub_M0();
1910 break;
1911 case 1:
1912 if (insn & (1 << 21))
1913 gen_op_iwmmxt_unpacklsw_M0();
1914 else
1915 gen_op_iwmmxt_unpackluw_M0();
1916 break;
1917 case 2:
1918 if (insn & (1 << 21))
1919 gen_op_iwmmxt_unpacklsl_M0();
1920 else
1921 gen_op_iwmmxt_unpacklul_M0();
1922 break;
1923 case 3:
1924 return 1;
1925 }
1926 gen_op_iwmmxt_movq_wRn_M0(wrd);
1927 gen_op_iwmmxt_set_mup();
1928 gen_op_iwmmxt_set_cup();
1929 break;
1930 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1931 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1932 wrd = (insn >> 12) & 0xf;
1933 rd0 = (insn >> 16) & 0xf;
1934 gen_op_iwmmxt_movq_M0_wRn(rd0);
1935 switch ((insn >> 22) & 3) {
1936 case 0:
1937 if (insn & (1 << 21))
1938 gen_op_iwmmxt_unpackhsb_M0();
1939 else
1940 gen_op_iwmmxt_unpackhub_M0();
1941 break;
1942 case 1:
1943 if (insn & (1 << 21))
1944 gen_op_iwmmxt_unpackhsw_M0();
1945 else
1946 gen_op_iwmmxt_unpackhuw_M0();
1947 break;
1948 case 2:
1949 if (insn & (1 << 21))
1950 gen_op_iwmmxt_unpackhsl_M0();
1951 else
1952 gen_op_iwmmxt_unpackhul_M0();
1953 break;
1954 case 3:
1955 return 1;
1956 }
1957 gen_op_iwmmxt_movq_wRn_M0(wrd);
1958 gen_op_iwmmxt_set_mup();
1959 gen_op_iwmmxt_set_cup();
1960 break;
1961 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1962 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1963 if (((insn >> 22) & 3) == 0)
1964 return 1;
18c9b560
AZ
1965 wrd = (insn >> 12) & 0xf;
1966 rd0 = (insn >> 16) & 0xf;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1968 tmp = new_tmp();
1969 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1970 dead_tmp(tmp);
18c9b560 1971 return 1;
da6b5335 1972 }
18c9b560 1973 switch ((insn >> 22) & 3) {
18c9b560 1974 case 1:
da6b5335 1975 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1976 break;
1977 case 2:
da6b5335 1978 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1979 break;
1980 case 3:
da6b5335 1981 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1982 break;
1983 }
da6b5335 1984 dead_tmp(tmp);
18c9b560
AZ
1985 gen_op_iwmmxt_movq_wRn_M0(wrd);
1986 gen_op_iwmmxt_set_mup();
1987 gen_op_iwmmxt_set_cup();
1988 break;
1989 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1990 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1991 if (((insn >> 22) & 3) == 0)
1992 return 1;
18c9b560
AZ
1993 wrd = (insn >> 12) & 0xf;
1994 rd0 = (insn >> 16) & 0xf;
1995 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1996 tmp = new_tmp();
1997 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1998 dead_tmp(tmp);
18c9b560 1999 return 1;
da6b5335 2000 }
18c9b560 2001 switch ((insn >> 22) & 3) {
18c9b560 2002 case 1:
da6b5335 2003 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2004 break;
2005 case 2:
da6b5335 2006 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2007 break;
2008 case 3:
da6b5335 2009 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2010 break;
2011 }
da6b5335 2012 dead_tmp(tmp);
18c9b560
AZ
2013 gen_op_iwmmxt_movq_wRn_M0(wrd);
2014 gen_op_iwmmxt_set_mup();
2015 gen_op_iwmmxt_set_cup();
2016 break;
2017 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2018 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2019 if (((insn >> 22) & 3) == 0)
2020 return 1;
18c9b560
AZ
2021 wrd = (insn >> 12) & 0xf;
2022 rd0 = (insn >> 16) & 0xf;
2023 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2024 tmp = new_tmp();
2025 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2026 dead_tmp(tmp);
18c9b560 2027 return 1;
da6b5335 2028 }
18c9b560 2029 switch ((insn >> 22) & 3) {
18c9b560 2030 case 1:
da6b5335 2031 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2032 break;
2033 case 2:
da6b5335 2034 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2035 break;
2036 case 3:
da6b5335 2037 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2038 break;
2039 }
da6b5335 2040 dead_tmp(tmp);
18c9b560
AZ
2041 gen_op_iwmmxt_movq_wRn_M0(wrd);
2042 gen_op_iwmmxt_set_mup();
2043 gen_op_iwmmxt_set_cup();
2044 break;
2045 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2046 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2047 if (((insn >> 22) & 3) == 0)
2048 return 1;
18c9b560
AZ
2049 wrd = (insn >> 12) & 0xf;
2050 rd0 = (insn >> 16) & 0xf;
2051 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2052 tmp = new_tmp();
18c9b560 2053 switch ((insn >> 22) & 3) {
18c9b560 2054 case 1:
da6b5335
FN
2055 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2056 dead_tmp(tmp);
18c9b560 2057 return 1;
da6b5335
FN
2058 }
2059 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2060 break;
2061 case 2:
da6b5335
FN
2062 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2063 dead_tmp(tmp);
18c9b560 2064 return 1;
da6b5335
FN
2065 }
2066 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2067 break;
2068 case 3:
da6b5335
FN
2069 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2070 dead_tmp(tmp);
18c9b560 2071 return 1;
da6b5335
FN
2072 }
2073 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2074 break;
2075 }
da6b5335 2076 dead_tmp(tmp);
18c9b560
AZ
2077 gen_op_iwmmxt_movq_wRn_M0(wrd);
2078 gen_op_iwmmxt_set_mup();
2079 gen_op_iwmmxt_set_cup();
2080 break;
2081 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2082 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2083 wrd = (insn >> 12) & 0xf;
2084 rd0 = (insn >> 16) & 0xf;
2085 rd1 = (insn >> 0) & 0xf;
2086 gen_op_iwmmxt_movq_M0_wRn(rd0);
2087 switch ((insn >> 22) & 3) {
2088 case 0:
2089 if (insn & (1 << 21))
2090 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2091 else
2092 gen_op_iwmmxt_minub_M0_wRn(rd1);
2093 break;
2094 case 1:
2095 if (insn & (1 << 21))
2096 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2097 else
2098 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2099 break;
2100 case 2:
2101 if (insn & (1 << 21))
2102 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2103 else
2104 gen_op_iwmmxt_minul_M0_wRn(rd1);
2105 break;
2106 case 3:
2107 return 1;
2108 }
2109 gen_op_iwmmxt_movq_wRn_M0(wrd);
2110 gen_op_iwmmxt_set_mup();
2111 break;
2112 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2113 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2114 wrd = (insn >> 12) & 0xf;
2115 rd0 = (insn >> 16) & 0xf;
2116 rd1 = (insn >> 0) & 0xf;
2117 gen_op_iwmmxt_movq_M0_wRn(rd0);
2118 switch ((insn >> 22) & 3) {
2119 case 0:
2120 if (insn & (1 << 21))
2121 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2122 else
2123 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2124 break;
2125 case 1:
2126 if (insn & (1 << 21))
2127 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2128 else
2129 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2130 break;
2131 case 2:
2132 if (insn & (1 << 21))
2133 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2134 else
2135 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2136 break;
2137 case 3:
2138 return 1;
2139 }
2140 gen_op_iwmmxt_movq_wRn_M0(wrd);
2141 gen_op_iwmmxt_set_mup();
2142 break;
2143 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2144 case 0x402: case 0x502: case 0x602: case 0x702:
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 rd1 = (insn >> 0) & 0xf;
2148 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2149 tmp = tcg_const_i32((insn >> 20) & 3);
2150 iwmmxt_load_reg(cpu_V1, rd1);
2151 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2152 tcg_temp_free(tmp);
18c9b560
AZ
2153 gen_op_iwmmxt_movq_wRn_M0(wrd);
2154 gen_op_iwmmxt_set_mup();
2155 break;
2156 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2157 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2158 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2159 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2160 wrd = (insn >> 12) & 0xf;
2161 rd0 = (insn >> 16) & 0xf;
2162 rd1 = (insn >> 0) & 0xf;
2163 gen_op_iwmmxt_movq_M0_wRn(rd0);
2164 switch ((insn >> 20) & 0xf) {
2165 case 0x0:
2166 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2167 break;
2168 case 0x1:
2169 gen_op_iwmmxt_subub_M0_wRn(rd1);
2170 break;
2171 case 0x3:
2172 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2173 break;
2174 case 0x4:
2175 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2176 break;
2177 case 0x5:
2178 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2179 break;
2180 case 0x7:
2181 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2182 break;
2183 case 0x8:
2184 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2185 break;
2186 case 0x9:
2187 gen_op_iwmmxt_subul_M0_wRn(rd1);
2188 break;
2189 case 0xb:
2190 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2191 break;
2192 default:
2193 return 1;
2194 }
2195 gen_op_iwmmxt_movq_wRn_M0(wrd);
2196 gen_op_iwmmxt_set_mup();
2197 gen_op_iwmmxt_set_cup();
2198 break;
2199 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2200 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2201 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2202 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2203 wrd = (insn >> 12) & 0xf;
2204 rd0 = (insn >> 16) & 0xf;
2205 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2206 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2207 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2208 tcg_temp_free(tmp);
18c9b560
AZ
2209 gen_op_iwmmxt_movq_wRn_M0(wrd);
2210 gen_op_iwmmxt_set_mup();
2211 gen_op_iwmmxt_set_cup();
2212 break;
2213 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2214 case 0x418: case 0x518: case 0x618: case 0x718:
2215 case 0x818: case 0x918: case 0xa18: case 0xb18:
2216 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2217 wrd = (insn >> 12) & 0xf;
2218 rd0 = (insn >> 16) & 0xf;
2219 rd1 = (insn >> 0) & 0xf;
2220 gen_op_iwmmxt_movq_M0_wRn(rd0);
2221 switch ((insn >> 20) & 0xf) {
2222 case 0x0:
2223 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2224 break;
2225 case 0x1:
2226 gen_op_iwmmxt_addub_M0_wRn(rd1);
2227 break;
2228 case 0x3:
2229 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2230 break;
2231 case 0x4:
2232 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2233 break;
2234 case 0x5:
2235 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2236 break;
2237 case 0x7:
2238 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2239 break;
2240 case 0x8:
2241 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2242 break;
2243 case 0x9:
2244 gen_op_iwmmxt_addul_M0_wRn(rd1);
2245 break;
2246 case 0xb:
2247 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2248 break;
2249 default:
2250 return 1;
2251 }
2252 gen_op_iwmmxt_movq_wRn_M0(wrd);
2253 gen_op_iwmmxt_set_mup();
2254 gen_op_iwmmxt_set_cup();
2255 break;
2256 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2257 case 0x408: case 0x508: case 0x608: case 0x708:
2258 case 0x808: case 0x908: case 0xa08: case 0xb08:
2259 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2260 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2261 return 1;
18c9b560
AZ
2262 wrd = (insn >> 12) & 0xf;
2263 rd0 = (insn >> 16) & 0xf;
2264 rd1 = (insn >> 0) & 0xf;
2265 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2266 switch ((insn >> 22) & 3) {
18c9b560
AZ
2267 case 1:
2268 if (insn & (1 << 21))
2269 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2270 else
2271 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2272 break;
2273 case 2:
2274 if (insn & (1 << 21))
2275 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2276 else
2277 gen_op_iwmmxt_packul_M0_wRn(rd1);
2278 break;
2279 case 3:
2280 if (insn & (1 << 21))
2281 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2282 else
2283 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2284 break;
2285 }
2286 gen_op_iwmmxt_movq_wRn_M0(wrd);
2287 gen_op_iwmmxt_set_mup();
2288 gen_op_iwmmxt_set_cup();
2289 break;
2290 case 0x201: case 0x203: case 0x205: case 0x207:
2291 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2292 case 0x211: case 0x213: case 0x215: case 0x217:
2293 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2294 wrd = (insn >> 5) & 0xf;
2295 rd0 = (insn >> 12) & 0xf;
2296 rd1 = (insn >> 0) & 0xf;
2297 if (rd0 == 0xf || rd1 == 0xf)
2298 return 1;
2299 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2300 tmp = load_reg(s, rd0);
2301 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2302 switch ((insn >> 16) & 0xf) {
2303 case 0x0: /* TMIA */
da6b5335 2304 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2305 break;
2306 case 0x8: /* TMIAPH */
da6b5335 2307 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2308 break;
2309 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2310 if (insn & (1 << 16))
da6b5335 2311 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2312 if (insn & (1 << 17))
da6b5335
FN
2313 tcg_gen_shri_i32(tmp2, tmp2, 16);
2314 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2315 break;
2316 default:
da6b5335
FN
2317 dead_tmp(tmp2);
2318 dead_tmp(tmp);
18c9b560
AZ
2319 return 1;
2320 }
da6b5335
FN
2321 dead_tmp(tmp2);
2322 dead_tmp(tmp);
18c9b560
AZ
2323 gen_op_iwmmxt_movq_wRn_M0(wrd);
2324 gen_op_iwmmxt_set_mup();
2325 break;
2326 default:
2327 return 1;
2328 }
2329
2330 return 0;
2331}
2332
2333/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2334 (ie. an undefined instruction). */
2335static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2336{
2337 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2338 TCGv tmp, tmp2;
18c9b560
AZ
2339
2340 if ((insn & 0x0ff00f10) == 0x0e200010) {
2341 /* Multiply with Internal Accumulate Format */
2342 rd0 = (insn >> 12) & 0xf;
2343 rd1 = insn & 0xf;
2344 acc = (insn >> 5) & 7;
2345
2346 if (acc != 0)
2347 return 1;
2348
3a554c0f
FN
2349 tmp = load_reg(s, rd0);
2350 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2351 switch ((insn >> 16) & 0xf) {
2352 case 0x0: /* MIA */
3a554c0f 2353 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2354 break;
2355 case 0x8: /* MIAPH */
3a554c0f 2356 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2357 break;
2358 case 0xc: /* MIABB */
2359 case 0xd: /* MIABT */
2360 case 0xe: /* MIATB */
2361 case 0xf: /* MIATT */
18c9b560 2362 if (insn & (1 << 16))
3a554c0f 2363 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2364 if (insn & (1 << 17))
3a554c0f
FN
2365 tcg_gen_shri_i32(tmp2, tmp2, 16);
2366 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2367 break;
2368 default:
2369 return 1;
2370 }
3a554c0f
FN
2371 dead_tmp(tmp2);
2372 dead_tmp(tmp);
18c9b560
AZ
2373
2374 gen_op_iwmmxt_movq_wRn_M0(acc);
2375 return 0;
2376 }
2377
2378 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2379 /* Internal Accumulator Access Format */
2380 rdhi = (insn >> 16) & 0xf;
2381 rdlo = (insn >> 12) & 0xf;
2382 acc = insn & 7;
2383
2384 if (acc != 0)
2385 return 1;
2386
2387 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2388 iwmmxt_load_reg(cpu_V0, acc);
2389 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2390 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2391 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2392 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2393 } else { /* MAR */
3a554c0f
FN
2394 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2395 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2396 }
2397 return 0;
2398 }
2399
2400 return 1;
2401}
2402
c1713132
AZ
2403/* Disassemble system coprocessor instruction. Return nonzero if
2404 instruction is not defined. */
2405static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2406{
b75263d6 2407 TCGv tmp, tmp2;
c1713132
AZ
2408 uint32_t rd = (insn >> 12) & 0xf;
2409 uint32_t cp = (insn >> 8) & 0xf;
2410 if (IS_USER(s)) {
2411 return 1;
2412 }
2413
18c9b560 2414 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2415 if (!env->cp[cp].cp_read)
2416 return 1;
8984bd2e
PB
2417 gen_set_pc_im(s->pc);
2418 tmp = new_tmp();
b75263d6
JR
2419 tmp2 = tcg_const_i32(insn);
2420 gen_helper_get_cp(tmp, cpu_env, tmp2);
2421 tcg_temp_free(tmp2);
8984bd2e 2422 store_reg(s, rd, tmp);
c1713132
AZ
2423 } else {
2424 if (!env->cp[cp].cp_write)
2425 return 1;
8984bd2e
PB
2426 gen_set_pc_im(s->pc);
2427 tmp = load_reg(s, rd);
b75263d6
JR
2428 tmp2 = tcg_const_i32(insn);
2429 gen_helper_set_cp(cpu_env, tmp2, tmp);
2430 tcg_temp_free(tmp2);
a60de947 2431 dead_tmp(tmp);
c1713132
AZ
2432 }
2433 return 0;
2434}
2435
9ee6e8bb
PB
2436static int cp15_user_ok(uint32_t insn)
2437{
2438 int cpn = (insn >> 16) & 0xf;
2439 int cpm = insn & 0xf;
2440 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2441
2442 if (cpn == 13 && cpm == 0) {
2443 /* TLS register. */
2444 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2445 return 1;
2446 }
2447 if (cpn == 7) {
2448 /* ISB, DSB, DMB. */
2449 if ((cpm == 5 && op == 4)
2450 || (cpm == 10 && (op == 4 || op == 5)))
2451 return 1;
2452 }
2453 return 0;
2454}
2455
3f26c122
RV
2456static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2457{
2458 TCGv tmp;
2459 int cpn = (insn >> 16) & 0xf;
2460 int cpm = insn & 0xf;
2461 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2462
2463 if (!arm_feature(env, ARM_FEATURE_V6K))
2464 return 0;
2465
2466 if (!(cpn == 13 && cpm == 0))
2467 return 0;
2468
2469 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2470 switch (op) {
2471 case 2:
c5883be2 2472 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2473 break;
2474 case 3:
c5883be2 2475 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2476 break;
2477 case 4:
c5883be2 2478 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2479 break;
2480 default:
3f26c122
RV
2481 return 0;
2482 }
2483 store_reg(s, rd, tmp);
2484
2485 } else {
2486 tmp = load_reg(s, rd);
2487 switch (op) {
2488 case 2:
c5883be2 2489 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2490 break;
2491 case 3:
c5883be2 2492 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2493 break;
2494 case 4:
c5883be2 2495 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2496 break;
2497 default:
c5883be2 2498 dead_tmp(tmp);
3f26c122
RV
2499 return 0;
2500 }
3f26c122
RV
2501 }
2502 return 1;
2503}
2504
b5ff1b31
FB
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    /* Reject privileged accesses from user mode except for the few
       user-accessible registers/operations.  */
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    /* The two cp15 c7 encodings used for wait-for-interrupt: stop
       translation and enter the WFI state.  */
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;

    /* Fast path for the V6K TLS registers; falls through to the generic
       helper when not handled.  */
    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    /* tmp2 carries the raw insn word to the helper, which decodes the
       cp15 register itself; freed at the end for both branches.  */
    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
            (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
2566
9ee6e8bb
PB
/* VFP register-number extraction helpers.
   VFP_REG_SHR shifts right by n, or left when n is negative.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Single-precision register: 4-bit field at 'bigbit' forms the high
   bits, the bit at 'smallbit' is the lowest bit.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register: on VFP3 the bit at 'smallbit' is the high
   (5th) register bit; on earlier cores only 16 D registers are
   encodable, so that bit set means an undefined insn and the enclosing
   function returns 1.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* D (dest), N, M operand fields at their standard bit positions.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2586
4373f3ce
PB
2587/* Move between integer and VFP cores. */
2588static TCGv gen_vfp_mrs(void)
2589{
2590 TCGv tmp = new_tmp();
2591 tcg_gen_mov_i32(tmp, cpu_F0s);
2592 return tmp;
2593}
2594
2595static void gen_vfp_msr(TCGv tmp)
2596{
2597 tcg_gen_mov_i32(cpu_F0s, tmp);
2598 dead_tmp(tmp);
2599}
2600
9ee6e8bb
PB
2601static inline int
2602vfp_enabled(CPUState * env)
2603{
2604 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2605}
2606
ad69471c
PB
2607static void gen_neon_dup_u8(TCGv var, int shift)
2608{
2609 TCGv tmp = new_tmp();
2610 if (shift)
2611 tcg_gen_shri_i32(var, var, shift);
86831435 2612 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2613 tcg_gen_shli_i32(tmp, var, 8);
2614 tcg_gen_or_i32(var, var, tmp);
2615 tcg_gen_shli_i32(tmp, var, 16);
2616 tcg_gen_or_i32(var, var, tmp);
2617 dead_tmp(tmp);
2618}
2619
2620static void gen_neon_dup_low16(TCGv var)
2621{
2622 TCGv tmp = new_tmp();
86831435 2623 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2624 tcg_gen_shli_i32(tmp, var, 16);
2625 tcg_gen_or_i32(var, var, tmp);
2626 dead_tmp(tmp);
2627}
2628
2629static void gen_neon_dup_high16(TCGv var)
2630{
2631 TCGv tmp = new_tmp();
2632 tcg_gen_andi_i32(var, var, 0xffff0000);
2633 tcg_gen_shri_i32(tmp, var, 16);
2634 tcg_gen_or_i32(var, var, tmp);
2635 dead_tmp(tmp);
2636}
2637
b7bcbe95
FB
2638/* Disassemble a VFP instruction. Returns nonzero if an error occured
2639 (ie. an undefined instruction). */
2640static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2641{
2642 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2643 int dp, veclen;
312eea9f 2644 TCGv addr;
4373f3ce 2645 TCGv tmp;
ad69471c 2646 TCGv tmp2;
b7bcbe95 2647
40f137e1
PB
2648 if (!arm_feature(env, ARM_FEATURE_VFP))
2649 return 1;
2650
9ee6e8bb
PB
2651 if (!vfp_enabled(env)) {
2652 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2653 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2654 return 1;
2655 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2656 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2657 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2658 return 1;
2659 }
b7bcbe95
FB
2660 dp = ((insn & 0xf00) == 0xb00);
2661 switch ((insn >> 24) & 0xf) {
2662 case 0xe:
2663 if (insn & (1 << 4)) {
2664 /* single register transfer */
b7bcbe95
FB
2665 rd = (insn >> 12) & 0xf;
2666 if (dp) {
9ee6e8bb
PB
2667 int size;
2668 int pass;
2669
2670 VFP_DREG_N(rn, insn);
2671 if (insn & 0xf)
b7bcbe95 2672 return 1;
9ee6e8bb
PB
2673 if (insn & 0x00c00060
2674 && !arm_feature(env, ARM_FEATURE_NEON))
2675 return 1;
2676
2677 pass = (insn >> 21) & 1;
2678 if (insn & (1 << 22)) {
2679 size = 0;
2680 offset = ((insn >> 5) & 3) * 8;
2681 } else if (insn & (1 << 5)) {
2682 size = 1;
2683 offset = (insn & (1 << 6)) ? 16 : 0;
2684 } else {
2685 size = 2;
2686 offset = 0;
2687 }
18c9b560 2688 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2689 /* vfp->arm */
ad69471c 2690 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2691 switch (size) {
2692 case 0:
9ee6e8bb 2693 if (offset)
ad69471c 2694 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2695 if (insn & (1 << 23))
ad69471c 2696 gen_uxtb(tmp);
9ee6e8bb 2697 else
ad69471c 2698 gen_sxtb(tmp);
9ee6e8bb
PB
2699 break;
2700 case 1:
9ee6e8bb
PB
2701 if (insn & (1 << 23)) {
2702 if (offset) {
ad69471c 2703 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2704 } else {
ad69471c 2705 gen_uxth(tmp);
9ee6e8bb
PB
2706 }
2707 } else {
2708 if (offset) {
ad69471c 2709 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2710 } else {
ad69471c 2711 gen_sxth(tmp);
9ee6e8bb
PB
2712 }
2713 }
2714 break;
2715 case 2:
9ee6e8bb
PB
2716 break;
2717 }
ad69471c 2718 store_reg(s, rd, tmp);
b7bcbe95
FB
2719 } else {
2720 /* arm->vfp */
ad69471c 2721 tmp = load_reg(s, rd);
9ee6e8bb
PB
2722 if (insn & (1 << 23)) {
2723 /* VDUP */
2724 if (size == 0) {
ad69471c 2725 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2726 } else if (size == 1) {
ad69471c 2727 gen_neon_dup_low16(tmp);
9ee6e8bb 2728 }
cbbccffc
PB
2729 for (n = 0; n <= pass * 2; n++) {
2730 tmp2 = new_tmp();
2731 tcg_gen_mov_i32(tmp2, tmp);
2732 neon_store_reg(rn, n, tmp2);
2733 }
2734 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2735 } else {
2736 /* VMOV */
2737 switch (size) {
2738 case 0:
ad69471c
PB
2739 tmp2 = neon_load_reg(rn, pass);
2740 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2741 dead_tmp(tmp2);
9ee6e8bb
PB
2742 break;
2743 case 1:
ad69471c
PB
2744 tmp2 = neon_load_reg(rn, pass);
2745 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2746 dead_tmp(tmp2);
9ee6e8bb
PB
2747 break;
2748 case 2:
9ee6e8bb
PB
2749 break;
2750 }
ad69471c 2751 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2752 }
b7bcbe95 2753 }
9ee6e8bb
PB
2754 } else { /* !dp */
2755 if ((insn & 0x6f) != 0x00)
2756 return 1;
2757 rn = VFP_SREG_N(insn);
18c9b560 2758 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2759 /* vfp->arm */
2760 if (insn & (1 << 21)) {
2761 /* system register */
40f137e1 2762 rn >>= 1;
9ee6e8bb 2763
b7bcbe95 2764 switch (rn) {
40f137e1 2765 case ARM_VFP_FPSID:
4373f3ce 2766 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2767 VFP3 restricts all id registers to privileged
2768 accesses. */
2769 if (IS_USER(s)
2770 && arm_feature(env, ARM_FEATURE_VFP3))
2771 return 1;
4373f3ce 2772 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2773 break;
40f137e1 2774 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2775 if (IS_USER(s))
2776 return 1;
4373f3ce 2777 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2778 break;
40f137e1
PB
2779 case ARM_VFP_FPINST:
2780 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2781 /* Not present in VFP3. */
2782 if (IS_USER(s)
2783 || arm_feature(env, ARM_FEATURE_VFP3))
2784 return 1;
4373f3ce 2785 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2786 break;
40f137e1 2787 case ARM_VFP_FPSCR:
601d70b9 2788 if (rd == 15) {
4373f3ce
PB
2789 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2790 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2791 } else {
2792 tmp = new_tmp();
2793 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2794 }
b7bcbe95 2795 break;
9ee6e8bb
PB
2796 case ARM_VFP_MVFR0:
2797 case ARM_VFP_MVFR1:
2798 if (IS_USER(s)
2799 || !arm_feature(env, ARM_FEATURE_VFP3))
2800 return 1;
4373f3ce 2801 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2802 break;
b7bcbe95
FB
2803 default:
2804 return 1;
2805 }
2806 } else {
2807 gen_mov_F0_vreg(0, rn);
4373f3ce 2808 tmp = gen_vfp_mrs();
b7bcbe95
FB
2809 }
2810 if (rd == 15) {
b5ff1b31 2811 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2812 gen_set_nzcv(tmp);
2813 dead_tmp(tmp);
2814 } else {
2815 store_reg(s, rd, tmp);
2816 }
b7bcbe95
FB
2817 } else {
2818 /* arm->vfp */
4373f3ce 2819 tmp = load_reg(s, rd);
b7bcbe95 2820 if (insn & (1 << 21)) {
40f137e1 2821 rn >>= 1;
b7bcbe95
FB
2822 /* system register */
2823 switch (rn) {
40f137e1 2824 case ARM_VFP_FPSID:
9ee6e8bb
PB
2825 case ARM_VFP_MVFR0:
2826 case ARM_VFP_MVFR1:
b7bcbe95
FB
2827 /* Writes are ignored. */
2828 break;
40f137e1 2829 case ARM_VFP_FPSCR:
4373f3ce
PB
2830 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2831 dead_tmp(tmp);
b5ff1b31 2832 gen_lookup_tb(s);
b7bcbe95 2833 break;
40f137e1 2834 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2835 if (IS_USER(s))
2836 return 1;
71b3c3de
JR
2837 /* TODO: VFP subarchitecture support.
2838 * For now, keep the EN bit only */
2839 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2840 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2841 gen_lookup_tb(s);
2842 break;
2843 case ARM_VFP_FPINST:
2844 case ARM_VFP_FPINST2:
4373f3ce 2845 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2846 break;
b7bcbe95
FB
2847 default:
2848 return 1;
2849 }
2850 } else {
4373f3ce 2851 gen_vfp_msr(tmp);
b7bcbe95
FB
2852 gen_mov_vreg_F0(0, rn);
2853 }
2854 }
2855 }
2856 } else {
2857 /* data processing */
2858 /* The opcode is in bits 23, 21, 20 and 6. */
2859 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2860 if (dp) {
2861 if (op == 15) {
2862 /* rn is opcode */
2863 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2864 } else {
2865 /* rn is register number */
9ee6e8bb 2866 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2867 }
2868
04595bf6 2869 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2870 /* Integer or single precision destination. */
9ee6e8bb 2871 rd = VFP_SREG_D(insn);
b7bcbe95 2872 } else {
9ee6e8bb 2873 VFP_DREG_D(rd, insn);
b7bcbe95 2874 }
04595bf6
PM
2875 if (op == 15 &&
2876 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2877 /* VCVT from int is always from S reg regardless of dp bit.
2878 * VCVT with immediate frac_bits has same format as SREG_M
2879 */
2880 rm = VFP_SREG_M(insn);
b7bcbe95 2881 } else {
9ee6e8bb 2882 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2883 }
2884 } else {
9ee6e8bb 2885 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2886 if (op == 15 && rn == 15) {
2887 /* Double precision destination. */
9ee6e8bb
PB
2888 VFP_DREG_D(rd, insn);
2889 } else {
2890 rd = VFP_SREG_D(insn);
2891 }
04595bf6
PM
2892 /* NB that we implicitly rely on the encoding for the frac_bits
2893 * in VCVT of fixed to float being the same as that of an SREG_M
2894 */
9ee6e8bb 2895 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2896 }
2897
2898 veclen = env->vfp.vec_len;
2899 if (op == 15 && rn > 3)
2900 veclen = 0;
2901
2902 /* Shut up compiler warnings. */
2903 delta_m = 0;
2904 delta_d = 0;
2905 bank_mask = 0;
3b46e624 2906
b7bcbe95
FB
2907 if (veclen > 0) {
2908 if (dp)
2909 bank_mask = 0xc;
2910 else
2911 bank_mask = 0x18;
2912
2913 /* Figure out what type of vector operation this is. */
2914 if ((rd & bank_mask) == 0) {
2915 /* scalar */
2916 veclen = 0;
2917 } else {
2918 if (dp)
2919 delta_d = (env->vfp.vec_stride >> 1) + 1;
2920 else
2921 delta_d = env->vfp.vec_stride + 1;
2922
2923 if ((rm & bank_mask) == 0) {
2924 /* mixed scalar/vector */
2925 delta_m = 0;
2926 } else {
2927 /* vector */
2928 delta_m = delta_d;
2929 }
2930 }
2931 }
2932
2933 /* Load the initial operands. */
2934 if (op == 15) {
2935 switch (rn) {
2936 case 16:
2937 case 17:
2938 /* Integer source */
2939 gen_mov_F0_vreg(0, rm);
2940 break;
2941 case 8:
2942 case 9:
2943 /* Compare */
2944 gen_mov_F0_vreg(dp, rd);
2945 gen_mov_F1_vreg(dp, rm);
2946 break;
2947 case 10:
2948 case 11:
2949 /* Compare with zero */
2950 gen_mov_F0_vreg(dp, rd);
2951 gen_vfp_F1_ld0(dp);
2952 break;
9ee6e8bb
PB
2953 case 20:
2954 case 21:
2955 case 22:
2956 case 23:
644ad806
PB
2957 case 28:
2958 case 29:
2959 case 30:
2960 case 31:
9ee6e8bb
PB
2961 /* Source and destination the same. */
2962 gen_mov_F0_vreg(dp, rd);
2963 break;
b7bcbe95
FB
2964 default:
2965 /* One source operand. */
2966 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2967 break;
b7bcbe95
FB
2968 }
2969 } else {
2970 /* Two source operands. */
2971 gen_mov_F0_vreg(dp, rn);
2972 gen_mov_F1_vreg(dp, rm);
2973 }
2974
2975 for (;;) {
2976 /* Perform the calculation. */
2977 switch (op) {
2978 case 0: /* mac: fd + (fn * fm) */
2979 gen_vfp_mul(dp);
2980 gen_mov_F1_vreg(dp, rd);
2981 gen_vfp_add(dp);
2982 break;
2983 case 1: /* nmac: fd - (fn * fm) */
2984 gen_vfp_mul(dp);
2985 gen_vfp_neg(dp);
2986 gen_mov_F1_vreg(dp, rd);
2987 gen_vfp_add(dp);
2988 break;
2989 case 2: /* msc: -fd + (fn * fm) */
2990 gen_vfp_mul(dp);
2991 gen_mov_F1_vreg(dp, rd);
2992 gen_vfp_sub(dp);
2993 break;
2994 case 3: /* nmsc: -fd - (fn * fm) */
2995 gen_vfp_mul(dp);
b7bcbe95 2996 gen_vfp_neg(dp);
c9fb531a
PB
2997 gen_mov_F1_vreg(dp, rd);
2998 gen_vfp_sub(dp);
b7bcbe95
FB
2999 break;
3000 case 4: /* mul: fn * fm */
3001 gen_vfp_mul(dp);
3002 break;
3003 case 5: /* nmul: -(fn * fm) */
3004 gen_vfp_mul(dp);
3005 gen_vfp_neg(dp);
3006 break;
3007 case 6: /* add: fn + fm */
3008 gen_vfp_add(dp);
3009 break;
3010 case 7: /* sub: fn - fm */
3011 gen_vfp_sub(dp);
3012 break;
3013 case 8: /* div: fn / fm */
3014 gen_vfp_div(dp);
3015 break;
9ee6e8bb
PB
3016 case 14: /* fconst */
3017 if (!arm_feature(env, ARM_FEATURE_VFP3))
3018 return 1;
3019
3020 n = (insn << 12) & 0x80000000;
3021 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3022 if (dp) {
3023 if (i & 0x40)
3024 i |= 0x3f80;
3025 else
3026 i |= 0x4000;
3027 n |= i << 16;
4373f3ce 3028 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3029 } else {
3030 if (i & 0x40)
3031 i |= 0x780;
3032 else
3033 i |= 0x800;
3034 n |= i << 19;
5b340b51 3035 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3036 }
9ee6e8bb 3037 break;
b7bcbe95
FB
3038 case 15: /* extension space */
3039 switch (rn) {
3040 case 0: /* cpy */
3041 /* no-op */
3042 break;
3043 case 1: /* abs */
3044 gen_vfp_abs(dp);
3045 break;
3046 case 2: /* neg */
3047 gen_vfp_neg(dp);
3048 break;
3049 case 3: /* sqrt */
3050 gen_vfp_sqrt(dp);
3051 break;
60011498
PB
3052 case 4: /* vcvtb.f32.f16 */
3053 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3054 return 1;
3055 tmp = gen_vfp_mrs();
3056 tcg_gen_ext16u_i32(tmp, tmp);
3057 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3058 dead_tmp(tmp);
3059 break;
3060 case 5: /* vcvtt.f32.f16 */
3061 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3062 return 1;
3063 tmp = gen_vfp_mrs();
3064 tcg_gen_shri_i32(tmp, tmp, 16);
3065 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3066 dead_tmp(tmp);
3067 break;
3068 case 6: /* vcvtb.f16.f32 */
3069 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3070 return 1;
3071 tmp = new_tmp();
3072 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3073 gen_mov_F0_vreg(0, rd);
3074 tmp2 = gen_vfp_mrs();
3075 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3076 tcg_gen_or_i32(tmp, tmp, tmp2);
3077 dead_tmp(tmp2);
3078 gen_vfp_msr(tmp);
3079 break;
3080 case 7: /* vcvtt.f16.f32 */
3081 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3082 return 1;
3083 tmp = new_tmp();
3084 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3085 tcg_gen_shli_i32(tmp, tmp, 16);
3086 gen_mov_F0_vreg(0, rd);
3087 tmp2 = gen_vfp_mrs();
3088 tcg_gen_ext16u_i32(tmp2, tmp2);
3089 tcg_gen_or_i32(tmp, tmp, tmp2);
3090 dead_tmp(tmp2);
3091 gen_vfp_msr(tmp);
3092 break;
b7bcbe95
FB
3093 case 8: /* cmp */
3094 gen_vfp_cmp(dp);
3095 break;
3096 case 9: /* cmpe */
3097 gen_vfp_cmpe(dp);
3098 break;
3099 case 10: /* cmpz */
3100 gen_vfp_cmp(dp);
3101 break;
3102 case 11: /* cmpez */
3103 gen_vfp_F1_ld0(dp);
3104 gen_vfp_cmpe(dp);
3105 break;
3106 case 15: /* single<->double conversion */
3107 if (dp)
4373f3ce 3108 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3109 else
4373f3ce 3110 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3111 break;
3112 case 16: /* fuito */
3113 gen_vfp_uito(dp);
3114 break;
3115 case 17: /* fsito */
3116 gen_vfp_sito(dp);
3117 break;
9ee6e8bb
PB
3118 case 20: /* fshto */
3119 if (!arm_feature(env, ARM_FEATURE_VFP3))
3120 return 1;
644ad806 3121 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3122 break;
3123 case 21: /* fslto */
3124 if (!arm_feature(env, ARM_FEATURE_VFP3))
3125 return 1;
644ad806 3126 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3127 break;
3128 case 22: /* fuhto */
3129 if (!arm_feature(env, ARM_FEATURE_VFP3))
3130 return 1;
644ad806 3131 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3132 break;
3133 case 23: /* fulto */
3134 if (!arm_feature(env, ARM_FEATURE_VFP3))
3135 return 1;
644ad806 3136 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3137 break;
b7bcbe95
FB
3138 case 24: /* ftoui */
3139 gen_vfp_toui(dp);
3140 break;
3141 case 25: /* ftouiz */
3142 gen_vfp_touiz(dp);
3143 break;
3144 case 26: /* ftosi */
3145 gen_vfp_tosi(dp);
3146 break;
3147 case 27: /* ftosiz */
3148 gen_vfp_tosiz(dp);
3149 break;
9ee6e8bb
PB
3150 case 28: /* ftosh */
3151 if (!arm_feature(env, ARM_FEATURE_VFP3))
3152 return 1;
644ad806 3153 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3154 break;
3155 case 29: /* ftosl */
3156 if (!arm_feature(env, ARM_FEATURE_VFP3))
3157 return 1;
644ad806 3158 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3159 break;
3160 case 30: /* ftouh */
3161 if (!arm_feature(env, ARM_FEATURE_VFP3))
3162 return 1;
644ad806 3163 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3164 break;
3165 case 31: /* ftoul */
3166 if (!arm_feature(env, ARM_FEATURE_VFP3))
3167 return 1;
644ad806 3168 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3169 break;
b7bcbe95
FB
3170 default: /* undefined */
3171 printf ("rn:%d\n", rn);
3172 return 1;
3173 }
3174 break;
3175 default: /* undefined */
3176 printf ("op:%d\n", op);
3177 return 1;
3178 }
3179
3180 /* Write back the result. */
3181 if (op == 15 && (rn >= 8 && rn <= 11))
3182 ; /* Comparison, do nothing. */
04595bf6
PM
3183 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3184 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3185 gen_mov_vreg_F0(0, rd);
3186 else if (op == 15 && rn == 15)
3187 /* conversion */
3188 gen_mov_vreg_F0(!dp, rd);
3189 else
3190 gen_mov_vreg_F0(dp, rd);
3191
3192 /* break out of the loop if we have finished */
3193 if (veclen == 0)
3194 break;
3195
3196 if (op == 15 && delta_m == 0) {
3197 /* single source one-many */
3198 while (veclen--) {
3199 rd = ((rd + delta_d) & (bank_mask - 1))
3200 | (rd & bank_mask);
3201 gen_mov_vreg_F0(dp, rd);
3202 }
3203 break;
3204 }
3205 /* Setup the next operands. */
3206 veclen--;
3207 rd = ((rd + delta_d) & (bank_mask - 1))
3208 | (rd & bank_mask);
3209
3210 if (op == 15) {
3211 /* One source operand. */
3212 rm = ((rm + delta_m) & (bank_mask - 1))
3213 | (rm & bank_mask);
3214 gen_mov_F0_vreg(dp, rm);
3215 } else {
3216 /* Two source operands. */
3217 rn = ((rn + delta_d) & (bank_mask - 1))
3218 | (rn & bank_mask);
3219 gen_mov_F0_vreg(dp, rn);
3220 if (delta_m) {
3221 rm = ((rm + delta_m) & (bank_mask - 1))
3222 | (rm & bank_mask);
3223 gen_mov_F1_vreg(dp, rm);
3224 }
3225 }
3226 }
3227 }
3228 break;
3229 case 0xc:
3230 case 0xd:
9ee6e8bb 3231 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3232 /* two-register transfer */
3233 rn = (insn >> 16) & 0xf;
3234 rd = (insn >> 12) & 0xf;
3235 if (dp) {
9ee6e8bb
PB
3236 VFP_DREG_M(rm, insn);
3237 } else {
3238 rm = VFP_SREG_M(insn);
3239 }
b7bcbe95 3240
18c9b560 3241 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3242 /* vfp->arm */
3243 if (dp) {
4373f3ce
PB
3244 gen_mov_F0_vreg(0, rm * 2);
3245 tmp = gen_vfp_mrs();
3246 store_reg(s, rd, tmp);
3247 gen_mov_F0_vreg(0, rm * 2 + 1);
3248 tmp = gen_vfp_mrs();
3249 store_reg(s, rn, tmp);
b7bcbe95
FB
3250 } else {
3251 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rn, tmp);
b7bcbe95 3254 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3255 tmp = gen_vfp_mrs();
3256 store_reg(s, rd, tmp);
b7bcbe95
FB
3257 }
3258 } else {
3259 /* arm->vfp */
3260 if (dp) {
4373f3ce
PB
3261 tmp = load_reg(s, rd);
3262 gen_vfp_msr(tmp);
3263 gen_mov_vreg_F0(0, rm * 2);
3264 tmp = load_reg(s, rn);
3265 gen_vfp_msr(tmp);
3266 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3267 } else {
4373f3ce
PB
3268 tmp = load_reg(s, rn);
3269 gen_vfp_msr(tmp);
b7bcbe95 3270 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3271 tmp = load_reg(s, rd);
3272 gen_vfp_msr(tmp);
b7bcbe95
FB
3273 gen_mov_vreg_F0(0, rm + 1);
3274 }
3275 }
3276 } else {
3277 /* Load/store */
3278 rn = (insn >> 16) & 0xf;
3279 if (dp)
9ee6e8bb 3280 VFP_DREG_D(rd, insn);
b7bcbe95 3281 else
9ee6e8bb
PB
3282 rd = VFP_SREG_D(insn);
3283 if (s->thumb && rn == 15) {
312eea9f
FN
3284 addr = new_tmp();
3285 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3286 } else {
312eea9f 3287 addr = load_reg(s, rn);
9ee6e8bb 3288 }
b7bcbe95
FB
3289 if ((insn & 0x01200000) == 0x01000000) {
3290 /* Single load/store */
3291 offset = (insn & 0xff) << 2;
3292 if ((insn & (1 << 23)) == 0)
3293 offset = -offset;
312eea9f 3294 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3295 if (insn & (1 << 20)) {
312eea9f 3296 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3297 gen_mov_vreg_F0(dp, rd);
3298 } else {
3299 gen_mov_F0_vreg(dp, rd);
312eea9f 3300 gen_vfp_st(s, dp, addr);
b7bcbe95 3301 }
312eea9f 3302 dead_tmp(addr);
b7bcbe95
FB
3303 } else {
3304 /* load/store multiple */
3305 if (dp)
3306 n = (insn >> 1) & 0x7f;
3307 else
3308 n = insn & 0xff;
3309
3310 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3311 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3312
3313 if (dp)
3314 offset = 8;
3315 else
3316 offset = 4;
3317 for (i = 0; i < n; i++) {
18c9b560 3318 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3319 /* load */
312eea9f 3320 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3321 gen_mov_vreg_F0(dp, rd + i);
3322 } else {
3323 /* store */
3324 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3325 gen_vfp_st(s, dp, addr);
b7bcbe95 3326 }
312eea9f 3327 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3328 }
3329 if (insn & (1 << 21)) {
3330 /* writeback */
3331 if (insn & (1 << 24))
3332 offset = -offset * n;
3333 else if (dp && (insn & 1))
3334 offset = 4;
3335 else
3336 offset = 0;
3337
3338 if (offset != 0)
312eea9f
FN
3339 tcg_gen_addi_i32(addr, addr, offset);
3340 store_reg(s, rn, addr);
3341 } else {
3342 dead_tmp(addr);
b7bcbe95
FB
3343 }
3344 }
3345 }
3346 break;
3347 default:
3348 /* Should never happen. */
3349 return 1;
3350 }
3351 return 0;
3352}
3353
6e256c93 3354static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3355{
6e256c93
FB
3356 TranslationBlock *tb;
3357
3358 tb = s->tb;
3359 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3360 tcg_gen_goto_tb(n);
8984bd2e 3361 gen_set_pc_im(dest);
57fec1fe 3362 tcg_gen_exit_tb((long)tb + n);
6e256c93 3363 } else {
8984bd2e 3364 gen_set_pc_im(dest);
57fec1fe 3365 tcg_gen_exit_tb(0);
6e256c93 3366 }
c53be334
FB
3367}
3368
8aaca4c0
FB
3369static inline void gen_jmp (DisasContext *s, uint32_t dest)
3370{
551bd27f 3371 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3372 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3373 if (s->thumb)
d9ba4830
PB
3374 dest |= 1;
3375 gen_bx_im(s, dest);
8aaca4c0 3376 } else {
6e256c93 3377 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3378 s->is_jmp = DISAS_TB_JUMP;
3379 }
3380}
3381
d9ba4830 3382static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3383{
ee097184 3384 if (x)
d9ba4830 3385 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3386 else
d9ba4830 3387 gen_sxth(t0);
ee097184 3388 if (y)
d9ba4830 3389 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3390 else
d9ba4830
PB
3391 gen_sxth(t1);
3392 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3393}
3394
3395/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3396static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3397 uint32_t mask;
3398
3399 mask = 0;
3400 if (flags & (1 << 0))
3401 mask |= 0xff;
3402 if (flags & (1 << 1))
3403 mask |= 0xff00;
3404 if (flags & (1 << 2))
3405 mask |= 0xff0000;
3406 if (flags & (1 << 3))
3407 mask |= 0xff000000;
9ee6e8bb 3408
2ae23e75 3409 /* Mask out undefined bits. */
9ee6e8bb
PB
3410 mask &= ~CPSR_RESERVED;
3411 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3412 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3413 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3414 mask &= ~CPSR_IT;
9ee6e8bb 3415 /* Mask out execution state bits. */
2ae23e75 3416 if (!spsr)
e160c51c 3417 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3418 /* Mask out privileged bits. */
3419 if (IS_USER(s))
9ee6e8bb 3420 mask &= CPSR_USER;
b5ff1b31
FB
3421 return mask;
3422}
3423
2fbac54b
FN
3424/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3425static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3426{
d9ba4830 3427 TCGv tmp;
b5ff1b31
FB
3428 if (spsr) {
3429 /* ??? This is also undefined in system mode. */
3430 if (IS_USER(s))
3431 return 1;
d9ba4830
PB
3432
3433 tmp = load_cpu_field(spsr);
3434 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3435 tcg_gen_andi_i32(t0, t0, mask);
3436 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3437 store_cpu_field(tmp, spsr);
b5ff1b31 3438 } else {
2fbac54b 3439 gen_set_cpsr(t0, mask);
b5ff1b31 3440 }
2fbac54b 3441 dead_tmp(t0);
b5ff1b31
FB
3442 gen_lookup_tb(s);
3443 return 0;
3444}
3445
2fbac54b
FN
3446/* Returns nonzero if access to the PSR is not permitted. */
3447static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3448{
3449 TCGv tmp;
3450 tmp = new_tmp();
3451 tcg_gen_movi_i32(tmp, val);
3452 return gen_set_psr(s, mask, spsr, tmp);
3453}
3454
e9bb4aa9
JR
3455/* Generate an old-style exception return. Marks pc as dead. */
3456static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3457{
d9ba4830 3458 TCGv tmp;
e9bb4aa9 3459 store_reg(s, 15, pc);
d9ba4830
PB
3460 tmp = load_cpu_field(spsr);
3461 gen_set_cpsr(tmp, 0xffffffff);
3462 dead_tmp(tmp);
b5ff1b31
FB
3463 s->is_jmp = DISAS_UPDATE;
3464}
3465
b0109805
PB
3466/* Generate a v6 exception return. Marks both values as dead. */
3467static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3468{
b0109805
PB
3469 gen_set_cpsr(cpsr, 0xffffffff);
3470 dead_tmp(cpsr);
3471 store_reg(s, 15, pc);
9ee6e8bb
PB
3472 s->is_jmp = DISAS_UPDATE;
3473}
3b46e624 3474
9ee6e8bb
PB
3475static inline void
3476gen_set_condexec (DisasContext *s)
3477{
3478 if (s->condexec_mask) {
8f01245e
PB
3479 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3480 TCGv tmp = new_tmp();
3481 tcg_gen_movi_i32(tmp, val);
d9ba4830 3482 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3483 }
3484}
3b46e624 3485
9ee6e8bb
PB
3486static void gen_nop_hint(DisasContext *s, int val)
3487{
3488 switch (val) {
3489 case 3: /* wfi */
8984bd2e 3490 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3491 s->is_jmp = DISAS_WFI;
3492 break;
3493 case 2: /* wfe */
3494 case 4: /* sev */
3495 /* TODO: Implement SEV and WFE. May help SMP performance. */
3496 default: /* nop */
3497 break;
3498 }
3499}
99c475ab 3500
ad69471c 3501#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3502
dd8fbd78 3503static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3504{
3505 switch (size) {
dd8fbd78
FN
3506 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3507 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3508 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3509 default: return 1;
3510 }
3511 return 0;
3512}
3513
dd8fbd78 3514static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3515{
3516 switch (size) {
dd8fbd78
FN
3517 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3518 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3519 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3520 default: return;
3521 }
3522}
3523
/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Map the 32-bit NEON saturating add/sub onto the ARM core saturate
   helpers, dropping the env argument `e`.
   FIXME: This is wrong.  They set the wrong overflow bit. */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3535
/* Expand the per-size/sign NEON integer helper `name` that also takes
   cpu_env (needed e.g. by saturating ops to set QC).  Requires `size`,
   `u`, `tmp` and `tmp2` in scope; result is left in tmp.  An invalid
   size/sign combination returns 1 from the enclosing function. */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3558
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env.
   Requires `size`, `u`, `tmp` and `tmp2` in scope; result in tmp. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3581
dd8fbd78 3582static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3583{
dd8fbd78
FN
3584 TCGv tmp = new_tmp();
3585 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3586 return tmp;
9ee6e8bb
PB
3587}
3588
dd8fbd78 3589static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3590{
dd8fbd78
FN
3591 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3592 dead_tmp(var);
9ee6e8bb
PB
3593}
3594
dd8fbd78 3595static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3596{
dd8fbd78 3597 TCGv tmp;
9ee6e8bb 3598 if (size == 1) {
dd8fbd78 3599 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3600 } else {
dd8fbd78
FN
3601 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3602 if (reg & 1) {
3603 gen_neon_dup_low16(tmp);
3604 } else {
3605 gen_neon_dup_high16(tmp);
3606 }
9ee6e8bb 3607 }
dd8fbd78 3608 return tmp;
9ee6e8bb
PB
3609}
3610
19457615
FN
3611static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3612{
3613 TCGv rd, rm, tmp;
3614
3615 rd = new_tmp();
3616 rm = new_tmp();
3617 tmp = new_tmp();
3618
3619 tcg_gen_andi_i32(rd, t0, 0xff);
3620 tcg_gen_shri_i32(tmp, t0, 8);
3621 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3622 tcg_gen_or_i32(rd, rd, tmp);
3623 tcg_gen_shli_i32(tmp, t1, 16);
3624 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3625 tcg_gen_or_i32(rd, rd, tmp);
3626 tcg_gen_shli_i32(tmp, t1, 8);
3627 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3628 tcg_gen_or_i32(rd, rd, tmp);
3629
3630 tcg_gen_shri_i32(rm, t0, 8);
3631 tcg_gen_andi_i32(rm, rm, 0xff);
3632 tcg_gen_shri_i32(tmp, t0, 16);
3633 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3634 tcg_gen_or_i32(rm, rm, tmp);
3635 tcg_gen_shli_i32(tmp, t1, 8);
3636 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3637 tcg_gen_or_i32(rm, rm, tmp);
3638 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3639 tcg_gen_or_i32(t1, rm, tmp);
3640 tcg_gen_mov_i32(t0, rd);
3641
3642 dead_tmp(tmp);
3643 dead_tmp(rm);
3644 dead_tmp(rd);
3645}
3646
3647static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3648{
3649 TCGv rd, rm, tmp;
3650
3651 rd = new_tmp();
3652 rm = new_tmp();
3653 tmp = new_tmp();
3654
3655 tcg_gen_andi_i32(rd, t0, 0xff);
3656 tcg_gen_shli_i32(tmp, t1, 8);
3657 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3658 tcg_gen_or_i32(rd, rd, tmp);
3659 tcg_gen_shli_i32(tmp, t0, 16);
3660 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3661 tcg_gen_or_i32(rd, rd, tmp);
3662 tcg_gen_shli_i32(tmp, t1, 24);
3663 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3664 tcg_gen_or_i32(rd, rd, tmp);
3665
3666 tcg_gen_andi_i32(rm, t1, 0xff000000);
3667 tcg_gen_shri_i32(tmp, t0, 8);
3668 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3669 tcg_gen_or_i32(rm, rm, tmp);
3670 tcg_gen_shri_i32(tmp, t1, 8);
3671 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3672 tcg_gen_or_i32(rm, rm, tmp);
3673 tcg_gen_shri_i32(tmp, t0, 16);
3674 tcg_gen_andi_i32(tmp, tmp, 0xff);
3675 tcg_gen_or_i32(t1, rm, tmp);
3676 tcg_gen_mov_i32(t0, rd);
3677
3678 dead_tmp(tmp);
3679 dead_tmp(rm);
3680 dead_tmp(rd);
3681}
3682
3683static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3684{
3685 TCGv tmp, tmp2;
3686
3687 tmp = new_tmp();
3688 tmp2 = new_tmp();
3689
3690 tcg_gen_andi_i32(tmp, t0, 0xffff);
3691 tcg_gen_shli_i32(tmp2, t1, 16);
3692 tcg_gen_or_i32(tmp, tmp, tmp2);
3693 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3694 tcg_gen_shri_i32(tmp2, t0, 16);
3695 tcg_gen_or_i32(t1, t1, tmp2);
3696 tcg_gen_mov_i32(t0, tmp);
3697
3698 dead_tmp(tmp2);
3699 dead_tmp(tmp);
3700}
3701
9ee6e8bb
PB
3702static void gen_neon_unzip(int reg, int q, int tmp, int size)
3703{
3704 int n;
dd8fbd78 3705 TCGv t0, t1;
9ee6e8bb
PB
3706
3707 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3708 t0 = neon_load_reg(reg, n);
3709 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3710 switch (size) {
dd8fbd78
FN
3711 case 0: gen_neon_unzip_u8(t0, t1); break;
3712 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3713 case 2: /* no-op */; break;
3714 default: abort();
3715 }
dd8fbd78
FN
3716 neon_store_scratch(tmp + n, t0);
3717 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3718 }
3719}
3720
19457615
FN
3721static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3722{
3723 TCGv rd, tmp;
3724
3725 rd = new_tmp();
3726 tmp = new_tmp();
3727
3728 tcg_gen_shli_i32(rd, t0, 8);
3729 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3730 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3731 tcg_gen_or_i32(rd, rd, tmp);
3732
3733 tcg_gen_shri_i32(t1, t1, 8);
3734 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3735 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3736 tcg_gen_or_i32(t1, t1, tmp);
3737 tcg_gen_mov_i32(t0, rd);
3738
3739 dead_tmp(tmp);
3740 dead_tmp(rd);
3741}
3742
3743static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3744{
3745 TCGv rd, tmp;
3746
3747 rd = new_tmp();
3748 tmp = new_tmp();
3749
3750 tcg_gen_shli_i32(rd, t0, 16);
3751 tcg_gen_andi_i32(tmp, t1, 0xffff);
3752 tcg_gen_or_i32(rd, rd, tmp);
3753 tcg_gen_shri_i32(t1, t1, 16);
3754 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3755 tcg_gen_or_i32(t1, t1, tmp);
3756 tcg_gen_mov_i32(t0, rd);
3757
3758 dead_tmp(tmp);
3759 dead_tmp(rd);
3760}
3761
3762
/* {nregs, interleave, spacing} for each op value of the NEON
   "load/store multiple structures" (VLDn/VSTn) encodings. */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3780
3781/* Translate a NEON load/store element instruction. Return nonzero if the
3782 instruction is invalid. */
3783static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3784{
3785 int rd, rn, rm;
3786 int op;
3787 int nregs;
3788 int interleave;
84496233 3789 int spacing;
9ee6e8bb
PB
3790 int stride;
3791 int size;
3792 int reg;
3793 int pass;
3794 int load;
3795 int shift;
9ee6e8bb 3796 int n;
1b2b1e54 3797 TCGv addr;
b0109805 3798 TCGv tmp;
8f8e3aa4 3799 TCGv tmp2;
84496233 3800 TCGv_i64 tmp64;
9ee6e8bb
PB
3801
3802 if (!vfp_enabled(env))
3803 return 1;
3804 VFP_DREG_D(rd, insn);
3805 rn = (insn >> 16) & 0xf;
3806 rm = insn & 0xf;
3807 load = (insn & (1 << 21)) != 0;
1b2b1e54 3808 addr = new_tmp();
9ee6e8bb
PB
3809 if ((insn & (1 << 23)) == 0) {
3810 /* Load store all elements. */
3811 op = (insn >> 8) & 0xf;
3812 size = (insn >> 6) & 3;
84496233 3813 if (op > 10)
9ee6e8bb
PB
3814 return 1;
3815 nregs = neon_ls_element_type[op].nregs;
3816 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3817 spacing = neon_ls_element_type[op].spacing;
3818 if (size == 3 && (interleave | spacing) != 1)
3819 return 1;
dcc65026 3820 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3821 stride = (1 << size) * interleave;
3822 for (reg = 0; reg < nregs; reg++) {
3823 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3824 load_reg_var(s, addr, rn);
3825 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3826 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3827 load_reg_var(s, addr, rn);
3828 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3829 }
84496233
JR
3830 if (size == 3) {
3831 if (load) {
3832 tmp64 = gen_ld64(addr, IS_USER(s));
3833 neon_store_reg64(tmp64, rd);
3834 tcg_temp_free_i64(tmp64);
3835 } else {
3836 tmp64 = tcg_temp_new_i64();
3837 neon_load_reg64(tmp64, rd);
3838 gen_st64(tmp64, addr, IS_USER(s));
3839 }
3840 tcg_gen_addi_i32(addr, addr, stride);
3841 } else {
3842 for (pass = 0; pass < 2; pass++) {
3843 if (size == 2) {
3844 if (load) {
3845 tmp = gen_ld32(addr, IS_USER(s));
3846 neon_store_reg(rd, pass, tmp);
3847 } else {
3848 tmp = neon_load_reg(rd, pass);
3849 gen_st32(tmp, addr, IS_USER(s));
3850 }
1b2b1e54 3851 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3852 } else if (size == 1) {
3853 if (load) {
3854 tmp = gen_ld16u(addr, IS_USER(s));
3855 tcg_gen_addi_i32(addr, addr, stride);
3856 tmp2 = gen_ld16u(addr, IS_USER(s));
3857 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3858 tcg_gen_shli_i32(tmp2, tmp2, 16);
3859 tcg_gen_or_i32(tmp, tmp, tmp2);
84496233
JR
3860 dead_tmp(tmp2);
3861 neon_store_reg(rd, pass, tmp);
3862 } else {
3863 tmp = neon_load_reg(rd, pass);
3864 tmp2 = new_tmp();
3865 tcg_gen_shri_i32(tmp2, tmp, 16);
3866 gen_st16(tmp, addr, IS_USER(s));
3867 tcg_gen_addi_i32(addr, addr, stride);
3868 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3869 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3870 }
84496233
JR
3871 } else /* size == 0 */ {
3872 if (load) {
3873 TCGV_UNUSED(tmp2);
3874 for (n = 0; n < 4; n++) {
3875 tmp = gen_ld8u(addr, IS_USER(s));
3876 tcg_gen_addi_i32(addr, addr, stride);
3877 if (n == 0) {
3878 tmp2 = tmp;
3879 } else {
41ba8341
PB
3880 tcg_gen_shli_i32(tmp, tmp, n * 8);
3881 tcg_gen_or_i32(tmp2, tmp2, tmp);
84496233
JR
3882 dead_tmp(tmp);
3883 }
9ee6e8bb 3884 }
84496233
JR
3885 neon_store_reg(rd, pass, tmp2);
3886 } else {
3887 tmp2 = neon_load_reg(rd, pass);
3888 for (n = 0; n < 4; n++) {
3889 tmp = new_tmp();
3890 if (n == 0) {
3891 tcg_gen_mov_i32(tmp, tmp2);
3892 } else {
3893 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3894 }
3895 gen_st8(tmp, addr, IS_USER(s));
3896 tcg_gen_addi_i32(addr, addr, stride);
3897 }
3898 dead_tmp(tmp2);
9ee6e8bb
PB
3899 }
3900 }
3901 }
3902 }
84496233 3903 rd += spacing;
9ee6e8bb
PB
3904 }
3905 stride = nregs * 8;
3906 } else {
3907 size = (insn >> 10) & 3;
3908 if (size == 3) {
3909 /* Load single element to all lanes. */
3910 if (!load)
3911 return 1;
3912 size = (insn >> 6) & 3;
3913 nregs = ((insn >> 8) & 3) + 1;
3914 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3915 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3916 for (reg = 0; reg < nregs; reg++) {
3917 switch (size) {
3918 case 0:
1b2b1e54 3919 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3920 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3921 break;
3922 case 1:
1b2b1e54 3923 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3924 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3925 break;
3926 case 2:
1b2b1e54 3927 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3928 break;
3929 case 3:
3930 return 1;
a50f5b91
PB
3931 default: /* Avoid compiler warnings. */
3932 abort();
99c475ab 3933 }
1b2b1e54 3934 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3935 tmp2 = new_tmp();
3936 tcg_gen_mov_i32(tmp2, tmp);
3937 neon_store_reg(rd, 0, tmp2);
3018f259 3938 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3939 rd += stride;
3940 }
3941 stride = (1 << size) * nregs;
3942 } else {
3943 /* Single element. */
3944 pass = (insn >> 7) & 1;
3945 switch (size) {
3946 case 0:
3947 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3948 stride = 1;
3949 break;
3950 case 1:
3951 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3952 stride = (insn & (1 << 5)) ? 2 : 1;
3953 break;
3954 case 2:
3955 shift = 0;
9ee6e8bb
PB
3956 stride = (insn & (1 << 6)) ? 2 : 1;
3957 break;
3958 default:
3959 abort();
3960 }
3961 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3962 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3963 for (reg = 0; reg < nregs; reg++) {
3964 if (load) {
9ee6e8bb
PB
3965 switch (size) {
3966 case 0:
1b2b1e54 3967 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3968 break;
3969 case 1:
1b2b1e54 3970 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3971 break;
3972 case 2:
1b2b1e54 3973 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3974 break;
a50f5b91
PB
3975 default: /* Avoid compiler warnings. */
3976 abort();
9ee6e8bb
PB
3977 }
3978 if (size != 2) {
8f8e3aa4
PB
3979 tmp2 = neon_load_reg(rd, pass);
3980 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3981 dead_tmp(tmp2);
9ee6e8bb 3982 }
8f8e3aa4 3983 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3984 } else { /* Store */
8f8e3aa4
PB
3985 tmp = neon_load_reg(rd, pass);
3986 if (shift)
3987 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3988 switch (size) {
3989 case 0:
1b2b1e54 3990 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3991 break;
3992 case 1:
1b2b1e54 3993 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3994 break;
3995 case 2:
1b2b1e54 3996 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3997 break;
99c475ab 3998 }
99c475ab 3999 }
9ee6e8bb 4000 rd += stride;
1b2b1e54 4001 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4002 }
9ee6e8bb 4003 stride = nregs * (1 << size);
99c475ab 4004 }
9ee6e8bb 4005 }
1b2b1e54 4006 dead_tmp(addr);
9ee6e8bb 4007 if (rm != 15) {
b26eefb6
PB
4008 TCGv base;
4009
4010 base = load_reg(s, rn);
9ee6e8bb 4011 if (rm == 13) {
b26eefb6 4012 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4013 } else {
b26eefb6
PB
4014 TCGv index;
4015 index = load_reg(s, rm);
4016 tcg_gen_add_i32(base, base, index);
4017 dead_tmp(index);
9ee6e8bb 4018 }
b26eefb6 4019 store_reg(s, rn, base);
9ee6e8bb
PB
4020 }
4021 return 0;
4022}
3b46e624 4023
/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);       /* bits of t selected by c */
    tcg_gen_andc_i32(f, f, c);      /* bits of f where c is clear */
    tcg_gen_or_i32(dest, t, f);
}
4031
a7812ae4 4032static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4033{
4034 switch (size) {
4035 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4036 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4037 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4038 default: abort();
4039 }
4040}
4041
a7812ae4 4042static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4043{
4044 switch (size) {
4045 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4046 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4047 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4048 default: abort();
4049 }
4050}
4051
a7812ae4 4052static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4053{
4054 switch (size) {
4055 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4056 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4057 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4058 default: abort();
4059 }
4060}
4061
4062static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4063 int q, int u)
4064{
4065 if (q) {
4066 if (u) {
4067 switch (size) {
4068 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4069 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4070 default: abort();
4071 }
4072 } else {
4073 switch (size) {
4074 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4075 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4076 default: abort();
4077 }
4078 }
4079 } else {
4080 if (u) {
4081 switch (size) {
4082 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4083 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4084 default: abort();
4085 }
4086 } else {
4087 switch (size) {
4088 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4089 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4090 default: abort();
4091 }
4092 }
4093 }
4094}
4095
a7812ae4 4096static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4097{
4098 if (u) {
4099 switch (size) {
4100 case 0: gen_helper_neon_widen_u8(dest, src); break;
4101 case 1: gen_helper_neon_widen_u16(dest, src); break;
4102 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4103 default: abort();
4104 }
4105 } else {
4106 switch (size) {
4107 case 0: gen_helper_neon_widen_s8(dest, src); break;
4108 case 1: gen_helper_neon_widen_s16(dest, src); break;
4109 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4110 default: abort();
4111 }
4112 }
4113 dead_tmp(src);
4114}
4115
4116static inline void gen_neon_addl(int size)
4117{
4118 switch (size) {
4119 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4120 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4121 case 2: tcg_gen_add_i64(CPU_V001); break;
4122 default: abort();
4123 }
4124}
4125
4126static inline void gen_neon_subl(int size)
4127{
4128 switch (size) {
4129 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4130 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4131 case 2: tcg_gen_sub_i64(CPU_V001); break;
4132 default: abort();
4133 }
4134}
4135
a7812ae4 4136static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4137{
4138 switch (size) {
4139 case 0: gen_helper_neon_negl_u16(var, var); break;
4140 case 1: gen_helper_neon_negl_u32(var, var); break;
4141 case 2: gen_helper_neon_negl_u64(var, var); break;
4142 default: abort();
4143 }
4144}
4145
a7812ae4 4146static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4147{
4148 switch (size) {
4149 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4150 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4151 default: abort();
4152 }
4153}
4154
a7812ae4 4155static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4156{
a7812ae4 4157 TCGv_i64 tmp;
ad69471c
PB
4158
4159 switch ((size << 1) | u) {
4160 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4161 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4162 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4163 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4164 case 4:
4165 tmp = gen_muls_i64_i32(a, b);
4166 tcg_gen_mov_i64(dest, tmp);
4167 break;
4168 case 5:
4169 tmp = gen_mulu_i64_i32(a, b);
4170 tcg_gen_mov_i64(dest, tmp);
4171 break;
4172 default: abort();
4173 }
ad69471c
PB
4174}
4175
9ee6e8bb
PB
4176/* Translate a NEON data processing instruction. Return nonzero if the
4177 instruction is invalid.
ad69471c
PB
4178 We process data in a mixture of 32-bit and 64-bit chunks.
4179 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4180
9ee6e8bb
PB
4181static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4182{
4183 int op;
4184 int q;
4185 int rd, rn, rm;
4186 int size;
4187 int shift;
4188 int pass;
4189 int count;
4190 int pairwise;
4191 int u;
4192 int n;
ca9a32e4 4193 uint32_t imm, mask;
b75263d6 4194 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4195 TCGv_i64 tmp64;
9ee6e8bb
PB
4196
4197 if (!vfp_enabled(env))
4198 return 1;
4199 q = (insn & (1 << 6)) != 0;
4200 u = (insn >> 24) & 1;
4201 VFP_DREG_D(rd, insn);
4202 VFP_DREG_N(rn, insn);
4203 VFP_DREG_M(rm, insn);
4204 size = (insn >> 20) & 3;
4205 if ((insn & (1 << 23)) == 0) {
4206 /* Three register same length. */
4207 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4208 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4209 || op == 10 || op == 11 || op == 16)) {
4210 /* 64-bit element instructions. */
9ee6e8bb 4211 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4212 neon_load_reg64(cpu_V0, rn + pass);
4213 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4214 switch (op) {
4215 case 1: /* VQADD */
4216 if (u) {
ad69471c 4217 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4218 } else {
ad69471c 4219 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4220 }
9ee6e8bb
PB
4221 break;
4222 case 5: /* VQSUB */
4223 if (u) {
ad69471c
PB
4224 gen_helper_neon_sub_saturate_u64(CPU_V001);
4225 } else {
4226 gen_helper_neon_sub_saturate_s64(CPU_V001);
4227 }
4228 break;
4229 case 8: /* VSHL */
4230 if (u) {
4231 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4232 } else {
4233 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4234 }
4235 break;
4236 case 9: /* VQSHL */
4237 if (u) {
4238 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
def126ce 4239 cpu_V1, cpu_V0);
ad69471c 4240 } else {
def126ce 4241 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
ad69471c
PB
4242 cpu_V1, cpu_V0);
4243 }
4244 break;
4245 case 10: /* VRSHL */
4246 if (u) {
4247 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4248 } else {
ad69471c
PB
4249 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4250 }
4251 break;
4252 case 11: /* VQRSHL */
4253 if (u) {
4254 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4255 cpu_V1, cpu_V0);
4256 } else {
4257 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4258 cpu_V1, cpu_V0);
1e8d4eec 4259 }
9ee6e8bb
PB
4260 break;
4261 case 16:
4262 if (u) {
ad69471c 4263 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4264 } else {
ad69471c 4265 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4266 }
4267 break;
4268 default:
4269 abort();
2c0262af 4270 }
ad69471c 4271 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4272 }
9ee6e8bb 4273 return 0;
2c0262af 4274 }
9ee6e8bb
PB
4275 switch (op) {
4276 case 8: /* VSHL */
4277 case 9: /* VQSHL */
4278 case 10: /* VRSHL */
ad69471c 4279 case 11: /* VQRSHL */
9ee6e8bb 4280 {
ad69471c
PB
4281 int rtmp;
4282 /* Shift instruction operands are reversed. */
4283 rtmp = rn;
9ee6e8bb 4284 rn = rm;
ad69471c 4285 rm = rtmp;
9ee6e8bb
PB
4286 pairwise = 0;
4287 }
2c0262af 4288 break;
9ee6e8bb
PB
4289 case 20: /* VPMAX */
4290 case 21: /* VPMIN */
4291 case 23: /* VPADD */
4292 pairwise = 1;
2c0262af 4293 break;
9ee6e8bb
PB
4294 case 26: /* VPADD (float) */
4295 pairwise = (u && size < 2);
2c0262af 4296 break;
9ee6e8bb
PB
4297 case 30: /* VPMIN/VPMAX (float) */
4298 pairwise = u;
2c0262af 4299 break;
9ee6e8bb
PB
4300 default:
4301 pairwise = 0;
2c0262af 4302 break;
9ee6e8bb 4303 }
dd8fbd78 4304
9ee6e8bb
PB
4305 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4306
4307 if (pairwise) {
4308 /* Pairwise. */
4309 if (q)
4310 n = (pass & 1) * 2;
2c0262af 4311 else
9ee6e8bb
PB
4312 n = 0;
4313 if (pass < q + 1) {
dd8fbd78
FN
4314 tmp = neon_load_reg(rn, n);
4315 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4316 } else {
dd8fbd78
FN
4317 tmp = neon_load_reg(rm, n);
4318 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4319 }
4320 } else {
4321 /* Elementwise. */
dd8fbd78
FN
4322 tmp = neon_load_reg(rn, pass);
4323 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4324 }
4325 switch (op) {
4326 case 0: /* VHADD */
4327 GEN_NEON_INTEGER_OP(hadd);
4328 break;
4329 case 1: /* VQADD */
ad69471c 4330 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4331 break;
9ee6e8bb
PB
4332 case 2: /* VRHADD */
4333 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4334 break;
9ee6e8bb
PB
4335 case 3: /* Logic ops. */
4336 switch ((u << 2) | size) {
4337 case 0: /* VAND */
dd8fbd78 4338 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4339 break;
4340 case 1: /* BIC */
f669df27 4341 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4342 break;
4343 case 2: /* VORR */
dd8fbd78 4344 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4345 break;
4346 case 3: /* VORN */
f669df27 4347 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4348 break;
4349 case 4: /* VEOR */
dd8fbd78 4350 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4351 break;
4352 case 5: /* VBSL */
dd8fbd78
FN
4353 tmp3 = neon_load_reg(rd, pass);
4354 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4355 dead_tmp(tmp3);
9ee6e8bb
PB
4356 break;
4357 case 6: /* VBIT */
dd8fbd78
FN
4358 tmp3 = neon_load_reg(rd, pass);
4359 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4360 dead_tmp(tmp3);
9ee6e8bb
PB
4361 break;
4362 case 7: /* VBIF */
dd8fbd78
FN
4363 tmp3 = neon_load_reg(rd, pass);
4364 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4365 dead_tmp(tmp3);
9ee6e8bb 4366 break;
2c0262af
FB
4367 }
4368 break;
9ee6e8bb
PB
4369 case 4: /* VHSUB */
4370 GEN_NEON_INTEGER_OP(hsub);
4371 break;
4372 case 5: /* VQSUB */
ad69471c 4373 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4374 break;
9ee6e8bb
PB
4375 case 6: /* VCGT */
4376 GEN_NEON_INTEGER_OP(cgt);
4377 break;
4378 case 7: /* VCGE */
4379 GEN_NEON_INTEGER_OP(cge);
4380 break;
4381 case 8: /* VSHL */
ad69471c 4382 GEN_NEON_INTEGER_OP(shl);
2c0262af 4383 break;
9ee6e8bb 4384 case 9: /* VQSHL */
ad69471c 4385 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4386 break;
9ee6e8bb 4387 case 10: /* VRSHL */
ad69471c 4388 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4389 break;
9ee6e8bb 4390 case 11: /* VQRSHL */
ad69471c 4391 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4392 break;
4393 case 12: /* VMAX */
4394 GEN_NEON_INTEGER_OP(max);
4395 break;
4396 case 13: /* VMIN */
4397 GEN_NEON_INTEGER_OP(min);
4398 break;
4399 case 14: /* VABD */
4400 GEN_NEON_INTEGER_OP(abd);
4401 break;
4402 case 15: /* VABA */
4403 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4404 dead_tmp(tmp2);
4405 tmp2 = neon_load_reg(rd, pass);
4406 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4407 break;
4408 case 16:
4409 if (!u) { /* VADD */
dd8fbd78 4410 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4411 return 1;
4412 } else { /* VSUB */
4413 switch (size) {
dd8fbd78
FN
4414 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4415 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4416 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4417 default: return 1;
4418 }
4419 }
4420 break;
4421 case 17:
4422 if (!u) { /* VTST */
4423 switch (size) {
dd8fbd78
FN
4424 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4425 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4426 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4427 default: return 1;
4428 }
4429 } else { /* VCEQ */
4430 switch (size) {
dd8fbd78
FN
4431 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4432 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4433 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4434 default: return 1;
4435 }
4436 }
4437 break;
4438 case 18: /* Multiply. */
4439 switch (size) {
dd8fbd78
FN
4440 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4441 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4442 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4443 default: return 1;
4444 }
dd8fbd78
FN
4445 dead_tmp(tmp2);
4446 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4447 if (u) { /* VMLS */
dd8fbd78 4448 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4449 } else { /* VMLA */
dd8fbd78 4450 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4451 }
4452 break;
4453 case 19: /* VMUL */
4454 if (u) { /* polynomial */
dd8fbd78 4455 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4456 } else { /* Integer */
4457 switch (size) {
dd8fbd78
FN
4458 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4459 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4460 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4461 default: return 1;
4462 }
4463 }
4464 break;
4465 case 20: /* VPMAX */
4466 GEN_NEON_INTEGER_OP(pmax);
4467 break;
4468 case 21: /* VPMIN */
4469 GEN_NEON_INTEGER_OP(pmin);
4470 break;
4471 case 22: /* Hultiply high. */
4472 if (!u) { /* VQDMULH */
4473 switch (size) {
dd8fbd78
FN
4474 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4475 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4476 default: return 1;
4477 }
4478 } else { /* VQRDHMUL */
4479 switch (size) {
dd8fbd78
FN
4480 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4481 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4482 default: return 1;
4483 }
4484 }
4485 break;
4486 case 23: /* VPADD */
4487 if (u)
4488 return 1;
4489 switch (size) {
dd8fbd78
FN
4490 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4491 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4492 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4493 default: return 1;
4494 }
4495 break;
4496 case 26: /* Floating point arithmetic. */
4497 switch ((u << 2) | size) {
4498 case 0: /* VADD */
dd8fbd78 4499 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4500 break;
4501 case 2: /* VSUB */
dd8fbd78 4502 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4503 break;
4504 case 4: /* VPADD */
dd8fbd78 4505 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4506 break;
4507 case 6: /* VABD */
dd8fbd78 4508 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4509 break;
4510 default:
4511 return 1;
4512 }
4513 break;
4514 case 27: /* Float multiply. */
dd8fbd78 4515 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4516 if (!u) {
dd8fbd78
FN
4517 dead_tmp(tmp2);
4518 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4519 if (size == 0) {
dd8fbd78 4520 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4521 } else {
dd8fbd78 4522 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4523 }
4524 }
4525 break;
4526 case 28: /* Float compare. */
4527 if (!u) {
dd8fbd78 4528 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4529 } else {
9ee6e8bb 4530 if (size == 0)
dd8fbd78 4531 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4532 else
dd8fbd78 4533 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4534 }
2c0262af 4535 break;
9ee6e8bb
PB
4536 case 29: /* Float compare absolute. */
4537 if (!u)
4538 return 1;
4539 if (size == 0)
dd8fbd78 4540 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4541 else
dd8fbd78 4542 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4543 break;
9ee6e8bb
PB
4544 case 30: /* Float min/max. */
4545 if (size == 0)
dd8fbd78 4546 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4547 else
dd8fbd78 4548 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4549 break;
4550 case 31:
4551 if (size == 0)
dd8fbd78 4552 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4553 else
dd8fbd78 4554 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4555 break;
9ee6e8bb
PB
4556 default:
4557 abort();
2c0262af 4558 }
dd8fbd78
FN
4559 dead_tmp(tmp2);
4560
9ee6e8bb
PB
4561 /* Save the result. For elementwise operations we can put it
4562 straight into the destination register. For pairwise operations
4563 we have to be careful to avoid clobbering the source operands. */
4564 if (pairwise && rd == rm) {
dd8fbd78 4565 neon_store_scratch(pass, tmp);
9ee6e8bb 4566 } else {
dd8fbd78 4567 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4568 }
4569
4570 } /* for pass */
4571 if (pairwise && rd == rm) {
4572 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4573 tmp = neon_load_scratch(pass);
4574 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4575 }
4576 }
ad69471c 4577 /* End of 3 register same size operations. */
9ee6e8bb
PB
4578 } else if (insn & (1 << 4)) {
4579 if ((insn & 0x00380080) != 0) {
4580 /* Two registers and shift. */
4581 op = (insn >> 8) & 0xf;
4582 if (insn & (1 << 7)) {
4583 /* 64-bit shift. */
4584 size = 3;
4585 } else {
4586 size = 2;
4587 while ((insn & (1 << (size + 19))) == 0)
4588 size--;
4589 }
4590 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4591 /* To avoid excessive duplication of ops we implement shift
4592 by immediate using the variable shift operations. */
4593 if (op < 8) {
4594 /* Shift by immediate:
4595 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4596 /* Right shifts are encoded as N - shift, where N is the
4597 element size in bits. */
4598 if (op <= 4)
4599 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4600 if (size == 3) {
4601 count = q + 1;
4602 } else {
4603 count = q ? 4: 2;
4604 }
4605 switch (size) {
4606 case 0:
4607 imm = (uint8_t) shift;
4608 imm |= imm << 8;
4609 imm |= imm << 16;
4610 break;
4611 case 1:
4612 imm = (uint16_t) shift;
4613 imm |= imm << 16;
4614 break;
4615 case 2:
4616 case 3:
4617 imm = shift;
4618 break;
4619 default:
4620 abort();
4621 }
4622
4623 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4624 if (size == 3) {
4625 neon_load_reg64(cpu_V0, rm + pass);
4626 tcg_gen_movi_i64(cpu_V1, imm);
4627 switch (op) {
4628 case 0: /* VSHR */
4629 case 1: /* VSRA */
4630 if (u)
4631 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4632 else
ad69471c 4633 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4634 break;
ad69471c
PB
4635 case 2: /* VRSHR */
4636 case 3: /* VRSRA */
4637 if (u)
4638 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4639 else
ad69471c 4640 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4641 break;
ad69471c
PB
4642 case 4: /* VSRI */
4643 if (!u)
4644 return 1;
4645 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4646 break;
4647 case 5: /* VSHL, VSLI */
4648 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4649 break;
4650 case 6: /* VQSHL */
4651 if (u)
4652 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4653 else
ad69471c
PB
4654 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4655 break;
4656 case 7: /* VQSHLU */
4657 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4658 break;
9ee6e8bb 4659 }
ad69471c
PB
4660 if (op == 1 || op == 3) {
4661 /* Accumulate. */
4662 neon_load_reg64(cpu_V0, rd + pass);
4663 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4664 } else if (op == 4 || (op == 5 && u)) {
4665 /* Insert */
4666 cpu_abort(env, "VS[LR]I.64 not implemented");
4667 }
4668 neon_store_reg64(cpu_V0, rd + pass);
4669 } else { /* size < 3 */
4670 /* Operands in T0 and T1. */
dd8fbd78
FN
4671 tmp = neon_load_reg(rm, pass);
4672 tmp2 = new_tmp();
4673 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4674 switch (op) {
4675 case 0: /* VSHR */
4676 case 1: /* VSRA */
4677 GEN_NEON_INTEGER_OP(shl);
4678 break;
4679 case 2: /* VRSHR */
4680 case 3: /* VRSRA */
4681 GEN_NEON_INTEGER_OP(rshl);
4682 break;
4683 case 4: /* VSRI */
4684 if (!u)
4685 return 1;
4686 GEN_NEON_INTEGER_OP(shl);
4687 break;
4688 case 5: /* VSHL, VSLI */
4689 switch (size) {
dd8fbd78
FN
4690 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4691 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4692 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4693 default: return 1;
4694 }
4695 break;
4696 case 6: /* VQSHL */
4697 GEN_NEON_INTEGER_OP_ENV(qshl);
4698 break;
4699 case 7: /* VQSHLU */
4700 switch (size) {
dd8fbd78
FN
4701 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4702 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4703 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4704 default: return 1;
4705 }
4706 break;
4707 }
dd8fbd78 4708 dead_tmp(tmp2);
ad69471c
PB
4709
4710 if (op == 1 || op == 3) {
4711 /* Accumulate. */
dd8fbd78
FN
4712 tmp2 = neon_load_reg(rd, pass);
4713 gen_neon_add(size, tmp2, tmp);
4714 dead_tmp(tmp2);
ad69471c
PB
4715 } else if (op == 4 || (op == 5 && u)) {
4716 /* Insert */
4717 switch (size) {
4718 case 0:
4719 if (op == 4)
ca9a32e4 4720 mask = 0xff >> -shift;
ad69471c 4721 else
ca9a32e4
JR
4722 mask = (uint8_t)(0xff << shift);
4723 mask |= mask << 8;
4724 mask |= mask << 16;
ad69471c
PB
4725 break;
4726 case 1:
4727 if (op == 4)
ca9a32e4 4728 mask = 0xffff >> -shift;
ad69471c 4729 else
ca9a32e4
JR
4730 mask = (uint16_t)(0xffff << shift);
4731 mask |= mask << 16;
ad69471c
PB
4732 break;
4733 case 2:
ca9a32e4
JR
4734 if (shift < -31 || shift > 31) {
4735 mask = 0;
4736 } else {
4737 if (op == 4)
4738 mask = 0xffffffffu >> -shift;
4739 else
4740 mask = 0xffffffffu << shift;
4741 }
ad69471c
PB
4742 break;
4743 default:
4744 abort();
4745 }
dd8fbd78 4746 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4747 tcg_gen_andi_i32(tmp, tmp, mask);
4748 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78
FN
4749 tcg_gen_or_i32(tmp, tmp, tmp2);
4750 dead_tmp(tmp2);
ad69471c 4751 }
dd8fbd78 4752 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4753 }
4754 } /* for pass */
4755 } else if (op < 10) {
ad69471c 4756 /* Shift by immediate and narrow:
9ee6e8bb
PB
4757 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4758 shift = shift - (1 << (size + 3));
4759 size++;
9ee6e8bb
PB
4760 switch (size) {
4761 case 1:
ad69471c 4762 imm = (uint16_t)shift;
9ee6e8bb 4763 imm |= imm << 16;
ad69471c 4764 tmp2 = tcg_const_i32(imm);
a7812ae4 4765 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4766 break;
4767 case 2:
ad69471c
PB
4768 imm = (uint32_t)shift;
4769 tmp2 = tcg_const_i32(imm);
a7812ae4 4770 TCGV_UNUSED_I64(tmp64);
4cc633c3 4771 break;
9ee6e8bb 4772 case 3:
a7812ae4
PB
4773 tmp64 = tcg_const_i64(shift);
4774 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4775 break;
4776 default:
4777 abort();
4778 }
4779
ad69471c
PB
4780 for (pass = 0; pass < 2; pass++) {
4781 if (size == 3) {
4782 neon_load_reg64(cpu_V0, rm + pass);
4783 if (q) {
4784 if (u)
a7812ae4 4785 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4786 else
a7812ae4 4787 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4788 } else {
4789 if (u)
a7812ae4 4790 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4791 else
a7812ae4 4792 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4793 }
2c0262af 4794 } else {
ad69471c
PB
4795 tmp = neon_load_reg(rm + pass, 0);
4796 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4797 tmp3 = neon_load_reg(rm + pass, 1);
4798 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4799 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4800 dead_tmp(tmp);
36aa55dc 4801 dead_tmp(tmp3);
9ee6e8bb 4802 }
ad69471c
PB
4803 tmp = new_tmp();
4804 if (op == 8 && !u) {
4805 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4806 } else {
ad69471c
PB
4807 if (op == 8)
4808 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4809 else
ad69471c
PB
4810 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4811 }
2301db49 4812 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4813 } /* for pass */
b75263d6
JR
4814 if (size == 3) {
4815 tcg_temp_free_i64(tmp64);
2301db49
JR
4816 } else {
4817 dead_tmp(tmp2);
b75263d6 4818 }
9ee6e8bb
PB
4819 } else if (op == 10) {
4820 /* VSHLL */
ad69471c 4821 if (q || size == 3)
9ee6e8bb 4822 return 1;
ad69471c
PB
4823 tmp = neon_load_reg(rm, 0);
4824 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4825 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4826 if (pass == 1)
4827 tmp = tmp2;
4828
4829 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4830
9ee6e8bb
PB
4831 if (shift != 0) {
4832 /* The shift is less than the width of the source
ad69471c
PB
4833 type, so we can just shift the whole register. */
4834 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4835 if (size < 2 || !u) {
4836 uint64_t imm64;
4837 if (size == 0) {
4838 imm = (0xffu >> (8 - shift));
4839 imm |= imm << 16;
4840 } else {
4841 imm = 0xffff >> (16 - shift);
9ee6e8bb 4842 }
ad69471c
PB
4843 imm64 = imm | (((uint64_t)imm) << 32);
4844 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4845 }
4846 }
ad69471c 4847 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 4848 }
f73534a5 4849 } else if (op >= 14) {
9ee6e8bb 4850 /* VCVT fixed-point. */
f73534a5
PM
4851 /* We have already masked out the must-be-1 top bit of imm6,
4852 * hence this 32-shift where the ARM ARM has 64-imm6.
4853 */
4854 shift = 32 - shift;
9ee6e8bb 4855 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4856 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 4857 if (!(op & 1)) {
9ee6e8bb 4858 if (u)
4373f3ce 4859 gen_vfp_ulto(0, shift);
9ee6e8bb 4860 else
4373f3ce 4861 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4862 } else {
4863 if (u)
4373f3ce 4864 gen_vfp_toul(0, shift);
9ee6e8bb 4865 else
4373f3ce 4866 gen_vfp_tosl(0, shift);
2c0262af 4867 }
4373f3ce 4868 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4869 }
4870 } else {
9ee6e8bb
PB
4871 return 1;
4872 }
4873 } else { /* (insn & 0x00380080) == 0 */
4874 int invert;
4875
4876 op = (insn >> 8) & 0xf;
4877 /* One register and immediate. */
4878 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4879 invert = (insn & (1 << 5)) != 0;
4880 switch (op) {
4881 case 0: case 1:
4882 /* no-op */
4883 break;
4884 case 2: case 3:
4885 imm <<= 8;
4886 break;
4887 case 4: case 5:
4888 imm <<= 16;
4889 break;
4890 case 6: case 7:
4891 imm <<= 24;
4892 break;
4893 case 8: case 9:
4894 imm |= imm << 16;
4895 break;
4896 case 10: case 11:
4897 imm = (imm << 8) | (imm << 24);
4898 break;
4899 case 12:
8e31209e 4900 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
4901 break;
4902 case 13:
4903 imm = (imm << 16) | 0xffff;
4904 break;
4905 case 14:
4906 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4907 if (invert)
4908 imm = ~imm;
4909 break;
4910 case 15:
4911 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4912 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4913 break;
4914 }
4915 if (invert)
4916 imm = ~imm;
4917
9ee6e8bb
PB
4918 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4919 if (op & 1 && op < 12) {
ad69471c 4920 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4921 if (invert) {
4922 /* The immediate value has already been inverted, so
4923 BIC becomes AND. */
ad69471c 4924 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4925 } else {
ad69471c 4926 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4927 }
9ee6e8bb 4928 } else {
ad69471c
PB
4929 /* VMOV, VMVN. */
4930 tmp = new_tmp();
9ee6e8bb 4931 if (op == 14 && invert) {
ad69471c
PB
4932 uint32_t val;
4933 val = 0;
9ee6e8bb
PB
4934 for (n = 0; n < 4; n++) {
4935 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4936 val |= 0xff << (n * 8);
9ee6e8bb 4937 }
ad69471c
PB
4938 tcg_gen_movi_i32(tmp, val);
4939 } else {
4940 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4941 }
9ee6e8bb 4942 }
ad69471c 4943 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4944 }
4945 }
e4b3861d 4946 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4947 if (size != 3) {
4948 op = (insn >> 8) & 0xf;
4949 if ((insn & (1 << 6)) == 0) {
4950 /* Three registers of different lengths. */
4951 int src1_wide;
4952 int src2_wide;
4953 int prewiden;
4954 /* prewiden, src1_wide, src2_wide */
4955 static const int neon_3reg_wide[16][3] = {
4956 {1, 0, 0}, /* VADDL */
4957 {1, 1, 0}, /* VADDW */
4958 {1, 0, 0}, /* VSUBL */
4959 {1, 1, 0}, /* VSUBW */
4960 {0, 1, 1}, /* VADDHN */
4961 {0, 0, 0}, /* VABAL */
4962 {0, 1, 1}, /* VSUBHN */
4963 {0, 0, 0}, /* VABDL */
4964 {0, 0, 0}, /* VMLAL */
4965 {0, 0, 0}, /* VQDMLAL */
4966 {0, 0, 0}, /* VMLSL */
4967 {0, 0, 0}, /* VQDMLSL */
4968 {0, 0, 0}, /* Integer VMULL */
4969 {0, 0, 0}, /* VQDMULL */
4970 {0, 0, 0} /* Polynomial VMULL */
4971 };
4972
4973 prewiden = neon_3reg_wide[op][0];
4974 src1_wide = neon_3reg_wide[op][1];
4975 src2_wide = neon_3reg_wide[op][2];
4976
ad69471c
PB
4977 if (size == 0 && (op == 9 || op == 11 || op == 13))
4978 return 1;
4979
9ee6e8bb
PB
4980 /* Avoid overlapping operands. Wide source operands are
4981 always aligned so will never overlap with wide
4982 destinations in problematic ways. */
8f8e3aa4 4983 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4984 tmp = neon_load_reg(rm, 1);
4985 neon_store_scratch(2, tmp);
8f8e3aa4 4986 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4987 tmp = neon_load_reg(rn, 1);
4988 neon_store_scratch(2, tmp);
9ee6e8bb 4989 }
a50f5b91 4990 TCGV_UNUSED(tmp3);
9ee6e8bb 4991 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4992 if (src1_wide) {
4993 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4994 TCGV_UNUSED(tmp);
9ee6e8bb 4995 } else {
ad69471c 4996 if (pass == 1 && rd == rn) {
dd8fbd78 4997 tmp = neon_load_scratch(2);
9ee6e8bb 4998 } else {
ad69471c
PB
4999 tmp = neon_load_reg(rn, pass);
5000 }
5001 if (prewiden) {
5002 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5003 }
5004 }
ad69471c
PB
5005 if (src2_wide) {
5006 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5007 TCGV_UNUSED(tmp2);
9ee6e8bb 5008 } else {
ad69471c 5009 if (pass == 1 && rd == rm) {
dd8fbd78 5010 tmp2 = neon_load_scratch(2);
9ee6e8bb 5011 } else {
ad69471c
PB
5012 tmp2 = neon_load_reg(rm, pass);
5013 }
5014 if (prewiden) {
5015 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5016 }
9ee6e8bb
PB
5017 }
5018 switch (op) {
5019 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5020 gen_neon_addl(size);
9ee6e8bb 5021 break;
79b0e534 5022 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5023 gen_neon_subl(size);
9ee6e8bb
PB
5024 break;
5025 case 5: case 7: /* VABAL, VABDL */
5026 switch ((size << 1) | u) {
ad69471c
PB
5027 case 0:
5028 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5029 break;
5030 case 1:
5031 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5032 break;
5033 case 2:
5034 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5035 break;
5036 case 3:
5037 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5038 break;
5039 case 4:
5040 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5041 break;
5042 case 5:
5043 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5044 break;
9ee6e8bb
PB
5045 default: abort();
5046 }
ad69471c
PB
5047 dead_tmp(tmp2);
5048 dead_tmp(tmp);
9ee6e8bb
PB
5049 break;
5050 case 8: case 9: case 10: case 11: case 12: case 13:
5051 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5052 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
5053 dead_tmp(tmp2);
5054 dead_tmp(tmp);
9ee6e8bb
PB
5055 break;
5056 case 14: /* Polynomial VMULL */
5057 cpu_abort(env, "Polynomial VMULL not implemented");
5058
5059 default: /* 15 is RESERVED. */
5060 return 1;
5061 }
5062 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5063 /* Accumulate. */
5064 if (op == 10 || op == 11) {
ad69471c 5065 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
5066 }
5067
9ee6e8bb 5068 if (op != 13) {
ad69471c 5069 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
5070 }
5071
5072 switch (op) {
5073 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 5074 gen_neon_addl(size);
9ee6e8bb
PB
5075 break;
5076 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5077 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5078 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5079 break;
9ee6e8bb
PB
5080 /* Fall through. */
5081 case 13: /* VQDMULL */
ad69471c 5082 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5083 break;
5084 default:
5085 abort();
5086 }
ad69471c 5087 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5088 } else if (op == 4 || op == 6) {
5089 /* Narrowing operation. */
ad69471c 5090 tmp = new_tmp();
79b0e534 5091 if (!u) {
9ee6e8bb 5092 switch (size) {
ad69471c
PB
5093 case 0:
5094 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5095 break;
5096 case 1:
5097 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5098 break;
5099 case 2:
5100 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5101 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5102 break;
9ee6e8bb
PB
5103 default: abort();
5104 }
5105 } else {
5106 switch (size) {
ad69471c
PB
5107 case 0:
5108 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5109 break;
5110 case 1:
5111 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5112 break;
5113 case 2:
5114 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5115 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5116 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5117 break;
9ee6e8bb
PB
5118 default: abort();
5119 }
5120 }
ad69471c
PB
5121 if (pass == 0) {
5122 tmp3 = tmp;
5123 } else {
5124 neon_store_reg(rd, 0, tmp3);
5125 neon_store_reg(rd, 1, tmp);
5126 }
9ee6e8bb
PB
5127 } else {
5128 /* Write back the result. */
ad69471c 5129 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5130 }
5131 }
5132 } else {
5133 /* Two registers and a scalar. */
5134 switch (op) {
5135 case 0: /* Integer VMLA scalar */
5136 case 1: /* Float VMLA scalar */
5137 case 4: /* Integer VMLS scalar */
5138 case 5: /* Floating point VMLS scalar */
5139 case 8: /* Integer VMUL scalar */
5140 case 9: /* Floating point VMUL scalar */
5141 case 12: /* VQDMULH scalar */
5142 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5143 tmp = neon_get_scalar(size, rm);
5144 neon_store_scratch(0, tmp);
9ee6e8bb 5145 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5146 tmp = neon_load_scratch(0);
5147 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5148 if (op == 12) {
5149 if (size == 1) {
dd8fbd78 5150 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5151 } else {
dd8fbd78 5152 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5153 }
5154 } else if (op == 13) {
5155 if (size == 1) {
dd8fbd78 5156 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5157 } else {
dd8fbd78 5158 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5159 }
5160 } else if (op & 1) {
dd8fbd78 5161 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5162 } else {
5163 switch (size) {
dd8fbd78
FN
5164 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5165 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5166 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5167 default: return 1;
5168 }
5169 }
dd8fbd78 5170 dead_tmp(tmp2);
9ee6e8bb
PB
5171 if (op < 8) {
5172 /* Accumulate. */
dd8fbd78 5173 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5174 switch (op) {
5175 case 0:
dd8fbd78 5176 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5177 break;
5178 case 1:
dd8fbd78 5179 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5180 break;
5181 case 4:
dd8fbd78 5182 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5183 break;
5184 case 5:
dd8fbd78 5185 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5186 break;
5187 default:
5188 abort();
5189 }
dd8fbd78 5190 dead_tmp(tmp2);
9ee6e8bb 5191 }
dd8fbd78 5192 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5193 }
5194 break;
5195 case 2: /* VMLAL scalar */
5196 case 3: /* VQDMLAL scalar */
5197 case 6: /* VMLSL scalar */
5198 case 7: /* VQDMLSL scalar */
5199 case 10: /* VMULL scalar */
5200 case 11: /* VQDMULL scalar */
ad69471c
PB
5201 if (size == 0 && (op == 3 || op == 7 || op == 11))
5202 return 1;
5203
dd8fbd78
FN
5204 tmp2 = neon_get_scalar(size, rm);
5205 tmp3 = neon_load_reg(rn, 1);
ad69471c 5206
9ee6e8bb 5207 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5208 if (pass == 0) {
5209 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5210 } else {
dd8fbd78 5211 tmp = tmp3;
9ee6e8bb 5212 }
ad69471c 5213 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5214 dead_tmp(tmp);
9ee6e8bb 5215 if (op == 6 || op == 7) {
ad69471c
PB
5216 gen_neon_negl(cpu_V0, size);
5217 }
5218 if (op != 11) {
5219 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5220 }
9ee6e8bb
PB
5221 switch (op) {
5222 case 2: case 6:
ad69471c 5223 gen_neon_addl(size);
9ee6e8bb
PB
5224 break;
5225 case 3: case 7:
ad69471c
PB
5226 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5227 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5228 break;
5229 case 10:
5230 /* no-op */
5231 break;
5232 case 11:
ad69471c 5233 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5234 break;
5235 default:
5236 abort();
5237 }
ad69471c 5238 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5239 }
dd8fbd78
FN
5240
5241 dead_tmp(tmp2);
5242
9ee6e8bb
PB
5243 break;
5244 default: /* 14 and 15 are RESERVED */
5245 return 1;
5246 }
5247 }
5248 } else { /* size == 3 */
5249 if (!u) {
5250 /* Extract. */
9ee6e8bb 5251 imm = (insn >> 8) & 0xf;
ad69471c
PB
5252
5253 if (imm > 7 && !q)
5254 return 1;
5255
5256 if (imm == 0) {
5257 neon_load_reg64(cpu_V0, rn);
5258 if (q) {
5259 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5260 }
ad69471c
PB
5261 } else if (imm == 8) {
5262 neon_load_reg64(cpu_V0, rn + 1);
5263 if (q) {
5264 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5265 }
ad69471c 5266 } else if (q) {
a7812ae4 5267 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5268 if (imm < 8) {
5269 neon_load_reg64(cpu_V0, rn);
a7812ae4 5270 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5271 } else {
5272 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5273 neon_load_reg64(tmp64, rm);
ad69471c
PB
5274 }
5275 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5276 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5277 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5278 if (imm < 8) {
5279 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5280 } else {
ad69471c
PB
5281 neon_load_reg64(cpu_V1, rm + 1);
5282 imm -= 8;
9ee6e8bb 5283 }
ad69471c 5284 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5285 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5286 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5287 tcg_temp_free_i64(tmp64);
ad69471c 5288 } else {
a7812ae4 5289 /* BUGFIX */
ad69471c 5290 neon_load_reg64(cpu_V0, rn);
a7812ae4 5291 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5292 neon_load_reg64(cpu_V1, rm);
a7812ae4 5293 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5294 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5295 }
5296 neon_store_reg64(cpu_V0, rd);
5297 if (q) {
5298 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5299 }
5300 } else if ((insn & (1 << 11)) == 0) {
5301 /* Two register misc. */
5302 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5303 size = (insn >> 18) & 3;
5304 switch (op) {
5305 case 0: /* VREV64 */
5306 if (size == 3)
5307 return 1;
5308 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5309 tmp = neon_load_reg(rm, pass * 2);
5310 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5311 switch (size) {
dd8fbd78
FN
5312 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5313 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5314 case 2: /* no-op */ break;
5315 default: abort();
5316 }
dd8fbd78 5317 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5318 if (size == 2) {
dd8fbd78 5319 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5320 } else {
9ee6e8bb 5321 switch (size) {
dd8fbd78
FN
5322 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5323 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5324 default: abort();
5325 }
dd8fbd78 5326 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5327 }
5328 }
5329 break;
5330 case 4: case 5: /* VPADDL */
5331 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5332 if (size == 3)
5333 return 1;
ad69471c
PB
5334 for (pass = 0; pass < q + 1; pass++) {
5335 tmp = neon_load_reg(rm, pass * 2);
5336 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5337 tmp = neon_load_reg(rm, pass * 2 + 1);
5338 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5339 switch (size) {
5340 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5341 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5342 case 2: tcg_gen_add_i64(CPU_V001); break;
5343 default: abort();
5344 }
9ee6e8bb
PB
5345 if (op >= 12) {
5346 /* Accumulate. */
ad69471c
PB
5347 neon_load_reg64(cpu_V1, rd + pass);
5348 gen_neon_addl(size);
9ee6e8bb 5349 }
ad69471c 5350 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5351 }
5352 break;
5353 case 33: /* VTRN */
5354 if (size == 2) {
5355 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5356 tmp = neon_load_reg(rm, n);
5357 tmp2 = neon_load_reg(rd, n + 1);
5358 neon_store_reg(rm, n, tmp2);
5359 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5360 }
5361 } else {
5362 goto elementwise;
5363 }
5364 break;
5365 case 34: /* VUZP */
5366 /* Reg Before After
5367 Rd A3 A2 A1 A0 B2 B0 A2 A0
5368 Rm B3 B2 B1 B0 B3 B1 A3 A1
5369 */
5370 if (size == 3)
5371 return 1;
5372 gen_neon_unzip(rd, q, 0, size);
5373 gen_neon_unzip(rm, q, 4, size);
5374 if (q) {
5375 static int unzip_order_q[8] =
5376 {0, 2, 4, 6, 1, 3, 5, 7};
5377 for (n = 0; n < 8; n++) {
5378 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5379 tmp = neon_load_scratch(unzip_order_q[n]);
5380 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5381 }
5382 } else {
5383 static int unzip_order[4] =
5384 {0, 4, 1, 5};
5385 for (n = 0; n < 4; n++) {
5386 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5387 tmp = neon_load_scratch(unzip_order[n]);
5388 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5389 }
5390 }
5391 break;
5392 case 35: /* VZIP */
5393 /* Reg Before After
5394 Rd A3 A2 A1 A0 B1 A1 B0 A0
5395 Rm B3 B2 B1 B0 B3 A3 B2 A2
5396 */
5397 if (size == 3)
5398 return 1;
5399 count = (q ? 4 : 2);
5400 for (n = 0; n < count; n++) {
dd8fbd78
FN
5401 tmp = neon_load_reg(rd, n);
5402 tmp2 = neon_load_reg(rd, n);
9ee6e8bb 5403 switch (size) {
dd8fbd78
FN
5404 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5405 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5406 case 2: /* no-op */; break;
5407 default: abort();
5408 }
dd8fbd78
FN
5409 neon_store_scratch(n * 2, tmp);
5410 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5411 }
5412 for (n = 0; n < count * 2; n++) {
5413 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5414 tmp = neon_load_scratch(n);
5415 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5416 }
5417 break;
5418 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5419 if (size == 3)
5420 return 1;
a50f5b91 5421 TCGV_UNUSED(tmp2);
9ee6e8bb 5422 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5423 neon_load_reg64(cpu_V0, rm + pass);
5424 tmp = new_tmp();
9ee6e8bb 5425 if (op == 36 && q == 0) {
ad69471c 5426 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5427 } else if (q) {
ad69471c 5428 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5429 } else {
ad69471c
PB
5430 gen_neon_narrow_sats(size, tmp, cpu_V0);
5431 }
5432 if (pass == 0) {
5433 tmp2 = tmp;
5434 } else {
5435 neon_store_reg(rd, 0, tmp2);
5436 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5437 }
9ee6e8bb
PB
5438 }
5439 break;
5440 case 38: /* VSHLL */
ad69471c 5441 if (q || size == 3)
9ee6e8bb 5442 return 1;
ad69471c
PB
5443 tmp = neon_load_reg(rm, 0);
5444 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5445 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5446 if (pass == 1)
5447 tmp = tmp2;
5448 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5449 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5450 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5451 }
5452 break;
60011498
PB
5453 case 44: /* VCVT.F16.F32 */
5454 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5455 return 1;
5456 tmp = new_tmp();
5457 tmp2 = new_tmp();
5458 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5459 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5460 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5461 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5462 tcg_gen_shli_i32(tmp2, tmp2, 16);
5463 tcg_gen_or_i32(tmp2, tmp2, tmp);
5464 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5465 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5466 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5467 neon_store_reg(rd, 0, tmp2);
5468 tmp2 = new_tmp();
5469 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5470 tcg_gen_shli_i32(tmp2, tmp2, 16);
5471 tcg_gen_or_i32(tmp2, tmp2, tmp);
5472 neon_store_reg(rd, 1, tmp2);
5473 dead_tmp(tmp);
5474 break;
5475 case 46: /* VCVT.F32.F16 */
5476 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5477 return 1;
5478 tmp3 = new_tmp();
5479 tmp = neon_load_reg(rm, 0);
5480 tmp2 = neon_load_reg(rm, 1);
5481 tcg_gen_ext16u_i32(tmp3, tmp);
5482 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5483 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5484 tcg_gen_shri_i32(tmp3, tmp, 16);
5485 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5486 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5487 dead_tmp(tmp);
5488 tcg_gen_ext16u_i32(tmp3, tmp2);
5489 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5490 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5491 tcg_gen_shri_i32(tmp3, tmp2, 16);
5492 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5493 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5494 dead_tmp(tmp2);
5495 dead_tmp(tmp3);
5496 break;
9ee6e8bb
PB
5497 default:
5498 elementwise:
5499 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5500 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5501 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5502 neon_reg_offset(rm, pass));
dd8fbd78 5503 TCGV_UNUSED(tmp);
9ee6e8bb 5504 } else {
dd8fbd78 5505 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5506 }
5507 switch (op) {
5508 case 1: /* VREV32 */
5509 switch (size) {
dd8fbd78
FN
5510 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5511 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5512 default: return 1;
5513 }
5514 break;
5515 case 2: /* VREV16 */
5516 if (size != 0)
5517 return 1;
dd8fbd78 5518 gen_rev16(tmp);
9ee6e8bb 5519 break;
9ee6e8bb
PB
5520 case 8: /* CLS */
5521 switch (size) {
dd8fbd78
FN
5522 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5523 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5524 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5525 default: return 1;
5526 }
5527 break;
5528 case 9: /* CLZ */
5529 switch (size) {
dd8fbd78
FN
5530 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5531 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5532 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5533 default: return 1;
5534 }
5535 break;
5536 case 10: /* CNT */
5537 if (size != 0)
5538 return 1;
dd8fbd78 5539 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5540 break;
5541 case 11: /* VNOT */
5542 if (size != 0)
5543 return 1;
dd8fbd78 5544 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5545 break;
5546 case 14: /* VQABS */
5547 switch (size) {
dd8fbd78
FN
5548 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5549 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5550 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5551 default: return 1;
5552 }
5553 break;
5554 case 15: /* VQNEG */
5555 switch (size) {
dd8fbd78
FN
5556 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5557 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5558 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5559 default: return 1;
5560 }
5561 break;
5562 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5563 tmp2 = tcg_const_i32(0);
9ee6e8bb 5564 switch(size) {
dd8fbd78
FN
5565 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5566 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5567 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5568 default: return 1;
5569 }
dd8fbd78 5570 tcg_temp_free(tmp2);
9ee6e8bb 5571 if (op == 19)
dd8fbd78 5572 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5573 break;
5574 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5575 tmp2 = tcg_const_i32(0);
9ee6e8bb 5576 switch(size) {
dd8fbd78
FN
5577 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5578 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5579 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5580 default: return 1;
5581 }
dd8fbd78 5582 tcg_temp_free(tmp2);
9ee6e8bb 5583 if (op == 20)
dd8fbd78 5584 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5585 break;
5586 case 18: /* VCEQ #0 */
dd8fbd78 5587 tmp2 = tcg_const_i32(0);
9ee6e8bb 5588 switch(size) {
dd8fbd78
FN
5589 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5590 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5591 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5592 default: return 1;
5593 }
dd8fbd78 5594 tcg_temp_free(tmp2);
9ee6e8bb
PB
5595 break;
5596 case 22: /* VABS */
5597 switch(size) {
dd8fbd78
FN
5598 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5599 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5600 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5601 default: return 1;
5602 }
5603 break;
5604 case 23: /* VNEG */
ad69471c
PB
5605 if (size == 3)
5606 return 1;
dd8fbd78
FN
5607 tmp2 = tcg_const_i32(0);
5608 gen_neon_rsb(size, tmp, tmp2);
5609 tcg_temp_free(tmp2);
9ee6e8bb
PB
5610 break;
5611 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5612 tmp2 = tcg_const_i32(0);
5613 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5614 tcg_temp_free(tmp2);
9ee6e8bb 5615 if (op == 27)
dd8fbd78 5616 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5617 break;
5618 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5619 tmp2 = tcg_const_i32(0);
5620 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5621 tcg_temp_free(tmp2);
9ee6e8bb 5622 if (op == 28)
dd8fbd78 5623 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5624 break;
5625 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5626 tmp2 = tcg_const_i32(0);
5627 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5628 tcg_temp_free(tmp2);
9ee6e8bb
PB
5629 break;
5630 case 30: /* Float VABS */
4373f3ce 5631 gen_vfp_abs(0);
9ee6e8bb
PB
5632 break;
5633 case 31: /* Float VNEG */
4373f3ce 5634 gen_vfp_neg(0);
9ee6e8bb
PB
5635 break;
5636 case 32: /* VSWP */
dd8fbd78
FN
5637 tmp2 = neon_load_reg(rd, pass);
5638 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5639 break;
5640 case 33: /* VTRN */
dd8fbd78 5641 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5642 switch (size) {
dd8fbd78
FN
5643 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5644 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5645 case 2: abort();
5646 default: return 1;
5647 }
dd8fbd78 5648 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5649 break;
5650 case 56: /* Integer VRECPE */
dd8fbd78 5651 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5652 break;
5653 case 57: /* Integer VRSQRTE */
dd8fbd78 5654 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5655 break;
5656 case 58: /* Float VRECPE */
4373f3ce 5657 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5658 break;
5659 case 59: /* Float VRSQRTE */
4373f3ce 5660 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5661 break;
5662 case 60: /* VCVT.F32.S32 */
d3587ef8 5663 gen_vfp_sito(0);
9ee6e8bb
PB
5664 break;
5665 case 61: /* VCVT.F32.U32 */
d3587ef8 5666 gen_vfp_uito(0);
9ee6e8bb
PB
5667 break;
5668 case 62: /* VCVT.S32.F32 */
d3587ef8 5669 gen_vfp_tosiz(0);
9ee6e8bb
PB
5670 break;
5671 case 63: /* VCVT.U32.F32 */
d3587ef8 5672 gen_vfp_touiz(0);
9ee6e8bb
PB
5673 break;
5674 default:
5675 /* Reserved: 21, 29, 39-56 */
5676 return 1;
5677 }
5678 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5679 tcg_gen_st_f32(cpu_F0s, cpu_env,
5680 neon_reg_offset(rd, pass));
9ee6e8bb 5681 } else {
dd8fbd78 5682 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5683 }
5684 }
5685 break;
5686 }
5687 } else if ((insn & (1 << 10)) == 0) {
5688 /* VTBL, VTBX. */
3018f259 5689 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5690 if (insn & (1 << 6)) {
8f8e3aa4 5691 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5692 } else {
8f8e3aa4
PB
5693 tmp = new_tmp();
5694 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5695 }
8f8e3aa4 5696 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5697 tmp4 = tcg_const_i32(rn);
5698 tmp5 = tcg_const_i32(n);
5699 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
3018f259 5700 dead_tmp(tmp);
9ee6e8bb 5701 if (insn & (1 << 6)) {
8f8e3aa4 5702 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5703 } else {
8f8e3aa4
PB
5704 tmp = new_tmp();
5705 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5706 }
8f8e3aa4 5707 tmp3 = neon_load_reg(rm, 1);
b75263d6 5708 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5709 tcg_temp_free_i32(tmp5);
5710 tcg_temp_free_i32(tmp4);
8f8e3aa4 5711 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5712 neon_store_reg(rd, 1, tmp3);
5713 dead_tmp(tmp);
9ee6e8bb
PB
5714 } else if ((insn & 0x380) == 0) {
5715 /* VDUP */
5716 if (insn & (1 << 19)) {
dd8fbd78 5717 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5718 } else {
dd8fbd78 5719 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5720 }
5721 if (insn & (1 << 16)) {
dd8fbd78 5722 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5723 } else if (insn & (1 << 17)) {
5724 if ((insn >> 18) & 1)
dd8fbd78 5725 gen_neon_dup_high16(tmp);
9ee6e8bb 5726 else
dd8fbd78 5727 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5728 }
5729 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5730 tmp2 = new_tmp();
5731 tcg_gen_mov_i32(tmp2, tmp);
5732 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5733 }
dd8fbd78 5734 dead_tmp(tmp);
9ee6e8bb
PB
5735 } else {
5736 return 1;
5737 }
5738 }
5739 }
5740 return 0;
5741}
5742
fe1479c3
PB
5743static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5744{
5745 int crn = (insn >> 16) & 0xf;
5746 int crm = insn & 0xf;
5747 int op1 = (insn >> 21) & 7;
5748 int op2 = (insn >> 5) & 7;
5749 int rt = (insn >> 12) & 0xf;
5750 TCGv tmp;
5751
5752 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5753 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5754 /* TEECR */
5755 if (IS_USER(s))
5756 return 1;
5757 tmp = load_cpu_field(teecr);
5758 store_reg(s, rt, tmp);
5759 return 0;
5760 }
5761 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5762 /* TEEHBR */
5763 if (IS_USER(s) && (env->teecr & 1))
5764 return 1;
5765 tmp = load_cpu_field(teehbr);
5766 store_reg(s, rt, tmp);
5767 return 0;
5768 }
5769 }
5770 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5771 op1, crn, crm, op2);
5772 return 1;
5773}
5774
5775static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5776{
5777 int crn = (insn >> 16) & 0xf;
5778 int crm = insn & 0xf;
5779 int op1 = (insn >> 21) & 7;
5780 int op2 = (insn >> 5) & 7;
5781 int rt = (insn >> 12) & 0xf;
5782 TCGv tmp;
5783
5784 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5785 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5786 /* TEECR */
5787 if (IS_USER(s))
5788 return 1;
5789 tmp = load_reg(s, rt);
5790 gen_helper_set_teecr(cpu_env, tmp);
5791 dead_tmp(tmp);
5792 return 0;
5793 }
5794 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5795 /* TEEHBR */
5796 if (IS_USER(s) && (env->teecr & 1))
5797 return 1;
5798 tmp = load_reg(s, rt);
5799 store_cpu_field(tmp, teehbr);
5800 return 0;
5801 }
5802 }
5803 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5804 op1, crn, crm, op2);
5805 return 1;
5806}
5807
9ee6e8bb
PB
5808static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5809{
5810 int cpnum;
5811
5812 cpnum = (insn >> 8) & 0xf;
5813 if (arm_feature(env, ARM_FEATURE_XSCALE)
5814 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5815 return 1;
5816
5817 switch (cpnum) {
5818 case 0:
5819 case 1:
5820 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5821 return disas_iwmmxt_insn(env, s, insn);
5822 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5823 return disas_dsp_insn(env, s, insn);
5824 }
5825 return 1;
5826 case 10:
5827 case 11:
5828 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5829 case 14:
5830 /* Coprocessors 7-15 are architecturally reserved by ARM.
5831 Unfortunately Intel decided to ignore this. */
5832 if (arm_feature(env, ARM_FEATURE_XSCALE))
5833 goto board;
5834 if (insn & (1 << 20))
5835 return disas_cp14_read(env, s, insn);
5836 else
5837 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5838 case 15:
5839 return disas_cp15_insn (env, s, insn);
5840 default:
fe1479c3 5841 board:
9ee6e8bb
PB
5842 /* Unknown coprocessor. See if the board has hooked it. */
5843 return disas_cp_insn (env, s, insn);
5844 }
5845}
5846
5e3f878a
PB
5847
5848/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5849static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5850{
5851 TCGv tmp;
5852 tmp = new_tmp();
5853 tcg_gen_trunc_i64_i32(tmp, val);
5854 store_reg(s, rlow, tmp);
5855 tmp = new_tmp();
5856 tcg_gen_shri_i64(val, val, 32);
5857 tcg_gen_trunc_i64_i32(tmp, val);
5858 store_reg(s, rhigh, tmp);
5859}
5860
5861/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5862static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5863{
a7812ae4 5864 TCGv_i64 tmp;
5e3f878a
PB
5865 TCGv tmp2;
5866
36aa55dc 5867 /* Load value and extend to 64 bits. */
a7812ae4 5868 tmp = tcg_temp_new_i64();
5e3f878a
PB
5869 tmp2 = load_reg(s, rlow);
5870 tcg_gen_extu_i32_i64(tmp, tmp2);
5871 dead_tmp(tmp2);
5872 tcg_gen_add_i64(val, val, tmp);
b75263d6 5873 tcg_temp_free_i64(tmp);
5e3f878a
PB
5874}
5875
5876/* load and add a 64-bit value from a register pair. */
a7812ae4 5877static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5878{
a7812ae4 5879 TCGv_i64 tmp;
36aa55dc
PB
5880 TCGv tmpl;
5881 TCGv tmph;
5e3f878a
PB
5882
5883 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5884 tmpl = load_reg(s, rlow);
5885 tmph = load_reg(s, rhigh);
a7812ae4 5886 tmp = tcg_temp_new_i64();
36aa55dc
PB
5887 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5888 dead_tmp(tmpl);
5889 dead_tmp(tmph);
5e3f878a 5890 tcg_gen_add_i64(val, val, tmp);
b75263d6 5891 tcg_temp_free_i64(tmp);
5e3f878a
PB
5892}
5893
5894/* Set N and Z flags from a 64-bit value. */
a7812ae4 5895static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5896{
5897 TCGv tmp = new_tmp();
5898 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5899 gen_logic_CC(tmp);
5900 dead_tmp(tmp);
5e3f878a
PB
5901}
5902
426f5abc
PB
5903/* Load/Store exclusive instructions are implemented by remembering
5904 the value/address loaded, and seeing if these are the same
5905 when the store is performed. This should be is sufficient to implement
5906 the architecturally mandated semantics, and avoids having to monitor
5907 regular stores.
5908
5909 In system emulation mode only one CPU will be running at once, so
5910 this sequence is effectively atomic. In user emulation mode we
5911 throw an exception and handle the atomic operation elsewhere. */
5912static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5913 TCGv addr, int size)
5914{
5915 TCGv tmp;
5916
5917 switch (size) {
5918 case 0:
5919 tmp = gen_ld8u(addr, IS_USER(s));
5920 break;
5921 case 1:
5922 tmp = gen_ld16u(addr, IS_USER(s));
5923 break;
5924 case 2:
5925 case 3:
5926 tmp = gen_ld32(addr, IS_USER(s));
5927 break;
5928 default:
5929 abort();
5930 }
5931 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5932 store_reg(s, rt, tmp);
5933 if (size == 3) {
2c9adbda
PM
5934 TCGv tmp2 = new_tmp();
5935 tcg_gen_addi_i32(tmp2, addr, 4);
5936 tmp = gen_ld32(tmp2, IS_USER(s));
5937 dead_tmp(tmp2);
426f5abc
PB
5938 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5939 store_reg(s, rt2, tmp);
5940 }
5941 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5942}
5943
5944static void gen_clrex(DisasContext *s)
5945{
5946 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5947}
5948
5949#ifdef CONFIG_USER_ONLY
5950static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5951 TCGv addr, int size)
5952{
5953 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5954 tcg_gen_movi_i32(cpu_exclusive_info,
5955 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5956 gen_set_condexec(s);
5957 gen_set_pc_im(s->pc - 4);
5958 gen_exception(EXCP_STREX);
5959 s->is_jmp = DISAS_JUMP;
5960}
5961#else
5962static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5963 TCGv addr, int size)
5964{
5965 TCGv tmp;
5966 int done_label;
5967 int fail_label;
5968
5969 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5970 [addr] = {Rt};
5971 {Rd} = 0;
5972 } else {
5973 {Rd} = 1;
5974 } */
5975 fail_label = gen_new_label();
5976 done_label = gen_new_label();
5977 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
5978 switch (size) {
5979 case 0:
5980 tmp = gen_ld8u(addr, IS_USER(s));
5981 break;
5982 case 1:
5983 tmp = gen_ld16u(addr, IS_USER(s));
5984 break;
5985 case 2:
5986 case 3:
5987 tmp = gen_ld32(addr, IS_USER(s));
5988 break;
5989 default:
5990 abort();
5991 }
5992 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
5993 dead_tmp(tmp);
5994 if (size == 3) {
5995 TCGv tmp2 = new_tmp();
5996 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 5997 tmp = gen_ld32(tmp2, IS_USER(s));
426f5abc
PB
5998 dead_tmp(tmp2);
5999 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6000 dead_tmp(tmp);
6001 }
6002 tmp = load_reg(s, rt);
6003 switch (size) {
6004 case 0:
6005 gen_st8(tmp, addr, IS_USER(s));
6006 break;
6007 case 1:
6008 gen_st16(tmp, addr, IS_USER(s));
6009 break;
6010 case 2:
6011 case 3:
6012 gen_st32(tmp, addr, IS_USER(s));
6013 break;
6014 default:
6015 abort();
6016 }
6017 if (size == 3) {
6018 tcg_gen_addi_i32(addr, addr, 4);
6019 tmp = load_reg(s, rt2);
6020 gen_st32(tmp, addr, IS_USER(s));
6021 }
6022 tcg_gen_movi_i32(cpu_R[rd], 0);
6023 tcg_gen_br(done_label);
6024 gen_set_label(fail_label);
6025 tcg_gen_movi_i32(cpu_R[rd], 1);
6026 gen_set_label(done_label);
6027 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6028}
6029#endif
6030
9ee6e8bb
PB
6031static void disas_arm_insn(CPUState * env, DisasContext *s)
6032{
6033 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6034 TCGv tmp;
3670669c 6035 TCGv tmp2;
6ddbc6e4 6036 TCGv tmp3;
b0109805 6037 TCGv addr;
a7812ae4 6038 TCGv_i64 tmp64;
9ee6e8bb
PB
6039
6040 insn = ldl_code(s->pc);
6041 s->pc += 4;
6042
6043 /* M variants do not implement ARM mode. */
6044 if (IS_M(env))
6045 goto illegal_op;
6046 cond = insn >> 28;
6047 if (cond == 0xf){
6048 /* Unconditional instructions. */
6049 if (((insn >> 25) & 7) == 1) {
6050 /* NEON Data processing. */
6051 if (!arm_feature(env, ARM_FEATURE_NEON))
6052 goto illegal_op;
6053
6054 if (disas_neon_data_insn(env, s, insn))
6055 goto illegal_op;
6056 return;
6057 }
6058 if ((insn & 0x0f100000) == 0x04000000) {
6059 /* NEON load/store. */
6060 if (!arm_feature(env, ARM_FEATURE_NEON))
6061 goto illegal_op;
6062
6063 if (disas_neon_ls_insn(env, s, insn))
6064 goto illegal_op;
6065 return;
6066 }
6067 if ((insn & 0x0d70f000) == 0x0550f000)
6068 return; /* PLD */
6069 else if ((insn & 0x0ffffdff) == 0x01010000) {
6070 ARCH(6);
6071 /* setend */
6072 if (insn & (1 << 9)) {
6073 /* BE8 mode not implemented. */
6074 goto illegal_op;
6075 }
6076 return;
6077 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6078 switch ((insn >> 4) & 0xf) {
6079 case 1: /* clrex */
6080 ARCH(6K);
426f5abc 6081 gen_clrex(s);
9ee6e8bb
PB
6082 return;
6083 case 4: /* dsb */
6084 case 5: /* dmb */
6085 case 6: /* isb */
6086 ARCH(7);
6087 /* We don't emulate caches so these are a no-op. */
6088 return;
6089 default:
6090 goto illegal_op;
6091 }
6092 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6093 /* srs */
c67b6b71 6094 int32_t offset;
9ee6e8bb
PB
6095 if (IS_USER(s))
6096 goto illegal_op;
6097 ARCH(6);
6098 op1 = (insn & 0x1f);
6099 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 6100 addr = load_reg(s, 13);
9ee6e8bb 6101 } else {
b0109805 6102 addr = new_tmp();
b75263d6
JR
6103 tmp = tcg_const_i32(op1);
6104 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6105 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6106 }
6107 i = (insn >> 23) & 3;
6108 switch (i) {
6109 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6110 case 1: offset = 0; break; /* IA */
6111 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6112 case 3: offset = 4; break; /* IB */
6113 default: abort();
6114 }
6115 if (offset)
b0109805
PB
6116 tcg_gen_addi_i32(addr, addr, offset);
6117 tmp = load_reg(s, 14);
6118 gen_st32(tmp, addr, 0);
c67b6b71 6119 tmp = load_cpu_field(spsr);
b0109805
PB
6120 tcg_gen_addi_i32(addr, addr, 4);
6121 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6122 if (insn & (1 << 21)) {
6123 /* Base writeback. */
6124 switch (i) {
6125 case 0: offset = -8; break;
c67b6b71
FN
6126 case 1: offset = 4; break;
6127 case 2: offset = -4; break;
9ee6e8bb
PB
6128 case 3: offset = 0; break;
6129 default: abort();
6130 }
6131 if (offset)
c67b6b71 6132 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 6133 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 6134 store_reg(s, 13, addr);
9ee6e8bb 6135 } else {
b75263d6
JR
6136 tmp = tcg_const_i32(op1);
6137 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6138 tcg_temp_free_i32(tmp);
c67b6b71 6139 dead_tmp(addr);
9ee6e8bb 6140 }
b0109805
PB
6141 } else {
6142 dead_tmp(addr);
9ee6e8bb 6143 }
a990f58f 6144 return;
ea825eee 6145 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6146 /* rfe */
c67b6b71 6147 int32_t offset;
9ee6e8bb
PB
6148 if (IS_USER(s))
6149 goto illegal_op;
6150 ARCH(6);
6151 rn = (insn >> 16) & 0xf;
b0109805 6152 addr = load_reg(s, rn);
9ee6e8bb
PB
6153 i = (insn >> 23) & 3;
6154 switch (i) {
b0109805 6155 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6156 case 1: offset = 0; break; /* IA */
6157 case 2: offset = -8; break; /* DB */
b0109805 6158 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6159 default: abort();
6160 }
6161 if (offset)
b0109805
PB
6162 tcg_gen_addi_i32(addr, addr, offset);
6163 /* Load PC into tmp and CPSR into tmp2. */
6164 tmp = gen_ld32(addr, 0);
6165 tcg_gen_addi_i32(addr, addr, 4);
6166 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6167 if (insn & (1 << 21)) {
6168 /* Base writeback. */
6169 switch (i) {
b0109805 6170 case 0: offset = -8; break;
c67b6b71
FN
6171 case 1: offset = 4; break;
6172 case 2: offset = -4; break;
b0109805 6173 case 3: offset = 0; break;
9ee6e8bb
PB
6174 default: abort();
6175 }
6176 if (offset)
b0109805
PB
6177 tcg_gen_addi_i32(addr, addr, offset);
6178 store_reg(s, rn, addr);
6179 } else {
6180 dead_tmp(addr);
9ee6e8bb 6181 }
b0109805 6182 gen_rfe(s, tmp, tmp2);
c67b6b71 6183 return;
9ee6e8bb
PB
6184 } else if ((insn & 0x0e000000) == 0x0a000000) {
6185 /* branch link and change to thumb (blx <offset>) */
6186 int32_t offset;
6187
6188 val = (uint32_t)s->pc;
d9ba4830
PB
6189 tmp = new_tmp();
6190 tcg_gen_movi_i32(tmp, val);
6191 store_reg(s, 14, tmp);
9ee6e8bb
PB
6192 /* Sign-extend the 24-bit offset */
6193 offset = (((int32_t)insn) << 8) >> 8;
6194 /* offset * 4 + bit24 * 2 + (thumb bit) */
6195 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6196 /* pipeline offset */
6197 val += 4;
d9ba4830 6198 gen_bx_im(s, val);
9ee6e8bb
PB
6199 return;
6200 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6201 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6202 /* iWMMXt register transfer. */
6203 if (env->cp15.c15_cpar & (1 << 1))
6204 if (!disas_iwmmxt_insn(env, s, insn))
6205 return;
6206 }
6207 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6208 /* Coprocessor double register transfer. */
6209 } else if ((insn & 0x0f000010) == 0x0e000010) {
6210 /* Additional coprocessor register transfer. */
7997d92f 6211 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6212 uint32_t mask;
6213 uint32_t val;
6214 /* cps (privileged) */
6215 if (IS_USER(s))
6216 return;
6217 mask = val = 0;
6218 if (insn & (1 << 19)) {
6219 if (insn & (1 << 8))
6220 mask |= CPSR_A;
6221 if (insn & (1 << 7))
6222 mask |= CPSR_I;
6223 if (insn & (1 << 6))
6224 mask |= CPSR_F;
6225 if (insn & (1 << 18))
6226 val |= mask;
6227 }
7997d92f 6228 if (insn & (1 << 17)) {
9ee6e8bb
PB
6229 mask |= CPSR_M;
6230 val |= (insn & 0x1f);
6231 }
6232 if (mask) {
2fbac54b 6233 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6234 }
6235 return;
6236 }
6237 goto illegal_op;
6238 }
6239 if (cond != 0xe) {
6240 /* if not always execute, we generate a conditional jump to
6241 next instruction */
6242 s->condlabel = gen_new_label();
d9ba4830 6243 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6244 s->condjmp = 1;
6245 }
6246 if ((insn & 0x0f900000) == 0x03000000) {
6247 if ((insn & (1 << 21)) == 0) {
6248 ARCH(6T2);
6249 rd = (insn >> 12) & 0xf;
6250 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6251 if ((insn & (1 << 22)) == 0) {
6252 /* MOVW */
5e3f878a
PB
6253 tmp = new_tmp();
6254 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6255 } else {
6256 /* MOVT */
5e3f878a 6257 tmp = load_reg(s, rd);
86831435 6258 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6259 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6260 }
5e3f878a 6261 store_reg(s, rd, tmp);
9ee6e8bb
PB
6262 } else {
6263 if (((insn >> 12) & 0xf) != 0xf)
6264 goto illegal_op;
6265 if (((insn >> 16) & 0xf) == 0) {
6266 gen_nop_hint(s, insn & 0xff);
6267 } else {
6268 /* CPSR = immediate */
6269 val = insn & 0xff;
6270 shift = ((insn >> 8) & 0xf) * 2;
6271 if (shift)
6272 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6273 i = ((insn & (1 << 22)) != 0);
2fbac54b 6274 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6275 goto illegal_op;
6276 }
6277 }
6278 } else if ((insn & 0x0f900000) == 0x01000000
6279 && (insn & 0x00000090) != 0x00000090) {
6280 /* miscellaneous instructions */
6281 op1 = (insn >> 21) & 3;
6282 sh = (insn >> 4) & 0xf;
6283 rm = insn & 0xf;
6284 switch (sh) {
6285 case 0x0: /* move program status register */
6286 if (op1 & 1) {
6287 /* PSR = reg */
2fbac54b 6288 tmp = load_reg(s, rm);
9ee6e8bb 6289 i = ((op1 & 2) != 0);
2fbac54b 6290 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6291 goto illegal_op;
6292 } else {
6293 /* reg = PSR */
6294 rd = (insn >> 12) & 0xf;
6295 if (op1 & 2) {
6296 if (IS_USER(s))
6297 goto illegal_op;
d9ba4830 6298 tmp = load_cpu_field(spsr);
9ee6e8bb 6299 } else {
d9ba4830
PB
6300 tmp = new_tmp();
6301 gen_helper_cpsr_read(tmp);
9ee6e8bb 6302 }
d9ba4830 6303 store_reg(s, rd, tmp);
9ee6e8bb
PB
6304 }
6305 break;
6306 case 0x1:
6307 if (op1 == 1) {
6308 /* branch/exchange thumb (bx). */
d9ba4830
PB
6309 tmp = load_reg(s, rm);
6310 gen_bx(s, tmp);
9ee6e8bb
PB
6311 } else if (op1 == 3) {
6312 /* clz */
6313 rd = (insn >> 12) & 0xf;
1497c961
PB
6314 tmp = load_reg(s, rm);
6315 gen_helper_clz(tmp, tmp);
6316 store_reg(s, rd, tmp);
9ee6e8bb
PB
6317 } else {
6318 goto illegal_op;
6319 }
6320 break;
6321 case 0x2:
6322 if (op1 == 1) {
6323 ARCH(5J); /* bxj */
6324 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6325 tmp = load_reg(s, rm);
6326 gen_bx(s, tmp);
9ee6e8bb
PB
6327 } else {
6328 goto illegal_op;
6329 }
6330 break;
6331 case 0x3:
6332 if (op1 != 1)
6333 goto illegal_op;
6334
6335 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6336 tmp = load_reg(s, rm);
6337 tmp2 = new_tmp();
6338 tcg_gen_movi_i32(tmp2, s->pc);
6339 store_reg(s, 14, tmp2);
6340 gen_bx(s, tmp);
9ee6e8bb
PB
6341 break;
6342 case 0x5: /* saturating add/subtract */
6343 rd = (insn >> 12) & 0xf;
6344 rn = (insn >> 16) & 0xf;
b40d0353 6345 tmp = load_reg(s, rm);
5e3f878a 6346 tmp2 = load_reg(s, rn);
9ee6e8bb 6347 if (op1 & 2)
5e3f878a 6348 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6349 if (op1 & 1)
5e3f878a 6350 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6351 else
5e3f878a
PB
6352 gen_helper_add_saturate(tmp, tmp, tmp2);
6353 dead_tmp(tmp2);
6354 store_reg(s, rd, tmp);
9ee6e8bb 6355 break;
49e14940
AL
6356 case 7:
6357 /* SMC instruction (op1 == 3)
6358 and undefined instructions (op1 == 0 || op1 == 2)
6359 will trap */
6360 if (op1 != 1) {
6361 goto illegal_op;
6362 }
6363 /* bkpt */
9ee6e8bb 6364 gen_set_condexec(s);
5e3f878a 6365 gen_set_pc_im(s->pc - 4);
d9ba4830 6366 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6367 s->is_jmp = DISAS_JUMP;
6368 break;
6369 case 0x8: /* signed multiply */
6370 case 0xa:
6371 case 0xc:
6372 case 0xe:
6373 rs = (insn >> 8) & 0xf;
6374 rn = (insn >> 12) & 0xf;
6375 rd = (insn >> 16) & 0xf;
6376 if (op1 == 1) {
6377 /* (32 * 16) >> 16 */
5e3f878a
PB
6378 tmp = load_reg(s, rm);
6379 tmp2 = load_reg(s, rs);
9ee6e8bb 6380 if (sh & 4)
5e3f878a 6381 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6382 else
5e3f878a 6383 gen_sxth(tmp2);
a7812ae4
PB
6384 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6385 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6386 tmp = new_tmp();
a7812ae4 6387 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6388 tcg_temp_free_i64(tmp64);
9ee6e8bb 6389 if ((sh & 2) == 0) {
5e3f878a
PB
6390 tmp2 = load_reg(s, rn);
6391 gen_helper_add_setq(tmp, tmp, tmp2);
6392 dead_tmp(tmp2);
9ee6e8bb 6393 }
5e3f878a 6394 store_reg(s, rd, tmp);
9ee6e8bb
PB
6395 } else {
6396 /* 16 * 16 */
5e3f878a
PB
6397 tmp = load_reg(s, rm);
6398 tmp2 = load_reg(s, rs);
6399 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6400 dead_tmp(tmp2);
9ee6e8bb 6401 if (op1 == 2) {
a7812ae4
PB
6402 tmp64 = tcg_temp_new_i64();
6403 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6404 dead_tmp(tmp);
a7812ae4
PB
6405 gen_addq(s, tmp64, rn, rd);
6406 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6407 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6408 } else {
6409 if (op1 == 0) {
5e3f878a
PB
6410 tmp2 = load_reg(s, rn);
6411 gen_helper_add_setq(tmp, tmp, tmp2);
6412 dead_tmp(tmp2);
9ee6e8bb 6413 }
5e3f878a 6414 store_reg(s, rd, tmp);
9ee6e8bb
PB
6415 }
6416 }
6417 break;
6418 default:
6419 goto illegal_op;
6420 }
6421 } else if (((insn & 0x0e000000) == 0 &&
6422 (insn & 0x00000090) != 0x90) ||
6423 ((insn & 0x0e000000) == (1 << 25))) {
6424 int set_cc, logic_cc, shiftop;
6425
6426 op1 = (insn >> 21) & 0xf;
6427 set_cc = (insn >> 20) & 1;
6428 logic_cc = table_logic_cc[op1] & set_cc;
6429
6430 /* data processing instruction */
6431 if (insn & (1 << 25)) {
6432 /* immediate operand */
6433 val = insn & 0xff;
6434 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6435 if (shift) {
9ee6e8bb 6436 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6437 }
6438 tmp2 = new_tmp();
6439 tcg_gen_movi_i32(tmp2, val);
6440 if (logic_cc && shift) {
6441 gen_set_CF_bit31(tmp2);
6442 }
9ee6e8bb
PB
6443 } else {
6444 /* register */
6445 rm = (insn) & 0xf;
e9bb4aa9 6446 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6447 shiftop = (insn >> 5) & 3;
6448 if (!(insn & (1 << 4))) {
6449 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6450 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6451 } else {
6452 rs = (insn >> 8) & 0xf;
8984bd2e 6453 tmp = load_reg(s, rs);
e9bb4aa9 6454 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6455 }
6456 }
6457 if (op1 != 0x0f && op1 != 0x0d) {
6458 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6459 tmp = load_reg(s, rn);
6460 } else {
6461 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6462 }
6463 rd = (insn >> 12) & 0xf;
6464 switch(op1) {
6465 case 0x00:
e9bb4aa9
JR
6466 tcg_gen_and_i32(tmp, tmp, tmp2);
6467 if (logic_cc) {
6468 gen_logic_CC(tmp);
6469 }
21aeb343 6470 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6471 break;
6472 case 0x01:
e9bb4aa9
JR
6473 tcg_gen_xor_i32(tmp, tmp, tmp2);
6474 if (logic_cc) {
6475 gen_logic_CC(tmp);
6476 }
21aeb343 6477 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6478 break;
6479 case 0x02:
6480 if (set_cc && rd == 15) {
6481 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6482 if (IS_USER(s)) {
9ee6e8bb 6483 goto illegal_op;
e9bb4aa9
JR
6484 }
6485 gen_helper_sub_cc(tmp, tmp, tmp2);
6486 gen_exception_return(s, tmp);
9ee6e8bb 6487 } else {
e9bb4aa9
JR
6488 if (set_cc) {
6489 gen_helper_sub_cc(tmp, tmp, tmp2);
6490 } else {
6491 tcg_gen_sub_i32(tmp, tmp, tmp2);
6492 }
21aeb343 6493 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6494 }
6495 break;
6496 case 0x03:
e9bb4aa9
JR
6497 if (set_cc) {
6498 gen_helper_sub_cc(tmp, tmp2, tmp);
6499 } else {
6500 tcg_gen_sub_i32(tmp, tmp2, tmp);
6501 }
21aeb343 6502 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6503 break;
6504 case 0x04:
e9bb4aa9
JR
6505 if (set_cc) {
6506 gen_helper_add_cc(tmp, tmp, tmp2);
6507 } else {
6508 tcg_gen_add_i32(tmp, tmp, tmp2);
6509 }
21aeb343 6510 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6511 break;
6512 case 0x05:
e9bb4aa9
JR
6513 if (set_cc) {
6514 gen_helper_adc_cc(tmp, tmp, tmp2);
6515 } else {
6516 gen_add_carry(tmp, tmp, tmp2);
6517 }
21aeb343 6518 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6519 break;
6520 case 0x06:
e9bb4aa9
JR
6521 if (set_cc) {
6522 gen_helper_sbc_cc(tmp, tmp, tmp2);
6523 } else {
6524 gen_sub_carry(tmp, tmp, tmp2);
6525 }
21aeb343 6526 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6527 break;
6528 case 0x07:
e9bb4aa9
JR
6529 if (set_cc) {
6530 gen_helper_sbc_cc(tmp, tmp2, tmp);
6531 } else {
6532 gen_sub_carry(tmp, tmp2, tmp);
6533 }
21aeb343 6534 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6535 break;
6536 case 0x08:
6537 if (set_cc) {
e9bb4aa9
JR
6538 tcg_gen_and_i32(tmp, tmp, tmp2);
6539 gen_logic_CC(tmp);
9ee6e8bb 6540 }
e9bb4aa9 6541 dead_tmp(tmp);
9ee6e8bb
PB
6542 break;
6543 case 0x09:
6544 if (set_cc) {
e9bb4aa9
JR
6545 tcg_gen_xor_i32(tmp, tmp, tmp2);
6546 gen_logic_CC(tmp);
9ee6e8bb 6547 }
e9bb4aa9 6548 dead_tmp(tmp);
9ee6e8bb
PB
6549 break;
6550 case 0x0a:
6551 if (set_cc) {
e9bb4aa9 6552 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6553 }
e9bb4aa9 6554 dead_tmp(tmp);
9ee6e8bb
PB
6555 break;
6556 case 0x0b:
6557 if (set_cc) {
e9bb4aa9 6558 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6559 }
e9bb4aa9 6560 dead_tmp(tmp);
9ee6e8bb
PB
6561 break;
6562 case 0x0c:
e9bb4aa9
JR
6563 tcg_gen_or_i32(tmp, tmp, tmp2);
6564 if (logic_cc) {
6565 gen_logic_CC(tmp);
6566 }
21aeb343 6567 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6568 break;
6569 case 0x0d:
6570 if (logic_cc && rd == 15) {
6571 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6572 if (IS_USER(s)) {
9ee6e8bb 6573 goto illegal_op;
e9bb4aa9
JR
6574 }
6575 gen_exception_return(s, tmp2);
9ee6e8bb 6576 } else {
e9bb4aa9
JR
6577 if (logic_cc) {
6578 gen_logic_CC(tmp2);
6579 }
21aeb343 6580 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6581 }
6582 break;
6583 case 0x0e:
f669df27 6584 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6585 if (logic_cc) {
6586 gen_logic_CC(tmp);
6587 }
21aeb343 6588 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6589 break;
6590 default:
6591 case 0x0f:
e9bb4aa9
JR
6592 tcg_gen_not_i32(tmp2, tmp2);
6593 if (logic_cc) {
6594 gen_logic_CC(tmp2);
6595 }
21aeb343 6596 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6597 break;
6598 }
e9bb4aa9
JR
6599 if (op1 != 0x0f && op1 != 0x0d) {
6600 dead_tmp(tmp2);
6601 }
9ee6e8bb
PB
6602 } else {
6603 /* other instructions */
6604 op1 = (insn >> 24) & 0xf;
6605 switch(op1) {
6606 case 0x0:
6607 case 0x1:
6608 /* multiplies, extra load/stores */
6609 sh = (insn >> 5) & 3;
6610 if (sh == 0) {
6611 if (op1 == 0x0) {
6612 rd = (insn >> 16) & 0xf;
6613 rn = (insn >> 12) & 0xf;
6614 rs = (insn >> 8) & 0xf;
6615 rm = (insn) & 0xf;
6616 op1 = (insn >> 20) & 0xf;
6617 switch (op1) {
6618 case 0: case 1: case 2: case 3: case 6:
6619 /* 32 bit mul */
5e3f878a
PB
6620 tmp = load_reg(s, rs);
6621 tmp2 = load_reg(s, rm);
6622 tcg_gen_mul_i32(tmp, tmp, tmp2);
6623 dead_tmp(tmp2);
9ee6e8bb
PB
6624 if (insn & (1 << 22)) {
6625 /* Subtract (mls) */
6626 ARCH(6T2);
5e3f878a
PB
6627 tmp2 = load_reg(s, rn);
6628 tcg_gen_sub_i32(tmp, tmp2, tmp);
6629 dead_tmp(tmp2);
9ee6e8bb
PB
6630 } else if (insn & (1 << 21)) {
6631 /* Add */
5e3f878a
PB
6632 tmp2 = load_reg(s, rn);
6633 tcg_gen_add_i32(tmp, tmp, tmp2);
6634 dead_tmp(tmp2);
9ee6e8bb
PB
6635 }
6636 if (insn & (1 << 20))
5e3f878a
PB
6637 gen_logic_CC(tmp);
6638 store_reg(s, rd, tmp);
9ee6e8bb 6639 break;
8aac08b1
AJ
6640 case 4:
6641 /* 64 bit mul double accumulate (UMAAL) */
6642 ARCH(6);
6643 tmp = load_reg(s, rs);
6644 tmp2 = load_reg(s, rm);
6645 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6646 gen_addq_lo(s, tmp64, rn);
6647 gen_addq_lo(s, tmp64, rd);
6648 gen_storeq_reg(s, rn, rd, tmp64);
6649 tcg_temp_free_i64(tmp64);
6650 break;
6651 case 8: case 9: case 10: case 11:
6652 case 12: case 13: case 14: case 15:
6653 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
6654 tmp = load_reg(s, rs);
6655 tmp2 = load_reg(s, rm);
8aac08b1 6656 if (insn & (1 << 22)) {
a7812ae4 6657 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 6658 } else {
a7812ae4 6659 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
6660 }
6661 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 6662 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 6663 }
8aac08b1 6664 if (insn & (1 << 20)) {
a7812ae4 6665 gen_logicq_cc(tmp64);
8aac08b1 6666 }
a7812ae4 6667 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6668 tcg_temp_free_i64(tmp64);
9ee6e8bb 6669 break;
8aac08b1
AJ
6670 default:
6671 goto illegal_op;
9ee6e8bb
PB
6672 }
6673 } else {
6674 rn = (insn >> 16) & 0xf;
6675 rd = (insn >> 12) & 0xf;
6676 if (insn & (1 << 23)) {
6677 /* load/store exclusive */
86753403
PB
6678 op1 = (insn >> 21) & 0x3;
6679 if (op1)
a47f43d2 6680 ARCH(6K);
86753403
PB
6681 else
6682 ARCH(6);
3174f8e9 6683 addr = tcg_temp_local_new_i32();
98a46317 6684 load_reg_var(s, addr, rn);
9ee6e8bb 6685 if (insn & (1 << 20)) {
86753403
PB
6686 switch (op1) {
6687 case 0: /* ldrex */
426f5abc 6688 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6689 break;
6690 case 1: /* ldrexd */
426f5abc 6691 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6692 break;
6693 case 2: /* ldrexb */
426f5abc 6694 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6695 break;
6696 case 3: /* ldrexh */
426f5abc 6697 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6698 break;
6699 default:
6700 abort();
6701 }
9ee6e8bb
PB
6702 } else {
6703 rm = insn & 0xf;
86753403
PB
6704 switch (op1) {
6705 case 0: /* strex */
426f5abc 6706 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6707 break;
6708 case 1: /* strexd */
502e64fe 6709 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6710 break;
6711 case 2: /* strexb */
426f5abc 6712 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6713 break;
6714 case 3: /* strexh */
426f5abc 6715 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6716 break;
6717 default:
6718 abort();
6719 }
9ee6e8bb 6720 }
3174f8e9 6721 tcg_temp_free(addr);
9ee6e8bb
PB
6722 } else {
6723 /* SWP instruction */
6724 rm = (insn) & 0xf;
6725
8984bd2e
PB
6726 /* ??? This is not really atomic. However we know
6727 we never have multiple CPUs running in parallel,
6728 so it is good enough. */
6729 addr = load_reg(s, rn);
6730 tmp = load_reg(s, rm);
9ee6e8bb 6731 if (insn & (1 << 22)) {
8984bd2e
PB
6732 tmp2 = gen_ld8u(addr, IS_USER(s));
6733 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6734 } else {
8984bd2e
PB
6735 tmp2 = gen_ld32(addr, IS_USER(s));
6736 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6737 }
8984bd2e
PB
6738 dead_tmp(addr);
6739 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6740 }
6741 }
6742 } else {
6743 int address_offset;
6744 int load;
6745 /* Misc load/store */
6746 rn = (insn >> 16) & 0xf;
6747 rd = (insn >> 12) & 0xf;
b0109805 6748 addr = load_reg(s, rn);
9ee6e8bb 6749 if (insn & (1 << 24))
b0109805 6750 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6751 address_offset = 0;
6752 if (insn & (1 << 20)) {
6753 /* load */
6754 switch(sh) {
6755 case 1:
b0109805 6756 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6757 break;
6758 case 2:
b0109805 6759 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6760 break;
6761 default:
6762 case 3:
b0109805 6763 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6764 break;
6765 }
6766 load = 1;
6767 } else if (sh & 2) {
6768 /* doubleword */
6769 if (sh & 1) {
6770 /* store */
b0109805
PB
6771 tmp = load_reg(s, rd);
6772 gen_st32(tmp, addr, IS_USER(s));
6773 tcg_gen_addi_i32(addr, addr, 4);
6774 tmp = load_reg(s, rd + 1);
6775 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6776 load = 0;
6777 } else {
6778 /* load */
b0109805
PB
6779 tmp = gen_ld32(addr, IS_USER(s));
6780 store_reg(s, rd, tmp);
6781 tcg_gen_addi_i32(addr, addr, 4);
6782 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6783 rd++;
6784 load = 1;
6785 }
6786 address_offset = -4;
6787 } else {
6788 /* store */
b0109805
PB
6789 tmp = load_reg(s, rd);
6790 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6791 load = 0;
6792 }
6793 /* Perform base writeback before the loaded value to
6794 ensure correct behavior with overlapping index registers.
 6795                        ldrd with base writeback is undefined if the
6796 destination and index registers overlap. */
6797 if (!(insn & (1 << 24))) {
b0109805
PB
6798 gen_add_datah_offset(s, insn, address_offset, addr);
6799 store_reg(s, rn, addr);
9ee6e8bb
PB
6800 } else if (insn & (1 << 21)) {
6801 if (address_offset)
b0109805
PB
6802 tcg_gen_addi_i32(addr, addr, address_offset);
6803 store_reg(s, rn, addr);
6804 } else {
6805 dead_tmp(addr);
9ee6e8bb
PB
6806 }
6807 if (load) {
6808 /* Complete the load. */
b0109805 6809 store_reg(s, rd, tmp);
9ee6e8bb
PB
6810 }
6811 }
6812 break;
6813 case 0x4:
6814 case 0x5:
6815 goto do_ldst;
6816 case 0x6:
6817 case 0x7:
6818 if (insn & (1 << 4)) {
6819 ARCH(6);
6820 /* Armv6 Media instructions. */
6821 rm = insn & 0xf;
6822 rn = (insn >> 16) & 0xf;
2c0262af 6823 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6824 rs = (insn >> 8) & 0xf;
6825 switch ((insn >> 23) & 3) {
6826 case 0: /* Parallel add/subtract. */
6827 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6828 tmp = load_reg(s, rn);
6829 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6830 sh = (insn >> 5) & 7;
6831 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6832 goto illegal_op;
6ddbc6e4
PB
6833 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6834 dead_tmp(tmp2);
6835 store_reg(s, rd, tmp);
9ee6e8bb
PB
6836 break;
6837 case 1:
6838 if ((insn & 0x00700020) == 0) {
6c95676b 6839 /* Halfword pack. */
3670669c
PB
6840 tmp = load_reg(s, rn);
6841 tmp2 = load_reg(s, rm);
9ee6e8bb 6842 shift = (insn >> 7) & 0x1f;
3670669c
PB
6843 if (insn & (1 << 6)) {
6844 /* pkhtb */
22478e79
AZ
6845 if (shift == 0)
6846 shift = 31;
6847 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6848 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6849 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6850 } else {
6851 /* pkhbt */
22478e79
AZ
6852 if (shift)
6853 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6854 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6855 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6856 }
6857 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6858 dead_tmp(tmp2);
3670669c 6859 store_reg(s, rd, tmp);
9ee6e8bb
PB
6860 } else if ((insn & 0x00200020) == 0x00200000) {
6861 /* [us]sat */
6ddbc6e4 6862 tmp = load_reg(s, rm);
9ee6e8bb
PB
6863 shift = (insn >> 7) & 0x1f;
6864 if (insn & (1 << 6)) {
6865 if (shift == 0)
6866 shift = 31;
6ddbc6e4 6867 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6868 } else {
6ddbc6e4 6869 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6870 }
6871 sh = (insn >> 16) & 0x1f;
6872 if (sh != 0) {
b75263d6 6873 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6874 if (insn & (1 << 22))
b75263d6 6875 gen_helper_usat(tmp, tmp, tmp2);
9ee6e8bb 6876 else
b75263d6
JR
6877 gen_helper_ssat(tmp, tmp, tmp2);
6878 tcg_temp_free_i32(tmp2);
9ee6e8bb 6879 }
6ddbc6e4 6880 store_reg(s, rd, tmp);
9ee6e8bb
PB
6881 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6882 /* [us]sat16 */
6ddbc6e4 6883 tmp = load_reg(s, rm);
9ee6e8bb
PB
6884 sh = (insn >> 16) & 0x1f;
6885 if (sh != 0) {
b75263d6 6886 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6887 if (insn & (1 << 22))
b75263d6 6888 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 6889 else
b75263d6
JR
6890 gen_helper_ssat16(tmp, tmp, tmp2);
6891 tcg_temp_free_i32(tmp2);
9ee6e8bb 6892 }
6ddbc6e4 6893 store_reg(s, rd, tmp);
9ee6e8bb
PB
6894 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6895 /* Select bytes. */
6ddbc6e4
PB
6896 tmp = load_reg(s, rn);
6897 tmp2 = load_reg(s, rm);
6898 tmp3 = new_tmp();
6899 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6900 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6901 dead_tmp(tmp3);
6902 dead_tmp(tmp2);
6903 store_reg(s, rd, tmp);
9ee6e8bb 6904 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6905 tmp = load_reg(s, rm);
9ee6e8bb
PB
6906 shift = (insn >> 10) & 3;
 6907                             /* ??? In many cases it's not necessary to do a
6908 rotate, a shift is sufficient. */
6909 if (shift != 0)
f669df27 6910 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6911 op1 = (insn >> 20) & 7;
6912 switch (op1) {
5e3f878a
PB
6913 case 0: gen_sxtb16(tmp); break;
6914 case 2: gen_sxtb(tmp); break;
6915 case 3: gen_sxth(tmp); break;
6916 case 4: gen_uxtb16(tmp); break;
6917 case 6: gen_uxtb(tmp); break;
6918 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6919 default: goto illegal_op;
6920 }
6921 if (rn != 15) {
5e3f878a 6922 tmp2 = load_reg(s, rn);
9ee6e8bb 6923 if ((op1 & 3) == 0) {
5e3f878a 6924 gen_add16(tmp, tmp2);
9ee6e8bb 6925 } else {
5e3f878a
PB
6926 tcg_gen_add_i32(tmp, tmp, tmp2);
6927 dead_tmp(tmp2);
9ee6e8bb
PB
6928 }
6929 }
6c95676b 6930 store_reg(s, rd, tmp);
9ee6e8bb
PB
6931 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6932 /* rev */
b0109805 6933 tmp = load_reg(s, rm);
9ee6e8bb
PB
6934 if (insn & (1 << 22)) {
6935 if (insn & (1 << 7)) {
b0109805 6936 gen_revsh(tmp);
9ee6e8bb
PB
6937 } else {
6938 ARCH(6T2);
b0109805 6939 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6940 }
6941 } else {
6942 if (insn & (1 << 7))
b0109805 6943 gen_rev16(tmp);
9ee6e8bb 6944 else
66896cb8 6945 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6946 }
b0109805 6947 store_reg(s, rd, tmp);
9ee6e8bb
PB
6948 } else {
6949 goto illegal_op;
6950 }
6951 break;
6952 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6953 tmp = load_reg(s, rm);
6954 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6955 if (insn & (1 << 20)) {
6956 /* Signed multiply most significant [accumulate]. */
a7812ae4 6957 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6958 if (insn & (1 << 5))
a7812ae4
PB
6959 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6960 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6961 tmp = new_tmp();
a7812ae4 6962 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6963 tcg_temp_free_i64(tmp64);
955a7dd5
AZ
6964 if (rd != 15) {
6965 tmp2 = load_reg(s, rd);
9ee6e8bb 6966 if (insn & (1 << 6)) {
5e3f878a 6967 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6968 } else {
5e3f878a 6969 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6970 }
5e3f878a 6971 dead_tmp(tmp2);
9ee6e8bb 6972 }
955a7dd5 6973 store_reg(s, rn, tmp);
9ee6e8bb
PB
6974 } else {
6975 if (insn & (1 << 5))
5e3f878a
PB
6976 gen_swap_half(tmp2);
6977 gen_smul_dual(tmp, tmp2);
6978 /* This addition cannot overflow. */
6979 if (insn & (1 << 6)) {
6980 tcg_gen_sub_i32(tmp, tmp, tmp2);
6981 } else {
6982 tcg_gen_add_i32(tmp, tmp, tmp2);
6983 }
6984 dead_tmp(tmp2);
9ee6e8bb 6985 if (insn & (1 << 22)) {
5e3f878a 6986 /* smlald, smlsld */
a7812ae4
PB
6987 tmp64 = tcg_temp_new_i64();
6988 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6989 dead_tmp(tmp);
a7812ae4
PB
6990 gen_addq(s, tmp64, rd, rn);
6991 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 6992 tcg_temp_free_i64(tmp64);
9ee6e8bb 6993 } else {
5e3f878a 6994 /* smuad, smusd, smlad, smlsd */
22478e79 6995 if (rd != 15)
9ee6e8bb 6996 {
22478e79 6997 tmp2 = load_reg(s, rd);
5e3f878a
PB
6998 gen_helper_add_setq(tmp, tmp, tmp2);
6999 dead_tmp(tmp2);
9ee6e8bb 7000 }
22478e79 7001 store_reg(s, rn, tmp);
9ee6e8bb
PB
7002 }
7003 }
7004 break;
7005 case 3:
7006 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7007 switch (op1) {
7008 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7009 ARCH(6);
7010 tmp = load_reg(s, rm);
7011 tmp2 = load_reg(s, rs);
7012 gen_helper_usad8(tmp, tmp, tmp2);
7013 dead_tmp(tmp2);
ded9d295
AZ
7014 if (rd != 15) {
7015 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
7016 tcg_gen_add_i32(tmp, tmp, tmp2);
7017 dead_tmp(tmp2);
9ee6e8bb 7018 }
ded9d295 7019 store_reg(s, rn, tmp);
9ee6e8bb
PB
7020 break;
7021 case 0x20: case 0x24: case 0x28: case 0x2c:
7022 /* Bitfield insert/clear. */
7023 ARCH(6T2);
7024 shift = (insn >> 7) & 0x1f;
7025 i = (insn >> 16) & 0x1f;
7026 i = i + 1 - shift;
7027 if (rm == 15) {
5e3f878a
PB
7028 tmp = new_tmp();
7029 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7030 } else {
5e3f878a 7031 tmp = load_reg(s, rm);
9ee6e8bb
PB
7032 }
7033 if (i != 32) {
5e3f878a 7034 tmp2 = load_reg(s, rd);
8f8e3aa4 7035 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 7036 dead_tmp(tmp2);
9ee6e8bb 7037 }
5e3f878a 7038 store_reg(s, rd, tmp);
9ee6e8bb
PB
7039 break;
7040 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7041 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7042 ARCH(6T2);
5e3f878a 7043 tmp = load_reg(s, rm);
9ee6e8bb
PB
7044 shift = (insn >> 7) & 0x1f;
7045 i = ((insn >> 16) & 0x1f) + 1;
7046 if (shift + i > 32)
7047 goto illegal_op;
7048 if (i < 32) {
7049 if (op1 & 0x20) {
5e3f878a 7050 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7051 } else {
5e3f878a 7052 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7053 }
7054 }
5e3f878a 7055 store_reg(s, rd, tmp);
9ee6e8bb
PB
7056 break;
7057 default:
7058 goto illegal_op;
7059 }
7060 break;
7061 }
7062 break;
7063 }
7064 do_ldst:
7065 /* Check for undefined extension instructions
7066 * per the ARM Bible IE:
7067 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7068 */
7069 sh = (0xf << 20) | (0xf << 4);
7070 if (op1 == 0x7 && ((insn & sh) == sh))
7071 {
7072 goto illegal_op;
7073 }
7074 /* load/store byte/word */
7075 rn = (insn >> 16) & 0xf;
7076 rd = (insn >> 12) & 0xf;
b0109805 7077 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7078 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7079 if (insn & (1 << 24))
b0109805 7080 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7081 if (insn & (1 << 20)) {
7082 /* load */
9ee6e8bb 7083 if (insn & (1 << 22)) {
b0109805 7084 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7085 } else {
b0109805 7086 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7087 }
9ee6e8bb
PB
7088 } else {
7089 /* store */
b0109805 7090 tmp = load_reg(s, rd);
9ee6e8bb 7091 if (insn & (1 << 22))
b0109805 7092 gen_st8(tmp, tmp2, i);
9ee6e8bb 7093 else
b0109805 7094 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7095 }
7096 if (!(insn & (1 << 24))) {
b0109805
PB
7097 gen_add_data_offset(s, insn, tmp2);
7098 store_reg(s, rn, tmp2);
7099 } else if (insn & (1 << 21)) {
7100 store_reg(s, rn, tmp2);
7101 } else {
7102 dead_tmp(tmp2);
9ee6e8bb
PB
7103 }
7104 if (insn & (1 << 20)) {
7105 /* Complete the load. */
7106 if (rd == 15)
b0109805 7107 gen_bx(s, tmp);
9ee6e8bb 7108 else
b0109805 7109 store_reg(s, rd, tmp);
9ee6e8bb
PB
7110 }
7111 break;
7112 case 0x08:
7113 case 0x09:
7114 {
7115 int j, n, user, loaded_base;
b0109805 7116 TCGv loaded_var;
9ee6e8bb
PB
7117 /* load/store multiple words */
7118 /* XXX: store correct base if write back */
7119 user = 0;
7120 if (insn & (1 << 22)) {
7121 if (IS_USER(s))
7122 goto illegal_op; /* only usable in supervisor mode */
7123
7124 if ((insn & (1 << 15)) == 0)
7125 user = 1;
7126 }
7127 rn = (insn >> 16) & 0xf;
b0109805 7128 addr = load_reg(s, rn);
9ee6e8bb
PB
7129
7130 /* compute total size */
7131 loaded_base = 0;
a50f5b91 7132 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7133 n = 0;
7134 for(i=0;i<16;i++) {
7135 if (insn & (1 << i))
7136 n++;
7137 }
7138 /* XXX: test invalid n == 0 case ? */
7139 if (insn & (1 << 23)) {
7140 if (insn & (1 << 24)) {
7141 /* pre increment */
b0109805 7142 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7143 } else {
7144 /* post increment */
7145 }
7146 } else {
7147 if (insn & (1 << 24)) {
7148 /* pre decrement */
b0109805 7149 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7150 } else {
7151 /* post decrement */
7152 if (n != 1)
b0109805 7153 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7154 }
7155 }
7156 j = 0;
7157 for(i=0;i<16;i++) {
7158 if (insn & (1 << i)) {
7159 if (insn & (1 << 20)) {
7160 /* load */
b0109805 7161 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7162 if (i == 15) {
b0109805 7163 gen_bx(s, tmp);
9ee6e8bb 7164 } else if (user) {
b75263d6
JR
7165 tmp2 = tcg_const_i32(i);
7166 gen_helper_set_user_reg(tmp2, tmp);
7167 tcg_temp_free_i32(tmp2);
b0109805 7168 dead_tmp(tmp);
9ee6e8bb 7169 } else if (i == rn) {
b0109805 7170 loaded_var = tmp;
9ee6e8bb
PB
7171 loaded_base = 1;
7172 } else {
b0109805 7173 store_reg(s, i, tmp);
9ee6e8bb
PB
7174 }
7175 } else {
7176 /* store */
7177 if (i == 15) {
7178 /* special case: r15 = PC + 8 */
7179 val = (long)s->pc + 4;
b0109805
PB
7180 tmp = new_tmp();
7181 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7182 } else if (user) {
b0109805 7183 tmp = new_tmp();
b75263d6
JR
7184 tmp2 = tcg_const_i32(i);
7185 gen_helper_get_user_reg(tmp, tmp2);
7186 tcg_temp_free_i32(tmp2);
9ee6e8bb 7187 } else {
b0109805 7188 tmp = load_reg(s, i);
9ee6e8bb 7189 }
b0109805 7190 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7191 }
7192 j++;
7193 /* no need to add after the last transfer */
7194 if (j != n)
b0109805 7195 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7196 }
7197 }
7198 if (insn & (1 << 21)) {
7199 /* write back */
7200 if (insn & (1 << 23)) {
7201 if (insn & (1 << 24)) {
7202 /* pre increment */
7203 } else {
7204 /* post increment */
b0109805 7205 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7206 }
7207 } else {
7208 if (insn & (1 << 24)) {
7209 /* pre decrement */
7210 if (n != 1)
b0109805 7211 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7212 } else {
7213 /* post decrement */
b0109805 7214 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7215 }
7216 }
b0109805
PB
7217 store_reg(s, rn, addr);
7218 } else {
7219 dead_tmp(addr);
9ee6e8bb
PB
7220 }
7221 if (loaded_base) {
b0109805 7222 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7223 }
7224 if ((insn & (1 << 22)) && !user) {
7225 /* Restore CPSR from SPSR. */
d9ba4830
PB
7226 tmp = load_cpu_field(spsr);
7227 gen_set_cpsr(tmp, 0xffffffff);
7228 dead_tmp(tmp);
9ee6e8bb
PB
7229 s->is_jmp = DISAS_UPDATE;
7230 }
7231 }
7232 break;
7233 case 0xa:
7234 case 0xb:
7235 {
7236 int32_t offset;
7237
7238 /* branch (and link) */
7239 val = (int32_t)s->pc;
7240 if (insn & (1 << 24)) {
5e3f878a
PB
7241 tmp = new_tmp();
7242 tcg_gen_movi_i32(tmp, val);
7243 store_reg(s, 14, tmp);
9ee6e8bb
PB
7244 }
7245 offset = (((int32_t)insn << 8) >> 8);
7246 val += (offset << 2) + 4;
7247 gen_jmp(s, val);
7248 }
7249 break;
7250 case 0xc:
7251 case 0xd:
7252 case 0xe:
7253 /* Coprocessor. */
7254 if (disas_coproc_insn(env, s, insn))
7255 goto illegal_op;
7256 break;
7257 case 0xf:
7258 /* swi */
5e3f878a 7259 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7260 s->is_jmp = DISAS_SWI;
7261 break;
7262 default:
7263 illegal_op:
7264 gen_set_condexec(s);
5e3f878a 7265 gen_set_pc_im(s->pc - 4);
d9ba4830 7266 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7267 s->is_jmp = DISAS_JUMP;
7268 break;
7269 }
7270 }
7271}
7272
7273/* Return true if this is a Thumb-2 logical op. */
7274static int
7275thumb2_logic_op(int op)
7276{
7277 return (op < 8);
7278}
7279
7280/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7281 then set condition code flags based on the result of the operation.
7282 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7283 to the high bit of T1.
7284 Returns zero if the opcode is valid. */
7285
7286static int
396e467c 7287gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7288{
7289 int logic_cc;
7290
7291 logic_cc = 0;
7292 switch (op) {
7293 case 0: /* and */
396e467c 7294 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7295 logic_cc = conds;
7296 break;
7297 case 1: /* bic */
f669df27 7298 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7299 logic_cc = conds;
7300 break;
7301 case 2: /* orr */
396e467c 7302 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7303 logic_cc = conds;
7304 break;
7305 case 3: /* orn */
396e467c
FN
7306 tcg_gen_not_i32(t1, t1);
7307 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7308 logic_cc = conds;
7309 break;
7310 case 4: /* eor */
396e467c 7311 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7312 logic_cc = conds;
7313 break;
7314 case 8: /* add */
7315 if (conds)
396e467c 7316 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7317 else
396e467c 7318 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7319 break;
7320 case 10: /* adc */
7321 if (conds)
396e467c 7322 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7323 else
396e467c 7324 gen_adc(t0, t1);
9ee6e8bb
PB
7325 break;
7326 case 11: /* sbc */
7327 if (conds)
396e467c 7328 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7329 else
396e467c 7330 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7331 break;
7332 case 13: /* sub */
7333 if (conds)
396e467c 7334 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7335 else
396e467c 7336 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7337 break;
7338 case 14: /* rsb */
7339 if (conds)
396e467c 7340 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7341 else
396e467c 7342 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7343 break;
7344 default: /* 5, 6, 7, 9, 12, 15. */
7345 return 1;
7346 }
7347 if (logic_cc) {
396e467c 7348 gen_logic_CC(t0);
9ee6e8bb 7349 if (shifter_out)
396e467c 7350 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7351 }
7352 return 0;
7353}
7354
7355/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7356 is not legal. */
7357static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7358{
b0109805 7359 uint32_t insn, imm, shift, offset;
9ee6e8bb 7360 uint32_t rd, rn, rm, rs;
b26eefb6 7361 TCGv tmp;
6ddbc6e4
PB
7362 TCGv tmp2;
7363 TCGv tmp3;
b0109805 7364 TCGv addr;
a7812ae4 7365 TCGv_i64 tmp64;
9ee6e8bb
PB
7366 int op;
7367 int shiftop;
7368 int conds;
7369 int logic_cc;
7370
7371 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7372 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7373 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7374 16-bit instructions to get correct prefetch abort behavior. */
7375 insn = insn_hw1;
7376 if ((insn & (1 << 12)) == 0) {
7377 /* Second half of blx. */
7378 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7379 tmp = load_reg(s, 14);
7380 tcg_gen_addi_i32(tmp, tmp, offset);
7381 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7382
d9ba4830 7383 tmp2 = new_tmp();
b0109805 7384 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7385 store_reg(s, 14, tmp2);
7386 gen_bx(s, tmp);
9ee6e8bb
PB
7387 return 0;
7388 }
7389 if (insn & (1 << 11)) {
7390 /* Second half of bl. */
7391 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7392 tmp = load_reg(s, 14);
6a0d8a1d 7393 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7394
d9ba4830 7395 tmp2 = new_tmp();
b0109805 7396 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7397 store_reg(s, 14, tmp2);
7398 gen_bx(s, tmp);
9ee6e8bb
PB
7399 return 0;
7400 }
7401 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7402 /* Instruction spans a page boundary. Implement it as two
7403 16-bit instructions in case the second half causes an
7404 prefetch abort. */
7405 offset = ((int32_t)insn << 21) >> 9;
396e467c 7406 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7407 return 0;
7408 }
7409 /* Fall through to 32-bit decode. */
7410 }
7411
7412 insn = lduw_code(s->pc);
7413 s->pc += 2;
7414 insn |= (uint32_t)insn_hw1 << 16;
7415
7416 if ((insn & 0xf800e800) != 0xf000e800) {
7417 ARCH(6T2);
7418 }
7419
7420 rn = (insn >> 16) & 0xf;
7421 rs = (insn >> 12) & 0xf;
7422 rd = (insn >> 8) & 0xf;
7423 rm = insn & 0xf;
7424 switch ((insn >> 25) & 0xf) {
7425 case 0: case 1: case 2: case 3:
7426 /* 16-bit instructions. Should never happen. */
7427 abort();
7428 case 4:
7429 if (insn & (1 << 22)) {
7430 /* Other load/store, table branch. */
7431 if (insn & 0x01200000) {
7432 /* Load/store doubleword. */
7433 if (rn == 15) {
b0109805
PB
7434 addr = new_tmp();
7435 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7436 } else {
b0109805 7437 addr = load_reg(s, rn);
9ee6e8bb
PB
7438 }
7439 offset = (insn & 0xff) * 4;
7440 if ((insn & (1 << 23)) == 0)
7441 offset = -offset;
7442 if (insn & (1 << 24)) {
b0109805 7443 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7444 offset = 0;
7445 }
7446 if (insn & (1 << 20)) {
7447 /* ldrd */
b0109805
PB
7448 tmp = gen_ld32(addr, IS_USER(s));
7449 store_reg(s, rs, tmp);
7450 tcg_gen_addi_i32(addr, addr, 4);
7451 tmp = gen_ld32(addr, IS_USER(s));
7452 store_reg(s, rd, tmp);
9ee6e8bb
PB
7453 } else {
7454 /* strd */
b0109805
PB
7455 tmp = load_reg(s, rs);
7456 gen_st32(tmp, addr, IS_USER(s));
7457 tcg_gen_addi_i32(addr, addr, 4);
7458 tmp = load_reg(s, rd);
7459 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7460 }
7461 if (insn & (1 << 21)) {
7462 /* Base writeback. */
7463 if (rn == 15)
7464 goto illegal_op;
b0109805
PB
7465 tcg_gen_addi_i32(addr, addr, offset - 4);
7466 store_reg(s, rn, addr);
7467 } else {
7468 dead_tmp(addr);
9ee6e8bb
PB
7469 }
7470 } else if ((insn & (1 << 23)) == 0) {
7471 /* Load/store exclusive word. */
3174f8e9 7472 addr = tcg_temp_local_new();
98a46317 7473 load_reg_var(s, addr, rn);
426f5abc 7474 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7475 if (insn & (1 << 20)) {
426f5abc 7476 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7477 } else {
426f5abc 7478 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7479 }
3174f8e9 7480 tcg_temp_free(addr);
9ee6e8bb
PB
7481 } else if ((insn & (1 << 6)) == 0) {
7482 /* Table Branch. */
7483 if (rn == 15) {
b0109805
PB
7484 addr = new_tmp();
7485 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7486 } else {
b0109805 7487 addr = load_reg(s, rn);
9ee6e8bb 7488 }
b26eefb6 7489 tmp = load_reg(s, rm);
b0109805 7490 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7491 if (insn & (1 << 4)) {
7492 /* tbh */
b0109805 7493 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7494 dead_tmp(tmp);
b0109805 7495 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7496 } else { /* tbb */
b26eefb6 7497 dead_tmp(tmp);
b0109805 7498 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7499 }
b0109805
PB
7500 dead_tmp(addr);
7501 tcg_gen_shli_i32(tmp, tmp, 1);
7502 tcg_gen_addi_i32(tmp, tmp, s->pc);
7503 store_reg(s, 15, tmp);
9ee6e8bb
PB
7504 } else {
7505 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7506 ARCH(7);
9ee6e8bb 7507 op = (insn >> 4) & 0x3;
426f5abc
PB
7508 if (op == 2) {
7509 goto illegal_op;
7510 }
3174f8e9 7511 addr = tcg_temp_local_new();
98a46317 7512 load_reg_var(s, addr, rn);
9ee6e8bb 7513 if (insn & (1 << 20)) {
426f5abc 7514 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7515 } else {
426f5abc 7516 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7517 }
3174f8e9 7518 tcg_temp_free(addr);
9ee6e8bb
PB
7519 }
7520 } else {
7521 /* Load/store multiple, RFE, SRS. */
7522 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7523 /* Not available in user mode. */
b0109805 7524 if (IS_USER(s))
9ee6e8bb
PB
7525 goto illegal_op;
7526 if (insn & (1 << 20)) {
7527 /* rfe */
b0109805
PB
7528 addr = load_reg(s, rn);
7529 if ((insn & (1 << 24)) == 0)
7530 tcg_gen_addi_i32(addr, addr, -8);
7531 /* Load PC into tmp and CPSR into tmp2. */
7532 tmp = gen_ld32(addr, 0);
7533 tcg_gen_addi_i32(addr, addr, 4);
7534 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7535 if (insn & (1 << 21)) {
7536 /* Base writeback. */
b0109805
PB
7537 if (insn & (1 << 24)) {
7538 tcg_gen_addi_i32(addr, addr, 4);
7539 } else {
7540 tcg_gen_addi_i32(addr, addr, -4);
7541 }
7542 store_reg(s, rn, addr);
7543 } else {
7544 dead_tmp(addr);
9ee6e8bb 7545 }
b0109805 7546 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7547 } else {
7548 /* srs */
7549 op = (insn & 0x1f);
7550 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7551 addr = load_reg(s, 13);
9ee6e8bb 7552 } else {
b0109805 7553 addr = new_tmp();
b75263d6
JR
7554 tmp = tcg_const_i32(op);
7555 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7556 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7557 }
7558 if ((insn & (1 << 24)) == 0) {
b0109805 7559 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7560 }
b0109805
PB
7561 tmp = load_reg(s, 14);
7562 gen_st32(tmp, addr, 0);
7563 tcg_gen_addi_i32(addr, addr, 4);
7564 tmp = new_tmp();
7565 gen_helper_cpsr_read(tmp);
7566 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7567 if (insn & (1 << 21)) {
7568 if ((insn & (1 << 24)) == 0) {
b0109805 7569 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7570 } else {
b0109805 7571 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7572 }
7573 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7574 store_reg(s, 13, addr);
9ee6e8bb 7575 } else {
b75263d6
JR
7576 tmp = tcg_const_i32(op);
7577 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7578 tcg_temp_free_i32(tmp);
9ee6e8bb 7579 }
b0109805
PB
7580 } else {
7581 dead_tmp(addr);
9ee6e8bb
PB
7582 }
7583 }
7584 } else {
7585 int i;
7586 /* Load/store multiple. */
b0109805 7587 addr = load_reg(s, rn);
9ee6e8bb
PB
7588 offset = 0;
7589 for (i = 0; i < 16; i++) {
7590 if (insn & (1 << i))
7591 offset += 4;
7592 }
7593 if (insn & (1 << 24)) {
b0109805 7594 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7595 }
7596
7597 for (i = 0; i < 16; i++) {
7598 if ((insn & (1 << i)) == 0)
7599 continue;
7600 if (insn & (1 << 20)) {
7601 /* Load. */
b0109805 7602 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7603 if (i == 15) {
b0109805 7604 gen_bx(s, tmp);
9ee6e8bb 7605 } else {
b0109805 7606 store_reg(s, i, tmp);
9ee6e8bb
PB
7607 }
7608 } else {
7609 /* Store. */
b0109805
PB
7610 tmp = load_reg(s, i);
7611 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7612 }
b0109805 7613 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7614 }
7615 if (insn & (1 << 21)) {
7616 /* Base register writeback. */
7617 if (insn & (1 << 24)) {
b0109805 7618 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7619 }
7620 /* Fault if writeback register is in register list. */
7621 if (insn & (1 << rn))
7622 goto illegal_op;
b0109805
PB
7623 store_reg(s, rn, addr);
7624 } else {
7625 dead_tmp(addr);
9ee6e8bb
PB
7626 }
7627 }
7628 }
7629 break;
2af9ab77
JB
7630 case 5:
7631
9ee6e8bb 7632 op = (insn >> 21) & 0xf;
2af9ab77
JB
7633 if (op == 6) {
7634 /* Halfword pack. */
7635 tmp = load_reg(s, rn);
7636 tmp2 = load_reg(s, rm);
7637 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7638 if (insn & (1 << 5)) {
7639 /* pkhtb */
7640 if (shift == 0)
7641 shift = 31;
7642 tcg_gen_sari_i32(tmp2, tmp2, shift);
7643 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7644 tcg_gen_ext16u_i32(tmp2, tmp2);
7645 } else {
7646 /* pkhbt */
7647 if (shift)
7648 tcg_gen_shli_i32(tmp2, tmp2, shift);
7649 tcg_gen_ext16u_i32(tmp, tmp);
7650 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7651 }
7652 tcg_gen_or_i32(tmp, tmp, tmp2);
7653 dead_tmp(tmp2);
3174f8e9
FN
7654 store_reg(s, rd, tmp);
7655 } else {
2af9ab77
JB
7656 /* Data processing register constant shift. */
7657 if (rn == 15) {
7658 tmp = new_tmp();
7659 tcg_gen_movi_i32(tmp, 0);
7660 } else {
7661 tmp = load_reg(s, rn);
7662 }
7663 tmp2 = load_reg(s, rm);
7664
7665 shiftop = (insn >> 4) & 3;
7666 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7667 conds = (insn & (1 << 20)) != 0;
7668 logic_cc = (conds && thumb2_logic_op(op));
7669 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7670 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7671 goto illegal_op;
7672 dead_tmp(tmp2);
7673 if (rd != 15) {
7674 store_reg(s, rd, tmp);
7675 } else {
7676 dead_tmp(tmp);
7677 }
3174f8e9 7678 }
9ee6e8bb
PB
7679 break;
7680 case 13: /* Misc data processing. */
7681 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7682 if (op < 4 && (insn & 0xf000) != 0xf000)
7683 goto illegal_op;
7684 switch (op) {
7685 case 0: /* Register controlled shift. */
8984bd2e
PB
7686 tmp = load_reg(s, rn);
7687 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7688 if ((insn & 0x70) != 0)
7689 goto illegal_op;
7690 op = (insn >> 21) & 3;
8984bd2e
PB
7691 logic_cc = (insn & (1 << 20)) != 0;
7692 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7693 if (logic_cc)
7694 gen_logic_CC(tmp);
21aeb343 7695 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7696 break;
7697 case 1: /* Sign/zero extend. */
5e3f878a 7698 tmp = load_reg(s, rm);
9ee6e8bb
PB
7699 shift = (insn >> 4) & 3;
7700 /* ??? In many cases it's not neccessary to do a
7701 rotate, a shift is sufficient. */
7702 if (shift != 0)
f669df27 7703 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7704 op = (insn >> 20) & 7;
7705 switch (op) {
5e3f878a
PB
7706 case 0: gen_sxth(tmp); break;
7707 case 1: gen_uxth(tmp); break;
7708 case 2: gen_sxtb16(tmp); break;
7709 case 3: gen_uxtb16(tmp); break;
7710 case 4: gen_sxtb(tmp); break;
7711 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7712 default: goto illegal_op;
7713 }
7714 if (rn != 15) {
5e3f878a 7715 tmp2 = load_reg(s, rn);
9ee6e8bb 7716 if ((op >> 1) == 1) {
5e3f878a 7717 gen_add16(tmp, tmp2);
9ee6e8bb 7718 } else {
5e3f878a
PB
7719 tcg_gen_add_i32(tmp, tmp, tmp2);
7720 dead_tmp(tmp2);
9ee6e8bb
PB
7721 }
7722 }
5e3f878a 7723 store_reg(s, rd, tmp);
9ee6e8bb
PB
7724 break;
7725 case 2: /* SIMD add/subtract. */
7726 op = (insn >> 20) & 7;
7727 shift = (insn >> 4) & 7;
7728 if ((op & 3) == 3 || (shift & 3) == 3)
7729 goto illegal_op;
6ddbc6e4
PB
7730 tmp = load_reg(s, rn);
7731 tmp2 = load_reg(s, rm);
7732 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7733 dead_tmp(tmp2);
7734 store_reg(s, rd, tmp);
9ee6e8bb
PB
7735 break;
7736 case 3: /* Other data processing. */
7737 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7738 if (op < 4) {
7739 /* Saturating add/subtract. */
d9ba4830
PB
7740 tmp = load_reg(s, rn);
7741 tmp2 = load_reg(s, rm);
9ee6e8bb 7742 if (op & 1)
4809c612
JB
7743 gen_helper_double_saturate(tmp, tmp);
7744 if (op & 2)
d9ba4830 7745 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7746 else
d9ba4830
PB
7747 gen_helper_add_saturate(tmp, tmp, tmp2);
7748 dead_tmp(tmp2);
9ee6e8bb 7749 } else {
d9ba4830 7750 tmp = load_reg(s, rn);
9ee6e8bb
PB
7751 switch (op) {
7752 case 0x0a: /* rbit */
d9ba4830 7753 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7754 break;
7755 case 0x08: /* rev */
66896cb8 7756 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7757 break;
7758 case 0x09: /* rev16 */
d9ba4830 7759 gen_rev16(tmp);
9ee6e8bb
PB
7760 break;
7761 case 0x0b: /* revsh */
d9ba4830 7762 gen_revsh(tmp);
9ee6e8bb
PB
7763 break;
7764 case 0x10: /* sel */
d9ba4830 7765 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7766 tmp3 = new_tmp();
7767 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7768 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7769 dead_tmp(tmp3);
d9ba4830 7770 dead_tmp(tmp2);
9ee6e8bb
PB
7771 break;
7772 case 0x18: /* clz */
d9ba4830 7773 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7774 break;
7775 default:
7776 goto illegal_op;
7777 }
7778 }
d9ba4830 7779 store_reg(s, rd, tmp);
9ee6e8bb
PB
7780 break;
7781 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7782 op = (insn >> 4) & 0xf;
d9ba4830
PB
7783 tmp = load_reg(s, rn);
7784 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7785 switch ((insn >> 20) & 7) {
7786 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7787 tcg_gen_mul_i32(tmp, tmp, tmp2);
7788 dead_tmp(tmp2);
9ee6e8bb 7789 if (rs != 15) {
d9ba4830 7790 tmp2 = load_reg(s, rs);
9ee6e8bb 7791 if (op)
d9ba4830 7792 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7793 else
d9ba4830
PB
7794 tcg_gen_add_i32(tmp, tmp, tmp2);
7795 dead_tmp(tmp2);
9ee6e8bb 7796 }
9ee6e8bb
PB
7797 break;
7798 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7799 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7800 dead_tmp(tmp2);
9ee6e8bb 7801 if (rs != 15) {
d9ba4830
PB
7802 tmp2 = load_reg(s, rs);
7803 gen_helper_add_setq(tmp, tmp, tmp2);
7804 dead_tmp(tmp2);
9ee6e8bb 7805 }
9ee6e8bb
PB
7806 break;
7807 case 2: /* Dual multiply add. */
7808 case 4: /* Dual multiply subtract. */
7809 if (op)
d9ba4830
PB
7810 gen_swap_half(tmp2);
7811 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7812 /* This addition cannot overflow. */
7813 if (insn & (1 << 22)) {
d9ba4830 7814 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7815 } else {
d9ba4830 7816 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7817 }
d9ba4830 7818 dead_tmp(tmp2);
9ee6e8bb
PB
7819 if (rs != 15)
7820 {
d9ba4830
PB
7821 tmp2 = load_reg(s, rs);
7822 gen_helper_add_setq(tmp, tmp, tmp2);
7823 dead_tmp(tmp2);
9ee6e8bb 7824 }
9ee6e8bb
PB
7825 break;
7826 case 3: /* 32 * 16 -> 32msb */
7827 if (op)
d9ba4830 7828 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7829 else
d9ba4830 7830 gen_sxth(tmp2);
a7812ae4
PB
7831 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7832 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7833 tmp = new_tmp();
a7812ae4 7834 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7835 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7836 if (rs != 15)
7837 {
d9ba4830
PB
7838 tmp2 = load_reg(s, rs);
7839 gen_helper_add_setq(tmp, tmp, tmp2);
7840 dead_tmp(tmp2);
9ee6e8bb 7841 }
9ee6e8bb
PB
7842 break;
7843 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7844 gen_imull(tmp, tmp2);
7845 if (insn & (1 << 5)) {
7846 gen_roundqd(tmp, tmp2);
7847 dead_tmp(tmp2);
7848 } else {
7849 dead_tmp(tmp);
7850 tmp = tmp2;
7851 }
9ee6e8bb 7852 if (rs != 15) {
d9ba4830 7853 tmp2 = load_reg(s, rs);
9ee6e8bb 7854 if (insn & (1 << 21)) {
d9ba4830 7855 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7856 } else {
d9ba4830 7857 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7858 }
d9ba4830 7859 dead_tmp(tmp2);
2c0262af 7860 }
9ee6e8bb
PB
7861 break;
7862 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7863 gen_helper_usad8(tmp, tmp, tmp2);
7864 dead_tmp(tmp2);
9ee6e8bb 7865 if (rs != 15) {
d9ba4830
PB
7866 tmp2 = load_reg(s, rs);
7867 tcg_gen_add_i32(tmp, tmp, tmp2);
7868 dead_tmp(tmp2);
5fd46862 7869 }
9ee6e8bb 7870 break;
2c0262af 7871 }
d9ba4830 7872 store_reg(s, rd, tmp);
2c0262af 7873 break;
9ee6e8bb
PB
7874 case 6: case 7: /* 64-bit multiply, Divide. */
7875 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7876 tmp = load_reg(s, rn);
7877 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7878 if ((op & 0x50) == 0x10) {
7879 /* sdiv, udiv */
7880 if (!arm_feature(env, ARM_FEATURE_DIV))
7881 goto illegal_op;
7882 if (op & 0x20)
5e3f878a 7883 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7884 else
5e3f878a
PB
7885 gen_helper_sdiv(tmp, tmp, tmp2);
7886 dead_tmp(tmp2);
7887 store_reg(s, rd, tmp);
9ee6e8bb
PB
7888 } else if ((op & 0xe) == 0xc) {
7889 /* Dual multiply accumulate long. */
7890 if (op & 1)
5e3f878a
PB
7891 gen_swap_half(tmp2);
7892 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7893 if (op & 0x10) {
5e3f878a 7894 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7895 } else {
5e3f878a 7896 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7897 }
5e3f878a 7898 dead_tmp(tmp2);
a7812ae4
PB
7899 /* BUGFIX */
7900 tmp64 = tcg_temp_new_i64();
7901 tcg_gen_ext_i32_i64(tmp64, tmp);
7902 dead_tmp(tmp);
7903 gen_addq(s, tmp64, rs, rd);
7904 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7905 tcg_temp_free_i64(tmp64);
2c0262af 7906 } else {
9ee6e8bb
PB
7907 if (op & 0x20) {
7908 /* Unsigned 64-bit multiply */
a7812ae4 7909 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7910 } else {
9ee6e8bb
PB
7911 if (op & 8) {
7912 /* smlalxy */
5e3f878a
PB
7913 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7914 dead_tmp(tmp2);
a7812ae4
PB
7915 tmp64 = tcg_temp_new_i64();
7916 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7917 dead_tmp(tmp);
9ee6e8bb
PB
7918 } else {
7919 /* Signed 64-bit multiply */
a7812ae4 7920 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7921 }
b5ff1b31 7922 }
9ee6e8bb
PB
7923 if (op & 4) {
7924 /* umaal */
a7812ae4
PB
7925 gen_addq_lo(s, tmp64, rs);
7926 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7927 } else if (op & 0x40) {
7928 /* 64-bit accumulate. */
a7812ae4 7929 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7930 }
a7812ae4 7931 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7932 tcg_temp_free_i64(tmp64);
5fd46862 7933 }
2c0262af 7934 break;
9ee6e8bb
PB
7935 }
7936 break;
7937 case 6: case 7: case 14: case 15:
7938 /* Coprocessor. */
7939 if (((insn >> 24) & 3) == 3) {
7940 /* Translate into the equivalent ARM encoding. */
7941 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7942 if (disas_neon_data_insn(env, s, insn))
7943 goto illegal_op;
7944 } else {
7945 if (insn & (1 << 28))
7946 goto illegal_op;
7947 if (disas_coproc_insn (env, s, insn))
7948 goto illegal_op;
7949 }
7950 break;
7951 case 8: case 9: case 10: case 11:
7952 if (insn & (1 << 15)) {
7953 /* Branches, misc control. */
7954 if (insn & 0x5000) {
7955 /* Unconditional branch. */
7956 /* signextend(hw1[10:0]) -> offset[:12]. */
7957 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7958 /* hw1[10:0] -> offset[11:1]. */
7959 offset |= (insn & 0x7ff) << 1;
7960 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7961 offset[24:22] already have the same value because of the
7962 sign extension above. */
7963 offset ^= ((~insn) & (1 << 13)) << 10;
7964 offset ^= ((~insn) & (1 << 11)) << 11;
7965
9ee6e8bb
PB
7966 if (insn & (1 << 14)) {
7967 /* Branch and link. */
3174f8e9 7968 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7969 }
3b46e624 7970
b0109805 7971 offset += s->pc;
9ee6e8bb
PB
7972 if (insn & (1 << 12)) {
7973 /* b/bl */
b0109805 7974 gen_jmp(s, offset);
9ee6e8bb
PB
7975 } else {
7976 /* blx */
b0109805
PB
7977 offset &= ~(uint32_t)2;
7978 gen_bx_im(s, offset);
2c0262af 7979 }
9ee6e8bb
PB
7980 } else if (((insn >> 23) & 7) == 7) {
7981 /* Misc control */
7982 if (insn & (1 << 13))
7983 goto illegal_op;
7984
7985 if (insn & (1 << 26)) {
7986 /* Secure monitor call (v6Z) */
7987 goto illegal_op; /* not implemented. */
2c0262af 7988 } else {
9ee6e8bb
PB
7989 op = (insn >> 20) & 7;
7990 switch (op) {
7991 case 0: /* msr cpsr. */
7992 if (IS_M(env)) {
8984bd2e
PB
7993 tmp = load_reg(s, rn);
7994 addr = tcg_const_i32(insn & 0xff);
7995 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6
JR
7996 tcg_temp_free_i32(addr);
7997 dead_tmp(tmp);
9ee6e8bb
PB
7998 gen_lookup_tb(s);
7999 break;
8000 }
8001 /* fall through */
8002 case 1: /* msr spsr. */
8003 if (IS_M(env))
8004 goto illegal_op;
2fbac54b
FN
8005 tmp = load_reg(s, rn);
8006 if (gen_set_psr(s,
9ee6e8bb 8007 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8008 op == 1, tmp))
9ee6e8bb
PB
8009 goto illegal_op;
8010 break;
8011 case 2: /* cps, nop-hint. */
8012 if (((insn >> 8) & 7) == 0) {
8013 gen_nop_hint(s, insn & 0xff);
8014 }
8015 /* Implemented as NOP in user mode. */
8016 if (IS_USER(s))
8017 break;
8018 offset = 0;
8019 imm = 0;
8020 if (insn & (1 << 10)) {
8021 if (insn & (1 << 7))
8022 offset |= CPSR_A;
8023 if (insn & (1 << 6))
8024 offset |= CPSR_I;
8025 if (insn & (1 << 5))
8026 offset |= CPSR_F;
8027 if (insn & (1 << 9))
8028 imm = CPSR_A | CPSR_I | CPSR_F;
8029 }
8030 if (insn & (1 << 8)) {
8031 offset |= 0x1f;
8032 imm |= (insn & 0x1f);
8033 }
8034 if (offset) {
2fbac54b 8035 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8036 }
8037 break;
8038 case 3: /* Special control operations. */
426f5abc 8039 ARCH(7);
9ee6e8bb
PB
8040 op = (insn >> 4) & 0xf;
8041 switch (op) {
8042 case 2: /* clrex */
426f5abc 8043 gen_clrex(s);
9ee6e8bb
PB
8044 break;
8045 case 4: /* dsb */
8046 case 5: /* dmb */
8047 case 6: /* isb */
8048 /* These execute as NOPs. */
9ee6e8bb
PB
8049 break;
8050 default:
8051 goto illegal_op;
8052 }
8053 break;
8054 case 4: /* bxj */
8055 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8056 tmp = load_reg(s, rn);
8057 gen_bx(s, tmp);
9ee6e8bb
PB
8058 break;
8059 case 5: /* Exception return. */
b8b45b68
RV
8060 if (IS_USER(s)) {
8061 goto illegal_op;
8062 }
8063 if (rn != 14 || rd != 15) {
8064 goto illegal_op;
8065 }
8066 tmp = load_reg(s, rn);
8067 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8068 gen_exception_return(s, tmp);
8069 break;
9ee6e8bb 8070 case 6: /* mrs cpsr. */
8984bd2e 8071 tmp = new_tmp();
9ee6e8bb 8072 if (IS_M(env)) {
8984bd2e
PB
8073 addr = tcg_const_i32(insn & 0xff);
8074 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8075 tcg_temp_free_i32(addr);
9ee6e8bb 8076 } else {
8984bd2e 8077 gen_helper_cpsr_read(tmp);
9ee6e8bb 8078 }
8984bd2e 8079 store_reg(s, rd, tmp);
9ee6e8bb
PB
8080 break;
8081 case 7: /* mrs spsr. */
8082 /* Not accessible in user mode. */
8083 if (IS_USER(s) || IS_M(env))
8084 goto illegal_op;
d9ba4830
PB
8085 tmp = load_cpu_field(spsr);
8086 store_reg(s, rd, tmp);
9ee6e8bb 8087 break;
2c0262af
FB
8088 }
8089 }
9ee6e8bb
PB
8090 } else {
8091 /* Conditional branch. */
8092 op = (insn >> 22) & 0xf;
8093 /* Generate a conditional jump to next instruction. */
8094 s->condlabel = gen_new_label();
d9ba4830 8095 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8096 s->condjmp = 1;
8097
8098 /* offset[11:1] = insn[10:0] */
8099 offset = (insn & 0x7ff) << 1;
8100 /* offset[17:12] = insn[21:16]. */
8101 offset |= (insn & 0x003f0000) >> 4;
8102 /* offset[31:20] = insn[26]. */
8103 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8104 /* offset[18] = insn[13]. */
8105 offset |= (insn & (1 << 13)) << 5;
8106 /* offset[19] = insn[11]. */
8107 offset |= (insn & (1 << 11)) << 8;
8108
8109 /* jump to the offset */
b0109805 8110 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8111 }
8112 } else {
8113 /* Data processing immediate. */
8114 if (insn & (1 << 25)) {
8115 if (insn & (1 << 24)) {
8116 if (insn & (1 << 20))
8117 goto illegal_op;
8118 /* Bitfield/Saturate. */
8119 op = (insn >> 21) & 7;
8120 imm = insn & 0x1f;
8121 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
8122 if (rn == 15) {
8123 tmp = new_tmp();
8124 tcg_gen_movi_i32(tmp, 0);
8125 } else {
8126 tmp = load_reg(s, rn);
8127 }
9ee6e8bb
PB
8128 switch (op) {
8129 case 2: /* Signed bitfield extract. */
8130 imm++;
8131 if (shift + imm > 32)
8132 goto illegal_op;
8133 if (imm < 32)
6ddbc6e4 8134 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8135 break;
8136 case 6: /* Unsigned bitfield extract. */
8137 imm++;
8138 if (shift + imm > 32)
8139 goto illegal_op;
8140 if (imm < 32)
6ddbc6e4 8141 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8142 break;
8143 case 3: /* Bitfield insert/clear. */
8144 if (imm < shift)
8145 goto illegal_op;
8146 imm = imm + 1 - shift;
8147 if (imm != 32) {
6ddbc6e4 8148 tmp2 = load_reg(s, rd);
8f8e3aa4 8149 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 8150 dead_tmp(tmp2);
9ee6e8bb
PB
8151 }
8152 break;
8153 case 7:
8154 goto illegal_op;
8155 default: /* Saturate. */
9ee6e8bb
PB
8156 if (shift) {
8157 if (op & 1)
6ddbc6e4 8158 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8159 else
6ddbc6e4 8160 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8161 }
6ddbc6e4 8162 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8163 if (op & 4) {
8164 /* Unsigned. */
9ee6e8bb 8165 if ((op & 1) && shift == 0)
6ddbc6e4 8166 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8167 else
6ddbc6e4 8168 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8169 } else {
9ee6e8bb 8170 /* Signed. */
9ee6e8bb 8171 if ((op & 1) && shift == 0)
6ddbc6e4 8172 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8173 else
6ddbc6e4 8174 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8175 }
b75263d6 8176 tcg_temp_free_i32(tmp2);
9ee6e8bb 8177 break;
2c0262af 8178 }
6ddbc6e4 8179 store_reg(s, rd, tmp);
9ee6e8bb
PB
8180 } else {
8181 imm = ((insn & 0x04000000) >> 15)
8182 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8183 if (insn & (1 << 22)) {
8184 /* 16-bit immediate. */
8185 imm |= (insn >> 4) & 0xf000;
8186 if (insn & (1 << 23)) {
8187 /* movt */
5e3f878a 8188 tmp = load_reg(s, rd);
86831435 8189 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8190 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8191 } else {
9ee6e8bb 8192 /* movw */
5e3f878a
PB
8193 tmp = new_tmp();
8194 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8195 }
8196 } else {
9ee6e8bb
PB
8197 /* Add/sub 12-bit immediate. */
8198 if (rn == 15) {
b0109805 8199 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8200 if (insn & (1 << 23))
b0109805 8201 offset -= imm;
9ee6e8bb 8202 else
b0109805 8203 offset += imm;
5e3f878a
PB
8204 tmp = new_tmp();
8205 tcg_gen_movi_i32(tmp, offset);
2c0262af 8206 } else {
5e3f878a 8207 tmp = load_reg(s, rn);
9ee6e8bb 8208 if (insn & (1 << 23))
5e3f878a 8209 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8210 else
5e3f878a 8211 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8212 }
9ee6e8bb 8213 }
5e3f878a 8214 store_reg(s, rd, tmp);
191abaa2 8215 }
9ee6e8bb
PB
8216 } else {
8217 int shifter_out = 0;
8218 /* modified 12-bit immediate. */
8219 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8220 imm = (insn & 0xff);
8221 switch (shift) {
8222 case 0: /* XY */
8223 /* Nothing to do. */
8224 break;
8225 case 1: /* 00XY00XY */
8226 imm |= imm << 16;
8227 break;
8228 case 2: /* XY00XY00 */
8229 imm |= imm << 16;
8230 imm <<= 8;
8231 break;
8232 case 3: /* XYXYXYXY */
8233 imm |= imm << 16;
8234 imm |= imm << 8;
8235 break;
8236 default: /* Rotated constant. */
8237 shift = (shift << 1) | (imm >> 7);
8238 imm |= 0x80;
8239 imm = imm << (32 - shift);
8240 shifter_out = 1;
8241 break;
b5ff1b31 8242 }
3174f8e9
FN
8243 tmp2 = new_tmp();
8244 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8245 rn = (insn >> 16) & 0xf;
3174f8e9
FN
8246 if (rn == 15) {
8247 tmp = new_tmp();
8248 tcg_gen_movi_i32(tmp, 0);
8249 } else {
8250 tmp = load_reg(s, rn);
8251 }
9ee6e8bb
PB
8252 op = (insn >> 21) & 0xf;
8253 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8254 shifter_out, tmp, tmp2))
9ee6e8bb 8255 goto illegal_op;
3174f8e9 8256 dead_tmp(tmp2);
9ee6e8bb
PB
8257 rd = (insn >> 8) & 0xf;
8258 if (rd != 15) {
3174f8e9
FN
8259 store_reg(s, rd, tmp);
8260 } else {
8261 dead_tmp(tmp);
2c0262af 8262 }
2c0262af 8263 }
9ee6e8bb
PB
8264 }
8265 break;
8266 case 12: /* Load/store single data item. */
8267 {
8268 int postinc = 0;
8269 int writeback = 0;
b0109805 8270 int user;
9ee6e8bb
PB
8271 if ((insn & 0x01100000) == 0x01000000) {
8272 if (disas_neon_ls_insn(env, s, insn))
c1713132 8273 goto illegal_op;
9ee6e8bb
PB
8274 break;
8275 }
b0109805 8276 user = IS_USER(s);
9ee6e8bb 8277 if (rn == 15) {
b0109805 8278 addr = new_tmp();
9ee6e8bb
PB
8279 /* PC relative. */
8280 /* s->pc has already been incremented by 4. */
8281 imm = s->pc & 0xfffffffc;
8282 if (insn & (1 << 23))
8283 imm += insn & 0xfff;
8284 else
8285 imm -= insn & 0xfff;
b0109805 8286 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8287 } else {
b0109805 8288 addr = load_reg(s, rn);
9ee6e8bb
PB
8289 if (insn & (1 << 23)) {
8290 /* Positive offset. */
8291 imm = insn & 0xfff;
b0109805 8292 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8293 } else {
8294 op = (insn >> 8) & 7;
8295 imm = insn & 0xff;
8296 switch (op) {
8297 case 0: case 8: /* Shifted Register. */
8298 shift = (insn >> 4) & 0xf;
8299 if (shift > 3)
18c9b560 8300 goto illegal_op;
b26eefb6 8301 tmp = load_reg(s, rm);
9ee6e8bb 8302 if (shift)
b26eefb6 8303 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8304 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8305 dead_tmp(tmp);
9ee6e8bb
PB
8306 break;
8307 case 4: /* Negative offset. */
b0109805 8308 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8309 break;
8310 case 6: /* User privilege. */
b0109805
PB
8311 tcg_gen_addi_i32(addr, addr, imm);
8312 user = 1;
9ee6e8bb
PB
8313 break;
8314 case 1: /* Post-decrement. */
8315 imm = -imm;
8316 /* Fall through. */
8317 case 3: /* Post-increment. */
9ee6e8bb
PB
8318 postinc = 1;
8319 writeback = 1;
8320 break;
8321 case 5: /* Pre-decrement. */
8322 imm = -imm;
8323 /* Fall through. */
8324 case 7: /* Pre-increment. */
b0109805 8325 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8326 writeback = 1;
8327 break;
8328 default:
b7bcbe95 8329 goto illegal_op;
9ee6e8bb
PB
8330 }
8331 }
8332 }
8333 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8334 if (insn & (1 << 20)) {
8335 /* Load. */
8336 if (rs == 15 && op != 2) {
8337 if (op & 2)
b5ff1b31 8338 goto illegal_op;
9ee6e8bb
PB
8339 /* Memory hint. Implemented as NOP. */
8340 } else {
8341 switch (op) {
b0109805
PB
8342 case 0: tmp = gen_ld8u(addr, user); break;
8343 case 4: tmp = gen_ld8s(addr, user); break;
8344 case 1: tmp = gen_ld16u(addr, user); break;
8345 case 5: tmp = gen_ld16s(addr, user); break;
8346 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8347 default: goto illegal_op;
8348 }
8349 if (rs == 15) {
b0109805 8350 gen_bx(s, tmp);
9ee6e8bb 8351 } else {
b0109805 8352 store_reg(s, rs, tmp);
9ee6e8bb
PB
8353 }
8354 }
8355 } else {
8356 /* Store. */
8357 if (rs == 15)
b7bcbe95 8358 goto illegal_op;
b0109805 8359 tmp = load_reg(s, rs);
9ee6e8bb 8360 switch (op) {
b0109805
PB
8361 case 0: gen_st8(tmp, addr, user); break;
8362 case 1: gen_st16(tmp, addr, user); break;
8363 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8364 default: goto illegal_op;
b7bcbe95 8365 }
2c0262af 8366 }
9ee6e8bb 8367 if (postinc)
b0109805
PB
8368 tcg_gen_addi_i32(addr, addr, imm);
8369 if (writeback) {
8370 store_reg(s, rn, addr);
8371 } else {
8372 dead_tmp(addr);
8373 }
9ee6e8bb
PB
8374 }
8375 break;
8376 default:
8377 goto illegal_op;
2c0262af 8378 }
9ee6e8bb
PB
8379 return 0;
8380illegal_op:
8381 return 1;
2c0262af
FB
8382}
8383
9ee6e8bb 8384static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8385{
8386 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8387 int32_t offset;
8388 int i;
b26eefb6 8389 TCGv tmp;
d9ba4830 8390 TCGv tmp2;
b0109805 8391 TCGv addr;
99c475ab 8392
9ee6e8bb
PB
8393 if (s->condexec_mask) {
8394 cond = s->condexec_cond;
bedd2912
JB
8395 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8396 s->condlabel = gen_new_label();
8397 gen_test_cc(cond ^ 1, s->condlabel);
8398 s->condjmp = 1;
8399 }
9ee6e8bb
PB
8400 }
8401
b5ff1b31 8402 insn = lduw_code(s->pc);
99c475ab 8403 s->pc += 2;
b5ff1b31 8404
99c475ab
FB
8405 switch (insn >> 12) {
8406 case 0: case 1:
396e467c 8407
99c475ab
FB
8408 rd = insn & 7;
8409 op = (insn >> 11) & 3;
8410 if (op == 3) {
8411 /* add/subtract */
8412 rn = (insn >> 3) & 7;
396e467c 8413 tmp = load_reg(s, rn);
99c475ab
FB
8414 if (insn & (1 << 10)) {
8415 /* immediate */
396e467c
FN
8416 tmp2 = new_tmp();
8417 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8418 } else {
8419 /* reg */
8420 rm = (insn >> 6) & 7;
396e467c 8421 tmp2 = load_reg(s, rm);
99c475ab 8422 }
9ee6e8bb
PB
8423 if (insn & (1 << 9)) {
8424 if (s->condexec_mask)
396e467c 8425 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8426 else
396e467c 8427 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8428 } else {
8429 if (s->condexec_mask)
396e467c 8430 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8431 else
396e467c 8432 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8433 }
396e467c
FN
8434 dead_tmp(tmp2);
8435 store_reg(s, rd, tmp);
99c475ab
FB
8436 } else {
8437 /* shift immediate */
8438 rm = (insn >> 3) & 7;
8439 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8440 tmp = load_reg(s, rm);
8441 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8442 if (!s->condexec_mask)
8443 gen_logic_CC(tmp);
8444 store_reg(s, rd, tmp);
99c475ab
FB
8445 }
8446 break;
8447 case 2: case 3:
8448 /* arithmetic large immediate */
8449 op = (insn >> 11) & 3;
8450 rd = (insn >> 8) & 0x7;
396e467c
FN
8451 if (op == 0) { /* mov */
8452 tmp = new_tmp();
8453 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8454 if (!s->condexec_mask)
396e467c
FN
8455 gen_logic_CC(tmp);
8456 store_reg(s, rd, tmp);
8457 } else {
8458 tmp = load_reg(s, rd);
8459 tmp2 = new_tmp();
8460 tcg_gen_movi_i32(tmp2, insn & 0xff);
8461 switch (op) {
8462 case 1: /* cmp */
8463 gen_helper_sub_cc(tmp, tmp, tmp2);
8464 dead_tmp(tmp);
8465 dead_tmp(tmp2);
8466 break;
8467 case 2: /* add */
8468 if (s->condexec_mask)
8469 tcg_gen_add_i32(tmp, tmp, tmp2);
8470 else
8471 gen_helper_add_cc(tmp, tmp, tmp2);
8472 dead_tmp(tmp2);
8473 store_reg(s, rd, tmp);
8474 break;
8475 case 3: /* sub */
8476 if (s->condexec_mask)
8477 tcg_gen_sub_i32(tmp, tmp, tmp2);
8478 else
8479 gen_helper_sub_cc(tmp, tmp, tmp2);
8480 dead_tmp(tmp2);
8481 store_reg(s, rd, tmp);
8482 break;
8483 }
99c475ab 8484 }
99c475ab
FB
8485 break;
8486 case 4:
8487 if (insn & (1 << 11)) {
8488 rd = (insn >> 8) & 7;
5899f386
FB
8489 /* load pc-relative. Bit 1 of PC is ignored. */
8490 val = s->pc + 2 + ((insn & 0xff) * 4);
8491 val &= ~(uint32_t)2;
b0109805
PB
8492 addr = new_tmp();
8493 tcg_gen_movi_i32(addr, val);
8494 tmp = gen_ld32(addr, IS_USER(s));
8495 dead_tmp(addr);
8496 store_reg(s, rd, tmp);
99c475ab
FB
8497 break;
8498 }
8499 if (insn & (1 << 10)) {
8500 /* data processing extended or blx */
8501 rd = (insn & 7) | ((insn >> 4) & 8);
8502 rm = (insn >> 3) & 0xf;
8503 op = (insn >> 8) & 3;
8504 switch (op) {
8505 case 0: /* add */
396e467c
FN
8506 tmp = load_reg(s, rd);
8507 tmp2 = load_reg(s, rm);
8508 tcg_gen_add_i32(tmp, tmp, tmp2);
8509 dead_tmp(tmp2);
8510 store_reg(s, rd, tmp);
99c475ab
FB
8511 break;
8512 case 1: /* cmp */
396e467c
FN
8513 tmp = load_reg(s, rd);
8514 tmp2 = load_reg(s, rm);
8515 gen_helper_sub_cc(tmp, tmp, tmp2);
8516 dead_tmp(tmp2);
8517 dead_tmp(tmp);
99c475ab
FB
8518 break;
8519 case 2: /* mov/cpy */
396e467c
FN
8520 tmp = load_reg(s, rm);
8521 store_reg(s, rd, tmp);
99c475ab
FB
8522 break;
8523 case 3:/* branch [and link] exchange thumb register */
b0109805 8524 tmp = load_reg(s, rm);
99c475ab
FB
8525 if (insn & (1 << 7)) {
8526 val = (uint32_t)s->pc | 1;
b0109805
PB
8527 tmp2 = new_tmp();
8528 tcg_gen_movi_i32(tmp2, val);
8529 store_reg(s, 14, tmp2);
99c475ab 8530 }
d9ba4830 8531 gen_bx(s, tmp);
99c475ab
FB
8532 break;
8533 }
8534 break;
8535 }
8536
8537 /* data processing register */
8538 rd = insn & 7;
8539 rm = (insn >> 3) & 7;
8540 op = (insn >> 6) & 0xf;
8541 if (op == 2 || op == 3 || op == 4 || op == 7) {
8542 /* the shift/rotate ops want the operands backwards */
8543 val = rm;
8544 rm = rd;
8545 rd = val;
8546 val = 1;
8547 } else {
8548 val = 0;
8549 }
8550
396e467c
FN
8551 if (op == 9) { /* neg */
8552 tmp = new_tmp();
8553 tcg_gen_movi_i32(tmp, 0);
8554 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8555 tmp = load_reg(s, rd);
8556 } else {
8557 TCGV_UNUSED(tmp);
8558 }
99c475ab 8559
396e467c 8560 tmp2 = load_reg(s, rm);
5899f386 8561 switch (op) {
99c475ab 8562 case 0x0: /* and */
396e467c 8563 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8564 if (!s->condexec_mask)
396e467c 8565 gen_logic_CC(tmp);
99c475ab
FB
8566 break;
8567 case 0x1: /* eor */
396e467c 8568 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8569 if (!s->condexec_mask)
396e467c 8570 gen_logic_CC(tmp);
99c475ab
FB
8571 break;
8572 case 0x2: /* lsl */
9ee6e8bb 8573 if (s->condexec_mask) {
396e467c 8574 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8575 } else {
396e467c
FN
8576 gen_helper_shl_cc(tmp2, tmp2, tmp);
8577 gen_logic_CC(tmp2);
9ee6e8bb 8578 }
99c475ab
FB
8579 break;
8580 case 0x3: /* lsr */
9ee6e8bb 8581 if (s->condexec_mask) {
396e467c 8582 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8583 } else {
396e467c
FN
8584 gen_helper_shr_cc(tmp2, tmp2, tmp);
8585 gen_logic_CC(tmp2);
9ee6e8bb 8586 }
99c475ab
FB
8587 break;
8588 case 0x4: /* asr */
9ee6e8bb 8589 if (s->condexec_mask) {
396e467c 8590 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8591 } else {
396e467c
FN
8592 gen_helper_sar_cc(tmp2, tmp2, tmp);
8593 gen_logic_CC(tmp2);
9ee6e8bb 8594 }
99c475ab
FB
8595 break;
8596 case 0x5: /* adc */
9ee6e8bb 8597 if (s->condexec_mask)
396e467c 8598 gen_adc(tmp, tmp2);
9ee6e8bb 8599 else
396e467c 8600 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8601 break;
8602 case 0x6: /* sbc */
9ee6e8bb 8603 if (s->condexec_mask)
396e467c 8604 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8605 else
396e467c 8606 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8607 break;
8608 case 0x7: /* ror */
9ee6e8bb 8609 if (s->condexec_mask) {
f669df27
AJ
8610 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8611 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8612 } else {
396e467c
FN
8613 gen_helper_ror_cc(tmp2, tmp2, tmp);
8614 gen_logic_CC(tmp2);
9ee6e8bb 8615 }
99c475ab
FB
8616 break;
8617 case 0x8: /* tst */
396e467c
FN
8618 tcg_gen_and_i32(tmp, tmp, tmp2);
8619 gen_logic_CC(tmp);
99c475ab 8620 rd = 16;
5899f386 8621 break;
99c475ab 8622 case 0x9: /* neg */
9ee6e8bb 8623 if (s->condexec_mask)
396e467c 8624 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8625 else
396e467c 8626 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8627 break;
8628 case 0xa: /* cmp */
396e467c 8629 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8630 rd = 16;
8631 break;
8632 case 0xb: /* cmn */
396e467c 8633 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8634 rd = 16;
8635 break;
8636 case 0xc: /* orr */
396e467c 8637 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8638 if (!s->condexec_mask)
396e467c 8639 gen_logic_CC(tmp);
99c475ab
FB
8640 break;
8641 case 0xd: /* mul */
7b2919a0 8642 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8643 if (!s->condexec_mask)
396e467c 8644 gen_logic_CC(tmp);
99c475ab
FB
8645 break;
8646 case 0xe: /* bic */
f669df27 8647 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8648 if (!s->condexec_mask)
396e467c 8649 gen_logic_CC(tmp);
99c475ab
FB
8650 break;
8651 case 0xf: /* mvn */
396e467c 8652 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8653 if (!s->condexec_mask)
396e467c 8654 gen_logic_CC(tmp2);
99c475ab 8655 val = 1;
5899f386 8656 rm = rd;
99c475ab
FB
8657 break;
8658 }
8659 if (rd != 16) {
396e467c
FN
8660 if (val) {
8661 store_reg(s, rm, tmp2);
8662 if (op != 0xf)
8663 dead_tmp(tmp);
8664 } else {
8665 store_reg(s, rd, tmp);
8666 dead_tmp(tmp2);
8667 }
8668 } else {
8669 dead_tmp(tmp);
8670 dead_tmp(tmp2);
99c475ab
FB
8671 }
8672 break;
8673
8674 case 5:
8675 /* load/store register offset. */
8676 rd = insn & 7;
8677 rn = (insn >> 3) & 7;
8678 rm = (insn >> 6) & 7;
8679 op = (insn >> 9) & 7;
b0109805 8680 addr = load_reg(s, rn);
b26eefb6 8681 tmp = load_reg(s, rm);
b0109805 8682 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8683 dead_tmp(tmp);
99c475ab
FB
8684
8685 if (op < 3) /* store */
b0109805 8686 tmp = load_reg(s, rd);
99c475ab
FB
8687
8688 switch (op) {
8689 case 0: /* str */
b0109805 8690 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8691 break;
8692 case 1: /* strh */
b0109805 8693 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8694 break;
8695 case 2: /* strb */
b0109805 8696 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8697 break;
8698 case 3: /* ldrsb */
b0109805 8699 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8700 break;
8701 case 4: /* ldr */
b0109805 8702 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8703 break;
8704 case 5: /* ldrh */
b0109805 8705 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8706 break;
8707 case 6: /* ldrb */
b0109805 8708 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8709 break;
8710 case 7: /* ldrsh */
b0109805 8711 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8712 break;
8713 }
8714 if (op >= 3) /* load */
b0109805
PB
8715 store_reg(s, rd, tmp);
8716 dead_tmp(addr);
99c475ab
FB
8717 break;
8718
8719 case 6:
8720 /* load/store word immediate offset */
8721 rd = insn & 7;
8722 rn = (insn >> 3) & 7;
b0109805 8723 addr = load_reg(s, rn);
99c475ab 8724 val = (insn >> 4) & 0x7c;
b0109805 8725 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8726
8727 if (insn & (1 << 11)) {
8728 /* load */
b0109805
PB
8729 tmp = gen_ld32(addr, IS_USER(s));
8730 store_reg(s, rd, tmp);
99c475ab
FB
8731 } else {
8732 /* store */
b0109805
PB
8733 tmp = load_reg(s, rd);
8734 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8735 }
b0109805 8736 dead_tmp(addr);
99c475ab
FB
8737 break;
8738
8739 case 7:
8740 /* load/store byte immediate offset */
8741 rd = insn & 7;
8742 rn = (insn >> 3) & 7;
b0109805 8743 addr = load_reg(s, rn);
99c475ab 8744 val = (insn >> 6) & 0x1f;
b0109805 8745 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8746
8747 if (insn & (1 << 11)) {
8748 /* load */
b0109805
PB
8749 tmp = gen_ld8u(addr, IS_USER(s));
8750 store_reg(s, rd, tmp);
99c475ab
FB
8751 } else {
8752 /* store */
b0109805
PB
8753 tmp = load_reg(s, rd);
8754 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8755 }
b0109805 8756 dead_tmp(addr);
99c475ab
FB
8757 break;
8758
8759 case 8:
8760 /* load/store halfword immediate offset */
8761 rd = insn & 7;
8762 rn = (insn >> 3) & 7;
b0109805 8763 addr = load_reg(s, rn);
99c475ab 8764 val = (insn >> 5) & 0x3e;
b0109805 8765 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8766
8767 if (insn & (1 << 11)) {
8768 /* load */
b0109805
PB
8769 tmp = gen_ld16u(addr, IS_USER(s));
8770 store_reg(s, rd, tmp);
99c475ab
FB
8771 } else {
8772 /* store */
b0109805
PB
8773 tmp = load_reg(s, rd);
8774 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8775 }
b0109805 8776 dead_tmp(addr);
99c475ab
FB
8777 break;
8778
8779 case 9:
8780 /* load/store from stack */
8781 rd = (insn >> 8) & 7;
b0109805 8782 addr = load_reg(s, 13);
99c475ab 8783 val = (insn & 0xff) * 4;
b0109805 8784 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8785
8786 if (insn & (1 << 11)) {
8787 /* load */
b0109805
PB
8788 tmp = gen_ld32(addr, IS_USER(s));
8789 store_reg(s, rd, tmp);
99c475ab
FB
8790 } else {
8791 /* store */
b0109805
PB
8792 tmp = load_reg(s, rd);
8793 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8794 }
b0109805 8795 dead_tmp(addr);
99c475ab
FB
8796 break;
8797
8798 case 10:
8799 /* add to high reg */
8800 rd = (insn >> 8) & 7;
5899f386
FB
8801 if (insn & (1 << 11)) {
8802 /* SP */
5e3f878a 8803 tmp = load_reg(s, 13);
5899f386
FB
8804 } else {
8805 /* PC. bit 1 is ignored. */
5e3f878a
PB
8806 tmp = new_tmp();
8807 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8808 }
99c475ab 8809 val = (insn & 0xff) * 4;
5e3f878a
PB
8810 tcg_gen_addi_i32(tmp, tmp, val);
8811 store_reg(s, rd, tmp);
99c475ab
FB
8812 break;
8813
8814 case 11:
8815 /* misc */
8816 op = (insn >> 8) & 0xf;
8817 switch (op) {
8818 case 0:
8819 /* adjust stack pointer */
b26eefb6 8820 tmp = load_reg(s, 13);
99c475ab
FB
8821 val = (insn & 0x7f) * 4;
8822 if (insn & (1 << 7))
6a0d8a1d 8823 val = -(int32_t)val;
b26eefb6
PB
8824 tcg_gen_addi_i32(tmp, tmp, val);
8825 store_reg(s, 13, tmp);
99c475ab
FB
8826 break;
8827
9ee6e8bb
PB
8828 case 2: /* sign/zero extend. */
8829 ARCH(6);
8830 rd = insn & 7;
8831 rm = (insn >> 3) & 7;
b0109805 8832 tmp = load_reg(s, rm);
9ee6e8bb 8833 switch ((insn >> 6) & 3) {
b0109805
PB
8834 case 0: gen_sxth(tmp); break;
8835 case 1: gen_sxtb(tmp); break;
8836 case 2: gen_uxth(tmp); break;
8837 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8838 }
b0109805 8839 store_reg(s, rd, tmp);
9ee6e8bb 8840 break;
99c475ab
FB
8841 case 4: case 5: case 0xc: case 0xd:
8842 /* push/pop */
b0109805 8843 addr = load_reg(s, 13);
5899f386
FB
8844 if (insn & (1 << 8))
8845 offset = 4;
99c475ab 8846 else
5899f386
FB
8847 offset = 0;
8848 for (i = 0; i < 8; i++) {
8849 if (insn & (1 << i))
8850 offset += 4;
8851 }
8852 if ((insn & (1 << 11)) == 0) {
b0109805 8853 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8854 }
99c475ab
FB
8855 for (i = 0; i < 8; i++) {
8856 if (insn & (1 << i)) {
8857 if (insn & (1 << 11)) {
8858 /* pop */
b0109805
PB
8859 tmp = gen_ld32(addr, IS_USER(s));
8860 store_reg(s, i, tmp);
99c475ab
FB
8861 } else {
8862 /* push */
b0109805
PB
8863 tmp = load_reg(s, i);
8864 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8865 }
5899f386 8866 /* advance to the next address. */
b0109805 8867 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8868 }
8869 }
a50f5b91 8870 TCGV_UNUSED(tmp);
99c475ab
FB
8871 if (insn & (1 << 8)) {
8872 if (insn & (1 << 11)) {
8873 /* pop pc */
b0109805 8874 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8875 /* don't set the pc until the rest of the instruction
8876 has completed */
8877 } else {
8878 /* push lr */
b0109805
PB
8879 tmp = load_reg(s, 14);
8880 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8881 }
b0109805 8882 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8883 }
5899f386 8884 if ((insn & (1 << 11)) == 0) {
b0109805 8885 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8886 }
99c475ab 8887 /* write back the new stack pointer */
b0109805 8888 store_reg(s, 13, addr);
99c475ab
FB
8889 /* set the new PC value */
8890 if ((insn & 0x0900) == 0x0900)
b0109805 8891 gen_bx(s, tmp);
99c475ab
FB
8892 break;
8893
9ee6e8bb
PB
8894 case 1: case 3: case 9: case 11: /* czb */
8895 rm = insn & 7;
d9ba4830 8896 tmp = load_reg(s, rm);
9ee6e8bb
PB
8897 s->condlabel = gen_new_label();
8898 s->condjmp = 1;
8899 if (insn & (1 << 11))
cb63669a 8900 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8901 else
cb63669a 8902 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8903 dead_tmp(tmp);
9ee6e8bb
PB
8904 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8905 val = (uint32_t)s->pc + 2;
8906 val += offset;
8907 gen_jmp(s, val);
8908 break;
8909
8910 case 15: /* IT, nop-hint. */
8911 if ((insn & 0xf) == 0) {
8912 gen_nop_hint(s, (insn >> 4) & 0xf);
8913 break;
8914 }
8915 /* If Then. */
8916 s->condexec_cond = (insn >> 4) & 0xe;
8917 s->condexec_mask = insn & 0x1f;
8918 /* No actual code generated for this insn, just setup state. */
8919 break;
8920
06c949e6 8921 case 0xe: /* bkpt */
9ee6e8bb 8922 gen_set_condexec(s);
5e3f878a 8923 gen_set_pc_im(s->pc - 2);
d9ba4830 8924 gen_exception(EXCP_BKPT);
06c949e6
PB
8925 s->is_jmp = DISAS_JUMP;
8926 break;
8927
9ee6e8bb
PB
8928 case 0xa: /* rev */
8929 ARCH(6);
8930 rn = (insn >> 3) & 0x7;
8931 rd = insn & 0x7;
b0109805 8932 tmp = load_reg(s, rn);
9ee6e8bb 8933 switch ((insn >> 6) & 3) {
66896cb8 8934 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8935 case 1: gen_rev16(tmp); break;
8936 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8937 default: goto illegal_op;
8938 }
b0109805 8939 store_reg(s, rd, tmp);
9ee6e8bb
PB
8940 break;
8941
8942 case 6: /* cps */
8943 ARCH(6);
8944 if (IS_USER(s))
8945 break;
8946 if (IS_M(env)) {
8984bd2e 8947 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8948 /* PRIMASK */
8984bd2e
PB
8949 if (insn & 1) {
8950 addr = tcg_const_i32(16);
8951 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8952 tcg_temp_free_i32(addr);
8984bd2e 8953 }
9ee6e8bb 8954 /* FAULTMASK */
8984bd2e
PB
8955 if (insn & 2) {
8956 addr = tcg_const_i32(17);
8957 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8958 tcg_temp_free_i32(addr);
8984bd2e 8959 }
b75263d6 8960 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8961 gen_lookup_tb(s);
8962 } else {
8963 if (insn & (1 << 4))
8964 shift = CPSR_A | CPSR_I | CPSR_F;
8965 else
8966 shift = 0;
fa26df03 8967 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
8968 }
8969 break;
8970
99c475ab
FB
8971 default:
8972 goto undef;
8973 }
8974 break;
8975
8976 case 12:
8977 /* load/store multiple */
8978 rn = (insn >> 8) & 0x7;
b0109805 8979 addr = load_reg(s, rn);
99c475ab
FB
8980 for (i = 0; i < 8; i++) {
8981 if (insn & (1 << i)) {
99c475ab
FB
8982 if (insn & (1 << 11)) {
8983 /* load */
b0109805
PB
8984 tmp = gen_ld32(addr, IS_USER(s));
8985 store_reg(s, i, tmp);
99c475ab
FB
8986 } else {
8987 /* store */
b0109805
PB
8988 tmp = load_reg(s, i);
8989 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8990 }
5899f386 8991 /* advance to the next address */
b0109805 8992 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8993 }
8994 }
5899f386 8995 /* Base register writeback. */
b0109805
PB
8996 if ((insn & (1 << rn)) == 0) {
8997 store_reg(s, rn, addr);
8998 } else {
8999 dead_tmp(addr);
9000 }
99c475ab
FB
9001 break;
9002
9003 case 13:
9004 /* conditional branch or swi */
9005 cond = (insn >> 8) & 0xf;
9006 if (cond == 0xe)
9007 goto undef;
9008
9009 if (cond == 0xf) {
9010 /* swi */
9ee6e8bb 9011 gen_set_condexec(s);
422ebf69 9012 gen_set_pc_im(s->pc);
9ee6e8bb 9013 s->is_jmp = DISAS_SWI;
99c475ab
FB
9014 break;
9015 }
9016 /* generate a conditional jump to next instruction */
e50e6a20 9017 s->condlabel = gen_new_label();
d9ba4830 9018 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9019 s->condjmp = 1;
99c475ab
FB
9020
9021 /* jump to the offset */
5899f386 9022 val = (uint32_t)s->pc + 2;
99c475ab 9023 offset = ((int32_t)insn << 24) >> 24;
5899f386 9024 val += offset << 1;
8aaca4c0 9025 gen_jmp(s, val);
99c475ab
FB
9026 break;
9027
9028 case 14:
358bf29e 9029 if (insn & (1 << 11)) {
9ee6e8bb
PB
9030 if (disas_thumb2_insn(env, s, insn))
9031 goto undef32;
358bf29e
PB
9032 break;
9033 }
9ee6e8bb 9034 /* unconditional branch */
99c475ab
FB
9035 val = (uint32_t)s->pc;
9036 offset = ((int32_t)insn << 21) >> 21;
9037 val += (offset << 1) + 2;
8aaca4c0 9038 gen_jmp(s, val);
99c475ab
FB
9039 break;
9040
9041 case 15:
9ee6e8bb 9042 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9043 goto undef32;
9ee6e8bb 9044 break;
99c475ab
FB
9045 }
9046 return;
9ee6e8bb
PB
9047undef32:
9048 gen_set_condexec(s);
5e3f878a 9049 gen_set_pc_im(s->pc - 4);
d9ba4830 9050 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
9051 s->is_jmp = DISAS_JUMP;
9052 return;
9053illegal_op:
99c475ab 9054undef:
9ee6e8bb 9055 gen_set_condexec(s);
5e3f878a 9056 gen_set_pc_im(s->pc - 2);
d9ba4830 9057 gen_exception(EXCP_UDEF);
99c475ab
FB
9058 s->is_jmp = DISAS_JUMP;
9059}
9060
2c0262af
FB
9061/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9062 basic block 'tb'. If search_pc is TRUE, also generate PC
9063 information for each intermediate instruction. */
2cfc5f17
TS
/* Translate a block of guest ARM/Thumb code starting at tb->pc into TCG ops
 * in gen_opc_buf/gen_opparam_buf.  If search_pc is nonzero, additionally
 * record per-op PC/icount info (gen_opc_pc, gen_opc_instr_start,
 * gen_opc_icount) so a host PC can later be mapped back to a guest PC. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    /* Seed the per-block disassembly state from the CPU state.  */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    /* Thumb IT-block state: low condexec bit pre-shifted into mask.  */
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        /* M-profile: unprivileged iff in thread mode with CONTROL.nPRIV set.  */
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    /* Scratch temporaries shared by the VFP/Neon/iwMMXt decoders.  */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (env->condexec_bits)
      {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        /* Emit a debug exception instead of translating a breakpointed insn.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_condexec(dc);
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            /* Record the guest PC and icount for the ops about to be emitted,
               zero-filling any op slots since the previous instruction.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (env->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine: shift the mask and fold
               its top bit into the condition's low bit.  */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        /* Each insn must free every temp it allocates; a nonzero count here
           means the decoder leaked TCG temporaries.  */
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns ++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middel of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        /* Close out a still-open conditional skip: the not-taken path
           falls through to the next instruction.  */
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        /* Zero-fill the PC-mapping entries for any trailing ops.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
9302
2cfc5f17 9303void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9304{
2cfc5f17 9305 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9306}
9307
2cfc5f17 9308void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9309{
2cfc5f17 9310 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9311}
9312
b5ff1b31
FB
/* Printable names for the 16 possible CPSR mode field values (CPSR bits
 * [3:0] after masking); "???" marks encodings with no defined mode.
 * Indexed by (psr & 0xf) in cpu_dump_state(). */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 9317
9a78eead 9318void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9319 int flags)
2c0262af
FB
9320{
9321 int i;
06e80fc9 9322#if 0
bc380d17 9323 union {
b7bcbe95
FB
9324 uint32_t i;
9325 float s;
9326 } s0, s1;
9327 CPU_DoubleU d;
a94a6abf
PB
9328 /* ??? This assumes float64 and double have the same layout.
9329 Oh well, it's only debug dumps. */
9330 union {
9331 float64 f64;
9332 double d;
9333 } d0;
06e80fc9 9334#endif
b5ff1b31 9335 uint32_t psr;
2c0262af
FB
9336
9337 for(i=0;i<16;i++) {
7fe48483 9338 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9339 if ((i % 4) == 3)
7fe48483 9340 cpu_fprintf(f, "\n");
2c0262af 9341 else
7fe48483 9342 cpu_fprintf(f, " ");
2c0262af 9343 }
b5ff1b31 9344 psr = cpsr_read(env);
687fa640
TS
9345 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9346 psr,
b5ff1b31
FB
9347 psr & (1 << 31) ? 'N' : '-',
9348 psr & (1 << 30) ? 'Z' : '-',
9349 psr & (1 << 29) ? 'C' : '-',
9350 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9351 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9352 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9353
5e3f878a 9354#if 0
b7bcbe95 9355 for (i = 0; i < 16; i++) {
8e96005d
FB
9356 d.d = env->vfp.regs[i];
9357 s0.i = d.l.lower;
9358 s1.i = d.l.upper;
a94a6abf
PB
9359 d0.f64 = d.d;
9360 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9361 i * 2, (int)s0.i, s0.s,
a94a6abf 9362 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9363 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9364 d0.d);
b7bcbe95 9365 }
40f137e1 9366 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9367#endif
2c0262af 9368}
a6b025d3 9369
d2856f1a
AJ
9370void gen_pc_load(CPUState *env, TranslationBlock *tb,
9371 unsigned long searched_pc, int pc_pos, void *puc)
9372{
9373 env->regs[15] = gen_opc_pc[pc_pos];
9374}