/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

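/* Note: as with store_reg() above, helpers in this file that are documented as
   consuming a TCGv argument release it with dead_tmp(); callers that still need
   the value afterwards must copy it into a fresh temporary first.  */
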
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

226static void gen_smul_dual(TCGv a, TCGv b)
227{
228 TCGv tmp1 = new_tmp();
229 TCGv tmp2 = new_tmp();
22478e79
AZ
230 tcg_gen_ext16s_i32(tmp1, a);
231 tcg_gen_ext16s_i32(tmp2, b);
3670669c
PB
232 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
233 dead_tmp(tmp2);
234 tcg_gen_sari_i32(a, a, 16);
235 tcg_gen_sari_i32(b, b, 16);
236 tcg_gen_mul_i32(b, b, a);
237 tcg_gen_mov_i32(a, tmp1);
238 dead_tmp(tmp1);
239}
240
241/* Byteswap each halfword. */
242static void gen_rev16(TCGv var)
243{
244 TCGv tmp = new_tmp();
245 tcg_gen_shri_i32(tmp, var, 8);
246 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
247 tcg_gen_shli_i32(var, var, 8);
248 tcg_gen_andi_i32(var, var, 0xff00ff00);
249 tcg_gen_or_i32(var, var, tmp);
250 dead_tmp(tmp);
251}
252
253/* Byteswap low halfword and sign extend. */
254static void gen_revsh(TCGv var)
255{
1a855029
AJ
256 tcg_gen_ext16u_i32(var, var);
257 tcg_gen_bswap16_i32(var, var);
258 tcg_gen_ext16s_i32(var, var);
3670669c
PB
259}
260
261/* Unsigned bitfield extract. */
262static void gen_ubfx(TCGv var, int shift, uint32_t mask)
263{
264 if (shift)
265 tcg_gen_shri_i32(var, var, shift);
266 tcg_gen_andi_i32(var, var, mask);
267}
268
269/* Signed bitfield extract. */
270static void gen_sbfx(TCGv var, int shift, int width)
271{
272 uint32_t signbit;
273
274 if (shift)
275 tcg_gen_sari_i32(var, var, shift);
276 if (shift + width < 32) {
277 signbit = 1u << (width - 1);
278 tcg_gen_andi_i32(var, var, (1u << width) - 1);
279 tcg_gen_xori_i32(var, var, signbit);
280 tcg_gen_subi_i32(var, var, signbit);
281 }
282}
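/* The xor/sub pair above sign-extends the masked field from bit (width - 1):
   for an unsigned value x, (x ^ signbit) - signbit equals x when the sign bit
   is clear, and x - 2*signbit (its negative two's-complement value) when it
   is set.  */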
283
284/* Bitfield insertion. Insert val into base. Clobbers base and val. */
285static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
286{
3670669c 287 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
288 tcg_gen_shli_i32(val, val, shift);
289 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
290 tcg_gen_or_i32(dest, base, val);
291}
292
838fa72d
AJ
293/* Return (b << 32) + a. Mark inputs as dead */
294static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 295{
838fa72d
AJ
296 TCGv_i64 tmp64 = tcg_temp_new_i64();
297
298 tcg_gen_extu_i32_i64(tmp64, b);
299 dead_tmp(b);
300 tcg_gen_shli_i64(tmp64, tmp64, 32);
301 tcg_gen_add_i64(a, tmp64, a);
302
303 tcg_temp_free_i64(tmp64);
304 return a;
305}
306
307/* Return (b << 32) - a. Mark inputs as dead. */
308static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
309{
310 TCGv_i64 tmp64 = tcg_temp_new_i64();
311
312 tcg_gen_extu_i32_i64(tmp64, b);
313 dead_tmp(b);
314 tcg_gen_shli_i64(tmp64, tmp64, 32);
315 tcg_gen_sub_i64(a, tmp64, a);
316
317 tcg_temp_free_i64(tmp64);
318 return a;
3670669c
PB
319}
320
8f01245e
PB
321/* FIXME: Most targets have native widening multiplication.
322 It would be good to use that instead of a full wide multiply. */
5e3f878a 323/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 324static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 325{
a7812ae4
PB
326 TCGv_i64 tmp1 = tcg_temp_new_i64();
327 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
328
329 tcg_gen_extu_i32_i64(tmp1, a);
330 dead_tmp(a);
331 tcg_gen_extu_i32_i64(tmp2, b);
332 dead_tmp(b);
333 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 334 tcg_temp_free_i64(tmp2);
5e3f878a
PB
335 return tmp1;
336}
337
a7812ae4 338static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 339{
a7812ae4
PB
340 TCGv_i64 tmp1 = tcg_temp_new_i64();
341 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
342
343 tcg_gen_ext_i32_i64(tmp1, a);
344 dead_tmp(a);
345 tcg_gen_ext_i32_i64(tmp2, b);
346 dead_tmp(b);
347 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 348 tcg_temp_free_i64(tmp2);
5e3f878a
PB
349 return tmp1;
350}
351
8f01245e
PB
352/* Swap low and high halfwords. */
353static void gen_swap_half(TCGv var)
354{
355 TCGv tmp = new_tmp();
356 tcg_gen_shri_i32(tmp, var, 16);
357 tcg_gen_shli_i32(var, var, 16);
358 tcg_gen_or_i32(var, var, tmp);
3670669c 359 dead_tmp(tmp);
8f01245e
PB
360}
361
b26eefb6
PB
362/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
363 tmp = (t0 ^ t1) & 0x8000;
364 t0 &= ~0x8000;
365 t1 &= ~0x8000;
366 t0 = (t0 + t1) ^ tmp;
367 */
368
369static void gen_add16(TCGv t0, TCGv t1)
370{
371 TCGv tmp = new_tmp();
372 tcg_gen_xor_i32(tmp, t0, t1);
373 tcg_gen_andi_i32(tmp, tmp, 0x8000);
374 tcg_gen_andi_i32(t0, t0, ~0x8000);
375 tcg_gen_andi_i32(t1, t1, ~0x8000);
376 tcg_gen_add_i32(t0, t0, t1);
377 tcg_gen_xor_i32(t0, t0, tmp);
378 dead_tmp(tmp);
379 dead_tmp(t1);
380}
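/* Masking out bit 15 of both operands before the add keeps a carry out of the
   low halfword from rippling into the high halfword; xoring with the saved
   (t0 ^ t1) & 0x8000 then restores the correct value of result bit 15.  */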
381
9a119ff6
PB
382#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
383
b26eefb6
PB
384/* Set CF to the top bit of var. */
385static void gen_set_CF_bit31(TCGv var)
386{
387 TCGv tmp = new_tmp();
388 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 389 gen_set_CF(tmp);
b26eefb6
PB
390 dead_tmp(tmp);
391}
392
393/* Set N and Z flags from var. */
394static inline void gen_logic_CC(TCGv var)
395{
6fbe23d5
PB
396 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
397 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
398}
399
400/* T0 += T1 + CF. */
396e467c 401static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 402{
d9ba4830 403 TCGv tmp;
396e467c 404 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 405 tmp = load_cpu_field(CF);
396e467c 406 tcg_gen_add_i32(t0, t0, tmp);
b26eefb6
PB
407 dead_tmp(tmp);
408}
409
e9bb4aa9
JR
410/* dest = T0 + T1 + CF. */
411static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
412{
413 TCGv tmp;
414 tcg_gen_add_i32(dest, t0, t1);
415 tmp = load_cpu_field(CF);
416 tcg_gen_add_i32(dest, dest, tmp);
417 dead_tmp(tmp);
418}
419
3670669c
PB
420/* dest = T0 - T1 + CF - 1. */
421static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
422{
d9ba4830 423 TCGv tmp;
3670669c 424 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 425 tmp = load_cpu_field(CF);
3670669c
PB
426 tcg_gen_add_i32(dest, dest, tmp);
427 tcg_gen_subi_i32(dest, dest, 1);
428 dead_tmp(tmp);
429}
430
ad69471c
PB
431/* FIXME: Implement this natively. */
432#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
433
9a119ff6 434static void shifter_out_im(TCGv var, int shift)
b26eefb6 435{
9a119ff6
PB
436 TCGv tmp = new_tmp();
437 if (shift == 0) {
438 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 439 } else {
9a119ff6 440 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 441 if (shift != 31)
9a119ff6
PB
442 tcg_gen_andi_i32(tmp, tmp, 1);
443 }
444 gen_set_CF(tmp);
445 dead_tmp(tmp);
446}
b26eefb6 447
9a119ff6
PB
448/* Shift by immediate. Includes special handling for shift == 0. */
449static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
450{
451 switch (shiftop) {
452 case 0: /* LSL */
453 if (shift != 0) {
454 if (flags)
455 shifter_out_im(var, 32 - shift);
456 tcg_gen_shli_i32(var, var, shift);
457 }
458 break;
459 case 1: /* LSR */
460 if (shift == 0) {
461 if (flags) {
462 tcg_gen_shri_i32(var, var, 31);
463 gen_set_CF(var);
464 }
465 tcg_gen_movi_i32(var, 0);
466 } else {
467 if (flags)
468 shifter_out_im(var, shift - 1);
469 tcg_gen_shri_i32(var, var, shift);
470 }
471 break;
472 case 2: /* ASR */
473 if (shift == 0)
474 shift = 32;
475 if (flags)
476 shifter_out_im(var, shift - 1);
477 if (shift == 32)
478 shift = 31;
479 tcg_gen_sari_i32(var, var, shift);
480 break;
481 case 3: /* ROR/RRX */
482 if (shift != 0) {
483 if (flags)
484 shifter_out_im(var, shift - 1);
f669df27 485 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 486 } else {
d9ba4830 487 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
488 if (flags)
489 shifter_out_im(var, 0);
490 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
491 tcg_gen_shli_i32(tmp, tmp, 31);
492 tcg_gen_or_i32(var, var, tmp);
493 dead_tmp(tmp);
b26eefb6
PB
494 }
495 }
496};
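/* In the immediate shift encoding, LSR #0 and ASR #0 stand for a shift by 32,
   and ROR #0 stands for RRX (rotate right by one through the carry flag),
   which is why the shift == 0 cases above need special handling.  */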
497
8984bd2e
PB
498static inline void gen_arm_shift_reg(TCGv var, int shiftop,
499 TCGv shift, int flags)
500{
501 if (flags) {
502 switch (shiftop) {
503 case 0: gen_helper_shl_cc(var, var, shift); break;
504 case 1: gen_helper_shr_cc(var, var, shift); break;
505 case 2: gen_helper_sar_cc(var, var, shift); break;
506 case 3: gen_helper_ror_cc(var, var, shift); break;
507 }
508 } else {
509 switch (shiftop) {
510 case 0: gen_helper_shl(var, var, shift); break;
511 case 1: gen_helper_shr(var, var, shift); break;
512 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
513 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
514 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
515 }
516 }
517 dead_tmp(shift);
518}
519
6ddbc6e4
PB
520#define PAS_OP(pfx) \
521 switch (op2) { \
522 case 0: gen_pas_helper(glue(pfx,add16)); break; \
523 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
524 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
525 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
526 case 4: gen_pas_helper(glue(pfx,add8)); break; \
527 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
528 }
d9ba4830 529static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 530{
a7812ae4 531 TCGv_ptr tmp;
6ddbc6e4
PB
532
533 switch (op1) {
534#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
535 case 1:
a7812ae4 536 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
537 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
538 PAS_OP(s)
b75263d6 539 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
540 break;
541 case 5:
a7812ae4 542 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
543 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
544 PAS_OP(u)
b75263d6 545 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
546 break;
547#undef gen_pas_helper
548#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
549 case 2:
550 PAS_OP(q);
551 break;
552 case 3:
553 PAS_OP(sh);
554 break;
555 case 6:
556 PAS_OP(uq);
557 break;
558 case 7:
559 PAS_OP(uh);
560 break;
561#undef gen_pas_helper
562 }
563}
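/* In gen_arm_parallel_addsub() above, op1 selects the variant: 1 = signed
   (S prefix, sets GE flags), 5 = unsigned (U prefix, sets GE flags),
   2 = signed saturating (Q), 3 = signed halving (SH), 6 = unsigned saturating
   (UQ), 7 = unsigned halving (UH); op2 selects the operation via PAS_OP.  */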
9ee6e8bb
PB
564#undef PAS_OP
565
6ddbc6e4
PB
566/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
567#define PAS_OP(pfx) \
ed89a2f1 568 switch (op1) { \
6ddbc6e4
PB
569 case 0: gen_pas_helper(glue(pfx,add8)); break; \
570 case 1: gen_pas_helper(glue(pfx,add16)); break; \
571 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
572 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
573 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
574 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
575 }
d9ba4830 576static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 577{
a7812ae4 578 TCGv_ptr tmp;
6ddbc6e4 579
ed89a2f1 580 switch (op2) {
6ddbc6e4
PB
581#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
582 case 0:
a7812ae4 583 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
584 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
585 PAS_OP(s)
b75263d6 586 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
587 break;
588 case 4:
a7812ae4 589 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
590 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
591 PAS_OP(u)
b75263d6 592 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
593 break;
594#undef gen_pas_helper
595#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
596 case 1:
597 PAS_OP(q);
598 break;
599 case 2:
600 PAS_OP(sh);
601 break;
602 case 5:
603 PAS_OP(uq);
604 break;
605 case 6:
606 PAS_OP(uh);
607 break;
608#undef gen_pas_helper
609 }
610}
9ee6e8bb
PB
611#undef PAS_OP
612
d9ba4830
PB
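/* Generate a branch to `label' that is taken when condition code `cc'
   (0..13, i.e. EQ..LE) holds for the current NZCV flag fields.  */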
613static void gen_test_cc(int cc, int label)
614{
615 TCGv tmp;
616 TCGv tmp2;
d9ba4830
PB
617 int inv;
618
d9ba4830
PB
619 switch (cc) {
620 case 0: /* eq: Z */
6fbe23d5 621 tmp = load_cpu_field(ZF);
cb63669a 622 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
623 break;
624 case 1: /* ne: !Z */
6fbe23d5 625 tmp = load_cpu_field(ZF);
cb63669a 626 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
627 break;
628 case 2: /* cs: C */
629 tmp = load_cpu_field(CF);
cb63669a 630 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
631 break;
632 case 3: /* cc: !C */
633 tmp = load_cpu_field(CF);
cb63669a 634 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
635 break;
636 case 4: /* mi: N */
6fbe23d5 637 tmp = load_cpu_field(NF);
cb63669a 638 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
639 break;
640 case 5: /* pl: !N */
6fbe23d5 641 tmp = load_cpu_field(NF);
cb63669a 642 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
643 break;
644 case 6: /* vs: V */
645 tmp = load_cpu_field(VF);
cb63669a 646 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
647 break;
648 case 7: /* vc: !V */
649 tmp = load_cpu_field(VF);
cb63669a 650 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
651 break;
652 case 8: /* hi: C && !Z */
653 inv = gen_new_label();
654 tmp = load_cpu_field(CF);
cb63669a 655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 656 dead_tmp(tmp);
6fbe23d5 657 tmp = load_cpu_field(ZF);
cb63669a 658 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
659 gen_set_label(inv);
660 break;
661 case 9: /* ls: !C || Z */
662 tmp = load_cpu_field(CF);
cb63669a 663 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 664 dead_tmp(tmp);
6fbe23d5 665 tmp = load_cpu_field(ZF);
cb63669a 666 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
667 break;
668 case 10: /* ge: N == V -> N ^ V == 0 */
669 tmp = load_cpu_field(VF);
6fbe23d5 670 tmp2 = load_cpu_field(NF);
d9ba4830
PB
671 tcg_gen_xor_i32(tmp, tmp, tmp2);
672 dead_tmp(tmp2);
cb63669a 673 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
674 break;
675 case 11: /* lt: N != V -> N ^ V != 0 */
676 tmp = load_cpu_field(VF);
6fbe23d5 677 tmp2 = load_cpu_field(NF);
d9ba4830
PB
678 tcg_gen_xor_i32(tmp, tmp, tmp2);
679 dead_tmp(tmp2);
cb63669a 680 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
681 break;
682 case 12: /* gt: !Z && N == V */
683 inv = gen_new_label();
6fbe23d5 684 tmp = load_cpu_field(ZF);
cb63669a 685 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
686 dead_tmp(tmp);
687 tmp = load_cpu_field(VF);
6fbe23d5 688 tmp2 = load_cpu_field(NF);
d9ba4830
PB
689 tcg_gen_xor_i32(tmp, tmp, tmp2);
690 dead_tmp(tmp2);
cb63669a 691 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
692 gen_set_label(inv);
693 break;
694 case 13: /* le: Z || N != V */
6fbe23d5 695 tmp = load_cpu_field(ZF);
cb63669a 696 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
697 dead_tmp(tmp);
698 tmp = load_cpu_field(VF);
6fbe23d5 699 tmp2 = load_cpu_field(NF);
d9ba4830
PB
700 tcg_gen_xor_i32(tmp, tmp, tmp2);
701 dead_tmp(tmp2);
cb63669a 702 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
703 break;
704 default:
705 fprintf(stderr, "Bad condition code 0x%x\n", cc);
706 abort();
707 }
708 dead_tmp(tmp);
709}
2c0262af 710
b1d8e52e 711static const uint8_t table_logic_cc[16] = {
2c0262af
FB
712 1, /* and */
713 1, /* xor */
714 0, /* sub */
715 0, /* rsb */
716 0, /* add */
717 0, /* adc */
718 0, /* sbc */
719 0, /* rsc */
720 1, /* andl */
721 1, /* xorl */
722 0, /* cmp */
723 0, /* cmn */
724 1, /* orr */
725 1, /* mov */
726 1, /* bic */
727 1, /* mvn */
728};
3b46e624 729
d9ba4830
PB
730/* Set PC and Thumb state from an immediate address. */
731static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 732{
b26eefb6 733 TCGv tmp;
99c475ab 734
b26eefb6 735 s->is_jmp = DISAS_UPDATE;
d9ba4830 736 if (s->thumb != (addr & 1)) {
155c3eac 737 tmp = new_tmp();
d9ba4830
PB
738 tcg_gen_movi_i32(tmp, addr & 1);
739 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
155c3eac 740 dead_tmp(tmp);
d9ba4830 741 }
155c3eac 742 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
743}
744
745/* Set PC and Thumb state from var. var is marked as dead. */
746static inline void gen_bx(DisasContext *s, TCGv var)
747{
d9ba4830 748 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
749 tcg_gen_andi_i32(cpu_R[15], var, ~1);
750 tcg_gen_andi_i32(var, var, 1);
751 store_cpu_field(var, thumb);
d9ba4830
PB
752}
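/* In gen_bx_im() and gen_bx() above, bit 0 of the target address selects the
   Thumb execution state and the remaining bits form the new PC, matching the
   ARM interworking branch (BX) semantics.  */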
753
21aeb343
JR
754/* Variant of store_reg which uses branch&exchange logic when storing
755 to r15 in ARM architecture v7 and above. The source must be a temporary
756 and will be marked as dead. */
757static inline void store_reg_bx(CPUState *env, DisasContext *s,
758 int reg, TCGv var)
759{
760 if (reg == 15 && ENABLE_ARCH_7) {
761 gen_bx(s, var);
762 } else {
763 store_reg(s, reg, var);
764 }
765}
766
b0109805
PB
767static inline TCGv gen_ld8s(TCGv addr, int index)
768{
769 TCGv tmp = new_tmp();
770 tcg_gen_qemu_ld8s(tmp, addr, index);
771 return tmp;
772}
773static inline TCGv gen_ld8u(TCGv addr, int index)
774{
775 TCGv tmp = new_tmp();
776 tcg_gen_qemu_ld8u(tmp, addr, index);
777 return tmp;
778}
779static inline TCGv gen_ld16s(TCGv addr, int index)
780{
781 TCGv tmp = new_tmp();
782 tcg_gen_qemu_ld16s(tmp, addr, index);
783 return tmp;
784}
785static inline TCGv gen_ld16u(TCGv addr, int index)
786{
787 TCGv tmp = new_tmp();
788 tcg_gen_qemu_ld16u(tmp, addr, index);
789 return tmp;
790}
791static inline TCGv gen_ld32(TCGv addr, int index)
792{
793 TCGv tmp = new_tmp();
794 tcg_gen_qemu_ld32u(tmp, addr, index);
795 return tmp;
796}
84496233
JR
797static inline TCGv_i64 gen_ld64(TCGv addr, int index)
798{
799 TCGv_i64 tmp = tcg_temp_new_i64();
800 tcg_gen_qemu_ld64(tmp, addr, index);
801 return tmp;
802}
b0109805
PB
803static inline void gen_st8(TCGv val, TCGv addr, int index)
804{
805 tcg_gen_qemu_st8(val, addr, index);
806 dead_tmp(val);
807}
808static inline void gen_st16(TCGv val, TCGv addr, int index)
809{
810 tcg_gen_qemu_st16(val, addr, index);
811 dead_tmp(val);
812}
813static inline void gen_st32(TCGv val, TCGv addr, int index)
814{
815 tcg_gen_qemu_st32(val, addr, index);
816 dead_tmp(val);
817}
84496233
JR
818static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
819{
820 tcg_gen_qemu_st64(val, addr, index);
821 tcg_temp_free_i64(val);
822}
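/* The `index' argument taken by the load/store helpers above is the memory
   access index given to the qemu_ld/st ops; callers in this file pass
   IS_USER(s), so (in system emulation) guest memory is accessed with the
   privilege of the code being translated.  */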
b5ff1b31 823
5e3f878a
PB
824static inline void gen_set_pc_im(uint32_t val)
825{
155c3eac 826 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
827}
828
b5ff1b31
FB
829/* Force a TB lookup after an instruction that changes the CPU state. */
830static inline void gen_lookup_tb(DisasContext *s)
831{
a6445c52 832 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
833 s->is_jmp = DISAS_UPDATE;
834}
835
b0109805
PB
836static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
837 TCGv var)
2c0262af 838{
1e8d4eec 839 int val, rm, shift, shiftop;
b26eefb6 840 TCGv offset;
2c0262af
FB
841
842 if (!(insn & (1 << 25))) {
843 /* immediate */
844 val = insn & 0xfff;
845 if (!(insn & (1 << 23)))
846 val = -val;
537730b9 847 if (val != 0)
b0109805 848 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
849 } else {
850 /* shift/register */
851 rm = (insn) & 0xf;
852 shift = (insn >> 7) & 0x1f;
1e8d4eec 853 shiftop = (insn >> 5) & 3;
b26eefb6 854 offset = load_reg(s, rm);
9a119ff6 855 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 856 if (!(insn & (1 << 23)))
b0109805 857 tcg_gen_sub_i32(var, var, offset);
2c0262af 858 else
b0109805 859 tcg_gen_add_i32(var, var, offset);
b26eefb6 860 dead_tmp(offset);
2c0262af
FB
861 }
862}
863
191f9a93 864static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 865 int extra, TCGv var)
2c0262af
FB
866{
867 int val, rm;
b26eefb6 868 TCGv offset;
3b46e624 869
2c0262af
FB
870 if (insn & (1 << 22)) {
871 /* immediate */
872 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
873 if (!(insn & (1 << 23)))
874 val = -val;
18acad92 875 val += extra;
537730b9 876 if (val != 0)
b0109805 877 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
878 } else {
879 /* register */
191f9a93 880 if (extra)
b0109805 881 tcg_gen_addi_i32(var, var, extra);
2c0262af 882 rm = (insn) & 0xf;
b26eefb6 883 offset = load_reg(s, rm);
2c0262af 884 if (!(insn & (1 << 23)))
b0109805 885 tcg_gen_sub_i32(var, var, offset);
2c0262af 886 else
b0109805 887 tcg_gen_add_i32(var, var, offset);
b26eefb6 888 dead_tmp(offset);
2c0262af
FB
889 }
890}
891
4373f3ce
PB
892#define VFP_OP2(name) \
893static inline void gen_vfp_##name(int dp) \
894{ \
895 if (dp) \
896 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
897 else \
898 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
899}
900
4373f3ce
PB
901VFP_OP2(add)
902VFP_OP2(sub)
903VFP_OP2(mul)
904VFP_OP2(div)
905
906#undef VFP_OP2
907
908static inline void gen_vfp_abs(int dp)
909{
910 if (dp)
911 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
912 else
913 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
914}
915
916static inline void gen_vfp_neg(int dp)
917{
918 if (dp)
919 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
920 else
921 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
922}
923
924static inline void gen_vfp_sqrt(int dp)
925{
926 if (dp)
927 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
928 else
929 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
930}
931
932static inline void gen_vfp_cmp(int dp)
933{
934 if (dp)
935 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
936 else
937 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
938}
939
940static inline void gen_vfp_cmpe(int dp)
941{
942 if (dp)
943 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
944 else
945 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
946}
947
948static inline void gen_vfp_F1_ld0(int dp)
949{
950 if (dp)
5b340b51 951 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 952 else
5b340b51 953 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
954}
955
956static inline void gen_vfp_uito(int dp)
957{
958 if (dp)
959 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
960 else
961 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
962}
963
964static inline void gen_vfp_sito(int dp)
965{
966 if (dp)
66230e0d 967 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 968 else
66230e0d 969 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
970}
971
972static inline void gen_vfp_toui(int dp)
973{
974 if (dp)
975 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
976 else
977 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
978}
979
980static inline void gen_vfp_touiz(int dp)
981{
982 if (dp)
983 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
984 else
985 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
986}
987
988static inline void gen_vfp_tosi(int dp)
989{
990 if (dp)
991 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
992 else
993 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
994}
995
996static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
997{
998 if (dp)
4373f3ce 999 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1000 else
4373f3ce
PB
1001 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1002}
1003
1004#define VFP_GEN_FIX(name) \
1005static inline void gen_vfp_##name(int dp, int shift) \
1006{ \
b75263d6 1007 TCGv tmp_shift = tcg_const_i32(shift); \
4373f3ce 1008 if (dp) \
b75263d6 1009 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
4373f3ce 1010 else \
b75263d6
JR
1011 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1012 tcg_temp_free_i32(tmp_shift); \
9ee6e8bb 1013}
4373f3ce
PB
1014VFP_GEN_FIX(tosh)
1015VFP_GEN_FIX(tosl)
1016VFP_GEN_FIX(touh)
1017VFP_GEN_FIX(toul)
1018VFP_GEN_FIX(shto)
1019VFP_GEN_FIX(slto)
1020VFP_GEN_FIX(uhto)
1021VFP_GEN_FIX(ulto)
1022#undef VFP_GEN_FIX
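/* The VFP_GEN_FIX helpers above convert between floating point and 16/32-bit
   signed/unsigned fixed-point values; `shift' is the number of fraction bits
   in the fixed-point operand.  */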
9ee6e8bb 1023
312eea9f 1024static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1025{
1026 if (dp)
312eea9f 1027 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1028 else
312eea9f 1029 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1030}
1031
312eea9f 1032static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1033{
1034 if (dp)
312eea9f 1035 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1036 else
312eea9f 1037 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1038}
1039
8e96005d
FB
1040static inline long
1041vfp_reg_offset (int dp, int reg)
1042{
1043 if (dp)
1044 return offsetof(CPUARMState, vfp.regs[reg]);
1045 else if (reg & 1) {
1046 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1047 + offsetof(CPU_DoubleU, l.upper);
1048 } else {
1049 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1050 + offsetof(CPU_DoubleU, l.lower);
1051 }
1052}
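/* Single-precision VFP registers are stored packed in pairs inside the
   double-precision register array: even-numbered singles occupy the lower
   half (l.lower) of the corresponding CPU_DoubleU and odd-numbered singles
   the upper half (l.upper).  */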
9ee6e8bb
PB
1053
1054/* Return the offset of a 32-bit piece of a NEON register.
1055 zero is the least significant end of the register. */
1056static inline long
1057neon_reg_offset (int reg, int n)
1058{
1059 int sreg;
1060 sreg = reg * 2 + n;
1061 return vfp_reg_offset(0, sreg);
1062}
1063
8f8e3aa4
PB
1064static TCGv neon_load_reg(int reg, int pass)
1065{
1066 TCGv tmp = new_tmp();
1067 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1068 return tmp;
1069}
1070
1071static void neon_store_reg(int reg, int pass, TCGv var)
1072{
1073 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1074 dead_tmp(var);
1075}
1076
a7812ae4 1077static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1078{
1079 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1080}
1081
a7812ae4 1082static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1083{
1084 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1085}
1086
4373f3ce
PB
1087#define tcg_gen_ld_f32 tcg_gen_ld_i32
1088#define tcg_gen_ld_f64 tcg_gen_ld_i64
1089#define tcg_gen_st_f32 tcg_gen_st_i32
1090#define tcg_gen_st_f64 tcg_gen_st_i64
1091
b7bcbe95
FB
1092static inline void gen_mov_F0_vreg(int dp, int reg)
1093{
1094 if (dp)
4373f3ce 1095 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1096 else
4373f3ce 1097 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1098}
1099
1100static inline void gen_mov_F1_vreg(int dp, int reg)
1101{
1102 if (dp)
4373f3ce 1103 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1104 else
4373f3ce 1105 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1106}
1107
1108static inline void gen_mov_vreg_F0(int dp, int reg)
1109{
1110 if (dp)
4373f3ce 1111 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1112 else
4373f3ce 1113 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1114}
1115
18c9b560
AZ
1116#define ARM_CP_RW_BIT (1 << 20)
1117
a7812ae4 1118static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1119{
1120 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1121}
1122
a7812ae4 1123static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1124{
1125 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1126}
1127
da6b5335 1128static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1129{
da6b5335
FN
1130 TCGv var = new_tmp();
1131 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1132 return var;
e677137d
PB
1133}
1134
da6b5335 1135static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1136{
da6b5335 1137 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
d9968827 1138 dead_tmp(var);
e677137d
PB
1139}
1140
1141static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1142{
1143 iwmmxt_store_reg(cpu_M0, rn);
1144}
1145
1146static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1147{
1148 iwmmxt_load_reg(cpu_M0, rn);
1149}
1150
1151static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1152{
1153 iwmmxt_load_reg(cpu_V1, rn);
1154 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1155}
1156
1157static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1158{
1159 iwmmxt_load_reg(cpu_V1, rn);
1160 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1161}
1162
1163static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1164{
1165 iwmmxt_load_reg(cpu_V1, rn);
1166 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1167}
1168
1169#define IWMMXT_OP(name) \
1170static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1171{ \
1172 iwmmxt_load_reg(cpu_V1, rn); \
1173 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1174}
1175
1176#define IWMMXT_OP_ENV(name) \
1177static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1178{ \
1179 iwmmxt_load_reg(cpu_V1, rn); \
1180 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1181}
1182
1183#define IWMMXT_OP_ENV_SIZE(name) \
1184IWMMXT_OP_ENV(name##b) \
1185IWMMXT_OP_ENV(name##w) \
1186IWMMXT_OP_ENV(name##l)
1187
1188#define IWMMXT_OP_ENV1(name) \
1189static inline void gen_op_iwmmxt_##name##_M0(void) \
1190{ \
1191 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1192}
1193
1194IWMMXT_OP(maddsq)
1195IWMMXT_OP(madduq)
1196IWMMXT_OP(sadb)
1197IWMMXT_OP(sadw)
1198IWMMXT_OP(mulslw)
1199IWMMXT_OP(mulshw)
1200IWMMXT_OP(mululw)
1201IWMMXT_OP(muluhw)
1202IWMMXT_OP(macsw)
1203IWMMXT_OP(macuw)
1204
1205IWMMXT_OP_ENV_SIZE(unpackl)
1206IWMMXT_OP_ENV_SIZE(unpackh)
1207
1208IWMMXT_OP_ENV1(unpacklub)
1209IWMMXT_OP_ENV1(unpackluw)
1210IWMMXT_OP_ENV1(unpacklul)
1211IWMMXT_OP_ENV1(unpackhub)
1212IWMMXT_OP_ENV1(unpackhuw)
1213IWMMXT_OP_ENV1(unpackhul)
1214IWMMXT_OP_ENV1(unpacklsb)
1215IWMMXT_OP_ENV1(unpacklsw)
1216IWMMXT_OP_ENV1(unpacklsl)
1217IWMMXT_OP_ENV1(unpackhsb)
1218IWMMXT_OP_ENV1(unpackhsw)
1219IWMMXT_OP_ENV1(unpackhsl)
1220
1221IWMMXT_OP_ENV_SIZE(cmpeq)
1222IWMMXT_OP_ENV_SIZE(cmpgtu)
1223IWMMXT_OP_ENV_SIZE(cmpgts)
1224
1225IWMMXT_OP_ENV_SIZE(mins)
1226IWMMXT_OP_ENV_SIZE(minu)
1227IWMMXT_OP_ENV_SIZE(maxs)
1228IWMMXT_OP_ENV_SIZE(maxu)
1229
1230IWMMXT_OP_ENV_SIZE(subn)
1231IWMMXT_OP_ENV_SIZE(addn)
1232IWMMXT_OP_ENV_SIZE(subu)
1233IWMMXT_OP_ENV_SIZE(addu)
1234IWMMXT_OP_ENV_SIZE(subs)
1235IWMMXT_OP_ENV_SIZE(adds)
1236
1237IWMMXT_OP_ENV(avgb0)
1238IWMMXT_OP_ENV(avgb1)
1239IWMMXT_OP_ENV(avgw0)
1240IWMMXT_OP_ENV(avgw1)
1241
1242IWMMXT_OP(msadb)
1243
1244IWMMXT_OP_ENV(packuw)
1245IWMMXT_OP_ENV(packul)
1246IWMMXT_OP_ENV(packuq)
1247IWMMXT_OP_ENV(packsw)
1248IWMMXT_OP_ENV(packsl)
1249IWMMXT_OP_ENV(packsq)
1250
e677137d
PB
1251static void gen_op_iwmmxt_set_mup(void)
1252{
1253 TCGv tmp;
1254 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1255 tcg_gen_ori_i32(tmp, tmp, 2);
1256 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1257}
1258
1259static void gen_op_iwmmxt_set_cup(void)
1260{
1261 TCGv tmp;
1262 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1263 tcg_gen_ori_i32(tmp, tmp, 1);
1264 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1265}
1266
1267static void gen_op_iwmmxt_setpsr_nz(void)
1268{
1269 TCGv tmp = new_tmp();
1270 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1271 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1272}
1273
1274static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1275{
1276 iwmmxt_load_reg(cpu_V1, rn);
86831435 1277 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1278 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1279}
1280
da6b5335 1281static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1282{
1283 int rd;
1284 uint32_t offset;
da6b5335 1285 TCGv tmp;
18c9b560
AZ
1286
1287 rd = (insn >> 16) & 0xf;
da6b5335 1288 tmp = load_reg(s, rd);
18c9b560
AZ
1289
1290 offset = (insn & 0xff) << ((insn >> 7) & 2);
1291 if (insn & (1 << 24)) {
1292 /* Pre indexed */
1293 if (insn & (1 << 23))
da6b5335 1294 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1295 else
da6b5335
FN
1296 tcg_gen_addi_i32(tmp, tmp, -offset);
1297 tcg_gen_mov_i32(dest, tmp);
18c9b560 1298 if (insn & (1 << 21))
da6b5335
FN
1299 store_reg(s, rd, tmp);
1300 else
1301 dead_tmp(tmp);
18c9b560
AZ
1302 } else if (insn & (1 << 21)) {
1303 /* Post indexed */
da6b5335 1304 tcg_gen_mov_i32(dest, tmp);
18c9b560 1305 if (insn & (1 << 23))
da6b5335 1306 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1307 else
da6b5335
FN
1308 tcg_gen_addi_i32(tmp, tmp, -offset);
1309 store_reg(s, rd, tmp);
18c9b560
AZ
1310 } else if (!(insn & (1 << 23)))
1311 return 1;
1312 return 0;
1313}
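/* gen_iwmmxt_address() above computes the transfer address for an iwMMXt
   load/store into `dest': an immediate offset is added to or subtracted from
   the base register, with optional pre- or post-indexed writeback; it returns
   nonzero for encodings it does not handle.  */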
1314
da6b5335 1315static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1316{
1317 int rd = (insn >> 0) & 0xf;
da6b5335 1318 TCGv tmp;
18c9b560 1319
da6b5335
FN
1320 if (insn & (1 << 8)) {
1321 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1322 return 1;
da6b5335
FN
1323 } else {
1324 tmp = iwmmxt_load_creg(rd);
1325 }
1326 } else {
1327 tmp = new_tmp();
1328 iwmmxt_load_reg(cpu_V0, rd);
1329 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1330 }
1331 tcg_gen_andi_i32(tmp, tmp, mask);
1332 tcg_gen_mov_i32(dest, tmp);
1333 dead_tmp(tmp);
18c9b560
AZ
1334 return 0;
1335}
1336
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
1339static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1340{
1341 int rd, wrd;
1342 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1343 TCGv addr;
1344 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1345
1346 if ((insn & 0x0e000e00) == 0x0c000000) {
1347 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1348 wrd = insn & 0xf;
1349 rdlo = (insn >> 12) & 0xf;
1350 rdhi = (insn >> 16) & 0xf;
1351 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1352 iwmmxt_load_reg(cpu_V0, wrd);
1353 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1354 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1355 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1356 } else { /* TMCRR */
da6b5335
FN
1357 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1358 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1359 gen_op_iwmmxt_set_mup();
1360 }
1361 return 0;
1362 }
1363
1364 wrd = (insn >> 12) & 0xf;
da6b5335
FN
1365 addr = new_tmp();
1366 if (gen_iwmmxt_address(s, insn, addr)) {
1367 dead_tmp(addr);
18c9b560 1368 return 1;
da6b5335 1369 }
18c9b560
AZ
1370 if (insn & ARM_CP_RW_BIT) {
1371 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
da6b5335
FN
1372 tmp = new_tmp();
1373 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1374 iwmmxt_store_creg(wrd, tmp);
18c9b560 1375 } else {
e677137d
PB
1376 i = 1;
1377 if (insn & (1 << 8)) {
1378 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1379 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1380 i = 0;
1381 } else { /* WLDRW wRd */
da6b5335 1382 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1383 }
1384 } else {
1385 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1386 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1387 } else { /* WLDRB */
da6b5335 1388 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1389 }
1390 }
1391 if (i) {
1392 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1393 dead_tmp(tmp);
1394 }
18c9b560
AZ
1395 gen_op_iwmmxt_movq_wRn_M0(wrd);
1396 }
1397 } else {
1398 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1399 tmp = iwmmxt_load_creg(wrd);
1400 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1401 } else {
1402 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1403 tmp = new_tmp();
1404 if (insn & (1 << 8)) {
1405 if (insn & (1 << 22)) { /* WSTRD */
1406 dead_tmp(tmp);
da6b5335 1407 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1408 } else { /* WSTRW wRd */
1409 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1410 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1411 }
1412 } else {
1413 if (insn & (1 << 22)) { /* WSTRH */
1414 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1415 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1416 } else { /* WSTRB */
1417 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1418 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1419 }
1420 }
18c9b560
AZ
1421 }
1422 }
d9968827 1423 dead_tmp(addr);
18c9b560
AZ
1424 return 0;
1425 }
1426
1427 if ((insn & 0x0f000000) != 0x0e000000)
1428 return 1;
1429
1430 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1431 case 0x000: /* WOR */
1432 wrd = (insn >> 12) & 0xf;
1433 rd0 = (insn >> 0) & 0xf;
1434 rd1 = (insn >> 16) & 0xf;
1435 gen_op_iwmmxt_movq_M0_wRn(rd0);
1436 gen_op_iwmmxt_orq_M0_wRn(rd1);
1437 gen_op_iwmmxt_setpsr_nz();
1438 gen_op_iwmmxt_movq_wRn_M0(wrd);
1439 gen_op_iwmmxt_set_mup();
1440 gen_op_iwmmxt_set_cup();
1441 break;
1442 case 0x011: /* TMCR */
1443 if (insn & 0xf)
1444 return 1;
1445 rd = (insn >> 12) & 0xf;
1446 wrd = (insn >> 16) & 0xf;
1447 switch (wrd) {
1448 case ARM_IWMMXT_wCID:
1449 case ARM_IWMMXT_wCASF:
1450 break;
1451 case ARM_IWMMXT_wCon:
1452 gen_op_iwmmxt_set_cup();
1453 /* Fall through. */
1454 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1455 tmp = iwmmxt_load_creg(wrd);
1456 tmp2 = load_reg(s, rd);
f669df27 1457 tcg_gen_andc_i32(tmp, tmp, tmp2);
da6b5335
FN
1458 dead_tmp(tmp2);
1459 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1460 break;
1461 case ARM_IWMMXT_wCGR0:
1462 case ARM_IWMMXT_wCGR1:
1463 case ARM_IWMMXT_wCGR2:
1464 case ARM_IWMMXT_wCGR3:
1465 gen_op_iwmmxt_set_cup();
da6b5335
FN
1466 tmp = load_reg(s, rd);
1467 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1468 break;
1469 default:
1470 return 1;
1471 }
1472 break;
1473 case 0x100: /* WXOR */
1474 wrd = (insn >> 12) & 0xf;
1475 rd0 = (insn >> 0) & 0xf;
1476 rd1 = (insn >> 16) & 0xf;
1477 gen_op_iwmmxt_movq_M0_wRn(rd0);
1478 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1479 gen_op_iwmmxt_setpsr_nz();
1480 gen_op_iwmmxt_movq_wRn_M0(wrd);
1481 gen_op_iwmmxt_set_mup();
1482 gen_op_iwmmxt_set_cup();
1483 break;
1484 case 0x111: /* TMRC */
1485 if (insn & 0xf)
1486 return 1;
1487 rd = (insn >> 12) & 0xf;
1488 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1489 tmp = iwmmxt_load_creg(wrd);
1490 store_reg(s, rd, tmp);
18c9b560
AZ
1491 break;
1492 case 0x300: /* WANDN */
1493 wrd = (insn >> 12) & 0xf;
1494 rd0 = (insn >> 0) & 0xf;
1495 rd1 = (insn >> 16) & 0xf;
1496 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1497 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1498 gen_op_iwmmxt_andq_M0_wRn(rd1);
1499 gen_op_iwmmxt_setpsr_nz();
1500 gen_op_iwmmxt_movq_wRn_M0(wrd);
1501 gen_op_iwmmxt_set_mup();
1502 gen_op_iwmmxt_set_cup();
1503 break;
1504 case 0x200: /* WAND */
1505 wrd = (insn >> 12) & 0xf;
1506 rd0 = (insn >> 0) & 0xf;
1507 rd1 = (insn >> 16) & 0xf;
1508 gen_op_iwmmxt_movq_M0_wRn(rd0);
1509 gen_op_iwmmxt_andq_M0_wRn(rd1);
1510 gen_op_iwmmxt_setpsr_nz();
1511 gen_op_iwmmxt_movq_wRn_M0(wrd);
1512 gen_op_iwmmxt_set_mup();
1513 gen_op_iwmmxt_set_cup();
1514 break;
1515 case 0x810: case 0xa10: /* WMADD */
1516 wrd = (insn >> 12) & 0xf;
1517 rd0 = (insn >> 0) & 0xf;
1518 rd1 = (insn >> 16) & 0xf;
1519 gen_op_iwmmxt_movq_M0_wRn(rd0);
1520 if (insn & (1 << 21))
1521 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1522 else
1523 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1524 gen_op_iwmmxt_movq_wRn_M0(wrd);
1525 gen_op_iwmmxt_set_mup();
1526 break;
1527 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1528 wrd = (insn >> 12) & 0xf;
1529 rd0 = (insn >> 16) & 0xf;
1530 rd1 = (insn >> 0) & 0xf;
1531 gen_op_iwmmxt_movq_M0_wRn(rd0);
1532 switch ((insn >> 22) & 3) {
1533 case 0:
1534 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1535 break;
1536 case 1:
1537 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1538 break;
1539 case 2:
1540 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1541 break;
1542 case 3:
1543 return 1;
1544 }
1545 gen_op_iwmmxt_movq_wRn_M0(wrd);
1546 gen_op_iwmmxt_set_mup();
1547 gen_op_iwmmxt_set_cup();
1548 break;
1549 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1550 wrd = (insn >> 12) & 0xf;
1551 rd0 = (insn >> 16) & 0xf;
1552 rd1 = (insn >> 0) & 0xf;
1553 gen_op_iwmmxt_movq_M0_wRn(rd0);
1554 switch ((insn >> 22) & 3) {
1555 case 0:
1556 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1557 break;
1558 case 1:
1559 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1560 break;
1561 case 2:
1562 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1563 break;
1564 case 3:
1565 return 1;
1566 }
1567 gen_op_iwmmxt_movq_wRn_M0(wrd);
1568 gen_op_iwmmxt_set_mup();
1569 gen_op_iwmmxt_set_cup();
1570 break;
1571 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1572 wrd = (insn >> 12) & 0xf;
1573 rd0 = (insn >> 16) & 0xf;
1574 rd1 = (insn >> 0) & 0xf;
1575 gen_op_iwmmxt_movq_M0_wRn(rd0);
1576 if (insn & (1 << 22))
1577 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1578 else
1579 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1580 if (!(insn & (1 << 20)))
1581 gen_op_iwmmxt_addl_M0_wRn(wrd);
1582 gen_op_iwmmxt_movq_wRn_M0(wrd);
1583 gen_op_iwmmxt_set_mup();
1584 break;
1585 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1586 wrd = (insn >> 12) & 0xf;
1587 rd0 = (insn >> 16) & 0xf;
1588 rd1 = (insn >> 0) & 0xf;
1589 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1590 if (insn & (1 << 21)) {
1591 if (insn & (1 << 20))
1592 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1593 else
1594 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1595 } else {
1596 if (insn & (1 << 20))
1597 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1598 else
1599 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1600 }
18c9b560
AZ
1601 gen_op_iwmmxt_movq_wRn_M0(wrd);
1602 gen_op_iwmmxt_set_mup();
1603 break;
1604 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1605 wrd = (insn >> 12) & 0xf;
1606 rd0 = (insn >> 16) & 0xf;
1607 rd1 = (insn >> 0) & 0xf;
1608 gen_op_iwmmxt_movq_M0_wRn(rd0);
1609 if (insn & (1 << 21))
1610 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1611 else
1612 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1613 if (!(insn & (1 << 20))) {
e677137d
PB
1614 iwmmxt_load_reg(cpu_V1, wrd);
1615 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1616 }
1617 gen_op_iwmmxt_movq_wRn_M0(wrd);
1618 gen_op_iwmmxt_set_mup();
1619 break;
1620 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1621 wrd = (insn >> 12) & 0xf;
1622 rd0 = (insn >> 16) & 0xf;
1623 rd1 = (insn >> 0) & 0xf;
1624 gen_op_iwmmxt_movq_M0_wRn(rd0);
1625 switch ((insn >> 22) & 3) {
1626 case 0:
1627 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1628 break;
1629 case 1:
1630 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1631 break;
1632 case 2:
1633 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1634 break;
1635 case 3:
1636 return 1;
1637 }
1638 gen_op_iwmmxt_movq_wRn_M0(wrd);
1639 gen_op_iwmmxt_set_mup();
1640 gen_op_iwmmxt_set_cup();
1641 break;
1642 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1643 wrd = (insn >> 12) & 0xf;
1644 rd0 = (insn >> 16) & 0xf;
1645 rd1 = (insn >> 0) & 0xf;
1646 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1647 if (insn & (1 << 22)) {
1648 if (insn & (1 << 20))
1649 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1650 else
1651 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1652 } else {
1653 if (insn & (1 << 20))
1654 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1655 else
1656 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1657 }
18c9b560
AZ
1658 gen_op_iwmmxt_movq_wRn_M0(wrd);
1659 gen_op_iwmmxt_set_mup();
1660 gen_op_iwmmxt_set_cup();
1661 break;
1662 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1663 wrd = (insn >> 12) & 0xf;
1664 rd0 = (insn >> 16) & 0xf;
1665 rd1 = (insn >> 0) & 0xf;
1666 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1667 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1668 tcg_gen_andi_i32(tmp, tmp, 7);
1669 iwmmxt_load_reg(cpu_V1, rd1);
1670 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1671 dead_tmp(tmp);
18c9b560
AZ
1672 gen_op_iwmmxt_movq_wRn_M0(wrd);
1673 gen_op_iwmmxt_set_mup();
1674 break;
1675 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1676 if (((insn >> 6) & 3) == 3)
1677 return 1;
18c9b560
AZ
1678 rd = (insn >> 12) & 0xf;
1679 wrd = (insn >> 16) & 0xf;
da6b5335 1680 tmp = load_reg(s, rd);
18c9b560
AZ
1681 gen_op_iwmmxt_movq_M0_wRn(wrd);
1682 switch ((insn >> 6) & 3) {
1683 case 0:
da6b5335
FN
1684 tmp2 = tcg_const_i32(0xff);
1685 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1686 break;
1687 case 1:
da6b5335
FN
1688 tmp2 = tcg_const_i32(0xffff);
1689 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1690 break;
1691 case 2:
da6b5335
FN
1692 tmp2 = tcg_const_i32(0xffffffff);
1693 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1694 break;
da6b5335
FN
1695 default:
1696 TCGV_UNUSED(tmp2);
1697 TCGV_UNUSED(tmp3);
18c9b560 1698 }
da6b5335
FN
1699 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1700 tcg_temp_free(tmp3);
1701 tcg_temp_free(tmp2);
1702 dead_tmp(tmp);
18c9b560
AZ
1703 gen_op_iwmmxt_movq_wRn_M0(wrd);
1704 gen_op_iwmmxt_set_mup();
1705 break;
1706 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1707 rd = (insn >> 12) & 0xf;
1708 wrd = (insn >> 16) & 0xf;
da6b5335 1709 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1710 return 1;
1711 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335 1712 tmp = new_tmp();
18c9b560
AZ
1713 switch ((insn >> 22) & 3) {
1714 case 0:
da6b5335
FN
1715 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1716 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1717 if (insn & 8) {
1718 tcg_gen_ext8s_i32(tmp, tmp);
1719 } else {
1720 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1721 }
1722 break;
1723 case 1:
da6b5335
FN
1724 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1725 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1726 if (insn & 8) {
1727 tcg_gen_ext16s_i32(tmp, tmp);
1728 } else {
1729 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1730 }
1731 break;
1732 case 2:
da6b5335
FN
1733 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1734 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1735 break;
18c9b560 1736 }
da6b5335 1737 store_reg(s, rd, tmp);
18c9b560
AZ
1738 break;
1739 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1740 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1741 return 1;
da6b5335 1742 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1743 switch ((insn >> 22) & 3) {
1744 case 0:
da6b5335 1745 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1746 break;
1747 case 1:
da6b5335 1748 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1749 break;
1750 case 2:
da6b5335 1751 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1752 break;
18c9b560 1753 }
da6b5335
FN
1754 tcg_gen_shli_i32(tmp, tmp, 28);
1755 gen_set_nzcv(tmp);
1756 dead_tmp(tmp);
18c9b560
AZ
1757 break;
1758 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1759 if (((insn >> 6) & 3) == 3)
1760 return 1;
18c9b560
AZ
1761 rd = (insn >> 12) & 0xf;
1762 wrd = (insn >> 16) & 0xf;
da6b5335 1763 tmp = load_reg(s, rd);
18c9b560
AZ
1764 switch ((insn >> 6) & 3) {
1765 case 0:
da6b5335 1766 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1767 break;
1768 case 1:
da6b5335 1769 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1770 break;
1771 case 2:
da6b5335 1772 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1773 break;
18c9b560 1774 }
da6b5335 1775 dead_tmp(tmp);
18c9b560
AZ
1776 gen_op_iwmmxt_movq_wRn_M0(wrd);
1777 gen_op_iwmmxt_set_mup();
1778 break;
1779 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1780 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1781 return 1;
da6b5335
FN
1782 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1783 tmp2 = new_tmp();
1784 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1785 switch ((insn >> 22) & 3) {
1786 case 0:
1787 for (i = 0; i < 7; i ++) {
da6b5335
FN
1788 tcg_gen_shli_i32(tmp2, tmp2, 4);
1789 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1790 }
1791 break;
1792 case 1:
1793 for (i = 0; i < 3; i ++) {
da6b5335
FN
1794 tcg_gen_shli_i32(tmp2, tmp2, 8);
1795 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1796 }
1797 break;
1798 case 2:
da6b5335
FN
1799 tcg_gen_shli_i32(tmp2, tmp2, 16);
1800 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1801 break;
18c9b560 1802 }
da6b5335
FN
1803 gen_set_nzcv(tmp);
1804 dead_tmp(tmp2);
1805 dead_tmp(tmp);
18c9b560
AZ
1806 break;
1807 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1808 wrd = (insn >> 12) & 0xf;
1809 rd0 = (insn >> 16) & 0xf;
1810 gen_op_iwmmxt_movq_M0_wRn(rd0);
1811 switch ((insn >> 22) & 3) {
1812 case 0:
e677137d 1813 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1814 break;
1815 case 1:
e677137d 1816 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1817 break;
1818 case 2:
e677137d 1819 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1820 break;
1821 case 3:
1822 return 1;
1823 }
1824 gen_op_iwmmxt_movq_wRn_M0(wrd);
1825 gen_op_iwmmxt_set_mup();
1826 break;
1827 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1828 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1829 return 1;
da6b5335
FN
1830 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1831 tmp2 = new_tmp();
1832 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1833 switch ((insn >> 22) & 3) {
1834 case 0:
1835 for (i = 0; i < 7; i ++) {
da6b5335
FN
1836 tcg_gen_shli_i32(tmp2, tmp2, 4);
1837 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1838 }
1839 break;
1840 case 1:
1841 for (i = 0; i < 3; i ++) {
da6b5335
FN
1842 tcg_gen_shli_i32(tmp2, tmp2, 8);
1843 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1844 }
1845 break;
1846 case 2:
da6b5335
FN
1847 tcg_gen_shli_i32(tmp2, tmp2, 16);
1848 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1849 break;
18c9b560 1850 }
da6b5335
FN
1851 gen_set_nzcv(tmp);
1852 dead_tmp(tmp2);
1853 dead_tmp(tmp);
18c9b560
AZ
1854 break;
1855 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1856 rd = (insn >> 12) & 0xf;
1857 rd0 = (insn >> 16) & 0xf;
da6b5335 1858 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1859 return 1;
1860 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 1861 tmp = new_tmp();
18c9b560
AZ
1862 switch ((insn >> 22) & 3) {
1863 case 0:
da6b5335 1864 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1865 break;
1866 case 1:
da6b5335 1867 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1868 break;
1869 case 2:
da6b5335 1870 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1871 break;
18c9b560 1872 }
da6b5335 1873 store_reg(s, rd, tmp);
18c9b560
AZ
1874 break;
1875 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1876 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1877 wrd = (insn >> 12) & 0xf;
1878 rd0 = (insn >> 16) & 0xf;
1879 rd1 = (insn >> 0) & 0xf;
1880 gen_op_iwmmxt_movq_M0_wRn(rd0);
1881 switch ((insn >> 22) & 3) {
1882 case 0:
1883 if (insn & (1 << 21))
1884 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1885 else
1886 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1887 break;
1888 case 1:
1889 if (insn & (1 << 21))
1890 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1891 else
1892 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1893 break;
1894 case 2:
1895 if (insn & (1 << 21))
1896 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1897 else
1898 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1899 break;
1900 case 3:
1901 return 1;
1902 }
1903 gen_op_iwmmxt_movq_wRn_M0(wrd);
1904 gen_op_iwmmxt_set_mup();
1905 gen_op_iwmmxt_set_cup();
1906 break;
1907 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1908 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1909 wrd = (insn >> 12) & 0xf;
1910 rd0 = (insn >> 16) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 switch ((insn >> 22) & 3) {
1913 case 0:
1914 if (insn & (1 << 21))
1915 gen_op_iwmmxt_unpacklsb_M0();
1916 else
1917 gen_op_iwmmxt_unpacklub_M0();
1918 break;
1919 case 1:
1920 if (insn & (1 << 21))
1921 gen_op_iwmmxt_unpacklsw_M0();
1922 else
1923 gen_op_iwmmxt_unpackluw_M0();
1924 break;
1925 case 2:
1926 if (insn & (1 << 21))
1927 gen_op_iwmmxt_unpacklsl_M0();
1928 else
1929 gen_op_iwmmxt_unpacklul_M0();
1930 break;
1931 case 3:
1932 return 1;
1933 }
1934 gen_op_iwmmxt_movq_wRn_M0(wrd);
1935 gen_op_iwmmxt_set_mup();
1936 gen_op_iwmmxt_set_cup();
1937 break;
1938 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1939 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0);
1943 switch ((insn >> 22) & 3) {
1944 case 0:
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_unpackhsb_M0();
1947 else
1948 gen_op_iwmmxt_unpackhub_M0();
1949 break;
1950 case 1:
1951 if (insn & (1 << 21))
1952 gen_op_iwmmxt_unpackhsw_M0();
1953 else
1954 gen_op_iwmmxt_unpackhuw_M0();
1955 break;
1956 case 2:
1957 if (insn & (1 << 21))
1958 gen_op_iwmmxt_unpackhsl_M0();
1959 else
1960 gen_op_iwmmxt_unpackhul_M0();
1961 break;
1962 case 3:
1963 return 1;
1964 }
1965 gen_op_iwmmxt_movq_wRn_M0(wrd);
1966 gen_op_iwmmxt_set_mup();
1967 gen_op_iwmmxt_set_cup();
1968 break;
1969 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1970 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1971 if (((insn >> 22) & 3) == 0)
1972 return 1;
18c9b560
AZ
1973 wrd = (insn >> 12) & 0xf;
1974 rd0 = (insn >> 16) & 0xf;
1975 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1976 tmp = new_tmp();
1977 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1978 dead_tmp(tmp);
18c9b560 1979 return 1;
da6b5335 1980 }
18c9b560 1981 switch ((insn >> 22) & 3) {
18c9b560 1982 case 1:
da6b5335 1983 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1984 break;
1985 case 2:
da6b5335 1986 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1987 break;
1988 case 3:
da6b5335 1989 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1990 break;
1991 }
da6b5335 1992 dead_tmp(tmp);
18c9b560
AZ
1993 gen_op_iwmmxt_movq_wRn_M0(wrd);
1994 gen_op_iwmmxt_set_mup();
1995 gen_op_iwmmxt_set_cup();
1996 break;
1997 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1998 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1999 if (((insn >> 22) & 3) == 0)
2000 return 1;
18c9b560
AZ
2001 wrd = (insn >> 12) & 0xf;
2002 rd0 = (insn >> 16) & 0xf;
2003 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2004 tmp = new_tmp();
2005 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2006 dead_tmp(tmp);
18c9b560 2007 return 1;
da6b5335 2008 }
18c9b560 2009 switch ((insn >> 22) & 3) {
18c9b560 2010 case 1:
da6b5335 2011 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2012 break;
2013 case 2:
da6b5335 2014 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2015 break;
2016 case 3:
da6b5335 2017 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2018 break;
2019 }
da6b5335 2020 dead_tmp(tmp);
18c9b560
AZ
2021 gen_op_iwmmxt_movq_wRn_M0(wrd);
2022 gen_op_iwmmxt_set_mup();
2023 gen_op_iwmmxt_set_cup();
2024 break;
2025 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2026 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2027 if (((insn >> 22) & 3) == 0)
2028 return 1;
18c9b560
AZ
2029 wrd = (insn >> 12) & 0xf;
2030 rd0 = (insn >> 16) & 0xf;
2031 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2032 tmp = new_tmp();
2033 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2034 dead_tmp(tmp);
18c9b560 2035 return 1;
da6b5335 2036 }
18c9b560 2037 switch ((insn >> 22) & 3) {
18c9b560 2038 case 1:
da6b5335 2039 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2040 break;
2041 case 2:
da6b5335 2042 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2043 break;
2044 case 3:
da6b5335 2045 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2046 break;
2047 }
da6b5335 2048 dead_tmp(tmp);
18c9b560
AZ
2049 gen_op_iwmmxt_movq_wRn_M0(wrd);
2050 gen_op_iwmmxt_set_mup();
2051 gen_op_iwmmxt_set_cup();
2052 break;
2053 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2054 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2055 if (((insn >> 22) & 3) == 0)
2056 return 1;
18c9b560
AZ
2057 wrd = (insn >> 12) & 0xf;
2058 rd0 = (insn >> 16) & 0xf;
2059 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2060 tmp = new_tmp();
18c9b560 2061 switch ((insn >> 22) & 3) {
18c9b560 2062 case 1:
da6b5335
FN
2063 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2064 dead_tmp(tmp);
18c9b560 2065 return 1;
da6b5335
FN
2066 }
2067 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2068 break;
2069 case 2:
da6b5335
FN
2070 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2071 dead_tmp(tmp);
18c9b560 2072 return 1;
da6b5335
FN
2073 }
2074 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2075 break;
2076 case 3:
da6b5335
FN
2077 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2078 dead_tmp(tmp);
18c9b560 2079 return 1;
da6b5335
FN
2080 }
2081 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2082 break;
2083 }
da6b5335 2084 dead_tmp(tmp);
18c9b560
AZ
2085 gen_op_iwmmxt_movq_wRn_M0(wrd);
2086 gen_op_iwmmxt_set_mup();
2087 gen_op_iwmmxt_set_cup();
2088 break;
2089 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2090 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2091 wrd = (insn >> 12) & 0xf;
2092 rd0 = (insn >> 16) & 0xf;
2093 rd1 = (insn >> 0) & 0xf;
2094 gen_op_iwmmxt_movq_M0_wRn(rd0);
2095 switch ((insn >> 22) & 3) {
2096 case 0:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2099 else
2100 gen_op_iwmmxt_minub_M0_wRn(rd1);
2101 break;
2102 case 1:
2103 if (insn & (1 << 21))
2104 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2105 else
2106 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2107 break;
2108 case 2:
2109 if (insn & (1 << 21))
2110 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2111 else
2112 gen_op_iwmmxt_minul_M0_wRn(rd1);
2113 break;
2114 case 3:
2115 return 1;
2116 }
2117 gen_op_iwmmxt_movq_wRn_M0(wrd);
2118 gen_op_iwmmxt_set_mup();
2119 break;
2120 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2121 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2122 wrd = (insn >> 12) & 0xf;
2123 rd0 = (insn >> 16) & 0xf;
2124 rd1 = (insn >> 0) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0);
2126 switch ((insn >> 22) & 3) {
2127 case 0:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2132 break;
2133 case 1:
2134 if (insn & (1 << 21))
2135 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2136 else
2137 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2138 break;
2139 case 2:
2140 if (insn & (1 << 21))
2141 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2142 else
2143 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2144 break;
2145 case 3:
2146 return 1;
2147 }
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2150 break;
2151 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2152 case 0x402: case 0x502: case 0x602: case 0x702:
2153 wrd = (insn >> 12) & 0xf;
2154 rd0 = (insn >> 16) & 0xf;
2155 rd1 = (insn >> 0) & 0xf;
2156 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2157 tmp = tcg_const_i32((insn >> 20) & 3);
2158 iwmmxt_load_reg(cpu_V1, rd1);
2159 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2160 tcg_temp_free(tmp);
18c9b560
AZ
2161 gen_op_iwmmxt_movq_wRn_M0(wrd);
2162 gen_op_iwmmxt_set_mup();
2163 break;
2164 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2165 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2166 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2167 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2168 wrd = (insn >> 12) & 0xf;
2169 rd0 = (insn >> 16) & 0xf;
2170 rd1 = (insn >> 0) & 0xf;
2171 gen_op_iwmmxt_movq_M0_wRn(rd0);
2172 switch ((insn >> 20) & 0xf) {
2173 case 0x0:
2174 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2175 break;
2176 case 0x1:
2177 gen_op_iwmmxt_subub_M0_wRn(rd1);
2178 break;
2179 case 0x3:
2180 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2181 break;
2182 case 0x4:
2183 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2184 break;
2185 case 0x5:
2186 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2187 break;
2188 case 0x7:
2189 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2190 break;
2191 case 0x8:
2192 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2193 break;
2194 case 0x9:
2195 gen_op_iwmmxt_subul_M0_wRn(rd1);
2196 break;
2197 case 0xb:
2198 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2199 break;
2200 default:
2201 return 1;
2202 }
2203 gen_op_iwmmxt_movq_wRn_M0(wrd);
2204 gen_op_iwmmxt_set_mup();
2205 gen_op_iwmmxt_set_cup();
2206 break;
2207 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2208 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2209 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2210 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2211 wrd = (insn >> 12) & 0xf;
2212 rd0 = (insn >> 16) & 0xf;
2213 gen_op_iwmmxt_movq_M0_wRn(rd0);
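 /* The 8-bit WSHUFH lane selector is reassembled from insn[23:20]
    (high nibble) and insn[3:0] (low nibble). */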
2214 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2215 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2216 tcg_temp_free(tmp);
18c9b560
AZ
2217 gen_op_iwmmxt_movq_wRn_M0(wrd);
2218 gen_op_iwmmxt_set_mup();
2219 gen_op_iwmmxt_set_cup();
2220 break;
2221 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2222 case 0x418: case 0x518: case 0x618: case 0x718:
2223 case 0x818: case 0x918: case 0xa18: case 0xb18:
2224 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2225 wrd = (insn >> 12) & 0xf;
2226 rd0 = (insn >> 16) & 0xf;
2227 rd1 = (insn >> 0) & 0xf;
2228 gen_op_iwmmxt_movq_M0_wRn(rd0);
2229 switch ((insn >> 20) & 0xf) {
2230 case 0x0:
2231 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2232 break;
2233 case 0x1:
2234 gen_op_iwmmxt_addub_M0_wRn(rd1);
2235 break;
2236 case 0x3:
2237 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2238 break;
2239 case 0x4:
2240 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2241 break;
2242 case 0x5:
2243 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2244 break;
2245 case 0x7:
2246 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2247 break;
2248 case 0x8:
2249 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2250 break;
2251 case 0x9:
2252 gen_op_iwmmxt_addul_M0_wRn(rd1);
2253 break;
2254 case 0xb:
2255 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2256 break;
2257 default:
2258 return 1;
2259 }
2260 gen_op_iwmmxt_movq_wRn_M0(wrd);
2261 gen_op_iwmmxt_set_mup();
2262 gen_op_iwmmxt_set_cup();
2263 break;
2264 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2265 case 0x408: case 0x508: case 0x608: case 0x708:
2266 case 0x808: case 0x908: case 0xa08: case 0xb08:
2267 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2268 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2269 return 1;
18c9b560
AZ
2270 wrd = (insn >> 12) & 0xf;
2271 rd0 = (insn >> 16) & 0xf;
2272 rd1 = (insn >> 0) & 0xf;
2273 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2274 switch ((insn >> 22) & 3) {
18c9b560
AZ
2275 case 1:
2276 if (insn & (1 << 21))
2277 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2278 else
2279 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2280 break;
2281 case 2:
2282 if (insn & (1 << 21))
2283 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2284 else
2285 gen_op_iwmmxt_packul_M0_wRn(rd1);
2286 break;
2287 case 3:
2288 if (insn & (1 << 21))
2289 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2290 else
2291 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2292 break;
2293 }
2294 gen_op_iwmmxt_movq_wRn_M0(wrd);
2295 gen_op_iwmmxt_set_mup();
2296 gen_op_iwmmxt_set_cup();
2297 break;
2298 case 0x201: case 0x203: case 0x205: case 0x207:
2299 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2300 case 0x211: case 0x213: case 0x215: case 0x217:
2301 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2302 wrd = (insn >> 5) & 0xf;
2303 rd0 = (insn >> 12) & 0xf;
2304 rd1 = (insn >> 0) & 0xf;
2305 if (rd0 == 0xf || rd1 == 0xf)
2306 return 1;
2307 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2308 tmp = load_reg(s, rd0);
2309 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2310 switch ((insn >> 16) & 0xf) {
2311 case 0x0: /* TMIA */
da6b5335 2312 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2313 break;
2314 case 0x8: /* TMIAPH */
da6b5335 2315 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2316 break;
2317 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2318 if (insn & (1 << 16))
da6b5335 2319 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2320 if (insn & (1 << 17))
2321 tcg_gen_shri_i32(tmp2, tmp2, 16);
2322 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2323 break;
2324 default:
da6b5335
FN
2325 dead_tmp(tmp2);
2326 dead_tmp(tmp);
18c9b560
AZ
2327 return 1;
2328 }
da6b5335
FN
2329 dead_tmp(tmp2);
2330 dead_tmp(tmp);
18c9b560
AZ
2331 gen_op_iwmmxt_movq_wRn_M0(wrd);
2332 gen_op_iwmmxt_set_mup();
2333 break;
2334 default:
2335 return 1;
2336 }
2337
2338 return 0;
2339}
2340
2341/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2342 (i.e. an undefined instruction). */
2343static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2344{
2345 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2346 TCGv tmp, tmp2;
18c9b560
AZ
2347
2348 if ((insn & 0x0ff00f10) == 0x0e200010) {
2349 /* Multiply with Internal Accumulate Format */
2350 rd0 = (insn >> 12) & 0xf;
2351 rd1 = insn & 0xf;
2352 acc = (insn >> 5) & 7;
2353
2354 if (acc != 0)
2355 return 1;
2356
3a554c0f
FN
2357 tmp = load_reg(s, rd0);
2358 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2359 switch ((insn >> 16) & 0xf) {
2360 case 0x0: /* MIA */
3a554c0f 2361 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2362 break;
2363 case 0x8: /* MIAPH */
3a554c0f 2364 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2365 break;
2366 case 0xc: /* MIABB */
2367 case 0xd: /* MIABT */
2368 case 0xe: /* MIATB */
2369 case 0xf: /* MIATT */
18c9b560 2370 if (insn & (1 << 16))
3a554c0f 2371 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2372 if (insn & (1 << 17))
2373 tcg_gen_shri_i32(tmp2, tmp2, 16);
2374 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2375 break;
2376 default:
2377 return 1;
2378 }
3a554c0f
FN
2379 dead_tmp(tmp2);
2380 dead_tmp(tmp);
18c9b560
AZ
2381
2382 gen_op_iwmmxt_movq_wRn_M0(acc);
2383 return 0;
2384 }
2385
2386 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2387 /* Internal Accumulator Access Format */
2388 rdhi = (insn >> 16) & 0xf;
2389 rdlo = (insn >> 12) & 0xf;
2390 acc = insn & 7;
2391
2392 if (acc != 0)
2393 return 1;
2394
2395 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2396 iwmmxt_load_reg(cpu_V0, acc);
2397 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2398 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2399 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
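 /* The accumulator is only 40 bits wide, so only the low 8 bits of the
    high word are significant. */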
2400 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2401 } else { /* MAR */
3a554c0f
FN
2402 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2403 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2404 }
2405 return 0;
2406 }
2407
2408 return 1;
2409}
2410
c1713132
AZ
2411/* Disassemble a system coprocessor instruction. Returns nonzero if the
2412 instruction is not defined. */
2413static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2414{
b75263d6 2415 TCGv tmp, tmp2;
c1713132
AZ
2416 uint32_t rd = (insn >> 12) & 0xf;
2417 uint32_t cp = (insn >> 8) & 0xf;
2418 if (IS_USER(s)) {
2419 return 1;
2420 }
2421
18c9b560 2422 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2423 if (!env->cp[cp].cp_read)
2424 return 1;
8984bd2e
PB
2425 gen_set_pc_im(s->pc);
2426 tmp = new_tmp();
b75263d6
JR
2427 tmp2 = tcg_const_i32(insn);
2428 gen_helper_get_cp(tmp, cpu_env, tmp2);
2429 tcg_temp_free(tmp2);
8984bd2e 2430 store_reg(s, rd, tmp);
c1713132
AZ
2431 } else {
2432 if (!env->cp[cp].cp_write)
2433 return 1;
8984bd2e
PB
2434 gen_set_pc_im(s->pc);
2435 tmp = load_reg(s, rd);
b75263d6
JR
2436 tmp2 = tcg_const_i32(insn);
2437 gen_helper_set_cp(cpu_env, tmp2, tmp);
2438 tcg_temp_free(tmp2);
a60de947 2439 dead_tmp(tmp);
c1713132
AZ
2440 }
2441 return 0;
2442}
2443
9ee6e8bb
PB
2444static int cp15_user_ok(uint32_t insn)
2445{
2446 int cpn = (insn >> 16) & 0xf;
2447 int cpm = insn & 0xf;
2448 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2449
2450 if (cpn == 13 && cpm == 0) {
2451 /* TLS register. */
2452 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2453 return 1;
2454 }
2455 if (cpn == 7) {
2456 /* ISB, DSB, DMB. */
2457 if ((cpm == 5 && op == 4)
2458 || (cpm == 10 && (op == 4 || op == 5)))
2459 return 1;
2460 }
2461 return 0;
2462}
2463
3f26c122
RV
2464static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2465{
2466 TCGv tmp;
2467 int cpn = (insn >> 16) & 0xf;
2468 int cpm = insn & 0xf;
2469 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2470
2471 if (!arm_feature(env, ARM_FEATURE_V6K))
2472 return 0;
2473
2474 if (!(cpn == 13 && cpm == 0))
2475 return 0;
2476
2477 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2478 switch (op) {
2479 case 2:
c5883be2 2480 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2481 break;
2482 case 3:
c5883be2 2483 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2484 break;
2485 case 4:
c5883be2 2486 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2487 break;
2488 default:
3f26c122
RV
2489 return 0;
2490 }
2491 store_reg(s, rd, tmp);
2492
2493 } else {
2494 tmp = load_reg(s, rd);
2495 switch (op) {
2496 case 2:
c5883be2 2497 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2498 break;
2499 case 3:
c5883be2 2500 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2501 break;
2502 case 4:
c5883be2 2503 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2504 break;
2505 default:
c5883be2 2506 dead_tmp(tmp);
3f26c122
RV
2507 return 0;
2508 }
3f26c122
RV
2509 }
2510 return 1;
2511}
2512
b5ff1b31
FB
2513/* Disassemble a system coprocessor (cp15) instruction. Returns nonzero if the
2514 instruction is not defined. */
a90b7318 2515static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2516{
2517 uint32_t rd;
b75263d6 2518 TCGv tmp, tmp2;
b5ff1b31 2519
9ee6e8bb
PB
2520 /* M profile cores use memory mapped registers instead of cp15. */
2521 if (arm_feature(env, ARM_FEATURE_M))
2522 return 1;
2523
2524 if ((insn & (1 << 25)) == 0) {
2525 if (insn & (1 << 20)) {
2526 /* mrrc */
2527 return 1;
2528 }
2529 /* mcrr. Used for block cache operations, so implement as no-op. */
2530 return 0;
2531 }
2532 if ((insn & (1 << 4)) == 0) {
2533 /* cdp */
2534 return 1;
2535 }
2536 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2537 return 1;
2538 }
9332f9da
FB
2539 if ((insn & 0x0fff0fff) == 0x0e070f90
2540 || (insn & 0x0fff0fff) == 0x0e070f58) {
2541 /* Wait for interrupt. */
8984bd2e 2542 gen_set_pc_im(s->pc);
9ee6e8bb 2543 s->is_jmp = DISAS_WFI;
9332f9da
FB
2544 return 0;
2545 }
b5ff1b31 2546 rd = (insn >> 12) & 0xf;
3f26c122
RV
2547
2548 if (cp15_tls_load_store(env, s, insn, rd))
2549 return 0;
2550
b75263d6 2551 tmp2 = tcg_const_i32(insn);
18c9b560 2552 if (insn & ARM_CP_RW_BIT) {
8984bd2e 2553 tmp = new_tmp();
b75263d6 2554 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2555 /* If the destination register is r15 then set the condition codes instead. */
2556 if (rd != 15)
8984bd2e
PB
2557 store_reg(s, rd, tmp);
2558 else
2559 dead_tmp(tmp);
b5ff1b31 2560 } else {
8984bd2e 2561 tmp = load_reg(s, rd);
b75263d6 2562 gen_helper_set_cp15(cpu_env, tmp2, tmp);
8984bd2e 2563 dead_tmp(tmp);
a90b7318
AZ
2564 /* Normally we would always end the TB here, but Linux
2565 * arch/arm/mach-pxa/sleep.S expects two instructions following
2566 * an MMU enable to execute from cache. Imitate this behaviour. */
2567 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2568 (insn & 0x0fff0fff) != 0x0e010f10)
2569 gen_lookup_tb(s);
b5ff1b31 2570 }
b75263d6 2571 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2572 return 0;
2573}
2574
9ee6e8bb
PB
2575#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2576#define VFP_SREG(insn, bigbit, smallbit) \
2577 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2578#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2579 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2580 reg = (((insn) >> (bigbit)) & 0x0f) \
2581 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2582 } else { \
2583 if (insn & (1 << (smallbit))) \
2584 return 1; \
2585 reg = ((insn) >> (bigbit)) & 0x0f; \
2586 }} while (0)
2587
2588#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2589#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2590#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2591#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2592#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2593#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
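/* For example, VFP_SREG_D(insn) yields Sd = (insn[15:12] << 1) | insn[22],
   while VFP_DREG_D on a VFP3 core yields Dd = (insn[22] << 4) | insn[15:12]. */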
2594
4373f3ce
PB
2595/* Move between integer and VFP cores. */
2596static TCGv gen_vfp_mrs(void)
2597{
2598 TCGv tmp = new_tmp();
2599 tcg_gen_mov_i32(tmp, cpu_F0s);
2600 return tmp;
2601}
2602
2603static void gen_vfp_msr(TCGv tmp)
2604{
2605 tcg_gen_mov_i32(cpu_F0s, tmp);
2606 dead_tmp(tmp);
2607}
2608
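/* The gen_neon_dup_* helpers replicate a narrow value across a 32-bit lane,
   e.g. gen_neon_dup_u8 turns 0x000000ab into 0xabababab and
   gen_neon_dup_low16 turns 0x00001234 into 0x12341234. */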
2609static void gen_neon_dup_u8(TCGv var, int shift)
2610{
2611 TCGv tmp = new_tmp();
2612 if (shift)
2613 tcg_gen_shri_i32(var, var, shift);
86831435 2614 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2615 tcg_gen_shli_i32(tmp, var, 8);
2616 tcg_gen_or_i32(var, var, tmp);
2617 tcg_gen_shli_i32(tmp, var, 16);
2618 tcg_gen_or_i32(var, var, tmp);
2619 dead_tmp(tmp);
2620}
2621
2622static void gen_neon_dup_low16(TCGv var)
2623{
2624 TCGv tmp = new_tmp();
86831435 2625 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2626 tcg_gen_shli_i32(tmp, var, 16);
2627 tcg_gen_or_i32(var, var, tmp);
2628 dead_tmp(tmp);
2629}
2630
2631static void gen_neon_dup_high16(TCGv var)
2632{
2633 TCGv tmp = new_tmp();
2634 tcg_gen_andi_i32(var, var, 0xffff0000);
2635 tcg_gen_shri_i32(tmp, var, 16);
2636 tcg_gen_or_i32(var, var, tmp);
2637 dead_tmp(tmp);
2638}
2639
b7bcbe95
FB
2640/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2641 (i.e. an undefined instruction). */
2642static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2643{
2644 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2645 int dp, veclen;
312eea9f 2646 TCGv addr;
4373f3ce 2647 TCGv tmp;
ad69471c 2648 TCGv tmp2;
b7bcbe95 2649
40f137e1
PB
2650 if (!arm_feature(env, ARM_FEATURE_VFP))
2651 return 1;
2652
5df8bac1 2653 if (!s->vfp_enabled) {
9ee6e8bb 2654 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2655 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2656 return 1;
2657 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2658 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2659 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2660 return 1;
2661 }
b7bcbe95
FB
2662 dp = ((insn & 0xf00) == 0xb00);
2663 switch ((insn >> 24) & 0xf) {
2664 case 0xe:
2665 if (insn & (1 << 4)) {
2666 /* single register transfer */
b7bcbe95
FB
2667 rd = (insn >> 12) & 0xf;
2668 if (dp) {
9ee6e8bb
PB
2669 int size;
2670 int pass;
2671
2672 VFP_DREG_N(rn, insn);
2673 if (insn & 0xf)
b7bcbe95 2674 return 1;
9ee6e8bb
PB
2675 if (insn & 0x00c00060
2676 && !arm_feature(env, ARM_FEATURE_NEON))
2677 return 1;
2678
2679 pass = (insn >> 21) & 1;
2680 if (insn & (1 << 22)) {
2681 size = 0;
2682 offset = ((insn >> 5) & 3) * 8;
2683 } else if (insn & (1 << 5)) {
2684 size = 1;
2685 offset = (insn & (1 << 6)) ? 16 : 0;
2686 } else {
2687 size = 2;
2688 offset = 0;
2689 }
18c9b560 2690 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2691 /* vfp->arm */
ad69471c 2692 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2693 switch (size) {
2694 case 0:
9ee6e8bb 2695 if (offset)
ad69471c 2696 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2697 if (insn & (1 << 23))
ad69471c 2698 gen_uxtb(tmp);
9ee6e8bb 2699 else
ad69471c 2700 gen_sxtb(tmp);
9ee6e8bb
PB
2701 break;
2702 case 1:
9ee6e8bb
PB
2703 if (insn & (1 << 23)) {
2704 if (offset) {
ad69471c 2705 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2706 } else {
ad69471c 2707 gen_uxth(tmp);
9ee6e8bb
PB
2708 }
2709 } else {
2710 if (offset) {
ad69471c 2711 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2712 } else {
ad69471c 2713 gen_sxth(tmp);
9ee6e8bb
PB
2714 }
2715 }
2716 break;
2717 case 2:
9ee6e8bb
PB
2718 break;
2719 }
ad69471c 2720 store_reg(s, rd, tmp);
b7bcbe95
FB
2721 } else {
2722 /* arm->vfp */
ad69471c 2723 tmp = load_reg(s, rd);
9ee6e8bb
PB
2724 if (insn & (1 << 23)) {
2725 /* VDUP */
2726 if (size == 0) {
ad69471c 2727 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2728 } else if (size == 1) {
ad69471c 2729 gen_neon_dup_low16(tmp);
9ee6e8bb 2730 }
cbbccffc
PB
2731 for (n = 0; n <= pass * 2; n++) {
2732 tmp2 = new_tmp();
2733 tcg_gen_mov_i32(tmp2, tmp);
2734 neon_store_reg(rn, n, tmp2);
2735 }
2736 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2737 } else {
2738 /* VMOV */
2739 switch (size) {
2740 case 0:
ad69471c
PB
2741 tmp2 = neon_load_reg(rn, pass);
2742 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2743 dead_tmp(tmp2);
9ee6e8bb
PB
2744 break;
2745 case 1:
ad69471c
PB
2746 tmp2 = neon_load_reg(rn, pass);
2747 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2748 dead_tmp(tmp2);
9ee6e8bb
PB
2749 break;
2750 case 2:
9ee6e8bb
PB
2751 break;
2752 }
ad69471c 2753 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2754 }
b7bcbe95 2755 }
9ee6e8bb
PB
2756 } else { /* !dp */
2757 if ((insn & 0x6f) != 0x00)
2758 return 1;
2759 rn = VFP_SREG_N(insn);
18c9b560 2760 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2761 /* vfp->arm */
2762 if (insn & (1 << 21)) {
2763 /* system register */
40f137e1 2764 rn >>= 1;
9ee6e8bb 2765
b7bcbe95 2766 switch (rn) {
40f137e1 2767 case ARM_VFP_FPSID:
4373f3ce 2768 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2769 VFP3 restricts all id registers to privileged
2770 accesses. */
2771 if (IS_USER(s)
2772 && arm_feature(env, ARM_FEATURE_VFP3))
2773 return 1;
4373f3ce 2774 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2775 break;
40f137e1 2776 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2777 if (IS_USER(s))
2778 return 1;
4373f3ce 2779 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2780 break;
40f137e1
PB
2781 case ARM_VFP_FPINST:
2782 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2783 /* Not present in VFP3. */
2784 if (IS_USER(s)
2785 || arm_feature(env, ARM_FEATURE_VFP3))
2786 return 1;
4373f3ce 2787 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2788 break;
40f137e1 2789 case ARM_VFP_FPSCR:
601d70b9 2790 if (rd == 15) {
4373f3ce
PB
2791 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2792 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2793 } else {
2794 tmp = new_tmp();
2795 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2796 }
b7bcbe95 2797 break;
9ee6e8bb
PB
2798 case ARM_VFP_MVFR0:
2799 case ARM_VFP_MVFR1:
2800 if (IS_USER(s)
2801 || !arm_feature(env, ARM_FEATURE_VFP3))
2802 return 1;
4373f3ce 2803 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2804 break;
b7bcbe95
FB
2805 default:
2806 return 1;
2807 }
2808 } else {
2809 gen_mov_F0_vreg(0, rn);
4373f3ce 2810 tmp = gen_vfp_mrs();
b7bcbe95
FB
2811 }
2812 if (rd == 15) {
b5ff1b31 2813 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2814 gen_set_nzcv(tmp);
2815 dead_tmp(tmp);
2816 } else {
2817 store_reg(s, rd, tmp);
2818 }
b7bcbe95
FB
2819 } else {
2820 /* arm->vfp */
4373f3ce 2821 tmp = load_reg(s, rd);
b7bcbe95 2822 if (insn & (1 << 21)) {
40f137e1 2823 rn >>= 1;
b7bcbe95
FB
2824 /* system register */
2825 switch (rn) {
40f137e1 2826 case ARM_VFP_FPSID:
9ee6e8bb
PB
2827 case ARM_VFP_MVFR0:
2828 case ARM_VFP_MVFR1:
b7bcbe95
FB
2829 /* Writes are ignored. */
2830 break;
40f137e1 2831 case ARM_VFP_FPSCR:
4373f3ce
PB
2832 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2833 dead_tmp(tmp);
b5ff1b31 2834 gen_lookup_tb(s);
b7bcbe95 2835 break;
40f137e1 2836 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2837 if (IS_USER(s))
2838 return 1;
71b3c3de
JR
2839 /* TODO: VFP subarchitecture support.
2840 * For now, keep the EN bit only */
2841 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2842 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2843 gen_lookup_tb(s);
2844 break;
2845 case ARM_VFP_FPINST:
2846 case ARM_VFP_FPINST2:
4373f3ce 2847 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2848 break;
b7bcbe95
FB
2849 default:
2850 return 1;
2851 }
2852 } else {
4373f3ce 2853 gen_vfp_msr(tmp);
b7bcbe95
FB
2854 gen_mov_vreg_F0(0, rn);
2855 }
2856 }
2857 }
2858 } else {
2859 /* data processing */
2860 /* The opcode is in bits 23, 21, 20 and 6. */
2861 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2862 if (dp) {
2863 if (op == 15) {
2864 /* rn is opcode */
2865 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2866 } else {
2867 /* rn is register number */
9ee6e8bb 2868 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2869 }
2870
04595bf6 2871 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2872 /* Integer or single precision destination. */
9ee6e8bb 2873 rd = VFP_SREG_D(insn);
b7bcbe95 2874 } else {
9ee6e8bb 2875 VFP_DREG_D(rd, insn);
b7bcbe95 2876 }
04595bf6
PM
2877 if (op == 15 &&
2878 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2879 /* VCVT from int is always from S reg regardless of dp bit.
2880 * VCVT with immediate frac_bits has same format as SREG_M
2881 */
2882 rm = VFP_SREG_M(insn);
b7bcbe95 2883 } else {
9ee6e8bb 2884 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2885 }
2886 } else {
9ee6e8bb 2887 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2888 if (op == 15 && rn == 15) {
2889 /* Double precision destination. */
9ee6e8bb
PB
2890 VFP_DREG_D(rd, insn);
2891 } else {
2892 rd = VFP_SREG_D(insn);
2893 }
04595bf6
PM
2894 /* NB that we implicitly rely on the encoding for the frac_bits
2895 * in VCVT of fixed to float being the same as that of an SREG_M
2896 */
9ee6e8bb 2897 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2898 }
2899
69d1fc22 2900 veclen = s->vec_len;
b7bcbe95
FB
2901 if (op == 15 && rn > 3)
2902 veclen = 0;
2903
2904 /* Shut up compiler warnings. */
2905 delta_m = 0;
2906 delta_d = 0;
2907 bank_mask = 0;
3b46e624 2908
b7bcbe95
FB
2909 if (veclen > 0) {
2910 if (dp)
2911 bank_mask = 0xc;
2912 else
2913 bank_mask = 0x18;
2914
2915 /* Figure out what type of vector operation this is. */
2916 if ((rd & bank_mask) == 0) {
2917 /* scalar */
2918 veclen = 0;
2919 } else {
2920 if (dp)
69d1fc22 2921 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2922 else
69d1fc22 2923 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2924
2925 if ((rm & bank_mask) == 0) {
2926 /* mixed scalar/vector */
2927 delta_m = 0;
2928 } else {
2929 /* vector */
2930 delta_m = delta_d;
2931 }
2932 }
2933 }
2934
2935 /* Load the initial operands. */
2936 if (op == 15) {
2937 switch (rn) {
2938 case 16:
2939 case 17:
2940 /* Integer source */
2941 gen_mov_F0_vreg(0, rm);
2942 break;
2943 case 8:
2944 case 9:
2945 /* Compare */
2946 gen_mov_F0_vreg(dp, rd);
2947 gen_mov_F1_vreg(dp, rm);
2948 break;
2949 case 10:
2950 case 11:
2951 /* Compare with zero */
2952 gen_mov_F0_vreg(dp, rd);
2953 gen_vfp_F1_ld0(dp);
2954 break;
9ee6e8bb
PB
2955 case 20:
2956 case 21:
2957 case 22:
2958 case 23:
644ad806
PB
2959 case 28:
2960 case 29:
2961 case 30:
2962 case 31:
9ee6e8bb
PB
2963 /* Source and destination the same. */
2964 gen_mov_F0_vreg(dp, rd);
2965 break;
b7bcbe95
FB
2966 default:
2967 /* One source operand. */
2968 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2969 break;
b7bcbe95
FB
2970 }
2971 } else {
2972 /* Two source operands. */
2973 gen_mov_F0_vreg(dp, rn);
2974 gen_mov_F1_vreg(dp, rm);
2975 }
2976
2977 for (;;) {
2978 /* Perform the calculation. */
2979 switch (op) {
2980 case 0: /* mac: fd + (fn * fm) */
2981 gen_vfp_mul(dp);
2982 gen_mov_F1_vreg(dp, rd);
2983 gen_vfp_add(dp);
2984 break;
2985 case 1: /* nmac: fd - (fn * fm) */
2986 gen_vfp_mul(dp);
2987 gen_vfp_neg(dp);
2988 gen_mov_F1_vreg(dp, rd);
2989 gen_vfp_add(dp);
2990 break;
2991 case 2: /* msc: -fd + (fn * fm) */
2992 gen_vfp_mul(dp);
2993 gen_mov_F1_vreg(dp, rd);
2994 gen_vfp_sub(dp);
2995 break;
2996 case 3: /* nmsc: -fd - (fn * fm) */
2997 gen_vfp_mul(dp);
b7bcbe95 2998 gen_vfp_neg(dp);
c9fb531a
PB
2999 gen_mov_F1_vreg(dp, rd);
3000 gen_vfp_sub(dp);
b7bcbe95
FB
3001 break;
3002 case 4: /* mul: fn * fm */
3003 gen_vfp_mul(dp);
3004 break;
3005 case 5: /* nmul: -(fn * fm) */
3006 gen_vfp_mul(dp);
3007 gen_vfp_neg(dp);
3008 break;
3009 case 6: /* add: fn + fm */
3010 gen_vfp_add(dp);
3011 break;
3012 case 7: /* sub: fn - fm */
3013 gen_vfp_sub(dp);
3014 break;
3015 case 8: /* div: fn / fm */
3016 gen_vfp_div(dp);
3017 break;
9ee6e8bb
PB
3018 case 14: /* fconst */
3019 if (!arm_feature(env, ARM_FEATURE_VFP3))
3020 return 1;
3021
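 /* Expand the 8-bit VFP "fconst" immediate: insn[19] supplies the sign bit,
    bit 6 of the reassembled value selects the exponent pattern, and the
    remaining bits become the top exponent/fraction bits. */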
3022 n = (insn << 12) & 0x80000000;
3023 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3024 if (dp) {
3025 if (i & 0x40)
3026 i |= 0x3f80;
3027 else
3028 i |= 0x4000;
3029 n |= i << 16;
4373f3ce 3030 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3031 } else {
3032 if (i & 0x40)
3033 i |= 0x780;
3034 else
3035 i |= 0x800;
3036 n |= i << 19;
5b340b51 3037 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3038 }
9ee6e8bb 3039 break;
b7bcbe95
FB
3040 case 15: /* extension space */
3041 switch (rn) {
3042 case 0: /* cpy */
3043 /* no-op */
3044 break;
3045 case 1: /* abs */
3046 gen_vfp_abs(dp);
3047 break;
3048 case 2: /* neg */
3049 gen_vfp_neg(dp);
3050 break;
3051 case 3: /* sqrt */
3052 gen_vfp_sqrt(dp);
3053 break;
60011498
PB
3054 case 4: /* vcvtb.f32.f16 */
3055 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3056 return 1;
3057 tmp = gen_vfp_mrs();
3058 tcg_gen_ext16u_i32(tmp, tmp);
3059 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3060 dead_tmp(tmp);
3061 break;
3062 case 5: /* vcvtt.f32.f16 */
3063 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3064 return 1;
3065 tmp = gen_vfp_mrs();
3066 tcg_gen_shri_i32(tmp, tmp, 16);
3067 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3068 dead_tmp(tmp);
3069 break;
3070 case 6: /* vcvtb.f16.f32 */
3071 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3072 return 1;
3073 tmp = new_tmp();
3074 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3075 gen_mov_F0_vreg(0, rd);
3076 tmp2 = gen_vfp_mrs();
3077 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3078 tcg_gen_or_i32(tmp, tmp, tmp2);
3079 dead_tmp(tmp2);
3080 gen_vfp_msr(tmp);
3081 break;
3082 case 7: /* vcvtt.f16.f32 */
3083 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3084 return 1;
3085 tmp = new_tmp();
3086 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3087 tcg_gen_shli_i32(tmp, tmp, 16);
3088 gen_mov_F0_vreg(0, rd);
3089 tmp2 = gen_vfp_mrs();
3090 tcg_gen_ext16u_i32(tmp2, tmp2);
3091 tcg_gen_or_i32(tmp, tmp, tmp2);
3092 dead_tmp(tmp2);
3093 gen_vfp_msr(tmp);
3094 break;
b7bcbe95
FB
3095 case 8: /* cmp */
3096 gen_vfp_cmp(dp);
3097 break;
3098 case 9: /* cmpe */
3099 gen_vfp_cmpe(dp);
3100 break;
3101 case 10: /* cmpz */
3102 gen_vfp_cmp(dp);
3103 break;
3104 case 11: /* cmpez */
3105 gen_vfp_F1_ld0(dp);
3106 gen_vfp_cmpe(dp);
3107 break;
3108 case 15: /* single<->double conversion */
3109 if (dp)
4373f3ce 3110 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3111 else
4373f3ce 3112 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3113 break;
3114 case 16: /* fuito */
3115 gen_vfp_uito(dp);
3116 break;
3117 case 17: /* fsito */
3118 gen_vfp_sito(dp);
3119 break;
9ee6e8bb
PB
3120 case 20: /* fshto */
3121 if (!arm_feature(env, ARM_FEATURE_VFP3))
3122 return 1;
644ad806 3123 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3124 break;
3125 case 21: /* fslto */
3126 if (!arm_feature(env, ARM_FEATURE_VFP3))
3127 return 1;
644ad806 3128 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3129 break;
3130 case 22: /* fuhto */
3131 if (!arm_feature(env, ARM_FEATURE_VFP3))
3132 return 1;
644ad806 3133 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3134 break;
3135 case 23: /* fulto */
3136 if (!arm_feature(env, ARM_FEATURE_VFP3))
3137 return 1;
644ad806 3138 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3139 break;
b7bcbe95
FB
3140 case 24: /* ftoui */
3141 gen_vfp_toui(dp);
3142 break;
3143 case 25: /* ftouiz */
3144 gen_vfp_touiz(dp);
3145 break;
3146 case 26: /* ftosi */
3147 gen_vfp_tosi(dp);
3148 break;
3149 case 27: /* ftosiz */
3150 gen_vfp_tosiz(dp);
3151 break;
9ee6e8bb
PB
3152 case 28: /* ftosh */
3153 if (!arm_feature(env, ARM_FEATURE_VFP3))
3154 return 1;
644ad806 3155 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3156 break;
3157 case 29: /* ftosl */
3158 if (!arm_feature(env, ARM_FEATURE_VFP3))
3159 return 1;
644ad806 3160 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3161 break;
3162 case 30: /* ftouh */
3163 if (!arm_feature(env, ARM_FEATURE_VFP3))
3164 return 1;
644ad806 3165 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3166 break;
3167 case 31: /* ftoul */
3168 if (!arm_feature(env, ARM_FEATURE_VFP3))
3169 return 1;
644ad806 3170 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3171 break;
b7bcbe95
FB
3172 default: /* undefined */
3173 printf ("rn:%d\n", rn);
3174 return 1;
3175 }
3176 break;
3177 default: /* undefined */
3178 printf ("op:%d\n", op);
3179 return 1;
3180 }
3181
3182 /* Write back the result. */
3183 if (op == 15 && (rn >= 8 && rn <= 11))
3184 ; /* Comparison, do nothing. */
04595bf6
PM
3185 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3186 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3187 gen_mov_vreg_F0(0, rd);
3188 else if (op == 15 && rn == 15)
3189 /* conversion */
3190 gen_mov_vreg_F0(!dp, rd);
3191 else
3192 gen_mov_vreg_F0(dp, rd);
3193
3194 /* break out of the loop if we have finished */
3195 if (veclen == 0)
3196 break;
3197
3198 if (op == 15 && delta_m == 0) {
3199 /* single source one-many */
3200 while (veclen--) {
3201 rd = ((rd + delta_d) & (bank_mask - 1))
3202 | (rd & bank_mask);
3203 gen_mov_vreg_F0(dp, rd);
3204 }
3205 break;
3206 }
3207 /* Setup the next operands. */
3208 veclen--;
3209 rd = ((rd + delta_d) & (bank_mask - 1))
3210 | (rd & bank_mask);
3211
3212 if (op == 15) {
3213 /* One source operand. */
3214 rm = ((rm + delta_m) & (bank_mask - 1))
3215 | (rm & bank_mask);
3216 gen_mov_F0_vreg(dp, rm);
3217 } else {
3218 /* Two source operands. */
3219 rn = ((rn + delta_d) & (bank_mask - 1))
3220 | (rn & bank_mask);
3221 gen_mov_F0_vreg(dp, rn);
3222 if (delta_m) {
3223 rm = ((rm + delta_m) & (bank_mask - 1))
3224 | (rm & bank_mask);
3225 gen_mov_F1_vreg(dp, rm);
3226 }
3227 }
3228 }
3229 }
3230 break;
3231 case 0xc:
3232 case 0xd:
9ee6e8bb 3233 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3234 /* two-register transfer */
3235 rn = (insn >> 16) & 0xf;
3236 rd = (insn >> 12) & 0xf;
3237 if (dp) {
9ee6e8bb
PB
3238 VFP_DREG_M(rm, insn);
3239 } else {
3240 rm = VFP_SREG_M(insn);
3241 }
b7bcbe95 3242
18c9b560 3243 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3244 /* vfp->arm */
3245 if (dp) {
4373f3ce
PB
3246 gen_mov_F0_vreg(0, rm * 2);
3247 tmp = gen_vfp_mrs();
3248 store_reg(s, rd, tmp);
3249 gen_mov_F0_vreg(0, rm * 2 + 1);
3250 tmp = gen_vfp_mrs();
3251 store_reg(s, rn, tmp);
b7bcbe95
FB
3252 } else {
3253 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3254 tmp = gen_vfp_mrs();
3255 store_reg(s, rn, tmp);
b7bcbe95 3256 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3257 tmp = gen_vfp_mrs();
3258 store_reg(s, rd, tmp);
b7bcbe95
FB
3259 }
3260 } else {
3261 /* arm->vfp */
3262 if (dp) {
4373f3ce
PB
3263 tmp = load_reg(s, rd);
3264 gen_vfp_msr(tmp);
3265 gen_mov_vreg_F0(0, rm * 2);
3266 tmp = load_reg(s, rn);
3267 gen_vfp_msr(tmp);
3268 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3269 } else {
4373f3ce
PB
3270 tmp = load_reg(s, rn);
3271 gen_vfp_msr(tmp);
b7bcbe95 3272 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3273 tmp = load_reg(s, rd);
3274 gen_vfp_msr(tmp);
b7bcbe95
FB
3275 gen_mov_vreg_F0(0, rm + 1);
3276 }
3277 }
3278 } else {
3279 /* Load/store */
3280 rn = (insn >> 16) & 0xf;
3281 if (dp)
9ee6e8bb 3282 VFP_DREG_D(rd, insn);
b7bcbe95 3283 else
9ee6e8bb
PB
3284 rd = VFP_SREG_D(insn);
3285 if (s->thumb && rn == 15) {
312eea9f
FN
3286 addr = new_tmp();
3287 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3288 } else {
312eea9f 3289 addr = load_reg(s, rn);
9ee6e8bb 3290 }
b7bcbe95
FB
3291 if ((insn & 0x01200000) == 0x01000000) {
3292 /* Single load/store */
3293 offset = (insn & 0xff) << 2;
3294 if ((insn & (1 << 23)) == 0)
3295 offset = -offset;
312eea9f 3296 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3297 if (insn & (1 << 20)) {
312eea9f 3298 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3299 gen_mov_vreg_F0(dp, rd);
3300 } else {
3301 gen_mov_F0_vreg(dp, rd);
312eea9f 3302 gen_vfp_st(s, dp, addr);
b7bcbe95 3303 }
312eea9f 3304 dead_tmp(addr);
b7bcbe95
FB
3305 } else {
3306 /* load/store multiple */
3307 if (dp)
3308 n = (insn >> 1) & 0x7f;
3309 else
3310 n = insn & 0xff;
3311
3312 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3313 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3314
3315 if (dp)
3316 offset = 8;
3317 else
3318 offset = 4;
3319 for (i = 0; i < n; i++) {
18c9b560 3320 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3321 /* load */
312eea9f 3322 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3323 gen_mov_vreg_F0(dp, rd + i);
3324 } else {
3325 /* store */
3326 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3327 gen_vfp_st(s, dp, addr);
b7bcbe95 3328 }
312eea9f 3329 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3330 }
3331 if (insn & (1 << 21)) {
3332 /* writeback */
3333 if (insn & (1 << 24))
3334 offset = -offset * n;
3335 else if (dp && (insn & 1))
3336 offset = 4;
3337 else
3338 offset = 0;
3339
3340 if (offset != 0)
312eea9f
FN
3341 tcg_gen_addi_i32(addr, addr, offset);
3342 store_reg(s, rn, addr);
3343 } else {
3344 dead_tmp(addr);
b7bcbe95
FB
3345 }
3346 }
3347 }
3348 break;
3349 default:
3350 /* Should never happen. */
3351 return 1;
3352 }
3353 return 0;
3354}
3355
6e256c93 3356static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3357{
6e256c93
FB
3358 TranslationBlock *tb;
3359
3360 tb = s->tb;
3361 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3362 tcg_gen_goto_tb(n);
8984bd2e 3363 gen_set_pc_im(dest);
57fec1fe 3364 tcg_gen_exit_tb((long)tb + n);
6e256c93 3365 } else {
8984bd2e 3366 gen_set_pc_im(dest);
57fec1fe 3367 tcg_gen_exit_tb(0);
6e256c93 3368 }
c53be334
FB
3369}
3370
8aaca4c0
FB
3371static inline void gen_jmp (DisasContext *s, uint32_t dest)
3372{
551bd27f 3373 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3374 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3375 if (s->thumb)
d9ba4830
PB
3376 dest |= 1;
3377 gen_bx_im(s, dest);
8aaca4c0 3378 } else {
6e256c93 3379 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3380 s->is_jmp = DISAS_TB_JUMP;
3381 }
3382}
3383
d9ba4830 3384static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3385{
ee097184 3386 if (x)
d9ba4830 3387 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3388 else
d9ba4830 3389 gen_sxth(t0);
ee097184 3390 if (y)
d9ba4830 3391 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3392 else
d9ba4830
PB
3393 gen_sxth(t1);
3394 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3395}
3396
3397/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3398static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3399 uint32_t mask;
3400
3401 mask = 0;
3402 if (flags & (1 << 0))
3403 mask |= 0xff;
3404 if (flags & (1 << 1))
3405 mask |= 0xff00;
3406 if (flags & (1 << 2))
3407 mask |= 0xff0000;
3408 if (flags & (1 << 3))
3409 mask |= 0xff000000;
9ee6e8bb 3410
2ae23e75 3411 /* Mask out undefined bits. */
9ee6e8bb
PB
3412 mask &= ~CPSR_RESERVED;
3413 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3414 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3415 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3416 mask &= ~CPSR_IT;
9ee6e8bb 3417 /* Mask out execution state bits. */
2ae23e75 3418 if (!spsr)
e160c51c 3419 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3420 /* Mask out privileged bits. */
3421 if (IS_USER(s))
9ee6e8bb 3422 mask &= CPSR_USER;
b5ff1b31
FB
3423 return mask;
3424}
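/* For example, flags == 0b1001 (the 'c' and 'f' fields) initially selects
   0xff0000ff; the reserved-bit, V6, Thumb-2 and privilege adjustments above
   may then clear some of those bits. */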
3425
2fbac54b
FN
3426/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3427static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3428{
d9ba4830 3429 TCGv tmp;
b5ff1b31
FB
3430 if (spsr) {
3431 /* ??? This is also undefined in system mode. */
3432 if (IS_USER(s))
3433 return 1;
d9ba4830
PB
3434
3435 tmp = load_cpu_field(spsr);
3436 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3437 tcg_gen_andi_i32(t0, t0, mask);
3438 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3439 store_cpu_field(tmp, spsr);
b5ff1b31 3440 } else {
2fbac54b 3441 gen_set_cpsr(t0, mask);
b5ff1b31 3442 }
2fbac54b 3443 dead_tmp(t0);
b5ff1b31
FB
3444 gen_lookup_tb(s);
3445 return 0;
3446}
3447
2fbac54b
FN
3448/* Returns nonzero if access to the PSR is not permitted. */
3449static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3450{
3451 TCGv tmp;
3452 tmp = new_tmp();
3453 tcg_gen_movi_i32(tmp, val);
3454 return gen_set_psr(s, mask, spsr, tmp);
3455}
3456
e9bb4aa9
JR
3457/* Generate an old-style exception return. Marks pc as dead. */
3458static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3459{
d9ba4830 3460 TCGv tmp;
e9bb4aa9 3461 store_reg(s, 15, pc);
d9ba4830
PB
3462 tmp = load_cpu_field(spsr);
3463 gen_set_cpsr(tmp, 0xffffffff);
3464 dead_tmp(tmp);
b5ff1b31
FB
3465 s->is_jmp = DISAS_UPDATE;
3466}
3467
b0109805
PB
3468/* Generate a v6 exception return. Marks both values as dead. */
3469static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3470{
b0109805
PB
3471 gen_set_cpsr(cpsr, 0xffffffff);
3472 dead_tmp(cpsr);
3473 store_reg(s, 15, pc);
9ee6e8bb
PB
3474 s->is_jmp = DISAS_UPDATE;
3475}
3b46e624 3476
9ee6e8bb
PB
3477static inline void
3478gen_set_condexec (DisasContext *s)
3479{
3480 if (s->condexec_mask) {
8f01245e
PB
3481 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3482 TCGv tmp = new_tmp();
3483 tcg_gen_movi_i32(tmp, val);
d9ba4830 3484 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3485 }
3486}
3b46e624 3487
9ee6e8bb
PB
3488static void gen_nop_hint(DisasContext *s, int val)
3489{
3490 switch (val) {
3491 case 3: /* wfi */
8984bd2e 3492 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3493 s->is_jmp = DISAS_WFI;
3494 break;
3495 case 2: /* wfe */
3496 case 4: /* sev */
3497 /* TODO: Implement SEV and WFE. May help SMP performance. */
3498 default: /* nop */
3499 break;
3500 }
3501}
99c475ab 3502
ad69471c 3503#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3504
dd8fbd78 3505static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3506{
3507 switch (size) {
dd8fbd78
FN
3508 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3509 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3510 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3511 default: return 1;
3512 }
3513 return 0;
3514}
3515
dd8fbd78 3516static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3517{
3518 switch (size) {
dd8fbd78
FN
3519 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3520 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3521 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3522 default: return;
3523 }
3524}
3525
3526/* 32-bit pairwise ops end up the same as the elementwise versions. */
3527#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3528#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3529#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3530#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3531
3532/* FIXME: This is wrong. They set the wrong overflow bit. */
3533#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3534#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3535#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3536#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3537
3538#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3539 switch ((size << 1) | u) { \
3540 case 0: \
dd8fbd78 3541 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3542 break; \
3543 case 1: \
dd8fbd78 3544 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3545 break; \
3546 case 2: \
dd8fbd78 3547 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3548 break; \
3549 case 3: \
dd8fbd78 3550 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3551 break; \
3552 case 4: \
dd8fbd78 3553 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3554 break; \
3555 case 5: \
dd8fbd78 3556 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3557 break; \
3558 default: return 1; \
3559 }} while (0)
9ee6e8bb
PB
3560
3561#define GEN_NEON_INTEGER_OP(name) do { \
3562 switch ((size << 1) | u) { \
ad69471c 3563 case 0: \
dd8fbd78 3564 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3565 break; \
3566 case 1: \
dd8fbd78 3567 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3568 break; \
3569 case 2: \
dd8fbd78 3570 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3571 break; \
3572 case 3: \
dd8fbd78 3573 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3574 break; \
3575 case 4: \
dd8fbd78 3576 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3577 break; \
3578 case 5: \
dd8fbd78 3579 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3580 break; \
9ee6e8bb
PB
3581 default: return 1; \
3582 }} while (0)
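/* In both GEN_NEON_INTEGER_OP variants the (size << 1) | u index maps
   cases 0-5 to the signed/unsigned 8, 16 and 32-bit element helpers. */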
3583
dd8fbd78 3584static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3585{
dd8fbd78
FN
3586 TCGv tmp = new_tmp();
3587 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3588 return tmp;
9ee6e8bb
PB
3589}
3590
dd8fbd78 3591static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3592{
dd8fbd78
FN
3593 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3594 dead_tmp(var);
9ee6e8bb
PB
3595}
3596
dd8fbd78 3597static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3598{
dd8fbd78 3599 TCGv tmp;
9ee6e8bb 3600 if (size == 1) {
dd8fbd78 3601 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3602 } else {
dd8fbd78
FN
3603 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3604 if (reg & 1) {
3605 gen_neon_dup_low16(tmp);
3606 } else {
3607 gen_neon_dup_high16(tmp);
3608 }
9ee6e8bb 3609 }
dd8fbd78 3610 return tmp;
9ee6e8bb
PB
3611}
3612
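/* gen_neon_unzip_u8 packs the even-numbered bytes of the {t0, t1} pair into
   t0 and the odd-numbered bytes into t1; the zip helpers below perform the
   inverse interleave. */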
3613static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3614{
3615 TCGv rd, rm, tmp;
3616
3617 rd = new_tmp();
3618 rm = new_tmp();
3619 tmp = new_tmp();
3620
3621 tcg_gen_andi_i32(rd, t0, 0xff);
3622 tcg_gen_shri_i32(tmp, t0, 8);
3623 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3624 tcg_gen_or_i32(rd, rd, tmp);
3625 tcg_gen_shli_i32(tmp, t1, 16);
3626 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3627 tcg_gen_or_i32(rd, rd, tmp);
3628 tcg_gen_shli_i32(tmp, t1, 8);
3629 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3630 tcg_gen_or_i32(rd, rd, tmp);
3631
3632 tcg_gen_shri_i32(rm, t0, 8);
3633 tcg_gen_andi_i32(rm, rm, 0xff);
3634 tcg_gen_shri_i32(tmp, t0, 16);
3635 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3636 tcg_gen_or_i32(rm, rm, tmp);
3637 tcg_gen_shli_i32(tmp, t1, 8);
3638 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3639 tcg_gen_or_i32(rm, rm, tmp);
3640 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3641 tcg_gen_or_i32(t1, rm, tmp);
3642 tcg_gen_mov_i32(t0, rd);
3643
3644 dead_tmp(tmp);
3645 dead_tmp(rm);
3646 dead_tmp(rd);
3647}
3648
3649static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3650{
3651 TCGv rd, rm, tmp;
3652
3653 rd = new_tmp();
3654 rm = new_tmp();
3655 tmp = new_tmp();
3656
3657 tcg_gen_andi_i32(rd, t0, 0xff);
3658 tcg_gen_shli_i32(tmp, t1, 8);
3659 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3660 tcg_gen_or_i32(rd, rd, tmp);
3661 tcg_gen_shli_i32(tmp, t0, 16);
3662 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3663 tcg_gen_or_i32(rd, rd, tmp);
3664 tcg_gen_shli_i32(tmp, t1, 24);
3665 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3666 tcg_gen_or_i32(rd, rd, tmp);
3667
3668 tcg_gen_andi_i32(rm, t1, 0xff000000);
3669 tcg_gen_shri_i32(tmp, t0, 8);
3670 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3671 tcg_gen_or_i32(rm, rm, tmp);
3672 tcg_gen_shri_i32(tmp, t1, 8);
3673 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3674 tcg_gen_or_i32(rm, rm, tmp);
3675 tcg_gen_shri_i32(tmp, t0, 16);
3676 tcg_gen_andi_i32(tmp, tmp, 0xff);
3677 tcg_gen_or_i32(t1, rm, tmp);
3678 tcg_gen_mov_i32(t0, rd);
3679
3680 dead_tmp(tmp);
3681 dead_tmp(rm);
3682 dead_tmp(rd);
3683}
3684
3685static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3686{
3687 TCGv tmp, tmp2;
3688
3689 tmp = new_tmp();
3690 tmp2 = new_tmp();
3691
3692 tcg_gen_andi_i32(tmp, t0, 0xffff);
3693 tcg_gen_shli_i32(tmp2, t1, 16);
3694 tcg_gen_or_i32(tmp, tmp, tmp2);
3695 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3696 tcg_gen_shri_i32(tmp2, t0, 16);
3697 tcg_gen_or_i32(t1, t1, tmp2);
3698 tcg_gen_mov_i32(t0, tmp);
3699
3700 dead_tmp(tmp2);
3701 dead_tmp(tmp);
3702}
3703
9ee6e8bb
PB
3704static void gen_neon_unzip(int reg, int q, int tmp, int size)
3705{
3706 int n;
dd8fbd78 3707 TCGv t0, t1;
9ee6e8bb
PB
3708
3709 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3710 t0 = neon_load_reg(reg, n);
3711 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3712 switch (size) {
dd8fbd78
FN
3713 case 0: gen_neon_unzip_u8(t0, t1); break;
3714 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3715 case 2: /* no-op */; break;
3716 default: abort();
3717 }
dd8fbd78
FN
3718 neon_store_scratch(tmp + n, t0);
3719 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3720 }
3721}
3722
19457615
FN
3723static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3724{
3725 TCGv rd, tmp;
3726
3727 rd = new_tmp();
3728 tmp = new_tmp();
3729
3730 tcg_gen_shli_i32(rd, t0, 8);
3731 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3732 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3733 tcg_gen_or_i32(rd, rd, tmp);
3734
3735 tcg_gen_shri_i32(t1, t1, 8);
3736 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3737 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3738 tcg_gen_or_i32(t1, t1, tmp);
3739 tcg_gen_mov_i32(t0, rd);
3740
3741 dead_tmp(tmp);
3742 dead_tmp(rd);
3743}
3744
3745static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3746{
3747 TCGv rd, tmp;
3748
3749 rd = new_tmp();
3750 tmp = new_tmp();
3751
3752 tcg_gen_shli_i32(rd, t0, 16);
3753 tcg_gen_andi_i32(tmp, t1, 0xffff);
3754 tcg_gen_or_i32(rd, rd, tmp);
3755 tcg_gen_shri_i32(t1, t1, 16);
3756 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3757 tcg_gen_or_i32(t1, t1, tmp);
3758 tcg_gen_mov_i32(t0, rd);
3759
3760 dead_tmp(tmp);
3761 dead_tmp(rd);
3762}
3763
3764
9ee6e8bb
PB
3765static struct {
3766 int nregs;
3767 int interleave;
3768 int spacing;
3769} neon_ls_element_type[11] = {
3770 {4, 4, 1},
3771 {4, 4, 2},
3772 {4, 1, 1},
3773 {4, 2, 1},
3774 {3, 3, 1},
3775 {3, 3, 2},
3776 {3, 1, 1},
3777 {1, 1, 1},
3778 {2, 2, 1},
3779 {2, 2, 2},
3780 {2, 1, 1}
3781};
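/* Indexed by the 'type' field in insn[11:8]: entry 7, {1, 1, 1}, is the
   single-register VLD1/VST1 form, while entry 0, {4, 4, 1}, is the fully
   interleaved four-register VLD4/VST4 form. */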
3782
3783/* Translate a NEON load/store element instruction. Returns nonzero if the
3784 instruction is invalid. */
3785static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3786{
3787 int rd, rn, rm;
3788 int op;
3789 int nregs;
3790 int interleave;
84496233 3791 int spacing;
9ee6e8bb
PB
3792 int stride;
3793 int size;
3794 int reg;
3795 int pass;
3796 int load;
3797 int shift;
9ee6e8bb 3798 int n;
1b2b1e54 3799 TCGv addr;
b0109805 3800 TCGv tmp;
8f8e3aa4 3801 TCGv tmp2;
84496233 3802 TCGv_i64 tmp64;
9ee6e8bb 3803
5df8bac1 3804 if (!s->vfp_enabled)
9ee6e8bb
PB
3805 return 1;
3806 VFP_DREG_D(rd, insn);
3807 rn = (insn >> 16) & 0xf;
3808 rm = insn & 0xf;
3809 load = (insn & (1 << 21)) != 0;
1b2b1e54 3810 addr = new_tmp();
9ee6e8bb
PB
3811 if ((insn & (1 << 23)) == 0) {
3812 /* Load store all elements. */
3813 op = (insn >> 8) & 0xf;
3814 size = (insn >> 6) & 3;
84496233 3815 if (op > 10)
9ee6e8bb
PB
3816 return 1;
3817 nregs = neon_ls_element_type[op].nregs;
3818 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3819 spacing = neon_ls_element_type[op].spacing;
3820 if (size == 3 && (interleave | spacing) != 1)
3821 return 1;
dcc65026 3822 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3823 stride = (1 << size) * interleave;
3824 for (reg = 0; reg < nregs; reg++) {
3825 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3826 load_reg_var(s, addr, rn);
3827 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3828 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3829 load_reg_var(s, addr, rn);
3830 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3831 }
84496233
JR
3832 if (size == 3) {
3833 if (load) {
3834 tmp64 = gen_ld64(addr, IS_USER(s));
3835 neon_store_reg64(tmp64, rd);
3836 tcg_temp_free_i64(tmp64);
3837 } else {
3838 tmp64 = tcg_temp_new_i64();
3839 neon_load_reg64(tmp64, rd);
3840 gen_st64(tmp64, addr, IS_USER(s));
3841 }
3842 tcg_gen_addi_i32(addr, addr, stride);
3843 } else {
3844 for (pass = 0; pass < 2; pass++) {
3845 if (size == 2) {
3846 if (load) {
3847 tmp = gen_ld32(addr, IS_USER(s));
3848 neon_store_reg(rd, pass, tmp);
3849 } else {
3850 tmp = neon_load_reg(rd, pass);
3851 gen_st32(tmp, addr, IS_USER(s));
3852 }
1b2b1e54 3853 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3854 } else if (size == 1) {
3855 if (load) {
3856 tmp = gen_ld16u(addr, IS_USER(s));
3857 tcg_gen_addi_i32(addr, addr, stride);
3858 tmp2 = gen_ld16u(addr, IS_USER(s));
3859 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3860 tcg_gen_shli_i32(tmp2, tmp2, 16);
3861 tcg_gen_or_i32(tmp, tmp, tmp2);
84496233
JR
3862 dead_tmp(tmp2);
3863 neon_store_reg(rd, pass, tmp);
3864 } else {
3865 tmp = neon_load_reg(rd, pass);
3866 tmp2 = new_tmp();
3867 tcg_gen_shri_i32(tmp2, tmp, 16);
3868 gen_st16(tmp, addr, IS_USER(s));
3869 tcg_gen_addi_i32(addr, addr, stride);
3870 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3871 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3872 }
84496233
JR
3873 } else /* size == 0 */ {
3874 if (load) {
3875 TCGV_UNUSED(tmp2);
3876 for (n = 0; n < 4; n++) {
3877 tmp = gen_ld8u(addr, IS_USER(s));
3878 tcg_gen_addi_i32(addr, addr, stride);
3879 if (n == 0) {
3880 tmp2 = tmp;
3881 } else {
41ba8341
PB
3882 tcg_gen_shli_i32(tmp, tmp, n * 8);
3883 tcg_gen_or_i32(tmp2, tmp2, tmp);
84496233
JR
3884 dead_tmp(tmp);
3885 }
9ee6e8bb 3886 }
84496233
JR
3887 neon_store_reg(rd, pass, tmp2);
3888 } else {
3889 tmp2 = neon_load_reg(rd, pass);
3890 for (n = 0; n < 4; n++) {
3891 tmp = new_tmp();
3892 if (n == 0) {
3893 tcg_gen_mov_i32(tmp, tmp2);
3894 } else {
3895 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3896 }
3897 gen_st8(tmp, addr, IS_USER(s));
3898 tcg_gen_addi_i32(addr, addr, stride);
3899 }
3900 dead_tmp(tmp2);
9ee6e8bb
PB
3901 }
3902 }
3903 }
3904 }
84496233 3905 rd += spacing;
9ee6e8bb
PB
3906 }
3907 stride = nregs * 8;
3908 } else {
3909 size = (insn >> 10) & 3;
3910 if (size == 3) {
3911 /* Load single element to all lanes. */
3912 if (!load)
3913 return 1;
3914 size = (insn >> 6) & 3;
3915 nregs = ((insn >> 8) & 3) + 1;
3916 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3917 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3918 for (reg = 0; reg < nregs; reg++) {
3919 switch (size) {
3920 case 0:
1b2b1e54 3921 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3922 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3923 break;
3924 case 1:
1b2b1e54 3925 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3926 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3927 break;
3928 case 2:
1b2b1e54 3929 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3930 break;
3931 case 3:
3932 return 1;
a50f5b91
PB
3933 default: /* Avoid compiler warnings. */
3934 abort();
99c475ab 3935 }
1b2b1e54 3936 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3937 tmp2 = new_tmp();
3938 tcg_gen_mov_i32(tmp2, tmp);
3939 neon_store_reg(rd, 0, tmp2);
3018f259 3940 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3941 rd += stride;
3942 }
3943 stride = (1 << size) * nregs;
3944 } else {
3945 /* Single element. */
3946 pass = (insn >> 7) & 1;
3947 switch (size) {
3948 case 0:
3949 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3950 stride = 1;
3951 break;
3952 case 1:
3953 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3954 stride = (insn & (1 << 5)) ? 2 : 1;
3955 break;
3956 case 2:
3957 shift = 0;
9ee6e8bb
PB
3958 stride = (insn & (1 << 6)) ? 2 : 1;
3959 break;
3960 default:
3961 abort();
3962 }
3963 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3964 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3965 for (reg = 0; reg < nregs; reg++) {
3966 if (load) {
9ee6e8bb
PB
3967 switch (size) {
3968 case 0:
1b2b1e54 3969 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3970 break;
3971 case 1:
1b2b1e54 3972 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3973 break;
3974 case 2:
1b2b1e54 3975 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3976 break;
a50f5b91
PB
3977 default: /* Avoid compiler warnings. */
3978 abort();
9ee6e8bb
PB
3979 }
3980 if (size != 2) {
8f8e3aa4
PB
3981 tmp2 = neon_load_reg(rd, pass);
3982 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3983 dead_tmp(tmp2);
9ee6e8bb 3984 }
8f8e3aa4 3985 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3986 } else { /* Store */
8f8e3aa4
PB
3987 tmp = neon_load_reg(rd, pass);
3988 if (shift)
3989 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3990 switch (size) {
3991 case 0:
1b2b1e54 3992 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3993 break;
3994 case 1:
1b2b1e54 3995 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3996 break;
3997 case 2:
1b2b1e54 3998 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3999 break;
99c475ab 4000 }
99c475ab 4001 }
9ee6e8bb 4002 rd += stride;
1b2b1e54 4003 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4004 }
9ee6e8bb 4005 stride = nregs * (1 << size);
99c475ab 4006 }
9ee6e8bb 4007 }
1b2b1e54 4008 dead_tmp(addr);
9ee6e8bb 4009 if (rm != 15) {
b26eefb6
PB
4010 TCGv base;
4011
4012 base = load_reg(s, rn);
9ee6e8bb 4013 if (rm == 13) {
b26eefb6 4014 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4015 } else {
b26eefb6
PB
4016 TCGv index;
4017 index = load_reg(s, rm);
4018 tcg_gen_add_i32(base, base, index);
4019 dead_tmp(index);
9ee6e8bb 4020 }
b26eefb6 4021 store_reg(s, rn, base);
9ee6e8bb
PB
4022 }
4023 return 0;
4024}
3b46e624 4025
8f8e3aa4
PB
4026/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4027static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4028{
4029 tcg_gen_and_i32(t, t, c);
f669df27 4030 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4031 tcg_gen_or_i32(dest, t, f);
4032}
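/* Editor's note -- illustrative sketch, not part of the original source.
 * gen_neon_bsl() above emits TCG ops equivalent to the scalar expression
 * below: each result bit comes from t where the control bit is set and
 * from f where it is clear (function name is hypothetical):
 */
static inline uint32_t neon_bsl_scalar(uint32_t t, uint32_t f, uint32_t c)
{
    /* dest = (t & c) | (f & ~c), i.e. a per-bit "c ? t : f". */
    return (t & c) | (f & ~c);
}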
4033
a7812ae4 4034static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4035{
4036 switch (size) {
4037 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4038 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4039 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4040 default: abort();
4041 }
4042}
4043
a7812ae4 4044static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4045{
4046 switch (size) {
4047 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4048 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4049 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4050 default: abort();
4051 }
4052}
4053
a7812ae4 4054static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4055{
4056 switch (size) {
4057 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4058 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4059 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4060 default: abort();
4061 }
4062}
4063
4064static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4065 int q, int u)
4066{
4067 if (q) {
4068 if (u) {
4069 switch (size) {
4070 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4071 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4072 default: abort();
4073 }
4074 } else {
4075 switch (size) {
4076 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4077 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4078 default: abort();
4079 }
4080 }
4081 } else {
4082 if (u) {
4083 switch (size) {
4070 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4071 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4086 default: abort();
4087 }
4088 } else {
4089 switch (size) {
4090 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4091 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4092 default: abort();
4093 }
4094 }
4095 }
4096}
4097
a7812ae4 4098static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4099{
4100 if (u) {
4101 switch (size) {
4102 case 0: gen_helper_neon_widen_u8(dest, src); break;
4103 case 1: gen_helper_neon_widen_u16(dest, src); break;
4104 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4105 default: abort();
4106 }
4107 } else {
4108 switch (size) {
4109 case 0: gen_helper_neon_widen_s8(dest, src); break;
4110 case 1: gen_helper_neon_widen_s16(dest, src); break;
4111 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4112 default: abort();
4113 }
4114 }
4115 dead_tmp(src);
4116}
4117
4118static inline void gen_neon_addl(int size)
4119{
4120 switch (size) {
4121 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4122 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4123 case 2: tcg_gen_add_i64(CPU_V001); break;
4124 default: abort();
4125 }
4126}
4127
4128static inline void gen_neon_subl(int size)
4129{
4130 switch (size) {
4131 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4132 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4133 case 2: tcg_gen_sub_i64(CPU_V001); break;
4134 default: abort();
4135 }
4136}
4137
a7812ae4 4138static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4139{
4140 switch (size) {
4141 case 0: gen_helper_neon_negl_u16(var, var); break;
4142 case 1: gen_helper_neon_negl_u32(var, var); break;
4143 case 2: gen_helper_neon_negl_u64(var, var); break;
4144 default: abort();
4145 }
4146}
4147
a7812ae4 4148static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4149{
4150 switch (size) {
4151 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4152 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4153 default: abort();
4154 }
4155}
4156
a7812ae4 4157static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4158{
a7812ae4 4159 TCGv_i64 tmp;
ad69471c
PB
4160
4161 switch ((size << 1) | u) {
4162 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4163 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4164 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4165 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4166 case 4:
4167 tmp = gen_muls_i64_i32(a, b);
4168 tcg_gen_mov_i64(dest, tmp);
4169 break;
4170 case 5:
4171 tmp = gen_mulu_i64_i32(a, b);
4172 tcg_gen_mov_i64(dest, tmp);
4173 break;
4174 default: abort();
4175 }
ad69471c
PB
4176}
4177
9ee6e8bb
PB
4178/* Translate a NEON data processing instruction. Return nonzero if the
4179 instruction is invalid.
ad69471c
PB
4180 We process data in a mixture of 32-bit and 64-bit chunks.
4181 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
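/* Editor's note -- illustrative sketch, not part of the original source.
 * "Passes" below index 32-bit chunks of the NEON registers: a D register
 * is covered by two passes and a Q register by four, so the typical loop
 * shape in this function is
 *
 *     for (pass = 0; pass < (q ? 4 : 2); pass++) {
 *         tmp = neon_load_reg(rn, pass);      // 32-bit chunk 'pass'
 *         ...operate on tmp...
 *         neon_store_reg(rd, pass, tmp);
 *     }
 *
 * while 64-bit element operations use neon_load_reg64()/neon_store_reg64()
 * with one pass per D register.
 */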
2c0262af 4182
9ee6e8bb
PB
4183static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4184{
4185 int op;
4186 int q;
4187 int rd, rn, rm;
4188 int size;
4189 int shift;
4190 int pass;
4191 int count;
4192 int pairwise;
4193 int u;
4194 int n;
ca9a32e4 4195 uint32_t imm, mask;
b75263d6 4196 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4197 TCGv_i64 tmp64;
9ee6e8bb 4198
5df8bac1 4199 if (!s->vfp_enabled)
9ee6e8bb
PB
4200 return 1;
4201 q = (insn & (1 << 6)) != 0;
4202 u = (insn >> 24) & 1;
4203 VFP_DREG_D(rd, insn);
4204 VFP_DREG_N(rn, insn);
4205 VFP_DREG_M(rm, insn);
4206 size = (insn >> 20) & 3;
4207 if ((insn & (1 << 23)) == 0) {
4208 /* Three register same length. */
4209 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4210 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4211 || op == 10 || op == 11 || op == 16)) {
4212 /* 64-bit element instructions. */
9ee6e8bb 4213 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4214 neon_load_reg64(cpu_V0, rn + pass);
4215 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4216 switch (op) {
4217 case 1: /* VQADD */
4218 if (u) {
ad69471c 4219 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4220 } else {
ad69471c 4221 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4222 }
9ee6e8bb
PB
4223 break;
4224 case 5: /* VQSUB */
4225 if (u) {
ad69471c
PB
4226 gen_helper_neon_sub_saturate_u64(CPU_V001);
4227 } else {
4228 gen_helper_neon_sub_saturate_s64(CPU_V001);
4229 }
4230 break;
4231 case 8: /* VSHL */
4232 if (u) {
4233 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4234 } else {
4235 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4236 }
4237 break;
4238 case 9: /* VQSHL */
4239 if (u) {
4240 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
def126ce 4241 cpu_V1, cpu_V0);
ad69471c 4242 } else {
def126ce 4243 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
ad69471c
PB
4244 cpu_V1, cpu_V0);
4245 }
4246 break;
4247 case 10: /* VRSHL */
4248 if (u) {
4249 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4250 } else {
ad69471c
PB
4251 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4252 }
4253 break;
4254 case 11: /* VQRSHL */
4255 if (u) {
4256 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4257 cpu_V1, cpu_V0);
4258 } else {
4259 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4260 cpu_V1, cpu_V0);
1e8d4eec 4261 }
9ee6e8bb
PB
4262 break;
4263 case 16:
4264 if (u) {
ad69471c 4265 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4266 } else {
ad69471c 4267 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4268 }
4269 break;
4270 default:
4271 abort();
2c0262af 4272 }
ad69471c 4273 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4274 }
9ee6e8bb 4275 return 0;
2c0262af 4276 }
9ee6e8bb
PB
4277 switch (op) {
4278 case 8: /* VSHL */
4279 case 9: /* VQSHL */
4280 case 10: /* VRSHL */
ad69471c 4281 case 11: /* VQRSHL */
9ee6e8bb 4282 {
ad69471c
PB
4283 int rtmp;
4284 /* Shift instruction operands are reversed. */
4285 rtmp = rn;
9ee6e8bb 4286 rn = rm;
ad69471c 4287 rm = rtmp;
9ee6e8bb
PB
4288 pairwise = 0;
4289 }
2c0262af 4290 break;
9ee6e8bb
PB
4291 case 20: /* VPMAX */
4292 case 21: /* VPMIN */
4293 case 23: /* VPADD */
4294 pairwise = 1;
2c0262af 4295 break;
9ee6e8bb
PB
4296 case 26: /* VPADD (float) */
4297 pairwise = (u && size < 2);
2c0262af 4298 break;
9ee6e8bb
PB
4299 case 30: /* VPMIN/VPMAX (float) */
4300 pairwise = u;
2c0262af 4301 break;
9ee6e8bb
PB
4302 default:
4303 pairwise = 0;
2c0262af 4304 break;
9ee6e8bb 4305 }
dd8fbd78 4306
9ee6e8bb
PB
4307 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4308
4309 if (pairwise) {
4310 /* Pairwise. */
4311 if (q)
4312 n = (pass & 1) * 2;
2c0262af 4313 else
9ee6e8bb
PB
4314 n = 0;
4315 if (pass < q + 1) {
dd8fbd78
FN
4316 tmp = neon_load_reg(rn, n);
4317 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4318 } else {
dd8fbd78
FN
4319 tmp = neon_load_reg(rm, n);
4320 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4321 }
4322 } else {
4323 /* Elementwise. */
dd8fbd78
FN
4324 tmp = neon_load_reg(rn, pass);
4325 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4326 }
4327 switch (op) {
4328 case 0: /* VHADD */
4329 GEN_NEON_INTEGER_OP(hadd);
4330 break;
4331 case 1: /* VQADD */
ad69471c 4332 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4333 break;
9ee6e8bb
PB
4334 case 2: /* VRHADD */
4335 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4336 break;
9ee6e8bb
PB
4337 case 3: /* Logic ops. */
4338 switch ((u << 2) | size) {
4339 case 0: /* VAND */
dd8fbd78 4340 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4341 break;
4342 case 1: /* BIC */
f669df27 4343 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4344 break;
4345 case 2: /* VORR */
dd8fbd78 4346 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4347 break;
4348 case 3: /* VORN */
f669df27 4349 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4350 break;
4351 case 4: /* VEOR */
dd8fbd78 4352 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4353 break;
4354 case 5: /* VBSL */
dd8fbd78
FN
4355 tmp3 = neon_load_reg(rd, pass);
4356 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4357 dead_tmp(tmp3);
9ee6e8bb
PB
4358 break;
4359 case 6: /* VBIT */
dd8fbd78
FN
4360 tmp3 = neon_load_reg(rd, pass);
4361 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4362 dead_tmp(tmp3);
9ee6e8bb
PB
4363 break;
4364 case 7: /* VBIF */
dd8fbd78
FN
4365 tmp3 = neon_load_reg(rd, pass);
4366 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4367 dead_tmp(tmp3);
9ee6e8bb 4368 break;
2c0262af
FB
4369 }
4370 break;
9ee6e8bb
PB
4371 case 4: /* VHSUB */
4372 GEN_NEON_INTEGER_OP(hsub);
4373 break;
4374 case 5: /* VQSUB */
ad69471c 4375 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4376 break;
9ee6e8bb
PB
4377 case 6: /* VCGT */
4378 GEN_NEON_INTEGER_OP(cgt);
4379 break;
4380 case 7: /* VCGE */
4381 GEN_NEON_INTEGER_OP(cge);
4382 break;
4383 case 8: /* VSHL */
ad69471c 4384 GEN_NEON_INTEGER_OP(shl);
2c0262af 4385 break;
9ee6e8bb 4386 case 9: /* VQSHL */
ad69471c 4387 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4388 break;
9ee6e8bb 4389 case 10: /* VRSHL */
ad69471c 4390 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4391 break;
9ee6e8bb 4392 case 11: /* VQRSHL */
ad69471c 4393 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4394 break;
4395 case 12: /* VMAX */
4396 GEN_NEON_INTEGER_OP(max);
4397 break;
4398 case 13: /* VMIN */
4399 GEN_NEON_INTEGER_OP(min);
4400 break;
4401 case 14: /* VABD */
4402 GEN_NEON_INTEGER_OP(abd);
4403 break;
4404 case 15: /* VABA */
4405 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4406 dead_tmp(tmp2);
4407 tmp2 = neon_load_reg(rd, pass);
4408 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4409 break;
4410 case 16:
4411 if (!u) { /* VADD */
dd8fbd78 4412 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4413 return 1;
4414 } else { /* VSUB */
4415 switch (size) {
dd8fbd78
FN
4416 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4417 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4418 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4419 default: return 1;
4420 }
4421 }
4422 break;
4423 case 17:
4424 if (!u) { /* VTST */
4425 switch (size) {
dd8fbd78
FN
4426 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4427 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4428 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4429 default: return 1;
4430 }
4431 } else { /* VCEQ */
4432 switch (size) {
dd8fbd78
FN
4433 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4434 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4435 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4436 default: return 1;
4437 }
4438 }
4439 break;
4440 case 18: /* Multiply. */
4441 switch (size) {
dd8fbd78
FN
4442 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4443 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4444 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4445 default: return 1;
4446 }
dd8fbd78
FN
4447 dead_tmp(tmp2);
4448 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4449 if (u) { /* VMLS */
dd8fbd78 4450 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4451 } else { /* VMLA */
dd8fbd78 4452 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4453 }
4454 break;
4455 case 19: /* VMUL */
4456 if (u) { /* polynomial */
dd8fbd78 4457 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4458 } else { /* Integer */
4459 switch (size) {
dd8fbd78
FN
4460 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4461 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4462 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4463 default: return 1;
4464 }
4465 }
4466 break;
4467 case 20: /* VPMAX */
4468 GEN_NEON_INTEGER_OP(pmax);
4469 break;
4470 case 21: /* VPMIN */
4471 GEN_NEON_INTEGER_OP(pmin);
4472 break;
4473 case 22: /* Multiply high. */
4474 if (!u) { /* VQDMULH */
4475 switch (size) {
dd8fbd78
FN
4476 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4477 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4478 default: return 1;
4479 }
4480 } else { /* VQRDMULH */
4481 switch (size) {
dd8fbd78
FN
4482 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4483 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4484 default: return 1;
4485 }
4486 }
4487 break;
4488 case 23: /* VPADD */
4489 if (u)
4490 return 1;
4491 switch (size) {
dd8fbd78
FN
4492 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4493 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4494 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4495 default: return 1;
4496 }
4497 break;
4498 case 26: /* Floating point arithmetic. */
4499 switch ((u << 2) | size) {
4500 case 0: /* VADD */
dd8fbd78 4501 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4502 break;
4503 case 2: /* VSUB */
dd8fbd78 4504 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4505 break;
4506 case 4: /* VPADD */
dd8fbd78 4507 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4508 break;
4509 case 6: /* VABD */
dd8fbd78 4510 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4511 break;
4512 default:
4513 return 1;
4514 }
4515 break;
4516 case 27: /* Float multiply. */
dd8fbd78 4517 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4518 if (!u) {
dd8fbd78
FN
4519 dead_tmp(tmp2);
4520 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4521 if (size == 0) {
dd8fbd78 4522 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4523 } else {
dd8fbd78 4524 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4525 }
4526 }
4527 break;
4528 case 28: /* Float compare. */
4529 if (!u) {
dd8fbd78 4530 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4531 } else {
9ee6e8bb 4532 if (size == 0)
dd8fbd78 4533 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4534 else
dd8fbd78 4535 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4536 }
2c0262af 4537 break;
9ee6e8bb
PB
4538 case 29: /* Float compare absolute. */
4539 if (!u)
4540 return 1;
4541 if (size == 0)
dd8fbd78 4542 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4543 else
dd8fbd78 4544 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4545 break;
9ee6e8bb
PB
4546 case 30: /* Float min/max. */
4547 if (size == 0)
dd8fbd78 4548 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4549 else
dd8fbd78 4550 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4551 break;
4552 case 31:
4553 if (size == 0)
dd8fbd78 4554 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4555 else
dd8fbd78 4556 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4557 break;
9ee6e8bb
PB
4558 default:
4559 abort();
2c0262af 4560 }
dd8fbd78
FN
4561 dead_tmp(tmp2);
4562
9ee6e8bb
PB
4563 /* Save the result. For elementwise operations we can put it
4564 straight into the destination register. For pairwise operations
4565 we have to be careful to avoid clobbering the source operands. */
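/* Editor's note -- illustrative example, not part of the original source.
 * For a pairwise op with rd == rm (say VPADD d2, d3, d2), pass 0 reads
 * the rn pair and pass 1 reads the rm pair; storing pass 0's result
 * straight into d2 would corrupt the source still needed on pass 1, so
 * results are staged in the scratch buffer and copied into rd only after
 * all passes have run. */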
4566 if (pairwise && rd == rm) {
dd8fbd78 4567 neon_store_scratch(pass, tmp);
9ee6e8bb 4568 } else {
dd8fbd78 4569 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4570 }
4571
4572 } /* for pass */
4573 if (pairwise && rd == rm) {
4574 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4575 tmp = neon_load_scratch(pass);
4576 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4577 }
4578 }
ad69471c 4579 /* End of 3 register same size operations. */
9ee6e8bb
PB
4580 } else if (insn & (1 << 4)) {
4581 if ((insn & 0x00380080) != 0) {
4582 /* Two registers and shift. */
4583 op = (insn >> 8) & 0xf;
4584 if (insn & (1 << 7)) {
4585 /* 64-bit shift. */
4586 size = 3;
4587 } else {
4588 size = 2;
4589 while ((insn & (1 << (size + 19))) == 0)
4590 size--;
4591 }
4592 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4593 /* To avoid excessive duplication of ops we implement shift
4594 by immediate using the variable shift operations. */
4595 if (op < 8) {
4596 /* Shift by immediate:
4597 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4598 /* Right shifts are encoded as N - shift, where N is the
4599 element size in bits. */
4600 if (op <= 4)
4601 shift = shift - (1 << (size + 3));
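/* Editor's note -- worked example, not part of the original source.
 * For 8-bit elements (size == 0) the immediate field holds 8 - shift, so
 * an encoded value of 5 yields shift = 5 - 8 = -3 here; the "shift left
 * by -3" performed by the variable-shift helpers is a right shift by 3. */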
9ee6e8bb
PB
4602 if (size == 3) {
4603 count = q + 1;
4604 } else {
4605 count = q ? 4: 2;
4606 }
4607 switch (size) {
4608 case 0:
4609 imm = (uint8_t) shift;
4610 imm |= imm << 8;
4611 imm |= imm << 16;
4612 break;
4613 case 1:
4614 imm = (uint16_t) shift;
4615 imm |= imm << 16;
4616 break;
4617 case 2:
4618 case 3:
4619 imm = shift;
4620 break;
4621 default:
4622 abort();
4623 }
4624
4625 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4626 if (size == 3) {
4627 neon_load_reg64(cpu_V0, rm + pass);
4628 tcg_gen_movi_i64(cpu_V1, imm);
4629 switch (op) {
4630 case 0: /* VSHR */
4631 case 1: /* VSRA */
4632 if (u)
4633 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4634 else
ad69471c 4635 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4636 break;
ad69471c
PB
4637 case 2: /* VRSHR */
4638 case 3: /* VRSRA */
4639 if (u)
4640 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4641 else
ad69471c 4642 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4643 break;
ad69471c
PB
4644 case 4: /* VSRI */
4645 if (!u)
4646 return 1;
4647 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4648 break;
4649 case 5: /* VSHL, VSLI */
4650 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4651 break;
0322b26e
PM
4652 case 6: /* VQSHLU */
4653 if (u) {
4654 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4655 cpu_V0, cpu_V1);
4656 } else {
4657 return 1;
4658 }
ad69471c 4659 break;
0322b26e
PM
4660 case 7: /* VQSHL */
4661 if (u) {
4662 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4663 cpu_V0, cpu_V1);
4664 } else {
4665 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4666 cpu_V0, cpu_V1);
4667 }
9ee6e8bb 4668 break;
9ee6e8bb 4669 }
ad69471c
PB
4670 if (op == 1 || op == 3) {
4671 /* Accumulate. */
4672 neon_load_reg64(cpu_V0, rd + pass);
4673 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4674 } else if (op == 4 || (op == 5 && u)) {
4675 /* Insert */
4676 cpu_abort(env, "VS[LR]I.64 not implemented");
4677 }
4678 neon_store_reg64(cpu_V0, rd + pass);
4679 } else { /* size < 3 */
4680 /* Operands in T0 and T1. */
dd8fbd78
FN
4681 tmp = neon_load_reg(rm, pass);
4682 tmp2 = new_tmp();
4683 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4684 switch (op) {
4685 case 0: /* VSHR */
4686 case 1: /* VSRA */
4687 GEN_NEON_INTEGER_OP(shl);
4688 break;
4689 case 2: /* VRSHR */
4690 case 3: /* VRSRA */
4691 GEN_NEON_INTEGER_OP(rshl);
4692 break;
4693 case 4: /* VSRI */
4694 if (!u)
4695 return 1;
4696 GEN_NEON_INTEGER_OP(shl);
4697 break;
4698 case 5: /* VSHL, VSLI */
4699 switch (size) {
dd8fbd78
FN
4700 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4701 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4702 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4703 default: return 1;
4704 }
4705 break;
0322b26e
PM
4706 case 6: /* VQSHLU */
4707 if (!u) {
4708 return 1;
4709 }
ad69471c 4710 switch (size) {
0322b26e
PM
4711 case 0:
4712 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4713 tmp, tmp2);
4714 break;
4715 case 1:
4716 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4717 tmp, tmp2);
4718 break;
4719 case 2:
4720 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4721 tmp, tmp2);
4722 break;
4723 default:
4724 return 1;
ad69471c
PB
4725 }
4726 break;
0322b26e
PM
4727 case 7: /* VQSHL */
4728 GEN_NEON_INTEGER_OP_ENV(qshl);
4729 break;
ad69471c 4730 }
dd8fbd78 4731 dead_tmp(tmp2);
ad69471c
PB
4732
4733 if (op == 1 || op == 3) {
4734 /* Accumulate. */
dd8fbd78
FN
4735 tmp2 = neon_load_reg(rd, pass);
4736 gen_neon_add(size, tmp2, tmp);
4737 dead_tmp(tmp2);
ad69471c
PB
4738 } else if (op == 4 || (op == 5 && u)) {
4739 /* Insert */
4740 switch (size) {
4741 case 0:
4742 if (op == 4)
ca9a32e4 4743 mask = 0xff >> -shift;
ad69471c 4744 else
ca9a32e4
JR
4745 mask = (uint8_t)(0xff << shift);
4746 mask |= mask << 8;
4747 mask |= mask << 16;
ad69471c
PB
4748 break;
4749 case 1:
4750 if (op == 4)
ca9a32e4 4751 mask = 0xffff >> -shift;
ad69471c 4752 else
ca9a32e4
JR
4753 mask = (uint16_t)(0xffff << shift);
4754 mask |= mask << 16;
ad69471c
PB
4755 break;
4756 case 2:
ca9a32e4
JR
4757 if (shift < -31 || shift > 31) {
4758 mask = 0;
4759 } else {
4760 if (op == 4)
4761 mask = 0xffffffffu >> -shift;
4762 else
4763 mask = 0xffffffffu << shift;
4764 }
ad69471c
PB
4765 break;
4766 default:
4767 abort();
4768 }
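/* Editor's note -- worked example, not part of the original source.
 * For VSRI with 8-bit elements and a right shift by 3 (shift == -3) the
 * code above builds mask = 0xff >> 3 = 0x1f, replicated to 0x1f1f1f1f;
 * the shifted source is then merged into the destination below as
 * (src & mask) | (dest & ~mask). */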
dd8fbd78 4769 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4770 tcg_gen_andi_i32(tmp, tmp, mask);
4771 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78
FN
4772 tcg_gen_or_i32(tmp, tmp, tmp2);
4773 dead_tmp(tmp2);
ad69471c 4774 }
dd8fbd78 4775 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4776 }
4777 } /* for pass */
4778 } else if (op < 10) {
ad69471c 4779 /* Shift by immediate and narrow:
9ee6e8bb
PB
4780 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4781 shift = shift - (1 << (size + 3));
4782 size++;
9ee6e8bb
PB
4783 switch (size) {
4784 case 1:
ad69471c 4785 imm = (uint16_t)shift;
9ee6e8bb 4786 imm |= imm << 16;
ad69471c 4787 tmp2 = tcg_const_i32(imm);
a7812ae4 4788 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4789 break;
4790 case 2:
ad69471c
PB
4791 imm = (uint32_t)shift;
4792 tmp2 = tcg_const_i32(imm);
a7812ae4 4793 TCGV_UNUSED_I64(tmp64);
4cc633c3 4794 break;
9ee6e8bb 4795 case 3:
a7812ae4
PB
4796 tmp64 = tcg_const_i64(shift);
4797 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4798 break;
4799 default:
4800 abort();
4801 }
4802
ad69471c
PB
4803 for (pass = 0; pass < 2; pass++) {
4804 if (size == 3) {
4805 neon_load_reg64(cpu_V0, rm + pass);
4806 if (q) {
4807 if (u)
a7812ae4 4808 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4809 else
a7812ae4 4810 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4811 } else {
4812 if (u)
a7812ae4 4813 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4814 else
a7812ae4 4815 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4816 }
2c0262af 4817 } else {
ad69471c
PB
4818 tmp = neon_load_reg(rm + pass, 0);
4819 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4820 tmp3 = neon_load_reg(rm + pass, 1);
4821 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4822 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4823 dead_tmp(tmp);
36aa55dc 4824 dead_tmp(tmp3);
9ee6e8bb 4825 }
ad69471c
PB
4826 tmp = new_tmp();
4827 if (op == 8 && !u) {
4828 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4829 } else {
ad69471c
PB
4830 if (op == 8)
4831 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4832 else
ad69471c
PB
4833 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4834 }
2301db49 4835 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4836 } /* for pass */
b75263d6
JR
4837 if (size == 3) {
4838 tcg_temp_free_i64(tmp64);
2301db49
JR
4839 } else {
4840 dead_tmp(tmp2);
b75263d6 4841 }
9ee6e8bb
PB
4842 } else if (op == 10) {
4843 /* VSHLL */
ad69471c 4844 if (q || size == 3)
9ee6e8bb 4845 return 1;
ad69471c
PB
4846 tmp = neon_load_reg(rm, 0);
4847 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4848 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4849 if (pass == 1)
4850 tmp = tmp2;
4851
4852 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4853
9ee6e8bb
PB
4854 if (shift != 0) {
4855 /* The shift is less than the width of the source
ad69471c
PB
4856 type, so we can just shift the whole register. */
4857 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4858 if (size < 2 || !u) {
4859 uint64_t imm64;
4860 if (size == 0) {
4861 imm = (0xffu >> (8 - shift));
4862 imm |= imm << 16;
4863 } else {
4864 imm = 0xffff >> (16 - shift);
9ee6e8bb 4865 }
ad69471c
PB
4866 imm64 = imm | (((uint64_t)imm) << 32);
4867 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4868 }
4869 }
ad69471c 4870 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 4871 }
f73534a5 4872 } else if (op >= 14) {
9ee6e8bb 4873 /* VCVT fixed-point. */
f73534a5
PM
4874 /* We have already masked out the must-be-1 top bit of imm6,
4875 * hence this 32-shift where the ARM ARM has 64-imm6.
4876 */
4877 shift = 32 - shift;
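/* Editor's note -- worked example, not part of the original source.
 * The ARM ARM defines fracbits = 64 - imm6 with imm6[5] required to be 1;
 * that top bit never reaches 'shift' because the field was masked to 5
 * bits during decode.  So for imm6 = 0b110000 the field here is 16 and
 * fracbits = 32 - 16 = 16, matching 64 - 48. */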
9ee6e8bb 4878 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4879 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 4880 if (!(op & 1)) {
9ee6e8bb 4881 if (u)
4373f3ce 4882 gen_vfp_ulto(0, shift);
9ee6e8bb 4883 else
4373f3ce 4884 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4885 } else {
4886 if (u)
4373f3ce 4887 gen_vfp_toul(0, shift);
9ee6e8bb 4888 else
4373f3ce 4889 gen_vfp_tosl(0, shift);
2c0262af 4890 }
4373f3ce 4891 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4892 }
4893 } else {
9ee6e8bb
PB
4894 return 1;
4895 }
4896 } else { /* (insn & 0x00380080) == 0 */
4897 int invert;
4898
4899 op = (insn >> 8) & 0xf;
4900 /* One register and immediate. */
4901 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4902 invert = (insn & (1 << 5)) != 0;
4903 switch (op) {
4904 case 0: case 1:
4905 /* no-op */
4906 break;
4907 case 2: case 3:
4908 imm <<= 8;
4909 break;
4910 case 4: case 5:
4911 imm <<= 16;
4912 break;
4913 case 6: case 7:
4914 imm <<= 24;
4915 break;
4916 case 8: case 9:
4917 imm |= imm << 16;
4918 break;
4919 case 10: case 11:
4920 imm = (imm << 8) | (imm << 24);
4921 break;
4922 case 12:
8e31209e 4923 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
4924 break;
4925 case 13:
4926 imm = (imm << 16) | 0xffff;
4927 break;
4928 case 14:
4929 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4930 if (invert)
4931 imm = ~imm;
4932 break;
4933 case 15:
4934 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4935 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4936 break;
4937 }
4938 if (invert)
4939 imm = ~imm;
4940
9ee6e8bb
PB
4941 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4942 if (op & 1 && op < 12) {
ad69471c 4943 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4944 if (invert) {
4945 /* The immediate value has already been inverted, so
4946 BIC becomes AND. */
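/* Editor's note -- illustrative example, not part of the original source.
 * A VBIC with encoded immediate 0x000000ff arrives here already inverted
 * to 0xffffff00, so the AND below clears exactly the low byte, which is
 * what BIC (bit clear) of 0xff means. */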
ad69471c 4947 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4948 } else {
ad69471c 4949 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4950 }
9ee6e8bb 4951 } else {
ad69471c
PB
4952 /* VMOV, VMVN. */
4953 tmp = new_tmp();
9ee6e8bb 4954 if (op == 14 && invert) {
ad69471c
PB
4955 uint32_t val;
4956 val = 0;
9ee6e8bb
PB
4957 for (n = 0; n < 4; n++) {
4958 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4959 val |= 0xff << (n * 8);
9ee6e8bb 4960 }
ad69471c
PB
4961 tcg_gen_movi_i32(tmp, val);
4962 } else {
4963 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4964 }
9ee6e8bb 4965 }
ad69471c 4966 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4967 }
4968 }
e4b3861d 4969 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4970 if (size != 3) {
4971 op = (insn >> 8) & 0xf;
4972 if ((insn & (1 << 6)) == 0) {
4973 /* Three registers of different lengths. */
4974 int src1_wide;
4975 int src2_wide;
4976 int prewiden;
4977 /* prewiden, src1_wide, src2_wide */
4978 static const int neon_3reg_wide[16][3] = {
4979 {1, 0, 0}, /* VADDL */
4980 {1, 1, 0}, /* VADDW */
4981 {1, 0, 0}, /* VSUBL */
4982 {1, 1, 0}, /* VSUBW */
4983 {0, 1, 1}, /* VADDHN */
4984 {0, 0, 0}, /* VABAL */
4985 {0, 1, 1}, /* VSUBHN */
4986 {0, 0, 0}, /* VABDL */
4987 {0, 0, 0}, /* VMLAL */
4988 {0, 0, 0}, /* VQDMLAL */
4989 {0, 0, 0}, /* VMLSL */
4990 {0, 0, 0}, /* VQDMLSL */
4991 {0, 0, 0}, /* Integer VMULL */
4992 {0, 0, 0}, /* VQDMULL */
4993 {0, 0, 0} /* Polynomial VMULL */
4994 };
4995
4996 prewiden = neon_3reg_wide[op][0];
4997 src1_wide = neon_3reg_wide[op][1];
4998 src2_wide = neon_3reg_wide[op][2];
4999
ad69471c
PB
5000 if (size == 0 && (op == 9 || op == 11 || op == 13))
5001 return 1;
5002
9ee6e8bb
PB
5003 /* Avoid overlapping operands. Wide source operands are
5004 always aligned so will never overlap with wide
5005 destinations in problematic ways. */
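/* Editor's note -- illustrative example, not part of the original source.
 * VADDL q1, d2, d2 has rd == rm, so pass 0 would clobber d2 (the low half
 * of q1) before pass 1 reads it as a narrow source; the second half of
 * that source is therefore parked in scratch slot 2 and fetched from
 * there on pass 1. */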
8f8e3aa4 5006 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5007 tmp = neon_load_reg(rm, 1);
5008 neon_store_scratch(2, tmp);
8f8e3aa4 5009 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5010 tmp = neon_load_reg(rn, 1);
5011 neon_store_scratch(2, tmp);
9ee6e8bb 5012 }
a50f5b91 5013 TCGV_UNUSED(tmp3);
9ee6e8bb 5014 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5015 if (src1_wide) {
5016 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5017 TCGV_UNUSED(tmp);
9ee6e8bb 5018 } else {
ad69471c 5019 if (pass == 1 && rd == rn) {
dd8fbd78 5020 tmp = neon_load_scratch(2);
9ee6e8bb 5021 } else {
ad69471c
PB
5022 tmp = neon_load_reg(rn, pass);
5023 }
5024 if (prewiden) {
5025 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5026 }
5027 }
ad69471c
PB
5028 if (src2_wide) {
5029 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5030 TCGV_UNUSED(tmp2);
9ee6e8bb 5031 } else {
ad69471c 5032 if (pass == 1 && rd == rm) {
dd8fbd78 5033 tmp2 = neon_load_scratch(2);
9ee6e8bb 5034 } else {
ad69471c
PB
5035 tmp2 = neon_load_reg(rm, pass);
5036 }
5037 if (prewiden) {
5038 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5039 }
9ee6e8bb
PB
5040 }
5041 switch (op) {
5042 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5043 gen_neon_addl(size);
9ee6e8bb 5044 break;
79b0e534 5045 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5046 gen_neon_subl(size);
9ee6e8bb
PB
5047 break;
5048 case 5: case 7: /* VABAL, VABDL */
5049 switch ((size << 1) | u) {
ad69471c
PB
5050 case 0:
5051 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5052 break;
5053 case 1:
5054 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5055 break;
5056 case 2:
5057 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5058 break;
5059 case 3:
5060 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5061 break;
5062 case 4:
5063 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5064 break;
5065 case 5:
5066 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5067 break;
9ee6e8bb
PB
5068 default: abort();
5069 }
ad69471c
PB
5070 dead_tmp(tmp2);
5071 dead_tmp(tmp);
9ee6e8bb
PB
5072 break;
5073 case 8: case 9: case 10: case 11: case 12: case 13:
5074 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5075 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
5076 dead_tmp(tmp2);
5077 dead_tmp(tmp);
9ee6e8bb
PB
5078 break;
5079 case 14: /* Polynomial VMULL */
5080 cpu_abort(env, "Polynomial VMULL not implemented");
5081
5082 default: /* 15 is RESERVED. */
5083 return 1;
5084 }
5085 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5086 /* Accumulate. */
5087 if (op == 10 || op == 11) {
ad69471c 5088 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
5089 }
5090
9ee6e8bb 5091 if (op != 13) {
ad69471c 5092 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
5093 }
5094
5095 switch (op) {
5096 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 5097 gen_neon_addl(size);
9ee6e8bb
PB
5098 break;
5099 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5100 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5101 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5102 break;
9ee6e8bb
PB
5103 /* Fall through. */
5104 case 13: /* VQDMULL */
ad69471c 5105 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5106 break;
5107 default:
5108 abort();
5109 }
ad69471c 5110 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5111 } else if (op == 4 || op == 6) {
5112 /* Narrowing operation. */
ad69471c 5113 tmp = new_tmp();
79b0e534 5114 if (!u) {
9ee6e8bb 5115 switch (size) {
ad69471c
PB
5116 case 0:
5117 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5118 break;
5119 case 1:
5120 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5121 break;
5122 case 2:
5123 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5124 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5125 break;
9ee6e8bb
PB
5126 default: abort();
5127 }
5128 } else {
5129 switch (size) {
ad69471c
PB
5130 case 0:
5131 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5132 break;
5133 case 1:
5134 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5135 break;
5136 case 2:
5137 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5138 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5139 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5140 break;
9ee6e8bb
PB
5141 default: abort();
5142 }
5143 }
ad69471c
PB
5144 if (pass == 0) {
5145 tmp3 = tmp;
5146 } else {
5147 neon_store_reg(rd, 0, tmp3);
5148 neon_store_reg(rd, 1, tmp);
5149 }
9ee6e8bb
PB
5150 } else {
5151 /* Write back the result. */
ad69471c 5152 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5153 }
5154 }
5155 } else {
5156 /* Two registers and a scalar. */
5157 switch (op) {
5158 case 0: /* Integer VMLA scalar */
5159 case 1: /* Float VMLA scalar */
5160 case 4: /* Integer VMLS scalar */
5161 case 5: /* Floating point VMLS scalar */
5162 case 8: /* Integer VMUL scalar */
5163 case 9: /* Floating point VMUL scalar */
5164 case 12: /* VQDMULH scalar */
5165 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5166 tmp = neon_get_scalar(size, rm);
5167 neon_store_scratch(0, tmp);
9ee6e8bb 5168 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5169 tmp = neon_load_scratch(0);
5170 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5171 if (op == 12) {
5172 if (size == 1) {
dd8fbd78 5173 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5174 } else {
dd8fbd78 5175 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5176 }
5177 } else if (op == 13) {
5178 if (size == 1) {
dd8fbd78 5179 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5180 } else {
dd8fbd78 5181 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5182 }
5183 } else if (op & 1) {
dd8fbd78 5184 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5185 } else {
5186 switch (size) {
dd8fbd78
FN
5187 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5188 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5189 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5190 default: return 1;
5191 }
5192 }
dd8fbd78 5193 dead_tmp(tmp2);
9ee6e8bb
PB
5194 if (op < 8) {
5195 /* Accumulate. */
dd8fbd78 5196 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5197 switch (op) {
5198 case 0:
dd8fbd78 5199 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5200 break;
5201 case 1:
dd8fbd78 5202 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5203 break;
5204 case 4:
dd8fbd78 5205 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5206 break;
5207 case 5:
dd8fbd78 5208 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5209 break;
5210 default:
5211 abort();
5212 }
dd8fbd78 5213 dead_tmp(tmp2);
9ee6e8bb 5214 }
dd8fbd78 5215 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5216 }
5217 break;
5218 case 2: /* VMLAL scalar */
5219 case 3: /* VQDMLAL scalar */
5220 case 6: /* VMLSL scalar */
5221 case 7: /* VQDMLSL scalar */
5222 case 10: /* VMULL scalar */
5223 case 11: /* VQDMULL scalar */
ad69471c
PB
5224 if (size == 0 && (op == 3 || op == 7 || op == 11))
5225 return 1;
5226
dd8fbd78
FN
5227 tmp2 = neon_get_scalar(size, rm);
5228 tmp3 = neon_load_reg(rn, 1);
ad69471c 5229
9ee6e8bb 5230 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5231 if (pass == 0) {
5232 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5233 } else {
dd8fbd78 5234 tmp = tmp3;
9ee6e8bb 5235 }
ad69471c 5236 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5237 dead_tmp(tmp);
9ee6e8bb 5238 if (op == 6 || op == 7) {
ad69471c
PB
5239 gen_neon_negl(cpu_V0, size);
5240 }
5241 if (op != 11) {
5242 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5243 }
9ee6e8bb
PB
5244 switch (op) {
5245 case 2: case 6:
ad69471c 5246 gen_neon_addl(size);
9ee6e8bb
PB
5247 break;
5248 case 3: case 7:
ad69471c
PB
5249 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5250 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5251 break;
5252 case 10:
5253 /* no-op */
5254 break;
5255 case 11:
ad69471c 5256 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5257 break;
5258 default:
5259 abort();
5260 }
ad69471c 5261 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5262 }
dd8fbd78
FN
5263
5264 dead_tmp(tmp2);
5265
9ee6e8bb
PB
5266 break;
5267 default: /* 14 and 15 are RESERVED */
5268 return 1;
5269 }
5270 }
5271 } else { /* size == 3 */
5272 if (!u) {
5273 /* Extract. */
9ee6e8bb 5274 imm = (insn >> 8) & 0xf;
ad69471c
PB
5275
5276 if (imm > 7 && !q)
5277 return 1;
5278
5279 if (imm == 0) {
5280 neon_load_reg64(cpu_V0, rn);
5281 if (q) {
5282 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5283 }
ad69471c
PB
5284 } else if (imm == 8) {
5285 neon_load_reg64(cpu_V0, rn + 1);
5286 if (q) {
5287 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5288 }
ad69471c 5289 } else if (q) {
a7812ae4 5290 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5291 if (imm < 8) {
5292 neon_load_reg64(cpu_V0, rn);
a7812ae4 5293 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5294 } else {
5295 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5296 neon_load_reg64(tmp64, rm);
ad69471c
PB
5297 }
5298 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5299 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5300 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5301 if (imm < 8) {
5302 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5303 } else {
ad69471c
PB
5304 neon_load_reg64(cpu_V1, rm + 1);
5305 imm -= 8;
9ee6e8bb 5306 }
ad69471c 5307 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5308 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5309 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5310 tcg_temp_free_i64(tmp64);
ad69471c 5311 } else {
a7812ae4 5312 /* BUGFIX */
ad69471c 5313 neon_load_reg64(cpu_V0, rn);
a7812ae4 5314 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5315 neon_load_reg64(cpu_V1, rm);
a7812ae4 5316 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5317 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5318 }
5319 neon_store_reg64(cpu_V0, rd);
5320 if (q) {
5321 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5322 }
5323 } else if ((insn & (1 << 11)) == 0) {
5324 /* Two register misc. */
5325 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5326 size = (insn >> 18) & 3;
5327 switch (op) {
5328 case 0: /* VREV64 */
5329 if (size == 3)
5330 return 1;
5331 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5332 tmp = neon_load_reg(rm, pass * 2);
5333 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5334 switch (size) {
dd8fbd78
FN
5335 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5336 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5337 case 2: /* no-op */ break;
5338 default: abort();
5339 }
dd8fbd78 5340 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5341 if (size == 2) {
dd8fbd78 5342 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5343 } else {
9ee6e8bb 5344 switch (size) {
dd8fbd78
FN
5345 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5346 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5347 default: abort();
5348 }
dd8fbd78 5349 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5350 }
5351 }
5352 break;
5353 case 4: case 5: /* VPADDL */
5354 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5355 if (size == 3)
5356 return 1;
ad69471c
PB
5357 for (pass = 0; pass < q + 1; pass++) {
5358 tmp = neon_load_reg(rm, pass * 2);
5359 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5360 tmp = neon_load_reg(rm, pass * 2 + 1);
5361 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5362 switch (size) {
5363 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5364 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5365 case 2: tcg_gen_add_i64(CPU_V001); break;
5366 default: abort();
5367 }
9ee6e8bb
PB
5368 if (op >= 12) {
5369 /* Accumulate. */
ad69471c
PB
5370 neon_load_reg64(cpu_V1, rd + pass);
5371 gen_neon_addl(size);
9ee6e8bb 5372 }
ad69471c 5373 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5374 }
5375 break;
5376 case 33: /* VTRN */
5377 if (size == 2) {
5378 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5379 tmp = neon_load_reg(rm, n);
5380 tmp2 = neon_load_reg(rd, n + 1);
5381 neon_store_reg(rm, n, tmp2);
5382 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5383 }
5384 } else {
5385 goto elementwise;
5386 }
5387 break;
5388 case 34: /* VUZP */
5389 /* Reg Before After
5390 Rd A3 A2 A1 A0 B2 B0 A2 A0
5391 Rm B3 B2 B1 B0 B3 B1 A3 A1
5392 */
5393 if (size == 3)
5394 return 1;
5395 gen_neon_unzip(rd, q, 0, size);
5396 gen_neon_unzip(rm, q, 4, size);
5397 if (q) {
5398 static int unzip_order_q[8] =
5399 {0, 2, 4, 6, 1, 3, 5, 7};
5400 for (n = 0; n < 8; n++) {
5401 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5402 tmp = neon_load_scratch(unzip_order_q[n]);
5403 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5404 }
5405 } else {
5406 static int unzip_order[4] =
5407 {0, 4, 1, 5};
5408 for (n = 0; n < 4; n++) {
5409 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5410 tmp = neon_load_scratch(unzip_order[n]);
5411 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5412 }
5413 }
5414 break;
5415 case 35: /* VZIP */
5416 /* Reg Before After
5417 Rd A3 A2 A1 A0 B1 A1 B0 A0
5418 Rm B3 B2 B1 B0 B3 A3 B2 A2
5419 */
5420 if (size == 3)
5421 return 1;
5422 count = (q ? 4 : 2);
5423 for (n = 0; n < count; n++) {
dd8fbd78
FN
5424 tmp = neon_load_reg(rd, n);
5425 tmp2 = neon_load_reg(rm, n);
9ee6e8bb 5426 switch (size) {
dd8fbd78
FN
5427 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5428 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5429 case 2: /* no-op */; break;
5430 default: abort();
5431 }
dd8fbd78
FN
5432 neon_store_scratch(n * 2, tmp);
5433 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5434 }
5435 for (n = 0; n < count * 2; n++) {
5436 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5437 tmp = neon_load_scratch(n);
5438 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5439 }
5440 break;
5441 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5442 if (size == 3)
5443 return 1;
a50f5b91 5444 TCGV_UNUSED(tmp2);
9ee6e8bb 5445 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5446 neon_load_reg64(cpu_V0, rm + pass);
5447 tmp = new_tmp();
9ee6e8bb 5448 if (op == 36 && q == 0) {
ad69471c 5449 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5450 } else if (q) {
ad69471c 5451 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5452 } else {
ad69471c
PB
5453 gen_neon_narrow_sats(size, tmp, cpu_V0);
5454 }
5455 if (pass == 0) {
5456 tmp2 = tmp;
5457 } else {
5458 neon_store_reg(rd, 0, tmp2);
5459 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5460 }
9ee6e8bb
PB
5461 }
5462 break;
5463 case 38: /* VSHLL */
ad69471c 5464 if (q || size == 3)
9ee6e8bb 5465 return 1;
ad69471c
PB
5466 tmp = neon_load_reg(rm, 0);
5467 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5468 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5469 if (pass == 1)
5470 tmp = tmp2;
5471 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5472 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5473 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5474 }
5475 break;
60011498
PB
5476 case 44: /* VCVT.F16.F32 */
5477 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5478 return 1;
5479 tmp = new_tmp();
5480 tmp2 = new_tmp();
5481 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5482 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5483 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5484 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5485 tcg_gen_shli_i32(tmp2, tmp2, 16);
5486 tcg_gen_or_i32(tmp2, tmp2, tmp);
5487 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5488 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5489 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5490 neon_store_reg(rd, 0, tmp2);
5491 tmp2 = new_tmp();
5492 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5493 tcg_gen_shli_i32(tmp2, tmp2, 16);
5494 tcg_gen_or_i32(tmp2, tmp2, tmp);
5495 neon_store_reg(rd, 1, tmp2);
5496 dead_tmp(tmp);
5497 break;
5498 case 46: /* VCVT.F32.F16 */
5499 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5500 return 1;
5501 tmp3 = new_tmp();
5502 tmp = neon_load_reg(rm, 0);
5503 tmp2 = neon_load_reg(rm, 1);
5504 tcg_gen_ext16u_i32(tmp3, tmp);
5505 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5506 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5507 tcg_gen_shri_i32(tmp3, tmp, 16);
5508 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5509 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5510 dead_tmp(tmp);
5511 tcg_gen_ext16u_i32(tmp3, tmp2);
5512 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5513 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5514 tcg_gen_shri_i32(tmp3, tmp2, 16);
5515 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5516 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5517 dead_tmp(tmp2);
5518 dead_tmp(tmp3);
5519 break;
9ee6e8bb
PB
5520 default:
5521 elementwise:
5522 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5523 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5524 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5525 neon_reg_offset(rm, pass));
dd8fbd78 5526 TCGV_UNUSED(tmp);
9ee6e8bb 5527 } else {
dd8fbd78 5528 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5529 }
5530 switch (op) {
5531 case 1: /* VREV32 */
5532 switch (size) {
dd8fbd78
FN
5533 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5534 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5535 default: return 1;
5536 }
5537 break;
5538 case 2: /* VREV16 */
5539 if (size != 0)
5540 return 1;
dd8fbd78 5541 gen_rev16(tmp);
9ee6e8bb 5542 break;
9ee6e8bb
PB
5543 case 8: /* CLS */
5544 switch (size) {
dd8fbd78
FN
5545 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5546 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5547 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5548 default: return 1;
5549 }
5550 break;
5551 case 9: /* CLZ */
5552 switch (size) {
dd8fbd78
FN
5553 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5554 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5555 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5556 default: return 1;
5557 }
5558 break;
5559 case 10: /* CNT */
5560 if (size != 0)
5561 return 1;
dd8fbd78 5562 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5563 break;
5564 case 11: /* VNOT */
5565 if (size != 0)
5566 return 1;
dd8fbd78 5567 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5568 break;
5569 case 14: /* VQABS */
5570 switch (size) {
dd8fbd78
FN
5571 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5572 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5573 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5574 default: return 1;
5575 }
5576 break;
5577 case 15: /* VQNEG */
5578 switch (size) {
dd8fbd78
FN
5579 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5580 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5581 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5582 default: return 1;
5583 }
5584 break;
5585 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5586 tmp2 = tcg_const_i32(0);
9ee6e8bb 5587 switch(size) {
dd8fbd78
FN
5588 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5589 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5590 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5591 default: return 1;
5592 }
dd8fbd78 5593 tcg_temp_free(tmp2);
9ee6e8bb 5594 if (op == 19)
dd8fbd78 5595 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5596 break;
5597 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5598 tmp2 = tcg_const_i32(0);
9ee6e8bb 5599 switch(size) {
dd8fbd78
FN
5600 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5601 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5602 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5603 default: return 1;
5604 }
dd8fbd78 5605 tcg_temp_free(tmp2);
9ee6e8bb 5606 if (op == 20)
dd8fbd78 5607 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5608 break;
5609 case 18: /* VCEQ #0 */
dd8fbd78 5610 tmp2 = tcg_const_i32(0);
9ee6e8bb 5611 switch(size) {
dd8fbd78
FN
5612 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5613 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5614 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5615 default: return 1;
5616 }
dd8fbd78 5617 tcg_temp_free(tmp2);
9ee6e8bb
PB
5618 break;
5619 case 22: /* VABS */
5620 switch(size) {
dd8fbd78
FN
5621 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5622 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5623 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5624 default: return 1;
5625 }
5626 break;
5627 case 23: /* VNEG */
ad69471c
PB
5628 if (size == 3)
5629 return 1;
dd8fbd78
FN
5630 tmp2 = tcg_const_i32(0);
5631 gen_neon_rsb(size, tmp, tmp2);
5632 tcg_temp_free(tmp2);
9ee6e8bb
PB
5633 break;
5634 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5635 tmp2 = tcg_const_i32(0);
5636 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5637 tcg_temp_free(tmp2);
9ee6e8bb 5638 if (op == 27)
dd8fbd78 5639 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5640 break;
5641 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5642 tmp2 = tcg_const_i32(0);
5643 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5644 tcg_temp_free(tmp2);
9ee6e8bb 5645 if (op == 28)
dd8fbd78 5646 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5647 break;
5648 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5649 tmp2 = tcg_const_i32(0);
5650 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5651 tcg_temp_free(tmp2);
9ee6e8bb
PB
5652 break;
5653 case 30: /* Float VABS */
4373f3ce 5654 gen_vfp_abs(0);
9ee6e8bb
PB
5655 break;
5656 case 31: /* Float VNEG */
4373f3ce 5657 gen_vfp_neg(0);
9ee6e8bb
PB
5658 break;
5659 case 32: /* VSWP */
dd8fbd78
FN
5660 tmp2 = neon_load_reg(rd, pass);
5661 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5662 break;
5663 case 33: /* VTRN */
dd8fbd78 5664 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5665 switch (size) {
dd8fbd78
FN
5666 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5667 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5668 case 2: abort();
5669 default: return 1;
5670 }
dd8fbd78 5671 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5672 break;
5673 case 56: /* Integer VRECPE */
dd8fbd78 5674 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5675 break;
5676 case 57: /* Integer VRSQRTE */
dd8fbd78 5677 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5678 break;
5679 case 58: /* Float VRECPE */
4373f3ce 5680 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5681 break;
5682 case 59: /* Float VRSQRTE */
4373f3ce 5683 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5684 break;
5685 case 60: /* VCVT.F32.S32 */
d3587ef8 5686 gen_vfp_sito(0);
9ee6e8bb
PB
5687 break;
5688 case 61: /* VCVT.F32.U32 */
d3587ef8 5689 gen_vfp_uito(0);
9ee6e8bb
PB
5690 break;
5691 case 62: /* VCVT.S32.F32 */
d3587ef8 5692 gen_vfp_tosiz(0);
9ee6e8bb
PB
5693 break;
5694 case 63: /* VCVT.U32.F32 */
d3587ef8 5695 gen_vfp_touiz(0);
9ee6e8bb
PB
5696 break;
5697 default:
5698 /* Reserved: 21, 29, 39-56 */
5699 return 1;
5700 }
5701 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5702 tcg_gen_st_f32(cpu_F0s, cpu_env,
5703 neon_reg_offset(rd, pass));
9ee6e8bb 5704 } else {
dd8fbd78 5705 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5706 }
5707 }
5708 break;
5709 }
5710 } else if ((insn & (1 << 10)) == 0) {
5711 /* VTBL, VTBX. */
3018f259 5712 n = ((insn >> 5) & 0x18) + 8;
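                /* ((insn >> 8) & 3) is the "len" field of VTBL/VTBX, so n is
                   the table length in bytes: 8, 16, 24 or 32 (1-4 D registers). */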
9ee6e8bb 5713 if (insn & (1 << 6)) {
8f8e3aa4 5714 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5715 } else {
8f8e3aa4
PB
5716 tmp = new_tmp();
5717 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5718 }
8f8e3aa4 5719 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5720 tmp4 = tcg_const_i32(rn);
5721 tmp5 = tcg_const_i32(n);
5722 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
3018f259 5723 dead_tmp(tmp);
9ee6e8bb 5724 if (insn & (1 << 6)) {
8f8e3aa4 5725 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5726 } else {
8f8e3aa4
PB
5727 tmp = new_tmp();
5728 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5729 }
8f8e3aa4 5730 tmp3 = neon_load_reg(rm, 1);
b75263d6 5731 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5732 tcg_temp_free_i32(tmp5);
5733 tcg_temp_free_i32(tmp4);
8f8e3aa4 5734 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5735 neon_store_reg(rd, 1, tmp3);
5736 dead_tmp(tmp);
9ee6e8bb
PB
5737 } else if ((insn & 0x380) == 0) {
5738 /* VDUP */
5739 if (insn & (1 << 19)) {
dd8fbd78 5740 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5741 } else {
dd8fbd78 5742 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5743 }
5744 if (insn & (1 << 16)) {
dd8fbd78 5745 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5746 } else if (insn & (1 << 17)) {
5747 if ((insn >> 18) & 1)
dd8fbd78 5748 gen_neon_dup_high16(tmp);
9ee6e8bb 5749 else
dd8fbd78 5750 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5751 }
5752 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5753 tmp2 = new_tmp();
5754 tcg_gen_mov_i32(tmp2, tmp);
5755 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5756 }
dd8fbd78 5757 dead_tmp(tmp);
9ee6e8bb
PB
5758 } else {
5759 return 1;
5760 }
5761 }
5762 }
5763 return 0;
5764}
5765
fe1479c3
PB
5766static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5767{
5768 int crn = (insn >> 16) & 0xf;
5769 int crm = insn & 0xf;
5770 int op1 = (insn >> 21) & 7;
5771 int op2 = (insn >> 5) & 7;
5772 int rt = (insn >> 12) & 0xf;
5773 TCGv tmp;
5774
5775 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5776 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5777 /* TEECR */
5778 if (IS_USER(s))
5779 return 1;
5780 tmp = load_cpu_field(teecr);
5781 store_reg(s, rt, tmp);
5782 return 0;
5783 }
5784 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5785 /* TEEHBR */
5786 if (IS_USER(s) && (env->teecr & 1))
5787 return 1;
5788 tmp = load_cpu_field(teehbr);
5789 store_reg(s, rt, tmp);
5790 return 0;
5791 }
5792 }
5793 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5794 op1, crn, crm, op2);
5795 return 1;
5796}
5797
5798static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5799{
5800 int crn = (insn >> 16) & 0xf;
5801 int crm = insn & 0xf;
5802 int op1 = (insn >> 21) & 7;
5803 int op2 = (insn >> 5) & 7;
5804 int rt = (insn >> 12) & 0xf;
5805 TCGv tmp;
5806
5807 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5808 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5809 /* TEECR */
5810 if (IS_USER(s))
5811 return 1;
5812 tmp = load_reg(s, rt);
5813 gen_helper_set_teecr(cpu_env, tmp);
5814 dead_tmp(tmp);
5815 return 0;
5816 }
5817 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5818 /* TEEHBR */
5819 if (IS_USER(s) && (env->teecr & 1))
5820 return 1;
5821 tmp = load_reg(s, rt);
5822 store_cpu_field(tmp, teehbr);
5823 return 0;
5824 }
5825 }
5826 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5827 op1, crn, crm, op2);
5828 return 1;
5829}
5830
9ee6e8bb
PB
5831static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5832{
5833 int cpnum;
5834
5835 cpnum = (insn >> 8) & 0xf;
5836 if (arm_feature(env, ARM_FEATURE_XSCALE)
5837 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5838 return 1;
5839
5840 switch (cpnum) {
5841 case 0:
5842 case 1:
5843 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5844 return disas_iwmmxt_insn(env, s, insn);
5845 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5846 return disas_dsp_insn(env, s, insn);
5847 }
5848 return 1;
5849 case 10:
5850 case 11:
5851 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5852 case 14:
5853 /* Coprocessors 7-15 are architecturally reserved by ARM.
5854 Unfortunately Intel decided to ignore this. */
5855 if (arm_feature(env, ARM_FEATURE_XSCALE))
5856 goto board;
5857 if (insn & (1 << 20))
5858 return disas_cp14_read(env, s, insn);
5859 else
5860 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5861 case 15:
5862 return disas_cp15_insn (env, s, insn);
5863 default:
fe1479c3 5864 board:
9ee6e8bb
PB
5865 /* Unknown coprocessor. See if the board has hooked it. */
5866 return disas_cp_insn (env, s, insn);
5867 }
5868}
5869
5e3f878a
PB
5870
5871/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5872static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5873{
5874 TCGv tmp;
5875 tmp = new_tmp();
5876 tcg_gen_trunc_i64_i32(tmp, val);
5877 store_reg(s, rlow, tmp);
5878 tmp = new_tmp();
5879 tcg_gen_shri_i64(val, val, 32);
5880 tcg_gen_trunc_i64_i32(tmp, val);
5881 store_reg(s, rhigh, tmp);
5882}
5883
5884/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5885static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5886{
a7812ae4 5887 TCGv_i64 tmp;
5e3f878a
PB
5888 TCGv tmp2;
5889
36aa55dc 5890 /* Load value and extend to 64 bits. */
a7812ae4 5891 tmp = tcg_temp_new_i64();
5e3f878a
PB
5892 tmp2 = load_reg(s, rlow);
5893 tcg_gen_extu_i32_i64(tmp, tmp2);
5894 dead_tmp(tmp2);
5895 tcg_gen_add_i64(val, val, tmp);
b75263d6 5896 tcg_temp_free_i64(tmp);
5e3f878a
PB
5897}
5898
5899/* load and add a 64-bit value from a register pair. */
a7812ae4 5900static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5901{
a7812ae4 5902 TCGv_i64 tmp;
36aa55dc
PB
5903 TCGv tmpl;
5904 TCGv tmph;
5e3f878a
PB
5905
5906 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5907 tmpl = load_reg(s, rlow);
5908 tmph = load_reg(s, rhigh);
a7812ae4 5909 tmp = tcg_temp_new_i64();
36aa55dc
PB
5910 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5911 dead_tmp(tmpl);
5912 dead_tmp(tmph);
5e3f878a 5913 tcg_gen_add_i64(val, val, tmp);
b75263d6 5914 tcg_temp_free_i64(tmp);
5e3f878a
PB
5915}
5916
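For reference, the arithmetic that gen_storeq_reg(), gen_addq_lo() and gen_addq() emit as TCG ops can be written as ordinary C. Below is a minimal stand-alone sketch against an illustrative fake register file; the names are not QEMU APIs, they only mirror what the generated code computes.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Illustrative only: the same arithmetic the helpers above emit as TCG ops,
   written as plain C on a fake register file. */
static uint32_t regs[16];

static void storeq_reg(int rlow, int rhigh, uint64_t val)
{
    regs[rlow]  = (uint32_t)val;          /* low word  */
    regs[rhigh] = (uint32_t)(val >> 32);  /* high word */
}

static uint64_t addq_lo(uint64_t val, int rlow)
{
    return val + regs[rlow];              /* zero-extended 32-bit accumulate */
}

static uint64_t addq(uint64_t val, int rlow, int rhigh)
{
    uint64_t pair = ((uint64_t)regs[rhigh] << 32) | regs[rlow];
    return val + pair;                    /* 64-bit accumulate from a register pair */
}

int main(void)
{
    regs[0] = 0xffffffffu;
    regs[1] = 0x1;
    uint64_t acc = addq(0, 0, 1);         /* acc = 0x1ffffffff */
    acc = addq_lo(acc, 0);                /* acc += 0xffffffff */
    storeq_reg(2, 3, acc);
    printf("r2=%08" PRIx32 " r3=%08" PRIx32 "\n", regs[2], regs[3]);
    return 0;
}

The helpers above do the same thing one TCG op at a time, freeing each temporary once it is dead.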
5917/* Set N and Z flags from a 64-bit value. */
a7812ae4 5918static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5919{
5920 TCGv tmp = new_tmp();
5921 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5922 gen_logic_CC(tmp);
5923 dead_tmp(tmp);
5e3f878a
PB
5924}
5925
426f5abc
PB
5926/* Load/Store exclusive instructions are implemented by remembering
5927 the value/address loaded, and seeing if these are the same
5928 when the store is performed. This should be sufficient to implement
5929 the architecturally mandated semantics, and avoids having to monitor
5930 regular stores.
5931
5932 In system emulation mode only one CPU will be running at once, so
5933 this sequence is effectively atomic. In user emulation mode we
5934 throw an exception and handle the atomic operation elsewhere. */
5935static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5936 TCGv addr, int size)
5937{
5938 TCGv tmp;
5939
5940 switch (size) {
5941 case 0:
5942 tmp = gen_ld8u(addr, IS_USER(s));
5943 break;
5944 case 1:
5945 tmp = gen_ld16u(addr, IS_USER(s));
5946 break;
5947 case 2:
5948 case 3:
5949 tmp = gen_ld32(addr, IS_USER(s));
5950 break;
5951 default:
5952 abort();
5953 }
5954 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5955 store_reg(s, rt, tmp);
5956 if (size == 3) {
2c9adbda
PM
5957 TCGv tmp2 = new_tmp();
5958 tcg_gen_addi_i32(tmp2, addr, 4);
5959 tmp = gen_ld32(tmp2, IS_USER(s));
5960 dead_tmp(tmp2);
426f5abc
PB
5961 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5962 store_reg(s, rt2, tmp);
5963 }
5964 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5965}
5966
5967static void gen_clrex(DisasContext *s)
5968{
5969 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5970}
5971
5972#ifdef CONFIG_USER_ONLY
5973static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5974 TCGv addr, int size)
5975{
5976 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5977 tcg_gen_movi_i32(cpu_exclusive_info,
5978 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5979 gen_set_condexec(s);
5980 gen_set_pc_im(s->pc - 4);
5981 gen_exception(EXCP_STREX);
5982 s->is_jmp = DISAS_JUMP;
5983}
5984#else
5985static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5986 TCGv addr, int size)
5987{
5988 TCGv tmp;
5989 int done_label;
5990 int fail_label;
5991
5992 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5993 [addr] = {Rt};
5994 {Rd} = 0;
5995 } else {
5996 {Rd} = 1;
5997 } */
5998 fail_label = gen_new_label();
5999 done_label = gen_new_label();
6000 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6001 switch (size) {
6002 case 0:
6003 tmp = gen_ld8u(addr, IS_USER(s));
6004 break;
6005 case 1:
6006 tmp = gen_ld16u(addr, IS_USER(s));
6007 break;
6008 case 2:
6009 case 3:
6010 tmp = gen_ld32(addr, IS_USER(s));
6011 break;
6012 default:
6013 abort();
6014 }
6015 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6016 dead_tmp(tmp);
6017 if (size == 3) {
6018 TCGv tmp2 = new_tmp();
6019 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6020 tmp = gen_ld32(tmp2, IS_USER(s));
426f5abc
PB
6021 dead_tmp(tmp2);
6022 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6023 dead_tmp(tmp);
6024 }
6025 tmp = load_reg(s, rt);
6026 switch (size) {
6027 case 0:
6028 gen_st8(tmp, addr, IS_USER(s));
6029 break;
6030 case 1:
6031 gen_st16(tmp, addr, IS_USER(s));
6032 break;
6033 case 2:
6034 case 3:
6035 gen_st32(tmp, addr, IS_USER(s));
6036 break;
6037 default:
6038 abort();
6039 }
6040 if (size == 3) {
6041 tcg_gen_addi_i32(addr, addr, 4);
6042 tmp = load_reg(s, rt2);
6043 gen_st32(tmp, addr, IS_USER(s));
6044 }
6045 tcg_gen_movi_i32(cpu_R[rd], 0);
6046 tcg_gen_br(done_label);
6047 gen_set_label(fail_label);
6048 tcg_gen_movi_i32(cpu_R[rd], 1);
6049 gen_set_label(done_label);
6050 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6051}
6052#endif
6053
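The scheme described in the comment above can be pictured as a tiny stand-alone C model. This is a minimal sketch assuming a single-threaded environment and hypothetical names; the real code emits TCG ops and, in user emulation, raises EXCP_STREX so the operation is handled elsewhere.

#include <stdint.h>
#include <stdio.h>

/* Minimal single-threaded model of the exclusive monitor: remember the
   address and value at LDREX time, and let STREX succeed only if both
   still match. Illustrative only. */
static uint32_t excl_addr = (uint32_t)-1;
static uint32_t excl_val;

static uint32_t ldrex(const uint32_t *mem, uint32_t addr)
{
    excl_addr = addr;
    excl_val = mem[addr / 4];
    return excl_val;
}

/* Returns 0 on success (like STREX), 1 on failure. */
static int strex(uint32_t *mem, uint32_t addr, uint32_t newval)
{
    if (addr != excl_addr || mem[addr / 4] != excl_val) {
        return 1;
    }
    mem[addr / 4] = newval;
    excl_addr = (uint32_t)-1;   /* equivalent of clrex */
    return 0;
}

int main(void)
{
    uint32_t mem[4] = { 5, 0, 0, 0 };
    uint32_t v = ldrex(mem, 0);
    int failed = strex(mem, 0, v + 1);
    printf("failed=%d mem[0]=%u\n", failed, mem[0]);   /* failed=0 mem[0]=6 */
    return 0;
}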
9ee6e8bb
PB
6054static void disas_arm_insn(CPUState * env, DisasContext *s)
6055{
6056 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6057 TCGv tmp;
3670669c 6058 TCGv tmp2;
6ddbc6e4 6059 TCGv tmp3;
b0109805 6060 TCGv addr;
a7812ae4 6061 TCGv_i64 tmp64;
9ee6e8bb
PB
6062
6063 insn = ldl_code(s->pc);
6064 s->pc += 4;
6065
6066 /* M variants do not implement ARM mode. */
6067 if (IS_M(env))
6068 goto illegal_op;
6069 cond = insn >> 28;
6070 if (cond == 0xf){
6071 /* Unconditional instructions. */
6072 if (((insn >> 25) & 7) == 1) {
6073 /* NEON Data processing. */
6074 if (!arm_feature(env, ARM_FEATURE_NEON))
6075 goto illegal_op;
6076
6077 if (disas_neon_data_insn(env, s, insn))
6078 goto illegal_op;
6079 return;
6080 }
6081 if ((insn & 0x0f100000) == 0x04000000) {
6082 /* NEON load/store. */
6083 if (!arm_feature(env, ARM_FEATURE_NEON))
6084 goto illegal_op;
6085
6086 if (disas_neon_ls_insn(env, s, insn))
6087 goto illegal_op;
6088 return;
6089 }
6090 if ((insn & 0x0d70f000) == 0x0550f000)
6091 return; /* PLD */
6092 else if ((insn & 0x0ffffdff) == 0x01010000) {
6093 ARCH(6);
6094 /* setend */
6095 if (insn & (1 << 9)) {
6096 /* BE8 mode not implemented. */
6097 goto illegal_op;
6098 }
6099 return;
6100 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6101 switch ((insn >> 4) & 0xf) {
6102 case 1: /* clrex */
6103 ARCH(6K);
426f5abc 6104 gen_clrex(s);
9ee6e8bb
PB
6105 return;
6106 case 4: /* dsb */
6107 case 5: /* dmb */
6108 case 6: /* isb */
6109 ARCH(7);
6110 /* We don't emulate caches so these are a no-op. */
6111 return;
6112 default:
6113 goto illegal_op;
6114 }
6115 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6116 /* srs */
c67b6b71 6117 int32_t offset;
9ee6e8bb
PB
6118 if (IS_USER(s))
6119 goto illegal_op;
6120 ARCH(6);
6121 op1 = (insn & 0x1f);
39ea3d4e
PM
6122 addr = new_tmp();
6123 tmp = tcg_const_i32(op1);
6124 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6125 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6126 i = (insn >> 23) & 3;
6127 switch (i) {
6128 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6129 case 1: offset = 0; break; /* IA */
6130 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6131 case 3: offset = 4; break; /* IB */
6132 default: abort();
6133 }
6134 if (offset)
b0109805
PB
6135 tcg_gen_addi_i32(addr, addr, offset);
6136 tmp = load_reg(s, 14);
6137 gen_st32(tmp, addr, 0);
c67b6b71 6138 tmp = load_cpu_field(spsr);
b0109805
PB
6139 tcg_gen_addi_i32(addr, addr, 4);
6140 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6141 if (insn & (1 << 21)) {
6142 /* Base writeback. */
6143 switch (i) {
6144 case 0: offset = -8; break;
c67b6b71
FN
6145 case 1: offset = 4; break;
6146 case 2: offset = -4; break;
9ee6e8bb
PB
6147 case 3: offset = 0; break;
6148 default: abort();
6149 }
6150 if (offset)
c67b6b71 6151 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6152 tmp = tcg_const_i32(op1);
6153 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6154 tcg_temp_free_i32(tmp);
6155 dead_tmp(addr);
b0109805
PB
6156 } else {
6157 dead_tmp(addr);
9ee6e8bb 6158 }
a990f58f 6159 return;
ea825eee 6160 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6161 /* rfe */
c67b6b71 6162 int32_t offset;
9ee6e8bb
PB
6163 if (IS_USER(s))
6164 goto illegal_op;
6165 ARCH(6);
6166 rn = (insn >> 16) & 0xf;
b0109805 6167 addr = load_reg(s, rn);
9ee6e8bb
PB
6168 i = (insn >> 23) & 3;
6169 switch (i) {
b0109805 6170 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6171 case 1: offset = 0; break; /* IA */
6172 case 2: offset = -8; break; /* DB */
b0109805 6173 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6174 default: abort();
6175 }
6176 if (offset)
b0109805
PB
6177 tcg_gen_addi_i32(addr, addr, offset);
6178 /* Load PC into tmp and CPSR into tmp2. */
6179 tmp = gen_ld32(addr, 0);
6180 tcg_gen_addi_i32(addr, addr, 4);
6181 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6182 if (insn & (1 << 21)) {
6183 /* Base writeback. */
6184 switch (i) {
b0109805 6185 case 0: offset = -8; break;
c67b6b71
FN
6186 case 1: offset = 4; break;
6187 case 2: offset = -4; break;
b0109805 6188 case 3: offset = 0; break;
9ee6e8bb
PB
6189 default: abort();
6190 }
6191 if (offset)
b0109805
PB
6192 tcg_gen_addi_i32(addr, addr, offset);
6193 store_reg(s, rn, addr);
6194 } else {
6195 dead_tmp(addr);
9ee6e8bb 6196 }
b0109805 6197 gen_rfe(s, tmp, tmp2);
c67b6b71 6198 return;
9ee6e8bb
PB
6199 } else if ((insn & 0x0e000000) == 0x0a000000) {
6200 /* branch link and change to thumb (blx <offset>) */
6201 int32_t offset;
6202
6203 val = (uint32_t)s->pc;
d9ba4830
PB
6204 tmp = new_tmp();
6205 tcg_gen_movi_i32(tmp, val);
6206 store_reg(s, 14, tmp);
9ee6e8bb
PB
6207 /* Sign-extend the 24-bit offset */
6208 offset = (((int32_t)insn) << 8) >> 8;
6209 /* offset * 4 + bit24 * 2 + (thumb bit) */
6210 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6211 /* pipeline offset */
6212 val += 4;
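            /* Worked example (hypothetical values): for a BLX at address
               0x1000 with imm24 = 0x000010 and the H bit (bit 24) clear, the
               target is 0x1000 + 8 + (16 << 2) = 0x1048, and bit 0 is set so
               gen_bx_im() below switches to Thumb state; LR was set to 0x1004
               above. */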
d9ba4830 6213 gen_bx_im(s, val);
9ee6e8bb
PB
6214 return;
6215 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6216 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6217 /* iWMMXt register transfer. */
6218 if (env->cp15.c15_cpar & (1 << 1))
6219 if (!disas_iwmmxt_insn(env, s, insn))
6220 return;
6221 }
6222 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6223 /* Coprocessor double register transfer. */
6224 } else if ((insn & 0x0f000010) == 0x0e000010) {
6225 /* Additional coprocessor register transfer. */
7997d92f 6226 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6227 uint32_t mask;
6228 uint32_t val;
6229 /* cps (privileged) */
6230 if (IS_USER(s))
6231 return;
6232 mask = val = 0;
6233 if (insn & (1 << 19)) {
6234 if (insn & (1 << 8))
6235 mask |= CPSR_A;
6236 if (insn & (1 << 7))
6237 mask |= CPSR_I;
6238 if (insn & (1 << 6))
6239 mask |= CPSR_F;
6240 if (insn & (1 << 18))
6241 val |= mask;
6242 }
7997d92f 6243 if (insn & (1 << 17)) {
9ee6e8bb
PB
6244 mask |= CPSR_M;
6245 val |= (insn & 0x1f);
6246 }
6247 if (mask) {
2fbac54b 6248 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6249 }
6250 return;
6251 }
6252 goto illegal_op;
6253 }
6254 if (cond != 0xe) {
6255 /* if not always execute, we generate a conditional jump to
6256 next instruction */
6257 s->condlabel = gen_new_label();
d9ba4830 6258 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6259 s->condjmp = 1;
6260 }
6261 if ((insn & 0x0f900000) == 0x03000000) {
6262 if ((insn & (1 << 21)) == 0) {
6263 ARCH(6T2);
6264 rd = (insn >> 12) & 0xf;
6265 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6266 if ((insn & (1 << 22)) == 0) {
6267 /* MOVW */
5e3f878a
PB
6268 tmp = new_tmp();
6269 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6270 } else {
6271 /* MOVT */
5e3f878a 6272 tmp = load_reg(s, rd);
86831435 6273 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6274 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6275 }
5e3f878a 6276 store_reg(s, rd, tmp);
9ee6e8bb
PB
6277 } else {
6278 if (((insn >> 12) & 0xf) != 0xf)
6279 goto illegal_op;
6280 if (((insn >> 16) & 0xf) == 0) {
6281 gen_nop_hint(s, insn & 0xff);
6282 } else {
6283 /* CPSR = immediate */
6284 val = insn & 0xff;
6285 shift = ((insn >> 8) & 0xf) * 2;
6286 if (shift)
6287 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6288 i = ((insn & (1 << 22)) != 0);
2fbac54b 6289 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6290 goto illegal_op;
6291 }
6292 }
6293 } else if ((insn & 0x0f900000) == 0x01000000
6294 && (insn & 0x00000090) != 0x00000090) {
6295 /* miscellaneous instructions */
6296 op1 = (insn >> 21) & 3;
6297 sh = (insn >> 4) & 0xf;
6298 rm = insn & 0xf;
6299 switch (sh) {
6300 case 0x0: /* move program status register */
6301 if (op1 & 1) {
6302 /* PSR = reg */
2fbac54b 6303 tmp = load_reg(s, rm);
9ee6e8bb 6304 i = ((op1 & 2) != 0);
2fbac54b 6305 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6306 goto illegal_op;
6307 } else {
6308 /* reg = PSR */
6309 rd = (insn >> 12) & 0xf;
6310 if (op1 & 2) {
6311 if (IS_USER(s))
6312 goto illegal_op;
d9ba4830 6313 tmp = load_cpu_field(spsr);
9ee6e8bb 6314 } else {
d9ba4830
PB
6315 tmp = new_tmp();
6316 gen_helper_cpsr_read(tmp);
9ee6e8bb 6317 }
d9ba4830 6318 store_reg(s, rd, tmp);
9ee6e8bb
PB
6319 }
6320 break;
6321 case 0x1:
6322 if (op1 == 1) {
6323 /* branch/exchange thumb (bx). */
d9ba4830
PB
6324 tmp = load_reg(s, rm);
6325 gen_bx(s, tmp);
9ee6e8bb
PB
6326 } else if (op1 == 3) {
6327 /* clz */
6328 rd = (insn >> 12) & 0xf;
1497c961
PB
6329 tmp = load_reg(s, rm);
6330 gen_helper_clz(tmp, tmp);
6331 store_reg(s, rd, tmp);
9ee6e8bb
PB
6332 } else {
6333 goto illegal_op;
6334 }
6335 break;
6336 case 0x2:
6337 if (op1 == 1) {
6338 ARCH(5J); /* bxj */
6339 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6340 tmp = load_reg(s, rm);
6341 gen_bx(s, tmp);
9ee6e8bb
PB
6342 } else {
6343 goto illegal_op;
6344 }
6345 break;
6346 case 0x3:
6347 if (op1 != 1)
6348 goto illegal_op;
6349
6350 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6351 tmp = load_reg(s, rm);
6352 tmp2 = new_tmp();
6353 tcg_gen_movi_i32(tmp2, s->pc);
6354 store_reg(s, 14, tmp2);
6355 gen_bx(s, tmp);
9ee6e8bb
PB
6356 break;
6357 case 0x5: /* saturating add/subtract */
6358 rd = (insn >> 12) & 0xf;
6359 rn = (insn >> 16) & 0xf;
b40d0353 6360 tmp = load_reg(s, rm);
5e3f878a 6361 tmp2 = load_reg(s, rn);
9ee6e8bb 6362 if (op1 & 2)
5e3f878a 6363 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6364 if (op1 & 1)
5e3f878a 6365 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6366 else
5e3f878a
PB
6367 gen_helper_add_saturate(tmp, tmp, tmp2);
6368 dead_tmp(tmp2);
6369 store_reg(s, rd, tmp);
9ee6e8bb 6370 break;
49e14940
AL
6371 case 7:
6372 /* SMC instruction (op1 == 3)
6373 and undefined instructions (op1 == 0 || op1 == 2)
6374 will trap */
6375 if (op1 != 1) {
6376 goto illegal_op;
6377 }
6378 /* bkpt */
9ee6e8bb 6379 gen_set_condexec(s);
5e3f878a 6380 gen_set_pc_im(s->pc - 4);
d9ba4830 6381 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6382 s->is_jmp = DISAS_JUMP;
6383 break;
6384 case 0x8: /* signed multiply */
6385 case 0xa:
6386 case 0xc:
6387 case 0xe:
6388 rs = (insn >> 8) & 0xf;
6389 rn = (insn >> 12) & 0xf;
6390 rd = (insn >> 16) & 0xf;
6391 if (op1 == 1) {
6392 /* (32 * 16) >> 16 */
5e3f878a
PB
6393 tmp = load_reg(s, rm);
6394 tmp2 = load_reg(s, rs);
9ee6e8bb 6395 if (sh & 4)
5e3f878a 6396 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6397 else
5e3f878a 6398 gen_sxth(tmp2);
a7812ae4
PB
6399 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6400 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6401 tmp = new_tmp();
a7812ae4 6402 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6403 tcg_temp_free_i64(tmp64);
9ee6e8bb 6404 if ((sh & 2) == 0) {
5e3f878a
PB
6405 tmp2 = load_reg(s, rn);
6406 gen_helper_add_setq(tmp, tmp, tmp2);
6407 dead_tmp(tmp2);
9ee6e8bb 6408 }
5e3f878a 6409 store_reg(s, rd, tmp);
9ee6e8bb
PB
6410 } else {
6411 /* 16 * 16 */
5e3f878a
PB
6412 tmp = load_reg(s, rm);
6413 tmp2 = load_reg(s, rs);
6414 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6415 dead_tmp(tmp2);
9ee6e8bb 6416 if (op1 == 2) {
a7812ae4
PB
6417 tmp64 = tcg_temp_new_i64();
6418 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6419 dead_tmp(tmp);
a7812ae4
PB
6420 gen_addq(s, tmp64, rn, rd);
6421 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6422 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6423 } else {
6424 if (op1 == 0) {
5e3f878a
PB
6425 tmp2 = load_reg(s, rn);
6426 gen_helper_add_setq(tmp, tmp, tmp2);
6427 dead_tmp(tmp2);
9ee6e8bb 6428 }
5e3f878a 6429 store_reg(s, rd, tmp);
9ee6e8bb
PB
6430 }
6431 }
6432 break;
6433 default:
6434 goto illegal_op;
6435 }
6436 } else if (((insn & 0x0e000000) == 0 &&
6437 (insn & 0x00000090) != 0x90) ||
6438 ((insn & 0x0e000000) == (1 << 25))) {
6439 int set_cc, logic_cc, shiftop;
6440
6441 op1 = (insn >> 21) & 0xf;
6442 set_cc = (insn >> 20) & 1;
6443 logic_cc = table_logic_cc[op1] & set_cc;
6444
6445 /* data processing instruction */
6446 if (insn & (1 << 25)) {
6447 /* immediate operand */
6448 val = insn & 0xff;
6449 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6450 if (shift) {
9ee6e8bb 6451 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6452 }
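                /* This is the standard ARM "modified immediate": an 8-bit value
                   rotated right by twice the 4-bit rotate field. E.g.
                   (hypothetical encoding) imm8 = 0xff with rotate = 4 gives
                   shift = 8 and val = 0xff000000. */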
6453 tmp2 = new_tmp();
6454 tcg_gen_movi_i32(tmp2, val);
6455 if (logic_cc && shift) {
6456 gen_set_CF_bit31(tmp2);
6457 }
9ee6e8bb
PB
6458 } else {
6459 /* register */
6460 rm = (insn) & 0xf;
e9bb4aa9 6461 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6462 shiftop = (insn >> 5) & 3;
6463 if (!(insn & (1 << 4))) {
6464 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6465 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6466 } else {
6467 rs = (insn >> 8) & 0xf;
8984bd2e 6468 tmp = load_reg(s, rs);
e9bb4aa9 6469 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6470 }
6471 }
6472 if (op1 != 0x0f && op1 != 0x0d) {
6473 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6474 tmp = load_reg(s, rn);
6475 } else {
6476 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6477 }
6478 rd = (insn >> 12) & 0xf;
6479 switch(op1) {
6480 case 0x00:
e9bb4aa9
JR
6481 tcg_gen_and_i32(tmp, tmp, tmp2);
6482 if (logic_cc) {
6483 gen_logic_CC(tmp);
6484 }
21aeb343 6485 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6486 break;
6487 case 0x01:
e9bb4aa9
JR
6488 tcg_gen_xor_i32(tmp, tmp, tmp2);
6489 if (logic_cc) {
6490 gen_logic_CC(tmp);
6491 }
21aeb343 6492 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6493 break;
6494 case 0x02:
6495 if (set_cc && rd == 15) {
6496 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6497 if (IS_USER(s)) {
9ee6e8bb 6498 goto illegal_op;
e9bb4aa9
JR
6499 }
6500 gen_helper_sub_cc(tmp, tmp, tmp2);
6501 gen_exception_return(s, tmp);
9ee6e8bb 6502 } else {
e9bb4aa9
JR
6503 if (set_cc) {
6504 gen_helper_sub_cc(tmp, tmp, tmp2);
6505 } else {
6506 tcg_gen_sub_i32(tmp, tmp, tmp2);
6507 }
21aeb343 6508 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6509 }
6510 break;
6511 case 0x03:
e9bb4aa9
JR
6512 if (set_cc) {
6513 gen_helper_sub_cc(tmp, tmp2, tmp);
6514 } else {
6515 tcg_gen_sub_i32(tmp, tmp2, tmp);
6516 }
21aeb343 6517 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6518 break;
6519 case 0x04:
e9bb4aa9
JR
6520 if (set_cc) {
6521 gen_helper_add_cc(tmp, tmp, tmp2);
6522 } else {
6523 tcg_gen_add_i32(tmp, tmp, tmp2);
6524 }
21aeb343 6525 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6526 break;
6527 case 0x05:
e9bb4aa9
JR
6528 if (set_cc) {
6529 gen_helper_adc_cc(tmp, tmp, tmp2);
6530 } else {
6531 gen_add_carry(tmp, tmp, tmp2);
6532 }
21aeb343 6533 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6534 break;
6535 case 0x06:
e9bb4aa9
JR
6536 if (set_cc) {
6537 gen_helper_sbc_cc(tmp, tmp, tmp2);
6538 } else {
6539 gen_sub_carry(tmp, tmp, tmp2);
6540 }
21aeb343 6541 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6542 break;
6543 case 0x07:
e9bb4aa9
JR
6544 if (set_cc) {
6545 gen_helper_sbc_cc(tmp, tmp2, tmp);
6546 } else {
6547 gen_sub_carry(tmp, tmp2, tmp);
6548 }
21aeb343 6549 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6550 break;
6551 case 0x08:
6552 if (set_cc) {
e9bb4aa9
JR
6553 tcg_gen_and_i32(tmp, tmp, tmp2);
6554 gen_logic_CC(tmp);
9ee6e8bb 6555 }
e9bb4aa9 6556 dead_tmp(tmp);
9ee6e8bb
PB
6557 break;
6558 case 0x09:
6559 if (set_cc) {
e9bb4aa9
JR
6560 tcg_gen_xor_i32(tmp, tmp, tmp2);
6561 gen_logic_CC(tmp);
9ee6e8bb 6562 }
e9bb4aa9 6563 dead_tmp(tmp);
9ee6e8bb
PB
6564 break;
6565 case 0x0a:
6566 if (set_cc) {
e9bb4aa9 6567 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6568 }
e9bb4aa9 6569 dead_tmp(tmp);
9ee6e8bb
PB
6570 break;
6571 case 0x0b:
6572 if (set_cc) {
e9bb4aa9 6573 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6574 }
e9bb4aa9 6575 dead_tmp(tmp);
9ee6e8bb
PB
6576 break;
6577 case 0x0c:
e9bb4aa9
JR
6578 tcg_gen_or_i32(tmp, tmp, tmp2);
6579 if (logic_cc) {
6580 gen_logic_CC(tmp);
6581 }
21aeb343 6582 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6583 break;
6584 case 0x0d:
6585 if (logic_cc && rd == 15) {
6586 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6587 if (IS_USER(s)) {
9ee6e8bb 6588 goto illegal_op;
e9bb4aa9
JR
6589 }
6590 gen_exception_return(s, tmp2);
9ee6e8bb 6591 } else {
e9bb4aa9
JR
6592 if (logic_cc) {
6593 gen_logic_CC(tmp2);
6594 }
21aeb343 6595 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6596 }
6597 break;
6598 case 0x0e:
f669df27 6599 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6600 if (logic_cc) {
6601 gen_logic_CC(tmp);
6602 }
21aeb343 6603 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6604 break;
6605 default:
6606 case 0x0f:
e9bb4aa9
JR
6607 tcg_gen_not_i32(tmp2, tmp2);
6608 if (logic_cc) {
6609 gen_logic_CC(tmp2);
6610 }
21aeb343 6611 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6612 break;
6613 }
e9bb4aa9
JR
6614 if (op1 != 0x0f && op1 != 0x0d) {
6615 dead_tmp(tmp2);
6616 }
9ee6e8bb
PB
6617 } else {
6618 /* other instructions */
6619 op1 = (insn >> 24) & 0xf;
6620 switch(op1) {
6621 case 0x0:
6622 case 0x1:
6623 /* multiplies, extra load/stores */
6624 sh = (insn >> 5) & 3;
6625 if (sh == 0) {
6626 if (op1 == 0x0) {
6627 rd = (insn >> 16) & 0xf;
6628 rn = (insn >> 12) & 0xf;
6629 rs = (insn >> 8) & 0xf;
6630 rm = (insn) & 0xf;
6631 op1 = (insn >> 20) & 0xf;
6632 switch (op1) {
6633 case 0: case 1: case 2: case 3: case 6:
6634 /* 32 bit mul */
5e3f878a
PB
6635 tmp = load_reg(s, rs);
6636 tmp2 = load_reg(s, rm);
6637 tcg_gen_mul_i32(tmp, tmp, tmp2);
6638 dead_tmp(tmp2);
9ee6e8bb
PB
6639 if (insn & (1 << 22)) {
6640 /* Subtract (mls) */
6641 ARCH(6T2);
5e3f878a
PB
6642 tmp2 = load_reg(s, rn);
6643 tcg_gen_sub_i32(tmp, tmp2, tmp);
6644 dead_tmp(tmp2);
9ee6e8bb
PB
6645 } else if (insn & (1 << 21)) {
6646 /* Add */
5e3f878a
PB
6647 tmp2 = load_reg(s, rn);
6648 tcg_gen_add_i32(tmp, tmp, tmp2);
6649 dead_tmp(tmp2);
9ee6e8bb
PB
6650 }
6651 if (insn & (1 << 20))
5e3f878a
PB
6652 gen_logic_CC(tmp);
6653 store_reg(s, rd, tmp);
9ee6e8bb 6654 break;
8aac08b1
AJ
6655 case 4:
6656 /* 64 bit mul double accumulate (UMAAL) */
6657 ARCH(6);
6658 tmp = load_reg(s, rs);
6659 tmp2 = load_reg(s, rm);
6660 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6661 gen_addq_lo(s, tmp64, rn);
6662 gen_addq_lo(s, tmp64, rd);
6663 gen_storeq_reg(s, rn, rd, tmp64);
6664 tcg_temp_free_i64(tmp64);
6665 break;
6666 case 8: case 9: case 10: case 11:
6667 case 12: case 13: case 14: case 15:
6668 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
6669 tmp = load_reg(s, rs);
6670 tmp2 = load_reg(s, rm);
8aac08b1 6671 if (insn & (1 << 22)) {
a7812ae4 6672 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 6673 } else {
a7812ae4 6674 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
6675 }
6676 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 6677 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 6678 }
8aac08b1 6679 if (insn & (1 << 20)) {
a7812ae4 6680 gen_logicq_cc(tmp64);
8aac08b1 6681 }
a7812ae4 6682 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6683 tcg_temp_free_i64(tmp64);
9ee6e8bb 6684 break;
8aac08b1
AJ
6685 default:
6686 goto illegal_op;
9ee6e8bb
PB
6687 }
6688 } else {
6689 rn = (insn >> 16) & 0xf;
6690 rd = (insn >> 12) & 0xf;
6691 if (insn & (1 << 23)) {
6692 /* load/store exclusive */
86753403
PB
6693 op1 = (insn >> 21) & 0x3;
6694 if (op1)
a47f43d2 6695 ARCH(6K);
86753403
PB
6696 else
6697 ARCH(6);
3174f8e9 6698 addr = tcg_temp_local_new_i32();
98a46317 6699 load_reg_var(s, addr, rn);
9ee6e8bb 6700 if (insn & (1 << 20)) {
86753403
PB
6701 switch (op1) {
6702 case 0: /* ldrex */
426f5abc 6703 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6704 break;
6705 case 1: /* ldrexd */
426f5abc 6706 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6707 break;
6708 case 2: /* ldrexb */
426f5abc 6709 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6710 break;
6711 case 3: /* ldrexh */
426f5abc 6712 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6713 break;
6714 default:
6715 abort();
6716 }
9ee6e8bb
PB
6717 } else {
6718 rm = insn & 0xf;
86753403
PB
6719 switch (op1) {
6720 case 0: /* strex */
426f5abc 6721 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6722 break;
6723 case 1: /* strexd */
502e64fe 6724 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6725 break;
6726 case 2: /* strexb */
426f5abc 6727 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6728 break;
6729 case 3: /* strexh */
426f5abc 6730 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6731 break;
6732 default:
6733 abort();
6734 }
9ee6e8bb 6735 }
3174f8e9 6736 tcg_temp_free(addr);
9ee6e8bb
PB
6737 } else {
6738 /* SWP instruction */
6739 rm = (insn) & 0xf;
6740
8984bd2e
PB
6741 /* ??? This is not really atomic. However we know
6742 we never have multiple CPUs running in parallel,
6743 so it is good enough. */
6744 addr = load_reg(s, rn);
6745 tmp = load_reg(s, rm);
9ee6e8bb 6746 if (insn & (1 << 22)) {
8984bd2e
PB
6747 tmp2 = gen_ld8u(addr, IS_USER(s));
6748 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6749 } else {
8984bd2e
PB
6750 tmp2 = gen_ld32(addr, IS_USER(s));
6751 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6752 }
8984bd2e
PB
6753 dead_tmp(addr);
6754 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6755 }
6756 }
6757 } else {
6758 int address_offset;
6759 int load;
6760 /* Misc load/store */
6761 rn = (insn >> 16) & 0xf;
6762 rd = (insn >> 12) & 0xf;
b0109805 6763 addr = load_reg(s, rn);
9ee6e8bb 6764 if (insn & (1 << 24))
b0109805 6765 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6766 address_offset = 0;
6767 if (insn & (1 << 20)) {
6768 /* load */
6769 switch(sh) {
6770 case 1:
b0109805 6771 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6772 break;
6773 case 2:
b0109805 6774 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6775 break;
6776 default:
6777 case 3:
b0109805 6778 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6779 break;
6780 }
6781 load = 1;
6782 } else if (sh & 2) {
6783 /* doubleword */
6784 if (sh & 1) {
6785 /* store */
b0109805
PB
6786 tmp = load_reg(s, rd);
6787 gen_st32(tmp, addr, IS_USER(s));
6788 tcg_gen_addi_i32(addr, addr, 4);
6789 tmp = load_reg(s, rd + 1);
6790 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6791 load = 0;
6792 } else {
6793 /* load */
b0109805
PB
6794 tmp = gen_ld32(addr, IS_USER(s));
6795 store_reg(s, rd, tmp);
6796 tcg_gen_addi_i32(addr, addr, 4);
6797 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6798 rd++;
6799 load = 1;
6800 }
6801 address_offset = -4;
6802 } else {
6803 /* store */
b0109805
PB
6804 tmp = load_reg(s, rd);
6805 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6806 load = 0;
6807 }
6808 /* Perform base writeback before the loaded value to
6809 ensure correct behavior with overlapping index registers.
6810 ldrd with base writeback is undefined if the
6811 destination and index registers overlap. */
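                /* E.g. for "ldrh r1, [r1], #2" the post-indexed address is
                   written back first and the loaded halfword then overwrites
                   it below, so the load result wins. */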
6812 if (!(insn & (1 << 24))) {
b0109805
PB
6813 gen_add_datah_offset(s, insn, address_offset, addr);
6814 store_reg(s, rn, addr);
9ee6e8bb
PB
6815 } else if (insn & (1 << 21)) {
6816 if (address_offset)
b0109805
PB
6817 tcg_gen_addi_i32(addr, addr, address_offset);
6818 store_reg(s, rn, addr);
6819 } else {
6820 dead_tmp(addr);
9ee6e8bb
PB
6821 }
6822 if (load) {
6823 /* Complete the load. */
b0109805 6824 store_reg(s, rd, tmp);
9ee6e8bb
PB
6825 }
6826 }
6827 break;
6828 case 0x4:
6829 case 0x5:
6830 goto do_ldst;
6831 case 0x6:
6832 case 0x7:
6833 if (insn & (1 << 4)) {
6834 ARCH(6);
6835 /* Armv6 Media instructions. */
6836 rm = insn & 0xf;
6837 rn = (insn >> 16) & 0xf;
2c0262af 6838 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6839 rs = (insn >> 8) & 0xf;
6840 switch ((insn >> 23) & 3) {
6841 case 0: /* Parallel add/subtract. */
6842 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6843 tmp = load_reg(s, rn);
6844 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6845 sh = (insn >> 5) & 7;
6846 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6847 goto illegal_op;
6ddbc6e4
PB
6848 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6849 dead_tmp(tmp2);
6850 store_reg(s, rd, tmp);
9ee6e8bb
PB
6851 break;
6852 case 1:
6853 if ((insn & 0x00700020) == 0) {
6c95676b 6854 /* Halfword pack. */
3670669c
PB
6855 tmp = load_reg(s, rn);
6856 tmp2 = load_reg(s, rm);
9ee6e8bb 6857 shift = (insn >> 7) & 0x1f;
3670669c
PB
6858 if (insn & (1 << 6)) {
6859 /* pkhtb */
22478e79
AZ
6860 if (shift == 0)
6861 shift = 31;
6862 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6863 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6864 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6865 } else {
6866 /* pkhbt */
22478e79
AZ
6867 if (shift)
6868 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6869 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6870 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6871 }
6872 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6873 dead_tmp(tmp2);
3670669c 6874 store_reg(s, rd, tmp);
9ee6e8bb
PB
6875 } else if ((insn & 0x00200020) == 0x00200000) {
6876 /* [us]sat */
6ddbc6e4 6877 tmp = load_reg(s, rm);
9ee6e8bb
PB
6878 shift = (insn >> 7) & 0x1f;
6879 if (insn & (1 << 6)) {
6880 if (shift == 0)
6881 shift = 31;
6ddbc6e4 6882 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6883 } else {
6ddbc6e4 6884 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6885 }
6886 sh = (insn >> 16) & 0x1f;
6887 if (sh != 0) {
b75263d6 6888 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6889 if (insn & (1 << 22))
b75263d6 6890 gen_helper_usat(tmp, tmp, tmp2);
9ee6e8bb 6891 else
b75263d6
JR
6892 gen_helper_ssat(tmp, tmp, tmp2);
6893 tcg_temp_free_i32(tmp2);
9ee6e8bb 6894 }
6ddbc6e4 6895 store_reg(s, rd, tmp);
9ee6e8bb
PB
6896 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6897 /* [us]sat16 */
6ddbc6e4 6898 tmp = load_reg(s, rm);
9ee6e8bb
PB
6899 sh = (insn >> 16) & 0x1f;
6900 if (sh != 0) {
b75263d6 6901 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6902 if (insn & (1 << 22))
b75263d6 6903 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 6904 else
b75263d6
JR
6905 gen_helper_ssat16(tmp, tmp, tmp2);
6906 tcg_temp_free_i32(tmp2);
9ee6e8bb 6907 }
6ddbc6e4 6908 store_reg(s, rd, tmp);
9ee6e8bb
PB
6909 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6910 /* Select bytes. */
6ddbc6e4
PB
6911 tmp = load_reg(s, rn);
6912 tmp2 = load_reg(s, rm);
6913 tmp3 = new_tmp();
6914 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6915 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6916 dead_tmp(tmp3);
6917 dead_tmp(tmp2);
6918 store_reg(s, rd, tmp);
9ee6e8bb 6919 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6920 tmp = load_reg(s, rm);
9ee6e8bb
PB
6921 shift = (insn >> 10) & 3;
6922 /* ??? In many cases it's not necessary to do a
6923 rotate; a shift is sufficient. */
6924 if (shift != 0)
f669df27 6925 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6926 op1 = (insn >> 20) & 7;
6927 switch (op1) {
5e3f878a
PB
6928 case 0: gen_sxtb16(tmp); break;
6929 case 2: gen_sxtb(tmp); break;
6930 case 3: gen_sxth(tmp); break;
6931 case 4: gen_uxtb16(tmp); break;
6932 case 6: gen_uxtb(tmp); break;
6933 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6934 default: goto illegal_op;
6935 }
6936 if (rn != 15) {
5e3f878a 6937 tmp2 = load_reg(s, rn);
9ee6e8bb 6938 if ((op1 & 3) == 0) {
5e3f878a 6939 gen_add16(tmp, tmp2);
9ee6e8bb 6940 } else {
5e3f878a
PB
6941 tcg_gen_add_i32(tmp, tmp, tmp2);
6942 dead_tmp(tmp2);
9ee6e8bb
PB
6943 }
6944 }
6c95676b 6945 store_reg(s, rd, tmp);
9ee6e8bb
PB
6946 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6947 /* rev */
b0109805 6948 tmp = load_reg(s, rm);
9ee6e8bb
PB
6949 if (insn & (1 << 22)) {
6950 if (insn & (1 << 7)) {
b0109805 6951 gen_revsh(tmp);
9ee6e8bb
PB
6952 } else {
6953 ARCH(6T2);
b0109805 6954 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6955 }
6956 } else {
6957 if (insn & (1 << 7))
b0109805 6958 gen_rev16(tmp);
9ee6e8bb 6959 else
66896cb8 6960 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6961 }
b0109805 6962 store_reg(s, rd, tmp);
9ee6e8bb
PB
6963 } else {
6964 goto illegal_op;
6965 }
6966 break;
6967 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6968 tmp = load_reg(s, rm);
6969 tmp2 = load_reg(s, rs);
9ee6e8bb 6970 if (insn & (1 << 20)) {
838fa72d
AJ
6971 /* Signed multiply most significant [accumulate].
6972 (SMMUL, SMMLA, SMMLS) */
a7812ae4 6973 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 6974
955a7dd5 6975 if (rd != 15) {
838fa72d 6976 tmp = load_reg(s, rd);
9ee6e8bb 6977 if (insn & (1 << 6)) {
838fa72d 6978 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 6979 } else {
838fa72d 6980 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
6981 }
6982 }
838fa72d
AJ
6983 if (insn & (1 << 5)) {
6984 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6985 }
6986 tcg_gen_shri_i64(tmp64, tmp64, 32);
6987 tmp = new_tmp();
6988 tcg_gen_trunc_i64_i32(tmp, tmp64);
6989 tcg_temp_free_i64(tmp64);
955a7dd5 6990 store_reg(s, rn, tmp);
9ee6e8bb
PB
6991 } else {
6992 if (insn & (1 << 5))
5e3f878a
PB
6993 gen_swap_half(tmp2);
6994 gen_smul_dual(tmp, tmp2);
6995 /* This addition cannot overflow. */
6996 if (insn & (1 << 6)) {
6997 tcg_gen_sub_i32(tmp, tmp, tmp2);
6998 } else {
6999 tcg_gen_add_i32(tmp, tmp, tmp2);
7000 }
7001 dead_tmp(tmp2);
9ee6e8bb 7002 if (insn & (1 << 22)) {
5e3f878a 7003 /* smlald, smlsld */
a7812ae4
PB
7004 tmp64 = tcg_temp_new_i64();
7005 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7006 dead_tmp(tmp);
a7812ae4
PB
7007 gen_addq(s, tmp64, rd, rn);
7008 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7009 tcg_temp_free_i64(tmp64);
9ee6e8bb 7010 } else {
5e3f878a 7011 /* smuad, smusd, smlad, smlsd */
22478e79 7012 if (rd != 15)
9ee6e8bb 7013 {
22478e79 7014 tmp2 = load_reg(s, rd);
5e3f878a
PB
7015 gen_helper_add_setq(tmp, tmp, tmp2);
7016 dead_tmp(tmp2);
9ee6e8bb 7017 }
22478e79 7018 store_reg(s, rn, tmp);
9ee6e8bb
PB
7019 }
7020 }
7021 break;
7022 case 3:
7023 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7024 switch (op1) {
7025 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7026 ARCH(6);
7027 tmp = load_reg(s, rm);
7028 tmp2 = load_reg(s, rs);
7029 gen_helper_usad8(tmp, tmp, tmp2);
7030 dead_tmp(tmp2);
ded9d295
AZ
7031 if (rd != 15) {
7032 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
7033 tcg_gen_add_i32(tmp, tmp, tmp2);
7034 dead_tmp(tmp2);
9ee6e8bb 7035 }
ded9d295 7036 store_reg(s, rn, tmp);
9ee6e8bb
PB
7037 break;
7038 case 0x20: case 0x24: case 0x28: case 0x2c:
7039 /* Bitfield insert/clear. */
7040 ARCH(6T2);
7041 shift = (insn >> 7) & 0x1f;
7042 i = (insn >> 16) & 0x1f;
7043 i = i + 1 - shift;
7044 if (rm == 15) {
5e3f878a
PB
7045 tmp = new_tmp();
7046 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7047 } else {
5e3f878a 7048 tmp = load_reg(s, rm);
9ee6e8bb
PB
7049 }
7050 if (i != 32) {
5e3f878a 7051 tmp2 = load_reg(s, rd);
8f8e3aa4 7052 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 7053 dead_tmp(tmp2);
9ee6e8bb 7054 }
5e3f878a 7055 store_reg(s, rd, tmp);
9ee6e8bb
PB
7056 break;
7057 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7058 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7059 ARCH(6T2);
5e3f878a 7060 tmp = load_reg(s, rm);
9ee6e8bb
PB
7061 shift = (insn >> 7) & 0x1f;
7062 i = ((insn >> 16) & 0x1f) + 1;
7063 if (shift + i > 32)
7064 goto illegal_op;
7065 if (i < 32) {
7066 if (op1 & 0x20) {
5e3f878a 7067 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7068 } else {
5e3f878a 7069 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7070 }
7071 }
5e3f878a 7072 store_reg(s, rd, tmp);
9ee6e8bb
PB
7073 break;
7074 default:
7075 goto illegal_op;
7076 }
7077 break;
7078 }
7079 break;
7080 }
7081 do_ldst:
7082 /* Check for undefined extension instructions
7083 * per the ARM Bible, i.e.:
7084 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7085 */
7086 sh = (0xf << 20) | (0xf << 4);
7087 if (op1 == 0x7 && ((insn & sh) == sh))
7088 {
7089 goto illegal_op;
7090 }
7091 /* load/store byte/word */
7092 rn = (insn >> 16) & 0xf;
7093 rd = (insn >> 12) & 0xf;
b0109805 7094 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7095 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7096 if (insn & (1 << 24))
b0109805 7097 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7098 if (insn & (1 << 20)) {
7099 /* load */
9ee6e8bb 7100 if (insn & (1 << 22)) {
b0109805 7101 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7102 } else {
b0109805 7103 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7104 }
9ee6e8bb
PB
7105 } else {
7106 /* store */
b0109805 7107 tmp = load_reg(s, rd);
9ee6e8bb 7108 if (insn & (1 << 22))
b0109805 7109 gen_st8(tmp, tmp2, i);
9ee6e8bb 7110 else
b0109805 7111 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7112 }
7113 if (!(insn & (1 << 24))) {
b0109805
PB
7114 gen_add_data_offset(s, insn, tmp2);
7115 store_reg(s, rn, tmp2);
7116 } else if (insn & (1 << 21)) {
7117 store_reg(s, rn, tmp2);
7118 } else {
7119 dead_tmp(tmp2);
9ee6e8bb
PB
7120 }
7121 if (insn & (1 << 20)) {
7122 /* Complete the load. */
7123 if (rd == 15)
b0109805 7124 gen_bx(s, tmp);
9ee6e8bb 7125 else
b0109805 7126 store_reg(s, rd, tmp);
9ee6e8bb
PB
7127 }
7128 break;
7129 case 0x08:
7130 case 0x09:
7131 {
7132 int j, n, user, loaded_base;
b0109805 7133 TCGv loaded_var;
9ee6e8bb
PB
7134 /* load/store multiple words */
7135 /* XXX: store correct base if write back */
7136 user = 0;
7137 if (insn & (1 << 22)) {
7138 if (IS_USER(s))
7139 goto illegal_op; /* only usable in supervisor mode */
7140
7141 if ((insn & (1 << 15)) == 0)
7142 user = 1;
7143 }
7144 rn = (insn >> 16) & 0xf;
b0109805 7145 addr = load_reg(s, rn);
9ee6e8bb
PB
7146
7147 /* compute total size */
7148 loaded_base = 0;
a50f5b91 7149 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7150 n = 0;
7151 for(i=0;i<16;i++) {
7152 if (insn & (1 << i))
7153 n++;
7154 }
7155 /* XXX: test invalid n == 0 case ? */
7156 if (insn & (1 << 23)) {
7157 if (insn & (1 << 24)) {
7158 /* pre increment */
b0109805 7159 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7160 } else {
7161 /* post increment */
7162 }
7163 } else {
7164 if (insn & (1 << 24)) {
7165 /* pre decrement */
b0109805 7166 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7167 } else {
7168 /* post decrement */
7169 if (n != 1)
b0109805 7170 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7171 }
7172 }
7173 j = 0;
7174 for(i=0;i<16;i++) {
7175 if (insn & (1 << i)) {
7176 if (insn & (1 << 20)) {
7177 /* load */
b0109805 7178 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7179 if (i == 15) {
b0109805 7180 gen_bx(s, tmp);
9ee6e8bb 7181 } else if (user) {
b75263d6
JR
7182 tmp2 = tcg_const_i32(i);
7183 gen_helper_set_user_reg(tmp2, tmp);
7184 tcg_temp_free_i32(tmp2);
b0109805 7185 dead_tmp(tmp);
9ee6e8bb 7186 } else if (i == rn) {
b0109805 7187 loaded_var = tmp;
9ee6e8bb
PB
7188 loaded_base = 1;
7189 } else {
b0109805 7190 store_reg(s, i, tmp);
9ee6e8bb
PB
7191 }
7192 } else {
7193 /* store */
7194 if (i == 15) {
7195 /* special case: r15 = PC + 8 */
7196 val = (long)s->pc + 4;
b0109805
PB
7197 tmp = new_tmp();
7198 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7199 } else if (user) {
b0109805 7200 tmp = new_tmp();
b75263d6
JR
7201 tmp2 = tcg_const_i32(i);
7202 gen_helper_get_user_reg(tmp, tmp2);
7203 tcg_temp_free_i32(tmp2);
9ee6e8bb 7204 } else {
b0109805 7205 tmp = load_reg(s, i);
9ee6e8bb 7206 }
b0109805 7207 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7208 }
7209 j++;
7210 /* no need to add after the last transfer */
7211 if (j != n)
b0109805 7212 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7213 }
7214 }
7215 if (insn & (1 << 21)) {
7216 /* write back */
7217 if (insn & (1 << 23)) {
7218 if (insn & (1 << 24)) {
7219 /* pre increment */
7220 } else {
7221 /* post increment */
b0109805 7222 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7223 }
7224 } else {
7225 if (insn & (1 << 24)) {
7226 /* pre decrement */
7227 if (n != 1)
b0109805 7228 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7229 } else {
7230 /* post decrement */
b0109805 7231 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7232 }
7233 }
b0109805
PB
7234 store_reg(s, rn, addr);
7235 } else {
7236 dead_tmp(addr);
9ee6e8bb
PB
7237 }
7238 if (loaded_base) {
b0109805 7239 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7240 }
7241 if ((insn & (1 << 22)) && !user) {
7242 /* Restore CPSR from SPSR. */
d9ba4830
PB
7243 tmp = load_cpu_field(spsr);
7244 gen_set_cpsr(tmp, 0xffffffff);
7245 dead_tmp(tmp);
9ee6e8bb
PB
7246 s->is_jmp = DISAS_UPDATE;
7247 }
7248 }
7249 break;
7250 case 0xa:
7251 case 0xb:
7252 {
7253 int32_t offset;
7254
7255 /* branch (and link) */
7256 val = (int32_t)s->pc;
7257 if (insn & (1 << 24)) {
5e3f878a
PB
7258 tmp = new_tmp();
7259 tcg_gen_movi_i32(tmp, val);
7260 store_reg(s, 14, tmp);
9ee6e8bb
PB
7261 }
7262 offset = (((int32_t)insn << 8) >> 8);
7263 val += (offset << 2) + 4;
7264 gen_jmp(s, val);
7265 }
7266 break;
7267 case 0xc:
7268 case 0xd:
7269 case 0xe:
7270 /* Coprocessor. */
7271 if (disas_coproc_insn(env, s, insn))
7272 goto illegal_op;
7273 break;
7274 case 0xf:
7275 /* swi */
5e3f878a 7276 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7277 s->is_jmp = DISAS_SWI;
7278 break;
7279 default:
7280 illegal_op:
7281 gen_set_condexec(s);
5e3f878a 7282 gen_set_pc_im(s->pc - 4);
d9ba4830 7283 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7284 s->is_jmp = DISAS_JUMP;
7285 break;
7286 }
7287 }
7288}
7289
7290/* Return true if this is a Thumb-2 logical op. */
7291static int
7292thumb2_logic_op(int op)
7293{
7294 return (op < 8);
7295}
7296
7297/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7298 then set condition code flags based on the result of the operation.
7299 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7300 to the high bit of T1.
7301 Returns zero if the opcode is valid. */
7302
7303static int
396e467c 7304gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7305{
7306 int logic_cc;
7307
7308 logic_cc = 0;
7309 switch (op) {
7310 case 0: /* and */
396e467c 7311 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7312 logic_cc = conds;
7313 break;
7314 case 1: /* bic */
f669df27 7315 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7316 logic_cc = conds;
7317 break;
7318 case 2: /* orr */
396e467c 7319 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7320 logic_cc = conds;
7321 break;
7322 case 3: /* orn */
396e467c
FN
7323 tcg_gen_not_i32(t1, t1);
7324 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7325 logic_cc = conds;
7326 break;
7327 case 4: /* eor */
396e467c 7328 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7329 logic_cc = conds;
7330 break;
7331 case 8: /* add */
7332 if (conds)
396e467c 7333 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7334 else
396e467c 7335 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7336 break;
7337 case 10: /* adc */
7338 if (conds)
396e467c 7339 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7340 else
396e467c 7341 gen_adc(t0, t1);
9ee6e8bb
PB
7342 break;
7343 case 11: /* sbc */
7344 if (conds)
396e467c 7345 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7346 else
396e467c 7347 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7348 break;
7349 case 13: /* sub */
7350 if (conds)
396e467c 7351 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7352 else
396e467c 7353 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7354 break;
7355 case 14: /* rsb */
7356 if (conds)
396e467c 7357 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7358 else
396e467c 7359 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7360 break;
7361 default: /* 5, 6, 7, 9, 12, 15. */
7362 return 1;
7363 }
7364 if (logic_cc) {
396e467c 7365 gen_logic_CC(t0);
9ee6e8bb 7366 if (shifter_out)
396e467c 7367 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7368 }
7369 return 0;
7370}
7371
7372/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7373 is not legal. */
7374static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7375{
b0109805 7376 uint32_t insn, imm, shift, offset;
9ee6e8bb 7377 uint32_t rd, rn, rm, rs;
b26eefb6 7378 TCGv tmp;
6ddbc6e4
PB
7379 TCGv tmp2;
7380 TCGv tmp3;
b0109805 7381 TCGv addr;
a7812ae4 7382 TCGv_i64 tmp64;
9ee6e8bb
PB
7383 int op;
7384 int shiftop;
7385 int conds;
7386 int logic_cc;
7387
7388 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7389 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7390 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7391 16-bit instructions to get correct prefetch abort behavior. */
7392 insn = insn_hw1;
7393 if ((insn & (1 << 12)) == 0) {
7394 /* Second half of blx. */
7395 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7396 tmp = load_reg(s, 14);
7397 tcg_gen_addi_i32(tmp, tmp, offset);
7398 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7399
d9ba4830 7400 tmp2 = new_tmp();
b0109805 7401 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7402 store_reg(s, 14, tmp2);
7403 gen_bx(s, tmp);
9ee6e8bb
PB
7404 return 0;
7405 }
7406 if (insn & (1 << 11)) {
7407 /* Second half of bl. */
7408 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7409 tmp = load_reg(s, 14);
6a0d8a1d 7410 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7411
d9ba4830 7412 tmp2 = new_tmp();
b0109805 7413 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7414 store_reg(s, 14, tmp2);
7415 gen_bx(s, tmp);
9ee6e8bb
PB
7416 return 0;
7417 }
7418 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7419 /* Instruction spans a page boundary. Implement it as two
7420 16-bit instructions in case the second half causes a
7421 prefetch abort. */
7422 offset = ((int32_t)insn << 21) >> 9;
396e467c 7423 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7424 return 0;
7425 }
7426 /* Fall through to 32-bit decode. */
7427 }
7428
7429 insn = lduw_code(s->pc);
7430 s->pc += 2;
7431 insn |= (uint32_t)insn_hw1 << 16;
7432
7433 if ((insn & 0xf800e800) != 0xf000e800) {
7434 ARCH(6T2);
7435 }
7436
7437 rn = (insn >> 16) & 0xf;
7438 rs = (insn >> 12) & 0xf;
7439 rd = (insn >> 8) & 0xf;
7440 rm = insn & 0xf;
7441 switch ((insn >> 25) & 0xf) {
7442 case 0: case 1: case 2: case 3:
7443 /* 16-bit instructions. Should never happen. */
7444 abort();
7445 case 4:
7446 if (insn & (1 << 22)) {
7447 /* Other load/store, table branch. */
7448 if (insn & 0x01200000) {
7449 /* Load/store doubleword. */
7450 if (rn == 15) {
b0109805
PB
7451 addr = new_tmp();
7452 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7453 } else {
b0109805 7454 addr = load_reg(s, rn);
9ee6e8bb
PB
7455 }
7456 offset = (insn & 0xff) * 4;
7457 if ((insn & (1 << 23)) == 0)
7458 offset = -offset;
7459 if (insn & (1 << 24)) {
b0109805 7460 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7461 offset = 0;
7462 }
7463 if (insn & (1 << 20)) {
7464 /* ldrd */
b0109805
PB
7465 tmp = gen_ld32(addr, IS_USER(s));
7466 store_reg(s, rs, tmp);
7467 tcg_gen_addi_i32(addr, addr, 4);
7468 tmp = gen_ld32(addr, IS_USER(s));
7469 store_reg(s, rd, tmp);
9ee6e8bb
PB
7470 } else {
7471 /* strd */
b0109805
PB
7472 tmp = load_reg(s, rs);
7473 gen_st32(tmp, addr, IS_USER(s));
7474 tcg_gen_addi_i32(addr, addr, 4);
7475 tmp = load_reg(s, rd);
7476 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7477 }
7478 if (insn & (1 << 21)) {
7479 /* Base writeback. */
7480 if (rn == 15)
7481 goto illegal_op;
b0109805
PB
7482 tcg_gen_addi_i32(addr, addr, offset - 4);
7483 store_reg(s, rn, addr);
7484 } else {
7485 dead_tmp(addr);
9ee6e8bb
PB
7486 }
7487 } else if ((insn & (1 << 23)) == 0) {
7488 /* Load/store exclusive word. */
3174f8e9 7489 addr = tcg_temp_local_new();
98a46317 7490 load_reg_var(s, addr, rn);
426f5abc 7491 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7492 if (insn & (1 << 20)) {
426f5abc 7493 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7494 } else {
426f5abc 7495 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7496 }
3174f8e9 7497 tcg_temp_free(addr);
9ee6e8bb
PB
7498 } else if ((insn & (1 << 6)) == 0) {
7499 /* Table Branch. */
7500 if (rn == 15) {
b0109805
PB
7501 addr = new_tmp();
7502 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7503 } else {
b0109805 7504 addr = load_reg(s, rn);
9ee6e8bb 7505 }
b26eefb6 7506 tmp = load_reg(s, rm);
b0109805 7507 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7508 if (insn & (1 << 4)) {
7509 /* tbh */
b0109805 7510 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7511 dead_tmp(tmp);
b0109805 7512 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7513 } else { /* tbb */
b26eefb6 7514 dead_tmp(tmp);
b0109805 7515 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7516 }
b0109805
PB
7517 dead_tmp(addr);
7518 tcg_gen_shli_i32(tmp, tmp, 1);
7519 tcg_gen_addi_i32(tmp, tmp, s->pc);
7520 store_reg(s, 15, tmp);
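                /* Worked example (hypothetical values): for "tbb [r0, r1]" with
                   r1 = 3 and the table byte at r0 + 3 equal to 5, the new PC is
                   s->pc + 2 * 5, i.e. ten bytes past the address of the
                   instruction that follows the TBB. */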
9ee6e8bb
PB
7521 } else {
7522 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7523 ARCH(7);
9ee6e8bb 7524 op = (insn >> 4) & 0x3;
426f5abc
PB
7525 if (op == 2) {
7526 goto illegal_op;
7527 }
3174f8e9 7528 addr = tcg_temp_local_new();
98a46317 7529 load_reg_var(s, addr, rn);
9ee6e8bb 7530 if (insn & (1 << 20)) {
426f5abc 7531 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7532 } else {
426f5abc 7533 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7534 }
3174f8e9 7535 tcg_temp_free(addr);
9ee6e8bb
PB
7536 }
7537 } else {
7538 /* Load/store multiple, RFE, SRS. */
7539 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7540 /* Not available in user mode. */
b0109805 7541 if (IS_USER(s))
9ee6e8bb
PB
7542 goto illegal_op;
7543 if (insn & (1 << 20)) {
7544 /* rfe */
b0109805
PB
7545 addr = load_reg(s, rn);
7546 if ((insn & (1 << 24)) == 0)
7547 tcg_gen_addi_i32(addr, addr, -8);
7548 /* Load PC into tmp and CPSR into tmp2. */
7549 tmp = gen_ld32(addr, 0);
7550 tcg_gen_addi_i32(addr, addr, 4);
7551 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7552 if (insn & (1 << 21)) {
7553 /* Base writeback. */
b0109805
PB
7554 if (insn & (1 << 24)) {
7555 tcg_gen_addi_i32(addr, addr, 4);
7556 } else {
7557 tcg_gen_addi_i32(addr, addr, -4);
7558 }
7559 store_reg(s, rn, addr);
7560 } else {
7561 dead_tmp(addr);
9ee6e8bb 7562 }
b0109805 7563 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7564 } else {
7565 /* srs */
7566 op = (insn & 0x1f);
39ea3d4e
PM
7567 addr = new_tmp();
7568 tmp = tcg_const_i32(op);
7569 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7570 tcg_temp_free_i32(tmp);
9ee6e8bb 7571 if ((insn & (1 << 24)) == 0) {
b0109805 7572 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7573 }
b0109805
PB
7574 tmp = load_reg(s, 14);
7575 gen_st32(tmp, addr, 0);
7576 tcg_gen_addi_i32(addr, addr, 4);
7577 tmp = new_tmp();
7578 gen_helper_cpsr_read(tmp);
7579 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7580 if (insn & (1 << 21)) {
7581 if ((insn & (1 << 24)) == 0) {
b0109805 7582 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7583 } else {
b0109805 7584 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 7585 }
39ea3d4e
PM
7586 tmp = tcg_const_i32(op);
7587 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7588 tcg_temp_free_i32(tmp);
b0109805
PB
7589 } else {
7590 dead_tmp(addr);
9ee6e8bb
PB
7591 }
7592 }
7593 } else {
7594 int i;
7595 /* Load/store multiple. */
b0109805 7596 addr = load_reg(s, rn);
9ee6e8bb
PB
7597 offset = 0;
7598 for (i = 0; i < 16; i++) {
7599 if (insn & (1 << i))
7600 offset += 4;
7601 }
7602 if (insn & (1 << 24)) {
b0109805 7603 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7604 }
7605
7606 for (i = 0; i < 16; i++) {
7607 if ((insn & (1 << i)) == 0)
7608 continue;
7609 if (insn & (1 << 20)) {
7610 /* Load. */
b0109805 7611 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7612 if (i == 15) {
b0109805 7613 gen_bx(s, tmp);
9ee6e8bb 7614 } else {
b0109805 7615 store_reg(s, i, tmp);
9ee6e8bb
PB
7616 }
7617 } else {
7618 /* Store. */
b0109805
PB
7619 tmp = load_reg(s, i);
7620 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7621 }
b0109805 7622 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7623 }
7624 if (insn & (1 << 21)) {
7625 /* Base register writeback. */
7626 if (insn & (1 << 24)) {
b0109805 7627 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7628 }
7629 /* Fault if writeback register is in register list. */
7630 if (insn & (1 << rn))
7631 goto illegal_op;
b0109805
PB
7632 store_reg(s, rn, addr);
7633 } else {
7634 dead_tmp(addr);
9ee6e8bb
PB
7635 }
7636 }
7637 }
7638 break;
2af9ab77
JB
7639 case 5:
7640
9ee6e8bb 7641 op = (insn >> 21) & 0xf;
2af9ab77
JB
7642 if (op == 6) {
7643 /* Halfword pack. */
7644 tmp = load_reg(s, rn);
7645 tmp2 = load_reg(s, rm);
7646 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7647 if (insn & (1 << 5)) {
7648 /* pkhtb */
7649 if (shift == 0)
7650 shift = 31;
7651 tcg_gen_sari_i32(tmp2, tmp2, shift);
7652 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7653 tcg_gen_ext16u_i32(tmp2, tmp2);
7654 } else {
7655 /* pkhbt */
7656 if (shift)
7657 tcg_gen_shli_i32(tmp2, tmp2, shift);
7658 tcg_gen_ext16u_i32(tmp, tmp);
7659 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7660 }
7661 tcg_gen_or_i32(tmp, tmp, tmp2);
7662 dead_tmp(tmp2);
3174f8e9
FN
7663 store_reg(s, rd, tmp);
7664 } else {
2af9ab77
JB
7665 /* Data processing register constant shift. */
7666 if (rn == 15) {
7667 tmp = new_tmp();
7668 tcg_gen_movi_i32(tmp, 0);
7669 } else {
7670 tmp = load_reg(s, rn);
7671 }
7672 tmp2 = load_reg(s, rm);
7673
7674 shiftop = (insn >> 4) & 3;
7675 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7676 conds = (insn & (1 << 20)) != 0;
7677 logic_cc = (conds && thumb2_logic_op(op));
7678 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7679 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7680 goto illegal_op;
7681 dead_tmp(tmp2);
7682 if (rd != 15) {
7683 store_reg(s, rd, tmp);
7684 } else {
7685 dead_tmp(tmp);
7686 }
3174f8e9 7687 }
9ee6e8bb
PB
7688 break;
7689 case 13: /* Misc data processing. */
7690 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7691 if (op < 4 && (insn & 0xf000) != 0xf000)
7692 goto illegal_op;
7693 switch (op) {
7694 case 0: /* Register controlled shift. */
8984bd2e
PB
7695 tmp = load_reg(s, rn);
7696 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7697 if ((insn & 0x70) != 0)
7698 goto illegal_op;
7699 op = (insn >> 21) & 3;
8984bd2e
PB
7700 logic_cc = (insn & (1 << 20)) != 0;
7701 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7702 if (logic_cc)
7703 gen_logic_CC(tmp);
21aeb343 7704 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7705 break;
7706 case 1: /* Sign/zero extend. */
5e3f878a 7707 tmp = load_reg(s, rm);
9ee6e8bb
PB
7708 shift = (insn >> 4) & 3;
7709 /* ??? In many cases it's not necessary to do a
7710 rotate, a shift is sufficient. */
7711 if (shift != 0)
f669df27 7712 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
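 /* The two-bit rotate field gives a rotation of 0, 8, 16 or 24 bits,
    i.e. it selects which byte or halfword of the source register ends
    up in the low bits before the sign/zero extension. */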
9ee6e8bb
PB
7713 op = (insn >> 20) & 7;
7714 switch (op) {
5e3f878a
PB
7715 case 0: gen_sxth(tmp); break;
7716 case 1: gen_uxth(tmp); break;
7717 case 2: gen_sxtb16(tmp); break;
7718 case 3: gen_uxtb16(tmp); break;
7719 case 4: gen_sxtb(tmp); break;
7720 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7721 default: goto illegal_op;
7722 }
7723 if (rn != 15) {
5e3f878a 7724 tmp2 = load_reg(s, rn);
9ee6e8bb 7725 if ((op >> 1) == 1) {
5e3f878a 7726 gen_add16(tmp, tmp2);
9ee6e8bb 7727 } else {
5e3f878a
PB
7728 tcg_gen_add_i32(tmp, tmp, tmp2);
7729 dead_tmp(tmp2);
9ee6e8bb
PB
7730 }
7731 }
5e3f878a 7732 store_reg(s, rd, tmp);
9ee6e8bb
PB
7733 break;
7734 case 2: /* SIMD add/subtract. */
7735 op = (insn >> 20) & 7;
7736 shift = (insn >> 4) & 7;
7737 if ((op & 3) == 3 || (shift & 3) == 3)
7738 goto illegal_op;
6ddbc6e4
PB
7739 tmp = load_reg(s, rn);
7740 tmp2 = load_reg(s, rm);
7741 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7742 dead_tmp(tmp2);
7743 store_reg(s, rd, tmp);
9ee6e8bb
PB
7744 break;
7745 case 3: /* Other data processing. */
7746 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7747 if (op < 4) {
7748 /* Saturating add/subtract. */
d9ba4830
PB
7749 tmp = load_reg(s, rn);
7750 tmp2 = load_reg(s, rm);
9ee6e8bb 7751 if (op & 1)
4809c612
JB
7752 gen_helper_double_saturate(tmp, tmp);
7753 if (op & 2)
d9ba4830 7754 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7755 else
d9ba4830
PB
7756 gen_helper_add_saturate(tmp, tmp, tmp2);
7757 dead_tmp(tmp2);
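 /* The op field thus covers QADD, QDADD, QSUB and QDSUB: bit 0
    saturating-doubles the Rn operand first, bit 1 turns the final
    saturating add into a subtract. */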
9ee6e8bb 7758 } else {
d9ba4830 7759 tmp = load_reg(s, rn);
9ee6e8bb
PB
7760 switch (op) {
7761 case 0x0a: /* rbit */
d9ba4830 7762 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7763 break;
7764 case 0x08: /* rev */
66896cb8 7765 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7766 break;
7767 case 0x09: /* rev16 */
d9ba4830 7768 gen_rev16(tmp);
9ee6e8bb
PB
7769 break;
7770 case 0x0b: /* revsh */
d9ba4830 7771 gen_revsh(tmp);
9ee6e8bb
PB
7772 break;
7773 case 0x10: /* sel */
d9ba4830 7774 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7775 tmp3 = new_tmp();
7776 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7777 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7778 dead_tmp(tmp3);
d9ba4830 7779 dead_tmp(tmp2);
9ee6e8bb
PB
7780 break;
7781 case 0x18: /* clz */
d9ba4830 7782 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7783 break;
7784 default:
7785 goto illegal_op;
7786 }
7787 }
d9ba4830 7788 store_reg(s, rd, tmp);
9ee6e8bb
PB
7789 break;
7790 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7791 op = (insn >> 4) & 0xf;
d9ba4830
PB
7792 tmp = load_reg(s, rn);
7793 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7794 switch ((insn >> 20) & 7) {
7795 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7796 tcg_gen_mul_i32(tmp, tmp, tmp2);
7797 dead_tmp(tmp2);
9ee6e8bb 7798 if (rs != 15) {
d9ba4830 7799 tmp2 = load_reg(s, rs);
9ee6e8bb 7800 if (op)
d9ba4830 7801 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7802 else
d9ba4830
PB
7803 tcg_gen_add_i32(tmp, tmp, tmp2);
7804 dead_tmp(tmp2);
9ee6e8bb 7805 }
9ee6e8bb
PB
7806 break;
7807 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7808 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7809 dead_tmp(tmp2);
9ee6e8bb 7810 if (rs != 15) {
d9ba4830
PB
7811 tmp2 = load_reg(s, rs);
7812 gen_helper_add_setq(tmp, tmp, tmp2);
7813 dead_tmp(tmp2);
9ee6e8bb 7814 }
9ee6e8bb
PB
7815 break;
7816 case 2: /* Dual multiply add. */
7817 case 4: /* Dual multiply subtract. */
7818 if (op)
d9ba4830
PB
7819 gen_swap_half(tmp2);
7820 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7821 /* This addition cannot overflow. */
7822 if (insn & (1 << 22)) {
d9ba4830 7823 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7824 } else {
d9ba4830 7825 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7826 }
d9ba4830 7827 dead_tmp(tmp2);
9ee6e8bb
PB
7828 if (rs != 15)
7829 {
d9ba4830
PB
7830 tmp2 = load_reg(s, rs);
7831 gen_helper_add_setq(tmp, tmp, tmp2);
7832 dead_tmp(tmp2);
9ee6e8bb 7833 }
9ee6e8bb
PB
7834 break;
7835 case 3: /* 32 * 16 -> 32msb */
7836 if (op)
d9ba4830 7837 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7838 else
d9ba4830 7839 gen_sxth(tmp2);
a7812ae4
PB
7840 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7841 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7842 tmp = new_tmp();
a7812ae4 7843 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7844 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7845 if (rs != 15)
7846 {
d9ba4830
PB
7847 tmp2 = load_reg(s, rs);
7848 gen_helper_add_setq(tmp, tmp, tmp2);
7849 dead_tmp(tmp2);
9ee6e8bb 7850 }
9ee6e8bb 7851 break;
838fa72d
AJ
7852 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7853 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7854 if (rs != 15) {
838fa72d
AJ
7855 tmp = load_reg(s, rs);
7856 if (insn & (1 << 20)) {
7857 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 7858 } else {
838fa72d 7859 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 7860 }
2c0262af 7861 }
838fa72d
AJ
7862 if (insn & (1 << 4)) {
7863 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7864 }
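 /* Adding 0x80000000 before taking the high word implements the "R"
    (rounding) variants SMMULR/SMMLAR/SMMLSR; without it the result is
    simply truncated. */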
7865 tcg_gen_shri_i64(tmp64, tmp64, 32);
7866 tmp = new_tmp();
7867 tcg_gen_trunc_i64_i32(tmp, tmp64);
7868 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7869 break;
7870 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7871 gen_helper_usad8(tmp, tmp, tmp2);
7872 dead_tmp(tmp2);
9ee6e8bb 7873 if (rs != 15) {
d9ba4830
PB
7874 tmp2 = load_reg(s, rs);
7875 tcg_gen_add_i32(tmp, tmp, tmp2);
7876 dead_tmp(tmp2);
5fd46862 7877 }
9ee6e8bb 7878 break;
2c0262af 7879 }
d9ba4830 7880 store_reg(s, rd, tmp);
2c0262af 7881 break;
9ee6e8bb
PB
7882 case 6: case 7: /* 64-bit multiply, Divide. */
7883 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7884 tmp = load_reg(s, rn);
7885 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7886 if ((op & 0x50) == 0x10) {
7887 /* sdiv, udiv */
7888 if (!arm_feature(env, ARM_FEATURE_DIV))
7889 goto illegal_op;
7890 if (op & 0x20)
5e3f878a 7891 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7892 else
5e3f878a
PB
7893 gen_helper_sdiv(tmp, tmp, tmp2);
7894 dead_tmp(tmp2);
7895 store_reg(s, rd, tmp);
9ee6e8bb
PB
7896 } else if ((op & 0xe) == 0xc) {
7897 /* Dual multiply accumulate long. */
7898 if (op & 1)
5e3f878a
PB
7899 gen_swap_half(tmp2);
7900 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7901 if (op & 0x10) {
5e3f878a 7902 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7903 } else {
5e3f878a 7904 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7905 }
5e3f878a 7906 dead_tmp(tmp2);
a7812ae4
PB
7907 /* BUGFIX */
7908 tmp64 = tcg_temp_new_i64();
7909 tcg_gen_ext_i32_i64(tmp64, tmp);
7910 dead_tmp(tmp);
7911 gen_addq(s, tmp64, rs, rd);
7912 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7913 tcg_temp_free_i64(tmp64);
2c0262af 7914 } else {
9ee6e8bb
PB
7915 if (op & 0x20) {
7916 /* Unsigned 64-bit multiply */
a7812ae4 7917 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7918 } else {
9ee6e8bb
PB
7919 if (op & 8) {
7920 /* smlalxy */
5e3f878a
PB
7921 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7922 dead_tmp(tmp2);
a7812ae4
PB
7923 tmp64 = tcg_temp_new_i64();
7924 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7925 dead_tmp(tmp);
9ee6e8bb
PB
7926 } else {
7927 /* Signed 64-bit multiply */
a7812ae4 7928 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7929 }
b5ff1b31 7930 }
9ee6e8bb
PB
7931 if (op & 4) {
7932 /* umaal */
a7812ae4
PB
7933 gen_addq_lo(s, tmp64, rs);
7934 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7935 } else if (op & 0x40) {
7936 /* 64-bit accumulate. */
a7812ae4 7937 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7938 }
a7812ae4 7939 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7940 tcg_temp_free_i64(tmp64);
5fd46862 7941 }
2c0262af 7942 break;
9ee6e8bb
PB
7943 }
7944 break;
7945 case 6: case 7: case 14: case 15:
7946 /* Coprocessor. */
7947 if (((insn >> 24) & 3) == 3) {
7948 /* Translate into the equivalent ARM encoding. */
7949 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7950 if (disas_neon_data_insn(env, s, insn))
7951 goto illegal_op;
7952 } else {
7953 if (insn & (1 << 28))
7954 goto illegal_op;
7955 if (disas_coproc_insn (env, s, insn))
7956 goto illegal_op;
7957 }
7958 break;
7959 case 8: case 9: case 10: case 11:
7960 if (insn & (1 << 15)) {
7961 /* Branches, misc control. */
7962 if (insn & 0x5000) {
7963 /* Unconditional branch. */
7964 /* signextend(hw1[10:0]) -> offset[:12]. */
7965 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7966 /* hw2[10:0] -> offset[11:1]. */
7967 offset |= (insn & 0x7ff) << 1;
7968 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7969 offset[24:22] already have the same value because of the
7970 sign extension above. */
7971 offset ^= ((~insn) & (1 << 13)) << 10;
7972 offset ^= ((~insn) & (1 << 11)) << 11;
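 /* In architectural terms: hw1 sits in bits [31:16] of insn and hw2 in
    bits [15:0], so the two XORs above reconstruct I1 = NOT(J1 EOR S)
    into offset[23] and I2 = NOT(J2 EOR S) into offset[22]; together
    with the sign-extended S:imm10 and imm11 << 1 this forms the
    B/BL/BLX branch offset. */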
7973
9ee6e8bb
PB
7974 if (insn & (1 << 14)) {
7975 /* Branch and link. */
3174f8e9 7976 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7977 }
3b46e624 7978
b0109805 7979 offset += s->pc;
9ee6e8bb
PB
7980 if (insn & (1 << 12)) {
7981 /* b/bl */
b0109805 7982 gen_jmp(s, offset);
9ee6e8bb
PB
7983 } else {
7984 /* blx */
b0109805
PB
7985 offset &= ~(uint32_t)2;
7986 gen_bx_im(s, offset);
2c0262af 7987 }
9ee6e8bb
PB
7988 } else if (((insn >> 23) & 7) == 7) {
7989 /* Misc control */
7990 if (insn & (1 << 13))
7991 goto illegal_op;
7992
7993 if (insn & (1 << 26)) {
7994 /* Secure monitor call (v6Z) */
7995 goto illegal_op; /* not implemented. */
2c0262af 7996 } else {
9ee6e8bb
PB
7997 op = (insn >> 20) & 7;
7998 switch (op) {
7999 case 0: /* msr cpsr. */
8000 if (IS_M(env)) {
8984bd2e
PB
8001 tmp = load_reg(s, rn);
8002 addr = tcg_const_i32(insn & 0xff);
8003 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6
JR
8004 tcg_temp_free_i32(addr);
8005 dead_tmp(tmp);
9ee6e8bb
PB
8006 gen_lookup_tb(s);
8007 break;
8008 }
8009 /* fall through */
8010 case 1: /* msr spsr. */
8011 if (IS_M(env))
8012 goto illegal_op;
2fbac54b
FN
8013 tmp = load_reg(s, rn);
8014 if (gen_set_psr(s,
9ee6e8bb 8015 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8016 op == 1, tmp))
9ee6e8bb
PB
8017 goto illegal_op;
8018 break;
8019 case 2: /* cps, nop-hint. */
8020 if (((insn >> 8) & 7) == 0) {
8021 gen_nop_hint(s, insn & 0xff);
8022 }
8023 /* Implemented as NOP in user mode. */
8024 if (IS_USER(s))
8025 break;
8026 offset = 0;
8027 imm = 0;
8028 if (insn & (1 << 10)) {
8029 if (insn & (1 << 7))
8030 offset |= CPSR_A;
8031 if (insn & (1 << 6))
8032 offset |= CPSR_I;
8033 if (insn & (1 << 5))
8034 offset |= CPSR_F;
8035 if (insn & (1 << 9))
8036 imm = CPSR_A | CPSR_I | CPSR_F;
8037 }
8038 if (insn & (1 << 8)) {
8039 offset |= 0x1f;
8040 imm |= (insn & 0x1f);
8041 }
8042 if (offset) {
2fbac54b 8043 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8044 }
8045 break;
8046 case 3: /* Special control operations. */
426f5abc 8047 ARCH(7);
9ee6e8bb
PB
8048 op = (insn >> 4) & 0xf;
8049 switch (op) {
8050 case 2: /* clrex */
426f5abc 8051 gen_clrex(s);
9ee6e8bb
PB
8052 break;
8053 case 4: /* dsb */
8054 case 5: /* dmb */
8055 case 6: /* isb */
8056 /* These execute as NOPs. */
9ee6e8bb
PB
8057 break;
8058 default:
8059 goto illegal_op;
8060 }
8061 break;
8062 case 4: /* bxj */
8063 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8064 tmp = load_reg(s, rn);
8065 gen_bx(s, tmp);
9ee6e8bb
PB
8066 break;
8067 case 5: /* Exception return. */
b8b45b68
RV
8068 if (IS_USER(s)) {
8069 goto illegal_op;
8070 }
8071 if (rn != 14 || rd != 15) {
8072 goto illegal_op;
8073 }
8074 tmp = load_reg(s, rn);
8075 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8076 gen_exception_return(s, tmp);
8077 break;
9ee6e8bb 8078 case 6: /* mrs cpsr. */
8984bd2e 8079 tmp = new_tmp();
9ee6e8bb 8080 if (IS_M(env)) {
8984bd2e
PB
8081 addr = tcg_const_i32(insn & 0xff);
8082 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8083 tcg_temp_free_i32(addr);
9ee6e8bb 8084 } else {
8984bd2e 8085 gen_helper_cpsr_read(tmp);
9ee6e8bb 8086 }
8984bd2e 8087 store_reg(s, rd, tmp);
9ee6e8bb
PB
8088 break;
8089 case 7: /* mrs spsr. */
8090 /* Not accessible in user mode. */
8091 if (IS_USER(s) || IS_M(env))
8092 goto illegal_op;
d9ba4830
PB
8093 tmp = load_cpu_field(spsr);
8094 store_reg(s, rd, tmp);
9ee6e8bb 8095 break;
2c0262af
FB
8096 }
8097 }
9ee6e8bb
PB
8098 } else {
8099 /* Conditional branch. */
8100 op = (insn >> 22) & 0xf;
8101 /* Generate a conditional jump to next instruction. */
8102 s->condlabel = gen_new_label();
d9ba4830 8103 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8104 s->condjmp = 1;
8105
8106 /* offset[11:1] = insn[10:0] */
8107 offset = (insn & 0x7ff) << 1;
8108 /* offset[17:12] = insn[21:16]. */
8109 offset |= (insn & 0x003f0000) >> 4;
8110 /* offset[31:20] = insn[26]. */
8111 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8112 /* offset[18] = insn[13]. */
8113 offset |= (insn & (1 << 13)) << 5;
8114 /* offset[19] = insn[11]. */
8115 offset |= (insn & (1 << 11)) << 8;
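 /* Put together, this is the sign-extended S:J2:J1:imm6:imm11:'0'
    offset of the 32-bit conditional branch encoding, with S in
    insn[26], imm6 in insn[21:16], J1 in insn[13], J2 in insn[11] and
    imm11 in insn[10:0]. */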
8116
8117 /* jump to the offset */
b0109805 8118 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8119 }
8120 } else {
8121 /* Data processing immediate. */
8122 if (insn & (1 << 25)) {
8123 if (insn & (1 << 24)) {
8124 if (insn & (1 << 20))
8125 goto illegal_op;
8126 /* Bitfield/Saturate. */
8127 op = (insn >> 21) & 7;
8128 imm = insn & 0x1f;
8129 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
8130 if (rn == 15) {
8131 tmp = new_tmp();
8132 tcg_gen_movi_i32(tmp, 0);
8133 } else {
8134 tmp = load_reg(s, rn);
8135 }
9ee6e8bb
PB
8136 switch (op) {
8137 case 2: /* Signed bitfield extract. */
8138 imm++;
8139 if (shift + imm > 32)
8140 goto illegal_op;
8141 if (imm < 32)
6ddbc6e4 8142 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8143 break;
8144 case 6: /* Unsigned bitfield extract. */
8145 imm++;
8146 if (shift + imm > 32)
8147 goto illegal_op;
8148 if (imm < 32)
6ddbc6e4 8149 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8150 break;
8151 case 3: /* Bitfield insert/clear. */
8152 if (imm < shift)
8153 goto illegal_op;
8154 imm = imm + 1 - shift;
8155 if (imm != 32) {
6ddbc6e4 8156 tmp2 = load_reg(s, rd);
8f8e3aa4 8157 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 8158 dead_tmp(tmp2);
9ee6e8bb
PB
8159 }
8160 break;
8161 case 7:
8162 goto illegal_op;
8163 default: /* Saturate. */
9ee6e8bb
PB
8164 if (shift) {
8165 if (op & 1)
6ddbc6e4 8166 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8167 else
6ddbc6e4 8168 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8169 }
6ddbc6e4 8170 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8171 if (op & 4) {
8172 /* Unsigned. */
9ee6e8bb 8173 if ((op & 1) && shift == 0)
6ddbc6e4 8174 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8175 else
6ddbc6e4 8176 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8177 } else {
9ee6e8bb 8178 /* Signed. */
9ee6e8bb 8179 if ((op & 1) && shift == 0)
6ddbc6e4 8180 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8181 else
6ddbc6e4 8182 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8183 }
b75263d6 8184 tcg_temp_free_i32(tmp2);
9ee6e8bb 8185 break;
2c0262af 8186 }
6ddbc6e4 8187 store_reg(s, rd, tmp);
9ee6e8bb
PB
8188 } else {
8189 imm = ((insn & 0x04000000) >> 15)
8190 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8191 if (insn & (1 << 22)) {
8192 /* 16-bit immediate. */
8193 imm |= (insn >> 4) & 0xf000;
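 /* i.e. the 16-bit immediate is imm4:i:imm3:imm8, gathered from
    insn[19:16], insn[26], insn[14:12] and insn[7:0]. */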
8194 if (insn & (1 << 23)) {
8195 /* movt */
5e3f878a 8196 tmp = load_reg(s, rd);
86831435 8197 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8198 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8199 } else {
9ee6e8bb 8200 /* movw */
5e3f878a
PB
8201 tmp = new_tmp();
8202 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8203 }
8204 } else {
9ee6e8bb
PB
8205 /* Add/sub 12-bit immediate. */
8206 if (rn == 15) {
b0109805 8207 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8208 if (insn & (1 << 23))
b0109805 8209 offset -= imm;
9ee6e8bb 8210 else
b0109805 8211 offset += imm;
5e3f878a
PB
8212 tmp = new_tmp();
8213 tcg_gen_movi_i32(tmp, offset);
2c0262af 8214 } else {
5e3f878a 8215 tmp = load_reg(s, rn);
9ee6e8bb 8216 if (insn & (1 << 23))
5e3f878a 8217 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8218 else
5e3f878a 8219 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8220 }
9ee6e8bb 8221 }
5e3f878a 8222 store_reg(s, rd, tmp);
191abaa2 8223 }
9ee6e8bb
PB
8224 } else {
8225 int shifter_out = 0;
8226 /* modified 12-bit immediate. */
8227 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8228 imm = (insn & 0xff);
8229 switch (shift) {
8230 case 0: /* XY */
8231 /* Nothing to do. */
8232 break;
8233 case 1: /* 00XY00XY */
8234 imm |= imm << 16;
8235 break;
8236 case 2: /* XY00XY00 */
8237 imm |= imm << 16;
8238 imm <<= 8;
8239 break;
8240 case 3: /* XYXYXYXY */
8241 imm |= imm << 16;
8242 imm |= imm << 8;
8243 break;
8244 default: /* Rotated constant. */
8245 shift = (shift << 1) | (imm >> 7);
8246 imm |= 0x80;
8247 imm = imm << (32 - shift);
8248 shifter_out = 1;
8249 break;
b5ff1b31 8250 }
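 /* Worked examples for the expansion above, assuming imm8 = 0xAB:
    shift 1 gives 0x00AB00AB, shift 2 gives 0xAB00AB00 and shift 3
    gives 0xABABABAB.  For a rotated constant with i:imm3 = 0b0100 the
    5-bit rotation is (0b0100 << 1) | (0xAB >> 7) = 9, so the constant
    is (0xAB | 0x80) << (32 - 9) = 0x55800000, i.e. 0b10101011 rotated
    right by 9. */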
3174f8e9
FN
8251 tmp2 = new_tmp();
8252 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8253 rn = (insn >> 16) & 0xf;
3174f8e9
FN
8254 if (rn == 15) {
8255 tmp = new_tmp();
8256 tcg_gen_movi_i32(tmp, 0);
8257 } else {
8258 tmp = load_reg(s, rn);
8259 }
9ee6e8bb
PB
8260 op = (insn >> 21) & 0xf;
8261 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8262 shifter_out, tmp, tmp2))
9ee6e8bb 8263 goto illegal_op;
3174f8e9 8264 dead_tmp(tmp2);
9ee6e8bb
PB
8265 rd = (insn >> 8) & 0xf;
8266 if (rd != 15) {
3174f8e9
FN
8267 store_reg(s, rd, tmp);
8268 } else {
8269 dead_tmp(tmp);
2c0262af 8270 }
2c0262af 8271 }
9ee6e8bb
PB
8272 }
8273 break;
8274 case 12: /* Load/store single data item. */
8275 {
8276 int postinc = 0;
8277 int writeback = 0;
b0109805 8278 int user;
9ee6e8bb
PB
8279 if ((insn & 0x01100000) == 0x01000000) {
8280 if (disas_neon_ls_insn(env, s, insn))
c1713132 8281 goto illegal_op;
9ee6e8bb
PB
8282 break;
8283 }
b0109805 8284 user = IS_USER(s);
9ee6e8bb 8285 if (rn == 15) {
b0109805 8286 addr = new_tmp();
9ee6e8bb
PB
8287 /* PC relative. */
8288 /* s->pc has already been incremented by 4. */
8289 imm = s->pc & 0xfffffffc;
8290 if (insn & (1 << 23))
8291 imm += insn & 0xfff;
8292 else
8293 imm -= insn & 0xfff;
b0109805 8294 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8295 } else {
b0109805 8296 addr = load_reg(s, rn);
9ee6e8bb
PB
8297 if (insn & (1 << 23)) {
8298 /* Positive offset. */
8299 imm = insn & 0xfff;
b0109805 8300 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8301 } else {
8302 op = (insn >> 8) & 7;
8303 imm = insn & 0xff;
8304 switch (op) {
8305 case 0: case 8: /* Shifted Register. */
8306 shift = (insn >> 4) & 0xf;
8307 if (shift > 3)
18c9b560 8308 goto illegal_op;
b26eefb6 8309 tmp = load_reg(s, rm);
9ee6e8bb 8310 if (shift)
b26eefb6 8311 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8312 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8313 dead_tmp(tmp);
9ee6e8bb
PB
8314 break;
8315 case 4: /* Negative offset. */
b0109805 8316 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8317 break;
8318 case 6: /* User privilege. */
b0109805
PB
8319 tcg_gen_addi_i32(addr, addr, imm);
8320 user = 1;
9ee6e8bb
PB
8321 break;
8322 case 1: /* Post-decrement. */
8323 imm = -imm;
8324 /* Fall through. */
8325 case 3: /* Post-increment. */
9ee6e8bb
PB
8326 postinc = 1;
8327 writeback = 1;
8328 break;
8329 case 5: /* Pre-decrement. */
8330 imm = -imm;
8331 /* Fall through. */
8332 case 7: /* Pre-increment. */
b0109805 8333 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8334 writeback = 1;
8335 break;
8336 default:
b7bcbe95 8337 goto illegal_op;
9ee6e8bb
PB
8338 }
8339 }
8340 }
8341 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8342 if (insn & (1 << 20)) {
8343 /* Load. */
8344 if (rs == 15 && op != 2) {
8345 if (op & 2)
b5ff1b31 8346 goto illegal_op;
9ee6e8bb
PB
8347 /* Memory hint. Implemented as NOP. */
8348 } else {
8349 switch (op) {
b0109805
PB
8350 case 0: tmp = gen_ld8u(addr, user); break;
8351 case 4: tmp = gen_ld8s(addr, user); break;
8352 case 1: tmp = gen_ld16u(addr, user); break;
8353 case 5: tmp = gen_ld16s(addr, user); break;
8354 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8355 default: goto illegal_op;
8356 }
8357 if (rs == 15) {
b0109805 8358 gen_bx(s, tmp);
9ee6e8bb 8359 } else {
b0109805 8360 store_reg(s, rs, tmp);
9ee6e8bb
PB
8361 }
8362 }
8363 } else {
8364 /* Store. */
8365 if (rs == 15)
b7bcbe95 8366 goto illegal_op;
b0109805 8367 tmp = load_reg(s, rs);
9ee6e8bb 8368 switch (op) {
b0109805
PB
8369 case 0: gen_st8(tmp, addr, user); break;
8370 case 1: gen_st16(tmp, addr, user); break;
8371 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8372 default: goto illegal_op;
b7bcbe95 8373 }
2c0262af 8374 }
9ee6e8bb 8375 if (postinc)
b0109805
PB
8376 tcg_gen_addi_i32(addr, addr, imm);
8377 if (writeback) {
8378 store_reg(s, rn, addr);
8379 } else {
8380 dead_tmp(addr);
8381 }
9ee6e8bb
PB
8382 }
8383 break;
8384 default:
8385 goto illegal_op;
2c0262af 8386 }
9ee6e8bb
PB
8387 return 0;
8388illegal_op:
8389 return 1;
2c0262af
FB
8390}
8391
9ee6e8bb 8392static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8393{
8394 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8395 int32_t offset;
8396 int i;
b26eefb6 8397 TCGv tmp;
d9ba4830 8398 TCGv tmp2;
b0109805 8399 TCGv addr;
99c475ab 8400
9ee6e8bb
PB
8401 if (s->condexec_mask) {
8402 cond = s->condexec_cond;
bedd2912
JB
8403 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8404 s->condlabel = gen_new_label();
8405 gen_test_cc(cond ^ 1, s->condlabel);
8406 s->condjmp = 1;
8407 }
9ee6e8bb
PB
8408 }
8409
b5ff1b31 8410 insn = lduw_code(s->pc);
99c475ab 8411 s->pc += 2;
b5ff1b31 8412
99c475ab
FB
8413 switch (insn >> 12) {
8414 case 0: case 1:
396e467c 8415
99c475ab
FB
8416 rd = insn & 7;
8417 op = (insn >> 11) & 3;
8418 if (op == 3) {
8419 /* add/subtract */
8420 rn = (insn >> 3) & 7;
396e467c 8421 tmp = load_reg(s, rn);
99c475ab
FB
8422 if (insn & (1 << 10)) {
8423 /* immediate */
396e467c
FN
8424 tmp2 = new_tmp();
8425 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8426 } else {
8427 /* reg */
8428 rm = (insn >> 6) & 7;
396e467c 8429 tmp2 = load_reg(s, rm);
99c475ab 8430 }
9ee6e8bb
PB
8431 if (insn & (1 << 9)) {
8432 if (s->condexec_mask)
396e467c 8433 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8434 else
396e467c 8435 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8436 } else {
8437 if (s->condexec_mask)
396e467c 8438 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8439 else
396e467c 8440 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8441 }
396e467c
FN
8442 dead_tmp(tmp2);
8443 store_reg(s, rd, tmp);
99c475ab
FB
8444 } else {
8445 /* shift immediate */
8446 rm = (insn >> 3) & 7;
8447 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8448 tmp = load_reg(s, rm);
8449 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8450 if (!s->condexec_mask)
8451 gen_logic_CC(tmp);
8452 store_reg(s, rd, tmp);
99c475ab
FB
8453 }
8454 break;
8455 case 2: case 3:
8456 /* arithmetic large immediate */
8457 op = (insn >> 11) & 3;
8458 rd = (insn >> 8) & 0x7;
396e467c
FN
8459 if (op == 0) { /* mov */
8460 tmp = new_tmp();
8461 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8462 if (!s->condexec_mask)
396e467c
FN
8463 gen_logic_CC(tmp);
8464 store_reg(s, rd, tmp);
8465 } else {
8466 tmp = load_reg(s, rd);
8467 tmp2 = new_tmp();
8468 tcg_gen_movi_i32(tmp2, insn & 0xff);
8469 switch (op) {
8470 case 1: /* cmp */
8471 gen_helper_sub_cc(tmp, tmp, tmp2);
8472 dead_tmp(tmp);
8473 dead_tmp(tmp2);
8474 break;
8475 case 2: /* add */
8476 if (s->condexec_mask)
8477 tcg_gen_add_i32(tmp, tmp, tmp2);
8478 else
8479 gen_helper_add_cc(tmp, tmp, tmp2);
8480 dead_tmp(tmp2);
8481 store_reg(s, rd, tmp);
8482 break;
8483 case 3: /* sub */
8484 if (s->condexec_mask)
8485 tcg_gen_sub_i32(tmp, tmp, tmp2);
8486 else
8487 gen_helper_sub_cc(tmp, tmp, tmp2);
8488 dead_tmp(tmp2);
8489 store_reg(s, rd, tmp);
8490 break;
8491 }
99c475ab 8492 }
99c475ab
FB
8493 break;
8494 case 4:
8495 if (insn & (1 << 11)) {
8496 rd = (insn >> 8) & 7;
5899f386
FB
8497 /* load pc-relative. Bit 1 of PC is ignored. */
8498 val = s->pc + 2 + ((insn & 0xff) * 4);
8499 val &= ~(uint32_t)2;
b0109805
PB
8500 addr = new_tmp();
8501 tcg_gen_movi_i32(addr, val);
8502 tmp = gen_ld32(addr, IS_USER(s));
8503 dead_tmp(addr);
8504 store_reg(s, rd, tmp);
99c475ab
FB
8505 break;
8506 }
8507 if (insn & (1 << 10)) {
8508 /* data processing extended or blx */
8509 rd = (insn & 7) | ((insn >> 4) & 8);
8510 rm = (insn >> 3) & 0xf;
8511 op = (insn >> 8) & 3;
8512 switch (op) {
8513 case 0: /* add */
396e467c
FN
8514 tmp = load_reg(s, rd);
8515 tmp2 = load_reg(s, rm);
8516 tcg_gen_add_i32(tmp, tmp, tmp2);
8517 dead_tmp(tmp2);
8518 store_reg(s, rd, tmp);
99c475ab
FB
8519 break;
8520 case 1: /* cmp */
396e467c
FN
8521 tmp = load_reg(s, rd);
8522 tmp2 = load_reg(s, rm);
8523 gen_helper_sub_cc(tmp, tmp, tmp2);
8524 dead_tmp(tmp2);
8525 dead_tmp(tmp);
99c475ab
FB
8526 break;
8527 case 2: /* mov/cpy */
396e467c
FN
8528 tmp = load_reg(s, rm);
8529 store_reg(s, rd, tmp);
99c475ab
FB
8530 break;
8531 case 3:/* branch [and link] exchange thumb register */
b0109805 8532 tmp = load_reg(s, rm);
99c475ab
FB
8533 if (insn & (1 << 7)) {
8534 val = (uint32_t)s->pc | 1;
b0109805
PB
8535 tmp2 = new_tmp();
8536 tcg_gen_movi_i32(tmp2, val);
8537 store_reg(s, 14, tmp2);
99c475ab 8538 }
d9ba4830 8539 gen_bx(s, tmp);
99c475ab
FB
8540 break;
8541 }
8542 break;
8543 }
8544
8545 /* data processing register */
8546 rd = insn & 7;
8547 rm = (insn >> 3) & 7;
8548 op = (insn >> 6) & 0xf;
8549 if (op == 2 || op == 3 || op == 4 || op == 7) {
8550 /* the shift/rotate ops want the operands backwards */
8551 val = rm;
8552 rm = rd;
8553 rd = val;
8554 val = 1;
8555 } else {
8556 val = 0;
8557 }
8558
396e467c
FN
8559 if (op == 9) { /* neg */
8560 tmp = new_tmp();
8561 tcg_gen_movi_i32(tmp, 0);
8562 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8563 tmp = load_reg(s, rd);
8564 } else {
8565 TCGV_UNUSED(tmp);
8566 }
99c475ab 8567
396e467c 8568 tmp2 = load_reg(s, rm);
5899f386 8569 switch (op) {
99c475ab 8570 case 0x0: /* and */
396e467c 8571 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8572 if (!s->condexec_mask)
396e467c 8573 gen_logic_CC(tmp);
99c475ab
FB
8574 break;
8575 case 0x1: /* eor */
396e467c 8576 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8577 if (!s->condexec_mask)
396e467c 8578 gen_logic_CC(tmp);
99c475ab
FB
8579 break;
8580 case 0x2: /* lsl */
9ee6e8bb 8581 if (s->condexec_mask) {
396e467c 8582 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8583 } else {
396e467c
FN
8584 gen_helper_shl_cc(tmp2, tmp2, tmp);
8585 gen_logic_CC(tmp2);
9ee6e8bb 8586 }
99c475ab
FB
8587 break;
8588 case 0x3: /* lsr */
9ee6e8bb 8589 if (s->condexec_mask) {
396e467c 8590 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8591 } else {
396e467c
FN
8592 gen_helper_shr_cc(tmp2, tmp2, tmp);
8593 gen_logic_CC(tmp2);
9ee6e8bb 8594 }
99c475ab
FB
8595 break;
8596 case 0x4: /* asr */
9ee6e8bb 8597 if (s->condexec_mask) {
396e467c 8598 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8599 } else {
396e467c
FN
8600 gen_helper_sar_cc(tmp2, tmp2, tmp);
8601 gen_logic_CC(tmp2);
9ee6e8bb 8602 }
99c475ab
FB
8603 break;
8604 case 0x5: /* adc */
9ee6e8bb 8605 if (s->condexec_mask)
396e467c 8606 gen_adc(tmp, tmp2);
9ee6e8bb 8607 else
396e467c 8608 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8609 break;
8610 case 0x6: /* sbc */
9ee6e8bb 8611 if (s->condexec_mask)
396e467c 8612 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8613 else
396e467c 8614 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8615 break;
8616 case 0x7: /* ror */
9ee6e8bb 8617 if (s->condexec_mask) {
f669df27
AJ
8618 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8619 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8620 } else {
396e467c
FN
8621 gen_helper_ror_cc(tmp2, tmp2, tmp);
8622 gen_logic_CC(tmp2);
9ee6e8bb 8623 }
99c475ab
FB
8624 break;
8625 case 0x8: /* tst */
396e467c
FN
8626 tcg_gen_and_i32(tmp, tmp, tmp2);
8627 gen_logic_CC(tmp);
99c475ab 8628 rd = 16;
5899f386 8629 break;
99c475ab 8630 case 0x9: /* neg */
9ee6e8bb 8631 if (s->condexec_mask)
396e467c 8632 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8633 else
396e467c 8634 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8635 break;
8636 case 0xa: /* cmp */
396e467c 8637 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8638 rd = 16;
8639 break;
8640 case 0xb: /* cmn */
396e467c 8641 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8642 rd = 16;
8643 break;
8644 case 0xc: /* orr */
396e467c 8645 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8646 if (!s->condexec_mask)
396e467c 8647 gen_logic_CC(tmp);
99c475ab
FB
8648 break;
8649 case 0xd: /* mul */
7b2919a0 8650 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8651 if (!s->condexec_mask)
396e467c 8652 gen_logic_CC(tmp);
99c475ab
FB
8653 break;
8654 case 0xe: /* bic */
f669df27 8655 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8656 if (!s->condexec_mask)
396e467c 8657 gen_logic_CC(tmp);
99c475ab
FB
8658 break;
8659 case 0xf: /* mvn */
396e467c 8660 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8661 if (!s->condexec_mask)
396e467c 8662 gen_logic_CC(tmp2);
99c475ab 8663 val = 1;
5899f386 8664 rm = rd;
99c475ab
FB
8665 break;
8666 }
8667 if (rd != 16) {
396e467c
FN
8668 if (val) {
8669 store_reg(s, rm, tmp2);
8670 if (op != 0xf)
8671 dead_tmp(tmp);
8672 } else {
8673 store_reg(s, rd, tmp);
8674 dead_tmp(tmp2);
8675 }
8676 } else {
8677 dead_tmp(tmp);
8678 dead_tmp(tmp2);
99c475ab
FB
8679 }
8680 break;
8681
8682 case 5:
8683 /* load/store register offset. */
8684 rd = insn & 7;
8685 rn = (insn >> 3) & 7;
8686 rm = (insn >> 6) & 7;
8687 op = (insn >> 9) & 7;
b0109805 8688 addr = load_reg(s, rn);
b26eefb6 8689 tmp = load_reg(s, rm);
b0109805 8690 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8691 dead_tmp(tmp);
99c475ab
FB
8692
8693 if (op < 3) /* store */
b0109805 8694 tmp = load_reg(s, rd);
99c475ab
FB
8695
8696 switch (op) {
8697 case 0: /* str */
b0109805 8698 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8699 break;
8700 case 1: /* strh */
b0109805 8701 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8702 break;
8703 case 2: /* strb */
b0109805 8704 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8705 break;
8706 case 3: /* ldrsb */
b0109805 8707 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8708 break;
8709 case 4: /* ldr */
b0109805 8710 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8711 break;
8712 case 5: /* ldrh */
b0109805 8713 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8714 break;
8715 case 6: /* ldrb */
b0109805 8716 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8717 break;
8718 case 7: /* ldrsh */
b0109805 8719 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8720 break;
8721 }
8722 if (op >= 3) /* load */
b0109805
PB
8723 store_reg(s, rd, tmp);
8724 dead_tmp(addr);
99c475ab
FB
8725 break;
8726
8727 case 6:
8728 /* load/store word immediate offset */
8729 rd = insn & 7;
8730 rn = (insn >> 3) & 7;
b0109805 8731 addr = load_reg(s, rn);
99c475ab 8732 val = (insn >> 4) & 0x7c;
b0109805 8733 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8734
8735 if (insn & (1 << 11)) {
8736 /* load */
b0109805
PB
8737 tmp = gen_ld32(addr, IS_USER(s));
8738 store_reg(s, rd, tmp);
99c475ab
FB
8739 } else {
8740 /* store */
b0109805
PB
8741 tmp = load_reg(s, rd);
8742 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8743 }
b0109805 8744 dead_tmp(addr);
99c475ab
FB
8745 break;
8746
8747 case 7:
8748 /* load/store byte immediate offset */
8749 rd = insn & 7;
8750 rn = (insn >> 3) & 7;
b0109805 8751 addr = load_reg(s, rn);
99c475ab 8752 val = (insn >> 6) & 0x1f;
b0109805 8753 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8754
8755 if (insn & (1 << 11)) {
8756 /* load */
b0109805
PB
8757 tmp = gen_ld8u(addr, IS_USER(s));
8758 store_reg(s, rd, tmp);
99c475ab
FB
8759 } else {
8760 /* store */
b0109805
PB
8761 tmp = load_reg(s, rd);
8762 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8763 }
b0109805 8764 dead_tmp(addr);
99c475ab
FB
8765 break;
8766
8767 case 8:
8768 /* load/store halfword immediate offset */
8769 rd = insn & 7;
8770 rn = (insn >> 3) & 7;
b0109805 8771 addr = load_reg(s, rn);
99c475ab 8772 val = (insn >> 5) & 0x3e;
b0109805 8773 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8774
8775 if (insn & (1 << 11)) {
8776 /* load */
b0109805
PB
8777 tmp = gen_ld16u(addr, IS_USER(s));
8778 store_reg(s, rd, tmp);
99c475ab
FB
8779 } else {
8780 /* store */
b0109805
PB
8781 tmp = load_reg(s, rd);
8782 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8783 }
b0109805 8784 dead_tmp(addr);
99c475ab
FB
8785 break;
8786
8787 case 9:
8788 /* load/store from stack */
8789 rd = (insn >> 8) & 7;
b0109805 8790 addr = load_reg(s, 13);
99c475ab 8791 val = (insn & 0xff) * 4;
b0109805 8792 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8793
8794 if (insn & (1 << 11)) {
8795 /* load */
b0109805
PB
8796 tmp = gen_ld32(addr, IS_USER(s));
8797 store_reg(s, rd, tmp);
99c475ab
FB
8798 } else {
8799 /* store */
b0109805
PB
8800 tmp = load_reg(s, rd);
8801 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8802 }
b0109805 8803 dead_tmp(addr);
99c475ab
FB
8804 break;
8805
8806 case 10:
8807 /* add to high reg */
8808 rd = (insn >> 8) & 7;
5899f386
FB
8809 if (insn & (1 << 11)) {
8810 /* SP */
5e3f878a 8811 tmp = load_reg(s, 13);
5899f386
FB
8812 } else {
8813 /* PC. bit 1 is ignored. */
5e3f878a
PB
8814 tmp = new_tmp();
8815 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8816 }
99c475ab 8817 val = (insn & 0xff) * 4;
5e3f878a
PB
8818 tcg_gen_addi_i32(tmp, tmp, val);
8819 store_reg(s, rd, tmp);
99c475ab
FB
8820 break;
8821
8822 case 11:
8823 /* misc */
8824 op = (insn >> 8) & 0xf;
8825 switch (op) {
8826 case 0:
8827 /* adjust stack pointer */
b26eefb6 8828 tmp = load_reg(s, 13);
99c475ab
FB
8829 val = (insn & 0x7f) * 4;
8830 if (insn & (1 << 7))
6a0d8a1d 8831 val = -(int32_t)val;
b26eefb6
PB
8832 tcg_gen_addi_i32(tmp, tmp, val);
8833 store_reg(s, 13, tmp);
99c475ab
FB
8834 break;
8835
9ee6e8bb
PB
8836 case 2: /* sign/zero extend. */
8837 ARCH(6);
8838 rd = insn & 7;
8839 rm = (insn >> 3) & 7;
b0109805 8840 tmp = load_reg(s, rm);
9ee6e8bb 8841 switch ((insn >> 6) & 3) {
b0109805
PB
8842 case 0: gen_sxth(tmp); break;
8843 case 1: gen_sxtb(tmp); break;
8844 case 2: gen_uxth(tmp); break;
8845 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8846 }
b0109805 8847 store_reg(s, rd, tmp);
9ee6e8bb 8848 break;
99c475ab
FB
8849 case 4: case 5: case 0xc: case 0xd:
8850 /* push/pop */
b0109805 8851 addr = load_reg(s, 13);
5899f386
FB
8852 if (insn & (1 << 8))
8853 offset = 4;
99c475ab 8854 else
5899f386
FB
8855 offset = 0;
8856 for (i = 0; i < 8; i++) {
8857 if (insn & (1 << i))
8858 offset += 4;
8859 }
8860 if ((insn & (1 << 11)) == 0) {
b0109805 8861 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8862 }
99c475ab
FB
8863 for (i = 0; i < 8; i++) {
8864 if (insn & (1 << i)) {
8865 if (insn & (1 << 11)) {
8866 /* pop */
b0109805
PB
8867 tmp = gen_ld32(addr, IS_USER(s));
8868 store_reg(s, i, tmp);
99c475ab
FB
8869 } else {
8870 /* push */
b0109805
PB
8871 tmp = load_reg(s, i);
8872 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8873 }
5899f386 8874 /* advance to the next address. */
b0109805 8875 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8876 }
8877 }
a50f5b91 8878 TCGV_UNUSED(tmp);
99c475ab
FB
8879 if (insn & (1 << 8)) {
8880 if (insn & (1 << 11)) {
8881 /* pop pc */
b0109805 8882 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8883 /* don't set the pc until the rest of the instruction
8884 has completed */
8885 } else {
8886 /* push lr */
b0109805
PB
8887 tmp = load_reg(s, 14);
8888 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8889 }
b0109805 8890 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8891 }
5899f386 8892 if ((insn & (1 << 11)) == 0) {
b0109805 8893 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8894 }
99c475ab 8895 /* write back the new stack pointer */
b0109805 8896 store_reg(s, 13, addr);
99c475ab
FB
8897 /* set the new PC value */
8898 if ((insn & 0x0900) == 0x0900)
b0109805 8899 gen_bx(s, tmp);
99c475ab
FB
8900 break;
8901
9ee6e8bb
PB
8902 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8903 rm = insn & 7;
d9ba4830 8904 tmp = load_reg(s, rm);
9ee6e8bb
PB
8905 s->condlabel = gen_new_label();
8906 s->condjmp = 1;
8907 if (insn & (1 << 11))
cb63669a 8908 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8909 else
cb63669a 8910 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8911 dead_tmp(tmp);
9ee6e8bb
PB
8912 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
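 /* i.e. offset = i:imm5:'0', with i from insn[9] and imm5 from
    insn[7:3]; the target computed below is this insn's address + 4
    + offset, since s->pc already points past the 16-bit opcode. */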
8913 val = (uint32_t)s->pc + 2;
8914 val += offset;
8915 gen_jmp(s, val);
8916 break;
8917
8918 case 15: /* IT, nop-hint. */
8919 if ((insn & 0xf) == 0) {
8920 gen_nop_hint(s, (insn >> 4) & 0xf);
8921 break;
8922 }
8923 /* If Then. */
8924 s->condexec_cond = (insn >> 4) & 0xe;
8925 s->condexec_mask = insn & 0x1f;
8926 /* No actual code generated for this insn, just setup state. */
8927 break;
8928
06c949e6 8929 case 0xe: /* bkpt */
9ee6e8bb 8930 gen_set_condexec(s);
5e3f878a 8931 gen_set_pc_im(s->pc - 2);
d9ba4830 8932 gen_exception(EXCP_BKPT);
06c949e6
PB
8933 s->is_jmp = DISAS_JUMP;
8934 break;
8935
9ee6e8bb
PB
8936 case 0xa: /* rev */
8937 ARCH(6);
8938 rn = (insn >> 3) & 0x7;
8939 rd = insn & 0x7;
b0109805 8940 tmp = load_reg(s, rn);
9ee6e8bb 8941 switch ((insn >> 6) & 3) {
66896cb8 8942 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8943 case 1: gen_rev16(tmp); break;
8944 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8945 default: goto illegal_op;
8946 }
b0109805 8947 store_reg(s, rd, tmp);
9ee6e8bb
PB
8948 break;
8949
8950 case 6: /* cps */
8951 ARCH(6);
8952 if (IS_USER(s))
8953 break;
8954 if (IS_M(env)) {
8984bd2e 8955 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8956 /* PRIMASK */
8984bd2e
PB
8957 if (insn & 1) {
8958 addr = tcg_const_i32(16);
8959 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8960 tcg_temp_free_i32(addr);
8984bd2e 8961 }
9ee6e8bb 8962 /* FAULTMASK */
8984bd2e
PB
8963 if (insn & 2) {
8964 addr = tcg_const_i32(17);
8965 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8966 tcg_temp_free_i32(addr);
8984bd2e 8967 }
b75263d6 8968 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8969 gen_lookup_tb(s);
8970 } else {
8971 if (insn & (1 << 4))
8972 shift = CPSR_A | CPSR_I | CPSR_F;
8973 else
8974 shift = 0;
fa26df03 8975 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
8976 }
8977 break;
8978
99c475ab
FB
8979 default:
8980 goto undef;
8981 }
8982 break;
8983
8984 case 12:
8985 /* load/store multiple */
8986 rn = (insn >> 8) & 0x7;
b0109805 8987 addr = load_reg(s, rn);
99c475ab
FB
8988 for (i = 0; i < 8; i++) {
8989 if (insn & (1 << i)) {
99c475ab
FB
8990 if (insn & (1 << 11)) {
8991 /* load */
b0109805
PB
8992 tmp = gen_ld32(addr, IS_USER(s));
8993 store_reg(s, i, tmp);
99c475ab
FB
8994 } else {
8995 /* store */
b0109805
PB
8996 tmp = load_reg(s, i);
8997 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8998 }
5899f386 8999 /* advance to the next address */
b0109805 9000 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9001 }
9002 }
5899f386 9003 /* Base register writeback. */
b0109805
PB
9004 if ((insn & (1 << rn)) == 0) {
9005 store_reg(s, rn, addr);
9006 } else {
9007 dead_tmp(addr);
9008 }
99c475ab
FB
9009 break;
9010
9011 case 13:
9012 /* conditional branch or swi */
9013 cond = (insn >> 8) & 0xf;
9014 if (cond == 0xe)
9015 goto undef;
9016
9017 if (cond == 0xf) {
9018 /* swi */
9ee6e8bb 9019 gen_set_condexec(s);
422ebf69 9020 gen_set_pc_im(s->pc);
9ee6e8bb 9021 s->is_jmp = DISAS_SWI;
99c475ab
FB
9022 break;
9023 }
9024 /* generate a conditional jump to next instruction */
e50e6a20 9025 s->condlabel = gen_new_label();
d9ba4830 9026 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9027 s->condjmp = 1;
99c475ab
FB
9028
9029 /* jump to the offset */
5899f386 9030 val = (uint32_t)s->pc + 2;
99c475ab 9031 offset = ((int32_t)insn << 24) >> 24;
5899f386 9032 val += offset << 1;
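 /* The cast-and-shift sequence sign-extends the 8-bit immediate, and
    s->pc already points past this 16-bit insn, so s->pc + 2 is the
    architectural PC (instruction address + 4) that the offset is
    taken relative to. */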
8aaca4c0 9033 gen_jmp(s, val);
99c475ab
FB
9034 break;
9035
9036 case 14:
358bf29e 9037 if (insn & (1 << 11)) {
9ee6e8bb
PB
9038 if (disas_thumb2_insn(env, s, insn))
9039 goto undef32;
358bf29e
PB
9040 break;
9041 }
9ee6e8bb 9042 /* unconditional branch */
99c475ab
FB
9043 val = (uint32_t)s->pc;
9044 offset = ((int32_t)insn << 21) >> 21;
9045 val += (offset << 1) + 2;
8aaca4c0 9046 gen_jmp(s, val);
99c475ab
FB
9047 break;
9048
9049 case 15:
9ee6e8bb 9050 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9051 goto undef32;
9ee6e8bb 9052 break;
99c475ab
FB
9053 }
9054 return;
9ee6e8bb
PB
9055undef32:
9056 gen_set_condexec(s);
5e3f878a 9057 gen_set_pc_im(s->pc - 4);
d9ba4830 9058 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
9059 s->is_jmp = DISAS_JUMP;
9060 return;
9061illegal_op:
99c475ab 9062undef:
9ee6e8bb 9063 gen_set_condexec(s);
5e3f878a 9064 gen_set_pc_im(s->pc - 2);
d9ba4830 9065 gen_exception(EXCP_UDEF);
99c475ab
FB
9066 s->is_jmp = DISAS_JUMP;
9067}
9068
2c0262af
FB
9069/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9070 basic block 'tb'. If search_pc is TRUE, also generate PC
9071 information for each intermediate instruction. */
2cfc5f17
TS
9072static inline void gen_intermediate_code_internal(CPUState *env,
9073 TranslationBlock *tb,
9074 int search_pc)
2c0262af
FB
9075{
9076 DisasContext dc1, *dc = &dc1;
a1d1bb31 9077 CPUBreakpoint *bp;
2c0262af
FB
9078 uint16_t *gen_opc_end;
9079 int j, lj;
0fa85d43 9080 target_ulong pc_start;
b5ff1b31 9081 uint32_t next_page_start;
2e70f6ef
PB
9082 int num_insns;
9083 int max_insns;
3b46e624 9084
2c0262af 9085 /* generate intermediate code */
b26eefb6 9086 num_temps = 0;
b26eefb6 9087
0fa85d43 9088 pc_start = tb->pc;
3b46e624 9089
2c0262af
FB
9090 dc->tb = tb;
9091
2c0262af 9092 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9093
9094 dc->is_jmp = DISAS_NEXT;
9095 dc->pc = pc_start;
8aaca4c0 9096 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9097 dc->condjmp = 0;
7204ab88 9098 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9099 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9100 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9101#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
9102 if (IS_M(env)) {
9103 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
9104 } else {
9105 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
9106 }
b5ff1b31 9107#endif
5df8bac1 9108 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9109 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9110 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9111 cpu_F0s = tcg_temp_new_i32();
9112 cpu_F1s = tcg_temp_new_i32();
9113 cpu_F0d = tcg_temp_new_i64();
9114 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9115 cpu_V0 = cpu_F0d;
9116 cpu_V1 = cpu_F1d;
e677137d 9117 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9118 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9119 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9120 lj = -1;
2e70f6ef
PB
9121 num_insns = 0;
9122 max_insns = tb->cflags & CF_COUNT_MASK;
9123 if (max_insns == 0)
9124 max_insns = CF_COUNT_MASK;
9125
9126 gen_icount_start();
9ee6e8bb
PB
9127 /* Reset the conditional execution bits immediately. This avoids
9128 complications trying to do it at the end of the block. */
98eac7ca 9129 if (dc->condexec_mask || dc->condexec_cond)
8f01245e
PB
9130 {
9131 TCGv tmp = new_tmp();
9132 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9133 store_cpu_field(tmp, condexec_bits);
8f01245e 9134 }
2c0262af 9135 do {
fbb4a2e3
PB
9136#ifdef CONFIG_USER_ONLY
9137 /* Intercept jump to the magic kernel page. */
9138 if (dc->pc >= 0xffff0000) {
9139 /* We always get here via a jump, so know we are not in a
9140 conditional execution block. */
9141 gen_exception(EXCP_KERNEL_TRAP);
9142 dc->is_jmp = DISAS_UPDATE;
9143 break;
9144 }
9145#else
9ee6e8bb
PB
9146 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9147 /* We always get here via a jump, so know we are not in a
9148 conditional execution block. */
d9ba4830 9149 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9150 dc->is_jmp = DISAS_UPDATE;
9151 break;
9ee6e8bb
PB
9152 }
9153#endif
9154
72cf2d4f
BS
9155 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9156 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9157 if (bp->pc == dc->pc) {
9ee6e8bb 9158 gen_set_condexec(dc);
5e3f878a 9159 gen_set_pc_im(dc->pc);
d9ba4830 9160 gen_exception(EXCP_DEBUG);
1fddef4b 9161 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9162 /* Advance PC so that clearing the breakpoint will
9163 invalidate this TB. */
9164 dc->pc += 2;
9165 goto done_generating;
1fddef4b
FB
9166 break;
9167 }
9168 }
9169 }
2c0262af
FB
9170 if (search_pc) {
9171 j = gen_opc_ptr - gen_opc_buf;
9172 if (lj < j) {
9173 lj++;
9174 while (lj < j)
9175 gen_opc_instr_start[lj++] = 0;
9176 }
0fa85d43 9177 gen_opc_pc[lj] = dc->pc;
2c0262af 9178 gen_opc_instr_start[lj] = 1;
2e70f6ef 9179 gen_opc_icount[lj] = num_insns;
2c0262af 9180 }
e50e6a20 9181
2e70f6ef
PB
9182 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9183 gen_io_start();
9184
7204ab88 9185 if (dc->thumb) {
9ee6e8bb
PB
9186 disas_thumb_insn(env, dc);
9187 if (dc->condexec_mask) {
9188 dc->condexec_cond = (dc->condexec_cond & 0xe)
9189 | ((dc->condexec_mask >> 4) & 1);
9190 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9191 if (dc->condexec_mask == 0) {
9192 dc->condexec_cond = 0;
9193 }
9194 }
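 /* IT-block bookkeeping: condexec_cond holds the top three bits of
    the IT condition, the low bit for each step comes from bit 4 of
    the 5-bit mask, and the mask shifts left once per instruction;
    when it reaches zero the IT block has ended and the condition is
    cleared. */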
9195 } else {
9196 disas_arm_insn(env, dc);
9197 }
b26eefb6
PB
9198 if (num_temps) {
9199 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9200 num_temps = 0;
9201 }
e50e6a20
FB
9202
9203 if (dc->condjmp && !dc->is_jmp) {
9204 gen_set_label(dc->condlabel);
9205 dc->condjmp = 0;
9206 }
aaf2d97d 9207 /* Translation stops when a conditional branch is encountered.
e50e6a20 9208 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9209 * Also stop translation when a page boundary is reached. This
bf20dc07 9210 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9211 num_insns ++;
1fddef4b
FB
9212 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9213 !env->singlestep_enabled &&
1b530a6d 9214 !singlestep &&
2e70f6ef
PB
9215 dc->pc < next_page_start &&
9216 num_insns < max_insns);
9217
9218 if (tb->cflags & CF_LAST_IO) {
9219 if (dc->condjmp) {
9220 /* FIXME: This can theoretically happen with self-modifying
9221 code. */
9222 cpu_abort(env, "IO on conditional branch instruction");
9223 }
9224 gen_io_end();
9225 }
9ee6e8bb 9226
b5ff1b31 9227 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9228 instruction was a conditional branch or trap, and the PC has
9229 already been written. */
551bd27f 9230 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9231 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9232 if (dc->condjmp) {
9ee6e8bb
PB
9233 gen_set_condexec(dc);
9234 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9235 gen_exception(EXCP_SWI);
9ee6e8bb 9236 } else {
d9ba4830 9237 gen_exception(EXCP_DEBUG);
9ee6e8bb 9238 }
e50e6a20
FB
9239 gen_set_label(dc->condlabel);
9240 }
9241 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9242 gen_set_pc_im(dc->pc);
e50e6a20 9243 dc->condjmp = 0;
8aaca4c0 9244 }
9ee6e8bb
PB
9245 gen_set_condexec(dc);
9246 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9247 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9248 } else {
9249 /* FIXME: Single stepping a WFI insn will not halt
9250 the CPU. */
d9ba4830 9251 gen_exception(EXCP_DEBUG);
9ee6e8bb 9252 }
8aaca4c0 9253 } else {
9ee6e8bb
PB
9254 /* While branches must always occur at the end of an IT block,
9255 there are a few other things that can cause us to terminate
9256 the TB in the middle of an IT block:
9257 - Exception generating instructions (bkpt, swi, undefined).
9258 - Page boundaries.
9259 - Hardware watchpoints.
9260 Hardware breakpoints have already been handled and skip this code.
9261 */
9262 gen_set_condexec(dc);
8aaca4c0 9263 switch(dc->is_jmp) {
8aaca4c0 9264 case DISAS_NEXT:
6e256c93 9265 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9266 break;
9267 default:
9268 case DISAS_JUMP:
9269 case DISAS_UPDATE:
9270 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9271 tcg_gen_exit_tb(0);
8aaca4c0
FB
9272 break;
9273 case DISAS_TB_JUMP:
9274 /* nothing more to generate */
9275 break;
9ee6e8bb 9276 case DISAS_WFI:
d9ba4830 9277 gen_helper_wfi();
9ee6e8bb
PB
9278 break;
9279 case DISAS_SWI:
d9ba4830 9280 gen_exception(EXCP_SWI);
9ee6e8bb 9281 break;
8aaca4c0 9282 }
e50e6a20
FB
9283 if (dc->condjmp) {
9284 gen_set_label(dc->condlabel);
9ee6e8bb 9285 gen_set_condexec(dc);
6e256c93 9286 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9287 dc->condjmp = 0;
9288 }
2c0262af 9289 }
2e70f6ef 9290
9ee6e8bb 9291done_generating:
2e70f6ef 9292 gen_icount_end(tb, num_insns);
2c0262af
FB
9293 *gen_opc_ptr = INDEX_op_end;
9294
9295#ifdef DEBUG_DISAS
8fec2b8c 9296 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9297 qemu_log("----------------\n");
9298 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9299 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9300 qemu_log("\n");
2c0262af
FB
9301 }
9302#endif
b5ff1b31
FB
9303 if (search_pc) {
9304 j = gen_opc_ptr - gen_opc_buf;
9305 lj++;
9306 while (lj <= j)
9307 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9308 } else {
2c0262af 9309 tb->size = dc->pc - pc_start;
2e70f6ef 9310 tb->icount = num_insns;
b5ff1b31 9311 }
2c0262af
FB
9312}
9313
2cfc5f17 9314void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9315{
2cfc5f17 9316 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9317}
9318
2cfc5f17 9319void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9320{
2cfc5f17 9321 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9322}
9323
b5ff1b31
FB
9324static const char *cpu_mode_names[16] = {
9325 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9326 "???", "???", "???", "und", "???", "???", "???", "sys"
9327};
9ee6e8bb 9328
9a78eead 9329void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9330 int flags)
2c0262af
FB
9331{
9332 int i;
06e80fc9 9333#if 0
bc380d17 9334 union {
b7bcbe95
FB
9335 uint32_t i;
9336 float s;
9337 } s0, s1;
9338 CPU_DoubleU d;
a94a6abf
PB
9339 /* ??? This assumes float64 and double have the same layout.
9340 Oh well, it's only debug dumps. */
9341 union {
9342 float64 f64;
9343 double d;
9344 } d0;
06e80fc9 9345#endif
b5ff1b31 9346 uint32_t psr;
2c0262af
FB
9347
9348 for(i=0;i<16;i++) {
7fe48483 9349 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9350 if ((i % 4) == 3)
7fe48483 9351 cpu_fprintf(f, "\n");
2c0262af 9352 else
7fe48483 9353 cpu_fprintf(f, " ");
2c0262af 9354 }
b5ff1b31 9355 psr = cpsr_read(env);
687fa640
TS
9356 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9357 psr,
b5ff1b31
FB
9358 psr & (1 << 31) ? 'N' : '-',
9359 psr & (1 << 30) ? 'Z' : '-',
9360 psr & (1 << 29) ? 'C' : '-',
9361 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9362 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9363 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9364
5e3f878a 9365#if 0
b7bcbe95 9366 for (i = 0; i < 16; i++) {
8e96005d
FB
9367 d.d = env->vfp.regs[i];
9368 s0.i = d.l.lower;
9369 s1.i = d.l.upper;
a94a6abf
PB
9370 d0.f64 = d.d;
9371 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9372 i * 2, (int)s0.i, s0.s,
a94a6abf 9373 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9374 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9375 d0.d);
b7bcbe95 9376 }
40f137e1 9377 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9378#endif
2c0262af 9379}
a6b025d3 9380
d2856f1a
AJ
9381void gen_pc_load(CPUState *env, TranslationBlock *tb,
9382 unsigned long searched_pc, int pc_pos, void *puc)
9383{
9384 env->regs[15] = gen_opc_pc[pc_pos];
9385}