[qemu.git] / target-arm / translate.c
/*
 * ARM translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    int condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed. */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable. */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable. */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

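/* Signed 16x16->32 dual multiply: on return, a holds the product of the
   low halfwords of the original a and b, and b holds the product of the
   high halfwords. */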
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword. */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract. */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract. */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val. */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Return (b << 32) + a. Mark inputs as dead. */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    dead_tmp(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    dead_tmp(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply.  Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

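/* The CPU flags are cached in separate CPUState fields: CF holds 0 or 1,
   the N and V flags live in bit 31 of NF and VF, and Z is set when ZF is
   zero.  The helpers below and gen_test_cc() rely on this representation. */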
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF. */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively. */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0. */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
};

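/* Note that for the immediate form above, shift == 0 encodes the special
   cases from the ARM ARM: LSR #0 and ASR #0 mean a shift by 32, and ROR #0
   means RRX (rotate right by one through the carry flag). */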
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

6ddbc6e4
PB
517#define PAS_OP(pfx) \
518 switch (op2) { \
519 case 0: gen_pas_helper(glue(pfx,add16)); break; \
520 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
521 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
522 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
523 case 4: gen_pas_helper(glue(pfx,add8)); break; \
524 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
525 }
d9ba4830 526static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 527{
a7812ae4 528 TCGv_ptr tmp;
6ddbc6e4
PB
529
530 switch (op1) {
531#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
532 case 1:
a7812ae4 533 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
534 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
535 PAS_OP(s)
b75263d6 536 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
537 break;
538 case 5:
a7812ae4 539 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
540 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
541 PAS_OP(u)
b75263d6 542 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
543 break;
544#undef gen_pas_helper
545#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
546 case 2:
547 PAS_OP(q);
548 break;
549 case 3:
550 PAS_OP(sh);
551 break;
552 case 6:
553 PAS_OP(uq);
554 break;
555 case 7:
556 PAS_OP(uh);
557 break;
558#undef gen_pas_helper
559 }
560}
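/* In gen_arm_parallel_addsub above, op1 selects the variant (1: signed
   modulo, setting the GE flags; 5: unsigned modulo, setting GE; 2/6:
   signed/unsigned saturating; 3/7: signed/unsigned halving) and op2
   selects the operation.  The Thumb-2 decoder below uses the same helpers
   with the roles of op1 and op2 swapped. */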
9ee6e8bb
PB
561#undef PAS_OP
562
6ddbc6e4
PB
563/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
564#define PAS_OP(pfx) \
ed89a2f1 565 switch (op1) { \
6ddbc6e4
PB
566 case 0: gen_pas_helper(glue(pfx,add8)); break; \
567 case 1: gen_pas_helper(glue(pfx,add16)); break; \
568 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
569 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
570 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
571 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
572 }
d9ba4830 573static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 574{
a7812ae4 575 TCGv_ptr tmp;
6ddbc6e4 576
ed89a2f1 577 switch (op2) {
6ddbc6e4
PB
578#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
579 case 0:
a7812ae4 580 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
581 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
582 PAS_OP(s)
b75263d6 583 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
584 break;
585 case 4:
a7812ae4 586 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
587 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
588 PAS_OP(u)
b75263d6 589 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
590 break;
591#undef gen_pas_helper
592#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
593 case 1:
594 PAS_OP(q);
595 break;
596 case 2:
597 PAS_OP(sh);
598 break;
599 case 5:
600 PAS_OP(uq);
601 break;
602 case 6:
603 PAS_OP(uh);
604 break;
605#undef gen_pas_helper
606 }
607}
9ee6e8bb
PB
608#undef PAS_OP
609
d9ba4830
PB
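/* Generate a branch to 'label' if ARM condition code 'cc' holds, testing
   the cached NF/ZF/CF/VF flag values. */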
610static void gen_test_cc(int cc, int label)
611{
612 TCGv tmp;
613 TCGv tmp2;
d9ba4830
PB
614 int inv;
615
d9ba4830
PB
616 switch (cc) {
617 case 0: /* eq: Z */
6fbe23d5 618 tmp = load_cpu_field(ZF);
cb63669a 619 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
620 break;
621 case 1: /* ne: !Z */
6fbe23d5 622 tmp = load_cpu_field(ZF);
cb63669a 623 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
624 break;
625 case 2: /* cs: C */
626 tmp = load_cpu_field(CF);
cb63669a 627 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
628 break;
629 case 3: /* cc: !C */
630 tmp = load_cpu_field(CF);
cb63669a 631 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
632 break;
633 case 4: /* mi: N */
6fbe23d5 634 tmp = load_cpu_field(NF);
cb63669a 635 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
636 break;
637 case 5: /* pl: !N */
6fbe23d5 638 tmp = load_cpu_field(NF);
cb63669a 639 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
640 break;
641 case 6: /* vs: V */
642 tmp = load_cpu_field(VF);
cb63669a 643 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
644 break;
645 case 7: /* vc: !V */
646 tmp = load_cpu_field(VF);
cb63669a 647 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
648 break;
649 case 8: /* hi: C && !Z */
650 inv = gen_new_label();
651 tmp = load_cpu_field(CF);
cb63669a 652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 653 dead_tmp(tmp);
6fbe23d5 654 tmp = load_cpu_field(ZF);
cb63669a 655 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
656 gen_set_label(inv);
657 break;
658 case 9: /* ls: !C || Z */
659 tmp = load_cpu_field(CF);
cb63669a 660 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 661 dead_tmp(tmp);
6fbe23d5 662 tmp = load_cpu_field(ZF);
cb63669a 663 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
664 break;
665 case 10: /* ge: N == V -> N ^ V == 0 */
666 tmp = load_cpu_field(VF);
6fbe23d5 667 tmp2 = load_cpu_field(NF);
d9ba4830
PB
668 tcg_gen_xor_i32(tmp, tmp, tmp2);
669 dead_tmp(tmp2);
cb63669a 670 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
671 break;
672 case 11: /* lt: N != V -> N ^ V != 0 */
673 tmp = load_cpu_field(VF);
6fbe23d5 674 tmp2 = load_cpu_field(NF);
d9ba4830
PB
675 tcg_gen_xor_i32(tmp, tmp, tmp2);
676 dead_tmp(tmp2);
cb63669a 677 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
678 break;
679 case 12: /* gt: !Z && N == V */
680 inv = gen_new_label();
6fbe23d5 681 tmp = load_cpu_field(ZF);
cb63669a 682 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
683 dead_tmp(tmp);
684 tmp = load_cpu_field(VF);
6fbe23d5 685 tmp2 = load_cpu_field(NF);
d9ba4830
PB
686 tcg_gen_xor_i32(tmp, tmp, tmp2);
687 dead_tmp(tmp2);
cb63669a 688 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
689 gen_set_label(inv);
690 break;
691 case 13: /* le: Z || N != V */
6fbe23d5 692 tmp = load_cpu_field(ZF);
cb63669a 693 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
694 dead_tmp(tmp);
695 tmp = load_cpu_field(VF);
6fbe23d5 696 tmp2 = load_cpu_field(NF);
d9ba4830
PB
697 tcg_gen_xor_i32(tmp, tmp, tmp2);
698 dead_tmp(tmp2);
cb63669a 699 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
700 break;
701 default:
702 fprintf(stderr, "Bad condition code 0x%x\n", cc);
703 abort();
704 }
705 dead_tmp(tmp);
706}
2c0262af 707
b1d8e52e 708static const uint8_t table_logic_cc[16] = {
2c0262af
FB
709 1, /* and */
710 1, /* xor */
711 0, /* sub */
712 0, /* rsb */
713 0, /* add */
714 0, /* adc */
715 0, /* sbc */
716 0, /* rsc */
717 1, /* andl */
718 1, /* xorl */
719 0, /* cmp */
720 0, /* cmn */
721 1, /* orr */
722 1, /* mov */
723 1, /* bic */
724 1, /* mvn */
725};
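/* Nonzero entries mark the data-processing opcodes that are logical
   operations (AND, EOR, TST, TEQ, ORR, MOV, BIC, MVN); these set only the
   N and Z flags (plus the shifter carry), unlike the arithmetic opcodes. */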
3b46e624 726
d9ba4830
PB
727/* Set PC and Thumb state from an immediate address. */
728static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 729{
b26eefb6 730 TCGv tmp;
99c475ab 731
b26eefb6 732 s->is_jmp = DISAS_UPDATE;
d9ba4830 733 if (s->thumb != (addr & 1)) {
155c3eac 734 tmp = new_tmp();
d9ba4830
PB
735 tcg_gen_movi_i32(tmp, addr & 1);
736 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
155c3eac 737 dead_tmp(tmp);
d9ba4830 738 }
155c3eac 739 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
740}
741
742/* Set PC and Thumb state from var. var is marked as dead. */
743static inline void gen_bx(DisasContext *s, TCGv var)
744{
d9ba4830 745 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
746 tcg_gen_andi_i32(cpu_R[15], var, ~1);
747 tcg_gen_andi_i32(var, var, 1);
748 store_cpu_field(var, thumb);
d9ba4830
PB
749}
750
21aeb343
JR
751/* Variant of store_reg which uses branch&exchange logic when storing
752 to r15 in ARM architecture v7 and above. The source must be a temporary
753 and will be marked as dead. */
754static inline void store_reg_bx(CPUState *env, DisasContext *s,
755 int reg, TCGv var)
756{
757 if (reg == 15 && ENABLE_ARCH_7) {
758 gen_bx(s, var);
759 } else {
760 store_reg(s, reg, var);
761 }
762}
763
b0109805
PB
764static inline TCGv gen_ld8s(TCGv addr, int index)
765{
766 TCGv tmp = new_tmp();
767 tcg_gen_qemu_ld8s(tmp, addr, index);
768 return tmp;
769}
770static inline TCGv gen_ld8u(TCGv addr, int index)
771{
772 TCGv tmp = new_tmp();
773 tcg_gen_qemu_ld8u(tmp, addr, index);
774 return tmp;
775}
776static inline TCGv gen_ld16s(TCGv addr, int index)
777{
778 TCGv tmp = new_tmp();
779 tcg_gen_qemu_ld16s(tmp, addr, index);
780 return tmp;
781}
782static inline TCGv gen_ld16u(TCGv addr, int index)
783{
784 TCGv tmp = new_tmp();
785 tcg_gen_qemu_ld16u(tmp, addr, index);
786 return tmp;
787}
788static inline TCGv gen_ld32(TCGv addr, int index)
789{
790 TCGv tmp = new_tmp();
791 tcg_gen_qemu_ld32u(tmp, addr, index);
792 return tmp;
793}
84496233
JR
794static inline TCGv_i64 gen_ld64(TCGv addr, int index)
795{
796 TCGv_i64 tmp = tcg_temp_new_i64();
797 tcg_gen_qemu_ld64(tmp, addr, index);
798 return tmp;
799}
b0109805
PB
800static inline void gen_st8(TCGv val, TCGv addr, int index)
801{
802 tcg_gen_qemu_st8(val, addr, index);
803 dead_tmp(val);
804}
805static inline void gen_st16(TCGv val, TCGv addr, int index)
806{
807 tcg_gen_qemu_st16(val, addr, index);
808 dead_tmp(val);
809}
810static inline void gen_st32(TCGv val, TCGv addr, int index)
811{
812 tcg_gen_qemu_st32(val, addr, index);
813 dead_tmp(val);
814}
84496233
JR
815static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
816{
817 tcg_gen_qemu_st64(val, addr, index);
818 tcg_temp_free_i64(val);
819}
b5ff1b31 820
5e3f878a
PB
821static inline void gen_set_pc_im(uint32_t val)
822{
155c3eac 823 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
824}
825
b5ff1b31
FB
826/* Force a TB lookup after an instruction that changes the CPU state. */
827static inline void gen_lookup_tb(DisasContext *s)
828{
a6445c52 829 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
830 s->is_jmp = DISAS_UPDATE;
831}
832
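/* Add the addressing-mode offset of an ARM word/byte load or store to
   'var': either a 12-bit immediate or a shifted index register, added or
   subtracted according to the U bit.  gen_add_datah_offset below handles
   the halfword/doubleword forms with their split 8-bit immediate. */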
b0109805
PB
833static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
834 TCGv var)
2c0262af 835{
1e8d4eec 836 int val, rm, shift, shiftop;
b26eefb6 837 TCGv offset;
2c0262af
FB
838
839 if (!(insn & (1 << 25))) {
840 /* immediate */
841 val = insn & 0xfff;
842 if (!(insn & (1 << 23)))
843 val = -val;
537730b9 844 if (val != 0)
b0109805 845 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
846 } else {
847 /* shift/register */
848 rm = (insn) & 0xf;
849 shift = (insn >> 7) & 0x1f;
1e8d4eec 850 shiftop = (insn >> 5) & 3;
b26eefb6 851 offset = load_reg(s, rm);
9a119ff6 852 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 853 if (!(insn & (1 << 23)))
b0109805 854 tcg_gen_sub_i32(var, var, offset);
2c0262af 855 else
b0109805 856 tcg_gen_add_i32(var, var, offset);
b26eefb6 857 dead_tmp(offset);
2c0262af
FB
858 }
859}
860
191f9a93 861static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 862 int extra, TCGv var)
2c0262af
FB
863{
864 int val, rm;
b26eefb6 865 TCGv offset;
3b46e624 866
2c0262af
FB
867 if (insn & (1 << 22)) {
868 /* immediate */
869 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
870 if (!(insn & (1 << 23)))
871 val = -val;
18acad92 872 val += extra;
537730b9 873 if (val != 0)
b0109805 874 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
875 } else {
876 /* register */
191f9a93 877 if (extra)
b0109805 878 tcg_gen_addi_i32(var, var, extra);
2c0262af 879 rm = (insn) & 0xf;
b26eefb6 880 offset = load_reg(s, rm);
2c0262af 881 if (!(insn & (1 << 23)))
b0109805 882 tcg_gen_sub_i32(var, var, offset);
2c0262af 883 else
b0109805 884 tcg_gen_add_i32(var, var, offset);
b26eefb6 885 dead_tmp(offset);
2c0262af
FB
886 }
887}
888
4373f3ce
PB
889#define VFP_OP2(name) \
890static inline void gen_vfp_##name(int dp) \
891{ \
892 if (dp) \
893 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
894 else \
895 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
896}
897
4373f3ce
PB
898VFP_OP2(add)
899VFP_OP2(sub)
900VFP_OP2(mul)
901VFP_OP2(div)
902
903#undef VFP_OP2
904
905static inline void gen_vfp_abs(int dp)
906{
907 if (dp)
908 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
909 else
910 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
911}
912
913static inline void gen_vfp_neg(int dp)
914{
915 if (dp)
916 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
917 else
918 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
919}
920
921static inline void gen_vfp_sqrt(int dp)
922{
923 if (dp)
924 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
925 else
926 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
927}
928
929static inline void gen_vfp_cmp(int dp)
930{
931 if (dp)
932 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
933 else
934 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
935}
936
937static inline void gen_vfp_cmpe(int dp)
938{
939 if (dp)
940 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
941 else
942 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
943}
944
945static inline void gen_vfp_F1_ld0(int dp)
946{
947 if (dp)
5b340b51 948 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 949 else
5b340b51 950 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
951}
952
953static inline void gen_vfp_uito(int dp)
954{
955 if (dp)
956 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
957 else
958 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
959}
960
961static inline void gen_vfp_sito(int dp)
962{
963 if (dp)
66230e0d 964 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 965 else
66230e0d 966 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
967}
968
969static inline void gen_vfp_toui(int dp)
970{
971 if (dp)
972 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
973 else
974 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
975}
976
977static inline void gen_vfp_touiz(int dp)
978{
979 if (dp)
980 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
981 else
982 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
983}
984
985static inline void gen_vfp_tosi(int dp)
986{
987 if (dp)
988 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
989 else
990 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
991}
992
993static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
994{
995 if (dp)
4373f3ce 996 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 997 else
4373f3ce
PB
998 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
999}
1000
1001#define VFP_GEN_FIX(name) \
1002static inline void gen_vfp_##name(int dp, int shift) \
1003{ \
b75263d6 1004 TCGv tmp_shift = tcg_const_i32(shift); \
4373f3ce 1005 if (dp) \
b75263d6 1006 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
4373f3ce 1007 else \
b75263d6
JR
1008 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1009 tcg_temp_free_i32(tmp_shift); \
9ee6e8bb 1010}
4373f3ce
PB
1011VFP_GEN_FIX(tosh)
1012VFP_GEN_FIX(tosl)
1013VFP_GEN_FIX(touh)
1014VFP_GEN_FIX(toul)
1015VFP_GEN_FIX(shto)
1016VFP_GEN_FIX(slto)
1017VFP_GEN_FIX(uhto)
1018VFP_GEN_FIX(ulto)
1019#undef VFP_GEN_FIX
9ee6e8bb 1020
312eea9f 1021static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1022{
1023 if (dp)
312eea9f 1024 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1025 else
312eea9f 1026 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1027}
1028
312eea9f 1029static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1030{
1031 if (dp)
312eea9f 1032 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1033 else
312eea9f 1034 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1035}
1036
8e96005d
FB
1037static inline long
1038vfp_reg_offset (int dp, int reg)
1039{
1040 if (dp)
1041 return offsetof(CPUARMState, vfp.regs[reg]);
1042 else if (reg & 1) {
1043 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1044 + offsetof(CPU_DoubleU, l.upper);
1045 } else {
1046 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1047 + offsetof(CPU_DoubleU, l.lower);
1048 }
1049}
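/* Single-precision VFP registers are the 32-bit halves of the
   double-precision registers, so sreg N maps to the upper or lower word of
   dreg N/2 depending on whether N is odd or even. */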
9ee6e8bb
PB
1050
1051/* Return the offset of a 32-bit piece of a NEON register.
1052 zero is the least significant end of the register. */
1053static inline long
1054neon_reg_offset (int reg, int n)
1055{
1056 int sreg;
1057 sreg = reg * 2 + n;
1058 return vfp_reg_offset(0, sreg);
1059}
1060
8f8e3aa4
PB
1061static TCGv neon_load_reg(int reg, int pass)
1062{
1063 TCGv tmp = new_tmp();
1064 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1065 return tmp;
1066}
1067
1068static void neon_store_reg(int reg, int pass, TCGv var)
1069{
1070 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1071 dead_tmp(var);
1072}
1073
a7812ae4 1074static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1075{
1076 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1077}
1078
a7812ae4 1079static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1080{
1081 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1082}
1083
4373f3ce
PB
1084#define tcg_gen_ld_f32 tcg_gen_ld_i32
1085#define tcg_gen_ld_f64 tcg_gen_ld_i64
1086#define tcg_gen_st_f32 tcg_gen_st_i32
1087#define tcg_gen_st_f64 tcg_gen_st_i64
1088
b7bcbe95
FB
1089static inline void gen_mov_F0_vreg(int dp, int reg)
1090{
1091 if (dp)
4373f3ce 1092 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1093 else
4373f3ce 1094 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1095}
1096
1097static inline void gen_mov_F1_vreg(int dp, int reg)
1098{
1099 if (dp)
4373f3ce 1100 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1101 else
4373f3ce 1102 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1103}
1104
1105static inline void gen_mov_vreg_F0(int dp, int reg)
1106{
1107 if (dp)
4373f3ce 1108 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1109 else
4373f3ce 1110 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1111}
1112
18c9b560
AZ
1113#define ARM_CP_RW_BIT (1 << 20)
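/* Bit 20 is the coprocessor L bit: set for reads from the coprocessor
   (MRC/MRRC/LDC), clear for writes (MCR/MCRR/STC). */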
1114
a7812ae4 1115static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1116{
1117 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1118}
1119
a7812ae4 1120static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1121{
1122 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1123}
1124
da6b5335 1125static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1126{
da6b5335
FN
1127 TCGv var = new_tmp();
1128 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1129 return var;
e677137d
PB
1130}
1131
da6b5335 1132static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1133{
da6b5335 1134 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
d9968827 1135 dead_tmp(var);
e677137d
PB
1136}
1137
1138static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1139{
1140 iwmmxt_store_reg(cpu_M0, rn);
1141}
1142
1143static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1144{
1145 iwmmxt_load_reg(cpu_M0, rn);
1146}
1147
1148static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1149{
1150 iwmmxt_load_reg(cpu_V1, rn);
1151 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1152}
1153
1154static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1155{
1156 iwmmxt_load_reg(cpu_V1, rn);
1157 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1158}
1159
1160static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1161{
1162 iwmmxt_load_reg(cpu_V1, rn);
1163 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1164}
1165
1166#define IWMMXT_OP(name) \
1167static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1168{ \
1169 iwmmxt_load_reg(cpu_V1, rn); \
1170 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1171}
1172
1173#define IWMMXT_OP_ENV(name) \
1174static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1175{ \
1176 iwmmxt_load_reg(cpu_V1, rn); \
1177 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1178}
1179
1180#define IWMMXT_OP_ENV_SIZE(name) \
1181IWMMXT_OP_ENV(name##b) \
1182IWMMXT_OP_ENV(name##w) \
1183IWMMXT_OP_ENV(name##l)
1184
1185#define IWMMXT_OP_ENV1(name) \
1186static inline void gen_op_iwmmxt_##name##_M0(void) \
1187{ \
1188 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1189}
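/* IWMMXT_OP defines gen_op_iwmmxt_<name>_M0_wRn() wrappers around helpers
   that need no CPU state; IWMMXT_OP_ENV additionally passes cpu_env (for
   helpers that also touch CPU state such as the SIMD flags), _ENV_SIZE
   instantiates the byte/word/long variants, and _ENV1 is the
   single-operand form working on M0 only. */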
1190
1191IWMMXT_OP(maddsq)
1192IWMMXT_OP(madduq)
1193IWMMXT_OP(sadb)
1194IWMMXT_OP(sadw)
1195IWMMXT_OP(mulslw)
1196IWMMXT_OP(mulshw)
1197IWMMXT_OP(mululw)
1198IWMMXT_OP(muluhw)
1199IWMMXT_OP(macsw)
1200IWMMXT_OP(macuw)
1201
1202IWMMXT_OP_ENV_SIZE(unpackl)
1203IWMMXT_OP_ENV_SIZE(unpackh)
1204
1205IWMMXT_OP_ENV1(unpacklub)
1206IWMMXT_OP_ENV1(unpackluw)
1207IWMMXT_OP_ENV1(unpacklul)
1208IWMMXT_OP_ENV1(unpackhub)
1209IWMMXT_OP_ENV1(unpackhuw)
1210IWMMXT_OP_ENV1(unpackhul)
1211IWMMXT_OP_ENV1(unpacklsb)
1212IWMMXT_OP_ENV1(unpacklsw)
1213IWMMXT_OP_ENV1(unpacklsl)
1214IWMMXT_OP_ENV1(unpackhsb)
1215IWMMXT_OP_ENV1(unpackhsw)
1216IWMMXT_OP_ENV1(unpackhsl)
1217
1218IWMMXT_OP_ENV_SIZE(cmpeq)
1219IWMMXT_OP_ENV_SIZE(cmpgtu)
1220IWMMXT_OP_ENV_SIZE(cmpgts)
1221
1222IWMMXT_OP_ENV_SIZE(mins)
1223IWMMXT_OP_ENV_SIZE(minu)
1224IWMMXT_OP_ENV_SIZE(maxs)
1225IWMMXT_OP_ENV_SIZE(maxu)
1226
1227IWMMXT_OP_ENV_SIZE(subn)
1228IWMMXT_OP_ENV_SIZE(addn)
1229IWMMXT_OP_ENV_SIZE(subu)
1230IWMMXT_OP_ENV_SIZE(addu)
1231IWMMXT_OP_ENV_SIZE(subs)
1232IWMMXT_OP_ENV_SIZE(adds)
1233
1234IWMMXT_OP_ENV(avgb0)
1235IWMMXT_OP_ENV(avgb1)
1236IWMMXT_OP_ENV(avgw0)
1237IWMMXT_OP_ENV(avgw1)
1238
1239IWMMXT_OP(msadb)
1240
1241IWMMXT_OP_ENV(packuw)
1242IWMMXT_OP_ENV(packul)
1243IWMMXT_OP_ENV(packuq)
1244IWMMXT_OP_ENV(packsw)
1245IWMMXT_OP_ENV(packsl)
1246IWMMXT_OP_ENV(packsq)
1247
e677137d
PB
1248static void gen_op_iwmmxt_set_mup(void)
1249{
1250 TCGv tmp;
1251 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1252 tcg_gen_ori_i32(tmp, tmp, 2);
1253 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1254}
1255
1256static void gen_op_iwmmxt_set_cup(void)
1257{
1258 TCGv tmp;
1259 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1260 tcg_gen_ori_i32(tmp, tmp, 1);
1261 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1262}
1263
1264static void gen_op_iwmmxt_setpsr_nz(void)
1265{
1266 TCGv tmp = new_tmp();
1267 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1268 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1269}
1270
1271static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1272{
1273 iwmmxt_load_reg(cpu_V1, rn);
86831435 1274 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1275 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1276}
1277
da6b5335 1278static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1279{
1280 int rd;
1281 uint32_t offset;
da6b5335 1282 TCGv tmp;
18c9b560
AZ
1283
1284 rd = (insn >> 16) & 0xf;
da6b5335 1285 tmp = load_reg(s, rd);
18c9b560
AZ
1286
1287 offset = (insn & 0xff) << ((insn >> 7) & 2);
1288 if (insn & (1 << 24)) {
1289 /* Pre indexed */
1290 if (insn & (1 << 23))
da6b5335 1291 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1292 else
da6b5335
FN
1293 tcg_gen_addi_i32(tmp, tmp, -offset);
1294 tcg_gen_mov_i32(dest, tmp);
18c9b560 1295 if (insn & (1 << 21))
da6b5335
FN
1296 store_reg(s, rd, tmp);
1297 else
1298 dead_tmp(tmp);
18c9b560
AZ
1299 } else if (insn & (1 << 21)) {
1300 /* Post indexed */
da6b5335 1301 tcg_gen_mov_i32(dest, tmp);
18c9b560 1302 if (insn & (1 << 23))
da6b5335 1303 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1304 else
da6b5335
FN
1305 tcg_gen_addi_i32(tmp, tmp, -offset);
1306 store_reg(s, rd, tmp);
18c9b560
AZ
1307 } else if (!(insn & (1 << 23)))
1308 return 1;
1309 return 0;
1310}
1311
da6b5335 1312static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1313{
1314 int rd = (insn >> 0) & 0xf;
da6b5335 1315 TCGv tmp;
18c9b560 1316
da6b5335
FN
1317 if (insn & (1 << 8)) {
1318 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1319 return 1;
da6b5335
FN
1320 } else {
1321 tmp = iwmmxt_load_creg(rd);
1322 }
1323 } else {
1324 tmp = new_tmp();
1325 iwmmxt_load_reg(cpu_V0, rd);
1326 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1327 }
1328 tcg_gen_andi_i32(tmp, tmp, mask);
1329 tcg_gen_mov_i32(dest, tmp);
1330 dead_tmp(tmp);
18c9b560
AZ
1331 return 0;
1332}
1333
1334/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
1335 (i.e. an undefined instruction). */
1336static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1337{
1338 int rd, wrd;
1339 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1340 TCGv addr;
1341 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1342
1343 if ((insn & 0x0e000e00) == 0x0c000000) {
1344 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1345 wrd = insn & 0xf;
1346 rdlo = (insn >> 12) & 0xf;
1347 rdhi = (insn >> 16) & 0xf;
1348 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1349 iwmmxt_load_reg(cpu_V0, wrd);
1350 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1351 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1352 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1353 } else { /* TMCRR */
da6b5335
FN
1354 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1355 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1356 gen_op_iwmmxt_set_mup();
1357 }
1358 return 0;
1359 }
1360
1361 wrd = (insn >> 12) & 0xf;
da6b5335
FN
1362 addr = new_tmp();
1363 if (gen_iwmmxt_address(s, insn, addr)) {
1364 dead_tmp(addr);
18c9b560 1365 return 1;
da6b5335 1366 }
18c9b560
AZ
1367 if (insn & ARM_CP_RW_BIT) {
1368 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
da6b5335
FN
1369 tmp = new_tmp();
1370 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1371 iwmmxt_store_creg(wrd, tmp);
18c9b560 1372 } else {
e677137d
PB
1373 i = 1;
1374 if (insn & (1 << 8)) {
1375 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1376 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1377 i = 0;
1378 } else { /* WLDRW wRd */
da6b5335 1379 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1380 }
1381 } else {
1382 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1383 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1384 } else { /* WLDRB */
da6b5335 1385 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1386 }
1387 }
1388 if (i) {
1389 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1390 dead_tmp(tmp);
1391 }
18c9b560
AZ
1392 gen_op_iwmmxt_movq_wRn_M0(wrd);
1393 }
1394 } else {
1395 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1396 tmp = iwmmxt_load_creg(wrd);
1397 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1398 } else {
1399 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1400 tmp = new_tmp();
1401 if (insn & (1 << 8)) {
1402 if (insn & (1 << 22)) { /* WSTRD */
1403 dead_tmp(tmp);
da6b5335 1404 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1405 } else { /* WSTRW wRd */
1406 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1407 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1408 }
1409 } else {
1410 if (insn & (1 << 22)) { /* WSTRH */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1412 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1413 } else { /* WSTRB */
1414 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1415 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1416 }
1417 }
18c9b560
AZ
1418 }
1419 }
d9968827 1420 dead_tmp(addr);
18c9b560
AZ
1421 return 0;
1422 }
1423
1424 if ((insn & 0x0f000000) != 0x0e000000)
1425 return 1;
1426
1427 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1428 case 0x000: /* WOR */
1429 wrd = (insn >> 12) & 0xf;
1430 rd0 = (insn >> 0) & 0xf;
1431 rd1 = (insn >> 16) & 0xf;
1432 gen_op_iwmmxt_movq_M0_wRn(rd0);
1433 gen_op_iwmmxt_orq_M0_wRn(rd1);
1434 gen_op_iwmmxt_setpsr_nz();
1435 gen_op_iwmmxt_movq_wRn_M0(wrd);
1436 gen_op_iwmmxt_set_mup();
1437 gen_op_iwmmxt_set_cup();
1438 break;
1439 case 0x011: /* TMCR */
1440 if (insn & 0xf)
1441 return 1;
1442 rd = (insn >> 12) & 0xf;
1443 wrd = (insn >> 16) & 0xf;
1444 switch (wrd) {
1445 case ARM_IWMMXT_wCID:
1446 case ARM_IWMMXT_wCASF:
1447 break;
1448 case ARM_IWMMXT_wCon:
1449 gen_op_iwmmxt_set_cup();
1450 /* Fall through. */
1451 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1452 tmp = iwmmxt_load_creg(wrd);
1453 tmp2 = load_reg(s, rd);
f669df27 1454 tcg_gen_andc_i32(tmp, tmp, tmp2);
da6b5335
FN
1455 dead_tmp(tmp2);
1456 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1457 break;
1458 case ARM_IWMMXT_wCGR0:
1459 case ARM_IWMMXT_wCGR1:
1460 case ARM_IWMMXT_wCGR2:
1461 case ARM_IWMMXT_wCGR3:
1462 gen_op_iwmmxt_set_cup();
da6b5335
FN
1463 tmp = load_reg(s, rd);
1464 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1465 break;
1466 default:
1467 return 1;
1468 }
1469 break;
1470 case 0x100: /* WXOR */
1471 wrd = (insn >> 12) & 0xf;
1472 rd0 = (insn >> 0) & 0xf;
1473 rd1 = (insn >> 16) & 0xf;
1474 gen_op_iwmmxt_movq_M0_wRn(rd0);
1475 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1476 gen_op_iwmmxt_setpsr_nz();
1477 gen_op_iwmmxt_movq_wRn_M0(wrd);
1478 gen_op_iwmmxt_set_mup();
1479 gen_op_iwmmxt_set_cup();
1480 break;
1481 case 0x111: /* TMRC */
1482 if (insn & 0xf)
1483 return 1;
1484 rd = (insn >> 12) & 0xf;
1485 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1486 tmp = iwmmxt_load_creg(wrd);
1487 store_reg(s, rd, tmp);
18c9b560
AZ
1488 break;
1489 case 0x300: /* WANDN */
1490 wrd = (insn >> 12) & 0xf;
1491 rd0 = (insn >> 0) & 0xf;
1492 rd1 = (insn >> 16) & 0xf;
1493 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1494 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1495 gen_op_iwmmxt_andq_M0_wRn(rd1);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1500 break;
1501 case 0x200: /* WAND */
1502 wrd = (insn >> 12) & 0xf;
1503 rd0 = (insn >> 0) & 0xf;
1504 rd1 = (insn >> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0);
1506 gen_op_iwmmxt_andq_M0_wRn(rd1);
1507 gen_op_iwmmxt_setpsr_nz();
1508 gen_op_iwmmxt_movq_wRn_M0(wrd);
1509 gen_op_iwmmxt_set_mup();
1510 gen_op_iwmmxt_set_cup();
1511 break;
1512 case 0x810: case 0xa10: /* WMADD */
1513 wrd = (insn >> 12) & 0xf;
1514 rd0 = (insn >> 0) & 0xf;
1515 rd1 = (insn >> 16) & 0xf;
1516 gen_op_iwmmxt_movq_M0_wRn(rd0);
1517 if (insn & (1 << 21))
1518 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1519 else
1520 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1521 gen_op_iwmmxt_movq_wRn_M0(wrd);
1522 gen_op_iwmmxt_set_mup();
1523 break;
1524 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1525 wrd = (insn >> 12) & 0xf;
1526 rd0 = (insn >> 16) & 0xf;
1527 rd1 = (insn >> 0) & 0xf;
1528 gen_op_iwmmxt_movq_M0_wRn(rd0);
1529 switch ((insn >> 22) & 3) {
1530 case 0:
1531 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1532 break;
1533 case 1:
1534 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1535 break;
1536 case 2:
1537 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1538 break;
1539 case 3:
1540 return 1;
1541 }
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1545 break;
1546 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 16) & 0xf;
1549 rd1 = (insn >> 0) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 switch ((insn >> 22) & 3) {
1552 case 0:
1553 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1554 break;
1555 case 1:
1556 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1557 break;
1558 case 2:
1559 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1560 break;
1561 case 3:
1562 return 1;
1563 }
1564 gen_op_iwmmxt_movq_wRn_M0(wrd);
1565 gen_op_iwmmxt_set_mup();
1566 gen_op_iwmmxt_set_cup();
1567 break;
1568 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1569 wrd = (insn >> 12) & 0xf;
1570 rd0 = (insn >> 16) & 0xf;
1571 rd1 = (insn >> 0) & 0xf;
1572 gen_op_iwmmxt_movq_M0_wRn(rd0);
1573 if (insn & (1 << 22))
1574 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1575 else
1576 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1577 if (!(insn & (1 << 20)))
1578 gen_op_iwmmxt_addl_M0_wRn(wrd);
1579 gen_op_iwmmxt_movq_wRn_M0(wrd);
1580 gen_op_iwmmxt_set_mup();
1581 break;
1582 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1583 wrd = (insn >> 12) & 0xf;
1584 rd0 = (insn >> 16) & 0xf;
1585 rd1 = (insn >> 0) & 0xf;
1586 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1587 if (insn & (1 << 21)) {
1588 if (insn & (1 << 20))
1589 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1590 else
1591 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1592 } else {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1597 }
18c9b560
AZ
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 break;
1601 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 21))
1607 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1610 if (!(insn & (1 << 20))) {
e677137d
PB
1611 iwmmxt_load_reg(cpu_V1, wrd);
1612 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1613 }
1614 gen_op_iwmmxt_movq_wRn_M0(wrd);
1615 gen_op_iwmmxt_set_mup();
1616 break;
1617 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1618 wrd = (insn >> 12) & 0xf;
1619 rd0 = (insn >> 16) & 0xf;
1620 rd1 = (insn >> 0) & 0xf;
1621 gen_op_iwmmxt_movq_M0_wRn(rd0);
1622 switch ((insn >> 22) & 3) {
1623 case 0:
1624 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1625 break;
1626 case 1:
1627 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1628 break;
1629 case 2:
1630 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1631 break;
1632 case 3:
1633 return 1;
1634 }
1635 gen_op_iwmmxt_movq_wRn_M0(wrd);
1636 gen_op_iwmmxt_set_mup();
1637 gen_op_iwmmxt_set_cup();
1638 break;
1639 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1640 wrd = (insn >> 12) & 0xf;
1641 rd0 = (insn >> 16) & 0xf;
1642 rd1 = (insn >> 0) & 0xf;
1643 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1644 if (insn & (1 << 22)) {
1645 if (insn & (1 << 20))
1646 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1647 else
1648 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1649 } else {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1654 }
18c9b560
AZ
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 gen_op_iwmmxt_set_cup();
1658 break;
1659 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 16) & 0xf;
1662 rd1 = (insn >> 0) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1664 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1665 tcg_gen_andi_i32(tmp, tmp, 7);
1666 iwmmxt_load_reg(cpu_V1, rd1);
1667 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1668 dead_tmp(tmp);
18c9b560
AZ
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 break;
1672 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1673 if (((insn >> 6) & 3) == 3)
1674 return 1;
18c9b560
AZ
1675 rd = (insn >> 12) & 0xf;
1676 wrd = (insn >> 16) & 0xf;
da6b5335 1677 tmp = load_reg(s, rd);
18c9b560
AZ
1678 gen_op_iwmmxt_movq_M0_wRn(wrd);
1679 switch ((insn >> 6) & 3) {
1680 case 0:
da6b5335
FN
1681 tmp2 = tcg_const_i32(0xff);
1682 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1683 break;
1684 case 1:
da6b5335
FN
1685 tmp2 = tcg_const_i32(0xffff);
1686 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1687 break;
1688 case 2:
da6b5335
FN
1689 tmp2 = tcg_const_i32(0xffffffff);
1690 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1691 break;
da6b5335
FN
1692 default:
1693 TCGV_UNUSED(tmp2);
1694 TCGV_UNUSED(tmp3);
18c9b560 1695 }
da6b5335
FN
1696 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1697 tcg_temp_free(tmp3);
1698 tcg_temp_free(tmp2);
1699 dead_tmp(tmp);
18c9b560
AZ
1700 gen_op_iwmmxt_movq_wRn_M0(wrd);
1701 gen_op_iwmmxt_set_mup();
1702 break;
1703 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1704 rd = (insn >> 12) & 0xf;
1705 wrd = (insn >> 16) & 0xf;
da6b5335 1706 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1707 return 1;
1708 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335 1709 tmp = new_tmp();
18c9b560
AZ
1710 switch ((insn >> 22) & 3) {
1711 case 0:
da6b5335
FN
1712 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1713 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1714 if (insn & 8) {
1715 tcg_gen_ext8s_i32(tmp, tmp);
1716 } else {
1717 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1718 }
1719 break;
1720 case 1:
da6b5335
FN
1721 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1722 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1723 if (insn & 8) {
1724 tcg_gen_ext16s_i32(tmp, tmp);
1725 } else {
1726 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1727 }
1728 break;
1729 case 2:
da6b5335
FN
1730 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1731 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1732 break;
18c9b560 1733 }
da6b5335 1734 store_reg(s, rd, tmp);
18c9b560
AZ
1735 break;
1736 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1737 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1738 return 1;
da6b5335 1739 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1740 switch ((insn >> 22) & 3) {
1741 case 0:
da6b5335 1742 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1743 break;
1744 case 1:
da6b5335 1745 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1746 break;
1747 case 2:
da6b5335 1748 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1749 break;
18c9b560 1750 }
da6b5335
FN
1751 tcg_gen_shli_i32(tmp, tmp, 28);
1752 gen_set_nzcv(tmp);
1753 dead_tmp(tmp);
18c9b560
AZ
1754 break;
1755 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1756 if (((insn >> 6) & 3) == 3)
1757 return 1;
18c9b560
AZ
1758 rd = (insn >> 12) & 0xf;
1759 wrd = (insn >> 16) & 0xf;
da6b5335 1760 tmp = load_reg(s, rd);
18c9b560
AZ
1761 switch ((insn >> 6) & 3) {
1762 case 0:
da6b5335 1763 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1764 break;
1765 case 1:
da6b5335 1766 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1767 break;
1768 case 2:
da6b5335 1769 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1770 break;
18c9b560 1771 }
da6b5335 1772 dead_tmp(tmp);
18c9b560
AZ
1773 gen_op_iwmmxt_movq_wRn_M0(wrd);
1774 gen_op_iwmmxt_set_mup();
1775 break;
1776 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1777 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1778 return 1;
da6b5335
FN
1779 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1780 tmp2 = new_tmp();
1781 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1782 switch ((insn >> 22) & 3) {
1783 case 0:
1784 for (i = 0; i < 7; i ++) {
da6b5335
FN
1785 tcg_gen_shli_i32(tmp2, tmp2, 4);
1786 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1787 }
1788 break;
1789 case 1:
1790 for (i = 0; i < 3; i ++) {
da6b5335
FN
1791 tcg_gen_shli_i32(tmp2, tmp2, 8);
1792 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1793 }
1794 break;
1795 case 2:
da6b5335
FN
1796 tcg_gen_shli_i32(tmp2, tmp2, 16);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1798 break;
18c9b560 1799 }
da6b5335
FN
1800 gen_set_nzcv(tmp);
1801 dead_tmp(tmp2);
1802 dead_tmp(tmp);
18c9b560
AZ
1803 break;
1804 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1805 wrd = (insn >> 12) & 0xf;
1806 rd0 = (insn >> 16) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
1808 switch ((insn >> 22) & 3) {
1809 case 0:
e677137d 1810 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1811 break;
1812 case 1:
e677137d 1813 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1814 break;
1815 case 2:
e677137d 1816 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1817 break;
1818 case 3:
1819 return 1;
1820 }
1821 gen_op_iwmmxt_movq_wRn_M0(wrd);
1822 gen_op_iwmmxt_set_mup();
1823 break;
1824 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1825 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1826 return 1;
da6b5335
FN
1827 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1828 tmp2 = new_tmp();
1829 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1830 switch ((insn >> 22) & 3) {
1831 case 0:
1832 for (i = 0; i < 7; i ++) {
da6b5335
FN
1833 tcg_gen_shli_i32(tmp2, tmp2, 4);
1834 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1835 }
1836 break;
1837 case 1:
1838 for (i = 0; i < 3; i ++) {
da6b5335
FN
1839 tcg_gen_shli_i32(tmp2, tmp2, 8);
1840 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1841 }
1842 break;
1843 case 2:
da6b5335
FN
1844 tcg_gen_shli_i32(tmp2, tmp2, 16);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1846 break;
18c9b560 1847 }
da6b5335
FN
1848 gen_set_nzcv(tmp);
1849 dead_tmp(tmp2);
1850 dead_tmp(tmp);
18c9b560
AZ
1851 break;
1852 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1853 rd = (insn >> 12) & 0xf;
1854 rd0 = (insn >> 16) & 0xf;
da6b5335 1855 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1856 return 1;
1857 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 1858 tmp = new_tmp();
18c9b560
AZ
1859 switch ((insn >> 22) & 3) {
1860 case 0:
da6b5335 1861 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1862 break;
1863 case 1:
da6b5335 1864 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1865 break;
1866 case 2:
da6b5335 1867 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1868 break;
18c9b560 1869 }
da6b5335 1870 store_reg(s, rd, tmp);
18c9b560
AZ
1871 break;
1872 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1873 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1874 wrd = (insn >> 12) & 0xf;
1875 rd0 = (insn >> 16) & 0xf;
1876 rd1 = (insn >> 0) & 0xf;
1877 gen_op_iwmmxt_movq_M0_wRn(rd0);
1878 switch ((insn >> 22) & 3) {
1879 case 0:
1880 if (insn & (1 << 21))
1881 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1882 else
1883 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1884 break;
1885 case 1:
1886 if (insn & (1 << 21))
1887 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1888 else
1889 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1890 break;
1891 case 2:
1892 if (insn & (1 << 21))
1893 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1894 else
1895 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1896 break;
1897 case 3:
1898 return 1;
1899 }
1900 gen_op_iwmmxt_movq_wRn_M0(wrd);
1901 gen_op_iwmmxt_set_mup();
1902 gen_op_iwmmxt_set_cup();
1903 break;
1904 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1905 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1906 wrd = (insn >> 12) & 0xf;
1907 rd0 = (insn >> 16) & 0xf;
1908 gen_op_iwmmxt_movq_M0_wRn(rd0);
1909 switch ((insn >> 22) & 3) {
1910 case 0:
1911 if (insn & (1 << 21))
1912 gen_op_iwmmxt_unpacklsb_M0();
1913 else
1914 gen_op_iwmmxt_unpacklub_M0();
1915 break;
1916 case 1:
1917 if (insn & (1 << 21))
1918 gen_op_iwmmxt_unpacklsw_M0();
1919 else
1920 gen_op_iwmmxt_unpackluw_M0();
1921 break;
1922 case 2:
1923 if (insn & (1 << 21))
1924 gen_op_iwmmxt_unpacklsl_M0();
1925 else
1926 gen_op_iwmmxt_unpacklul_M0();
1927 break;
1928 case 3:
1929 return 1;
1930 }
1931 gen_op_iwmmxt_movq_wRn_M0(wrd);
1932 gen_op_iwmmxt_set_mup();
1933 gen_op_iwmmxt_set_cup();
1934 break;
1935 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1936 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1937 wrd = (insn >> 12) & 0xf;
1938 rd0 = (insn >> 16) & 0xf;
1939 gen_op_iwmmxt_movq_M0_wRn(rd0);
1940 switch ((insn >> 22) & 3) {
1941 case 0:
1942 if (insn & (1 << 21))
1943 gen_op_iwmmxt_unpackhsb_M0();
1944 else
1945 gen_op_iwmmxt_unpackhub_M0();
1946 break;
1947 case 1:
1948 if (insn & (1 << 21))
1949 gen_op_iwmmxt_unpackhsw_M0();
1950 else
1951 gen_op_iwmmxt_unpackhuw_M0();
1952 break;
1953 case 2:
1954 if (insn & (1 << 21))
1955 gen_op_iwmmxt_unpackhsl_M0();
1956 else
1957 gen_op_iwmmxt_unpackhul_M0();
1958 break;
1959 case 3:
1960 return 1;
1961 }
1962 gen_op_iwmmxt_movq_wRn_M0(wrd);
1963 gen_op_iwmmxt_set_mup();
1964 gen_op_iwmmxt_set_cup();
1965 break;
1966 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1967 case 0x214: case 0x614: case 0xa14: case 0xe14:
1968 if (((insn >> 22) & 3) == 0)
1969 return 1;
18c9b560
AZ
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1973 tmp = new_tmp();
1974 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1975 dead_tmp(tmp);
18c9b560 1976 return 1;
da6b5335 1977 }
18c9b560 1978 switch ((insn >> 22) & 3) {
18c9b560 1979 case 1:
da6b5335 1980 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1981 break;
1982 case 2:
da6b5335 1983 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1984 break;
1985 case 3:
da6b5335 1986 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1987 break;
1988 }
da6b5335 1989 dead_tmp(tmp);
18c9b560
AZ
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 gen_op_iwmmxt_set_cup();
1993 break;
1994 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1995 case 0x014: case 0x414: case 0x814: case 0xc14:
1996 if (((insn >> 22) & 3) == 0)
1997 return 1;
18c9b560
AZ
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2001 tmp = new_tmp();
2002 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2003 dead_tmp(tmp);
18c9b560 2004 return 1;
da6b5335 2005 }
18c9b560 2006 switch ((insn >> 22) & 3) {
18c9b560 2007 case 1:
da6b5335 2008 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2009 break;
2010 case 2:
da6b5335 2011 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2012 break;
2013 case 3:
da6b5335 2014 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2015 break;
2016 }
da6b5335 2017 dead_tmp(tmp);
18c9b560
AZ
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 gen_op_iwmmxt_set_mup();
2020 gen_op_iwmmxt_set_cup();
2021 break;
2022 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2023 case 0x114: case 0x514: case 0x914: case 0xd14:
2024 if (((insn >> 22) & 3) == 0)
2025 return 1;
18c9b560
AZ
2026 wrd = (insn >> 12) & 0xf;
2027 rd0 = (insn >> 16) & 0xf;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2029 tmp = new_tmp();
2030 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2031 dead_tmp(tmp);
18c9b560 2032 return 1;
da6b5335 2033 }
18c9b560 2034 switch ((insn >> 22) & 3) {
18c9b560 2035 case 1:
da6b5335 2036 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2037 break;
2038 case 2:
da6b5335 2039 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2040 break;
2041 case 3:
da6b5335 2042 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2043 break;
2044 }
da6b5335 2045 dead_tmp(tmp);
18c9b560
AZ
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 gen_op_iwmmxt_set_cup();
2049 break;
2050 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2051 case 0x314: case 0x714: case 0xb14: case 0xf14:
2052 if (((insn >> 22) & 3) == 0)
2053 return 1;
18c9b560
AZ
2054 wrd = (insn >> 12) & 0xf;
2055 rd0 = (insn >> 16) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2057 tmp = new_tmp();
18c9b560 2058 switch ((insn >> 22) & 3) {
18c9b560 2059 case 1:
2060 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2061 dead_tmp(tmp);
18c9b560 2062 return 1;
2063 }
2064 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2065 break;
2066 case 2:
2067 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2068 dead_tmp(tmp);
18c9b560 2069 return 1;
2070 }
2071 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2072 break;
2073 case 3:
2074 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2075 dead_tmp(tmp);
18c9b560 2076 return 1;
2077 }
2078 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2079 break;
2080 }
da6b5335 2081 dead_tmp(tmp);
18c9b560
AZ
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2087 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 rd1 = (insn >> 0) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 switch ((insn >> 22) & 3) {
2093 case 0:
2094 if (insn & (1 << 21))
2095 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2096 else
2097 gen_op_iwmmxt_minub_M0_wRn(rd1);
2098 break;
2099 case 1:
2100 if (insn & (1 << 21))
2101 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2102 else
2103 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2104 break;
2105 case 2:
2106 if (insn & (1 << 21))
2107 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_minul_M0_wRn(rd1);
2110 break;
2111 case 3:
2112 return 1;
2113 }
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
2117 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2118 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 rd1 = (insn >> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
2123 switch ((insn >> 22) & 3) {
2124 case 0:
2125 if (insn & (1 << 21))
2126 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2127 else
2128 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2129 break;
2130 case 1:
2131 if (insn & (1 << 21))
2132 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2133 else
2134 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2135 break;
2136 case 2:
2137 if (insn & (1 << 21))
2138 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2139 else
2140 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2141 break;
2142 case 3:
2143 return 1;
2144 }
2145 gen_op_iwmmxt_movq_wRn_M0(wrd);
2146 gen_op_iwmmxt_set_mup();
2147 break;
2148 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2149 case 0x402: case 0x502: case 0x602: case 0x702:
2150 wrd = (insn >> 12) & 0xf;
2151 rd0 = (insn >> 16) & 0xf;
2152 rd1 = (insn >> 0) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2154 tmp = tcg_const_i32((insn >> 20) & 3);
2155 iwmmxt_load_reg(cpu_V1, rd1);
2156 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2157 tcg_temp_free(tmp);
18c9b560
AZ
2158 gen_op_iwmmxt_movq_wRn_M0(wrd);
2159 gen_op_iwmmxt_set_mup();
2160 break;
2161 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2162 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2163 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2164 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2165 wrd = (insn >> 12) & 0xf;
2166 rd0 = (insn >> 16) & 0xf;
2167 rd1 = (insn >> 0) & 0xf;
2168 gen_op_iwmmxt_movq_M0_wRn(rd0);
2169 switch ((insn >> 20) & 0xf) {
2170 case 0x0:
2171 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2172 break;
2173 case 0x1:
2174 gen_op_iwmmxt_subub_M0_wRn(rd1);
2175 break;
2176 case 0x3:
2177 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2178 break;
2179 case 0x4:
2180 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2181 break;
2182 case 0x5:
2183 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2184 break;
2185 case 0x7:
2186 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2187 break;
2188 case 0x8:
2189 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2190 break;
2191 case 0x9:
2192 gen_op_iwmmxt_subul_M0_wRn(rd1);
2193 break;
2194 case 0xb:
2195 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2196 break;
2197 default:
2198 return 1;
2199 }
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2205 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2206 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2207 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2211 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2212 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2213 tcg_temp_free(tmp);
18c9b560
AZ
2214 gen_op_iwmmxt_movq_wRn_M0(wrd);
2215 gen_op_iwmmxt_set_mup();
2216 gen_op_iwmmxt_set_cup();
2217 break;
2218 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2219 case 0x418: case 0x518: case 0x618: case 0x718:
2220 case 0x818: case 0x918: case 0xa18: case 0xb18:
2221 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2222 wrd = (insn >> 12) & 0xf;
2223 rd0 = (insn >> 16) & 0xf;
2224 rd1 = (insn >> 0) & 0xf;
2225 gen_op_iwmmxt_movq_M0_wRn(rd0);
2226 switch ((insn >> 20) & 0xf) {
2227 case 0x0:
2228 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2229 break;
2230 case 0x1:
2231 gen_op_iwmmxt_addub_M0_wRn(rd1);
2232 break;
2233 case 0x3:
2234 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2235 break;
2236 case 0x4:
2237 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2238 break;
2239 case 0x5:
2240 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2241 break;
2242 case 0x7:
2243 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2244 break;
2245 case 0x8:
2246 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2247 break;
2248 case 0x9:
2249 gen_op_iwmmxt_addul_M0_wRn(rd1);
2250 break;
2251 case 0xb:
2252 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2253 break;
2254 default:
2255 return 1;
2256 }
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2262 case 0x408: case 0x508: case 0x608: case 0x708:
2263 case 0x808: case 0x908: case 0xa08: case 0xb08:
2264 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2265 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2266 return 1;
18c9b560
AZ
2267 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf;
2269 rd1 = (insn >> 0) & 0xf;
2270 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2271 switch ((insn >> 22) & 3) {
18c9b560
AZ
2272 case 1:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2277 break;
2278 case 2:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_packul_M0_wRn(rd1);
2283 break;
2284 case 3:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2289 break;
2290 }
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x201: case 0x203: case 0x205: case 0x207:
2296 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2297 case 0x211: case 0x213: case 0x215: case 0x217:
2298 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2299 wrd = (insn >> 5) & 0xf;
2300 rd0 = (insn >> 12) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 if (rd0 == 0xf || rd1 == 0xf)
2303 return 1;
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2305 tmp = load_reg(s, rd0);
2306 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2307 switch ((insn >> 16) & 0xf) {
2308 case 0x0: /* TMIA */
da6b5335 2309 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2310 break;
2311 case 0x8: /* TMIAPH */
da6b5335 2312 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2313 break;
2314 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2315 if (insn & (1 << 16))
da6b5335 2316 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2317 if (insn & (1 << 17))
2318 tcg_gen_shri_i32(tmp2, tmp2, 16);
2319 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2320 break;
2321 default:
da6b5335
FN
2322 dead_tmp(tmp2);
2323 dead_tmp(tmp);
18c9b560
AZ
2324 return 1;
2325 }
da6b5335
FN
2326 dead_tmp(tmp2);
2327 dead_tmp(tmp);
18c9b560
AZ
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 break;
2331 default:
2332 return 1;
2333 }
2334
2335 return 0;
2336}
2337
2338/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
2339   (i.e. an undefined instruction). */
2340static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2341{
2342 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2343 TCGv tmp, tmp2;
2344
2345 if ((insn & 0x0ff00f10) == 0x0e200010) {
2346 /* Multiply with Internal Accumulate Format */
2347 rd0 = (insn >> 12) & 0xf;
2348 rd1 = insn & 0xf;
2349 acc = (insn >> 5) & 7;
2350
2351 if (acc != 0)
2352 return 1;
2353
3a554c0f
FN
2354 tmp = load_reg(s, rd0);
2355 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2356 switch ((insn >> 16) & 0xf) {
2357 case 0x0: /* MIA */
3a554c0f 2358 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2359 break;
2360 case 0x8: /* MIAPH */
3a554c0f 2361 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2362 break;
2363 case 0xc: /* MIABB */
2364 case 0xd: /* MIABT */
2365 case 0xe: /* MIATB */
2366 case 0xf: /* MIATT */
18c9b560 2367 if (insn & (1 << 16))
3a554c0f 2368 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2369 if (insn & (1 << 17))
2370 tcg_gen_shri_i32(tmp2, tmp2, 16);
2371 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2372 break;
2373 default:
2374 return 1;
2375 }
3a554c0f
FN
2376 dead_tmp(tmp2);
2377 dead_tmp(tmp);
18c9b560
AZ
2378
2379 gen_op_iwmmxt_movq_wRn_M0(acc);
2380 return 0;
2381 }
2382
2383 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2384 /* Internal Accumulator Access Format */
2385 rdhi = (insn >> 16) & 0xf;
2386 rdlo = (insn >> 12) & 0xf;
2387 acc = insn & 7;
2388
2389 if (acc != 0)
2390 return 1;
2391
2392 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2393 iwmmxt_load_reg(cpu_V0, acc);
2394 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2395 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2396 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2397 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2398 } else { /* MAR */
3a554c0f
FN
2399 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2400 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2401 }
2402 return 0;
2403 }
2404
2405 return 1;
2406}
2407
2408/* Disassemble a system coprocessor instruction.  Returns nonzero if the
2409   instruction is not defined. */
2410static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2411{
b75263d6 2412 TCGv tmp, tmp2;
c1713132
AZ
2413 uint32_t rd = (insn >> 12) & 0xf;
2414 uint32_t cp = (insn >> 8) & 0xf;
2415 if (IS_USER(s)) {
2416 return 1;
2417 }
2418
18c9b560 2419 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2420 if (!env->cp[cp].cp_read)
2421 return 1;
8984bd2e
PB
2422 gen_set_pc_im(s->pc);
2423 tmp = new_tmp();
b75263d6
JR
2424 tmp2 = tcg_const_i32(insn);
2425 gen_helper_get_cp(tmp, cpu_env, tmp2);
2426 tcg_temp_free(tmp2);
8984bd2e 2427 store_reg(s, rd, tmp);
c1713132
AZ
2428 } else {
2429 if (!env->cp[cp].cp_write)
2430 return 1;
8984bd2e
PB
2431 gen_set_pc_im(s->pc);
2432 tmp = load_reg(s, rd);
b75263d6
JR
2433 tmp2 = tcg_const_i32(insn);
2434 gen_helper_set_cp(cpu_env, tmp2, tmp);
2435 tcg_temp_free(tmp2);
a60de947 2436 dead_tmp(tmp);
c1713132
AZ
2437 }
2438 return 0;
2439}
2440
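/* Check whether a cp15 access is permitted from user mode: only the
   user-accessible TLS registers and the ISB/DSB/DMB barrier encodings
   may be used outside privileged mode. */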
2441static int cp15_user_ok(uint32_t insn)
2442{
2443 int cpn = (insn >> 16) & 0xf;
2444 int cpm = insn & 0xf;
2445 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2446
2447 if (cpn == 13 && cpm == 0) {
2448 /* TLS register. */
2449 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2450 return 1;
2451 }
2452 if (cpn == 7) {
2453 /* ISB, DSB, DMB. */
2454 if ((cpm == 5 && op == 4)
2455 || (cpm == 10 && (op == 4 || op == 5)))
2456 return 1;
2457 }
2458 return 0;
2459}
2460
3f26c122
RV
2461static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2462{
2463 TCGv tmp;
2464 int cpn = (insn >> 16) & 0xf;
2465 int cpm = insn & 0xf;
2466 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2467
2468 if (!arm_feature(env, ARM_FEATURE_V6K))
2469 return 0;
2470
2471 if (!(cpn == 13 && cpm == 0))
2472 return 0;
2473
2474 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2475 switch (op) {
2476 case 2:
c5883be2 2477 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2478 break;
2479 case 3:
c5883be2 2480 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2481 break;
2482 case 4:
c5883be2 2483 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2484 break;
2485 default:
3f26c122
RV
2486 return 0;
2487 }
2488 store_reg(s, rd, tmp);
2489
2490 } else {
2491 tmp = load_reg(s, rd);
2492 switch (op) {
2493 case 2:
c5883be2 2494 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2495 break;
2496 case 3:
c5883be2 2497 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2498 break;
2499 case 4:
c5883be2 2500 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2501 break;
2502 default:
c5883be2 2503 dead_tmp(tmp);
3f26c122
RV
2504 return 0;
2505 }
3f26c122
RV
2506 }
2507 return 1;
2508}
2509
2510/* Disassemble a system coprocessor (cp15) instruction.  Returns nonzero if the
2511   instruction is not defined. */
a90b7318 2512static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2513{
2514 uint32_t rd;
b75263d6 2515 TCGv tmp, tmp2;
b5ff1b31 2516
9ee6e8bb
PB
2517 /* M profile cores use memory mapped registers instead of cp15. */
2518 if (arm_feature(env, ARM_FEATURE_M))
2519 return 1;
2520
2521 if ((insn & (1 << 25)) == 0) {
2522 if (insn & (1 << 20)) {
2523 /* mrrc */
2524 return 1;
2525 }
2526 /* mcrr. Used for block cache operations, so implement as no-op. */
2527 return 0;
2528 }
2529 if ((insn & (1 << 4)) == 0) {
2530 /* cdp */
2531 return 1;
2532 }
2533 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2534 return 1;
2535 }
9332f9da
FB
2536 if ((insn & 0x0fff0fff) == 0x0e070f90
2537 || (insn & 0x0fff0fff) == 0x0e070f58) {
2538 /* Wait for interrupt. */
8984bd2e 2539 gen_set_pc_im(s->pc);
9ee6e8bb 2540 s->is_jmp = DISAS_WFI;
9332f9da
FB
2541 return 0;
2542 }
b5ff1b31 2543 rd = (insn >> 12) & 0xf;
3f26c122
RV
2544
2545 if (cp15_tls_load_store(env, s, insn, rd))
2546 return 0;
2547
b75263d6 2548 tmp2 = tcg_const_i32(insn);
18c9b560 2549 if (insn & ARM_CP_RW_BIT) {
8984bd2e 2550 tmp = new_tmp();
b75263d6 2551 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2552 /* If the destination register is r15 then the condition codes are set. */
2553 if (rd != 15)
8984bd2e
PB
2554 store_reg(s, rd, tmp);
2555 else
2556 dead_tmp(tmp);
b5ff1b31 2557 } else {
8984bd2e 2558 tmp = load_reg(s, rd);
b75263d6 2559 gen_helper_set_cp15(cpu_env, tmp2, tmp);
8984bd2e 2560 dead_tmp(tmp);
a90b7318
AZ
2561 /* Normally we would always end the TB here, but Linux
2562 * arch/arm/mach-pxa/sleep.S expects two instructions following
2563 * an MMU enable to execute from cache. Imitate this behaviour. */
2564 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2565 (insn & 0x0fff0fff) != 0x0e010f10)
2566 gen_lookup_tb(s);
b5ff1b31 2567 }
b75263d6 2568 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2569 return 0;
2570}
2571
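/* Helpers for extracting VFP register numbers from an instruction.
   A single precision register number is 5 bits split across a 4-bit field
   and one extra bit; for double precision registers the extra bit is only
   valid on VFP3 (giving D0-D31), otherwise it must be zero. */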
2572#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2573#define VFP_SREG(insn, bigbit, smallbit) \
2574 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2575#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2576 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2577 reg = (((insn) >> (bigbit)) & 0x0f) \
2578 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2579 } else { \
2580 if (insn & (1 << (smallbit))) \
2581 return 1; \
2582 reg = ((insn) >> (bigbit)) & 0x0f; \
2583 }} while (0)
2584
2585#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2586#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2587#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2588#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2589#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2590#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2591
4373f3ce
PB
2592/* Move between integer and VFP cores. */
2593static TCGv gen_vfp_mrs(void)
2594{
2595 TCGv tmp = new_tmp();
2596 tcg_gen_mov_i32(tmp, cpu_F0s);
2597 return tmp;
2598}
2599
2600static void gen_vfp_msr(TCGv tmp)
2601{
2602 tcg_gen_mov_i32(cpu_F0s, tmp);
2603 dead_tmp(tmp);
2604}
2605
9ee6e8bb
PB
2606static inline int
2607vfp_enabled(CPUState * env)
2608{
2609 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2610}
2611
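/* Duplicate the byte at the given shift position of "var" into all four
   byte lanes of the 32-bit value. */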
2612static void gen_neon_dup_u8(TCGv var, int shift)
2613{
2614 TCGv tmp = new_tmp();
2615 if (shift)
2616 tcg_gen_shri_i32(var, var, shift);
86831435 2617 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2618 tcg_gen_shli_i32(tmp, var, 8);
2619 tcg_gen_or_i32(var, var, tmp);
2620 tcg_gen_shli_i32(tmp, var, 16);
2621 tcg_gen_or_i32(var, var, tmp);
2622 dead_tmp(tmp);
2623}
2624
2625static void gen_neon_dup_low16(TCGv var)
2626{
2627 TCGv tmp = new_tmp();
86831435 2628 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2629 tcg_gen_shli_i32(tmp, var, 16);
2630 tcg_gen_or_i32(var, var, tmp);
2631 dead_tmp(tmp);
2632}
2633
2634static void gen_neon_dup_high16(TCGv var)
2635{
2636 TCGv tmp = new_tmp();
2637 tcg_gen_andi_i32(var, var, 0xffff0000);
2638 tcg_gen_shri_i32(tmp, var, 16);
2639 tcg_gen_or_i32(var, var, tmp);
2640 dead_tmp(tmp);
2641}
2642
2643/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
2644   (i.e. an undefined instruction). */
2645static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2646{
2647 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2648 int dp, veclen;
312eea9f 2649 TCGv addr;
4373f3ce 2650 TCGv tmp;
ad69471c 2651 TCGv tmp2;
b7bcbe95 2652
40f137e1
PB
2653 if (!arm_feature(env, ARM_FEATURE_VFP))
2654 return 1;
2655
9ee6e8bb
PB
2656 if (!vfp_enabled(env)) {
2657 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2658 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2659 return 1;
2660 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2661 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2662 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2663 return 1;
2664 }
b7bcbe95
FB
2665 dp = ((insn & 0xf00) == 0xb00);
2666 switch ((insn >> 24) & 0xf) {
2667 case 0xe:
2668 if (insn & (1 << 4)) {
2669 /* single register transfer */
b7bcbe95
FB
2670 rd = (insn >> 12) & 0xf;
2671 if (dp) {
9ee6e8bb
PB
2672 int size;
2673 int pass;
2674
2675 VFP_DREG_N(rn, insn);
2676 if (insn & 0xf)
b7bcbe95 2677 return 1;
9ee6e8bb
PB
2678 if (insn & 0x00c00060
2679 && !arm_feature(env, ARM_FEATURE_NEON))
2680 return 1;
2681
2682 pass = (insn >> 21) & 1;
2683 if (insn & (1 << 22)) {
2684 size = 0;
2685 offset = ((insn >> 5) & 3) * 8;
2686 } else if (insn & (1 << 5)) {
2687 size = 1;
2688 offset = (insn & (1 << 6)) ? 16 : 0;
2689 } else {
2690 size = 2;
2691 offset = 0;
2692 }
18c9b560 2693 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2694 /* vfp->arm */
ad69471c 2695 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2696 switch (size) {
2697 case 0:
9ee6e8bb 2698 if (offset)
ad69471c 2699 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2700 if (insn & (1 << 23))
ad69471c 2701 gen_uxtb(tmp);
9ee6e8bb 2702 else
ad69471c 2703 gen_sxtb(tmp);
9ee6e8bb
PB
2704 break;
2705 case 1:
9ee6e8bb
PB
2706 if (insn & (1 << 23)) {
2707 if (offset) {
ad69471c 2708 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2709 } else {
ad69471c 2710 gen_uxth(tmp);
9ee6e8bb
PB
2711 }
2712 } else {
2713 if (offset) {
ad69471c 2714 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2715 } else {
ad69471c 2716 gen_sxth(tmp);
9ee6e8bb
PB
2717 }
2718 }
2719 break;
2720 case 2:
9ee6e8bb
PB
2721 break;
2722 }
ad69471c 2723 store_reg(s, rd, tmp);
b7bcbe95
FB
2724 } else {
2725 /* arm->vfp */
ad69471c 2726 tmp = load_reg(s, rd);
9ee6e8bb
PB
2727 if (insn & (1 << 23)) {
2728 /* VDUP */
2729 if (size == 0) {
ad69471c 2730 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2731 } else if (size == 1) {
ad69471c 2732 gen_neon_dup_low16(tmp);
9ee6e8bb 2733 }
cbbccffc
PB
2734 for (n = 0; n <= pass * 2; n++) {
2735 tmp2 = new_tmp();
2736 tcg_gen_mov_i32(tmp2, tmp);
2737 neon_store_reg(rn, n, tmp2);
2738 }
2739 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2740 } else {
2741 /* VMOV */
2742 switch (size) {
2743 case 0:
ad69471c
PB
2744 tmp2 = neon_load_reg(rn, pass);
2745 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2746 dead_tmp(tmp2);
9ee6e8bb
PB
2747 break;
2748 case 1:
ad69471c
PB
2749 tmp2 = neon_load_reg(rn, pass);
2750 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2751 dead_tmp(tmp2);
9ee6e8bb
PB
2752 break;
2753 case 2:
9ee6e8bb
PB
2754 break;
2755 }
ad69471c 2756 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2757 }
b7bcbe95 2758 }
9ee6e8bb
PB
2759 } else { /* !dp */
2760 if ((insn & 0x6f) != 0x00)
2761 return 1;
2762 rn = VFP_SREG_N(insn);
18c9b560 2763 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2764 /* vfp->arm */
2765 if (insn & (1 << 21)) {
2766 /* system register */
40f137e1 2767 rn >>= 1;
9ee6e8bb 2768
b7bcbe95 2769 switch (rn) {
40f137e1 2770 case ARM_VFP_FPSID:
4373f3ce 2771 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2772 VFP3 restricts all id registers to privileged
2773 accesses. */
2774 if (IS_USER(s)
2775 && arm_feature(env, ARM_FEATURE_VFP3))
2776 return 1;
4373f3ce 2777 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2778 break;
40f137e1 2779 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2780 if (IS_USER(s))
2781 return 1;
4373f3ce 2782 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2783 break;
40f137e1
PB
2784 case ARM_VFP_FPINST:
2785 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2786 /* Not present in VFP3. */
2787 if (IS_USER(s)
2788 || arm_feature(env, ARM_FEATURE_VFP3))
2789 return 1;
4373f3ce 2790 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2791 break;
40f137e1 2792 case ARM_VFP_FPSCR:
601d70b9 2793 if (rd == 15) {
4373f3ce
PB
2794 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2795 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2796 } else {
2797 tmp = new_tmp();
2798 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2799 }
b7bcbe95 2800 break;
9ee6e8bb
PB
2801 case ARM_VFP_MVFR0:
2802 case ARM_VFP_MVFR1:
2803 if (IS_USER(s)
2804 || !arm_feature(env, ARM_FEATURE_VFP3))
2805 return 1;
4373f3ce 2806 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2807 break;
b7bcbe95
FB
2808 default:
2809 return 1;
2810 }
2811 } else {
2812 gen_mov_F0_vreg(0, rn);
4373f3ce 2813 tmp = gen_vfp_mrs();
b7bcbe95
FB
2814 }
2815 if (rd == 15) {
b5ff1b31 2816 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2817 gen_set_nzcv(tmp);
2818 dead_tmp(tmp);
2819 } else {
2820 store_reg(s, rd, tmp);
2821 }
b7bcbe95
FB
2822 } else {
2823 /* arm->vfp */
4373f3ce 2824 tmp = load_reg(s, rd);
b7bcbe95 2825 if (insn & (1 << 21)) {
40f137e1 2826 rn >>= 1;
b7bcbe95
FB
2827 /* system register */
2828 switch (rn) {
40f137e1 2829 case ARM_VFP_FPSID:
9ee6e8bb
PB
2830 case ARM_VFP_MVFR0:
2831 case ARM_VFP_MVFR1:
b7bcbe95
FB
2832 /* Writes are ignored. */
2833 break;
40f137e1 2834 case ARM_VFP_FPSCR:
4373f3ce
PB
2835 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2836 dead_tmp(tmp);
b5ff1b31 2837 gen_lookup_tb(s);
b7bcbe95 2838 break;
40f137e1 2839 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2840 if (IS_USER(s))
2841 return 1;
71b3c3de
JR
2842 /* TODO: VFP subarchitecture support.
2843 * For now, keep the EN bit only */
2844 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2845 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2846 gen_lookup_tb(s);
2847 break;
2848 case ARM_VFP_FPINST:
2849 case ARM_VFP_FPINST2:
4373f3ce 2850 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2851 break;
b7bcbe95
FB
2852 default:
2853 return 1;
2854 }
2855 } else {
4373f3ce 2856 gen_vfp_msr(tmp);
b7bcbe95
FB
2857 gen_mov_vreg_F0(0, rn);
2858 }
2859 }
2860 }
2861 } else {
2862 /* data processing */
2863 /* The opcode is in bits 23, 21, 20 and 6. */
2864 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2865 if (dp) {
2866 if (op == 15) {
2867 /* rn is opcode */
2868 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2869 } else {
2870 /* rn is register number */
9ee6e8bb 2871 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2872 }
2873
04595bf6 2874 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2875 /* Integer or single precision destination. */
9ee6e8bb 2876 rd = VFP_SREG_D(insn);
b7bcbe95 2877 } else {
9ee6e8bb 2878 VFP_DREG_D(rd, insn);
b7bcbe95 2879 }
04595bf6
PM
2880 if (op == 15 &&
2881 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2882 /* VCVT from int is always from S reg regardless of dp bit.
2883 * VCVT with immediate frac_bits has same format as SREG_M
2884 */
2885 rm = VFP_SREG_M(insn);
b7bcbe95 2886 } else {
9ee6e8bb 2887 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2888 }
2889 } else {
9ee6e8bb 2890 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2891 if (op == 15 && rn == 15) {
2892 /* Double precision destination. */
9ee6e8bb
PB
2893 VFP_DREG_D(rd, insn);
2894 } else {
2895 rd = VFP_SREG_D(insn);
2896 }
04595bf6
PM
2897 /* NB that we implicitly rely on the encoding for the frac_bits
2898 * in VCVT of fixed to float being the same as that of an SREG_M
2899 */
9ee6e8bb 2900 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2901 }
2902
2903 veclen = env->vfp.vec_len;
2904 if (op == 15 && rn > 3)
2905 veclen = 0;
2906
2907 /* Shut up compiler warnings. */
2908 delta_m = 0;
2909 delta_d = 0;
2910 bank_mask = 0;
3b46e624 2911
b7bcbe95
FB
2912 if (veclen > 0) {
2913 if (dp)
2914 bank_mask = 0xc;
2915 else
2916 bank_mask = 0x18;
2917
2918 /* Figure out what type of vector operation this is. */
2919 if ((rd & bank_mask) == 0) {
2920 /* scalar */
2921 veclen = 0;
2922 } else {
2923 if (dp)
2924 delta_d = (env->vfp.vec_stride >> 1) + 1;
2925 else
2926 delta_d = env->vfp.vec_stride + 1;
2927
2928 if ((rm & bank_mask) == 0) {
2929 /* mixed scalar/vector */
2930 delta_m = 0;
2931 } else {
2932 /* vector */
2933 delta_m = delta_d;
2934 }
2935 }
2936 }
2937
2938 /* Load the initial operands. */
2939 if (op == 15) {
2940 switch (rn) {
2941 case 16:
2942 case 17:
2943 /* Integer source */
2944 gen_mov_F0_vreg(0, rm);
2945 break;
2946 case 8:
2947 case 9:
2948 /* Compare */
2949 gen_mov_F0_vreg(dp, rd);
2950 gen_mov_F1_vreg(dp, rm);
2951 break;
2952 case 10:
2953 case 11:
2954 /* Compare with zero */
2955 gen_mov_F0_vreg(dp, rd);
2956 gen_vfp_F1_ld0(dp);
2957 break;
9ee6e8bb
PB
2958 case 20:
2959 case 21:
2960 case 22:
2961 case 23:
644ad806
PB
2962 case 28:
2963 case 29:
2964 case 30:
2965 case 31:
9ee6e8bb
PB
2966 /* Source and destination the same. */
2967 gen_mov_F0_vreg(dp, rd);
2968 break;
b7bcbe95
FB
2969 default:
2970 /* One source operand. */
2971 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2972 break;
b7bcbe95
FB
2973 }
2974 } else {
2975 /* Two source operands. */
2976 gen_mov_F0_vreg(dp, rn);
2977 gen_mov_F1_vreg(dp, rm);
2978 }
2979
2980 for (;;) {
2981 /* Perform the calculation. */
2982 switch (op) {
2983 case 0: /* mac: fd + (fn * fm) */
2984 gen_vfp_mul(dp);
2985 gen_mov_F1_vreg(dp, rd);
2986 gen_vfp_add(dp);
2987 break;
2988 case 1: /* nmac: fd - (fn * fm) */
2989 gen_vfp_mul(dp);
2990 gen_vfp_neg(dp);
2991 gen_mov_F1_vreg(dp, rd);
2992 gen_vfp_add(dp);
2993 break;
2994 case 2: /* msc: -fd + (fn * fm) */
2995 gen_vfp_mul(dp);
2996 gen_mov_F1_vreg(dp, rd);
2997 gen_vfp_sub(dp);
2998 break;
2999 case 3: /* nmsc: -fd - (fn * fm) */
3000 gen_vfp_mul(dp);
b7bcbe95 3001 gen_vfp_neg(dp);
c9fb531a
PB
3002 gen_mov_F1_vreg(dp, rd);
3003 gen_vfp_sub(dp);
b7bcbe95
FB
3004 break;
3005 case 4: /* mul: fn * fm */
3006 gen_vfp_mul(dp);
3007 break;
3008 case 5: /* nmul: -(fn * fm) */
3009 gen_vfp_mul(dp);
3010 gen_vfp_neg(dp);
3011 break;
3012 case 6: /* add: fn + fm */
3013 gen_vfp_add(dp);
3014 break;
3015 case 7: /* sub: fn - fm */
3016 gen_vfp_sub(dp);
3017 break;
3018 case 8: /* div: fn / fm */
3019 gen_vfp_div(dp);
3020 break;
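            /* VMOV immediate: expand the 8-bit encoded constant (sign,
               exponent pattern, 4-bit fraction) into a full single or
               double precision value. */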
3021 case 14: /* fconst */
3022 if (!arm_feature(env, ARM_FEATURE_VFP3))
3023 return 1;
3024
3025 n = (insn << 12) & 0x80000000;
3026 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3027 if (dp) {
3028 if (i & 0x40)
3029 i |= 0x3f80;
3030 else
3031 i |= 0x4000;
3032 n |= i << 16;
4373f3ce 3033 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3034 } else {
3035 if (i & 0x40)
3036 i |= 0x780;
3037 else
3038 i |= 0x800;
3039 n |= i << 19;
5b340b51 3040 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3041 }
9ee6e8bb 3042 break;
b7bcbe95
FB
3043 case 15: /* extension space */
3044 switch (rn) {
3045 case 0: /* cpy */
3046 /* no-op */
3047 break;
3048 case 1: /* abs */
3049 gen_vfp_abs(dp);
3050 break;
3051 case 2: /* neg */
3052 gen_vfp_neg(dp);
3053 break;
3054 case 3: /* sqrt */
3055 gen_vfp_sqrt(dp);
3056 break;
60011498
PB
3057 case 4: /* vcvtb.f32.f16 */
3058 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3059 return 1;
3060 tmp = gen_vfp_mrs();
3061 tcg_gen_ext16u_i32(tmp, tmp);
3062 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3063 dead_tmp(tmp);
3064 break;
3065 case 5: /* vcvtt.f32.f16 */
3066 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3067 return 1;
3068 tmp = gen_vfp_mrs();
3069 tcg_gen_shri_i32(tmp, tmp, 16);
3070 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3071 dead_tmp(tmp);
3072 break;
3073 case 6: /* vcvtb.f16.f32 */
3074 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3075 return 1;
3076 tmp = new_tmp();
3077 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3078 gen_mov_F0_vreg(0, rd);
3079 tmp2 = gen_vfp_mrs();
3080 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3081 tcg_gen_or_i32(tmp, tmp, tmp2);
3082 dead_tmp(tmp2);
3083 gen_vfp_msr(tmp);
3084 break;
3085 case 7: /* vcvtt.f16.f32 */
3086 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3087 return 1;
3088 tmp = new_tmp();
3089 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3090 tcg_gen_shli_i32(tmp, tmp, 16);
3091 gen_mov_F0_vreg(0, rd);
3092 tmp2 = gen_vfp_mrs();
3093 tcg_gen_ext16u_i32(tmp2, tmp2);
3094 tcg_gen_or_i32(tmp, tmp, tmp2);
3095 dead_tmp(tmp2);
3096 gen_vfp_msr(tmp);
3097 break;
b7bcbe95
FB
3098 case 8: /* cmp */
3099 gen_vfp_cmp(dp);
3100 break;
3101 case 9: /* cmpe */
3102 gen_vfp_cmpe(dp);
3103 break;
3104 case 10: /* cmpz */
3105 gen_vfp_cmp(dp);
3106 break;
3107 case 11: /* cmpez */
3108 gen_vfp_F1_ld0(dp);
3109 gen_vfp_cmpe(dp);
3110 break;
3111 case 15: /* single<->double conversion */
3112 if (dp)
4373f3ce 3113 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3114 else
4373f3ce 3115 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3116 break;
3117 case 16: /* fuito */
3118 gen_vfp_uito(dp);
3119 break;
3120 case 17: /* fsito */
3121 gen_vfp_sito(dp);
3122 break;
9ee6e8bb
PB
3123 case 20: /* fshto */
3124 if (!arm_feature(env, ARM_FEATURE_VFP3))
3125 return 1;
644ad806 3126 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3127 break;
3128 case 21: /* fslto */
3129 if (!arm_feature(env, ARM_FEATURE_VFP3))
3130 return 1;
644ad806 3131 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3132 break;
3133 case 22: /* fuhto */
3134 if (!arm_feature(env, ARM_FEATURE_VFP3))
3135 return 1;
644ad806 3136 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3137 break;
3138 case 23: /* fulto */
3139 if (!arm_feature(env, ARM_FEATURE_VFP3))
3140 return 1;
644ad806 3141 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3142 break;
b7bcbe95
FB
3143 case 24: /* ftoui */
3144 gen_vfp_toui(dp);
3145 break;
3146 case 25: /* ftouiz */
3147 gen_vfp_touiz(dp);
3148 break;
3149 case 26: /* ftosi */
3150 gen_vfp_tosi(dp);
3151 break;
3152 case 27: /* ftosiz */
3153 gen_vfp_tosiz(dp);
3154 break;
9ee6e8bb
PB
3155 case 28: /* ftosh */
3156 if (!arm_feature(env, ARM_FEATURE_VFP3))
3157 return 1;
644ad806 3158 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3159 break;
3160 case 29: /* ftosl */
3161 if (!arm_feature(env, ARM_FEATURE_VFP3))
3162 return 1;
644ad806 3163 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3164 break;
3165 case 30: /* ftouh */
3166 if (!arm_feature(env, ARM_FEATURE_VFP3))
3167 return 1;
644ad806 3168 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3169 break;
3170 case 31: /* ftoul */
3171 if (!arm_feature(env, ARM_FEATURE_VFP3))
3172 return 1;
644ad806 3173 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3174 break;
b7bcbe95
FB
3175 default: /* undefined */
3176 printf ("rn:%d\n", rn);
3177 return 1;
3178 }
3179 break;
3180 default: /* undefined */
3181 printf ("op:%d\n", op);
3182 return 1;
3183 }
3184
3185 /* Write back the result. */
3186 if (op == 15 && (rn >= 8 && rn <= 11))
3187 ; /* Comparison, do nothing. */
04595bf6
PM
3188 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3189 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3190 gen_mov_vreg_F0(0, rd);
3191 else if (op == 15 && rn == 15)
3192 /* conversion */
3193 gen_mov_vreg_F0(!dp, rd);
3194 else
3195 gen_mov_vreg_F0(dp, rd);
3196
3197 /* break out of the loop if we have finished */
3198 if (veclen == 0)
3199 break;
3200
3201 if (op == 15 && delta_m == 0) {
3202 /* single source one-many */
3203 while (veclen--) {
3204 rd = ((rd + delta_d) & (bank_mask - 1))
3205 | (rd & bank_mask);
3206 gen_mov_vreg_F0(dp, rd);
3207 }
3208 break;
3209 }
3210            /* Set up the next operands. */
3211 veclen--;
3212 rd = ((rd + delta_d) & (bank_mask - 1))
3213 | (rd & bank_mask);
3214
3215 if (op == 15) {
3216 /* One source operand. */
3217 rm = ((rm + delta_m) & (bank_mask - 1))
3218 | (rm & bank_mask);
3219 gen_mov_F0_vreg(dp, rm);
3220 } else {
3221 /* Two source operands. */
3222 rn = ((rn + delta_d) & (bank_mask - 1))
3223 | (rn & bank_mask);
3224 gen_mov_F0_vreg(dp, rn);
3225 if (delta_m) {
3226 rm = ((rm + delta_m) & (bank_mask - 1))
3227 | (rm & bank_mask);
3228 gen_mov_F1_vreg(dp, rm);
3229 }
3230 }
3231 }
3232 }
3233 break;
3234 case 0xc:
3235 case 0xd:
9ee6e8bb 3236 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3237 /* two-register transfer */
3238 rn = (insn >> 16) & 0xf;
3239 rd = (insn >> 12) & 0xf;
3240 if (dp) {
9ee6e8bb
PB
3241 VFP_DREG_M(rm, insn);
3242 } else {
3243 rm = VFP_SREG_M(insn);
3244 }
b7bcbe95 3245
18c9b560 3246 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3247 /* vfp->arm */
3248 if (dp) {
4373f3ce
PB
3249 gen_mov_F0_vreg(0, rm * 2);
3250 tmp = gen_vfp_mrs();
3251 store_reg(s, rd, tmp);
3252 gen_mov_F0_vreg(0, rm * 2 + 1);
3253 tmp = gen_vfp_mrs();
3254 store_reg(s, rn, tmp);
b7bcbe95
FB
3255 } else {
3256 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3257 tmp = gen_vfp_mrs();
3258 store_reg(s, rn, tmp);
b7bcbe95 3259 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3260 tmp = gen_vfp_mrs();
3261 store_reg(s, rd, tmp);
b7bcbe95
FB
3262 }
3263 } else {
3264 /* arm->vfp */
3265 if (dp) {
4373f3ce
PB
3266 tmp = load_reg(s, rd);
3267 gen_vfp_msr(tmp);
3268 gen_mov_vreg_F0(0, rm * 2);
3269 tmp = load_reg(s, rn);
3270 gen_vfp_msr(tmp);
3271 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3272 } else {
4373f3ce
PB
3273 tmp = load_reg(s, rn);
3274 gen_vfp_msr(tmp);
b7bcbe95 3275 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3276 tmp = load_reg(s, rd);
3277 gen_vfp_msr(tmp);
b7bcbe95
FB
3278 gen_mov_vreg_F0(0, rm + 1);
3279 }
3280 }
3281 } else {
3282 /* Load/store */
3283 rn = (insn >> 16) & 0xf;
3284 if (dp)
9ee6e8bb 3285 VFP_DREG_D(rd, insn);
b7bcbe95 3286 else
9ee6e8bb
PB
3287 rd = VFP_SREG_D(insn);
3288 if (s->thumb && rn == 15) {
312eea9f
FN
3289 addr = new_tmp();
3290 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3291 } else {
312eea9f 3292 addr = load_reg(s, rn);
9ee6e8bb 3293 }
b7bcbe95
FB
3294 if ((insn & 0x01200000) == 0x01000000) {
3295 /* Single load/store */
3296 offset = (insn & 0xff) << 2;
3297 if ((insn & (1 << 23)) == 0)
3298 offset = -offset;
312eea9f 3299 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3300 if (insn & (1 << 20)) {
312eea9f 3301 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3302 gen_mov_vreg_F0(dp, rd);
3303 } else {
3304 gen_mov_F0_vreg(dp, rd);
312eea9f 3305 gen_vfp_st(s, dp, addr);
b7bcbe95 3306 }
312eea9f 3307 dead_tmp(addr);
b7bcbe95
FB
3308 } else {
3309 /* load/store multiple */
3310 if (dp)
3311 n = (insn >> 1) & 0x7f;
3312 else
3313 n = insn & 0xff;
3314
3315 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3316 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3317
3318 if (dp)
3319 offset = 8;
3320 else
3321 offset = 4;
3322 for (i = 0; i < n; i++) {
18c9b560 3323 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3324 /* load */
312eea9f 3325 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3326 gen_mov_vreg_F0(dp, rd + i);
3327 } else {
3328 /* store */
3329 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3330 gen_vfp_st(s, dp, addr);
b7bcbe95 3331 }
312eea9f 3332 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3333 }
3334 if (insn & (1 << 21)) {
3335 /* writeback */
3336 if (insn & (1 << 24))
3337 offset = -offset * n;
3338 else if (dp && (insn & 1))
3339 offset = 4;
3340 else
3341 offset = 0;
3342
3343 if (offset != 0)
312eea9f
FN
3344 tcg_gen_addi_i32(addr, addr, offset);
3345 store_reg(s, rn, addr);
3346 } else {
3347 dead_tmp(addr);
b7bcbe95
FB
3348 }
3349 }
3350 }
3351 break;
3352 default:
3353 /* Should never happen. */
3354 return 1;
3355 }
3356 return 0;
3357}
3358
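/* Branch to "dest": chain directly to the next TB when it lies in the same
   guest page as the current one, otherwise reload the PC and take an
   unchained exit. */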
6e256c93 3359static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3360{
6e256c93
FB
3361 TranslationBlock *tb;
3362
3363 tb = s->tb;
3364 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3365 tcg_gen_goto_tb(n);
8984bd2e 3366 gen_set_pc_im(dest);
57fec1fe 3367 tcg_gen_exit_tb((long)tb + n);
6e256c93 3368 } else {
8984bd2e 3369 gen_set_pc_im(dest);
57fec1fe 3370 tcg_gen_exit_tb(0);
6e256c93 3371 }
c53be334
FB
3372}
3373
8aaca4c0
FB
3374static inline void gen_jmp (DisasContext *s, uint32_t dest)
3375{
551bd27f 3376 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3377 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3378 if (s->thumb)
d9ba4830
PB
3379 dest |= 1;
3380 gen_bx_im(s, dest);
8aaca4c0 3381 } else {
6e256c93 3382 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3383 s->is_jmp = DISAS_TB_JUMP;
3384 }
3385}
3386
d9ba4830 3387static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3388{
ee097184 3389 if (x)
d9ba4830 3390 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3391 else
d9ba4830 3392 gen_sxth(t0);
ee097184 3393 if (y)
d9ba4830 3394 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3395 else
d9ba4830
PB
3396 gen_sxth(t1);
3397 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3398}
3399
3400/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3401static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3402 uint32_t mask;
3403
3404 mask = 0;
3405 if (flags & (1 << 0))
3406 mask |= 0xff;
3407 if (flags & (1 << 1))
3408 mask |= 0xff00;
3409 if (flags & (1 << 2))
3410 mask |= 0xff0000;
3411 if (flags & (1 << 3))
3412 mask |= 0xff000000;
9ee6e8bb 3413
2ae23e75 3414 /* Mask out undefined bits. */
9ee6e8bb
PB
3415 mask &= ~CPSR_RESERVED;
3416 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3417 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3418 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3419 mask &= ~CPSR_IT;
9ee6e8bb 3420 /* Mask out execution state bits. */
2ae23e75 3421 if (!spsr)
e160c51c 3422 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3423 /* Mask out privileged bits. */
3424 if (IS_USER(s))
9ee6e8bb 3425 mask &= CPSR_USER;
b5ff1b31
FB
3426 return mask;
3427}
3428
2fbac54b
FN
3429/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3430static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3431{
d9ba4830 3432 TCGv tmp;
b5ff1b31
FB
3433 if (spsr) {
3434 /* ??? This is also undefined in system mode. */
3435 if (IS_USER(s))
3436 return 1;
d9ba4830
PB
3437
3438 tmp = load_cpu_field(spsr);
3439 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3440 tcg_gen_andi_i32(t0, t0, mask);
3441 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3442 store_cpu_field(tmp, spsr);
b5ff1b31 3443 } else {
2fbac54b 3444 gen_set_cpsr(t0, mask);
b5ff1b31 3445 }
2fbac54b 3446 dead_tmp(t0);
b5ff1b31
FB
3447 gen_lookup_tb(s);
3448 return 0;
3449}
3450
2fbac54b
FN
3451/* Returns nonzero if access to the PSR is not permitted. */
3452static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3453{
3454 TCGv tmp;
3455 tmp = new_tmp();
3456 tcg_gen_movi_i32(tmp, val);
3457 return gen_set_psr(s, mask, spsr, tmp);
3458}
3459
e9bb4aa9
JR
3460/* Generate an old-style exception return. Marks pc as dead. */
3461static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3462{
d9ba4830 3463 TCGv tmp;
e9bb4aa9 3464 store_reg(s, 15, pc);
d9ba4830
PB
3465 tmp = load_cpu_field(spsr);
3466 gen_set_cpsr(tmp, 0xffffffff);
3467 dead_tmp(tmp);
b5ff1b31
FB
3468 s->is_jmp = DISAS_UPDATE;
3469}
3470
b0109805
PB
3471/* Generate a v6 exception return. Marks both values as dead. */
3472static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3473{
b0109805
PB
3474 gen_set_cpsr(cpsr, 0xffffffff);
3475 dead_tmp(cpsr);
3476 store_reg(s, 15, pc);
9ee6e8bb
PB
3477 s->is_jmp = DISAS_UPDATE;
3478}
3b46e624 3479
9ee6e8bb
PB
3480static inline void
3481gen_set_condexec (DisasContext *s)
3482{
3483 if (s->condexec_mask) {
8f01245e
PB
3484 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3485 TCGv tmp = new_tmp();
3486 tcg_gen_movi_i32(tmp, val);
d9ba4830 3487 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3488 }
3489}
3b46e624 3490
9ee6e8bb
PB
3491static void gen_nop_hint(DisasContext *s, int val)
3492{
3493 switch (val) {
3494 case 3: /* wfi */
8984bd2e 3495 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3496 s->is_jmp = DISAS_WFI;
3497 break;
3498 case 2: /* wfe */
3499 case 4: /* sev */
3500 /* TODO: Implement SEV and WFE. May help SMP performance. */
3501 default: /* nop */
3502 break;
3503 }
3504}
99c475ab 3505
ad69471c 3506#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3507
dd8fbd78 3508static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3509{
3510 switch (size) {
dd8fbd78
FN
3511 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3512 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3513 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3514 default: return 1;
3515 }
3516 return 0;
3517}
3518
dd8fbd78 3519static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3520{
3521 switch (size) {
dd8fbd78
FN
3522 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3523 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3524 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3525 default: return;
3526 }
3527}
3528
3529/* 32-bit pairwise ops end up the same as the elementwise versions. */
3530#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3531#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3532#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3533#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3534
3535/* FIXME: This is wrong. They set the wrong overflow bit. */
3536#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3537#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3538#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3539#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3540
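/* Invoke the Neon integer helper selected by element size and signedness,
   encoded as (size << 1) | u; the _ENV variant also passes cpu_env to
   helpers that need access to the CPU state. */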
3541#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3542 switch ((size << 1) | u) { \
3543 case 0: \
dd8fbd78 3544 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3545 break; \
3546 case 1: \
dd8fbd78 3547 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3548 break; \
3549 case 2: \
dd8fbd78 3550 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3551 break; \
3552 case 3: \
dd8fbd78 3553 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3554 break; \
3555 case 4: \
dd8fbd78 3556 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3557 break; \
3558 case 5: \
dd8fbd78 3559 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3560 break; \
3561 default: return 1; \
3562 }} while (0)
9ee6e8bb
PB
3563
3564#define GEN_NEON_INTEGER_OP(name) do { \
3565 switch ((size << 1) | u) { \
ad69471c 3566 case 0: \
dd8fbd78 3567 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3568 break; \
3569 case 1: \
dd8fbd78 3570 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3571 break; \
3572 case 2: \
dd8fbd78 3573 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3574 break; \
3575 case 3: \
dd8fbd78 3576 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3577 break; \
3578 case 4: \
dd8fbd78 3579 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3580 break; \
3581 case 5: \
dd8fbd78 3582 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3583 break; \
9ee6e8bb
PB
3584 default: return 1; \
3585 }} while (0)
3586
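/* Spill and reload 32-bit values to the vfp.scratch slots in CPUARMState,
   used as temporary storage while rearranging Neon registers. */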
dd8fbd78 3587static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3588{
dd8fbd78
FN
3589 TCGv tmp = new_tmp();
3590 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3591 return tmp;
9ee6e8bb
PB
3592}
3593
dd8fbd78 3594static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3595{
dd8fbd78
FN
3596 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3597 dead_tmp(var);
3598}
3599
dd8fbd78 3600static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3601{
dd8fbd78 3602 TCGv tmp;
9ee6e8bb 3603 if (size == 1) {
dd8fbd78 3604 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3605 } else {
dd8fbd78
FN
3606 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3607 if (reg & 1) {
3608 gen_neon_dup_low16(tmp);
3609 } else {
3610 gen_neon_dup_high16(tmp);
3611 }
9ee6e8bb 3612 }
dd8fbd78 3613 return tmp;
9ee6e8bb
PB
3614}
3615
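/* Element interleave (zip) and deinterleave (unzip) helpers operating on
   a pair of 32-bit vector halves. */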
3616static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3617{
3618 TCGv rd, rm, tmp;
3619
3620 rd = new_tmp();
3621 rm = new_tmp();
3622 tmp = new_tmp();
3623
3624 tcg_gen_andi_i32(rd, t0, 0xff);
3625 tcg_gen_shri_i32(tmp, t0, 8);
3626 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3627 tcg_gen_or_i32(rd, rd, tmp);
3628 tcg_gen_shli_i32(tmp, t1, 16);
3629 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3630 tcg_gen_or_i32(rd, rd, tmp);
3631 tcg_gen_shli_i32(tmp, t1, 8);
3632 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3633 tcg_gen_or_i32(rd, rd, tmp);
3634
3635 tcg_gen_shri_i32(rm, t0, 8);
3636 tcg_gen_andi_i32(rm, rm, 0xff);
3637 tcg_gen_shri_i32(tmp, t0, 16);
3638 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3639 tcg_gen_or_i32(rm, rm, tmp);
3640 tcg_gen_shli_i32(tmp, t1, 8);
3641 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3642 tcg_gen_or_i32(rm, rm, tmp);
3643 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3644 tcg_gen_or_i32(t1, rm, tmp);
3645 tcg_gen_mov_i32(t0, rd);
3646
3647 dead_tmp(tmp);
3648 dead_tmp(rm);
3649 dead_tmp(rd);
3650}
3651
3652static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3653{
3654 TCGv rd, rm, tmp;
3655
3656 rd = new_tmp();
3657 rm = new_tmp();
3658 tmp = new_tmp();
3659
3660 tcg_gen_andi_i32(rd, t0, 0xff);
3661 tcg_gen_shli_i32(tmp, t1, 8);
3662 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3663 tcg_gen_or_i32(rd, rd, tmp);
3664 tcg_gen_shli_i32(tmp, t0, 16);
3665 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3666 tcg_gen_or_i32(rd, rd, tmp);
3667 tcg_gen_shli_i32(tmp, t1, 24);
3668 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3669 tcg_gen_or_i32(rd, rd, tmp);
3670
3671 tcg_gen_andi_i32(rm, t1, 0xff000000);
3672 tcg_gen_shri_i32(tmp, t0, 8);
3673 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3674 tcg_gen_or_i32(rm, rm, tmp);
3675 tcg_gen_shri_i32(tmp, t1, 8);
3676 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3677 tcg_gen_or_i32(rm, rm, tmp);
3678 tcg_gen_shri_i32(tmp, t0, 16);
3679 tcg_gen_andi_i32(tmp, tmp, 0xff);
3680 tcg_gen_or_i32(t1, rm, tmp);
3681 tcg_gen_mov_i32(t0, rd);
3682
3683 dead_tmp(tmp);
3684 dead_tmp(rm);
3685 dead_tmp(rd);
3686}
3687
3688static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3689{
3690 TCGv tmp, tmp2;
3691
3692 tmp = new_tmp();
3693 tmp2 = new_tmp();
3694
3695 tcg_gen_andi_i32(tmp, t0, 0xffff);
3696 tcg_gen_shli_i32(tmp2, t1, 16);
3697 tcg_gen_or_i32(tmp, tmp, tmp2);
3698 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3699 tcg_gen_shri_i32(tmp2, t0, 16);
3700 tcg_gen_or_i32(t1, t1, tmp2);
3701 tcg_gen_mov_i32(t0, tmp);
3702
3703 dead_tmp(tmp2);
3704 dead_tmp(tmp);
3705}
3706
9ee6e8bb
PB
3707static void gen_neon_unzip(int reg, int q, int tmp, int size)
3708{
3709 int n;
dd8fbd78 3710 TCGv t0, t1;
9ee6e8bb
PB
3711
3712 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3713 t0 = neon_load_reg(reg, n);
3714 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3715 switch (size) {
dd8fbd78
FN
3716 case 0: gen_neon_unzip_u8(t0, t1); break;
3717 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3718 case 2: /* no-op */; break;
3719 default: abort();
3720 }
dd8fbd78
FN
3721 neon_store_scratch(tmp + n, t0);
3722 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3723 }
3724}
3725
19457615
FN
3726static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3727{
3728 TCGv rd, tmp;
3729
3730 rd = new_tmp();
3731 tmp = new_tmp();
3732
3733 tcg_gen_shli_i32(rd, t0, 8);
3734 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3735 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3736 tcg_gen_or_i32(rd, rd, tmp);
3737
3738 tcg_gen_shri_i32(t1, t1, 8);
3739 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3740 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3741 tcg_gen_or_i32(t1, t1, tmp);
3742 tcg_gen_mov_i32(t0, rd);
3743
3744 dead_tmp(tmp);
3745 dead_tmp(rd);
3746}
3747
3748static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3749{
3750 TCGv rd, tmp;
3751
3752 rd = new_tmp();
3753 tmp = new_tmp();
3754
3755 tcg_gen_shli_i32(rd, t0, 16);
3756 tcg_gen_andi_i32(tmp, t1, 0xffff);
3757 tcg_gen_or_i32(rd, rd, tmp);
3758 tcg_gen_shri_i32(t1, t1, 16);
3759 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3760 tcg_gen_or_i32(t1, t1, tmp);
3761 tcg_gen_mov_i32(t0, rd);
3762
3763 dead_tmp(tmp);
3764 dead_tmp(rd);
3765}
3766
3767
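/* Register count, interleave pattern and register spacing for each of the
   VLDn/VSTn "multiple structures" forms, indexed by the op field. */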
3768static struct {
3769 int nregs;
3770 int interleave;
3771 int spacing;
3772} neon_ls_element_type[11] = {
3773 {4, 4, 1},
3774 {4, 4, 2},
3775 {4, 1, 1},
3776 {4, 2, 1},
3777 {3, 3, 1},
3778 {3, 3, 2},
3779 {3, 1, 1},
3780 {1, 1, 1},
3781 {2, 2, 1},
3782 {2, 2, 2},
3783 {2, 1, 1}
3784};
3785
3786/* Translate a NEON load/store element instruction. Return nonzero if the
3787 instruction is invalid. */
3788static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3789{
3790 int rd, rn, rm;
3791 int op;
3792 int nregs;
3793 int interleave;
84496233 3794 int spacing;
9ee6e8bb
PB
3795 int stride;
3796 int size;
3797 int reg;
3798 int pass;
3799 int load;
3800 int shift;
9ee6e8bb 3801 int n;
1b2b1e54 3802 TCGv addr;
b0109805 3803 TCGv tmp;
8f8e3aa4 3804 TCGv tmp2;
84496233 3805 TCGv_i64 tmp64;
9ee6e8bb
PB
3806
3807 if (!vfp_enabled(env))
3808 return 1;
3809 VFP_DREG_D(rd, insn);
3810 rn = (insn >> 16) & 0xf;
3811 rm = insn & 0xf;
3812 load = (insn & (1 << 21)) != 0;
1b2b1e54 3813 addr = new_tmp();
9ee6e8bb
PB
3814 if ((insn & (1 << 23)) == 0) {
3815 /* Load store all elements. */
3816 op = (insn >> 8) & 0xf;
3817 size = (insn >> 6) & 3;
84496233 3818 if (op > 10)
9ee6e8bb
PB
3819 return 1;
3820 nregs = neon_ls_element_type[op].nregs;
3821 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3822 spacing = neon_ls_element_type[op].spacing;
3823 if (size == 3 && (interleave | spacing) != 1)
3824 return 1;
dcc65026 3825 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3826 stride = (1 << size) * interleave;
3827 for (reg = 0; reg < nregs; reg++) {
3828 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3829 load_reg_var(s, addr, rn);
3830 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3831 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3832 load_reg_var(s, addr, rn);
3833 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3834 }
84496233
JR
3835 if (size == 3) {
3836 if (load) {
3837 tmp64 = gen_ld64(addr, IS_USER(s));
3838 neon_store_reg64(tmp64, rd);
3839 tcg_temp_free_i64(tmp64);
3840 } else {
3841 tmp64 = tcg_temp_new_i64();
3842 neon_load_reg64(tmp64, rd);
3843 gen_st64(tmp64, addr, IS_USER(s));
3844 }
3845 tcg_gen_addi_i32(addr, addr, stride);
3846 } else {
3847 for (pass = 0; pass < 2; pass++) {
3848 if (size == 2) {
3849 if (load) {
3850 tmp = gen_ld32(addr, IS_USER(s));
3851 neon_store_reg(rd, pass, tmp);
3852 } else {
3853 tmp = neon_load_reg(rd, pass);
3854 gen_st32(tmp, addr, IS_USER(s));
3855 }
1b2b1e54 3856 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3857 } else if (size == 1) {
3858 if (load) {
3859 tmp = gen_ld16u(addr, IS_USER(s));
3860 tcg_gen_addi_i32(addr, addr, stride);
3861 tmp2 = gen_ld16u(addr, IS_USER(s));
3862 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3863 tcg_gen_shli_i32(tmp2, tmp2, 16);
3864 tcg_gen_or_i32(tmp, tmp, tmp2);
84496233
JR
3865 dead_tmp(tmp2);
3866 neon_store_reg(rd, pass, tmp);
3867 } else {
3868 tmp = neon_load_reg(rd, pass);
3869 tmp2 = new_tmp();
3870 tcg_gen_shri_i32(tmp2, tmp, 16);
3871 gen_st16(tmp, addr, IS_USER(s));
3872 tcg_gen_addi_i32(addr, addr, stride);
3873 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3874 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3875 }
84496233
JR
3876 } else /* size == 0 */ {
3877 if (load) {
3878 TCGV_UNUSED(tmp2);
3879 for (n = 0; n < 4; n++) {
3880 tmp = gen_ld8u(addr, IS_USER(s));
3881 tcg_gen_addi_i32(addr, addr, stride);
3882 if (n == 0) {
3883 tmp2 = tmp;
3884 } else {
41ba8341
PB
3885 tcg_gen_shli_i32(tmp, tmp, n * 8);
3886 tcg_gen_or_i32(tmp2, tmp2, tmp);
84496233
JR
3887 dead_tmp(tmp);
3888 }
9ee6e8bb 3889 }
84496233
JR
3890 neon_store_reg(rd, pass, tmp2);
3891 } else {
3892 tmp2 = neon_load_reg(rd, pass);
3893 for (n = 0; n < 4; n++) {
3894 tmp = new_tmp();
3895 if (n == 0) {
3896 tcg_gen_mov_i32(tmp, tmp2);
3897 } else {
3898 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3899 }
3900 gen_st8(tmp, addr, IS_USER(s));
3901 tcg_gen_addi_i32(addr, addr, stride);
3902 }
3903 dead_tmp(tmp2);
9ee6e8bb
PB
3904 }
3905 }
3906 }
3907 }
84496233 3908 rd += spacing;
9ee6e8bb
PB
3909 }
3910 stride = nregs * 8;
3911 } else {
3912 size = (insn >> 10) & 3;
3913 if (size == 3) {
3914 /* Load single element to all lanes. */
3915 if (!load)
3916 return 1;
3917 size = (insn >> 6) & 3;
3918 nregs = ((insn >> 8) & 3) + 1;
3919 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3920 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3921 for (reg = 0; reg < nregs; reg++) {
3922 switch (size) {
3923 case 0:
1b2b1e54 3924 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3925 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3926 break;
3927 case 1:
1b2b1e54 3928 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3929 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3930 break;
3931 case 2:
1b2b1e54 3932 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3933 break;
3934 case 3:
3935 return 1;
a50f5b91
PB
3936 default: /* Avoid compiler warnings. */
3937 abort();
99c475ab 3938 }
1b2b1e54 3939 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
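                /* The loaded value has already been replicated across the
                   32-bit word (for sizes 0 and 1), so writing it to both
                   halves duplicates the element into every lane of the
                   D register. */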
3940 tmp2 = new_tmp();
3941 tcg_gen_mov_i32(tmp2, tmp);
3942 neon_store_reg(rd, 0, tmp2);
3018f259 3943 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3944 rd += stride;
3945 }
3946 stride = (1 << size) * nregs;
3947 } else {
3948 /* Single element. */
3949 pass = (insn >> 7) & 1;
3950 switch (size) {
3951 case 0:
3952 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3953 stride = 1;
3954 break;
3955 case 1:
3956 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3957 stride = (insn & (1 << 5)) ? 2 : 1;
3958 break;
3959 case 2:
3960 shift = 0;
9ee6e8bb
PB
3961 stride = (insn & (1 << 6)) ? 2 : 1;
3962 break;
3963 default:
3964 abort();
3965 }
3966 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3967 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3968 for (reg = 0; reg < nregs; reg++) {
3969 if (load) {
9ee6e8bb
PB
3970 switch (size) {
3971 case 0:
1b2b1e54 3972 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3973 break;
3974 case 1:
1b2b1e54 3975 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3976 break;
3977 case 2:
1b2b1e54 3978 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3979 break;
a50f5b91
PB
3980 default: /* Avoid compiler warnings. */
3981 abort();
9ee6e8bb
PB
3982 }
3983 if (size != 2) {
8f8e3aa4
PB
3984 tmp2 = neon_load_reg(rd, pass);
3985 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3986 dead_tmp(tmp2);
9ee6e8bb 3987 }
8f8e3aa4 3988 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3989 } else { /* Store */
8f8e3aa4
PB
3990 tmp = neon_load_reg(rd, pass);
3991 if (shift)
3992 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3993 switch (size) {
3994 case 0:
1b2b1e54 3995 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3996 break;
3997 case 1:
1b2b1e54 3998 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3999 break;
4000 case 2:
1b2b1e54 4001 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4002 break;
99c475ab 4003 }
99c475ab 4004 }
9ee6e8bb 4005 rd += stride;
1b2b1e54 4006 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4007 }
9ee6e8bb 4008 stride = nregs * (1 << size);
99c475ab 4009 }
9ee6e8bb 4010 }
1b2b1e54 4011 dead_tmp(addr);
9ee6e8bb 4012 if (rm != 15) {
b26eefb6
PB
4013 TCGv base;
4014
4015 base = load_reg(s, rn);
9ee6e8bb 4016 if (rm == 13) {
b26eefb6 4017 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4018 } else {
b26eefb6
PB
4019 TCGv index;
4020 index = load_reg(s, rm);
4021 tcg_gen_add_i32(base, base, index);
4022 dead_tmp(index);
9ee6e8bb 4023 }
b26eefb6 4024 store_reg(s, rn, base);
9ee6e8bb
PB
4025 }
4026 return 0;
4027}
3b46e624 4028
8f8e3aa4
PB
4029/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4030static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4031{
4032 tcg_gen_and_i32(t, t, c);
f669df27 4033 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4034 tcg_gen_or_i32(dest, t, f);
4035}
4036
a7812ae4 4037static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4038{
4039 switch (size) {
4040 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4041 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4042 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4043 default: abort();
4044 }
4045}
4046
a7812ae4 4047static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4048{
4049 switch (size) {
4050 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4051 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4052 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4053 default: abort();
4054 }
4055}
4056
a7812ae4 4057static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4058{
4059 switch (size) {
4060 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4061 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4062 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4063 default: abort();
4064 }
4065}
4066
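/* Variable shift used on the way to a narrowing shift: u selects the
   unsigned helpers, q selects the rounding variants. */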
4067static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4068 int q, int u)
4069{
4070 if (q) {
4071 if (u) {
4072 switch (size) {
4073 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4074 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4075 default: abort();
4076 }
4077 } else {
4078 switch (size) {
4079 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4080 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4081 default: abort();
4082 }
4083 }
4084 } else {
4085 if (u) {
4086 switch (size) {
4087 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4088 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4089 default: abort();
4090 }
4091 } else {
4092 switch (size) {
4093 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4094 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4095 default: abort();
4096 }
4097 }
4098 }
4099}
4100
a7812ae4 4101static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4102{
4103 if (u) {
4104 switch (size) {
4105 case 0: gen_helper_neon_widen_u8(dest, src); break;
4106 case 1: gen_helper_neon_widen_u16(dest, src); break;
4107 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4108 default: abort();
4109 }
4110 } else {
4111 switch (size) {
4112 case 0: gen_helper_neon_widen_s8(dest, src); break;
4113 case 1: gen_helper_neon_widen_s16(dest, src); break;
4114 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4115 default: abort();
4116 }
4117 }
4118 dead_tmp(src);
4119}
4120
4121static inline void gen_neon_addl(int size)
4122{
4123 switch (size) {
4124 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4125 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4126 case 2: tcg_gen_add_i64(CPU_V001); break;
4127 default: abort();
4128 }
4129}
4130
4131static inline void gen_neon_subl(int size)
4132{
4133 switch (size) {
4134 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4135 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4136 case 2: tcg_gen_sub_i64(CPU_V001); break;
4137 default: abort();
4138 }
4139}
4140
a7812ae4 4141static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4142{
4143 switch (size) {
4144 case 0: gen_helper_neon_negl_u16(var, var); break;
4145 case 1: gen_helper_neon_negl_u32(var, var); break;
4146 case 2: gen_helper_neon_negl_u64(var, var); break;
4147 default: abort();
4148 }
4149}
4150
a7812ae4 4151static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4152{
4153 switch (size) {
4154 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4155 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4156 default: abort();
4157 }
4158}
4159
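/* Widening multiply of two 32-bit vector chunks into a 64-bit result:
   (size, u) select the signed/unsigned 8- or 16-bit element helpers, or a
   full 32x32->64 multiply for size 2. */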
a7812ae4 4160static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4161{
a7812ae4 4162 TCGv_i64 tmp;
ad69471c
PB
4163
4164 switch ((size << 1) | u) {
4165 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4166 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4167 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4168 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4169 case 4:
4170 tmp = gen_muls_i64_i32(a, b);
4171 tcg_gen_mov_i64(dest, tmp);
4172 break;
4173 case 5:
4174 tmp = gen_mulu_i64_i32(a, b);
4175 tcg_gen_mov_i64(dest, tmp);
4176 break;
4177 default: abort();
4178 }
ad69471c
PB
4179}
4180
9ee6e8bb
PB
4181/* Translate a NEON data processing instruction. Return nonzero if the
4182 instruction is invalid.
ad69471c
PB
4183 We process data in a mixture of 32-bit and 64-bit chunks.
4184 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4185
9ee6e8bb
PB
4186static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4187{
4188 int op;
4189 int q;
4190 int rd, rn, rm;
4191 int size;
4192 int shift;
4193 int pass;
4194 int count;
4195 int pairwise;
4196 int u;
4197 int n;
ca9a32e4 4198 uint32_t imm, mask;
b75263d6 4199 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4200 TCGv_i64 tmp64;
9ee6e8bb
PB
4201
4202 if (!vfp_enabled(env))
4203 return 1;
4204 q = (insn & (1 << 6)) != 0;
4205 u = (insn >> 24) & 1;
4206 VFP_DREG_D(rd, insn);
4207 VFP_DREG_N(rn, insn);
4208 VFP_DREG_M(rm, insn);
4209 size = (insn >> 20) & 3;
4210 if ((insn & (1 << 23)) == 0) {
4211 /* Three register same length. */
4212 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4213 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4214 || op == 10 || op == 11 || op == 16)) {
4215 /* 64-bit element instructions. */
9ee6e8bb 4216 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4217 neon_load_reg64(cpu_V0, rn + pass);
4218 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4219 switch (op) {
4220 case 1: /* VQADD */
4221 if (u) {
ad69471c 4222 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4223 } else {
ad69471c 4224 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4225 }
9ee6e8bb
PB
4226 break;
4227 case 5: /* VQSUB */
4228 if (u) {
ad69471c
PB
4229 gen_helper_neon_sub_saturate_u64(CPU_V001);
4230 } else {
4231 gen_helper_neon_sub_saturate_s64(CPU_V001);
4232 }
4233 break;
4234 case 8: /* VSHL */
4235 if (u) {
4236 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4237 } else {
4238 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4239 }
4240 break;
4241 case 9: /* VQSHL */
4242 if (u) {
4243 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
def126ce 4244 cpu_V1, cpu_V0);
ad69471c 4245 } else {
def126ce 4246 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
ad69471c
PB
4247 cpu_V1, cpu_V0);
4248 }
4249 break;
4250 case 10: /* VRSHL */
4251 if (u) {
4252 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4253 } else {
ad69471c
PB
4254 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4255 }
4256 break;
4257 case 11: /* VQRSHL */
4258 if (u) {
4259 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4260 cpu_V1, cpu_V0);
4261 } else {
4262 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4263 cpu_V1, cpu_V0);
1e8d4eec 4264 }
9ee6e8bb
PB
4265 break;
4266 case 16:
4267 if (u) {
ad69471c 4268 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4269 } else {
ad69471c 4270 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4271 }
4272 break;
4273 default:
4274 abort();
2c0262af 4275 }
ad69471c 4276 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4277 }
9ee6e8bb 4278 return 0;
2c0262af 4279 }
9ee6e8bb
PB
4280 switch (op) {
4281 case 8: /* VSHL */
4282 case 9: /* VQSHL */
4283 case 10: /* VRSHL */
ad69471c 4284 case 11: /* VQRSHL */
9ee6e8bb 4285 {
ad69471c
PB
4286 int rtmp;
4287 /* Shift instruction operands are reversed. */
4288 rtmp = rn;
9ee6e8bb 4289 rn = rm;
ad69471c 4290 rm = rtmp;
9ee6e8bb
PB
4291 pairwise = 0;
4292 }
2c0262af 4293 break;
9ee6e8bb
PB
4294 case 20: /* VPMAX */
4295 case 21: /* VPMIN */
4296 case 23: /* VPADD */
4297 pairwise = 1;
2c0262af 4298 break;
9ee6e8bb
PB
4299 case 26: /* VPADD (float) */
4300 pairwise = (u && size < 2);
2c0262af 4301 break;
9ee6e8bb
PB
4302 case 30: /* VPMIN/VPMAX (float) */
4303 pairwise = u;
2c0262af 4304 break;
9ee6e8bb
PB
4305 default:
4306 pairwise = 0;
2c0262af 4307 break;
9ee6e8bb 4308 }
dd8fbd78 4309
9ee6e8bb
PB
4310 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4311
4312 if (pairwise) {
4313 /* Pairwise. */
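                /* Pairwise ops build the low half of the destination from
                   adjacent element pairs of Rn and the high half from pairs
                   of Rm. */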
4314 if (q)
4315 n = (pass & 1) * 2;
2c0262af 4316 else
9ee6e8bb
PB
4317 n = 0;
4318 if (pass < q + 1) {
dd8fbd78
FN
4319 tmp = neon_load_reg(rn, n);
4320 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4321 } else {
dd8fbd78
FN
4322 tmp = neon_load_reg(rm, n);
4323 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4324 }
4325 } else {
4326 /* Elementwise. */
dd8fbd78
FN
4327 tmp = neon_load_reg(rn, pass);
4328 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4329 }
4330 switch (op) {
4331 case 0: /* VHADD */
4332 GEN_NEON_INTEGER_OP(hadd);
4333 break;
4334 case 1: /* VQADD */
ad69471c 4335 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4336 break;
9ee6e8bb
PB
4337 case 2: /* VRHADD */
4338 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4339 break;
9ee6e8bb
PB
4340 case 3: /* Logic ops. */
4341 switch ((u << 2) | size) {
4342 case 0: /* VAND */
dd8fbd78 4343 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4344 break;
4345 case 1: /* BIC */
f669df27 4346 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4347 break;
4348 case 2: /* VORR */
dd8fbd78 4349 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4350 break;
4351 case 3: /* VORN */
f669df27 4352 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4353 break;
4354 case 4: /* VEOR */
dd8fbd78 4355 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4356 break;
4357 case 5: /* VBSL */
dd8fbd78
FN
4358 tmp3 = neon_load_reg(rd, pass);
4359 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4360 dead_tmp(tmp3);
9ee6e8bb
PB
4361 break;
4362 case 6: /* VBIT */
dd8fbd78
FN
4363 tmp3 = neon_load_reg(rd, pass);
4364 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4365 dead_tmp(tmp3);
9ee6e8bb
PB
4366 break;
4367 case 7: /* VBIF */
dd8fbd78
FN
4368 tmp3 = neon_load_reg(rd, pass);
4369 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4370 dead_tmp(tmp3);
9ee6e8bb 4371 break;
2c0262af
FB
4372 }
4373 break;
9ee6e8bb
PB
4374 case 4: /* VHSUB */
4375 GEN_NEON_INTEGER_OP(hsub);
4376 break;
4377 case 5: /* VQSUB */
ad69471c 4378 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4379 break;
9ee6e8bb
PB
4380 case 6: /* VCGT */
4381 GEN_NEON_INTEGER_OP(cgt);
4382 break;
4383 case 7: /* VCGE */
4384 GEN_NEON_INTEGER_OP(cge);
4385 break;
4386 case 8: /* VSHL */
ad69471c 4387 GEN_NEON_INTEGER_OP(shl);
2c0262af 4388 break;
9ee6e8bb 4389 case 9: /* VQSHL */
ad69471c 4390 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4391 break;
9ee6e8bb 4392 case 10: /* VRSHL */
ad69471c 4393 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4394 break;
9ee6e8bb 4395 case 11: /* VQRSHL */
ad69471c 4396 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4397 break;
4398 case 12: /* VMAX */
4399 GEN_NEON_INTEGER_OP(max);
4400 break;
4401 case 13: /* VMIN */
4402 GEN_NEON_INTEGER_OP(min);
4403 break;
4404 case 14: /* VABD */
4405 GEN_NEON_INTEGER_OP(abd);
4406 break;
4407 case 15: /* VABA */
4408 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4409 dead_tmp(tmp2);
4410 tmp2 = neon_load_reg(rd, pass);
4411 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4412 break;
4413 case 16:
4414 if (!u) { /* VADD */
dd8fbd78 4415 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4416 return 1;
4417 } else { /* VSUB */
4418 switch (size) {
dd8fbd78
FN
4419 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4420 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4421 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4422 default: return 1;
4423 }
4424 }
4425 break;
4426 case 17:
4427 if (!u) { /* VTST */
4428 switch (size) {
dd8fbd78
FN
4429 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4430 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4431 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4432 default: return 1;
4433 }
4434 } else { /* VCEQ */
4435 switch (size) {
dd8fbd78
FN
4436 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4437 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4438 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4439 default: return 1;
4440 }
4441 }
4442 break;
4443 case 18: /* Multiply. */
4444 switch (size) {
dd8fbd78
FN
4445 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4446 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4447 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4448 default: return 1;
4449 }
dd8fbd78
FN
4450 dead_tmp(tmp2);
4451 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4452 if (u) { /* VMLS */
dd8fbd78 4453 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4454 } else { /* VMLA */
dd8fbd78 4455 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4456 }
4457 break;
4458 case 19: /* VMUL */
4459 if (u) { /* polynomial */
dd8fbd78 4460 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4461 } else { /* Integer */
4462 switch (size) {
dd8fbd78
FN
4463 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4464 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4465 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4466 default: return 1;
4467 }
4468 }
4469 break;
4470 case 20: /* VPMAX */
4471 GEN_NEON_INTEGER_OP(pmax);
4472 break;
4473 case 21: /* VPMIN */
4474 GEN_NEON_INTEGER_OP(pmin);
4475 break;
4476 case 22: /* Multiply high. */
4477 if (!u) { /* VQDMULH */
4478 switch (size) {
dd8fbd78
FN
4479 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4480 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4481 default: return 1;
4482 }
4483 } else { /* VQRDMULH */
4484 switch (size) {
dd8fbd78
FN
4485 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4486 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4487 default: return 1;
4488 }
4489 }
4490 break;
4491 case 23: /* VPADD */
4492 if (u)
4493 return 1;
4494 switch (size) {
dd8fbd78
FN
4495 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4496 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4497 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4498 default: return 1;
4499 }
4500 break;
4501 case 26: /* Floating point arithmetic. */
4502 switch ((u << 2) | size) {
4503 case 0: /* VADD */
dd8fbd78 4504 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4505 break;
4506 case 2: /* VSUB */
dd8fbd78 4507 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4508 break;
4509 case 4: /* VPADD */
dd8fbd78 4510 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4511 break;
4512 case 6: /* VABD */
dd8fbd78 4513 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4514 break;
4515 default:
4516 return 1;
4517 }
4518 break;
4519 case 27: /* Float multiply. */
dd8fbd78 4520 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4521 if (!u) {
dd8fbd78
FN
4522 dead_tmp(tmp2);
4523 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4524 if (size == 0) {
dd8fbd78 4525 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4526 } else {
dd8fbd78 4527 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4528 }
4529 }
4530 break;
4531 case 28: /* Float compare. */
4532 if (!u) {
dd8fbd78 4533 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4534 } else {
9ee6e8bb 4535 if (size == 0)
dd8fbd78 4536 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4537 else
dd8fbd78 4538 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4539 }
2c0262af 4540 break;
9ee6e8bb
PB
4541 case 29: /* Float compare absolute. */
4542 if (!u)
4543 return 1;
4544 if (size == 0)
dd8fbd78 4545 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4546 else
dd8fbd78 4547 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4548 break;
9ee6e8bb
PB
4549 case 30: /* Float min/max. */
4550 if (size == 0)
dd8fbd78 4551 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4552 else
dd8fbd78 4553 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4554 break;
4555 case 31:
4556 if (size == 0)
dd8fbd78 4557 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4558 else
dd8fbd78 4559 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4560 break;
9ee6e8bb
PB
4561 default:
4562 abort();
2c0262af 4563 }
dd8fbd78
FN
4564 dead_tmp(tmp2);
4565
9ee6e8bb
PB
4566 /* Save the result. For elementwise operations we can put it
4567 straight into the destination register. For pairwise operations
4568 we have to be careful to avoid clobbering the source operands. */
4569 if (pairwise && rd == rm) {
dd8fbd78 4570 neon_store_scratch(pass, tmp);
9ee6e8bb 4571 } else {
dd8fbd78 4572 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4573 }
4574
4575 } /* for pass */
4576 if (pairwise && rd == rm) {
4577 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4578 tmp = neon_load_scratch(pass);
4579 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4580 }
4581 }
ad69471c 4582 /* End of 3 register same size operations. */
9ee6e8bb
PB
4583 } else if (insn & (1 << 4)) {
4584 if ((insn & 0x00380080) != 0) {
4585 /* Two registers and shift. */
4586 op = (insn >> 8) & 0xf;
4587 if (insn & (1 << 7)) {
4588 /* 64-bit shift. */
4589 size = 3;
4590 } else {
4591 size = 2;
4592 while ((insn & (1 << (size + 19))) == 0)
4593 size--;
4594 }
4595 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4596 /* To avoid excessive duplication of ops we implement shift
4597 by immediate using the variable shift operations. */
4598 if (op < 8) {
4599 /* Shift by immediate:
4600 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4601 /* Right shifts are encoded as N - shift, where N is the
4602 element size in bits. */
4603 if (op <= 4)
4604 shift = shift - (1 << (size + 3));
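                /* e.g. for size == 0 an encoded value of 7 yields
                   shift = -1; the variable-shift helpers below interpret a
                   negative count as a right shift. */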
9ee6e8bb
PB
4605 if (size == 3) {
4606 count = q + 1;
4607 } else {
4608 count = q ? 4: 2;
4609 }
4610 switch (size) {
4611 case 0:
4612 imm = (uint8_t) shift;
4613 imm |= imm << 8;
4614 imm |= imm << 16;
4615 break;
4616 case 1:
4617 imm = (uint16_t) shift;
4618 imm |= imm << 16;
4619 break;
4620 case 2:
4621 case 3:
4622 imm = shift;
4623 break;
4624 default:
4625 abort();
4626 }
4627
4628 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4629 if (size == 3) {
4630 neon_load_reg64(cpu_V0, rm + pass);
4631 tcg_gen_movi_i64(cpu_V1, imm);
4632 switch (op) {
4633 case 0: /* VSHR */
4634 case 1: /* VSRA */
4635 if (u)
4636 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4637 else
ad69471c 4638 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4639 break;
ad69471c
PB
4640 case 2: /* VRSHR */
4641 case 3: /* VRSRA */
4642 if (u)
4643 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4644 else
ad69471c 4645 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4646 break;
ad69471c
PB
4647 case 4: /* VSRI */
4648 if (!u)
4649 return 1;
4650 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4651 break;
4652 case 5: /* VSHL, VSLI */
4653 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4654 break;
0322b26e
PM
4655 case 6: /* VQSHLU */
4656 if (u) {
4657 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4658 cpu_V0, cpu_V1);
4659 } else {
4660 return 1;
4661 }
ad69471c 4662 break;
0322b26e
PM
4663 case 7: /* VQSHL */
4664 if (u) {
4665 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4666 cpu_V0, cpu_V1);
4667 } else {
4668 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4669 cpu_V0, cpu_V1);
4670 }
9ee6e8bb 4671 break;
9ee6e8bb 4672 }
ad69471c
PB
4673 if (op == 1 || op == 3) {
4674 /* Accumulate. */
4675 neon_load_reg64(cpu_V0, rd + pass);
4676 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4677 } else if (op == 4 || (op == 5 && u)) {
4678 /* Insert */
4679 cpu_abort(env, "VS[LR]I.64 not implemented");
4680 }
4681 neon_store_reg64(cpu_V0, rd + pass);
4682 } else { /* size < 3 */
4683 /* Operands in T0 and T1. */
dd8fbd78
FN
4684 tmp = neon_load_reg(rm, pass);
4685 tmp2 = new_tmp();
4686 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4687 switch (op) {
4688 case 0: /* VSHR */
4689 case 1: /* VSRA */
4690 GEN_NEON_INTEGER_OP(shl);
4691 break;
4692 case 2: /* VRSHR */
4693 case 3: /* VRSRA */
4694 GEN_NEON_INTEGER_OP(rshl);
4695 break;
4696 case 4: /* VSRI */
4697 if (!u)
4698 return 1;
4699 GEN_NEON_INTEGER_OP(shl);
4700 break;
4701 case 5: /* VSHL, VSLI */
4702 switch (size) {
dd8fbd78
FN
4703 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4704 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4705 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4706 default: return 1;
4707 }
4708 break;
0322b26e
PM
4709 case 6: /* VQSHLU */
4710 if (!u) {
4711 return 1;
4712 }
ad69471c 4713 switch (size) {
0322b26e
PM
4714 case 0:
4715 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4716 tmp, tmp2);
4717 break;
4718 case 1:
4719 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4720 tmp, tmp2);
4721 break;
4722 case 2:
4723 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4724 tmp, tmp2);
4725 break;
4726 default:
4727 return 1;
ad69471c
PB
4728 }
4729 break;
0322b26e
PM
4730 case 7: /* VQSHL */
4731 GEN_NEON_INTEGER_OP_ENV(qshl);
4732 break;
ad69471c 4733 }
dd8fbd78 4734 dead_tmp(tmp2);
ad69471c
PB
4735
4736 if (op == 1 || op == 3) {
4737 /* Accumulate. */
dd8fbd78
FN
4738 tmp2 = neon_load_reg(rd, pass);
4739 gen_neon_add(size, tmp2, tmp);
4740 dead_tmp(tmp2);
ad69471c
PB
4741 } else if (op == 4 || (op == 5 && u)) {
4742 /* Insert */
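                            /* mask covers the bits produced by the shift;
                               the result keeps those and takes the remaining
                               bits from the destination register. */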
4743 switch (size) {
4744 case 0:
4745 if (op == 4)
ca9a32e4 4746 mask = 0xff >> -shift;
ad69471c 4747 else
ca9a32e4
JR
4748 mask = (uint8_t)(0xff << shift);
4749 mask |= mask << 8;
4750 mask |= mask << 16;
ad69471c
PB
4751 break;
4752 case 1:
4753 if (op == 4)
ca9a32e4 4754 mask = 0xffff >> -shift;
ad69471c 4755 else
ca9a32e4
JR
4756 mask = (uint16_t)(0xffff << shift);
4757 mask |= mask << 16;
ad69471c
PB
4758 break;
4759 case 2:
ca9a32e4
JR
4760 if (shift < -31 || shift > 31) {
4761 mask = 0;
4762 } else {
4763 if (op == 4)
4764 mask = 0xffffffffu >> -shift;
4765 else
4766 mask = 0xffffffffu << shift;
4767 }
ad69471c
PB
4768 break;
4769 default:
4770 abort();
4771 }
dd8fbd78 4772 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4773 tcg_gen_andi_i32(tmp, tmp, mask);
4774 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78
FN
4775 tcg_gen_or_i32(tmp, tmp, tmp2);
4776 dead_tmp(tmp2);
ad69471c 4777 }
dd8fbd78 4778 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4779 }
4780 } /* for pass */
4781 } else if (op < 10) {
ad69471c 4782 /* Shift by immediate and narrow:
9ee6e8bb
PB
4783 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4784 shift = shift - (1 << (size + 3));
4785 size++;
9ee6e8bb
PB
4786 switch (size) {
4787 case 1:
ad69471c 4788 imm = (uint16_t)shift;
9ee6e8bb 4789 imm |= imm << 16;
ad69471c 4790 tmp2 = tcg_const_i32(imm);
a7812ae4 4791 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4792 break;
4793 case 2:
ad69471c
PB
4794 imm = (uint32_t)shift;
4795 tmp2 = tcg_const_i32(imm);
a7812ae4 4796 TCGV_UNUSED_I64(tmp64);
4cc633c3 4797 break;
9ee6e8bb 4798 case 3:
a7812ae4
PB
4799 tmp64 = tcg_const_i64(shift);
4800 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4801 break;
4802 default:
4803 abort();
4804 }
4805
ad69471c
PB
4806 for (pass = 0; pass < 2; pass++) {
4807 if (size == 3) {
4808 neon_load_reg64(cpu_V0, rm + pass);
4809 if (q) {
4810 if (u)
a7812ae4 4811 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4812 else
a7812ae4 4813 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4814 } else {
4815 if (u)
a7812ae4 4816 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4817 else
a7812ae4 4818 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4819 }
2c0262af 4820 } else {
ad69471c
PB
4821 tmp = neon_load_reg(rm + pass, 0);
4822 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4823 tmp3 = neon_load_reg(rm + pass, 1);
4824 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4825 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4826 dead_tmp(tmp);
36aa55dc 4827 dead_tmp(tmp3);
9ee6e8bb 4828 }
ad69471c
PB
4829 tmp = new_tmp();
4830 if (op == 8 && !u) {
4831 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4832 } else {
ad69471c
PB
4833 if (op == 8)
4834 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4835 else
ad69471c
PB
4836 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4837 }
2301db49 4838 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4839 } /* for pass */
b75263d6
JR
4840 if (size == 3) {
4841 tcg_temp_free_i64(tmp64);
2301db49
JR
4842 } else {
4843 dead_tmp(tmp2);
b75263d6 4844 }
9ee6e8bb
PB
4845 } else if (op == 10) {
4846 /* VSHLL */
ad69471c 4847 if (q || size == 3)
9ee6e8bb 4848 return 1;
ad69471c
PB
4849 tmp = neon_load_reg(rm, 0);
4850 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4851 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4852 if (pass == 1)
4853 tmp = tmp2;
4854
4855 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4856
9ee6e8bb
PB
4857 if (shift != 0) {
4858 /* The shift is less than the width of the source
ad69471c
PB
4859 type, so we can just shift the whole register. */
4860 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4861 if (size < 2 || !u) {
4862 uint64_t imm64;
4863 if (size == 0) {
4864 imm = (0xffu >> (8 - shift));
4865 imm |= imm << 16;
4866 } else {
4867 imm = 0xffff >> (16 - shift);
9ee6e8bb 4868 }
ad69471c
PB
4869 imm64 = imm | (((uint64_t)imm) << 32);
4870 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4871 }
4872 }
ad69471c 4873 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 4874 }
f73534a5 4875 } else if (op >= 14) {
9ee6e8bb 4876 /* VCVT fixed-point. */
f73534a5
PM
4877 /* We have already masked out the must-be-1 top bit of imm6,
4878 * hence this 32-shift where the ARM ARM has 64-imm6.
4879 */
4880 shift = 32 - shift;
9ee6e8bb 4881 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4882 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 4883 if (!(op & 1)) {
9ee6e8bb 4884 if (u)
4373f3ce 4885 gen_vfp_ulto(0, shift);
9ee6e8bb 4886 else
4373f3ce 4887 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4888 } else {
4889 if (u)
4373f3ce 4890 gen_vfp_toul(0, shift);
9ee6e8bb 4891 else
4373f3ce 4892 gen_vfp_tosl(0, shift);
2c0262af 4893 }
4373f3ce 4894 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4895 }
4896 } else {
9ee6e8bb
PB
4897 return 1;
4898 }
4899 } else { /* (insn & 0x00380080) == 0 */
4900 int invert;
4901
4902 op = (insn >> 8) & 0xf;
4903 /* One register and immediate. */
4904 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4905 invert = (insn & (1 << 5)) != 0;
4906 switch (op) {
4907 case 0: case 1:
4908 /* no-op */
4909 break;
4910 case 2: case 3:
4911 imm <<= 8;
4912 break;
4913 case 4: case 5:
4914 imm <<= 16;
4915 break;
4916 case 6: case 7:
4917 imm <<= 24;
4918 break;
4919 case 8: case 9:
4920 imm |= imm << 16;
4921 break;
4922 case 10: case 11:
4923 imm = (imm << 8) | (imm << 24);
4924 break;
4925 case 12:
8e31209e 4926 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
4927 break;
4928 case 13:
4929 imm = (imm << 16) | 0xffff;
4930 break;
4931 case 14:
4932 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4933 if (invert)
4934 imm = ~imm;
4935 break;
4936 case 15:
4937 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4938 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4939 break;
4940 }
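            /* imm now holds the 8-bit immediate expanded into a 32-bit lane
               pattern as selected by the cmode encoding in op. */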
4941 if (invert)
4942 imm = ~imm;
4943
9ee6e8bb
PB
4944 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4945 if (op & 1 && op < 12) {
ad69471c 4946 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4947 if (invert) {
4948 /* The immediate value has already been inverted, so
4949 BIC becomes AND. */
ad69471c 4950 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4951 } else {
ad69471c 4952 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4953 }
9ee6e8bb 4954 } else {
ad69471c
PB
4955 /* VMOV, VMVN. */
4956 tmp = new_tmp();
9ee6e8bb 4957 if (op == 14 && invert) {
ad69471c
PB
4958 uint32_t val;
4959 val = 0;
9ee6e8bb
PB
4960 for (n = 0; n < 4; n++) {
4961 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4962 val |= 0xff << (n * 8);
9ee6e8bb 4963 }
ad69471c
PB
4964 tcg_gen_movi_i32(tmp, val);
4965 } else {
4966 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4967 }
9ee6e8bb 4968 }
ad69471c 4969 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4970 }
4971 }
e4b3861d 4972 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4973 if (size != 3) {
4974 op = (insn >> 8) & 0xf;
4975 if ((insn & (1 << 6)) == 0) {
4976 /* Three registers of different lengths. */
4977 int src1_wide;
4978 int src2_wide;
4979 int prewiden;
4980 /* prewiden, src1_wide, src2_wide */
4981 static const int neon_3reg_wide[16][3] = {
4982 {1, 0, 0}, /* VADDL */
4983 {1, 1, 0}, /* VADDW */
4984 {1, 0, 0}, /* VSUBL */
4985 {1, 1, 0}, /* VSUBW */
4986 {0, 1, 1}, /* VADDHN */
4987 {0, 0, 0}, /* VABAL */
4988 {0, 1, 1}, /* VSUBHN */
4989 {0, 0, 0}, /* VABDL */
4990 {0, 0, 0}, /* VMLAL */
4991 {0, 0, 0}, /* VQDMLAL */
4992 {0, 0, 0}, /* VMLSL */
4993 {0, 0, 0}, /* VQDMLSL */
4994 {0, 0, 0}, /* Integer VMULL */
4995 {0, 0, 0}, /* VQDMULL */
4996 {0, 0, 0} /* Polynomial VMULL */
4997 };
4998
4999 prewiden = neon_3reg_wide[op][0];
5000 src1_wide = neon_3reg_wide[op][1];
5001 src2_wide = neon_3reg_wide[op][2];
5002
ad69471c
PB
5003 if (size == 0 && (op == 9 || op == 11 || op == 13))
5004 return 1;
5005
9ee6e8bb
PB
5006 /* Avoid overlapping operands. Wide source operands are
5007 always aligned so will never overlap with wide
5008 destinations in problematic ways. */
8f8e3aa4 5009 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5010 tmp = neon_load_reg(rm, 1);
5011 neon_store_scratch(2, tmp);
8f8e3aa4 5012 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5013 tmp = neon_load_reg(rn, 1);
5014 neon_store_scratch(2, tmp);
9ee6e8bb 5015 }
a50f5b91 5016 TCGV_UNUSED(tmp3);
9ee6e8bb 5017 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5018 if (src1_wide) {
5019 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5020 TCGV_UNUSED(tmp);
9ee6e8bb 5021 } else {
ad69471c 5022 if (pass == 1 && rd == rn) {
dd8fbd78 5023 tmp = neon_load_scratch(2);
9ee6e8bb 5024 } else {
ad69471c
PB
5025 tmp = neon_load_reg(rn, pass);
5026 }
5027 if (prewiden) {
5028 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5029 }
5030 }
ad69471c
PB
5031 if (src2_wide) {
5032 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5033 TCGV_UNUSED(tmp2);
9ee6e8bb 5034 } else {
ad69471c 5035 if (pass == 1 && rd == rm) {
dd8fbd78 5036 tmp2 = neon_load_scratch(2);
9ee6e8bb 5037 } else {
ad69471c
PB
5038 tmp2 = neon_load_reg(rm, pass);
5039 }
5040 if (prewiden) {
5041 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5042 }
9ee6e8bb
PB
5043 }
5044 switch (op) {
5045 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5046 gen_neon_addl(size);
9ee6e8bb 5047 break;
79b0e534 5048 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5049 gen_neon_subl(size);
9ee6e8bb
PB
5050 break;
5051 case 5: case 7: /* VABAL, VABDL */
5052 switch ((size << 1) | u) {
ad69471c
PB
5053 case 0:
5054 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5055 break;
5056 case 1:
5057 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5058 break;
5059 case 2:
5060 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5061 break;
5062 case 3:
5063 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5064 break;
5065 case 4:
5066 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5067 break;
5068 case 5:
5069 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5070 break;
9ee6e8bb
PB
5071 default: abort();
5072 }
ad69471c
PB
5073 dead_tmp(tmp2);
5074 dead_tmp(tmp);
9ee6e8bb
PB
5075 break;
5076 case 8: case 9: case 10: case 11: case 12: case 13:
5077 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5078 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
5079 dead_tmp(tmp2);
5080 dead_tmp(tmp);
9ee6e8bb
PB
5081 break;
5082 case 14: /* Polynomial VMULL */
5083 cpu_abort(env, "Polynomial VMULL not implemented");
5084
5085 default: /* 15 is RESERVED. */
5086 return 1;
5087 }
5088 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5089 /* Accumulate. */
5090 if (op == 10 || op == 11) {
ad69471c 5091 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
5092 }
5093
9ee6e8bb 5094 if (op != 13) {
ad69471c 5095 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
5096 }
5097
5098 switch (op) {
5099 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 5100 gen_neon_addl(size);
9ee6e8bb
PB
5101 break;
5102 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5103 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5104 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5105 break;
9ee6e8bb
PB
5106 /* Fall through. */
5107 case 13: /* VQDMULL */
ad69471c 5108 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5109 break;
5110 default:
5111 abort();
5112 }
ad69471c 5113 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5114 } else if (op == 4 || op == 6) {
5115 /* Narrowing operation. */
ad69471c 5116 tmp = new_tmp();
79b0e534 5117 if (!u) {
9ee6e8bb 5118 switch (size) {
ad69471c
PB
5119 case 0:
5120 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5121 break;
5122 case 1:
5123 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5124 break;
5125 case 2:
5126 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5127 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5128 break;
9ee6e8bb
PB
5129 default: abort();
5130 }
5131 } else {
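                            /* u == 1 selects the rounding variants
                               (VRADDHN/VRSUBHN): the helpers, and the explicit
                               add of 1 << 31 for size 2, add half an LSB of
                               the high half before truncating. */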
5132 switch (size) {
ad69471c
PB
5133 case 0:
5134 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5135 break;
5136 case 1:
5137 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5138 break;
5139 case 2:
5140 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5141 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5142 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5143 break;
9ee6e8bb
PB
5144 default: abort();
5145 }
5146 }
ad69471c
PB
5147 if (pass == 0) {
5148 tmp3 = tmp;
5149 } else {
5150 neon_store_reg(rd, 0, tmp3);
5151 neon_store_reg(rd, 1, tmp);
5152 }
9ee6e8bb
PB
5153 } else {
5154 /* Write back the result. */
ad69471c 5155 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5156 }
5157 }
5158 } else {
5159 /* Two registers and a scalar. */
5160 switch (op) {
5161 case 0: /* Integer VMLA scalar */
5162 case 1: /* Float VMLA scalar */
5163 case 4: /* Integer VMLS scalar */
5164 case 5: /* Floating point VMLS scalar */
5165 case 8: /* Integer VMUL scalar */
5166 case 9: /* Floating point VMUL scalar */
5167 case 12: /* VQDMULH scalar */
5168 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5169 tmp = neon_get_scalar(size, rm);
5170 neon_store_scratch(0, tmp);
9ee6e8bb 5171 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5172 tmp = neon_load_scratch(0);
5173 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5174 if (op == 12) {
5175 if (size == 1) {
dd8fbd78 5176 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5177 } else {
dd8fbd78 5178 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5179 }
5180 } else if (op == 13) {
5181 if (size == 1) {
dd8fbd78 5182 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5183 } else {
dd8fbd78 5184 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5185 }
5186 } else if (op & 1) {
dd8fbd78 5187 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5188 } else {
5189 switch (size) {
dd8fbd78
FN
5190 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5191 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5192 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5193 default: return 1;
5194 }
5195 }
dd8fbd78 5196 dead_tmp(tmp2);
9ee6e8bb
PB
5197 if (op < 8) {
5198 /* Accumulate. */
dd8fbd78 5199 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5200 switch (op) {
5201 case 0:
dd8fbd78 5202 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5203 break;
5204 case 1:
dd8fbd78 5205 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5206 break;
5207 case 4:
dd8fbd78 5208 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5209 break;
5210 case 5:
dd8fbd78 5211 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5212 break;
5213 default:
5214 abort();
5215 }
dd8fbd78 5216 dead_tmp(tmp2);
9ee6e8bb 5217 }
dd8fbd78 5218 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5219 }
5220 break;
5221 case 2: /* VMLAL scalar */
5222 case 3: /* VQDMLAL scalar */
5223 case 6: /* VMLSL scalar */
5224 case 7: /* VQDMLSL scalar */
5225 case 10: /* VMULL scalar */
5226 case 11: /* VQDMULL scalar */
ad69471c
PB
5227 if (size == 0 && (op == 3 || op == 7 || op == 11))
5228 return 1;
5229
dd8fbd78
FN
5230 tmp2 = neon_get_scalar(size, rm);
5231 tmp3 = neon_load_reg(rn, 1);
ad69471c 5232
9ee6e8bb 5233 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5234 if (pass == 0) {
5235 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5236 } else {
dd8fbd78 5237 tmp = tmp3;
9ee6e8bb 5238 }
ad69471c 5239 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5240 dead_tmp(tmp);
9ee6e8bb 5241 if (op == 6 || op == 7) {
ad69471c
PB
5242 gen_neon_negl(cpu_V0, size);
5243 }
5244 if (op != 11) {
5245 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5246 }
9ee6e8bb
PB
5247 switch (op) {
5248 case 2: case 6:
ad69471c 5249 gen_neon_addl(size);
9ee6e8bb
PB
5250 break;
5251 case 3: case 7:
ad69471c
PB
5252 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5253 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5254 break;
5255 case 10:
5256 /* no-op */
5257 break;
5258 case 11:
ad69471c 5259 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5260 break;
5261 default:
5262 abort();
5263 }
ad69471c 5264 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5265 }
dd8fbd78
FN
5266
5267 dead_tmp(tmp2);
5268
9ee6e8bb
PB
5269 break;
5270 default: /* 14 and 15 are RESERVED */
5271 return 1;
5272 }
5273 }
5274 } else { /* size == 3 */
5275 if (!u) {
5276 /* Extract. */
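                /* VEXT: the destination is (Vm:Vn) >> (imm * 8), i.e. the
                   bytes of the register pair starting at byte index imm,
                   assembled below from 64-bit shifts and ORs. */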
9ee6e8bb 5277 imm = (insn >> 8) & 0xf;
ad69471c
PB
5278
5279 if (imm > 7 && !q)
5280 return 1;
5281
5282 if (imm == 0) {
5283 neon_load_reg64(cpu_V0, rn);
5284 if (q) {
5285 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5286 }
ad69471c
PB
5287 } else if (imm == 8) {
5288 neon_load_reg64(cpu_V0, rn + 1);
5289 if (q) {
5290 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5291 }
ad69471c 5292 } else if (q) {
a7812ae4 5293 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5294 if (imm < 8) {
5295 neon_load_reg64(cpu_V0, rn);
a7812ae4 5296 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5297 } else {
5298 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5299 neon_load_reg64(tmp64, rm);
ad69471c
PB
5300 }
5301 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5302 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5303 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5304 if (imm < 8) {
5305 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5306 } else {
ad69471c
PB
5307 neon_load_reg64(cpu_V1, rm + 1);
5308 imm -= 8;
9ee6e8bb 5309 }
ad69471c 5310 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5311 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5312 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5313 tcg_temp_free_i64(tmp64);
ad69471c 5314 } else {
a7812ae4 5315 /* BUGFIX */
ad69471c 5316 neon_load_reg64(cpu_V0, rn);
a7812ae4 5317 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5318 neon_load_reg64(cpu_V1, rm);
a7812ae4 5319 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5320 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5321 }
5322 neon_store_reg64(cpu_V0, rd);
5323 if (q) {
5324 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5325 }
5326 } else if ((insn & (1 << 11)) == 0) {
5327 /* Two register misc. */
5328 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5329 size = (insn >> 18) & 3;
5330 switch (op) {
5331 case 0: /* VREV64 */
5332 if (size == 3)
5333 return 1;
5334 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5335 tmp = neon_load_reg(rm, pass * 2);
5336 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5337 switch (size) {
dd8fbd78
FN
5338 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5339 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5340 case 2: /* no-op */ break;
5341 default: abort();
5342 }
dd8fbd78 5343 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5344 if (size == 2) {
dd8fbd78 5345 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5346 } else {
9ee6e8bb 5347 switch (size) {
dd8fbd78
FN
5348 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5349 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5350 default: abort();
5351 }
dd8fbd78 5352 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5353 }
5354 }
5355 break;
5356 case 4: case 5: /* VPADDL */
5357 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5358 if (size == 3)
5359 return 1;
ad69471c
PB
5360 for (pass = 0; pass < q + 1; pass++) {
5361 tmp = neon_load_reg(rm, pass * 2);
5362 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5363 tmp = neon_load_reg(rm, pass * 2 + 1);
5364 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5365 switch (size) {
5366 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5367 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5368 case 2: tcg_gen_add_i64(CPU_V001); break;
5369 default: abort();
5370 }
9ee6e8bb
PB
5371 if (op >= 12) {
5372 /* Accumulate. */
ad69471c
PB
5373 neon_load_reg64(cpu_V1, rd + pass);
5374 gen_neon_addl(size);
9ee6e8bb 5375 }
ad69471c 5376 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5377 }
5378 break;
5379 case 33: /* VTRN */
5380 if (size == 2) {
5381 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5382 tmp = neon_load_reg(rm, n);
5383 tmp2 = neon_load_reg(rd, n + 1);
5384 neon_store_reg(rm, n, tmp2);
5385 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5386 }
5387 } else {
5388 goto elementwise;
5389 }
5390 break;
5391 case 34: /* VUZP */
5392 /* Reg Before After
5393 Rd A3 A2 A1 A0 B2 B0 A2 A0
5394 Rm B3 B2 B1 B0 B3 B1 A3 A1
5395 */
5396 if (size == 3)
5397 return 1;
5398 gen_neon_unzip(rd, q, 0, size);
5399 gen_neon_unzip(rm, q, 4, size);
5400 if (q) {
5401 static int unzip_order_q[8] =
5402 {0, 2, 4, 6, 1, 3, 5, 7};
5403 for (n = 0; n < 8; n++) {
5404 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5405 tmp = neon_load_scratch(unzip_order_q[n]);
5406 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5407 }
5408 } else {
5409 static int unzip_order[4] =
5410 {0, 4, 1, 5};
5411 for (n = 0; n < 4; n++) {
5412 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5413 tmp = neon_load_scratch(unzip_order[n]);
5414 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5415 }
5416 }
5417 break;
5418 case 35: /* VZIP */
5419 /* Reg Before After
5420 Rd A3 A2 A1 A0 B1 A1 B0 A0
5421 Rm B3 B2 B1 B0 B3 A3 B2 A2
5422 */
5423 if (size == 3)
5424 return 1;
5425 count = (q ? 4 : 2);
5426 for (n = 0; n < count; n++) {
dd8fbd78
FN
5427 tmp = neon_load_reg(rd, n);
5428 tmp2 = neon_load_reg(rm, n);
9ee6e8bb 5429 switch (size) {
dd8fbd78
FN
5430 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5431 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5432 case 2: /* no-op */; break;
5433 default: abort();
5434 }
dd8fbd78
FN
5435 neon_store_scratch(n * 2, tmp);
5436 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5437 }
5438 for (n = 0; n < count * 2; n++) {
5439 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5440 tmp = neon_load_scratch(n);
5441 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5442 }
5443 break;
5444 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5445 if (size == 3)
5446 return 1;
a50f5b91 5447 TCGV_UNUSED(tmp2);
9ee6e8bb 5448 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5449 neon_load_reg64(cpu_V0, rm + pass);
5450 tmp = new_tmp();
9ee6e8bb 5451 if (op == 36 && q == 0) {
ad69471c 5452 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5453 } else if (q) {
ad69471c 5454 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5455 } else {
ad69471c
PB
5456 gen_neon_narrow_sats(size, tmp, cpu_V0);
5457 }
5458 if (pass == 0) {
5459 tmp2 = tmp;
5460 } else {
5461 neon_store_reg(rd, 0, tmp2);
5462 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5463 }
9ee6e8bb
PB
5464 }
5465 break;
5466 case 38: /* VSHLL */
ad69471c 5467 if (q || size == 3)
9ee6e8bb 5468 return 1;
ad69471c
PB
5469 tmp = neon_load_reg(rm, 0);
5470 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5471 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5472 if (pass == 1)
5473 tmp = tmp2;
5474 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5475 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5476 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5477 }
5478 break;
60011498
PB
5479 case 44: /* VCVT.F16.F32 */
5480 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5481 return 1;
5482 tmp = new_tmp();
5483 tmp2 = new_tmp();
5484 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5485 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5486 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5487 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5488 tcg_gen_shli_i32(tmp2, tmp2, 16);
5489 tcg_gen_or_i32(tmp2, tmp2, tmp);
5490 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5491 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5492 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5493 neon_store_reg(rd, 0, tmp2);
5494 tmp2 = new_tmp();
5495 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5496 tcg_gen_shli_i32(tmp2, tmp2, 16);
5497 tcg_gen_or_i32(tmp2, tmp2, tmp);
5498 neon_store_reg(rd, 1, tmp2);
5499 dead_tmp(tmp);
5500 break;
5501 case 46: /* VCVT.F32.F16 */
5502 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5503 return 1;
5504 tmp3 = new_tmp();
5505 tmp = neon_load_reg(rm, 0);
5506 tmp2 = neon_load_reg(rm, 1);
5507 tcg_gen_ext16u_i32(tmp3, tmp);
5508 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5509 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5510 tcg_gen_shri_i32(tmp3, tmp, 16);
5511 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5512 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5513 dead_tmp(tmp);
5514 tcg_gen_ext16u_i32(tmp3, tmp2);
5515 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5516 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5517 tcg_gen_shri_i32(tmp3, tmp2, 16);
5518 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5519 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5520 dead_tmp(tmp2);
5521 dead_tmp(tmp3);
5522 break;
9ee6e8bb
PB
5523 default:
5524 elementwise:
5525 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5526 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5527 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5528 neon_reg_offset(rm, pass));
dd8fbd78 5529 TCGV_UNUSED(tmp);
9ee6e8bb 5530 } else {
dd8fbd78 5531 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5532 }
5533 switch (op) {
5534 case 1: /* VREV32 */
5535 switch (size) {
dd8fbd78
FN
5536 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5537 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5538 default: return 1;
5539 }
5540 break;
5541 case 2: /* VREV16 */
5542 if (size != 0)
5543 return 1;
dd8fbd78 5544 gen_rev16(tmp);
9ee6e8bb 5545 break;
9ee6e8bb
PB
5546 case 8: /* CLS */
5547 switch (size) {
dd8fbd78
FN
5548 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5549 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5550 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5551 default: return 1;
5552 }
5553 break;
5554 case 9: /* CLZ */
5555 switch (size) {
dd8fbd78
FN
5556 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5557 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5558 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5559 default: return 1;
5560 }
5561 break;
5562 case 10: /* CNT */
5563 if (size != 0)
5564 return 1;
dd8fbd78 5565 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5566 break;
5567 case 11: /* VNOT */
5568 if (size != 0)
5569 return 1;
dd8fbd78 5570 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5571 break;
5572 case 14: /* VQABS */
5573 switch (size) {
dd8fbd78
FN
5574 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5575 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5576 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5577 default: return 1;
5578 }
5579 break;
5580 case 15: /* VQNEG */
5581 switch (size) {
dd8fbd78
FN
5582 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5583 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5584 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5585 default: return 1;
5586 }
5587 break;
5588 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5589 tmp2 = tcg_const_i32(0);
9ee6e8bb 5590 switch(size) {
dd8fbd78
FN
5591 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5592 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5593 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5594 default: return 1;
5595 }
dd8fbd78 5596 tcg_temp_free(tmp2);
9ee6e8bb 5597 if (op == 19)
dd8fbd78 5598 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5599 break;
5600 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5601 tmp2 = tcg_const_i32(0);
9ee6e8bb 5602 switch(size) {
dd8fbd78
FN
5603 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5604 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5605 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5606 default: return 1;
5607 }
dd8fbd78 5608 tcg_temp_free(tmp2);
9ee6e8bb 5609 if (op == 20)
dd8fbd78 5610 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5611 break;
5612 case 18: /* VCEQ #0 */
dd8fbd78 5613 tmp2 = tcg_const_i32(0);
9ee6e8bb 5614 switch(size) {
dd8fbd78
FN
5615 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5616 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5617 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5618 default: return 1;
5619 }
dd8fbd78 5620 tcg_temp_free(tmp2);
9ee6e8bb
PB
5621 break;
5622 case 22: /* VABS */
5623 switch(size) {
dd8fbd78
FN
5624 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5625 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5626 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5627 default: return 1;
5628 }
5629 break;
5630 case 23: /* VNEG */
ad69471c
PB
5631 if (size == 3)
5632 return 1;
dd8fbd78
FN
5633 tmp2 = tcg_const_i32(0);
5634 gen_neon_rsb(size, tmp, tmp2);
5635 tcg_temp_free(tmp2);
9ee6e8bb
PB
5636 break;
5637 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5638 tmp2 = tcg_const_i32(0);
5639 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5640 tcg_temp_free(tmp2);
9ee6e8bb 5641 if (op == 27)
dd8fbd78 5642 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5643 break;
5644 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5645 tmp2 = tcg_const_i32(0);
5646 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5647 tcg_temp_free(tmp2);
9ee6e8bb 5648 if (op == 28)
dd8fbd78 5649 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5650 break;
5651 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5652 tmp2 = tcg_const_i32(0);
5653 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5654 tcg_temp_free(tmp2);
9ee6e8bb
PB
5655 break;
5656 case 30: /* Float VABS */
4373f3ce 5657 gen_vfp_abs(0);
9ee6e8bb
PB
5658 break;
5659 case 31: /* Float VNEG */
4373f3ce 5660 gen_vfp_neg(0);
9ee6e8bb
PB
5661 break;
5662 case 32: /* VSWP */
dd8fbd78
FN
5663 tmp2 = neon_load_reg(rd, pass);
5664 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5665 break;
5666 case 33: /* VTRN */
dd8fbd78 5667 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5668 switch (size) {
dd8fbd78
FN
5669 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5670 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5671 case 2: abort();
5672 default: return 1;
5673 }
dd8fbd78 5674 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5675 break;
5676 case 56: /* Integer VRECPE */
dd8fbd78 5677 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5678 break;
5679 case 57: /* Integer VRSQRTE */
dd8fbd78 5680 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5681 break;
5682 case 58: /* Float VRECPE */
4373f3ce 5683 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5684 break;
5685 case 59: /* Float VRSQRTE */
4373f3ce 5686 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5687 break;
5688 case 60: /* VCVT.F32.S32 */
d3587ef8 5689 gen_vfp_sito(0);
9ee6e8bb
PB
5690 break;
5691 case 61: /* VCVT.F32.U32 */
d3587ef8 5692 gen_vfp_uito(0);
9ee6e8bb
PB
5693 break;
5694 case 62: /* VCVT.S32.F32 */
d3587ef8 5695 gen_vfp_tosiz(0);
9ee6e8bb
PB
5696 break;
5697 case 63: /* VCVT.U32.F32 */
d3587ef8 5698 gen_vfp_touiz(0);
9ee6e8bb
PB
5699 break;
5700 default:
5701 /* Reserved: 21, 29, 39-56 */
5702 return 1;
5703 }
5704 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5705 tcg_gen_st_f32(cpu_F0s, cpu_env,
5706 neon_reg_offset(rd, pass));
9ee6e8bb 5707 } else {
dd8fbd78 5708 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5709 }
5710 }
5711 break;
5712 }
5713 } else if ((insn & (1 << 10)) == 0) {
5714 /* VTBL, VTBX. */
3018f259 5715 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5716 if (insn & (1 << 6)) {
8f8e3aa4 5717 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5718 } else {
8f8e3aa4
PB
5719 tmp = new_tmp();
5720 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5721 }
8f8e3aa4 5722 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5723 tmp4 = tcg_const_i32(rn);
5724 tmp5 = tcg_const_i32(n);
5725 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
3018f259 5726 dead_tmp(tmp);
9ee6e8bb 5727 if (insn & (1 << 6)) {
8f8e3aa4 5728 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5729 } else {
8f8e3aa4
PB
5730 tmp = new_tmp();
5731 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5732 }
8f8e3aa4 5733 tmp3 = neon_load_reg(rm, 1);
b75263d6 5734 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5735 tcg_temp_free_i32(tmp5);
5736 tcg_temp_free_i32(tmp4);
8f8e3aa4 5737 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5738 neon_store_reg(rd, 1, tmp3);
5739 dead_tmp(tmp);
9ee6e8bb
PB
5740 } else if ((insn & 0x380) == 0) {
5741 /* VDUP */
5742 if (insn & (1 << 19)) {
dd8fbd78 5743 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5744 } else {
dd8fbd78 5745 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5746 }
5747 if (insn & (1 << 16)) {
dd8fbd78 5748 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5749 } else if (insn & (1 << 17)) {
5750 if ((insn >> 18) & 1)
dd8fbd78 5751 gen_neon_dup_high16(tmp);
9ee6e8bb 5752 else
dd8fbd78 5753 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5754 }
5755 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5756 tmp2 = new_tmp();
5757 tcg_gen_mov_i32(tmp2, tmp);
5758 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5759 }
dd8fbd78 5760 dead_tmp(tmp);
9ee6e8bb
PB
5761 } else {
5762 return 1;
5763 }
5764 }
5765 }
5766 return 0;
5767}
5768
fe1479c3
PB
5769static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5770{
5771 int crn = (insn >> 16) & 0xf;
5772 int crm = insn & 0xf;
5773 int op1 = (insn >> 21) & 7;
5774 int op2 = (insn >> 5) & 7;
5775 int rt = (insn >> 12) & 0xf;
5776 TCGv tmp;
5777
5778 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5779 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5780 /* TEECR */
5781 if (IS_USER(s))
5782 return 1;
5783 tmp = load_cpu_field(teecr);
5784 store_reg(s, rt, tmp);
5785 return 0;
5786 }
5787 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5788 /* TEEHBR */
5789 if (IS_USER(s) && (env->teecr & 1))
5790 return 1;
5791 tmp = load_cpu_field(teehbr);
5792 store_reg(s, rt, tmp);
5793 return 0;
5794 }
5795 }
5796 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5797 op1, crn, crm, op2);
5798 return 1;
5799}
5800
5801static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5802{
5803 int crn = (insn >> 16) & 0xf;
5804 int crm = insn & 0xf;
5805 int op1 = (insn >> 21) & 7;
5806 int op2 = (insn >> 5) & 7;
5807 int rt = (insn >> 12) & 0xf;
5808 TCGv tmp;
5809
5810 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5811 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5812 /* TEECR */
5813 if (IS_USER(s))
5814 return 1;
5815 tmp = load_reg(s, rt);
5816 gen_helper_set_teecr(cpu_env, tmp);
5817 dead_tmp(tmp);
5818 return 0;
5819 }
5820 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5821 /* TEEHBR */
5822 if (IS_USER(s) && (env->teecr & 1))
5823 return 1;
5824 tmp = load_reg(s, rt);
5825 store_cpu_field(tmp, teehbr);
5826 return 0;
5827 }
5828 }
5829 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5830 op1, crn, crm, op2);
5831 return 1;
5832}
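/* Illustrative note (not part of the original file): the two ThumbEE
 * registers handled above correspond to the guest encodings
 *   MRC/MCR p14, 6, Rt, c0, c0, 0   -> TEECR
 *   MRC/MCR p14, 6, Rt, c1, c0, 0   -> TEEHBR
 * The sketch below (hypothetical name, not compiled in) merely mirrors the
 * decode checks already made by disas_cp14_read()/disas_cp14_write().
 */
#if 0
static int cp14_is_thumbee_reg(int op1, int crn, int crm, int op2)
{
    return op1 == 6 && crm == 0 && op2 == 0 && (crn == 0 || crn == 1);
}
#endif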
5833
9ee6e8bb
PB
5834static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5835{
5836 int cpnum;
5837
5838 cpnum = (insn >> 8) & 0xf;
5839 if (arm_feature(env, ARM_FEATURE_XSCALE)
5840 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5841 return 1;
5842
5843 switch (cpnum) {
5844 case 0:
5845 case 1:
5846 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5847 return disas_iwmmxt_insn(env, s, insn);
5848 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5849 return disas_dsp_insn(env, s, insn);
5850 }
5851 return 1;
5852 case 10:
5853 case 11:
5854 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5855 case 14:
5856 /* Coprocessors 7-15 are architecturally reserved by ARM.
5857 Unfortunately Intel decided to ignore this. */
5858 if (arm_feature(env, ARM_FEATURE_XSCALE))
5859 goto board;
5860 if (insn & (1 << 20))
5861 return disas_cp14_read(env, s, insn);
5862 else
5863 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5864 case 15:
5865 return disas_cp15_insn (env, s, insn);
5866 default:
fe1479c3 5867 board:
9ee6e8bb
PB
5868 /* Unknown coprocessor. See if the board has hooked it. */
5869 return disas_cp_insn (env, s, insn);
5870 }
5871}
5872
5e3f878a
PB
5873
5874/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5875static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5876{
5877 TCGv tmp;
5878 tmp = new_tmp();
5879 tcg_gen_trunc_i64_i32(tmp, val);
5880 store_reg(s, rlow, tmp);
5881 tmp = new_tmp();
5882 tcg_gen_shri_i64(val, val, 32);
5883 tcg_gen_trunc_i64_i32(tmp, val);
5884 store_reg(s, rhigh, tmp);
5885}
5886
5887/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5888static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5889{
a7812ae4 5890 TCGv_i64 tmp;
5e3f878a
PB
5891 TCGv tmp2;
5892
36aa55dc 5893 /* Load value and extend to 64 bits. */
a7812ae4 5894 tmp = tcg_temp_new_i64();
5e3f878a
PB
5895 tmp2 = load_reg(s, rlow);
5896 tcg_gen_extu_i32_i64(tmp, tmp2);
5897 dead_tmp(tmp2);
5898 tcg_gen_add_i64(val, val, tmp);
b75263d6 5899 tcg_temp_free_i64(tmp);
5e3f878a
PB
5900}
5901
5902/* load and add a 64-bit value from a register pair. */
a7812ae4 5903static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5904{
a7812ae4 5905 TCGv_i64 tmp;
36aa55dc
PB
5906 TCGv tmpl;
5907 TCGv tmph;
5e3f878a
PB
5908
5909 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5910 tmpl = load_reg(s, rlow);
5911 tmph = load_reg(s, rhigh);
a7812ae4 5912 tmp = tcg_temp_new_i64();
36aa55dc
PB
5913 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5914 dead_tmp(tmpl);
5915 dead_tmp(tmph);
5e3f878a 5916 tcg_gen_add_i64(val, val, tmp);
b75263d6 5917 tcg_temp_free_i64(tmp);
5e3f878a
PB
5918}
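/* Illustrative sketch (not from the original source): at the guest level,
 * gen_addq() plus gen_storeq_reg() implement the usual long multiply
 * accumulate, roughly the C below, with rlow/rhigh forming the 64-bit
 * accumulator.  Names are hypothetical and the block is not compiled in.
 */
#if 0
static void sketch_umlal(uint32_t *rlow, uint32_t *rhigh,
                         uint32_t rm, uint32_t rs)
{
    uint64_t acc = ((uint64_t)*rhigh << 32) | *rlow;

    acc += (uint64_t)rm * rs;           /* unsigned 32x32->64 multiply */
    *rlow = (uint32_t)acc;
    *rhigh = (uint32_t)(acc >> 32);
}
#endif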
5919
5920/* Set N and Z flags from a 64-bit value. */
a7812ae4 5921static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5922{
5923 TCGv tmp = new_tmp();
5924 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5925 gen_logic_CC(tmp);
5926 dead_tmp(tmp);
5e3f878a
PB
5927}
5928
426f5abc
PB
5929/* Load/Store exclusive instructions are implemented by remembering
5930 the value/address loaded, and seeing if these are the same
5931 when the store is performed. This should be sufficient to implement
5932 the architecturally mandated semantics, and avoids having to monitor
5933 regular stores.
5934
5935 In system emulation mode only one CPU will be running at once, so
5936 this sequence is effectively atomic. In user emulation mode we
5937 throw an exception and handle the atomic operation elsewhere. */
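/* Illustrative sketch (not part of the original file): with a single CPU the
 * generated code for the 32-bit case behaves like the C below, where
 * excl_addr/excl_val stand in for cpu_exclusive_addr/cpu_exclusive_val.
 * Names are hypothetical and the block is not compiled in.
 */
#if 0
static uint32_t excl_addr = ~0u, excl_val;

static uint32_t sketch_ldrex(uint32_t *mem, uint32_t addr)
{
    excl_addr = addr;
    excl_val = mem[addr / 4];           /* remember address and value */
    return excl_val;
}

static int sketch_strex(uint32_t *mem, uint32_t addr, uint32_t value)
{
    int failed = (addr != excl_addr || mem[addr / 4] != excl_val);

    if (!failed) {
        mem[addr / 4] = value;          /* store only if still exclusive */
    }
    excl_addr = ~0u;                    /* the monitor is cleared either way */
    return failed;                      /* Rd: 0 on success, 1 on failure */
}
#endif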
5938static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5939 TCGv addr, int size)
5940{
5941 TCGv tmp;
5942
5943 switch (size) {
5944 case 0:
5945 tmp = gen_ld8u(addr, IS_USER(s));
5946 break;
5947 case 1:
5948 tmp = gen_ld16u(addr, IS_USER(s));
5949 break;
5950 case 2:
5951 case 3:
5952 tmp = gen_ld32(addr, IS_USER(s));
5953 break;
5954 default:
5955 abort();
5956 }
5957 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5958 store_reg(s, rt, tmp);
5959 if (size == 3) {
2c9adbda
PM
5960 TCGv tmp2 = new_tmp();
5961 tcg_gen_addi_i32(tmp2, addr, 4);
5962 tmp = gen_ld32(tmp2, IS_USER(s));
5963 dead_tmp(tmp2);
426f5abc
PB
5964 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5965 store_reg(s, rt2, tmp);
5966 }
5967 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5968}
5969
5970static void gen_clrex(DisasContext *s)
5971{
5972 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5973}
5974
5975#ifdef CONFIG_USER_ONLY
5976static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5977 TCGv addr, int size)
5978{
5979 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5980 tcg_gen_movi_i32(cpu_exclusive_info,
5981 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5982 gen_set_condexec(s);
5983 gen_set_pc_im(s->pc - 4);
5984 gen_exception(EXCP_STREX);
5985 s->is_jmp = DISAS_JUMP;
5986}
5987#else
5988static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5989 TCGv addr, int size)
5990{
5991 TCGv tmp;
5992 int done_label;
5993 int fail_label;
5994
5995 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5996 [addr] = {Rt};
5997 {Rd} = 0;
5998 } else {
5999 {Rd} = 1;
6000 } */
6001 fail_label = gen_new_label();
6002 done_label = gen_new_label();
6003 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6004 switch (size) {
6005 case 0:
6006 tmp = gen_ld8u(addr, IS_USER(s));
6007 break;
6008 case 1:
6009 tmp = gen_ld16u(addr, IS_USER(s));
6010 break;
6011 case 2:
6012 case 3:
6013 tmp = gen_ld32(addr, IS_USER(s));
6014 break;
6015 default:
6016 abort();
6017 }
6018 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6019 dead_tmp(tmp);
6020 if (size == 3) {
6021 TCGv tmp2 = new_tmp();
6022 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6023 tmp = gen_ld32(tmp2, IS_USER(s));
426f5abc
PB
6024 dead_tmp(tmp2);
6025 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6026 dead_tmp(tmp);
6027 }
6028 tmp = load_reg(s, rt);
6029 switch (size) {
6030 case 0:
6031 gen_st8(tmp, addr, IS_USER(s));
6032 break;
6033 case 1:
6034 gen_st16(tmp, addr, IS_USER(s));
6035 break;
6036 case 2:
6037 case 3:
6038 gen_st32(tmp, addr, IS_USER(s));
6039 break;
6040 default:
6041 abort();
6042 }
6043 if (size == 3) {
6044 tcg_gen_addi_i32(addr, addr, 4);
6045 tmp = load_reg(s, rt2);
6046 gen_st32(tmp, addr, IS_USER(s));
6047 }
6048 tcg_gen_movi_i32(cpu_R[rd], 0);
6049 tcg_gen_br(done_label);
6050 gen_set_label(fail_label);
6051 tcg_gen_movi_i32(cpu_R[rd], 1);
6052 gen_set_label(done_label);
6053 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6054}
6055#endif
6056
9ee6e8bb
PB
6057static void disas_arm_insn(CPUState * env, DisasContext *s)
6058{
6059 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6060 TCGv tmp;
3670669c 6061 TCGv tmp2;
6ddbc6e4 6062 TCGv tmp3;
b0109805 6063 TCGv addr;
a7812ae4 6064 TCGv_i64 tmp64;
9ee6e8bb
PB
6065
6066 insn = ldl_code(s->pc);
6067 s->pc += 4;
6068
6069 /* M variants do not implement ARM mode. */
6070 if (IS_M(env))
6071 goto illegal_op;
6072 cond = insn >> 28;
6073 if (cond == 0xf){
6074 /* Unconditional instructions. */
6075 if (((insn >> 25) & 7) == 1) {
6076 /* NEON Data processing. */
6077 if (!arm_feature(env, ARM_FEATURE_NEON))
6078 goto illegal_op;
6079
6080 if (disas_neon_data_insn(env, s, insn))
6081 goto illegal_op;
6082 return;
6083 }
6084 if ((insn & 0x0f100000) == 0x04000000) {
6085 /* NEON load/store. */
6086 if (!arm_feature(env, ARM_FEATURE_NEON))
6087 goto illegal_op;
6088
6089 if (disas_neon_ls_insn(env, s, insn))
6090 goto illegal_op;
6091 return;
6092 }
6093 if ((insn & 0x0d70f000) == 0x0550f000)
6094 return; /* PLD */
6095 else if ((insn & 0x0ffffdff) == 0x01010000) {
6096 ARCH(6);
6097 /* setend */
6098 if (insn & (1 << 9)) {
6099 /* BE8 mode not implemented. */
6100 goto illegal_op;
6101 }
6102 return;
6103 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6104 switch ((insn >> 4) & 0xf) {
6105 case 1: /* clrex */
6106 ARCH(6K);
426f5abc 6107 gen_clrex(s);
9ee6e8bb
PB
6108 return;
6109 case 4: /* dsb */
6110 case 5: /* dmb */
6111 case 6: /* isb */
6112 ARCH(7);
6113 /* We don't emulate caches so these are no-ops. */
6114 return;
6115 default:
6116 goto illegal_op;
6117 }
6118 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6119 /* srs */
c67b6b71 6120 int32_t offset;
9ee6e8bb
PB
6121 if (IS_USER(s))
6122 goto illegal_op;
6123 ARCH(6);
6124 op1 = (insn & 0x1f);
6125 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 6126 addr = load_reg(s, 13);
9ee6e8bb 6127 } else {
b0109805 6128 addr = new_tmp();
b75263d6
JR
6129 tmp = tcg_const_i32(op1);
6130 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6131 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6132 }
6133 i = (insn >> 23) & 3;
6134 switch (i) {
6135 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6136 case 1: offset = 0; break; /* IA */
6137 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6138 case 3: offset = 4; break; /* IB */
6139 default: abort();
6140 }
6141 if (offset)
b0109805
PB
6142 tcg_gen_addi_i32(addr, addr, offset);
6143 tmp = load_reg(s, 14);
6144 gen_st32(tmp, addr, 0);
c67b6b71 6145 tmp = load_cpu_field(spsr);
b0109805
PB
6146 tcg_gen_addi_i32(addr, addr, 4);
6147 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6148 if (insn & (1 << 21)) {
6149 /* Base writeback. */
6150 switch (i) {
6151 case 0: offset = -8; break;
c67b6b71
FN
6152 case 1: offset = 4; break;
6153 case 2: offset = -4; break;
9ee6e8bb
PB
6154 case 3: offset = 0; break;
6155 default: abort();
6156 }
6157 if (offset)
c67b6b71 6158 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 6159 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 6160 store_reg(s, 13, addr);
9ee6e8bb 6161 } else {
b75263d6
JR
6162 tmp = tcg_const_i32(op1);
6163 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6164 tcg_temp_free_i32(tmp);
c67b6b71 6165 dead_tmp(addr);
9ee6e8bb 6166 }
b0109805
PB
6167 } else {
6168 dead_tmp(addr);
9ee6e8bb 6169 }
a990f58f 6170 return;
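            /* Illustrative note (not from the original source): srs stores two
               words (LR then SPSR), so the offsets chosen above work out per
               addressing mode as: DA starts at base-4 and writes back base-8,
               IA starts at base and writes back base+8, DB starts at base-8
               and writes back base-8, IB starts at base+4 and writes back
               base+8 (the second switch adds its offset after the address has
               already advanced by 4). */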
ea825eee 6171 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6172 /* rfe */
c67b6b71 6173 int32_t offset;
9ee6e8bb
PB
6174 if (IS_USER(s))
6175 goto illegal_op;
6176 ARCH(6);
6177 rn = (insn >> 16) & 0xf;
b0109805 6178 addr = load_reg(s, rn);
9ee6e8bb
PB
6179 i = (insn >> 23) & 3;
6180 switch (i) {
b0109805 6181 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6182 case 1: offset = 0; break; /* IA */
6183 case 2: offset = -8; break; /* DB */
b0109805 6184 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6185 default: abort();
6186 }
6187 if (offset)
b0109805
PB
6188 tcg_gen_addi_i32(addr, addr, offset);
6189 /* Load PC into tmp and CPSR into tmp2. */
6190 tmp = gen_ld32(addr, 0);
6191 tcg_gen_addi_i32(addr, addr, 4);
6192 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6193 if (insn & (1 << 21)) {
6194 /* Base writeback. */
6195 switch (i) {
b0109805 6196 case 0: offset = -8; break;
c67b6b71
FN
6197 case 1: offset = 4; break;
6198 case 2: offset = -4; break;
b0109805 6199 case 3: offset = 0; break;
9ee6e8bb
PB
6200 default: abort();
6201 }
6202 if (offset)
b0109805
PB
6203 tcg_gen_addi_i32(addr, addr, offset);
6204 store_reg(s, rn, addr);
6205 } else {
6206 dead_tmp(addr);
9ee6e8bb 6207 }
b0109805 6208 gen_rfe(s, tmp, tmp2);
c67b6b71 6209 return;
9ee6e8bb
PB
6210 } else if ((insn & 0x0e000000) == 0x0a000000) {
6211 /* branch link and change to thumb (blx <offset>) */
6212 int32_t offset;
6213
6214 val = (uint32_t)s->pc;
d9ba4830
PB
6215 tmp = new_tmp();
6216 tcg_gen_movi_i32(tmp, val);
6217 store_reg(s, 14, tmp);
9ee6e8bb
PB
6218 /* Sign-extend the 24-bit offset */
6219 offset = (((int32_t)insn) << 8) >> 8;
6220 /* offset * 4 + bit24 * 2 + (thumb bit) */
6221 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6222 /* pipeline offset */
6223 val += 4;
d9ba4830 6224 gen_bx_im(s, val);
9ee6e8bb
PB
6225 return;
6226 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6227 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6228 /* iWMMXt register transfer. */
6229 if (env->cp15.c15_cpar & (1 << 1))
6230 if (!disas_iwmmxt_insn(env, s, insn))
6231 return;
6232 }
6233 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6234 /* Coprocessor double register transfer. */
6235 } else if ((insn & 0x0f000010) == 0x0e000010) {
6236 /* Additional coprocessor register transfer. */
7997d92f 6237 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6238 uint32_t mask;
6239 uint32_t val;
6240 /* cps (privileged) */
6241 if (IS_USER(s))
6242 return;
6243 mask = val = 0;
6244 if (insn & (1 << 19)) {
6245 if (insn & (1 << 8))
6246 mask |= CPSR_A;
6247 if (insn & (1 << 7))
6248 mask |= CPSR_I;
6249 if (insn & (1 << 6))
6250 mask |= CPSR_F;
6251 if (insn & (1 << 18))
6252 val |= mask;
6253 }
7997d92f 6254 if (insn & (1 << 17)) {
9ee6e8bb
PB
6255 mask |= CPSR_M;
6256 val |= (insn & 0x1f);
6257 }
6258 if (mask) {
2fbac54b 6259 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6260 }
6261 return;
6262 }
6263 goto illegal_op;
6264 }
6265 if (cond != 0xe) {
6266 /* if not always execute, we generate a conditional jump to
6267 next instruction */
6268 s->condlabel = gen_new_label();
d9ba4830 6269 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6270 s->condjmp = 1;
6271 }
6272 if ((insn & 0x0f900000) == 0x03000000) {
6273 if ((insn & (1 << 21)) == 0) {
6274 ARCH(6T2);
6275 rd = (insn >> 12) & 0xf;
6276 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6277 if ((insn & (1 << 22)) == 0) {
6278 /* MOVW */
5e3f878a
PB
6279 tmp = new_tmp();
6280 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6281 } else {
6282 /* MOVT */
5e3f878a 6283 tmp = load_reg(s, rd);
86831435 6284 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6285 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6286 }
5e3f878a 6287 store_reg(s, rd, tmp);
9ee6e8bb
PB
6288 } else {
6289 if (((insn >> 12) & 0xf) != 0xf)
6290 goto illegal_op;
6291 if (((insn >> 16) & 0xf) == 0) {
6292 gen_nop_hint(s, insn & 0xff);
6293 } else {
6294 /* CPSR = immediate */
6295 val = insn & 0xff;
6296 shift = ((insn >> 8) & 0xf) * 2;
6297 if (shift)
6298 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6299 i = ((insn & (1 << 22)) != 0);
2fbac54b 6300 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6301 goto illegal_op;
6302 }
6303 }
6304 } else if ((insn & 0x0f900000) == 0x01000000
6305 && (insn & 0x00000090) != 0x00000090) {
6306 /* miscellaneous instructions */
6307 op1 = (insn >> 21) & 3;
6308 sh = (insn >> 4) & 0xf;
6309 rm = insn & 0xf;
6310 switch (sh) {
6311 case 0x0: /* move program status register */
6312 if (op1 & 1) {
6313 /* PSR = reg */
2fbac54b 6314 tmp = load_reg(s, rm);
9ee6e8bb 6315 i = ((op1 & 2) != 0);
2fbac54b 6316 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6317 goto illegal_op;
6318 } else {
6319 /* reg = PSR */
6320 rd = (insn >> 12) & 0xf;
6321 if (op1 & 2) {
6322 if (IS_USER(s))
6323 goto illegal_op;
d9ba4830 6324 tmp = load_cpu_field(spsr);
9ee6e8bb 6325 } else {
d9ba4830
PB
6326 tmp = new_tmp();
6327 gen_helper_cpsr_read(tmp);
9ee6e8bb 6328 }
d9ba4830 6329 store_reg(s, rd, tmp);
9ee6e8bb
PB
6330 }
6331 break;
6332 case 0x1:
6333 if (op1 == 1) {
6334 /* branch/exchange thumb (bx). */
d9ba4830
PB
6335 tmp = load_reg(s, rm);
6336 gen_bx(s, tmp);
9ee6e8bb
PB
6337 } else if (op1 == 3) {
6338 /* clz */
6339 rd = (insn >> 12) & 0xf;
1497c961
PB
6340 tmp = load_reg(s, rm);
6341 gen_helper_clz(tmp, tmp);
6342 store_reg(s, rd, tmp);
9ee6e8bb
PB
6343 } else {
6344 goto illegal_op;
6345 }
6346 break;
6347 case 0x2:
6348 if (op1 == 1) {
6349 ARCH(5J); /* bxj */
6350 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6351 tmp = load_reg(s, rm);
6352 gen_bx(s, tmp);
9ee6e8bb
PB
6353 } else {
6354 goto illegal_op;
6355 }
6356 break;
6357 case 0x3:
6358 if (op1 != 1)
6359 goto illegal_op;
6360
6361 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6362 tmp = load_reg(s, rm);
6363 tmp2 = new_tmp();
6364 tcg_gen_movi_i32(tmp2, s->pc);
6365 store_reg(s, 14, tmp2);
6366 gen_bx(s, tmp);
9ee6e8bb
PB
6367 break;
6368 case 0x5: /* saturating add/subtract */
6369 rd = (insn >> 12) & 0xf;
6370 rn = (insn >> 16) & 0xf;
b40d0353 6371 tmp = load_reg(s, rm);
5e3f878a 6372 tmp2 = load_reg(s, rn);
9ee6e8bb 6373 if (op1 & 2)
5e3f878a 6374 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6375 if (op1 & 1)
5e3f878a 6376 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6377 else
5e3f878a
PB
6378 gen_helper_add_saturate(tmp, tmp, tmp2);
6379 dead_tmp(tmp2);
6380 store_reg(s, rd, tmp);
9ee6e8bb 6381 break;
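            /* Illustrative note (not part of the original file): op1 selects
               QADD (0), QSUB (1), QDADD (2) or QDSUB (3); the "double" forms
               saturate 2*Rn via gen_helper_double_saturate() before the final
               saturating add/subtract. */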
49e14940
AL
6382 case 7:
6383 /* SMC instruction (op1 == 3)
6384 and undefined instructions (op1 == 0 || op1 == 2)
6385 will trap */
6386 if (op1 != 1) {
6387 goto illegal_op;
6388 }
6389 /* bkpt */
9ee6e8bb 6390 gen_set_condexec(s);
5e3f878a 6391 gen_set_pc_im(s->pc - 4);
d9ba4830 6392 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6393 s->is_jmp = DISAS_JUMP;
6394 break;
6395 case 0x8: /* signed multiply */
6396 case 0xa:
6397 case 0xc:
6398 case 0xe:
6399 rs = (insn >> 8) & 0xf;
6400 rn = (insn >> 12) & 0xf;
6401 rd = (insn >> 16) & 0xf;
6402 if (op1 == 1) {
6403 /* (32 * 16) >> 16 */
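                /* SMULW<y>/SMLAW<y>: multiply 32-bit Rm by the chosen 16-bit
                   half of Rs and keep the top 32 bits of the 48-bit product,
                   optionally accumulating Rn. */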
5e3f878a
PB
6404 tmp = load_reg(s, rm);
6405 tmp2 = load_reg(s, rs);
9ee6e8bb 6406 if (sh & 4)
5e3f878a 6407 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6408 else
5e3f878a 6409 gen_sxth(tmp2);
a7812ae4
PB
6410 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6411 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6412 tmp = new_tmp();
a7812ae4 6413 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6414 tcg_temp_free_i64(tmp64);
9ee6e8bb 6415 if ((sh & 2) == 0) {
5e3f878a
PB
6416 tmp2 = load_reg(s, rn);
6417 gen_helper_add_setq(tmp, tmp, tmp2);
6418 dead_tmp(tmp2);
9ee6e8bb 6419 }
5e3f878a 6420 store_reg(s, rd, tmp);
9ee6e8bb
PB
6421 } else {
6422 /* 16 * 16 */
5e3f878a
PB
6423 tmp = load_reg(s, rm);
6424 tmp2 = load_reg(s, rs);
6425 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6426 dead_tmp(tmp2);
9ee6e8bb 6427 if (op1 == 2) {
a7812ae4
PB
6428 tmp64 = tcg_temp_new_i64();
6429 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6430 dead_tmp(tmp);
a7812ae4
PB
6431 gen_addq(s, tmp64, rn, rd);
6432 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6433 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6434 } else {
6435 if (op1 == 0) {
5e3f878a
PB
6436 tmp2 = load_reg(s, rn);
6437 gen_helper_add_setq(tmp, tmp, tmp2);
6438 dead_tmp(tmp2);
9ee6e8bb 6439 }
5e3f878a 6440 store_reg(s, rd, tmp);
9ee6e8bb
PB
6441 }
6442 }
6443 break;
6444 default:
6445 goto illegal_op;
6446 }
6447 } else if (((insn & 0x0e000000) == 0 &&
6448 (insn & 0x00000090) != 0x90) ||
6449 ((insn & 0x0e000000) == (1 << 25))) {
6450 int set_cc, logic_cc, shiftop;
6451
6452 op1 = (insn >> 21) & 0xf;
6453 set_cc = (insn >> 20) & 1;
6454 logic_cc = table_logic_cc[op1] & set_cc;
6455
6456 /* data processing instruction */
6457 if (insn & (1 << 25)) {
6458 /* immediate operand */
6459 val = insn & 0xff;
6460 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6461 if (shift) {
9ee6e8bb 6462 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6463 }
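            /* e.g. imm12 = 0x4ff: rotate 0xff right by 2*4 = 8 bits, giving
               the operand 0xff000000. */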
6464 tmp2 = new_tmp();
6465 tcg_gen_movi_i32(tmp2, val);
6466 if (logic_cc && shift) {
6467 gen_set_CF_bit31(tmp2);
6468 }
9ee6e8bb
PB
6469 } else {
6470 /* register */
6471 rm = (insn) & 0xf;
e9bb4aa9 6472 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6473 shiftop = (insn >> 5) & 3;
6474 if (!(insn & (1 << 4))) {
6475 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6476 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6477 } else {
6478 rs = (insn >> 8) & 0xf;
8984bd2e 6479 tmp = load_reg(s, rs);
e9bb4aa9 6480 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6481 }
6482 }
6483 if (op1 != 0x0f && op1 != 0x0d) {
6484 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6485 tmp = load_reg(s, rn);
6486 } else {
6487 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6488 }
6489 rd = (insn >> 12) & 0xf;
6490 switch(op1) {
6491 case 0x00:
e9bb4aa9
JR
6492 tcg_gen_and_i32(tmp, tmp, tmp2);
6493 if (logic_cc) {
6494 gen_logic_CC(tmp);
6495 }
21aeb343 6496 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6497 break;
6498 case 0x01:
e9bb4aa9
JR
6499 tcg_gen_xor_i32(tmp, tmp, tmp2);
6500 if (logic_cc) {
6501 gen_logic_CC(tmp);
6502 }
21aeb343 6503 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6504 break;
6505 case 0x02:
6506 if (set_cc && rd == 15) {
6507 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6508 if (IS_USER(s)) {
9ee6e8bb 6509 goto illegal_op;
e9bb4aa9
JR
6510 }
6511 gen_helper_sub_cc(tmp, tmp, tmp2);
6512 gen_exception_return(s, tmp);
9ee6e8bb 6513 } else {
e9bb4aa9
JR
6514 if (set_cc) {
6515 gen_helper_sub_cc(tmp, tmp, tmp2);
6516 } else {
6517 tcg_gen_sub_i32(tmp, tmp, tmp2);
6518 }
21aeb343 6519 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6520 }
6521 break;
6522 case 0x03:
e9bb4aa9
JR
6523 if (set_cc) {
6524 gen_helper_sub_cc(tmp, tmp2, tmp);
6525 } else {
6526 tcg_gen_sub_i32(tmp, tmp2, tmp);
6527 }
21aeb343 6528 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6529 break;
6530 case 0x04:
e9bb4aa9
JR
6531 if (set_cc) {
6532 gen_helper_add_cc(tmp, tmp, tmp2);
6533 } else {
6534 tcg_gen_add_i32(tmp, tmp, tmp2);
6535 }
21aeb343 6536 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6537 break;
6538 case 0x05:
e9bb4aa9
JR
6539 if (set_cc) {
6540 gen_helper_adc_cc(tmp, tmp, tmp2);
6541 } else {
6542 gen_add_carry(tmp, tmp, tmp2);
6543 }
21aeb343 6544 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6545 break;
6546 case 0x06:
e9bb4aa9
JR
6547 if (set_cc) {
6548 gen_helper_sbc_cc(tmp, tmp, tmp2);
6549 } else {
6550 gen_sub_carry(tmp, tmp, tmp2);
6551 }
21aeb343 6552 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6553 break;
6554 case 0x07:
e9bb4aa9
JR
6555 if (set_cc) {
6556 gen_helper_sbc_cc(tmp, tmp2, tmp);
6557 } else {
6558 gen_sub_carry(tmp, tmp2, tmp);
6559 }
21aeb343 6560 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6561 break;
6562 case 0x08:
6563 if (set_cc) {
e9bb4aa9
JR
6564 tcg_gen_and_i32(tmp, tmp, tmp2);
6565 gen_logic_CC(tmp);
9ee6e8bb 6566 }
e9bb4aa9 6567 dead_tmp(tmp);
9ee6e8bb
PB
6568 break;
6569 case 0x09:
6570 if (set_cc) {
e9bb4aa9
JR
6571 tcg_gen_xor_i32(tmp, tmp, tmp2);
6572 gen_logic_CC(tmp);
9ee6e8bb 6573 }
e9bb4aa9 6574 dead_tmp(tmp);
9ee6e8bb
PB
6575 break;
6576 case 0x0a:
6577 if (set_cc) {
e9bb4aa9 6578 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6579 }
e9bb4aa9 6580 dead_tmp(tmp);
9ee6e8bb
PB
6581 break;
6582 case 0x0b:
6583 if (set_cc) {
e9bb4aa9 6584 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6585 }
e9bb4aa9 6586 dead_tmp(tmp);
9ee6e8bb
PB
6587 break;
6588 case 0x0c:
e9bb4aa9
JR
6589 tcg_gen_or_i32(tmp, tmp, tmp2);
6590 if (logic_cc) {
6591 gen_logic_CC(tmp);
6592 }
21aeb343 6593 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6594 break;
6595 case 0x0d:
6596 if (logic_cc && rd == 15) {
6597 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6598 if (IS_USER(s)) {
9ee6e8bb 6599 goto illegal_op;
e9bb4aa9
JR
6600 }
6601 gen_exception_return(s, tmp2);
9ee6e8bb 6602 } else {
e9bb4aa9
JR
6603 if (logic_cc) {
6604 gen_logic_CC(tmp2);
6605 }
21aeb343 6606 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6607 }
6608 break;
6609 case 0x0e:
f669df27 6610 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6611 if (logic_cc) {
6612 gen_logic_CC(tmp);
6613 }
21aeb343 6614 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6615 break;
6616 default:
6617 case 0x0f:
e9bb4aa9
JR
6618 tcg_gen_not_i32(tmp2, tmp2);
6619 if (logic_cc) {
6620 gen_logic_CC(tmp2);
6621 }
21aeb343 6622 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6623 break;
6624 }
e9bb4aa9
JR
6625 if (op1 != 0x0f && op1 != 0x0d) {
6626 dead_tmp(tmp2);
6627 }
9ee6e8bb
PB
6628 } else {
6629 /* other instructions */
6630 op1 = (insn >> 24) & 0xf;
6631 switch(op1) {
6632 case 0x0:
6633 case 0x1:
6634 /* multiplies, extra load/stores */
6635 sh = (insn >> 5) & 3;
6636 if (sh == 0) {
6637 if (op1 == 0x0) {
6638 rd = (insn >> 16) & 0xf;
6639 rn = (insn >> 12) & 0xf;
6640 rs = (insn >> 8) & 0xf;
6641 rm = (insn) & 0xf;
6642 op1 = (insn >> 20) & 0xf;
6643 switch (op1) {
6644 case 0: case 1: case 2: case 3: case 6:
6645 /* 32 bit mul */
5e3f878a
PB
6646 tmp = load_reg(s, rs);
6647 tmp2 = load_reg(s, rm);
6648 tcg_gen_mul_i32(tmp, tmp, tmp2);
6649 dead_tmp(tmp2);
9ee6e8bb
PB
6650 if (insn & (1 << 22)) {
6651 /* Subtract (mls) */
6652 ARCH(6T2);
5e3f878a
PB
6653 tmp2 = load_reg(s, rn);
6654 tcg_gen_sub_i32(tmp, tmp2, tmp);
6655 dead_tmp(tmp2);
9ee6e8bb
PB
6656 } else if (insn & (1 << 21)) {
6657 /* Add */
5e3f878a
PB
6658 tmp2 = load_reg(s, rn);
6659 tcg_gen_add_i32(tmp, tmp, tmp2);
6660 dead_tmp(tmp2);
9ee6e8bb
PB
6661 }
6662 if (insn & (1 << 20))
5e3f878a
PB
6663 gen_logic_CC(tmp);
6664 store_reg(s, rd, tmp);
9ee6e8bb 6665 break;
8aac08b1
AJ
6666 case 4:
6667 /* 64 bit mul double accumulate (UMAAL) */
6668 ARCH(6);
6669 tmp = load_reg(s, rs);
6670 tmp2 = load_reg(s, rm);
6671 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6672 gen_addq_lo(s, tmp64, rn);
6673 gen_addq_lo(s, tmp64, rd);
6674 gen_storeq_reg(s, rn, rd, tmp64);
6675 tcg_temp_free_i64(tmp64);
6676 break;
6677 case 8: case 9: case 10: case 11:
6678 case 12: case 13: case 14: case 15:
6679 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
6680 tmp = load_reg(s, rs);
6681 tmp2 = load_reg(s, rm);
8aac08b1 6682 if (insn & (1 << 22)) {
a7812ae4 6683 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 6684 } else {
a7812ae4 6685 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
6686 }
6687 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 6688 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 6689 }
8aac08b1 6690 if (insn & (1 << 20)) {
a7812ae4 6691 gen_logicq_cc(tmp64);
8aac08b1 6692 }
a7812ae4 6693 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6694 tcg_temp_free_i64(tmp64);
9ee6e8bb 6695 break;
8aac08b1
AJ
6696 default:
6697 goto illegal_op;
9ee6e8bb
PB
6698 }
6699 } else {
6700 rn = (insn >> 16) & 0xf;
6701 rd = (insn >> 12) & 0xf;
6702 if (insn & (1 << 23)) {
6703 /* load/store exclusive */
86753403
PB
6704 op1 = (insn >> 21) & 0x3;
6705 if (op1)
a47f43d2 6706 ARCH(6K);
86753403
PB
6707 else
6708 ARCH(6);
3174f8e9 6709 addr = tcg_temp_local_new_i32();
98a46317 6710 load_reg_var(s, addr, rn);
9ee6e8bb 6711 if (insn & (1 << 20)) {
86753403
PB
6712 switch (op1) {
6713 case 0: /* ldrex */
426f5abc 6714 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6715 break;
6716 case 1: /* ldrexd */
426f5abc 6717 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6718 break;
6719 case 2: /* ldrexb */
426f5abc 6720 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6721 break;
6722 case 3: /* ldrexh */
426f5abc 6723 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6724 break;
6725 default:
6726 abort();
6727 }
9ee6e8bb
PB
6728 } else {
6729 rm = insn & 0xf;
86753403
PB
6730 switch (op1) {
6731 case 0: /* strex */
426f5abc 6732 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6733 break;
6734 case 1: /* strexd */
502e64fe 6735 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6736 break;
6737 case 2: /* strexb */
426f5abc 6738 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6739 break;
6740 case 3: /* strexh */
426f5abc 6741 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6742 break;
6743 default:
6744 abort();
6745 }
9ee6e8bb 6746 }
3174f8e9 6747 tcg_temp_free(addr);
9ee6e8bb
PB
6748 } else {
6749 /* SWP instruction */
6750 rm = (insn) & 0xf;
6751
8984bd2e
PB
6752 /* ??? This is not really atomic. However, we know
6753 we never have multiple CPUs running in parallel,
6754 so it is good enough. */
6755 addr = load_reg(s, rn);
6756 tmp = load_reg(s, rm);
9ee6e8bb 6757 if (insn & (1 << 22)) {
8984bd2e
PB
6758 tmp2 = gen_ld8u(addr, IS_USER(s));
6759 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6760 } else {
8984bd2e
PB
6761 tmp2 = gen_ld32(addr, IS_USER(s));
6762 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6763 }
8984bd2e
PB
6764 dead_tmp(addr);
6765 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6766 }
6767 }
6768 } else {
6769 int address_offset;
6770 int load;
6771 /* Misc load/store */
6772 rn = (insn >> 16) & 0xf;
6773 rd = (insn >> 12) & 0xf;
b0109805 6774 addr = load_reg(s, rn);
9ee6e8bb 6775 if (insn & (1 << 24))
b0109805 6776 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6777 address_offset = 0;
6778 if (insn & (1 << 20)) {
6779 /* load */
6780 switch(sh) {
6781 case 1:
b0109805 6782 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6783 break;
6784 case 2:
b0109805 6785 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6786 break;
6787 default:
6788 case 3:
b0109805 6789 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6790 break;
6791 }
6792 load = 1;
6793 } else if (sh & 2) {
6794 /* doubleword */
6795 if (sh & 1) {
6796 /* store */
b0109805
PB
6797 tmp = load_reg(s, rd);
6798 gen_st32(tmp, addr, IS_USER(s));
6799 tcg_gen_addi_i32(addr, addr, 4);
6800 tmp = load_reg(s, rd + 1);
6801 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6802 load = 0;
6803 } else {
6804 /* load */
b0109805
PB
6805 tmp = gen_ld32(addr, IS_USER(s));
6806 store_reg(s, rd, tmp);
6807 tcg_gen_addi_i32(addr, addr, 4);
6808 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6809 rd++;
6810 load = 1;
6811 }
6812 address_offset = -4;
6813 } else {
6814 /* store */
b0109805
PB
6815 tmp = load_reg(s, rd);
6816 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6817 load = 0;
6818 }
6819 /* Perform base writeback before the loaded value to
6820 ensure correct behavior with overlapping index registers.
6821 ldrd with base writeback is undefined if the
6822 destination and index registers overlap. */
6823 if (!(insn & (1 << 24))) {
b0109805
PB
6824 gen_add_datah_offset(s, insn, address_offset, addr);
6825 store_reg(s, rn, addr);
9ee6e8bb
PB
6826 } else if (insn & (1 << 21)) {
6827 if (address_offset)
b0109805
PB
6828 tcg_gen_addi_i32(addr, addr, address_offset);
6829 store_reg(s, rn, addr);
6830 } else {
6831 dead_tmp(addr);
9ee6e8bb
PB
6832 }
6833 if (load) {
6834 /* Complete the load. */
b0109805 6835 store_reg(s, rd, tmp);
9ee6e8bb
PB
6836 }
6837 }
6838 break;
6839 case 0x4:
6840 case 0x5:
6841 goto do_ldst;
6842 case 0x6:
6843 case 0x7:
6844 if (insn & (1 << 4)) {
6845 ARCH(6);
6846 /* Armv6 Media instructions. */
6847 rm = insn & 0xf;
6848 rn = (insn >> 16) & 0xf;
2c0262af 6849 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6850 rs = (insn >> 8) & 0xf;
6851 switch ((insn >> 23) & 3) {
6852 case 0: /* Parallel add/subtract. */
6853 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6854 tmp = load_reg(s, rn);
6855 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6856 sh = (insn >> 5) & 7;
6857 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6858 goto illegal_op;
6ddbc6e4
PB
6859 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6860 dead_tmp(tmp2);
6861 store_reg(s, rd, tmp);
9ee6e8bb
PB
6862 break;
6863 case 1:
6864 if ((insn & 0x00700020) == 0) {
6c95676b 6865 /* Halfword pack. */
3670669c
PB
6866 tmp = load_reg(s, rn);
6867 tmp2 = load_reg(s, rm);
9ee6e8bb 6868 shift = (insn >> 7) & 0x1f;
3670669c
PB
6869 if (insn & (1 << 6)) {
6870 /* pkhtb */
22478e79
AZ
6871 if (shift == 0)
6872 shift = 31;
6873 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6874 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6875 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6876 } else {
6877 /* pkhbt */
22478e79
AZ
6878 if (shift)
6879 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6880 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6881 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6882 }
6883 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6884 dead_tmp(tmp2);
3670669c 6885 store_reg(s, rd, tmp);
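                    /* Illustrative note (not from the original source):
                       pkhbt yields (Rn & 0x0000ffff) | ((Rm LSL shift) & 0xffff0000),
                       pkhtb yields (Rn & 0xffff0000) | ((Rm ASR shift) & 0x0000ffff);
                       the two halves are built above and OR-ed together. */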
9ee6e8bb
PB
6886 } else if ((insn & 0x00200020) == 0x00200000) {
6887 /* [us]sat */
6ddbc6e4 6888 tmp = load_reg(s, rm);
9ee6e8bb
PB
6889 shift = (insn >> 7) & 0x1f;
6890 if (insn & (1 << 6)) {
6891 if (shift == 0)
6892 shift = 31;
6ddbc6e4 6893 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6894 } else {
6ddbc6e4 6895 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6896 }
6897 sh = (insn >> 16) & 0x1f;
6898 if (sh != 0) {
b75263d6 6899 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6900 if (insn & (1 << 22))
b75263d6 6901 gen_helper_usat(tmp, tmp, tmp2);
9ee6e8bb 6902 else
b75263d6
JR
6903 gen_helper_ssat(tmp, tmp, tmp2);
6904 tcg_temp_free_i32(tmp2);
9ee6e8bb 6905 }
6ddbc6e4 6906 store_reg(s, rd, tmp);
9ee6e8bb
PB
6907 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6908 /* [us]sat16 */
6ddbc6e4 6909 tmp = load_reg(s, rm);
9ee6e8bb
PB
6910 sh = (insn >> 16) & 0x1f;
6911 if (sh != 0) {
b75263d6 6912 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6913 if (insn & (1 << 22))
b75263d6 6914 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 6915 else
b75263d6
JR
6916 gen_helper_ssat16(tmp, tmp, tmp2);
6917 tcg_temp_free_i32(tmp2);
9ee6e8bb 6918 }
6ddbc6e4 6919 store_reg(s, rd, tmp);
9ee6e8bb
PB
6920 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6921 /* Select bytes. */
6ddbc6e4
PB
6922 tmp = load_reg(s, rn);
6923 tmp2 = load_reg(s, rm);
6924 tmp3 = new_tmp();
6925 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6926 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6927 dead_tmp(tmp3);
6928 dead_tmp(tmp2);
6929 store_reg(s, rd, tmp);
9ee6e8bb 6930 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6931 tmp = load_reg(s, rm);
9ee6e8bb
PB
6932 shift = (insn >> 10) & 3;
6933 /* ??? In many cases it's not necessary to do a
6934 rotate; a shift is sufficient. */
6935 if (shift != 0)
f669df27 6936 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6937 op1 = (insn >> 20) & 7;
6938 switch (op1) {
5e3f878a
PB
6939 case 0: gen_sxtb16(tmp); break;
6940 case 2: gen_sxtb(tmp); break;
6941 case 3: gen_sxth(tmp); break;
6942 case 4: gen_uxtb16(tmp); break;
6943 case 6: gen_uxtb(tmp); break;
6944 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6945 default: goto illegal_op;
6946 }
6947 if (rn != 15) {
5e3f878a 6948 tmp2 = load_reg(s, rn);
9ee6e8bb 6949 if ((op1 & 3) == 0) {
5e3f878a 6950 gen_add16(tmp, tmp2);
9ee6e8bb 6951 } else {
5e3f878a
PB
6952 tcg_gen_add_i32(tmp, tmp, tmp2);
6953 dead_tmp(tmp2);
9ee6e8bb
PB
6954 }
6955 }
6c95676b 6956 store_reg(s, rd, tmp);
9ee6e8bb
PB
6957 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6958 /* rev */
b0109805 6959 tmp = load_reg(s, rm);
9ee6e8bb
PB
6960 if (insn & (1 << 22)) {
6961 if (insn & (1 << 7)) {
b0109805 6962 gen_revsh(tmp);
9ee6e8bb
PB
6963 } else {
6964 ARCH(6T2);
b0109805 6965 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6966 }
6967 } else {
6968 if (insn & (1 << 7))
b0109805 6969 gen_rev16(tmp);
9ee6e8bb 6970 else
66896cb8 6971 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6972 }
b0109805 6973 store_reg(s, rd, tmp);
9ee6e8bb
PB
6974 } else {
6975 goto illegal_op;
6976 }
6977 break;
6978 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6979 tmp = load_reg(s, rm);
6980 tmp2 = load_reg(s, rs);
9ee6e8bb 6981 if (insn & (1 << 20)) {
838fa72d
AJ
6982 /* Signed multiply most significant [accumulate].
6983 (SMMUL, SMMLA, SMMLS) */
a7812ae4 6984 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 6985
955a7dd5 6986 if (rd != 15) {
838fa72d 6987 tmp = load_reg(s, rd);
9ee6e8bb 6988 if (insn & (1 << 6)) {
838fa72d 6989 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 6990 } else {
838fa72d 6991 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
6992 }
6993 }
838fa72d
AJ
6994 if (insn & (1 << 5)) {
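                        /* "Round" variants (SMMULR/SMMLAR/SMMLSR): bias the
                           64-bit product before taking its high word. */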
6995 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6996 }
6997 tcg_gen_shri_i64(tmp64, tmp64, 32);
6998 tmp = new_tmp();
6999 tcg_gen_trunc_i64_i32(tmp, tmp64);
7000 tcg_temp_free_i64(tmp64);
955a7dd5 7001 store_reg(s, rn, tmp);
9ee6e8bb
PB
7002 } else {
7003 if (insn & (1 << 5))
5e3f878a
PB
7004 gen_swap_half(tmp2);
7005 gen_smul_dual(tmp, tmp2);
7006 /* This addition cannot overflow. */
7007 if (insn & (1 << 6)) {
7008 tcg_gen_sub_i32(tmp, tmp, tmp2);
7009 } else {
7010 tcg_gen_add_i32(tmp, tmp, tmp2);
7011 }
7012 dead_tmp(tmp2);
9ee6e8bb 7013 if (insn & (1 << 22)) {
5e3f878a 7014 /* smlald, smlsld */
a7812ae4
PB
7015 tmp64 = tcg_temp_new_i64();
7016 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7017 dead_tmp(tmp);
a7812ae4
PB
7018 gen_addq(s, tmp64, rd, rn);
7019 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7020 tcg_temp_free_i64(tmp64);
9ee6e8bb 7021 } else {
5e3f878a 7022 /* smuad, smusd, smlad, smlsd */
22478e79 7023 if (rd != 15)
9ee6e8bb 7024 {
22478e79 7025 tmp2 = load_reg(s, rd);
5e3f878a
PB
7026 gen_helper_add_setq(tmp, tmp, tmp2);
7027 dead_tmp(tmp2);
9ee6e8bb 7028 }
22478e79 7029 store_reg(s, rn, tmp);
9ee6e8bb
PB
7030 }
7031 }
7032 break;
7033 case 3:
7034 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7035 switch (op1) {
7036 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7037 ARCH(6);
7038 tmp = load_reg(s, rm);
7039 tmp2 = load_reg(s, rs);
7040 gen_helper_usad8(tmp, tmp, tmp2);
7041 dead_tmp(tmp2);
ded9d295
AZ
7042 if (rd != 15) {
7043 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
7044 tcg_gen_add_i32(tmp, tmp, tmp2);
7045 dead_tmp(tmp2);
9ee6e8bb 7046 }
ded9d295 7047 store_reg(s, rn, tmp);
9ee6e8bb
PB
7048 break;
7049 case 0x20: case 0x24: case 0x28: case 0x2c:
7050 /* Bitfield insert/clear. */
7051 ARCH(6T2);
7052 shift = (insn >> 7) & 0x1f;
7053 i = (insn >> 16) & 0x1f;
7054 i = i + 1 - shift;
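                    /* i is now the field width: msb (insn[20:16]) - lsb + 1;
                       rm == 15 selects bfc (insert zeros). */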
7055 if (rm == 15) {
5e3f878a
PB
7056 tmp = new_tmp();
7057 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7058 } else {
5e3f878a 7059 tmp = load_reg(s, rm);
9ee6e8bb
PB
7060 }
7061 if (i != 32) {
5e3f878a 7062 tmp2 = load_reg(s, rd);
8f8e3aa4 7063 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 7064 dead_tmp(tmp2);
9ee6e8bb 7065 }
5e3f878a 7066 store_reg(s, rd, tmp);
9ee6e8bb
PB
7067 break;
7068 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7069 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7070 ARCH(6T2);
5e3f878a 7071 tmp = load_reg(s, rm);
9ee6e8bb
PB
7072 shift = (insn >> 7) & 0x1f;
7073 i = ((insn >> 16) & 0x1f) + 1;
7074 if (shift + i > 32)
7075 goto illegal_op;
7076 if (i < 32) {
7077 if (op1 & 0x20) {
5e3f878a 7078 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7079 } else {
5e3f878a 7080 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7081 }
7082 }
5e3f878a 7083 store_reg(s, rd, tmp);
9ee6e8bb
PB
7084 break;
7085 default:
7086 goto illegal_op;
7087 }
7088 break;
7089 }
7090 break;
7091 }
7092 do_ldst:
7093 /* Check for undefined extension instructions
7094 * per the ARM Bible, i.e.:
7095 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7096 */
7097 sh = (0xf << 20) | (0xf << 4);
7098 if (op1 == 0x7 && ((insn & sh) == sh))
7099 {
7100 goto illegal_op;
7101 }
7102 /* load/store byte/word */
7103 rn = (insn >> 16) & 0xf;
7104 rd = (insn >> 12) & 0xf;
b0109805 7105 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7106 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7107 if (insn & (1 << 24))
b0109805 7108 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7109 if (insn & (1 << 20)) {
7110 /* load */
9ee6e8bb 7111 if (insn & (1 << 22)) {
b0109805 7112 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7113 } else {
b0109805 7114 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7115 }
9ee6e8bb
PB
7116 } else {
7117 /* store */
b0109805 7118 tmp = load_reg(s, rd);
9ee6e8bb 7119 if (insn & (1 << 22))
b0109805 7120 gen_st8(tmp, tmp2, i);
9ee6e8bb 7121 else
b0109805 7122 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7123 }
7124 if (!(insn & (1 << 24))) {
b0109805
PB
7125 gen_add_data_offset(s, insn, tmp2);
7126 store_reg(s, rn, tmp2);
7127 } else if (insn & (1 << 21)) {
7128 store_reg(s, rn, tmp2);
7129 } else {
7130 dead_tmp(tmp2);
9ee6e8bb
PB
7131 }
7132 if (insn & (1 << 20)) {
7133 /* Complete the load. */
7134 if (rd == 15)
b0109805 7135 gen_bx(s, tmp);
9ee6e8bb 7136 else
b0109805 7137 store_reg(s, rd, tmp);
9ee6e8bb
PB
7138 }
7139 break;
7140 case 0x08:
7141 case 0x09:
7142 {
7143 int j, n, user, loaded_base;
b0109805 7144 TCGv loaded_var;
9ee6e8bb
PB
7145 /* load/store multiple words */
7146 /* XXX: store correct base if write back */
7147 user = 0;
7148 if (insn & (1 << 22)) {
7149 if (IS_USER(s))
7150 goto illegal_op; /* only usable in supervisor mode */
7151
7152 if ((insn & (1 << 15)) == 0)
7153 user = 1;
7154 }
7155 rn = (insn >> 16) & 0xf;
b0109805 7156 addr = load_reg(s, rn);
9ee6e8bb
PB
7157
7158 /* compute total size */
7159 loaded_base = 0;
a50f5b91 7160 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7161 n = 0;
7162 for(i=0;i<16;i++) {
7163 if (insn & (1 << i))
7164 n++;
7165 }
7166 /* XXX: test invalid n == 0 case ? */
7167 if (insn & (1 << 23)) {
7168 if (insn & (1 << 24)) {
7169 /* pre increment */
b0109805 7170 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7171 } else {
7172 /* post increment */
7173 }
7174 } else {
7175 if (insn & (1 << 24)) {
7176 /* pre decrement */
b0109805 7177 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7178 } else {
7179 /* post decrement */
7180 if (n != 1)
b0109805 7181 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7182 }
7183 }
7184 j = 0;
7185 for(i=0;i<16;i++) {
7186 if (insn & (1 << i)) {
7187 if (insn & (1 << 20)) {
7188 /* load */
b0109805 7189 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7190 if (i == 15) {
b0109805 7191 gen_bx(s, tmp);
9ee6e8bb 7192 } else if (user) {
b75263d6
JR
7193 tmp2 = tcg_const_i32(i);
7194 gen_helper_set_user_reg(tmp2, tmp);
7195 tcg_temp_free_i32(tmp2);
b0109805 7196 dead_tmp(tmp);
9ee6e8bb 7197 } else if (i == rn) {
b0109805 7198 loaded_var = tmp;
9ee6e8bb
PB
7199 loaded_base = 1;
7200 } else {
b0109805 7201 store_reg(s, i, tmp);
9ee6e8bb
PB
7202 }
7203 } else {
7204 /* store */
7205 if (i == 15) {
7206 /* special case: r15 = PC + 8 */
7207 val = (long)s->pc + 4;
b0109805
PB
7208 tmp = new_tmp();
7209 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7210 } else if (user) {
b0109805 7211 tmp = new_tmp();
b75263d6
JR
7212 tmp2 = tcg_const_i32(i);
7213 gen_helper_get_user_reg(tmp, tmp2);
7214 tcg_temp_free_i32(tmp2);
9ee6e8bb 7215 } else {
b0109805 7216 tmp = load_reg(s, i);
9ee6e8bb 7217 }
b0109805 7218 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7219 }
7220 j++;
7221 /* no need to add after the last transfer */
7222 if (j != n)
b0109805 7223 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7224 }
7225 }
7226 if (insn & (1 << 21)) {
7227 /* write back */
7228 if (insn & (1 << 23)) {
7229 if (insn & (1 << 24)) {
7230 /* pre increment */
7231 } else {
7232 /* post increment */
b0109805 7233 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7234 }
7235 } else {
7236 if (insn & (1 << 24)) {
7237 /* pre decrement */
7238 if (n != 1)
b0109805 7239 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7240 } else {
7241 /* post decrement */
b0109805 7242 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7243 }
7244 }
b0109805
PB
7245 store_reg(s, rn, addr);
7246 } else {
7247 dead_tmp(addr);
9ee6e8bb
PB
7248 }
7249 if (loaded_base) {
b0109805 7250 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7251 }
7252 if ((insn & (1 << 22)) && !user) {
7253 /* Restore CPSR from SPSR. */
d9ba4830
PB
7254 tmp = load_cpu_field(spsr);
7255 gen_set_cpsr(tmp, 0xffffffff);
7256 dead_tmp(tmp);
9ee6e8bb
PB
7257 s->is_jmp = DISAS_UPDATE;
7258 }
7259 }
7260 break;
7261 case 0xa:
7262 case 0xb:
7263 {
7264 int32_t offset;
7265
7266 /* branch (and link) */
7267 val = (int32_t)s->pc;
7268 if (insn & (1 << 24)) {
5e3f878a
PB
7269 tmp = new_tmp();
7270 tcg_gen_movi_i32(tmp, val);
7271 store_reg(s, 14, tmp);
9ee6e8bb
PB
7272 }
7273 offset = (((int32_t)insn << 8) >> 8);
7274 val += (offset << 2) + 4;
7275 gen_jmp(s, val);
7276 }
7277 break;
7278 case 0xc:
7279 case 0xd:
7280 case 0xe:
7281 /* Coprocessor. */
7282 if (disas_coproc_insn(env, s, insn))
7283 goto illegal_op;
7284 break;
7285 case 0xf:
7286 /* swi */
5e3f878a 7287 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7288 s->is_jmp = DISAS_SWI;
7289 break;
7290 default:
7291 illegal_op:
7292 gen_set_condexec(s);
5e3f878a 7293 gen_set_pc_im(s->pc - 4);
d9ba4830 7294 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7295 s->is_jmp = DISAS_JUMP;
7296 break;
7297 }
7298 }
7299}
7300
7301/* Return true if this is a Thumb-2 logical op. */
7302static int
7303thumb2_logic_op(int op)
7304{
7305 return (op < 8);
7306}
7307
7308/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7309 then set condition code flags based on the result of the operation.
7310 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7311 to the high bit of T1.
7312 Returns zero if the opcode is valid. */
7313
7314static int
396e467c 7315gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7316{
7317 int logic_cc;
7318
7319 logic_cc = 0;
7320 switch (op) {
7321 case 0: /* and */
396e467c 7322 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7323 logic_cc = conds;
7324 break;
7325 case 1: /* bic */
f669df27 7326 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7327 logic_cc = conds;
7328 break;
7329 case 2: /* orr */
396e467c 7330 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7331 logic_cc = conds;
7332 break;
7333 case 3: /* orn */
396e467c
FN
7334 tcg_gen_not_i32(t1, t1);
7335 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7336 logic_cc = conds;
7337 break;
7338 case 4: /* eor */
396e467c 7339 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7340 logic_cc = conds;
7341 break;
7342 case 8: /* add */
7343 if (conds)
396e467c 7344 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7345 else
396e467c 7346 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7347 break;
7348 case 10: /* adc */
7349 if (conds)
396e467c 7350 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7351 else
396e467c 7352 gen_adc(t0, t1);
9ee6e8bb
PB
7353 break;
7354 case 11: /* sbc */
7355 if (conds)
396e467c 7356 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7357 else
396e467c 7358 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7359 break;
7360 case 13: /* sub */
7361 if (conds)
396e467c 7362 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7363 else
396e467c 7364 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7365 break;
7366 case 14: /* rsb */
7367 if (conds)
396e467c 7368 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7369 else
396e467c 7370 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7371 break;
7372 default: /* 5, 6, 7, 9, 12, 15. */
7373 return 1;
7374 }
7375 if (logic_cc) {
396e467c 7376 gen_logic_CC(t0);
9ee6e8bb 7377 if (shifter_out)
396e467c 7378 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7379 }
7380 return 0;
7381}
7382
7383/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7384 is not legal. */
7385static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7386{
b0109805 7387 uint32_t insn, imm, shift, offset;
9ee6e8bb 7388 uint32_t rd, rn, rm, rs;
b26eefb6 7389 TCGv tmp;
6ddbc6e4
PB
7390 TCGv tmp2;
7391 TCGv tmp3;
b0109805 7392 TCGv addr;
a7812ae4 7393 TCGv_i64 tmp64;
9ee6e8bb
PB
7394 int op;
7395 int shiftop;
7396 int conds;
7397 int logic_cc;
7398
7399 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7400 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7401 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7402 16-bit instructions to get correct prefetch abort behavior. */
7403 insn = insn_hw1;
7404 if ((insn & (1 << 12)) == 0) {
7405 /* Second half of blx. */
7406 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7407 tmp = load_reg(s, 14);
7408 tcg_gen_addi_i32(tmp, tmp, offset);
7409 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7410
d9ba4830 7411 tmp2 = new_tmp();
b0109805 7412 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7413 store_reg(s, 14, tmp2);
7414 gen_bx(s, tmp);
9ee6e8bb
PB
7415 return 0;
7416 }
7417 if (insn & (1 << 11)) {
7418 /* Second half of bl. */
7419 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7420 tmp = load_reg(s, 14);
6a0d8a1d 7421 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7422
d9ba4830 7423 tmp2 = new_tmp();
b0109805 7424 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7425 store_reg(s, 14, tmp2);
7426 gen_bx(s, tmp);
9ee6e8bb
PB
7427 return 0;
7428 }
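        /* Illustrative note (not from the original source): architecturally
           the first half of the bl/blx pair has already added the
           sign-extended high part of the offset (<< 12) to the PC and placed
           it in LR; the code above adds the low part (<< 1), word-aligns the
           target for blx, and leaves LR pointing at the next Thumb
           instruction with the Thumb bit set. */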
7429 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7430 /* Instruction spans a page boundary. Implement it as two
7431 16-bit instructions in case the second half causes a
7432 prefetch abort. */
7433 offset = ((int32_t)insn << 21) >> 9;
396e467c 7434 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7435 return 0;
7436 }
7437 /* Fall through to 32-bit decode. */
7438 }
7439
7440 insn = lduw_code(s->pc);
7441 s->pc += 2;
7442 insn |= (uint32_t)insn_hw1 << 16;
7443
7444 if ((insn & 0xf800e800) != 0xf000e800) {
7445 ARCH(6T2);
7446 }
7447
7448 rn = (insn >> 16) & 0xf;
7449 rs = (insn >> 12) & 0xf;
7450 rd = (insn >> 8) & 0xf;
7451 rm = insn & 0xf;
7452 switch ((insn >> 25) & 0xf) {
7453 case 0: case 1: case 2: case 3:
7454 /* 16-bit instructions. Should never happen. */
7455 abort();
7456 case 4:
7457 if (insn & (1 << 22)) {
7458 /* Other load/store, table branch. */
7459 if (insn & 0x01200000) {
7460 /* Load/store doubleword. */
7461 if (rn == 15) {
b0109805
PB
7462 addr = new_tmp();
7463 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7464 } else {
b0109805 7465 addr = load_reg(s, rn);
9ee6e8bb
PB
7466 }
7467 offset = (insn & 0xff) * 4;
7468 if ((insn & (1 << 23)) == 0)
7469 offset = -offset;
7470 if (insn & (1 << 24)) {
b0109805 7471 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7472 offset = 0;
7473 }
7474 if (insn & (1 << 20)) {
7475 /* ldrd */
b0109805
PB
7476 tmp = gen_ld32(addr, IS_USER(s));
7477 store_reg(s, rs, tmp);
7478 tcg_gen_addi_i32(addr, addr, 4);
7479 tmp = gen_ld32(addr, IS_USER(s));
7480 store_reg(s, rd, tmp);
9ee6e8bb
PB
7481 } else {
7482 /* strd */
b0109805
PB
7483 tmp = load_reg(s, rs);
7484 gen_st32(tmp, addr, IS_USER(s));
7485 tcg_gen_addi_i32(addr, addr, 4);
7486 tmp = load_reg(s, rd);
7487 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7488 }
7489 if (insn & (1 << 21)) {
7490 /* Base writeback. */
7491 if (rn == 15)
7492 goto illegal_op;
b0109805
PB
7493 tcg_gen_addi_i32(addr, addr, offset - 4);
7494 store_reg(s, rn, addr);
7495 } else {
7496 dead_tmp(addr);
9ee6e8bb
PB
7497 }
7498 } else if ((insn & (1 << 23)) == 0) {
7499 /* Load/store exclusive word. */
3174f8e9 7500 addr = tcg_temp_local_new();
98a46317 7501 load_reg_var(s, addr, rn);
426f5abc 7502 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7503 if (insn & (1 << 20)) {
426f5abc 7504 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7505 } else {
426f5abc 7506 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7507 }
3174f8e9 7508 tcg_temp_free(addr);
9ee6e8bb
PB
7509 } else if ((insn & (1 << 6)) == 0) {
7510 /* Table Branch. */
7511 if (rn == 15) {
b0109805
PB
7512 addr = new_tmp();
7513 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7514 } else {
b0109805 7515 addr = load_reg(s, rn);
9ee6e8bb 7516 }
b26eefb6 7517 tmp = load_reg(s, rm);
b0109805 7518 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7519 if (insn & (1 << 4)) {
7520 /* tbh */
b0109805 7521 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7522 dead_tmp(tmp);
b0109805 7523 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7524 } else { /* tbb */
b26eefb6 7525 dead_tmp(tmp);
b0109805 7526 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7527 }
b0109805
PB
7528 dead_tmp(addr);
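                /* Table entries are halfword counts: double the loaded value
                   before adding it to the PC. */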
7529 tcg_gen_shli_i32(tmp, tmp, 1);
7530 tcg_gen_addi_i32(tmp, tmp, s->pc);
7531 store_reg(s, 15, tmp);
9ee6e8bb
PB
7532 } else {
7533 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7534 ARCH(7);
9ee6e8bb 7535 op = (insn >> 4) & 0x3;
426f5abc
PB
7536 if (op == 2) {
7537 goto illegal_op;
7538 }
3174f8e9 7539 addr = tcg_temp_local_new();
98a46317 7540 load_reg_var(s, addr, rn);
9ee6e8bb 7541 if (insn & (1 << 20)) {
426f5abc 7542 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7543 } else {
426f5abc 7544 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7545 }
3174f8e9 7546 tcg_temp_free(addr);
9ee6e8bb
PB
7547 }
7548 } else {
7549 /* Load/store multiple, RFE, SRS. */
7550 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7551 /* Not available in user mode. */
b0109805 7552 if (IS_USER(s))
9ee6e8bb
PB
7553 goto illegal_op;
7554 if (insn & (1 << 20)) {
7555 /* rfe */
b0109805
PB
7556 addr = load_reg(s, rn);
7557 if ((insn & (1 << 24)) == 0)
7558 tcg_gen_addi_i32(addr, addr, -8);
7559 /* Load PC into tmp and CPSR into tmp2. */
7560 tmp = gen_ld32(addr, 0);
7561 tcg_gen_addi_i32(addr, addr, 4);
7562 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7563 if (insn & (1 << 21)) {
7564 /* Base writeback. */
b0109805
PB
7565 if (insn & (1 << 24)) {
7566 tcg_gen_addi_i32(addr, addr, 4);
7567 } else {
7568 tcg_gen_addi_i32(addr, addr, -4);
7569 }
7570 store_reg(s, rn, addr);
7571 } else {
7572 dead_tmp(addr);
9ee6e8bb 7573 }
b0109805 7574 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7575 } else {
7576 /* srs */
7577 op = (insn & 0x1f);
7578 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7579 addr = load_reg(s, 13);
9ee6e8bb 7580 } else {
b0109805 7581 addr = new_tmp();
b75263d6
JR
7582 tmp = tcg_const_i32(op);
7583 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7584 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7585 }
7586 if ((insn & (1 << 24)) == 0) {
b0109805 7587 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7588 }
b0109805
PB
7589 tmp = load_reg(s, 14);
7590 gen_st32(tmp, addr, 0);
7591 tcg_gen_addi_i32(addr, addr, 4);
7592 tmp = new_tmp();
7593 gen_helper_cpsr_read(tmp);
7594 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7595 if (insn & (1 << 21)) {
7596 if ((insn & (1 << 24)) == 0) {
b0109805 7597 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7598 } else {
b0109805 7599 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7600 }
7601 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7602 store_reg(s, 13, addr);
9ee6e8bb 7603 } else {
b75263d6
JR
7604 tmp = tcg_const_i32(op);
7605 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7606 tcg_temp_free_i32(tmp);
9ee6e8bb 7607 }
b0109805
PB
7608 } else {
7609 dead_tmp(addr);
9ee6e8bb
PB
7610 }
7611 }
7612 } else {
7613 int i;
7614 /* Load/store multiple. */
b0109805 7615 addr = load_reg(s, rn);
9ee6e8bb
PB
7616 offset = 0;
7617 for (i = 0; i < 16; i++) {
7618 if (insn & (1 << i))
7619 offset += 4;
7620 }
7621 if (insn & (1 << 24)) {
b0109805 7622 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7623 }
7624
7625 for (i = 0; i < 16; i++) {
7626 if ((insn & (1 << i)) == 0)
7627 continue;
7628 if (insn & (1 << 20)) {
7629 /* Load. */
b0109805 7630 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7631 if (i == 15) {
b0109805 7632 gen_bx(s, tmp);
9ee6e8bb 7633 } else {
b0109805 7634 store_reg(s, i, tmp);
9ee6e8bb
PB
7635 }
7636 } else {
7637 /* Store. */
b0109805
PB
7638 tmp = load_reg(s, i);
7639 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7640 }
b0109805 7641 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7642 }
7643 if (insn & (1 << 21)) {
7644 /* Base register writeback. */
7645 if (insn & (1 << 24)) {
b0109805 7646 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7647 }
7648 /* Fault if writeback register is in register list. */
7649 if (insn & (1 << rn))
7650 goto illegal_op;
b0109805
PB
7651 store_reg(s, rn, addr);
7652 } else {
7653 dead_tmp(addr);
9ee6e8bb
PB
7654 }
7655 }
7656 }
7657 break;
2af9ab77
JB
7658 case 5:
7659
9ee6e8bb 7660 op = (insn >> 21) & 0xf;
2af9ab77
JB
7661 if (op == 6) {
7662 /* Halfword pack. */
7663 tmp = load_reg(s, rn);
7664 tmp2 = load_reg(s, rm);
7665 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7666 if (insn & (1 << 5)) {
7667 /* pkhtb */
7668 if (shift == 0)
7669 shift = 31;
7670 tcg_gen_sari_i32(tmp2, tmp2, shift);
7671 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7672 tcg_gen_ext16u_i32(tmp2, tmp2);
7673 } else {
7674 /* pkhbt */
7675 if (shift)
7676 tcg_gen_shli_i32(tmp2, tmp2, shift);
7677 tcg_gen_ext16u_i32(tmp, tmp);
7678 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7679 }
7680 tcg_gen_or_i32(tmp, tmp, tmp2);
7681 dead_tmp(tmp2);
3174f8e9
FN
7682 store_reg(s, rd, tmp);
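/* Rough scalar equivalents of the two pack operations above (illustrative
   only, not part of the original source):

     uint32_t pkhbt(uint32_t rn, uint32_t rm, unsigned sh)
     {   /* bottom halfword of Rn, top halfword of Rm << sh */
         return (rn & 0xffffu) | ((sh ? rm << sh : rm) & 0xffff0000u);
     }

     uint32_t pkhtb(uint32_t rn, uint32_t rm, unsigned sh)
     {   /* top halfword of Rn, bottom halfword of Rm >> sh (arithmetic) */
         if (sh == 0)
             sh = 31;   /* ASR #32 gives the same low halfword as ASR #31 */
         return (rn & 0xffff0000u) | ((uint32_t)((int32_t)rm >> sh) & 0xffffu);
     }
*/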
7683 } else {
2af9ab77
JB
7684 /* Data processing register constant shift. */
7685 if (rn == 15) {
7686 tmp = new_tmp();
7687 tcg_gen_movi_i32(tmp, 0);
7688 } else {
7689 tmp = load_reg(s, rn);
7690 }
7691 tmp2 = load_reg(s, rm);
7692
7693 shiftop = (insn >> 4) & 3;
7694 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7695 conds = (insn & (1 << 20)) != 0;
7696 logic_cc = (conds && thumb2_logic_op(op));
7697 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7698 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7699 goto illegal_op;
7700 dead_tmp(tmp2);
7701 if (rd != 15) {
7702 store_reg(s, rd, tmp);
7703 } else {
7704 dead_tmp(tmp);
7705 }
3174f8e9 7706 }
9ee6e8bb
PB
7707 break;
7708 case 13: /* Misc data processing. */
7709 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7710 if (op < 4 && (insn & 0xf000) != 0xf000)
7711 goto illegal_op;
7712 switch (op) {
7713 case 0: /* Register controlled shift. */
8984bd2e
PB
7714 tmp = load_reg(s, rn);
7715 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7716 if ((insn & 0x70) != 0)
7717 goto illegal_op;
7718 op = (insn >> 21) & 3;
8984bd2e
PB
7719 logic_cc = (insn & (1 << 20)) != 0;
7720 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7721 if (logic_cc)
7722 gen_logic_CC(tmp);
21aeb343 7723 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7724 break;
7725 case 1: /* Sign/zero extend. */
5e3f878a 7726 tmp = load_reg(s, rm);
9ee6e8bb
PB
7727 shift = (insn >> 4) & 3;
7728 /* ??? In many cases it's not necessary to do a
7729 rotate, a shift is sufficient. */
7730 if (shift != 0)
f669df27 7731 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7732 op = (insn >> 20) & 7;
7733 switch (op) {
5e3f878a
PB
7734 case 0: gen_sxth(tmp); break;
7735 case 1: gen_uxth(tmp); break;
7736 case 2: gen_sxtb16(tmp); break;
7737 case 3: gen_uxtb16(tmp); break;
7738 case 4: gen_sxtb(tmp); break;
7739 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7740 default: goto illegal_op;
7741 }
7742 if (rn != 15) {
5e3f878a 7743 tmp2 = load_reg(s, rn);
9ee6e8bb 7744 if ((op >> 1) == 1) {
5e3f878a 7745 gen_add16(tmp, tmp2);
9ee6e8bb 7746 } else {
5e3f878a
PB
7747 tcg_gen_add_i32(tmp, tmp, tmp2);
7748 dead_tmp(tmp2);
9ee6e8bb
PB
7749 }
7750 }
5e3f878a 7751 store_reg(s, rd, tmp);
9ee6e8bb
PB
7752 break;
7753 case 2: /* SIMD add/subtract. */
7754 op = (insn >> 20) & 7;
7755 shift = (insn >> 4) & 7;
7756 if ((op & 3) == 3 || (shift & 3) == 3)
7757 goto illegal_op;
6ddbc6e4
PB
7758 tmp = load_reg(s, rn);
7759 tmp2 = load_reg(s, rm);
7760 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7761 dead_tmp(tmp2);
7762 store_reg(s, rd, tmp);
9ee6e8bb
PB
7763 break;
7764 case 3: /* Other data processing. */
7765 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7766 if (op < 4) {
7767 /* Saturating add/subtract. */
d9ba4830
PB
7768 tmp = load_reg(s, rn);
7769 tmp2 = load_reg(s, rm);
9ee6e8bb 7770 if (op & 1)
4809c612
JB
7771 gen_helper_double_saturate(tmp, tmp);
7772 if (op & 2)
d9ba4830 7773 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7774 else
d9ba4830
PB
7775 gen_helper_add_saturate(tmp, tmp, tmp2);
7776 dead_tmp(tmp2);
9ee6e8bb 7777 } else {
d9ba4830 7778 tmp = load_reg(s, rn);
9ee6e8bb
PB
7779 switch (op) {
7780 case 0x0a: /* rbit */
d9ba4830 7781 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7782 break;
7783 case 0x08: /* rev */
66896cb8 7784 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7785 break;
7786 case 0x09: /* rev16 */
d9ba4830 7787 gen_rev16(tmp);
9ee6e8bb
PB
7788 break;
7789 case 0x0b: /* revsh */
d9ba4830 7790 gen_revsh(tmp);
9ee6e8bb
PB
7791 break;
7792 case 0x10: /* sel */
d9ba4830 7793 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7794 tmp3 = new_tmp();
7795 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7796 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7797 dead_tmp(tmp3);
d9ba4830 7798 dead_tmp(tmp2);
9ee6e8bb
PB
7799 break;
7800 case 0x18: /* clz */
d9ba4830 7801 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7802 break;
7803 default:
7804 goto illegal_op;
7805 }
7806 }
d9ba4830 7807 store_reg(s, rd, tmp);
9ee6e8bb
PB
7808 break;
7809 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7810 op = (insn >> 4) & 0xf;
d9ba4830
PB
7811 tmp = load_reg(s, rn);
7812 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7813 switch ((insn >> 20) & 7) {
7814 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7815 tcg_gen_mul_i32(tmp, tmp, tmp2);
7816 dead_tmp(tmp2);
9ee6e8bb 7817 if (rs != 15) {
d9ba4830 7818 tmp2 = load_reg(s, rs);
9ee6e8bb 7819 if (op)
d9ba4830 7820 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7821 else
d9ba4830
PB
7822 tcg_gen_add_i32(tmp, tmp, tmp2);
7823 dead_tmp(tmp2);
9ee6e8bb 7824 }
9ee6e8bb
PB
7825 break;
7826 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7827 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7828 dead_tmp(tmp2);
9ee6e8bb 7829 if (rs != 15) {
d9ba4830
PB
7830 tmp2 = load_reg(s, rs);
7831 gen_helper_add_setq(tmp, tmp, tmp2);
7832 dead_tmp(tmp2);
9ee6e8bb 7833 }
9ee6e8bb
PB
7834 break;
7835 case 2: /* Dual multiply add. */
7836 case 4: /* Dual multiply subtract. */
7837 if (op)
d9ba4830
PB
7838 gen_swap_half(tmp2);
7839 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7840 /* This addition cannot overflow. */
7841 if (insn & (1 << 22)) {
d9ba4830 7842 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7843 } else {
d9ba4830 7844 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7845 }
d9ba4830 7846 dead_tmp(tmp2);
9ee6e8bb
PB
7847 if (rs != 15)
7848 {
d9ba4830
PB
7849 tmp2 = load_reg(s, rs);
7850 gen_helper_add_setq(tmp, tmp, tmp2);
7851 dead_tmp(tmp2);
9ee6e8bb 7852 }
9ee6e8bb
PB
7853 break;
7854 case 3: /* 32 * 16 -> 32msb */
7855 if (op)
d9ba4830 7856 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7857 else
d9ba4830 7858 gen_sxth(tmp2);
a7812ae4
PB
7859 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7860 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7861 tmp = new_tmp();
a7812ae4 7862 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7863 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7864 if (rs != 15)
7865 {
d9ba4830
PB
7866 tmp2 = load_reg(s, rs);
7867 gen_helper_add_setq(tmp, tmp, tmp2);
7868 dead_tmp(tmp2);
9ee6e8bb 7869 }
9ee6e8bb 7870 break;
838fa72d
AJ
7871 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7872 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7873 if (rs != 15) {
838fa72d
AJ
7874 tmp = load_reg(s, rs);
7875 if (insn & (1 << 20)) {
7876 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 7877 } else {
838fa72d 7878 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 7879 }
2c0262af 7880 }
838fa72d
AJ
7881 if (insn & (1 << 4)) {
7882 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7883 }
7884 tcg_gen_shri_i64(tmp64, tmp64, 32);
7885 tmp = new_tmp();
7886 tcg_gen_trunc_i64_i32(tmp, tmp64);
7887 tcg_temp_free_i64(tmp64);
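/* The 0x80000000 added above implements the rounding (SMMULR/SMMLAR/SMMLSR)
   variants: round-to-nearest of the top 32 bits instead of truncation.
   Illustrative scalar form for the simplest case (not part of the original
   source):

     int32_t smmul_r(int32_t a, int32_t b)
     {
         int64_t p = (int64_t)a * b + 0x80000000LL;   /* round before taking high word */
         return (int32_t)(p >> 32);
     }
*/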
9ee6e8bb
PB
7888 break;
7889 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7890 gen_helper_usad8(tmp, tmp, tmp2);
7891 dead_tmp(tmp2);
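/* What the usad8 helper computes, as a standalone sketch (illustrative, not
   part of the original source): the sum of absolute differences of the four
   byte lanes; the accumulate below (rs != 15) turns USAD8 into USADA8.

     uint32_t usad8_ref(uint32_t a, uint32_t b)
     {
         uint32_t sum = 0;
         int i;
         for (i = 0; i < 4; i++) {
             uint32_t da = (a >> (8 * i)) & 0xff;
             uint32_t db = (b >> (8 * i)) & 0xff;
             sum += da > db ? da - db : db - da;
         }
         return sum;
     }
*/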
9ee6e8bb 7892 if (rs != 15) {
d9ba4830
PB
7893 tmp2 = load_reg(s, rs);
7894 tcg_gen_add_i32(tmp, tmp, tmp2);
7895 dead_tmp(tmp2);
5fd46862 7896 }
9ee6e8bb 7897 break;
2c0262af 7898 }
d9ba4830 7899 store_reg(s, rd, tmp);
2c0262af 7900 break;
9ee6e8bb
PB
7901 case 6: case 7: /* 64-bit multiply, Divide. */
7902 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7903 tmp = load_reg(s, rn);
7904 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7905 if ((op & 0x50) == 0x10) {
7906 /* sdiv, udiv */
7907 if (!arm_feature(env, ARM_FEATURE_DIV))
7908 goto illegal_op;
7909 if (op & 0x20)
5e3f878a 7910 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7911 else
5e3f878a
PB
7912 gen_helper_sdiv(tmp, tmp, tmp2);
7913 dead_tmp(tmp2);
7914 store_reg(s, rd, tmp);
9ee6e8bb
PB
7915 } else if ((op & 0xe) == 0xc) {
7916 /* Dual multiply accumulate long. */
7917 if (op & 1)
5e3f878a
PB
7918 gen_swap_half(tmp2);
7919 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7920 if (op & 0x10) {
5e3f878a 7921 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7922 } else {
5e3f878a 7923 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7924 }
5e3f878a 7925 dead_tmp(tmp2);
a7812ae4
PB
7926 /* BUGFIX */
7927 tmp64 = tcg_temp_new_i64();
7928 tcg_gen_ext_i32_i64(tmp64, tmp);
7929 dead_tmp(tmp);
7930 gen_addq(s, tmp64, rs, rd);
7931 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7932 tcg_temp_free_i64(tmp64);
2c0262af 7933 } else {
9ee6e8bb
PB
7934 if (op & 0x20) {
7935 /* Unsigned 64-bit multiply */
a7812ae4 7936 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7937 } else {
9ee6e8bb
PB
7938 if (op & 8) {
7939 /* smlalxy */
5e3f878a
PB
7940 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7941 dead_tmp(tmp2);
a7812ae4
PB
7942 tmp64 = tcg_temp_new_i64();
7943 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7944 dead_tmp(tmp);
9ee6e8bb
PB
7945 } else {
7946 /* Signed 64-bit multiply */
a7812ae4 7947 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7948 }
b5ff1b31 7949 }
9ee6e8bb
PB
7950 if (op & 4) {
7951 /* umaal */
a7812ae4
PB
7952 gen_addq_lo(s, tmp64, rs);
7953 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7954 } else if (op & 0x40) {
7955 /* 64-bit accumulate. */
a7812ae4 7956 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7957 }
a7812ae4 7958 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7959 tcg_temp_free_i64(tmp64);
5fd46862 7960 }
2c0262af 7961 break;
9ee6e8bb
PB
7962 }
7963 break;
7964 case 6: case 7: case 14: case 15:
7965 /* Coprocessor. */
7966 if (((insn >> 24) & 3) == 3) {
7967 /* Translate into the equivalent ARM encoding. */
7968 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7969 if (disas_neon_data_insn(env, s, insn))
7970 goto illegal_op;
7971 } else {
7972 if (insn & (1 << 28))
7973 goto illegal_op;
7974 if (disas_coproc_insn (env, s, insn))
7975 goto illegal_op;
7976 }
7977 break;
7978 case 8: case 9: case 10: case 11:
7979 if (insn & (1 << 15)) {
7980 /* Branches, misc control. */
7981 if (insn & 0x5000) {
7982 /* Unconditional branch. */
7983 /* signextend(hw1[10:0]) -> offset[:12]. */
7984 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7985 /* hw1[10:0] -> offset[11:1]. */
7986 offset |= (insn & 0x7ff) << 1;
7987 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7988 offset[24:22] already have the same value because of the
7989 sign extension above. */
7990 offset ^= ((~insn) & (1 << 13)) << 10;
7991 offset ^= ((~insn) & (1 << 11)) << 11;
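/* Equivalent field-by-field form of the offset reconstruction above
   (illustrative sketch, not part of the original source).  With
   insn = (hw1 << 16) | hw2, the ARM ARM defines I1 = NOT(J1 XOR S),
   I2 = NOT(J2 XOR S) and offset = SignExtend(S:I1:I2:imm10:imm11:'0', 32):

     int32_t t32_bl_offset(uint32_t insn)
     {
         uint32_t S     = (insn >> 26) & 1;
         uint32_t imm10 = (insn >> 16) & 0x3ff;
         uint32_t J1    = (insn >> 13) & 1;
         uint32_t J2    = (insn >> 11) & 1;
         uint32_t imm11 = insn & 0x7ff;
         uint32_t I1    = (~(J1 ^ S)) & 1;
         uint32_t I2    = (~(J2 ^ S)) & 1;
         uint32_t off   = (S << 24) | (I1 << 23) | (I2 << 22)
                        | (imm10 << 12) | (imm11 << 1);
         return ((int32_t)(off << 7)) >> 7;   /* sign-extend from bit 24 */
     }
*/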
7992
9ee6e8bb
PB
7993 if (insn & (1 << 14)) {
7994 /* Branch and link. */
3174f8e9 7995 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7996 }
3b46e624 7997
b0109805 7998 offset += s->pc;
9ee6e8bb
PB
7999 if (insn & (1 << 12)) {
8000 /* b/bl */
b0109805 8001 gen_jmp(s, offset);
9ee6e8bb
PB
8002 } else {
8003 /* blx */
b0109805
PB
8004 offset &= ~(uint32_t)2;
8005 gen_bx_im(s, offset);
2c0262af 8006 }
9ee6e8bb
PB
8007 } else if (((insn >> 23) & 7) == 7) {
8008 /* Misc control */
8009 if (insn & (1 << 13))
8010 goto illegal_op;
8011
8012 if (insn & (1 << 26)) {
8013 /* Secure monitor call (v6Z) */
8014 goto illegal_op; /* not implemented. */
2c0262af 8015 } else {
9ee6e8bb
PB
8016 op = (insn >> 20) & 7;
8017 switch (op) {
8018 case 0: /* msr cpsr. */
8019 if (IS_M(env)) {
8984bd2e
PB
8020 tmp = load_reg(s, rn);
8021 addr = tcg_const_i32(insn & 0xff);
8022 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6
JR
8023 tcg_temp_free_i32(addr);
8024 dead_tmp(tmp);
9ee6e8bb
PB
8025 gen_lookup_tb(s);
8026 break;
8027 }
8028 /* fall through */
8029 case 1: /* msr spsr. */
8030 if (IS_M(env))
8031 goto illegal_op;
2fbac54b
FN
8032 tmp = load_reg(s, rn);
8033 if (gen_set_psr(s,
9ee6e8bb 8034 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8035 op == 1, tmp))
9ee6e8bb
PB
8036 goto illegal_op;
8037 break;
8038 case 2: /* cps, nop-hint. */
8039 if (((insn >> 8) & 7) == 0) {
8040 gen_nop_hint(s, insn & 0xff);
8041 }
8042 /* Implemented as NOP in user mode. */
8043 if (IS_USER(s))
8044 break;
8045 offset = 0;
8046 imm = 0;
8047 if (insn & (1 << 10)) {
8048 if (insn & (1 << 7))
8049 offset |= CPSR_A;
8050 if (insn & (1 << 6))
8051 offset |= CPSR_I;
8052 if (insn & (1 << 5))
8053 offset |= CPSR_F;
8054 if (insn & (1 << 9))
8055 imm = CPSR_A | CPSR_I | CPSR_F;
8056 }
8057 if (insn & (1 << 8)) {
8058 offset |= 0x1f;
8059 imm |= (insn & 0x1f);
8060 }
8061 if (offset) {
2fbac54b 8062 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8063 }
8064 break;
8065 case 3: /* Special control operations. */
426f5abc 8066 ARCH(7);
9ee6e8bb
PB
8067 op = (insn >> 4) & 0xf;
8068 switch (op) {
8069 case 2: /* clrex */
426f5abc 8070 gen_clrex(s);
9ee6e8bb
PB
8071 break;
8072 case 4: /* dsb */
8073 case 5: /* dmb */
8074 case 6: /* isb */
8075 /* These execute as NOPs. */
9ee6e8bb
PB
8076 break;
8077 default:
8078 goto illegal_op;
8079 }
8080 break;
8081 case 4: /* bxj */
8082 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8083 tmp = load_reg(s, rn);
8084 gen_bx(s, tmp);
9ee6e8bb
PB
8085 break;
8086 case 5: /* Exception return. */
b8b45b68
RV
8087 if (IS_USER(s)) {
8088 goto illegal_op;
8089 }
8090 if (rn != 14 || rd != 15) {
8091 goto illegal_op;
8092 }
8093 tmp = load_reg(s, rn);
8094 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8095 gen_exception_return(s, tmp);
8096 break;
9ee6e8bb 8097 case 6: /* mrs cpsr. */
8984bd2e 8098 tmp = new_tmp();
9ee6e8bb 8099 if (IS_M(env)) {
8984bd2e
PB
8100 addr = tcg_const_i32(insn & 0xff);
8101 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8102 tcg_temp_free_i32(addr);
9ee6e8bb 8103 } else {
8984bd2e 8104 gen_helper_cpsr_read(tmp);
9ee6e8bb 8105 }
8984bd2e 8106 store_reg(s, rd, tmp);
9ee6e8bb
PB
8107 break;
8108 case 7: /* mrs spsr. */
8109 /* Not accessible in user mode. */
8110 if (IS_USER(s) || IS_M(env))
8111 goto illegal_op;
d9ba4830
PB
8112 tmp = load_cpu_field(spsr);
8113 store_reg(s, rd, tmp);
9ee6e8bb 8114 break;
2c0262af
FB
8115 }
8116 }
9ee6e8bb
PB
8117 } else {
8118 /* Conditional branch. */
8119 op = (insn >> 22) & 0xf;
8120 /* Generate a conditional jump to next instruction. */
8121 s->condlabel = gen_new_label();
d9ba4830 8122 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8123 s->condjmp = 1;
8124
8125 /* offset[11:1] = insn[10:0] */
8126 offset = (insn & 0x7ff) << 1;
8127 /* offset[17:12] = insn[21:16]. */
8128 offset |= (insn & 0x003f0000) >> 4;
8129 /* offset[31:20] = insn[26]. */
8130 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8131 /* offset[18] = insn[13]. */
8132 offset |= (insn & (1 << 13)) << 5;
8133 /* offset[19] = insn[11]. */
8134 offset |= (insn & (1 << 11)) << 8;
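/* In ARM ARM terms the bits collected above form
   offset = SignExtend(S:J2:J1:imm6:imm11:'0', 32)
   with S = insn[26], J1 = insn[13], J2 = insn[11], imm6 = insn[21:16] and
   imm11 = insn[10:0]; unlike B/BL above, J1 and J2 are used directly rather
   than XORed with S.  (Illustrative restatement of the encoding, not text
   from the original file.) */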
8135
8136 /* jump to the offset */
b0109805 8137 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8138 }
8139 } else {
8140 /* Data processing immediate. */
8141 if (insn & (1 << 25)) {
8142 if (insn & (1 << 24)) {
8143 if (insn & (1 << 20))
8144 goto illegal_op;
8145 /* Bitfield/Saturate. */
8146 op = (insn >> 21) & 7;
8147 imm = insn & 0x1f;
8148 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
8149 if (rn == 15) {
8150 tmp = new_tmp();
8151 tcg_gen_movi_i32(tmp, 0);
8152 } else {
8153 tmp = load_reg(s, rn);
8154 }
9ee6e8bb
PB
8155 switch (op) {
8156 case 2: /* Signed bitfield extract. */
8157 imm++;
8158 if (shift + imm > 32)
8159 goto illegal_op;
8160 if (imm < 32)
6ddbc6e4 8161 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8162 break;
8163 case 6: /* Unsigned bitfield extract. */
8164 imm++;
8165 if (shift + imm > 32)
8166 goto illegal_op;
8167 if (imm < 32)
6ddbc6e4 8168 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8169 break;
8170 case 3: /* Bitfield insert/clear. */
8171 if (imm < shift)
8172 goto illegal_op;
8173 imm = imm + 1 - shift;
8174 if (imm != 32) {
6ddbc6e4 8175 tmp2 = load_reg(s, rd);
8f8e3aa4 8176 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 8177 dead_tmp(tmp2);
9ee6e8bb
PB
8178 }
8179 break;
8180 case 7:
8181 goto illegal_op;
8182 default: /* Saturate. */
9ee6e8bb
PB
8183 if (shift) {
8184 if (op & 1)
6ddbc6e4 8185 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8186 else
6ddbc6e4 8187 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8188 }
6ddbc6e4 8189 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8190 if (op & 4) {
8191 /* Unsigned. */
9ee6e8bb 8192 if ((op & 1) && shift == 0)
6ddbc6e4 8193 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8194 else
6ddbc6e4 8195 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8196 } else {
9ee6e8bb 8197 /* Signed. */
9ee6e8bb 8198 if ((op & 1) && shift == 0)
6ddbc6e4 8199 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8200 else
6ddbc6e4 8201 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8202 }
b75263d6 8203 tcg_temp_free_i32(tmp2);
9ee6e8bb 8204 break;
2c0262af 8205 }
6ddbc6e4 8206 store_reg(s, rd, tmp);
9ee6e8bb
PB
8207 } else {
8208 imm = ((insn & 0x04000000) >> 15)
8209 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8210 if (insn & (1 << 22)) {
8211 /* 16-bit immediate. */
8212 imm |= (insn >> 4) & 0xf000;
8213 if (insn & (1 << 23)) {
8214 /* movt */
5e3f878a 8215 tmp = load_reg(s, rd);
86831435 8216 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8217 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8218 } else {
9ee6e8bb 8219 /* movw */
5e3f878a
PB
8220 tmp = new_tmp();
8221 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8222 }
8223 } else {
9ee6e8bb
PB
8224 /* Add/sub 12-bit immediate. */
8225 if (rn == 15) {
b0109805 8226 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8227 if (insn & (1 << 23))
b0109805 8228 offset -= imm;
9ee6e8bb 8229 else
b0109805 8230 offset += imm;
5e3f878a
PB
8231 tmp = new_tmp();
8232 tcg_gen_movi_i32(tmp, offset);
2c0262af 8233 } else {
5e3f878a 8234 tmp = load_reg(s, rn);
9ee6e8bb 8235 if (insn & (1 << 23))
5e3f878a 8236 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8237 else
5e3f878a 8238 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8239 }
9ee6e8bb 8240 }
5e3f878a 8241 store_reg(s, rd, tmp);
191abaa2 8242 }
9ee6e8bb
PB
8243 } else {
8244 int shifter_out = 0;
8245 /* modified 12-bit immediate. */
8246 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8247 imm = (insn & 0xff);
8248 switch (shift) {
8249 case 0: /* XY */
8250 /* Nothing to do. */
8251 break;
8252 case 1: /* 00XY00XY */
8253 imm |= imm << 16;
8254 break;
8255 case 2: /* XY00XY00 */
8256 imm |= imm << 16;
8257 imm <<= 8;
8258 break;
8259 case 3: /* XYXYXYXY */
8260 imm |= imm << 16;
8261 imm |= imm << 8;
8262 break;
8263 default: /* Rotated constant. */
8264 shift = (shift << 1) | (imm >> 7);
8265 imm |= 0x80;
8266 imm = imm << (32 - shift);
8267 shifter_out = 1;
8268 break;
b5ff1b31 8269 }
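/* Standalone decoder mirroring the switch above (illustrative sketch, not
   part of the original source): the Thumb-2 "modified immediate" expands an
   8-bit value into one of four replication patterns or a rotated 128..255
   constant.

     uint32_t t32_expand_imm(uint32_t insn)
     {
         uint32_t imm = insn & 0xff;
         uint32_t shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
         switch (shift) {
         case 0:  return imm;                                  /* 000000XY */
         case 1:  return imm | (imm << 16);                    /* 00XY00XY */
         case 2:  return (imm | (imm << 16)) << 8;             /* XY00XY00 */
         case 3:  imm |= imm << 16; return imm | (imm << 8);   /* XYXYXYXY */
         default:                        /* 1XXXXXXX rotated right by shift */
             shift = (shift << 1) | (imm >> 7);
             imm |= 0x80;
             return imm << (32 - shift);
         }
     }

   For example an encoded imm8 of 0xAB with pattern 3 expands to 0xABABABAB. */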
3174f8e9
FN
8270 tmp2 = new_tmp();
8271 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8272 rn = (insn >> 16) & 0xf;
3174f8e9
FN
8273 if (rn == 15) {
8274 tmp = new_tmp();
8275 tcg_gen_movi_i32(tmp, 0);
8276 } else {
8277 tmp = load_reg(s, rn);
8278 }
9ee6e8bb
PB
8279 op = (insn >> 21) & 0xf;
8280 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8281 shifter_out, tmp, tmp2))
9ee6e8bb 8282 goto illegal_op;
3174f8e9 8283 dead_tmp(tmp2);
9ee6e8bb
PB
8284 rd = (insn >> 8) & 0xf;
8285 if (rd != 15) {
3174f8e9
FN
8286 store_reg(s, rd, tmp);
8287 } else {
8288 dead_tmp(tmp);
2c0262af 8289 }
2c0262af 8290 }
9ee6e8bb
PB
8291 }
8292 break;
8293 case 12: /* Load/store single data item. */
8294 {
8295 int postinc = 0;
8296 int writeback = 0;
b0109805 8297 int user;
9ee6e8bb
PB
8298 if ((insn & 0x01100000) == 0x01000000) {
8299 if (disas_neon_ls_insn(env, s, insn))
c1713132 8300 goto illegal_op;
9ee6e8bb
PB
8301 break;
8302 }
b0109805 8303 user = IS_USER(s);
9ee6e8bb 8304 if (rn == 15) {
b0109805 8305 addr = new_tmp();
9ee6e8bb
PB
8306 /* PC relative. */
8307 /* s->pc has already been incremented by 4. */
8308 imm = s->pc & 0xfffffffc;
8309 if (insn & (1 << 23))
8310 imm += insn & 0xfff;
8311 else
8312 imm -= insn & 0xfff;
b0109805 8313 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8314 } else {
b0109805 8315 addr = load_reg(s, rn);
9ee6e8bb
PB
8316 if (insn & (1 << 23)) {
8317 /* Positive offset. */
8318 imm = insn & 0xfff;
b0109805 8319 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8320 } else {
8321 op = (insn >> 8) & 7;
8322 imm = insn & 0xff;
8323 switch (op) {
8324 case 0: case 8: /* Shifted Register. */
8325 shift = (insn >> 4) & 0xf;
8326 if (shift > 3)
18c9b560 8327 goto illegal_op;
b26eefb6 8328 tmp = load_reg(s, rm);
9ee6e8bb 8329 if (shift)
b26eefb6 8330 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8331 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8332 dead_tmp(tmp);
9ee6e8bb
PB
8333 break;
8334 case 4: /* Negative offset. */
b0109805 8335 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8336 break;
8337 case 6: /* User privilege. */
b0109805
PB
8338 tcg_gen_addi_i32(addr, addr, imm);
8339 user = 1;
9ee6e8bb
PB
8340 break;
8341 case 1: /* Post-decrement. */
8342 imm = -imm;
8343 /* Fall through. */
8344 case 3: /* Post-increment. */
9ee6e8bb
PB
8345 postinc = 1;
8346 writeback = 1;
8347 break;
8348 case 5: /* Pre-decrement. */
8349 imm = -imm;
8350 /* Fall through. */
8351 case 7: /* Pre-increment. */
b0109805 8352 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8353 writeback = 1;
8354 break;
8355 default:
b7bcbe95 8356 goto illegal_op;
9ee6e8bb
PB
8357 }
8358 }
8359 }
8360 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8361 if (insn & (1 << 20)) {
8362 /* Load. */
8363 if (rs == 15 && op != 2) {
8364 if (op & 2)
b5ff1b31 8365 goto illegal_op;
9ee6e8bb
PB
8366 /* Memory hint. Implemented as NOP. */
8367 } else {
8368 switch (op) {
b0109805
PB
8369 case 0: tmp = gen_ld8u(addr, user); break;
8370 case 4: tmp = gen_ld8s(addr, user); break;
8371 case 1: tmp = gen_ld16u(addr, user); break;
8372 case 5: tmp = gen_ld16s(addr, user); break;
8373 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8374 default: goto illegal_op;
8375 }
8376 if (rs == 15) {
b0109805 8377 gen_bx(s, tmp);
9ee6e8bb 8378 } else {
b0109805 8379 store_reg(s, rs, tmp);
9ee6e8bb
PB
8380 }
8381 }
8382 } else {
8383 /* Store. */
8384 if (rs == 15)
b7bcbe95 8385 goto illegal_op;
b0109805 8386 tmp = load_reg(s, rs);
9ee6e8bb 8387 switch (op) {
b0109805
PB
8388 case 0: gen_st8(tmp, addr, user); break;
8389 case 1: gen_st16(tmp, addr, user); break;
8390 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8391 default: goto illegal_op;
b7bcbe95 8392 }
2c0262af 8393 }
9ee6e8bb 8394 if (postinc)
b0109805
PB
8395 tcg_gen_addi_i32(addr, addr, imm);
8396 if (writeback) {
8397 store_reg(s, rn, addr);
8398 } else {
8399 dead_tmp(addr);
8400 }
9ee6e8bb
PB
8401 }
8402 break;
8403 default:
8404 goto illegal_op;
2c0262af 8405 }
9ee6e8bb
PB
8406 return 0;
8407illegal_op:
8408 return 1;
2c0262af
FB
8409}
8410
9ee6e8bb 8411static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8412{
8413 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8414 int32_t offset;
8415 int i;
b26eefb6 8416 TCGv tmp;
d9ba4830 8417 TCGv tmp2;
b0109805 8418 TCGv addr;
99c475ab 8419
9ee6e8bb
PB
8420 if (s->condexec_mask) {
8421 cond = s->condexec_cond;
bedd2912
JB
8422 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8423 s->condlabel = gen_new_label();
8424 gen_test_cc(cond ^ 1, s->condlabel);
8425 s->condjmp = 1;
8426 }
9ee6e8bb
PB
8427 }
8428
b5ff1b31 8429 insn = lduw_code(s->pc);
99c475ab 8430 s->pc += 2;
b5ff1b31 8431
99c475ab
FB
8432 switch (insn >> 12) {
8433 case 0: case 1:
396e467c 8434
99c475ab
FB
8435 rd = insn & 7;
8436 op = (insn >> 11) & 3;
8437 if (op == 3) {
8438 /* add/subtract */
8439 rn = (insn >> 3) & 7;
396e467c 8440 tmp = load_reg(s, rn);
99c475ab
FB
8441 if (insn & (1 << 10)) {
8442 /* immediate */
396e467c
FN
8443 tmp2 = new_tmp();
8444 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8445 } else {
8446 /* reg */
8447 rm = (insn >> 6) & 7;
396e467c 8448 tmp2 = load_reg(s, rm);
99c475ab 8449 }
9ee6e8bb
PB
8450 if (insn & (1 << 9)) {
8451 if (s->condexec_mask)
396e467c 8452 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8453 else
396e467c 8454 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8455 } else {
8456 if (s->condexec_mask)
396e467c 8457 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8458 else
396e467c 8459 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8460 }
396e467c
FN
8461 dead_tmp(tmp2);
8462 store_reg(s, rd, tmp);
99c475ab
FB
8463 } else {
8464 /* shift immediate */
8465 rm = (insn >> 3) & 7;
8466 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8467 tmp = load_reg(s, rm);
8468 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8469 if (!s->condexec_mask)
8470 gen_logic_CC(tmp);
8471 store_reg(s, rd, tmp);
99c475ab
FB
8472 }
8473 break;
8474 case 2: case 3:
8475 /* arithmetic large immediate */
8476 op = (insn >> 11) & 3;
8477 rd = (insn >> 8) & 0x7;
396e467c
FN
8478 if (op == 0) { /* mov */
8479 tmp = new_tmp();
8480 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8481 if (!s->condexec_mask)
396e467c
FN
8482 gen_logic_CC(tmp);
8483 store_reg(s, rd, tmp);
8484 } else {
8485 tmp = load_reg(s, rd);
8486 tmp2 = new_tmp();
8487 tcg_gen_movi_i32(tmp2, insn & 0xff);
8488 switch (op) {
8489 case 1: /* cmp */
8490 gen_helper_sub_cc(tmp, tmp, tmp2);
8491 dead_tmp(tmp);
8492 dead_tmp(tmp2);
8493 break;
8494 case 2: /* add */
8495 if (s->condexec_mask)
8496 tcg_gen_add_i32(tmp, tmp, tmp2);
8497 else
8498 gen_helper_add_cc(tmp, tmp, tmp2);
8499 dead_tmp(tmp2);
8500 store_reg(s, rd, tmp);
8501 break;
8502 case 3: /* sub */
8503 if (s->condexec_mask)
8504 tcg_gen_sub_i32(tmp, tmp, tmp2);
8505 else
8506 gen_helper_sub_cc(tmp, tmp, tmp2);
8507 dead_tmp(tmp2);
8508 store_reg(s, rd, tmp);
8509 break;
8510 }
99c475ab 8511 }
99c475ab
FB
8512 break;
8513 case 4:
8514 if (insn & (1 << 11)) {
8515 rd = (insn >> 8) & 7;
5899f386
FB
8516 /* load pc-relative. Bit 1 of PC is ignored. */
8517 val = s->pc + 2 + ((insn & 0xff) * 4);
8518 val &= ~(uint32_t)2;
b0109805
PB
8519 addr = new_tmp();
8520 tcg_gen_movi_i32(addr, val);
8521 tmp = gen_ld32(addr, IS_USER(s));
8522 dead_tmp(addr);
8523 store_reg(s, rd, tmp);
99c475ab
FB
8524 break;
8525 }
8526 if (insn & (1 << 10)) {
8527 /* data processing extended or blx */
8528 rd = (insn & 7) | ((insn >> 4) & 8);
8529 rm = (insn >> 3) & 0xf;
8530 op = (insn >> 8) & 3;
8531 switch (op) {
8532 case 0: /* add */
396e467c
FN
8533 tmp = load_reg(s, rd);
8534 tmp2 = load_reg(s, rm);
8535 tcg_gen_add_i32(tmp, tmp, tmp2);
8536 dead_tmp(tmp2);
8537 store_reg(s, rd, tmp);
99c475ab
FB
8538 break;
8539 case 1: /* cmp */
396e467c
FN
8540 tmp = load_reg(s, rd);
8541 tmp2 = load_reg(s, rm);
8542 gen_helper_sub_cc(tmp, tmp, tmp2);
8543 dead_tmp(tmp2);
8544 dead_tmp(tmp);
99c475ab
FB
8545 break;
8546 case 2: /* mov/cpy */
396e467c
FN
8547 tmp = load_reg(s, rm);
8548 store_reg(s, rd, tmp);
99c475ab
FB
8549 break;
8550 case 3:/* branch [and link] exchange thumb register */
b0109805 8551 tmp = load_reg(s, rm);
99c475ab
FB
8552 if (insn & (1 << 7)) {
8553 val = (uint32_t)s->pc | 1;
b0109805
PB
8554 tmp2 = new_tmp();
8555 tcg_gen_movi_i32(tmp2, val);
8556 store_reg(s, 14, tmp2);
99c475ab 8557 }
d9ba4830 8558 gen_bx(s, tmp);
99c475ab
FB
8559 break;
8560 }
8561 break;
8562 }
8563
8564 /* data processing register */
8565 rd = insn & 7;
8566 rm = (insn >> 3) & 7;
8567 op = (insn >> 6) & 0xf;
8568 if (op == 2 || op == 3 || op == 4 || op == 7) {
8569 /* the shift/rotate ops want the operands backwards */
8570 val = rm;
8571 rm = rd;
8572 rd = val;
8573 val = 1;
8574 } else {
8575 val = 0;
8576 }
8577
396e467c
FN
8578 if (op == 9) { /* neg */
8579 tmp = new_tmp();
8580 tcg_gen_movi_i32(tmp, 0);
8581 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8582 tmp = load_reg(s, rd);
8583 } else {
8584 TCGV_UNUSED(tmp);
8585 }
99c475ab 8586
396e467c 8587 tmp2 = load_reg(s, rm);
5899f386 8588 switch (op) {
99c475ab 8589 case 0x0: /* and */
396e467c 8590 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8591 if (!s->condexec_mask)
396e467c 8592 gen_logic_CC(tmp);
99c475ab
FB
8593 break;
8594 case 0x1: /* eor */
396e467c 8595 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8596 if (!s->condexec_mask)
396e467c 8597 gen_logic_CC(tmp);
99c475ab
FB
8598 break;
8599 case 0x2: /* lsl */
9ee6e8bb 8600 if (s->condexec_mask) {
396e467c 8601 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8602 } else {
396e467c
FN
8603 gen_helper_shl_cc(tmp2, tmp2, tmp);
8604 gen_logic_CC(tmp2);
9ee6e8bb 8605 }
99c475ab
FB
8606 break;
8607 case 0x3: /* lsr */
9ee6e8bb 8608 if (s->condexec_mask) {
396e467c 8609 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8610 } else {
396e467c
FN
8611 gen_helper_shr_cc(tmp2, tmp2, tmp);
8612 gen_logic_CC(tmp2);
9ee6e8bb 8613 }
99c475ab
FB
8614 break;
8615 case 0x4: /* asr */
9ee6e8bb 8616 if (s->condexec_mask) {
396e467c 8617 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8618 } else {
396e467c
FN
8619 gen_helper_sar_cc(tmp2, tmp2, tmp);
8620 gen_logic_CC(tmp2);
9ee6e8bb 8621 }
99c475ab
FB
8622 break;
8623 case 0x5: /* adc */
9ee6e8bb 8624 if (s->condexec_mask)
396e467c 8625 gen_adc(tmp, tmp2);
9ee6e8bb 8626 else
396e467c 8627 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8628 break;
8629 case 0x6: /* sbc */
9ee6e8bb 8630 if (s->condexec_mask)
396e467c 8631 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8632 else
396e467c 8633 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8634 break;
8635 case 0x7: /* ror */
9ee6e8bb 8636 if (s->condexec_mask) {
f669df27
AJ
8637 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8638 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8639 } else {
396e467c
FN
8640 gen_helper_ror_cc(tmp2, tmp2, tmp);
8641 gen_logic_CC(tmp2);
9ee6e8bb 8642 }
99c475ab
FB
8643 break;
8644 case 0x8: /* tst */
396e467c
FN
8645 tcg_gen_and_i32(tmp, tmp, tmp2);
8646 gen_logic_CC(tmp);
99c475ab 8647 rd = 16;
5899f386 8648 break;
99c475ab 8649 case 0x9: /* neg */
9ee6e8bb 8650 if (s->condexec_mask)
396e467c 8651 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8652 else
396e467c 8653 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8654 break;
8655 case 0xa: /* cmp */
396e467c 8656 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8657 rd = 16;
8658 break;
8659 case 0xb: /* cmn */
396e467c 8660 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8661 rd = 16;
8662 break;
8663 case 0xc: /* orr */
396e467c 8664 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8665 if (!s->condexec_mask)
396e467c 8666 gen_logic_CC(tmp);
99c475ab
FB
8667 break;
8668 case 0xd: /* mul */
7b2919a0 8669 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8670 if (!s->condexec_mask)
396e467c 8671 gen_logic_CC(tmp);
99c475ab
FB
8672 break;
8673 case 0xe: /* bic */
f669df27 8674 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8675 if (!s->condexec_mask)
396e467c 8676 gen_logic_CC(tmp);
99c475ab
FB
8677 break;
8678 case 0xf: /* mvn */
396e467c 8679 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8680 if (!s->condexec_mask)
396e467c 8681 gen_logic_CC(tmp2);
99c475ab 8682 val = 1;
5899f386 8683 rm = rd;
99c475ab
FB
8684 break;
8685 }
8686 if (rd != 16) {
396e467c
FN
8687 if (val) {
8688 store_reg(s, rm, tmp2);
8689 if (op != 0xf)
8690 dead_tmp(tmp);
8691 } else {
8692 store_reg(s, rd, tmp);
8693 dead_tmp(tmp2);
8694 }
8695 } else {
8696 dead_tmp(tmp);
8697 dead_tmp(tmp2);
99c475ab
FB
8698 }
8699 break;
8700
8701 case 5:
8702 /* load/store register offset. */
8703 rd = insn & 7;
8704 rn = (insn >> 3) & 7;
8705 rm = (insn >> 6) & 7;
8706 op = (insn >> 9) & 7;
b0109805 8707 addr = load_reg(s, rn);
b26eefb6 8708 tmp = load_reg(s, rm);
b0109805 8709 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8710 dead_tmp(tmp);
99c475ab
FB
8711
8712 if (op < 3) /* store */
b0109805 8713 tmp = load_reg(s, rd);
99c475ab
FB
8714
8715 switch (op) {
8716 case 0: /* str */
b0109805 8717 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8718 break;
8719 case 1: /* strh */
b0109805 8720 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8721 break;
8722 case 2: /* strb */
b0109805 8723 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8724 break;
8725 case 3: /* ldrsb */
b0109805 8726 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8727 break;
8728 case 4: /* ldr */
b0109805 8729 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8730 break;
8731 case 5: /* ldrh */
b0109805 8732 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8733 break;
8734 case 6: /* ldrb */
b0109805 8735 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8736 break;
8737 case 7: /* ldrsh */
b0109805 8738 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8739 break;
8740 }
8741 if (op >= 3) /* load */
b0109805
PB
8742 store_reg(s, rd, tmp);
8743 dead_tmp(addr);
99c475ab
FB
8744 break;
8745
8746 case 6:
8747 /* load/store word immediate offset */
8748 rd = insn & 7;
8749 rn = (insn >> 3) & 7;
b0109805 8750 addr = load_reg(s, rn);
99c475ab 8751 val = (insn >> 4) & 0x7c;
b0109805 8752 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8753
8754 if (insn & (1 << 11)) {
8755 /* load */
b0109805
PB
8756 tmp = gen_ld32(addr, IS_USER(s));
8757 store_reg(s, rd, tmp);
99c475ab
FB
8758 } else {
8759 /* store */
b0109805
PB
8760 tmp = load_reg(s, rd);
8761 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8762 }
b0109805 8763 dead_tmp(addr);
99c475ab
FB
8764 break;
8765
8766 case 7:
8767 /* load/store byte immediate offset */
8768 rd = insn & 7;
8769 rn = (insn >> 3) & 7;
b0109805 8770 addr = load_reg(s, rn);
99c475ab 8771 val = (insn >> 6) & 0x1f;
b0109805 8772 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8773
8774 if (insn & (1 << 11)) {
8775 /* load */
b0109805
PB
8776 tmp = gen_ld8u(addr, IS_USER(s));
8777 store_reg(s, rd, tmp);
99c475ab
FB
8778 } else {
8779 /* store */
b0109805
PB
8780 tmp = load_reg(s, rd);
8781 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8782 }
b0109805 8783 dead_tmp(addr);
99c475ab
FB
8784 break;
8785
8786 case 8:
8787 /* load/store halfword immediate offset */
8788 rd = insn & 7;
8789 rn = (insn >> 3) & 7;
b0109805 8790 addr = load_reg(s, rn);
99c475ab 8791 val = (insn >> 5) & 0x3e;
b0109805 8792 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8793
8794 if (insn & (1 << 11)) {
8795 /* load */
b0109805
PB
8796 tmp = gen_ld16u(addr, IS_USER(s));
8797 store_reg(s, rd, tmp);
99c475ab
FB
8798 } else {
8799 /* store */
b0109805
PB
8800 tmp = load_reg(s, rd);
8801 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8802 }
b0109805 8803 dead_tmp(addr);
99c475ab
FB
8804 break;
8805
8806 case 9:
8807 /* load/store from stack */
8808 rd = (insn >> 8) & 7;
b0109805 8809 addr = load_reg(s, 13);
99c475ab 8810 val = (insn & 0xff) * 4;
b0109805 8811 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8812
8813 if (insn & (1 << 11)) {
8814 /* load */
b0109805
PB
8815 tmp = gen_ld32(addr, IS_USER(s));
8816 store_reg(s, rd, tmp);
99c475ab
FB
8817 } else {
8818 /* store */
b0109805
PB
8819 tmp = load_reg(s, rd);
8820 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8821 }
b0109805 8822 dead_tmp(addr);
99c475ab
FB
8823 break;
8824
8825 case 10:
8826 /* add to high reg */
8827 rd = (insn >> 8) & 7;
5899f386
FB
8828 if (insn & (1 << 11)) {
8829 /* SP */
5e3f878a 8830 tmp = load_reg(s, 13);
5899f386
FB
8831 } else {
8832 /* PC. bit 1 is ignored. */
5e3f878a
PB
8833 tmp = new_tmp();
8834 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8835 }
99c475ab 8836 val = (insn & 0xff) * 4;
5e3f878a
PB
8837 tcg_gen_addi_i32(tmp, tmp, val);
8838 store_reg(s, rd, tmp);
99c475ab
FB
8839 break;
8840
8841 case 11:
8842 /* misc */
8843 op = (insn >> 8) & 0xf;
8844 switch (op) {
8845 case 0:
8846 /* adjust stack pointer */
b26eefb6 8847 tmp = load_reg(s, 13);
99c475ab
FB
8848 val = (insn & 0x7f) * 4;
8849 if (insn & (1 << 7))
6a0d8a1d 8850 val = -(int32_t)val;
b26eefb6
PB
8851 tcg_gen_addi_i32(tmp, tmp, val);
8852 store_reg(s, 13, tmp);
99c475ab
FB
8853 break;
8854
9ee6e8bb
PB
8855 case 2: /* sign/zero extend. */
8856 ARCH(6);
8857 rd = insn & 7;
8858 rm = (insn >> 3) & 7;
b0109805 8859 tmp = load_reg(s, rm);
9ee6e8bb 8860 switch ((insn >> 6) & 3) {
b0109805
PB
8861 case 0: gen_sxth(tmp); break;
8862 case 1: gen_sxtb(tmp); break;
8863 case 2: gen_uxth(tmp); break;
8864 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8865 }
b0109805 8866 store_reg(s, rd, tmp);
9ee6e8bb 8867 break;
99c475ab
FB
8868 case 4: case 5: case 0xc: case 0xd:
8869 /* push/pop */
b0109805 8870 addr = load_reg(s, 13);
5899f386
FB
8871 if (insn & (1 << 8))
8872 offset = 4;
99c475ab 8873 else
5899f386
FB
8874 offset = 0;
8875 for (i = 0; i < 8; i++) {
8876 if (insn & (1 << i))
8877 offset += 4;
8878 }
8879 if ((insn & (1 << 11)) == 0) {
b0109805 8880 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8881 }
99c475ab
FB
8882 for (i = 0; i < 8; i++) {
8883 if (insn & (1 << i)) {
8884 if (insn & (1 << 11)) {
8885 /* pop */
b0109805
PB
8886 tmp = gen_ld32(addr, IS_USER(s));
8887 store_reg(s, i, tmp);
99c475ab
FB
8888 } else {
8889 /* push */
b0109805
PB
8890 tmp = load_reg(s, i);
8891 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8892 }
5899f386 8893 /* advance to the next address. */
b0109805 8894 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8895 }
8896 }
a50f5b91 8897 TCGV_UNUSED(tmp);
99c475ab
FB
8898 if (insn & (1 << 8)) {
8899 if (insn & (1 << 11)) {
8900 /* pop pc */
b0109805 8901 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8902 /* don't set the pc until the rest of the instruction
8903 has completed */
8904 } else {
8905 /* push lr */
b0109805
PB
8906 tmp = load_reg(s, 14);
8907 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8908 }
b0109805 8909 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8910 }
5899f386 8911 if ((insn & (1 << 11)) == 0) {
b0109805 8912 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8913 }
99c475ab 8914 /* write back the new stack pointer */
b0109805 8915 store_reg(s, 13, addr);
99c475ab
FB
8916 /* set the new PC value */
8917 if ((insn & 0x0900) == 0x0900)
b0109805 8918 gen_bx(s, tmp);
99c475ab
FB
8919 break;
8920
9ee6e8bb
PB
8921 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8922 rm = insn & 7;
d9ba4830 8923 tmp = load_reg(s, rm);
9ee6e8bb
PB
8924 s->condlabel = gen_new_label();
8925 s->condjmp = 1;
8926 if (insn & (1 << 11))
cb63669a 8927 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8928 else
cb63669a 8929 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8930 dead_tmp(tmp);
9ee6e8bb
PB
8931 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8932 val = (uint32_t)s->pc + 2;
8933 val += offset;
8934 gen_jmp(s, val);
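/* Illustrative note (not part of the original source): the collected bits
   give offset = i:imm5:'0' (insn[9] and insn[7:3]), so the generated branch
   target is

     target = insn_address + 4 + (i:imm5:'0')

   which is why 2 is added to s->pc above (it already points past this
   16-bit instruction). */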
8935 break;
8936
8937 case 15: /* IT, nop-hint. */
8938 if ((insn & 0xf) == 0) {
8939 gen_nop_hint(s, (insn >> 4) & 0xf);
8940 break;
8941 }
8942 /* If Then. */
8943 s->condexec_cond = (insn >> 4) & 0xe;
8944 s->condexec_mask = insn & 0x1f;
8945 /* No actual code generated for this insn, just set up state. */
8946 break;
8947
06c949e6 8948 case 0xe: /* bkpt */
9ee6e8bb 8949 gen_set_condexec(s);
5e3f878a 8950 gen_set_pc_im(s->pc - 2);
d9ba4830 8951 gen_exception(EXCP_BKPT);
06c949e6
PB
8952 s->is_jmp = DISAS_JUMP;
8953 break;
8954
9ee6e8bb
PB
8955 case 0xa: /* rev */
8956 ARCH(6);
8957 rn = (insn >> 3) & 0x7;
8958 rd = insn & 0x7;
b0109805 8959 tmp = load_reg(s, rn);
9ee6e8bb 8960 switch ((insn >> 6) & 3) {
66896cb8 8961 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8962 case 1: gen_rev16(tmp); break;
8963 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8964 default: goto illegal_op;
8965 }
b0109805 8966 store_reg(s, rd, tmp);
9ee6e8bb
PB
8967 break;
8968
8969 case 6: /* cps */
8970 ARCH(6);
8971 if (IS_USER(s))
8972 break;
8973 if (IS_M(env)) {
8984bd2e 8974 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8975 /* PRIMASK */
8984bd2e
PB
8976 if (insn & 1) {
8977 addr = tcg_const_i32(16);
8978 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8979 tcg_temp_free_i32(addr);
8984bd2e 8980 }
9ee6e8bb 8981 /* FAULTMASK */
8984bd2e
PB
8982 if (insn & 2) {
8983 addr = tcg_const_i32(17);
8984 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8985 tcg_temp_free_i32(addr);
8984bd2e 8986 }
b75263d6 8987 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8988 gen_lookup_tb(s);
8989 } else {
8990 if (insn & (1 << 4))
8991 shift = CPSR_A | CPSR_I | CPSR_F;
8992 else
8993 shift = 0;
fa26df03 8994 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
8995 }
8996 break;
8997
99c475ab
FB
8998 default:
8999 goto undef;
9000 }
9001 break;
9002
9003 case 12:
9004 /* load/store multiple */
9005 rn = (insn >> 8) & 0x7;
b0109805 9006 addr = load_reg(s, rn);
99c475ab
FB
9007 for (i = 0; i < 8; i++) {
9008 if (insn & (1 << i)) {
99c475ab
FB
9009 if (insn & (1 << 11)) {
9010 /* load */
b0109805
PB
9011 tmp = gen_ld32(addr, IS_USER(s));
9012 store_reg(s, i, tmp);
99c475ab
FB
9013 } else {
9014 /* store */
b0109805
PB
9015 tmp = load_reg(s, i);
9016 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9017 }
5899f386 9018 /* advance to the next address */
b0109805 9019 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9020 }
9021 }
5899f386 9022 /* Base register writeback. */
b0109805
PB
9023 if ((insn & (1 << rn)) == 0) {
9024 store_reg(s, rn, addr);
9025 } else {
9026 dead_tmp(addr);
9027 }
99c475ab
FB
9028 break;
9029
9030 case 13:
9031 /* conditional branch or swi */
9032 cond = (insn >> 8) & 0xf;
9033 if (cond == 0xe)
9034 goto undef;
9035
9036 if (cond == 0xf) {
9037 /* swi */
9ee6e8bb 9038 gen_set_condexec(s);
422ebf69 9039 gen_set_pc_im(s->pc);
9ee6e8bb 9040 s->is_jmp = DISAS_SWI;
99c475ab
FB
9041 break;
9042 }
9043 /* generate a conditional jump to next instruction */
e50e6a20 9044 s->condlabel = gen_new_label();
d9ba4830 9045 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9046 s->condjmp = 1;
99c475ab
FB
9047
9048 /* jump to the offset */
5899f386 9049 val = (uint32_t)s->pc + 2;
99c475ab 9050 offset = ((int32_t)insn << 24) >> 24;
5899f386 9051 val += offset << 1;
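/* The shift pair above sign-extends the 8-bit branch immediate; the target
   is insn_address + 4 + 2*imm8 (illustrative sketch, not part of the
   original source):

     int32_t simm8(uint32_t insn) { return ((int32_t)(insn << 24)) >> 24; }

   e.g. imm8 = 0xfe gives -2, i.e. a branch back to this instruction. */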
8aaca4c0 9052 gen_jmp(s, val);
99c475ab
FB
9053 break;
9054
9055 case 14:
358bf29e 9056 if (insn & (1 << 11)) {
9ee6e8bb
PB
9057 if (disas_thumb2_insn(env, s, insn))
9058 goto undef32;
358bf29e
PB
9059 break;
9060 }
9ee6e8bb 9061 /* unconditional branch */
99c475ab
FB
9062 val = (uint32_t)s->pc;
9063 offset = ((int32_t)insn << 21) >> 21;
9064 val += (offset << 1) + 2;
8aaca4c0 9065 gen_jmp(s, val);
99c475ab
FB
9066 break;
9067
9068 case 15:
9ee6e8bb 9069 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9070 goto undef32;
9ee6e8bb 9071 break;
99c475ab
FB
9072 }
9073 return;
9ee6e8bb
PB
9074undef32:
9075 gen_set_condexec(s);
5e3f878a 9076 gen_set_pc_im(s->pc - 4);
d9ba4830 9077 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
9078 s->is_jmp = DISAS_JUMP;
9079 return;
9080illegal_op:
99c475ab 9081undef:
9ee6e8bb 9082 gen_set_condexec(s);
5e3f878a 9083 gen_set_pc_im(s->pc - 2);
d9ba4830 9084 gen_exception(EXCP_UDEF);
99c475ab
FB
9085 s->is_jmp = DISAS_JUMP;
9086}
9087
2c0262af
FB
9088/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9089 basic block 'tb'. If search_pc is TRUE, also generate PC
9090 information for each intermediate instruction. */
2cfc5f17
TS
9091static inline void gen_intermediate_code_internal(CPUState *env,
9092 TranslationBlock *tb,
9093 int search_pc)
2c0262af
FB
9094{
9095 DisasContext dc1, *dc = &dc1;
a1d1bb31 9096 CPUBreakpoint *bp;
2c0262af
FB
9097 uint16_t *gen_opc_end;
9098 int j, lj;
0fa85d43 9099 target_ulong pc_start;
b5ff1b31 9100 uint32_t next_page_start;
2e70f6ef
PB
9101 int num_insns;
9102 int max_insns;
3b46e624 9103
2c0262af 9104 /* generate intermediate code */
b26eefb6 9105 num_temps = 0;
b26eefb6 9106
0fa85d43 9107 pc_start = tb->pc;
3b46e624 9108
2c0262af
FB
9109 dc->tb = tb;
9110
2c0262af 9111 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9112
9113 dc->is_jmp = DISAS_NEXT;
9114 dc->pc = pc_start;
8aaca4c0 9115 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9116 dc->condjmp = 0;
5899f386 9117 dc->thumb = env->thumb;
9ee6e8bb
PB
9118 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
9119 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 9120#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
9121 if (IS_M(env)) {
9122 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
9123 } else {
9124 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
9125 }
b5ff1b31 9126#endif
a7812ae4
PB
9127 cpu_F0s = tcg_temp_new_i32();
9128 cpu_F1s = tcg_temp_new_i32();
9129 cpu_F0d = tcg_temp_new_i64();
9130 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9131 cpu_V0 = cpu_F0d;
9132 cpu_V1 = cpu_F1d;
e677137d 9133 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9134 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9135 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9136 lj = -1;
2e70f6ef
PB
9137 num_insns = 0;
9138 max_insns = tb->cflags & CF_COUNT_MASK;
9139 if (max_insns == 0)
9140 max_insns = CF_COUNT_MASK;
9141
9142 gen_icount_start();
9ee6e8bb
PB
9143 /* Reset the conditional execution bits immediately. This avoids
9144 complications trying to do it at the end of the block. */
9145 if (env->condexec_bits)
8f01245e
PB
9146 {
9147 TCGv tmp = new_tmp();
9148 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9149 store_cpu_field(tmp, condexec_bits);
8f01245e 9150 }
2c0262af 9151 do {
fbb4a2e3
PB
9152#ifdef CONFIG_USER_ONLY
9153 /* Intercept jump to the magic kernel page. */
9154 if (dc->pc >= 0xffff0000) {
9155 /* We always get here via a jump, so we know we are not in a
9156 conditional execution block. */
9157 gen_exception(EXCP_KERNEL_TRAP);
9158 dc->is_jmp = DISAS_UPDATE;
9159 break;
9160 }
9161#else
9ee6e8bb
PB
9162 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9163 /* We always get here via a jump, so we know we are not in a
9164 conditional execution block. */
d9ba4830 9165 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9166 dc->is_jmp = DISAS_UPDATE;
9167 break;
9ee6e8bb
PB
9168 }
9169#endif
9170
72cf2d4f
BS
9171 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9172 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9173 if (bp->pc == dc->pc) {
9ee6e8bb 9174 gen_set_condexec(dc);
5e3f878a 9175 gen_set_pc_im(dc->pc);
d9ba4830 9176 gen_exception(EXCP_DEBUG);
1fddef4b 9177 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9178 /* Advance PC so that clearing the breakpoint will
9179 invalidate this TB. */
9180 dc->pc += 2;
9181 goto done_generating;
1fddef4b
FB
9182 break;
9183 }
9184 }
9185 }
2c0262af
FB
9186 if (search_pc) {
9187 j = gen_opc_ptr - gen_opc_buf;
9188 if (lj < j) {
9189 lj++;
9190 while (lj < j)
9191 gen_opc_instr_start[lj++] = 0;
9192 }
0fa85d43 9193 gen_opc_pc[lj] = dc->pc;
2c0262af 9194 gen_opc_instr_start[lj] = 1;
2e70f6ef 9195 gen_opc_icount[lj] = num_insns;
2c0262af 9196 }
e50e6a20 9197
2e70f6ef
PB
9198 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9199 gen_io_start();
9200
9ee6e8bb
PB
9201 if (env->thumb) {
9202 disas_thumb_insn(env, dc);
9203 if (dc->condexec_mask) {
9204 dc->condexec_cond = (dc->condexec_cond & 0xe)
9205 | ((dc->condexec_mask >> 4) & 1);
9206 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9207 if (dc->condexec_mask == 0) {
9208 dc->condexec_cond = 0;
9209 }
9210 }
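/* How the IT state advances (illustrative note, not part of the original
   source): condexec_mask acts as a 5-bit shift register seeded by the IT
   instruction with firstcond[0]:mask.  After every Thumb instruction its
   top bit is shifted into the low bit of condexec_cond, selecting the
   "then" or "else" condition for the next instruction, and the register is
   shifted left; when it reaches zero the IT block has ended and
   condexec_cond is cleared. */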
9211 } else {
9212 disas_arm_insn(env, dc);
9213 }
b26eefb6
PB
9214 if (num_temps) {
9215 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9216 num_temps = 0;
9217 }
e50e6a20
FB
9218
9219 if (dc->condjmp && !dc->is_jmp) {
9220 gen_set_label(dc->condlabel);
9221 dc->condjmp = 0;
9222 }
aaf2d97d 9223 /* Translation stops when a conditional branch is encountered.
e50e6a20 9224 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9225 * Also stop translation when a page boundary is reached. This
bf20dc07 9226 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9227 num_insns ++;
1fddef4b
FB
9228 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9229 !env->singlestep_enabled &&
1b530a6d 9230 !singlestep &&
2e70f6ef
PB
9231 dc->pc < next_page_start &&
9232 num_insns < max_insns);
9233
9234 if (tb->cflags & CF_LAST_IO) {
9235 if (dc->condjmp) {
9236 /* FIXME: This can theoretically happen with self-modifying
9237 code. */
9238 cpu_abort(env, "IO on conditional branch instruction");
9239 }
9240 gen_io_end();
9241 }
9ee6e8bb 9242
b5ff1b31 9243 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9244 instruction was a conditional branch or trap, and the PC has
9245 already been written. */
551bd27f 9246 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9247 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9248 if (dc->condjmp) {
9ee6e8bb
PB
9249 gen_set_condexec(dc);
9250 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9251 gen_exception(EXCP_SWI);
9ee6e8bb 9252 } else {
d9ba4830 9253 gen_exception(EXCP_DEBUG);
9ee6e8bb 9254 }
e50e6a20
FB
9255 gen_set_label(dc->condlabel);
9256 }
9257 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9258 gen_set_pc_im(dc->pc);
e50e6a20 9259 dc->condjmp = 0;
8aaca4c0 9260 }
9ee6e8bb
PB
9261 gen_set_condexec(dc);
9262 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9263 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9264 } else {
9265 /* FIXME: Single stepping a WFI insn will not halt
9266 the CPU. */
d9ba4830 9267 gen_exception(EXCP_DEBUG);
9ee6e8bb 9268 }
8aaca4c0 9269 } else {
9ee6e8bb
PB
9270 /* While branches must always occur at the end of an IT block,
9271 there are a few other things that can cause us to terminate
9272 the TB in the middle of an IT block:
9273 - Exception generating instructions (bkpt, swi, undefined).
9274 - Page boundaries.
9275 - Hardware watchpoints.
9276 Hardware breakpoints have already been handled and skip this code.
9277 */
9278 gen_set_condexec(dc);
8aaca4c0 9279 switch(dc->is_jmp) {
8aaca4c0 9280 case DISAS_NEXT:
6e256c93 9281 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9282 break;
9283 default:
9284 case DISAS_JUMP:
9285 case DISAS_UPDATE:
9286 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9287 tcg_gen_exit_tb(0);
8aaca4c0
FB
9288 break;
9289 case DISAS_TB_JUMP:
9290 /* nothing more to generate */
9291 break;
9ee6e8bb 9292 case DISAS_WFI:
d9ba4830 9293 gen_helper_wfi();
9ee6e8bb
PB
9294 break;
9295 case DISAS_SWI:
d9ba4830 9296 gen_exception(EXCP_SWI);
9ee6e8bb 9297 break;
8aaca4c0 9298 }
e50e6a20
FB
9299 if (dc->condjmp) {
9300 gen_set_label(dc->condlabel);
9ee6e8bb 9301 gen_set_condexec(dc);
6e256c93 9302 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9303 dc->condjmp = 0;
9304 }
2c0262af 9305 }
2e70f6ef 9306
9ee6e8bb 9307done_generating:
2e70f6ef 9308 gen_icount_end(tb, num_insns);
2c0262af
FB
9309 *gen_opc_ptr = INDEX_op_end;
9310
9311#ifdef DEBUG_DISAS
8fec2b8c 9312 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9313 qemu_log("----------------\n");
9314 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9315 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
9316 qemu_log("\n");
2c0262af
FB
9317 }
9318#endif
b5ff1b31
FB
9319 if (search_pc) {
9320 j = gen_opc_ptr - gen_opc_buf;
9321 lj++;
9322 while (lj <= j)
9323 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9324 } else {
2c0262af 9325 tb->size = dc->pc - pc_start;
2e70f6ef 9326 tb->icount = num_insns;
b5ff1b31 9327 }
2c0262af
FB
9328}
9329
2cfc5f17 9330void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9331{
2cfc5f17 9332 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9333}
9334
2cfc5f17 9335void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9336{
2cfc5f17 9337 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9338}
9339
b5ff1b31
FB
9340static const char *cpu_mode_names[16] = {
9341 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9342 "???", "???", "???", "und", "???", "???", "???", "sys"
9343};
9ee6e8bb 9344
9a78eead 9345void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9346 int flags)
2c0262af
FB
9347{
9348 int i;
06e80fc9 9349#if 0
bc380d17 9350 union {
b7bcbe95
FB
9351 uint32_t i;
9352 float s;
9353 } s0, s1;
9354 CPU_DoubleU d;
a94a6abf
PB
9355 /* ??? This assumes float64 and double have the same layout.
9356 Oh well, it's only debug dumps. */
9357 union {
9358 float64 f64;
9359 double d;
9360 } d0;
06e80fc9 9361#endif
b5ff1b31 9362 uint32_t psr;
2c0262af
FB
9363
9364 for(i=0;i<16;i++) {
7fe48483 9365 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9366 if ((i % 4) == 3)
7fe48483 9367 cpu_fprintf(f, "\n");
2c0262af 9368 else
7fe48483 9369 cpu_fprintf(f, " ");
2c0262af 9370 }
b5ff1b31 9371 psr = cpsr_read(env);
687fa640
TS
9372 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9373 psr,
b5ff1b31
FB
9374 psr & (1 << 31) ? 'N' : '-',
9375 psr & (1 << 30) ? 'Z' : '-',
9376 psr & (1 << 29) ? 'C' : '-',
9377 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9378 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9379 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9380
5e3f878a 9381#if 0
b7bcbe95 9382 for (i = 0; i < 16; i++) {
8e96005d
FB
9383 d.d = env->vfp.regs[i];
9384 s0.i = d.l.lower;
9385 s1.i = d.l.upper;
a94a6abf
PB
9386 d0.f64 = d.d;
9387 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9388 i * 2, (int)s0.i, s0.s,
a94a6abf 9389 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9390 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9391 d0.d);
b7bcbe95 9392 }
40f137e1 9393 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9394#endif
2c0262af 9395}
a6b025d3 9396
d2856f1a
AJ
9397void gen_pc_load(CPUState *env, TranslationBlock *tb,
9398 unsigned long searched_pc, int pc_pos, void *puc)
9399{
9400 env->regs[15] = gen_opc_pc[pc_pos];
9401}