]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
ARM: fix ldrexd/strexd
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
28#include "exec-all.h"
29#include "disas.h"
57fec1fe 30#include "tcg-op.h"
79383c9c 31#include "qemu-log.h"
1497c961 32
a7812ae4 33#include "helpers.h"
1497c961 34#define GEN_HELPER 1
b26eefb6 35#include "helpers.h"
2c0262af 36
/* Feature-test convenience macros: nonzero when the CPU being translated
   implements the given architecture version or extension.  They read the
   decoder's `env` implicitly, so they are only usable inside the decode
   functions.  */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K   arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)

/* Abort decoding of the current insn when a required feature is absent;
   jumps to the decoder's shared `illegal_op` label.  */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 44
2c0262af
FB
/* internal defines */

/* Per-translation decoder state, threaded through all gen_* functions.  */
typedef struct DisasContext {
    target_ulong pc;          /* address of the insn currently being decoded */
    int is_jmp;               /* DISAS_* disposition for ending the TB */
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;                /* nonzero when decoding Thumb instructions */
#if !defined(CONFIG_USER_ONLY)
    int user;                 /* nonzero when translating unprivileged code */
#endif
} DisasContext;

/* In user-mode emulation everything runs as if unprivileged.  */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional executions state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
2c0262af 74
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* The 16 architectural core registers as TCG globals; index 15 is the PC.  */
static TCGv_i32 cpu_R[16];
/* State used to emulate load/store-exclusive (LDREX/STREX).  */
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME: These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

/* Debug names for the cpu_R[] globals above.  */
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
96
b26eefb6
PB
/* initialize TCG globals.  Called once at startup; binds the TCG global
   variables declared above to their CPUState fields.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

/* Second expansion of helpers.h registers the helper functions.  */
#define GEN_HELPER 2
#include "helpers.h"
}
125
/* Count of live 32-bit temporaries; used to detect temp leaks in the
   decoder (checked at end of each instruction).  */
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
141
d9ba4830
PB
/* Load a 32-bit value from the given byte offset within CPUState into a
   freshly allocated temporary.  */
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

/* Store VAR at the given byte offset within CPUState; VAR is freed.  */
static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
159
b26eefb6
PB
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn:
           the architectural PC reads as the current insn plus 2 in Thumb
           state and plus 4 in ARM state.  */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
183
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        /* PC writes discard bit 0 and force the TB to end.  */
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
195
/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual byte-to-halfword extensions are done in helpers.  */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 204
b26eefb6 205
b75263d6
JR
/* Write the bits of VAR selected by MASK into the CPSR.  */
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

/* Raise exception EXCP via the exception helper (does not return to
   generated code).  */
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}
222
3670669c
PB
/* Dual signed 16x16->32 multiply: on return a holds the product of the
   low halves and b the product of the high halves.  Both inputs are
   clobbered.  */
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    /* High halves: arithmetic shift keeps the sign.  */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    /* Copy the low product into a only after a's high half was used.  */
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}
237
/* Byteswap each halfword. */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    /* Old low byte becomes the sign-extended high part.  */
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
261
/* Unsigned bitfield extract.  MASK is (1 << width) - 1 for the field.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract. */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Sign-extend from bit (width - 1) with the xor/sub trick.  */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
284
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.
   MASK is the unshifted field mask; SHIFT positions the field.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
293
d9ba4830
PB
/* Round the top 32 bits of a 64-bit value: adds bit 31 of the low word
   (in a) into the high word (in b); the rounded high word is left in a.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
300
8f01245e
PB
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Signed 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
331
/* Signed 32x32->64 multiply.  The low half of the result is left in a
   and the high half in b; the inputs are NOT freed.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
    tcg_temp_free_i64(tmp1);
}
d9ba4830 347
8f01245e
PB
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
357
b26eefb6
PB
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   The carry is suppressed between the halves by masking bit 15 out of
   both operands and restoring it from the saved xor afterwards:
     tmp = (t0 ^ t1) & 0x8000;
     t0 &= ~0x8000;
     t1 &= ~0x8000;
     t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
377
9a119ff6
PB
/* Store VAR (0 or 1) into the carry flag field.  */
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
388
/* Set N and Z flags from var.  NF is tested by sign and ZF by comparison
   with zero, so storing the raw value to both fields suffices.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
395
/* T0 += T1 + CF. */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}
405
e9bb4aa9
JR
/* dest = T0 + T1 + CF.  Three-operand form of gen_adc.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}
415
3670669c
PB
/* dest = T0 - T1 + CF - 1.  (ARM subtract-with-carry semantics: the
   borrow is the inverted carry.)  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
426
ad69471c
PB
/* FIXME: Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* Copy bit SHIFT of VAR into CF (the shifter carry-out).  */
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        /* After shifting by 31 only bit 0 can be set anyway.  */
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
b26eefb6 443
9a119ff6
PB
444/* Shift by immediate. Includes special handling for shift == 0. */
445static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
446{
447 switch (shiftop) {
448 case 0: /* LSL */
449 if (shift != 0) {
450 if (flags)
451 shifter_out_im(var, 32 - shift);
452 tcg_gen_shli_i32(var, var, shift);
453 }
454 break;
455 case 1: /* LSR */
456 if (shift == 0) {
457 if (flags) {
458 tcg_gen_shri_i32(var, var, 31);
459 gen_set_CF(var);
460 }
461 tcg_gen_movi_i32(var, 0);
462 } else {
463 if (flags)
464 shifter_out_im(var, shift - 1);
465 tcg_gen_shri_i32(var, var, shift);
466 }
467 break;
468 case 2: /* ASR */
469 if (shift == 0)
470 shift = 32;
471 if (flags)
472 shifter_out_im(var, shift - 1);
473 if (shift == 32)
474 shift = 31;
475 tcg_gen_sari_i32(var, var, shift);
476 break;
477 case 3: /* ROR/RRX */
478 if (shift != 0) {
479 if (flags)
480 shifter_out_im(var, shift - 1);
f669df27 481 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 482 } else {
d9ba4830 483 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
484 if (flags)
485 shifter_out_im(var, 0);
486 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
487 tcg_gen_shli_i32(tmp, tmp, 31);
488 tcg_gen_or_i32(var, var, tmp);
489 dead_tmp(tmp);
b26eefb6
PB
490 }
491 }
492};
493
8984bd2e
PB
/* Shift VAR by the amount held in the SHIFT register.  SHIFT is freed.
   Flag-setting variants go through _cc helpers that also compute the
   shifter carry-out.  */
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        /* Rotate count is taken modulo 32.  */
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
515
6ddbc6e4
PB
/* Expand to a switch on op2 dispatching to the parallel add/subtract
   helper whose name is built from prefix PFX.  gen_pas_helper must be
   defined to the desired call shape before each use.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
/* ARM-encoding parallel add/subtract (SADD16 etc.): op1 selects the
   family (signed/unsigned, saturating, halving), op2 the operation.  */
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        /* The plain signed/unsigned forms also write the GE bits, so
           they take a pointer to that CPUState field.  */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
561
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.
   Here the operation is selected by op1 and the family by op2 — the
   reverse of the ARM encoding above.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        /* Plain signed/unsigned forms also write the GE bits.  */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
608
d9ba4830
PB
/* Emit a branch to LABEL that is taken when ARM condition code CC holds,
   evaluated from the lazy flag fields in CPUState.  For the compound
   conditions (hi, gt) an inverse label is used to express the AND.  */
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
2c0262af 706
/* For each data-processing opcode, whether it is a logical operation
   (flags set via gen_logic_CC) rather than an arithmetic one.  */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 725
d9ba4830
PB
/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        /* Interworking: bit 0 of the target selects the new state, so
           only store it when it actually changes.  */
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
740
/* Set PC and Thumb state from var.  var is marked as dead.
   Bit 0 of var selects the new Thumb state; the PC gets the rest.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
749
21aeb343
JR
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
762
b0109805
PB
/* Memory load helpers: allocate a fresh temporary, load from ADDR using
   mmu index INDEX with the named width/signedness, return the temp.  */
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
b0109805
PB
/* Memory store helpers: store VAL to ADDR with mmu index INDEX at the
   named width, then free VAL.  */
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
b5ff1b31 819
5e3f878a
PB
/* Store an immediate program-counter value.  */
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
824
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
831
b0109805
PB
/* Apply the addressing-mode-2 (word/byte load/store) offset encoded in
   INSN to the address in VAR: a 12-bit immediate or a shifted register,
   added or subtracted according to the U bit (bit 23).  */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
859
/* Apply the addressing-mode-3 (halfword/doubleword) offset encoded in
   INSN to VAR: a split 8-bit immediate or a plain register, added or
   subtracted per the U bit.  EXTRA is an additional fixed displacement
   (used for the second word of doubleword accesses).  */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
887
4373f3ce
PB
/* Two-operand VFP arithmetic on the F0/F1 scratch registers; DP selects
   double (cpu_F0d/F1d) or single (cpu_F0s/F1s) precision.  The result is
   left in F0.  */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

/* Single-operand VFP ops on F0, same DP convention as above.  */
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

/* Compare F0 with F1; cmpe also raises on quiet NaNs.  */
static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

/* Load zero into F1 (for compare-with-zero forms).  */
static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

/* Integer-to-float conversions: the integer source lives in cpu_F0s.  */
static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

/* Float-to-integer conversions: the integer result lands in cpu_F0s.
   The ..z variants use round-towards-zero.  */
static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

/* Fixed-point conversions on F0 with a compile-time SHIFT (the number of
   fraction bits).  */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1019
/* Load F0 from memory at ADDR, 64- or 32-bit according to DP.  */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

/* Store F0 to memory at ADDR, 64- or 32-bit according to DP.  */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
1035
8e96005d
FB
1036static inline long
1037vfp_reg_offset (int dp, int reg)
1038{
1039 if (dp)
1040 return offsetof(CPUARMState, vfp.regs[reg]);
1041 else if (reg & 1) {
1042 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1043 + offsetof(CPU_DoubleU, l.upper);
1044 } else {
1045 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1046 + offsetof(CPU_DoubleU, l.lower);
1047 }
1048}
9ee6e8bb
PB
1049
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  Each NEON
   D-register is two consecutive single-precision slots.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1059
8f8e3aa4
PB
1060static TCGv neon_load_reg(int reg, int pass)
1061{
1062 TCGv tmp = new_tmp();
1063 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1064 return tmp;
1065}
1066
/* Write a 32-bit temporary into one pass of a NEON register and
   release the temporary (consumes 'var'). */
static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}
1072
/* Load a full 64-bit NEON/VFP double register into 'var'. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

/* Store 'var' into a full 64-bit NEON/VFP double register. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1082
4373f3ce
PB
1083#define tcg_gen_ld_f32 tcg_gen_ld_i32
1084#define tcg_gen_ld_f64 tcg_gen_ld_i64
1085#define tcg_gen_st_f32 tcg_gen_st_i32
1086#define tcg_gen_st_f64 tcg_gen_st_i64
1087
b7bcbe95
FB
1088static inline void gen_mov_F0_vreg(int dp, int reg)
1089{
1090 if (dp)
4373f3ce 1091 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1092 else
4373f3ce 1093 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1094}
1095
1096static inline void gen_mov_F1_vreg(int dp, int reg)
1097{
1098 if (dp)
4373f3ce 1099 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1100 else
4373f3ce 1101 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1102}
1103
1104static inline void gen_mov_vreg_F0(int dp, int reg)
1105{
1106 if (dp)
4373f3ce 1107 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1108 else
4373f3ce 1109 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1110}
1111
18c9b560
AZ
1112#define ARM_CP_RW_BIT (1 << 20)
1113
/* Load iwMMXt data register wRn into a 64-bit TCG temp. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

/* Store a 64-bit TCG temp into iwMMXt data register wRn. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1123
da6b5335 1124static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1125{
da6b5335
FN
1126 TCGv var = new_tmp();
1127 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1128 return var;
e677137d
PB
1129}
1130
/* Write a 32-bit temporary to iwMMXt control register 'reg' and
   release the temporary (consumes 'var'). */
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    dead_tmp(var);
}
1136
/* Copy the M0 accumulator into iwMMXt data register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

/* Copy iwMMXt data register wRn into the M0 accumulator. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* M0 |= wRn (64-bit bitwise OR). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* M0 &= wRn (64-bit bitwise AND). */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* M0 ^= wRn (64-bit bitwise XOR). */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1164
/* Emit a gen_op_iwmmxt_<name>_M0_wRn() wrapper that applies the
   two-operand helper gen_helper_iwmmxt_<name> to M0 and wRn, leaving
   the result in M0. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but for helpers that additionally take cpu_env (they
   touch CPU state, e.g. saturation/condition flags). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the b/w/l (8/16/32-bit element) variants of an env op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand env helper: transforms M0 in place. */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1189
/* Multiply/accumulate and sum-of-absolute-differences ops. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Two-operand interleaving unpacks (per element size). */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Single-operand extending unpacks (u = unsigned, s = signed). */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise compares. */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max (signed and unsigned). */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Add/subtract variants (n/u/s suffixes match the helper names). */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averaging (0/1 rounding variants) and multiply-sum. */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Narrowing packs (u = unsigned saturate, s = signed saturate). */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
1246
e677137d
PB
1247static void gen_op_iwmmxt_set_mup(void)
1248{
1249 TCGv tmp;
1250 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1251 tcg_gen_ori_i32(tmp, tmp, 2);
1252 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1253}
1254
1255static void gen_op_iwmmxt_set_cup(void)
1256{
1257 TCGv tmp;
1258 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1259 tcg_gen_ori_i32(tmp, tmp, 1);
1260 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1261}
1262
/* Derive the N/Z status bits from cpu_M0 (via the setpsr_nz helper)
   and store the result in the wCASF flags register. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1269
/* M0 += zero-extended low 32 bits of wRn (used for the accumulating
   form of WSAD, where only the low word of wRn is summed in). */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1276
/* Compute the effective address for an iwMMXt load/store and leave it
   in 'dest'.  Decodes P (bit 24), U (bit 23, add vs subtract) and
   W (bit 21, writeback) from 'insn'; the base register is bits 19:16.
   Returns nonzero for an invalid addressing-mode encoding. */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, shifted left by 2 when bit 8 of insn is set. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);  /* writeback of the updated base */
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    /* NOTE(review): with P=0, W=0, U=1 we fall through and return
       success without writing 'dest' or freeing 'tmp'; presumably
       that encoding never reaches here -- worth confirming.  The
       return-1 path also leaves 'tmp' live. */
    return 0;
}
1310
da6b5335 1311static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1312{
1313 int rd = (insn >> 0) & 0xf;
da6b5335 1314 TCGv tmp;
18c9b560 1315
da6b5335
FN
1316 if (insn & (1 << 8)) {
1317 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1318 return 1;
da6b5335
FN
1319 } else {
1320 tmp = iwmmxt_load_creg(rd);
1321 }
1322 } else {
1323 tmp = new_tmp();
1324 iwmmxt_load_reg(cpu_V0, rd);
1325 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1326 }
1327 tcg_gen_andi_i32(tmp, tmp, mask);
1328 tcg_gen_mov_i32(dest, tmp);
1329 dead_tmp(tmp);
18c9b560
AZ
1330 return 0;
1331}
1332
1333/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1334 (ie. an undefined instruction). */
1335static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1336{
1337 int rd, wrd;
1338 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1339 TCGv addr;
1340 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1341
1342 if ((insn & 0x0e000e00) == 0x0c000000) {
1343 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1344 wrd = insn & 0xf;
1345 rdlo = (insn >> 12) & 0xf;
1346 rdhi = (insn >> 16) & 0xf;
1347 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1348 iwmmxt_load_reg(cpu_V0, wrd);
1349 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1350 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1351 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1352 } else { /* TMCRR */
da6b5335
FN
1353 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1354 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1355 gen_op_iwmmxt_set_mup();
1356 }
1357 return 0;
1358 }
1359
1360 wrd = (insn >> 12) & 0xf;
da6b5335
FN
1361 addr = new_tmp();
1362 if (gen_iwmmxt_address(s, insn, addr)) {
1363 dead_tmp(addr);
18c9b560 1364 return 1;
da6b5335 1365 }
18c9b560
AZ
1366 if (insn & ARM_CP_RW_BIT) {
1367 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
da6b5335
FN
1368 tmp = new_tmp();
1369 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1370 iwmmxt_store_creg(wrd, tmp);
18c9b560 1371 } else {
e677137d
PB
1372 i = 1;
1373 if (insn & (1 << 8)) {
1374 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1375 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1376 i = 0;
1377 } else { /* WLDRW wRd */
da6b5335 1378 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1379 }
1380 } else {
1381 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1382 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1383 } else { /* WLDRB */
da6b5335 1384 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1385 }
1386 }
1387 if (i) {
1388 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1389 dead_tmp(tmp);
1390 }
18c9b560
AZ
1391 gen_op_iwmmxt_movq_wRn_M0(wrd);
1392 }
1393 } else {
1394 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1395 tmp = iwmmxt_load_creg(wrd);
1396 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1397 } else {
1398 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1399 tmp = new_tmp();
1400 if (insn & (1 << 8)) {
1401 if (insn & (1 << 22)) { /* WSTRD */
1402 dead_tmp(tmp);
da6b5335 1403 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1404 } else { /* WSTRW wRd */
1405 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1406 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1407 }
1408 } else {
1409 if (insn & (1 << 22)) { /* WSTRH */
1410 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1411 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1412 } else { /* WSTRB */
1413 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1414 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1415 }
1416 }
18c9b560
AZ
1417 }
1418 }
d9968827 1419 dead_tmp(addr);
18c9b560
AZ
1420 return 0;
1421 }
1422
1423 if ((insn & 0x0f000000) != 0x0e000000)
1424 return 1;
1425
1426 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1427 case 0x000: /* WOR */
1428 wrd = (insn >> 12) & 0xf;
1429 rd0 = (insn >> 0) & 0xf;
1430 rd1 = (insn >> 16) & 0xf;
1431 gen_op_iwmmxt_movq_M0_wRn(rd0);
1432 gen_op_iwmmxt_orq_M0_wRn(rd1);
1433 gen_op_iwmmxt_setpsr_nz();
1434 gen_op_iwmmxt_movq_wRn_M0(wrd);
1435 gen_op_iwmmxt_set_mup();
1436 gen_op_iwmmxt_set_cup();
1437 break;
1438 case 0x011: /* TMCR */
1439 if (insn & 0xf)
1440 return 1;
1441 rd = (insn >> 12) & 0xf;
1442 wrd = (insn >> 16) & 0xf;
1443 switch (wrd) {
1444 case ARM_IWMMXT_wCID:
1445 case ARM_IWMMXT_wCASF:
1446 break;
1447 case ARM_IWMMXT_wCon:
1448 gen_op_iwmmxt_set_cup();
1449 /* Fall through. */
1450 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1451 tmp = iwmmxt_load_creg(wrd);
1452 tmp2 = load_reg(s, rd);
f669df27 1453 tcg_gen_andc_i32(tmp, tmp, tmp2);
da6b5335
FN
1454 dead_tmp(tmp2);
1455 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1456 break;
1457 case ARM_IWMMXT_wCGR0:
1458 case ARM_IWMMXT_wCGR1:
1459 case ARM_IWMMXT_wCGR2:
1460 case ARM_IWMMXT_wCGR3:
1461 gen_op_iwmmxt_set_cup();
da6b5335
FN
1462 tmp = load_reg(s, rd);
1463 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1464 break;
1465 default:
1466 return 1;
1467 }
1468 break;
1469 case 0x100: /* WXOR */
1470 wrd = (insn >> 12) & 0xf;
1471 rd0 = (insn >> 0) & 0xf;
1472 rd1 = (insn >> 16) & 0xf;
1473 gen_op_iwmmxt_movq_M0_wRn(rd0);
1474 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1475 gen_op_iwmmxt_setpsr_nz();
1476 gen_op_iwmmxt_movq_wRn_M0(wrd);
1477 gen_op_iwmmxt_set_mup();
1478 gen_op_iwmmxt_set_cup();
1479 break;
1480 case 0x111: /* TMRC */
1481 if (insn & 0xf)
1482 return 1;
1483 rd = (insn >> 12) & 0xf;
1484 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1485 tmp = iwmmxt_load_creg(wrd);
1486 store_reg(s, rd, tmp);
18c9b560
AZ
1487 break;
1488 case 0x300: /* WANDN */
1489 wrd = (insn >> 12) & 0xf;
1490 rd0 = (insn >> 0) & 0xf;
1491 rd1 = (insn >> 16) & 0xf;
1492 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1493 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1494 gen_op_iwmmxt_andq_M0_wRn(rd1);
1495 gen_op_iwmmxt_setpsr_nz();
1496 gen_op_iwmmxt_movq_wRn_M0(wrd);
1497 gen_op_iwmmxt_set_mup();
1498 gen_op_iwmmxt_set_cup();
1499 break;
1500 case 0x200: /* WAND */
1501 wrd = (insn >> 12) & 0xf;
1502 rd0 = (insn >> 0) & 0xf;
1503 rd1 = (insn >> 16) & 0xf;
1504 gen_op_iwmmxt_movq_M0_wRn(rd0);
1505 gen_op_iwmmxt_andq_M0_wRn(rd1);
1506 gen_op_iwmmxt_setpsr_nz();
1507 gen_op_iwmmxt_movq_wRn_M0(wrd);
1508 gen_op_iwmmxt_set_mup();
1509 gen_op_iwmmxt_set_cup();
1510 break;
1511 case 0x810: case 0xa10: /* WMADD */
1512 wrd = (insn >> 12) & 0xf;
1513 rd0 = (insn >> 0) & 0xf;
1514 rd1 = (insn >> 16) & 0xf;
1515 gen_op_iwmmxt_movq_M0_wRn(rd0);
1516 if (insn & (1 << 21))
1517 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1518 else
1519 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1520 gen_op_iwmmxt_movq_wRn_M0(wrd);
1521 gen_op_iwmmxt_set_mup();
1522 break;
1523 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1524 wrd = (insn >> 12) & 0xf;
1525 rd0 = (insn >> 16) & 0xf;
1526 rd1 = (insn >> 0) & 0xf;
1527 gen_op_iwmmxt_movq_M0_wRn(rd0);
1528 switch ((insn >> 22) & 3) {
1529 case 0:
1530 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1531 break;
1532 case 1:
1533 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1534 break;
1535 case 2:
1536 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1537 break;
1538 case 3:
1539 return 1;
1540 }
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1546 wrd = (insn >> 12) & 0xf;
1547 rd0 = (insn >> 16) & 0xf;
1548 rd1 = (insn >> 0) & 0xf;
1549 gen_op_iwmmxt_movq_M0_wRn(rd0);
1550 switch ((insn >> 22) & 3) {
1551 case 0:
1552 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1553 break;
1554 case 1:
1555 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1556 break;
1557 case 2:
1558 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1559 break;
1560 case 3:
1561 return 1;
1562 }
1563 gen_op_iwmmxt_movq_wRn_M0(wrd);
1564 gen_op_iwmmxt_set_mup();
1565 gen_op_iwmmxt_set_cup();
1566 break;
1567 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1568 wrd = (insn >> 12) & 0xf;
1569 rd0 = (insn >> 16) & 0xf;
1570 rd1 = (insn >> 0) & 0xf;
1571 gen_op_iwmmxt_movq_M0_wRn(rd0);
1572 if (insn & (1 << 22))
1573 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1574 else
1575 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1576 if (!(insn & (1 << 20)))
1577 gen_op_iwmmxt_addl_M0_wRn(wrd);
1578 gen_op_iwmmxt_movq_wRn_M0(wrd);
1579 gen_op_iwmmxt_set_mup();
1580 break;
1581 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1582 wrd = (insn >> 12) & 0xf;
1583 rd0 = (insn >> 16) & 0xf;
1584 rd1 = (insn >> 0) & 0xf;
1585 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1586 if (insn & (1 << 21)) {
1587 if (insn & (1 << 20))
1588 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1589 else
1590 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1591 } else {
1592 if (insn & (1 << 20))
1593 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1594 else
1595 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1596 }
18c9b560
AZ
1597 gen_op_iwmmxt_movq_wRn_M0(wrd);
1598 gen_op_iwmmxt_set_mup();
1599 break;
1600 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1601 wrd = (insn >> 12) & 0xf;
1602 rd0 = (insn >> 16) & 0xf;
1603 rd1 = (insn >> 0) & 0xf;
1604 gen_op_iwmmxt_movq_M0_wRn(rd0);
1605 if (insn & (1 << 21))
1606 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1607 else
1608 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1609 if (!(insn & (1 << 20))) {
e677137d
PB
1610 iwmmxt_load_reg(cpu_V1, wrd);
1611 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1612 }
1613 gen_op_iwmmxt_movq_wRn_M0(wrd);
1614 gen_op_iwmmxt_set_mup();
1615 break;
1616 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1617 wrd = (insn >> 12) & 0xf;
1618 rd0 = (insn >> 16) & 0xf;
1619 rd1 = (insn >> 0) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0);
1621 switch ((insn >> 22) & 3) {
1622 case 0:
1623 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1624 break;
1625 case 1:
1626 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1627 break;
1628 case 2:
1629 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1630 break;
1631 case 3:
1632 return 1;
1633 }
1634 gen_op_iwmmxt_movq_wRn_M0(wrd);
1635 gen_op_iwmmxt_set_mup();
1636 gen_op_iwmmxt_set_cup();
1637 break;
1638 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1639 wrd = (insn >> 12) & 0xf;
1640 rd0 = (insn >> 16) & 0xf;
1641 rd1 = (insn >> 0) & 0xf;
1642 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1643 if (insn & (1 << 22)) {
1644 if (insn & (1 << 20))
1645 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1646 else
1647 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1648 } else {
1649 if (insn & (1 << 20))
1650 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1651 else
1652 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1653 }
18c9b560
AZ
1654 gen_op_iwmmxt_movq_wRn_M0(wrd);
1655 gen_op_iwmmxt_set_mup();
1656 gen_op_iwmmxt_set_cup();
1657 break;
1658 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1659 wrd = (insn >> 12) & 0xf;
1660 rd0 = (insn >> 16) & 0xf;
1661 rd1 = (insn >> 0) & 0xf;
1662 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1663 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1664 tcg_gen_andi_i32(tmp, tmp, 7);
1665 iwmmxt_load_reg(cpu_V1, rd1);
1666 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1667 dead_tmp(tmp);
18c9b560
AZ
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 break;
1671 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1672 if (((insn >> 6) & 3) == 3)
1673 return 1;
18c9b560
AZ
1674 rd = (insn >> 12) & 0xf;
1675 wrd = (insn >> 16) & 0xf;
da6b5335 1676 tmp = load_reg(s, rd);
18c9b560
AZ
1677 gen_op_iwmmxt_movq_M0_wRn(wrd);
1678 switch ((insn >> 6) & 3) {
1679 case 0:
da6b5335
FN
1680 tmp2 = tcg_const_i32(0xff);
1681 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1682 break;
1683 case 1:
da6b5335
FN
1684 tmp2 = tcg_const_i32(0xffff);
1685 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1686 break;
1687 case 2:
da6b5335
FN
1688 tmp2 = tcg_const_i32(0xffffffff);
1689 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1690 break;
da6b5335
FN
1691 default:
1692 TCGV_UNUSED(tmp2);
1693 TCGV_UNUSED(tmp3);
18c9b560 1694 }
da6b5335
FN
1695 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1696 tcg_temp_free(tmp3);
1697 tcg_temp_free(tmp2);
1698 dead_tmp(tmp);
18c9b560
AZ
1699 gen_op_iwmmxt_movq_wRn_M0(wrd);
1700 gen_op_iwmmxt_set_mup();
1701 break;
1702 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1703 rd = (insn >> 12) & 0xf;
1704 wrd = (insn >> 16) & 0xf;
da6b5335 1705 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1706 return 1;
1707 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335 1708 tmp = new_tmp();
18c9b560
AZ
1709 switch ((insn >> 22) & 3) {
1710 case 0:
da6b5335
FN
1711 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1712 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1713 if (insn & 8) {
1714 tcg_gen_ext8s_i32(tmp, tmp);
1715 } else {
1716 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1717 }
1718 break;
1719 case 1:
da6b5335
FN
1720 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1721 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1722 if (insn & 8) {
1723 tcg_gen_ext16s_i32(tmp, tmp);
1724 } else {
1725 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1726 }
1727 break;
1728 case 2:
da6b5335
FN
1729 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1730 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1731 break;
18c9b560 1732 }
da6b5335 1733 store_reg(s, rd, tmp);
18c9b560
AZ
1734 break;
1735 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1736 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1737 return 1;
da6b5335 1738 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1739 switch ((insn >> 22) & 3) {
1740 case 0:
da6b5335 1741 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1742 break;
1743 case 1:
da6b5335 1744 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1745 break;
1746 case 2:
da6b5335 1747 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1748 break;
18c9b560 1749 }
da6b5335
FN
1750 tcg_gen_shli_i32(tmp, tmp, 28);
1751 gen_set_nzcv(tmp);
1752 dead_tmp(tmp);
18c9b560
AZ
1753 break;
1754 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1755 if (((insn >> 6) & 3) == 3)
1756 return 1;
18c9b560
AZ
1757 rd = (insn >> 12) & 0xf;
1758 wrd = (insn >> 16) & 0xf;
da6b5335 1759 tmp = load_reg(s, rd);
18c9b560
AZ
1760 switch ((insn >> 6) & 3) {
1761 case 0:
da6b5335 1762 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1763 break;
1764 case 1:
da6b5335 1765 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1766 break;
1767 case 2:
da6b5335 1768 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1769 break;
18c9b560 1770 }
da6b5335 1771 dead_tmp(tmp);
18c9b560
AZ
1772 gen_op_iwmmxt_movq_wRn_M0(wrd);
1773 gen_op_iwmmxt_set_mup();
1774 break;
1775 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1776 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1777 return 1;
da6b5335
FN
1778 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1779 tmp2 = new_tmp();
1780 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1781 switch ((insn >> 22) & 3) {
1782 case 0:
1783 for (i = 0; i < 7; i ++) {
da6b5335
FN
1784 tcg_gen_shli_i32(tmp2, tmp2, 4);
1785 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1786 }
1787 break;
1788 case 1:
1789 for (i = 0; i < 3; i ++) {
da6b5335
FN
1790 tcg_gen_shli_i32(tmp2, tmp2, 8);
1791 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1792 }
1793 break;
1794 case 2:
da6b5335
FN
1795 tcg_gen_shli_i32(tmp2, tmp2, 16);
1796 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1797 break;
18c9b560 1798 }
da6b5335
FN
1799 gen_set_nzcv(tmp);
1800 dead_tmp(tmp2);
1801 dead_tmp(tmp);
18c9b560
AZ
1802 break;
1803 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1804 wrd = (insn >> 12) & 0xf;
1805 rd0 = (insn >> 16) & 0xf;
1806 gen_op_iwmmxt_movq_M0_wRn(rd0);
1807 switch ((insn >> 22) & 3) {
1808 case 0:
e677137d 1809 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1810 break;
1811 case 1:
e677137d 1812 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1813 break;
1814 case 2:
e677137d 1815 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1816 break;
1817 case 3:
1818 return 1;
1819 }
1820 gen_op_iwmmxt_movq_wRn_M0(wrd);
1821 gen_op_iwmmxt_set_mup();
1822 break;
1823 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1824 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1825 return 1;
da6b5335
FN
1826 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1827 tmp2 = new_tmp();
1828 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1829 switch ((insn >> 22) & 3) {
1830 case 0:
1831 for (i = 0; i < 7; i ++) {
da6b5335
FN
1832 tcg_gen_shli_i32(tmp2, tmp2, 4);
1833 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1834 }
1835 break;
1836 case 1:
1837 for (i = 0; i < 3; i ++) {
da6b5335
FN
1838 tcg_gen_shli_i32(tmp2, tmp2, 8);
1839 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1840 }
1841 break;
1842 case 2:
da6b5335
FN
1843 tcg_gen_shli_i32(tmp2, tmp2, 16);
1844 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1845 break;
18c9b560 1846 }
da6b5335
FN
1847 gen_set_nzcv(tmp);
1848 dead_tmp(tmp2);
1849 dead_tmp(tmp);
18c9b560
AZ
1850 break;
1851 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1852 rd = (insn >> 12) & 0xf;
1853 rd0 = (insn >> 16) & 0xf;
da6b5335 1854 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1855 return 1;
1856 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 1857 tmp = new_tmp();
18c9b560
AZ
1858 switch ((insn >> 22) & 3) {
1859 case 0:
da6b5335 1860 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1861 break;
1862 case 1:
da6b5335 1863 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1864 break;
1865 case 2:
da6b5335 1866 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1867 break;
18c9b560 1868 }
da6b5335 1869 store_reg(s, rd, tmp);
18c9b560
AZ
1870 break;
1871 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1872 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1873 wrd = (insn >> 12) & 0xf;
1874 rd0 = (insn >> 16) & 0xf;
1875 rd1 = (insn >> 0) & 0xf;
1876 gen_op_iwmmxt_movq_M0_wRn(rd0);
1877 switch ((insn >> 22) & 3) {
1878 case 0:
1879 if (insn & (1 << 21))
1880 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1881 else
1882 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1883 break;
1884 case 1:
1885 if (insn & (1 << 21))
1886 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1887 else
1888 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1889 break;
1890 case 2:
1891 if (insn & (1 << 21))
1892 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1893 else
1894 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1895 break;
1896 case 3:
1897 return 1;
1898 }
1899 gen_op_iwmmxt_movq_wRn_M0(wrd);
1900 gen_op_iwmmxt_set_mup();
1901 gen_op_iwmmxt_set_cup();
1902 break;
1903 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1904 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1905 wrd = (insn >> 12) & 0xf;
1906 rd0 = (insn >> 16) & 0xf;
1907 gen_op_iwmmxt_movq_M0_wRn(rd0);
1908 switch ((insn >> 22) & 3) {
1909 case 0:
1910 if (insn & (1 << 21))
1911 gen_op_iwmmxt_unpacklsb_M0();
1912 else
1913 gen_op_iwmmxt_unpacklub_M0();
1914 break;
1915 case 1:
1916 if (insn & (1 << 21))
1917 gen_op_iwmmxt_unpacklsw_M0();
1918 else
1919 gen_op_iwmmxt_unpackluw_M0();
1920 break;
1921 case 2:
1922 if (insn & (1 << 21))
1923 gen_op_iwmmxt_unpacklsl_M0();
1924 else
1925 gen_op_iwmmxt_unpacklul_M0();
1926 break;
1927 case 3:
1928 return 1;
1929 }
1930 gen_op_iwmmxt_movq_wRn_M0(wrd);
1931 gen_op_iwmmxt_set_mup();
1932 gen_op_iwmmxt_set_cup();
1933 break;
1934 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1935 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1936 wrd = (insn >> 12) & 0xf;
1937 rd0 = (insn >> 16) & 0xf;
1938 gen_op_iwmmxt_movq_M0_wRn(rd0);
1939 switch ((insn >> 22) & 3) {
1940 case 0:
1941 if (insn & (1 << 21))
1942 gen_op_iwmmxt_unpackhsb_M0();
1943 else
1944 gen_op_iwmmxt_unpackhub_M0();
1945 break;
1946 case 1:
1947 if (insn & (1 << 21))
1948 gen_op_iwmmxt_unpackhsw_M0();
1949 else
1950 gen_op_iwmmxt_unpackhuw_M0();
1951 break;
1952 case 2:
1953 if (insn & (1 << 21))
1954 gen_op_iwmmxt_unpackhsl_M0();
1955 else
1956 gen_op_iwmmxt_unpackhul_M0();
1957 break;
1958 case 3:
1959 return 1;
1960 }
1961 gen_op_iwmmxt_movq_wRn_M0(wrd);
1962 gen_op_iwmmxt_set_mup();
1963 gen_op_iwmmxt_set_cup();
1964 break;
1965 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1966 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1967 if (((insn >> 22) & 3) == 0)
1968 return 1;
18c9b560
AZ
1969 wrd = (insn >> 12) & 0xf;
1970 rd0 = (insn >> 16) & 0xf;
1971 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1972 tmp = new_tmp();
1973 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1974 dead_tmp(tmp);
18c9b560 1975 return 1;
da6b5335 1976 }
18c9b560 1977 switch ((insn >> 22) & 3) {
18c9b560 1978 case 1:
da6b5335 1979 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1980 break;
1981 case 2:
da6b5335 1982 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1983 break;
1984 case 3:
da6b5335 1985 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1986 break;
1987 }
da6b5335 1988 dead_tmp(tmp);
18c9b560
AZ
1989 gen_op_iwmmxt_movq_wRn_M0(wrd);
1990 gen_op_iwmmxt_set_mup();
1991 gen_op_iwmmxt_set_cup();
1992 break;
1993 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1994 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1995 if (((insn >> 22) & 3) == 0)
1996 return 1;
18c9b560
AZ
1997 wrd = (insn >> 12) & 0xf;
1998 rd0 = (insn >> 16) & 0xf;
1999 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2000 tmp = new_tmp();
2001 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2002 dead_tmp(tmp);
18c9b560 2003 return 1;
da6b5335 2004 }
18c9b560 2005 switch ((insn >> 22) & 3) {
18c9b560 2006 case 1:
da6b5335 2007 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2008 break;
2009 case 2:
da6b5335 2010 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2011 break;
2012 case 3:
da6b5335 2013 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2014 break;
2015 }
da6b5335 2016 dead_tmp(tmp);
18c9b560
AZ
2017 gen_op_iwmmxt_movq_wRn_M0(wrd);
2018 gen_op_iwmmxt_set_mup();
2019 gen_op_iwmmxt_set_cup();
2020 break;
2021 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2022 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2023 if (((insn >> 22) & 3) == 0)
2024 return 1;
18c9b560
AZ
2025 wrd = (insn >> 12) & 0xf;
2026 rd0 = (insn >> 16) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2028 tmp = new_tmp();
2029 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2030 dead_tmp(tmp);
18c9b560 2031 return 1;
da6b5335 2032 }
18c9b560 2033 switch ((insn >> 22) & 3) {
18c9b560 2034 case 1:
da6b5335 2035 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2036 break;
2037 case 2:
da6b5335 2038 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2039 break;
2040 case 3:
da6b5335 2041 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2042 break;
2043 }
da6b5335 2044 dead_tmp(tmp);
18c9b560
AZ
2045 gen_op_iwmmxt_movq_wRn_M0(wrd);
2046 gen_op_iwmmxt_set_mup();
2047 gen_op_iwmmxt_set_cup();
2048 break;
2049 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2050 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2051 if (((insn >> 22) & 3) == 0)
2052 return 1;
18c9b560
AZ
2053 wrd = (insn >> 12) & 0xf;
2054 rd0 = (insn >> 16) & 0xf;
2055 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2056 tmp = new_tmp();
18c9b560 2057 switch ((insn >> 22) & 3) {
18c9b560 2058 case 1:
da6b5335
FN
2059 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2060 dead_tmp(tmp);
18c9b560 2061 return 1;
da6b5335
FN
2062 }
2063 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2064 break;
2065 case 2:
da6b5335
FN
2066 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2067 dead_tmp(tmp);
18c9b560 2068 return 1;
da6b5335
FN
2069 }
2070 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2071 break;
2072 case 3:
da6b5335
FN
2073 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2074 dead_tmp(tmp);
18c9b560 2075 return 1;
da6b5335
FN
2076 }
2077 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2078 break;
2079 }
da6b5335 2080 dead_tmp(tmp);
18c9b560
AZ
2081 gen_op_iwmmxt_movq_wRn_M0(wrd);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2084 break;
2085 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2086 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 rd1 = (insn >> 0) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 switch ((insn >> 22) & 3) {
2092 case 0:
2093 if (insn & (1 << 21))
2094 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2095 else
2096 gen_op_iwmmxt_minub_M0_wRn(rd1);
2097 break;
2098 case 1:
2099 if (insn & (1 << 21))
2100 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2101 else
2102 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2103 break;
2104 case 2:
2105 if (insn & (1 << 21))
2106 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2107 else
2108 gen_op_iwmmxt_minul_M0_wRn(rd1);
2109 break;
2110 case 3:
2111 return 1;
2112 }
2113 gen_op_iwmmxt_movq_wRn_M0(wrd);
2114 gen_op_iwmmxt_set_mup();
2115 break;
2116 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2117 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2118 wrd = (insn >> 12) & 0xf;
2119 rd0 = (insn >> 16) & 0xf;
2120 rd1 = (insn >> 0) & 0xf;
2121 gen_op_iwmmxt_movq_M0_wRn(rd0);
2122 switch ((insn >> 22) & 3) {
2123 case 0:
2124 if (insn & (1 << 21))
2125 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2126 else
2127 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2128 break;
2129 case 1:
2130 if (insn & (1 << 21))
2131 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2132 else
2133 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2134 break;
2135 case 2:
2136 if (insn & (1 << 21))
2137 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2138 else
2139 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2140 break;
2141 case 3:
2142 return 1;
2143 }
2144 gen_op_iwmmxt_movq_wRn_M0(wrd);
2145 gen_op_iwmmxt_set_mup();
2146 break;
2147 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2148 case 0x402: case 0x502: case 0x602: case 0x702:
2149 wrd = (insn >> 12) & 0xf;
2150 rd0 = (insn >> 16) & 0xf;
2151 rd1 = (insn >> 0) & 0xf;
2152 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2153 tmp = tcg_const_i32((insn >> 20) & 3);
2154 iwmmxt_load_reg(cpu_V1, rd1);
2155 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2156 tcg_temp_free(tmp);
18c9b560
AZ
2157 gen_op_iwmmxt_movq_wRn_M0(wrd);
2158 gen_op_iwmmxt_set_mup();
2159 break;
2160 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2161 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2162 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2163 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2164 wrd = (insn >> 12) & 0xf;
2165 rd0 = (insn >> 16) & 0xf;
2166 rd1 = (insn >> 0) & 0xf;
2167 gen_op_iwmmxt_movq_M0_wRn(rd0);
2168 switch ((insn >> 20) & 0xf) {
2169 case 0x0:
2170 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2171 break;
2172 case 0x1:
2173 gen_op_iwmmxt_subub_M0_wRn(rd1);
2174 break;
2175 case 0x3:
2176 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2177 break;
2178 case 0x4:
2179 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2180 break;
2181 case 0x5:
2182 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2183 break;
2184 case 0x7:
2185 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2186 break;
2187 case 0x8:
2188 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2189 break;
2190 case 0x9:
2191 gen_op_iwmmxt_subul_M0_wRn(rd1);
2192 break;
2193 case 0xb:
2194 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2195 break;
2196 default:
2197 return 1;
2198 }
2199 gen_op_iwmmxt_movq_wRn_M0(wrd);
2200 gen_op_iwmmxt_set_mup();
2201 gen_op_iwmmxt_set_cup();
2202 break;
2203 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2204 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2205 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2206 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2207 wrd = (insn >> 12) & 0xf;
2208 rd0 = (insn >> 16) & 0xf;
2209 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2210 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2211 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2212 tcg_temp_free(tmp);
18c9b560
AZ
2213 gen_op_iwmmxt_movq_wRn_M0(wrd);
2214 gen_op_iwmmxt_set_mup();
2215 gen_op_iwmmxt_set_cup();
2216 break;
2217 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2218 case 0x418: case 0x518: case 0x618: case 0x718:
2219 case 0x818: case 0x918: case 0xa18: case 0xb18:
2220 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2221 wrd = (insn >> 12) & 0xf;
2222 rd0 = (insn >> 16) & 0xf;
2223 rd1 = (insn >> 0) & 0xf;
2224 gen_op_iwmmxt_movq_M0_wRn(rd0);
2225 switch ((insn >> 20) & 0xf) {
2226 case 0x0:
2227 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2228 break;
2229 case 0x1:
2230 gen_op_iwmmxt_addub_M0_wRn(rd1);
2231 break;
2232 case 0x3:
2233 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2234 break;
2235 case 0x4:
2236 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2237 break;
2238 case 0x5:
2239 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2240 break;
2241 case 0x7:
2242 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2243 break;
2244 case 0x8:
2245 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2246 break;
2247 case 0x9:
2248 gen_op_iwmmxt_addul_M0_wRn(rd1);
2249 break;
2250 case 0xb:
2251 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2252 break;
2253 default:
2254 return 1;
2255 }
2256 gen_op_iwmmxt_movq_wRn_M0(wrd);
2257 gen_op_iwmmxt_set_mup();
2258 gen_op_iwmmxt_set_cup();
2259 break;
2260 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2261 case 0x408: case 0x508: case 0x608: case 0x708:
2262 case 0x808: case 0x908: case 0xa08: case 0xb08:
2263 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2264 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2265 return 1;
18c9b560
AZ
2266 wrd = (insn >> 12) & 0xf;
2267 rd0 = (insn >> 16) & 0xf;
2268 rd1 = (insn >> 0) & 0xf;
2269 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2270 switch ((insn >> 22) & 3) {
18c9b560
AZ
2271 case 1:
2272 if (insn & (1 << 21))
2273 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2274 else
2275 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2276 break;
2277 case 2:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_packul_M0_wRn(rd1);
2282 break;
2283 case 3:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2288 break;
2289 }
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x201: case 0x203: case 0x205: case 0x207:
2295 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2296 case 0x211: case 0x213: case 0x215: case 0x217:
2297 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2298 wrd = (insn >> 5) & 0xf;
2299 rd0 = (insn >> 12) & 0xf;
2300 rd1 = (insn >> 0) & 0xf;
2301 if (rd0 == 0xf || rd1 == 0xf)
2302 return 1;
2303 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2304 tmp = load_reg(s, rd0);
2305 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2306 switch ((insn >> 16) & 0xf) {
2307 case 0x0: /* TMIA */
da6b5335 2308 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2309 break;
2310 case 0x8: /* TMIAPH */
da6b5335 2311 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2312 break;
2313 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2314 if (insn & (1 << 16))
da6b5335 2315 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2316 if (insn & (1 << 17))
da6b5335
FN
2317 tcg_gen_shri_i32(tmp2, tmp2, 16);
2318 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2319 break;
2320 default:
da6b5335
FN
2321 dead_tmp(tmp2);
2322 dead_tmp(tmp);
18c9b560
AZ
2323 return 1;
2324 }
da6b5335
FN
2325 dead_tmp(tmp2);
2326 dead_tmp(tmp);
18c9b560
AZ
2327 gen_op_iwmmxt_movq_wRn_M0(wrd);
2328 gen_op_iwmmxt_set_mup();
2329 break;
2330 default:
2331 return 1;
2332 }
2333
2334 return 0;
2335}
2336
2337/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2338 (ie. an undefined instruction). */
2339static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2340{
2341 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2342 TCGv tmp, tmp2;
18c9b560
AZ
2343
2344 if ((insn & 0x0ff00f10) == 0x0e200010) {
2345 /* Multiply with Internal Accumulate Format */
2346 rd0 = (insn >> 12) & 0xf;
2347 rd1 = insn & 0xf;
2348 acc = (insn >> 5) & 7;
2349
2350 if (acc != 0)
2351 return 1;
2352
3a554c0f
FN
2353 tmp = load_reg(s, rd0);
2354 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2355 switch ((insn >> 16) & 0xf) {
2356 case 0x0: /* MIA */
3a554c0f 2357 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2358 break;
2359 case 0x8: /* MIAPH */
3a554c0f 2360 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2361 break;
2362 case 0xc: /* MIABB */
2363 case 0xd: /* MIABT */
2364 case 0xe: /* MIATB */
2365 case 0xf: /* MIATT */
18c9b560 2366 if (insn & (1 << 16))
3a554c0f 2367 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2368 if (insn & (1 << 17))
3a554c0f
FN
2369 tcg_gen_shri_i32(tmp2, tmp2, 16);
2370 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2371 break;
2372 default:
2373 return 1;
2374 }
3a554c0f
FN
2375 dead_tmp(tmp2);
2376 dead_tmp(tmp);
18c9b560
AZ
2377
2378 gen_op_iwmmxt_movq_wRn_M0(acc);
2379 return 0;
2380 }
2381
2382 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2383 /* Internal Accumulator Access Format */
2384 rdhi = (insn >> 16) & 0xf;
2385 rdlo = (insn >> 12) & 0xf;
2386 acc = insn & 7;
2387
2388 if (acc != 0)
2389 return 1;
2390
2391 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2392 iwmmxt_load_reg(cpu_V0, acc);
2393 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2394 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2395 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2396 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2397 } else { /* MAR */
3a554c0f
FN
2398 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2399 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2400 }
2401 return 0;
2402 }
2403
2404 return 1;
2405}
2406
c1713132
AZ
2407/* Disassemble system coprocessor instruction. Return nonzero if
2408 instruction is not defined. */
2409static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2410{
b75263d6 2411 TCGv tmp, tmp2;
c1713132
AZ
2412 uint32_t rd = (insn >> 12) & 0xf;
2413 uint32_t cp = (insn >> 8) & 0xf;
2414 if (IS_USER(s)) {
2415 return 1;
2416 }
2417
18c9b560 2418 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2419 if (!env->cp[cp].cp_read)
2420 return 1;
8984bd2e
PB
2421 gen_set_pc_im(s->pc);
2422 tmp = new_tmp();
b75263d6
JR
2423 tmp2 = tcg_const_i32(insn);
2424 gen_helper_get_cp(tmp, cpu_env, tmp2);
2425 tcg_temp_free(tmp2);
8984bd2e 2426 store_reg(s, rd, tmp);
c1713132
AZ
2427 } else {
2428 if (!env->cp[cp].cp_write)
2429 return 1;
8984bd2e
PB
2430 gen_set_pc_im(s->pc);
2431 tmp = load_reg(s, rd);
b75263d6
JR
2432 tmp2 = tcg_const_i32(insn);
2433 gen_helper_set_cp(cpu_env, tmp2, tmp);
2434 tcg_temp_free(tmp2);
a60de947 2435 dead_tmp(tmp);
c1713132
AZ
2436 }
2437 return 0;
2438}
2439
9ee6e8bb
PB
2440static int cp15_user_ok(uint32_t insn)
2441{
2442 int cpn = (insn >> 16) & 0xf;
2443 int cpm = insn & 0xf;
2444 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2445
2446 if (cpn == 13 && cpm == 0) {
2447 /* TLS register. */
2448 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2449 return 1;
2450 }
2451 if (cpn == 7) {
2452 /* ISB, DSB, DMB. */
2453 if ((cpm == 5 && op == 4)
2454 || (cpm == 10 && (op == 4 || op == 5)))
2455 return 1;
2456 }
2457 return 0;
2458}
2459
3f26c122
RV
2460static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2461{
2462 TCGv tmp;
2463 int cpn = (insn >> 16) & 0xf;
2464 int cpm = insn & 0xf;
2465 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2466
2467 if (!arm_feature(env, ARM_FEATURE_V6K))
2468 return 0;
2469
2470 if (!(cpn == 13 && cpm == 0))
2471 return 0;
2472
2473 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2474 switch (op) {
2475 case 2:
c5883be2 2476 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2477 break;
2478 case 3:
c5883be2 2479 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2480 break;
2481 case 4:
c5883be2 2482 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2483 break;
2484 default:
3f26c122
RV
2485 return 0;
2486 }
2487 store_reg(s, rd, tmp);
2488
2489 } else {
2490 tmp = load_reg(s, rd);
2491 switch (op) {
2492 case 2:
c5883be2 2493 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2494 break;
2495 case 3:
c5883be2 2496 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2497 break;
2498 case 4:
c5883be2 2499 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2500 break;
2501 default:
c5883be2 2502 dead_tmp(tmp);
3f26c122
RV
2503 return 0;
2504 }
3f26c122
RV
2505 }
2506 return 1;
2507}
2508
b5ff1b31
FB
2509/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2510 instruction is not defined. */
a90b7318 2511static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2512{
2513 uint32_t rd;
b75263d6 2514 TCGv tmp, tmp2;
b5ff1b31 2515
9ee6e8bb
PB
2516 /* M profile cores use memory mapped registers instead of cp15. */
2517 if (arm_feature(env, ARM_FEATURE_M))
2518 return 1;
2519
2520 if ((insn & (1 << 25)) == 0) {
2521 if (insn & (1 << 20)) {
2522 /* mrrc */
2523 return 1;
2524 }
2525 /* mcrr. Used for block cache operations, so implement as no-op. */
2526 return 0;
2527 }
2528 if ((insn & (1 << 4)) == 0) {
2529 /* cdp */
2530 return 1;
2531 }
2532 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2533 return 1;
2534 }
9332f9da
FB
2535 if ((insn & 0x0fff0fff) == 0x0e070f90
2536 || (insn & 0x0fff0fff) == 0x0e070f58) {
2537 /* Wait for interrupt. */
8984bd2e 2538 gen_set_pc_im(s->pc);
9ee6e8bb 2539 s->is_jmp = DISAS_WFI;
9332f9da
FB
2540 return 0;
2541 }
b5ff1b31 2542 rd = (insn >> 12) & 0xf;
3f26c122
RV
2543
2544 if (cp15_tls_load_store(env, s, insn, rd))
2545 return 0;
2546
b75263d6 2547 tmp2 = tcg_const_i32(insn);
18c9b560 2548 if (insn & ARM_CP_RW_BIT) {
8984bd2e 2549 tmp = new_tmp();
b75263d6 2550 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2551 /* If the destination register is r15 then sets condition codes. */
2552 if (rd != 15)
8984bd2e
PB
2553 store_reg(s, rd, tmp);
2554 else
2555 dead_tmp(tmp);
b5ff1b31 2556 } else {
8984bd2e 2557 tmp = load_reg(s, rd);
b75263d6 2558 gen_helper_set_cp15(cpu_env, tmp2, tmp);
8984bd2e 2559 dead_tmp(tmp);
a90b7318
AZ
2560 /* Normally we would always end the TB here, but Linux
2561 * arch/arm/mach-pxa/sleep.S expects two instructions following
2562 * an MMU enable to execute from cache. Imitate this behaviour. */
2563 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2564 (insn & 0x0fff0fff) != 0x0e010f10)
2565 gen_lookup_tb(s);
b5ff1b31 2566 }
b75263d6 2567 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2568 return 0;
2569}
2570
9ee6e8bb
PB
2571#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2572#define VFP_SREG(insn, bigbit, smallbit) \
2573 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2574#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2575 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2576 reg = (((insn) >> (bigbit)) & 0x0f) \
2577 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2578 } else { \
2579 if (insn & (1 << (smallbit))) \
2580 return 1; \
2581 reg = ((insn) >> (bigbit)) & 0x0f; \
2582 }} while (0)
2583
2584#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2585#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2586#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2587#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2588#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2589#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2590
4373f3ce
PB
2591/* Move between integer and VFP cores. */
2592static TCGv gen_vfp_mrs(void)
2593{
2594 TCGv tmp = new_tmp();
2595 tcg_gen_mov_i32(tmp, cpu_F0s);
2596 return tmp;
2597}
2598
2599static void gen_vfp_msr(TCGv tmp)
2600{
2601 tcg_gen_mov_i32(cpu_F0s, tmp);
2602 dead_tmp(tmp);
2603}
2604
9ee6e8bb
PB
2605static inline int
2606vfp_enabled(CPUState * env)
2607{
2608 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2609}
2610
ad69471c
PB
2611static void gen_neon_dup_u8(TCGv var, int shift)
2612{
2613 TCGv tmp = new_tmp();
2614 if (shift)
2615 tcg_gen_shri_i32(var, var, shift);
86831435 2616 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2617 tcg_gen_shli_i32(tmp, var, 8);
2618 tcg_gen_or_i32(var, var, tmp);
2619 tcg_gen_shli_i32(tmp, var, 16);
2620 tcg_gen_or_i32(var, var, tmp);
2621 dead_tmp(tmp);
2622}
2623
2624static void gen_neon_dup_low16(TCGv var)
2625{
2626 TCGv tmp = new_tmp();
86831435 2627 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 dead_tmp(tmp);
2631}
2632
2633static void gen_neon_dup_high16(TCGv var)
2634{
2635 TCGv tmp = new_tmp();
2636 tcg_gen_andi_i32(var, var, 0xffff0000);
2637 tcg_gen_shri_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 dead_tmp(tmp);
2640}
2641
b7bcbe95
FB
2642/* Disassemble a VFP instruction. Returns nonzero if an error occured
2643 (ie. an undefined instruction). */
2644static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2645{
2646 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2647 int dp, veclen;
312eea9f 2648 TCGv addr;
4373f3ce 2649 TCGv tmp;
ad69471c 2650 TCGv tmp2;
b7bcbe95 2651
40f137e1
PB
2652 if (!arm_feature(env, ARM_FEATURE_VFP))
2653 return 1;
2654
9ee6e8bb
PB
2655 if (!vfp_enabled(env)) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2657 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2658 return 1;
2659 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2660 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2661 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2662 return 1;
2663 }
b7bcbe95
FB
2664 dp = ((insn & 0xf00) == 0xb00);
2665 switch ((insn >> 24) & 0xf) {
2666 case 0xe:
2667 if (insn & (1 << 4)) {
2668 /* single register transfer */
b7bcbe95
FB
2669 rd = (insn >> 12) & 0xf;
2670 if (dp) {
9ee6e8bb
PB
2671 int size;
2672 int pass;
2673
2674 VFP_DREG_N(rn, insn);
2675 if (insn & 0xf)
b7bcbe95 2676 return 1;
9ee6e8bb
PB
2677 if (insn & 0x00c00060
2678 && !arm_feature(env, ARM_FEATURE_NEON))
2679 return 1;
2680
2681 pass = (insn >> 21) & 1;
2682 if (insn & (1 << 22)) {
2683 size = 0;
2684 offset = ((insn >> 5) & 3) * 8;
2685 } else if (insn & (1 << 5)) {
2686 size = 1;
2687 offset = (insn & (1 << 6)) ? 16 : 0;
2688 } else {
2689 size = 2;
2690 offset = 0;
2691 }
18c9b560 2692 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2693 /* vfp->arm */
ad69471c 2694 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2695 switch (size) {
2696 case 0:
9ee6e8bb 2697 if (offset)
ad69471c 2698 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2699 if (insn & (1 << 23))
ad69471c 2700 gen_uxtb(tmp);
9ee6e8bb 2701 else
ad69471c 2702 gen_sxtb(tmp);
9ee6e8bb
PB
2703 break;
2704 case 1:
9ee6e8bb
PB
2705 if (insn & (1 << 23)) {
2706 if (offset) {
ad69471c 2707 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2708 } else {
ad69471c 2709 gen_uxth(tmp);
9ee6e8bb
PB
2710 }
2711 } else {
2712 if (offset) {
ad69471c 2713 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2714 } else {
ad69471c 2715 gen_sxth(tmp);
9ee6e8bb
PB
2716 }
2717 }
2718 break;
2719 case 2:
9ee6e8bb
PB
2720 break;
2721 }
ad69471c 2722 store_reg(s, rd, tmp);
b7bcbe95
FB
2723 } else {
2724 /* arm->vfp */
ad69471c 2725 tmp = load_reg(s, rd);
9ee6e8bb
PB
2726 if (insn & (1 << 23)) {
2727 /* VDUP */
2728 if (size == 0) {
ad69471c 2729 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2730 } else if (size == 1) {
ad69471c 2731 gen_neon_dup_low16(tmp);
9ee6e8bb 2732 }
cbbccffc
PB
2733 for (n = 0; n <= pass * 2; n++) {
2734 tmp2 = new_tmp();
2735 tcg_gen_mov_i32(tmp2, tmp);
2736 neon_store_reg(rn, n, tmp2);
2737 }
2738 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2739 } else {
2740 /* VMOV */
2741 switch (size) {
2742 case 0:
ad69471c
PB
2743 tmp2 = neon_load_reg(rn, pass);
2744 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2745 dead_tmp(tmp2);
9ee6e8bb
PB
2746 break;
2747 case 1:
ad69471c
PB
2748 tmp2 = neon_load_reg(rn, pass);
2749 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2750 dead_tmp(tmp2);
9ee6e8bb
PB
2751 break;
2752 case 2:
9ee6e8bb
PB
2753 break;
2754 }
ad69471c 2755 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2756 }
b7bcbe95 2757 }
9ee6e8bb
PB
2758 } else { /* !dp */
2759 if ((insn & 0x6f) != 0x00)
2760 return 1;
2761 rn = VFP_SREG_N(insn);
18c9b560 2762 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2763 /* vfp->arm */
2764 if (insn & (1 << 21)) {
2765 /* system register */
40f137e1 2766 rn >>= 1;
9ee6e8bb 2767
b7bcbe95 2768 switch (rn) {
40f137e1 2769 case ARM_VFP_FPSID:
4373f3ce 2770 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2771 VFP3 restricts all id registers to privileged
2772 accesses. */
2773 if (IS_USER(s)
2774 && arm_feature(env, ARM_FEATURE_VFP3))
2775 return 1;
4373f3ce 2776 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2777 break;
40f137e1 2778 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2779 if (IS_USER(s))
2780 return 1;
4373f3ce 2781 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2782 break;
40f137e1
PB
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2785 /* Not present in VFP3. */
2786 if (IS_USER(s)
2787 || arm_feature(env, ARM_FEATURE_VFP3))
2788 return 1;
4373f3ce 2789 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2790 break;
40f137e1 2791 case ARM_VFP_FPSCR:
601d70b9 2792 if (rd == 15) {
4373f3ce
PB
2793 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2794 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2795 } else {
2796 tmp = new_tmp();
2797 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2798 }
b7bcbe95 2799 break;
9ee6e8bb
PB
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 if (IS_USER(s)
2803 || !arm_feature(env, ARM_FEATURE_VFP3))
2804 return 1;
4373f3ce 2805 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2806 break;
b7bcbe95
FB
2807 default:
2808 return 1;
2809 }
2810 } else {
2811 gen_mov_F0_vreg(0, rn);
4373f3ce 2812 tmp = gen_vfp_mrs();
b7bcbe95
FB
2813 }
2814 if (rd == 15) {
b5ff1b31 2815 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2816 gen_set_nzcv(tmp);
2817 dead_tmp(tmp);
2818 } else {
2819 store_reg(s, rd, tmp);
2820 }
b7bcbe95
FB
2821 } else {
2822 /* arm->vfp */
4373f3ce 2823 tmp = load_reg(s, rd);
b7bcbe95 2824 if (insn & (1 << 21)) {
40f137e1 2825 rn >>= 1;
b7bcbe95
FB
2826 /* system register */
2827 switch (rn) {
40f137e1 2828 case ARM_VFP_FPSID:
9ee6e8bb
PB
2829 case ARM_VFP_MVFR0:
2830 case ARM_VFP_MVFR1:
b7bcbe95
FB
2831 /* Writes are ignored. */
2832 break;
40f137e1 2833 case ARM_VFP_FPSCR:
4373f3ce
PB
2834 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2835 dead_tmp(tmp);
b5ff1b31 2836 gen_lookup_tb(s);
b7bcbe95 2837 break;
40f137e1 2838 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2839 if (IS_USER(s))
2840 return 1;
71b3c3de
JR
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only */
2843 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2844 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2845 gen_lookup_tb(s);
2846 break;
2847 case ARM_VFP_FPINST:
2848 case ARM_VFP_FPINST2:
4373f3ce 2849 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2850 break;
b7bcbe95
FB
2851 default:
2852 return 1;
2853 }
2854 } else {
4373f3ce 2855 gen_vfp_msr(tmp);
b7bcbe95
FB
2856 gen_mov_vreg_F0(0, rn);
2857 }
2858 }
2859 }
2860 } else {
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2864 if (dp) {
2865 if (op == 15) {
2866 /* rn is opcode */
2867 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2868 } else {
2869 /* rn is register number */
9ee6e8bb 2870 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2871 }
2872
2873 if (op == 15 && (rn == 15 || rn > 17)) {
2874 /* Integer or single precision destination. */
9ee6e8bb 2875 rd = VFP_SREG_D(insn);
b7bcbe95 2876 } else {
9ee6e8bb 2877 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2878 }
2879
2880 if (op == 15 && (rn == 16 || rn == 17)) {
2881 /* Integer source. */
2882 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2883 } else {
9ee6e8bb 2884 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2885 }
2886 } else {
9ee6e8bb 2887 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2888 if (op == 15 && rn == 15) {
2889 /* Double precision destination. */
9ee6e8bb
PB
2890 VFP_DREG_D(rd, insn);
2891 } else {
2892 rd = VFP_SREG_D(insn);
2893 }
2894 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2895 }
2896
2897 veclen = env->vfp.vec_len;
2898 if (op == 15 && rn > 3)
2899 veclen = 0;
2900
2901 /* Shut up compiler warnings. */
2902 delta_m = 0;
2903 delta_d = 0;
2904 bank_mask = 0;
3b46e624 2905
b7bcbe95
FB
2906 if (veclen > 0) {
2907 if (dp)
2908 bank_mask = 0xc;
2909 else
2910 bank_mask = 0x18;
2911
2912 /* Figure out what type of vector operation this is. */
2913 if ((rd & bank_mask) == 0) {
2914 /* scalar */
2915 veclen = 0;
2916 } else {
2917 if (dp)
2918 delta_d = (env->vfp.vec_stride >> 1) + 1;
2919 else
2920 delta_d = env->vfp.vec_stride + 1;
2921
2922 if ((rm & bank_mask) == 0) {
2923 /* mixed scalar/vector */
2924 delta_m = 0;
2925 } else {
2926 /* vector */
2927 delta_m = delta_d;
2928 }
2929 }
2930 }
2931
2932 /* Load the initial operands. */
2933 if (op == 15) {
2934 switch (rn) {
2935 case 16:
2936 case 17:
2937 /* Integer source */
2938 gen_mov_F0_vreg(0, rm);
2939 break;
2940 case 8:
2941 case 9:
2942 /* Compare */
2943 gen_mov_F0_vreg(dp, rd);
2944 gen_mov_F1_vreg(dp, rm);
2945 break;
2946 case 10:
2947 case 11:
2948 /* Compare with zero */
2949 gen_mov_F0_vreg(dp, rd);
2950 gen_vfp_F1_ld0(dp);
2951 break;
9ee6e8bb
PB
2952 case 20:
2953 case 21:
2954 case 22:
2955 case 23:
644ad806
PB
2956 case 28:
2957 case 29:
2958 case 30:
2959 case 31:
9ee6e8bb
PB
2960 /* Source and destination the same. */
2961 gen_mov_F0_vreg(dp, rd);
2962 break;
b7bcbe95
FB
2963 default:
2964 /* One source operand. */
2965 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2966 break;
b7bcbe95
FB
2967 }
2968 } else {
2969 /* Two source operands. */
2970 gen_mov_F0_vreg(dp, rn);
2971 gen_mov_F1_vreg(dp, rm);
2972 }
2973
2974 for (;;) {
2975 /* Perform the calculation. */
2976 switch (op) {
2977 case 0: /* mac: fd + (fn * fm) */
2978 gen_vfp_mul(dp);
2979 gen_mov_F1_vreg(dp, rd);
2980 gen_vfp_add(dp);
2981 break;
2982 case 1: /* nmac: fd - (fn * fm) */
2983 gen_vfp_mul(dp);
2984 gen_vfp_neg(dp);
2985 gen_mov_F1_vreg(dp, rd);
2986 gen_vfp_add(dp);
2987 break;
2988 case 2: /* msc: -fd + (fn * fm) */
2989 gen_vfp_mul(dp);
2990 gen_mov_F1_vreg(dp, rd);
2991 gen_vfp_sub(dp);
2992 break;
2993 case 3: /* nmsc: -fd - (fn * fm) */
2994 gen_vfp_mul(dp);
b7bcbe95 2995 gen_vfp_neg(dp);
c9fb531a
PB
2996 gen_mov_F1_vreg(dp, rd);
2997 gen_vfp_sub(dp);
b7bcbe95
FB
2998 break;
2999 case 4: /* mul: fn * fm */
3000 gen_vfp_mul(dp);
3001 break;
3002 case 5: /* nmul: -(fn * fm) */
3003 gen_vfp_mul(dp);
3004 gen_vfp_neg(dp);
3005 break;
3006 case 6: /* add: fn + fm */
3007 gen_vfp_add(dp);
3008 break;
3009 case 7: /* sub: fn - fm */
3010 gen_vfp_sub(dp);
3011 break;
3012 case 8: /* div: fn / fm */
3013 gen_vfp_div(dp);
3014 break;
9ee6e8bb
PB
3015 case 14: /* fconst */
3016 if (!arm_feature(env, ARM_FEATURE_VFP3))
3017 return 1;
3018
3019 n = (insn << 12) & 0x80000000;
3020 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3021 if (dp) {
3022 if (i & 0x40)
3023 i |= 0x3f80;
3024 else
3025 i |= 0x4000;
3026 n |= i << 16;
4373f3ce 3027 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3028 } else {
3029 if (i & 0x40)
3030 i |= 0x780;
3031 else
3032 i |= 0x800;
3033 n |= i << 19;
5b340b51 3034 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3035 }
9ee6e8bb 3036 break;
b7bcbe95
FB
3037 case 15: /* extension space */
3038 switch (rn) {
3039 case 0: /* cpy */
3040 /* no-op */
3041 break;
3042 case 1: /* abs */
3043 gen_vfp_abs(dp);
3044 break;
3045 case 2: /* neg */
3046 gen_vfp_neg(dp);
3047 break;
3048 case 3: /* sqrt */
3049 gen_vfp_sqrt(dp);
3050 break;
60011498
PB
3051 case 4: /* vcvtb.f32.f16 */
3052 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3053 return 1;
3054 tmp = gen_vfp_mrs();
3055 tcg_gen_ext16u_i32(tmp, tmp);
3056 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3057 dead_tmp(tmp);
3058 break;
3059 case 5: /* vcvtt.f32.f16 */
3060 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3061 return 1;
3062 tmp = gen_vfp_mrs();
3063 tcg_gen_shri_i32(tmp, tmp, 16);
3064 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3065 dead_tmp(tmp);
3066 break;
3067 case 6: /* vcvtb.f16.f32 */
3068 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3069 return 1;
3070 tmp = new_tmp();
3071 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3072 gen_mov_F0_vreg(0, rd);
3073 tmp2 = gen_vfp_mrs();
3074 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3075 tcg_gen_or_i32(tmp, tmp, tmp2);
3076 dead_tmp(tmp2);
3077 gen_vfp_msr(tmp);
3078 break;
3079 case 7: /* vcvtt.f16.f32 */
3080 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3081 return 1;
3082 tmp = new_tmp();
3083 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3084 tcg_gen_shli_i32(tmp, tmp, 16);
3085 gen_mov_F0_vreg(0, rd);
3086 tmp2 = gen_vfp_mrs();
3087 tcg_gen_ext16u_i32(tmp2, tmp2);
3088 tcg_gen_or_i32(tmp, tmp, tmp2);
3089 dead_tmp(tmp2);
3090 gen_vfp_msr(tmp);
3091 break;
b7bcbe95
FB
3092 case 8: /* cmp */
3093 gen_vfp_cmp(dp);
3094 break;
3095 case 9: /* cmpe */
3096 gen_vfp_cmpe(dp);
3097 break;
3098 case 10: /* cmpz */
3099 gen_vfp_cmp(dp);
3100 break;
3101 case 11: /* cmpez */
3102 gen_vfp_F1_ld0(dp);
3103 gen_vfp_cmpe(dp);
3104 break;
3105 case 15: /* single<->double conversion */
3106 if (dp)
4373f3ce 3107 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3108 else
4373f3ce 3109 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3110 break;
3111 case 16: /* fuito */
3112 gen_vfp_uito(dp);
3113 break;
3114 case 17: /* fsito */
3115 gen_vfp_sito(dp);
3116 break;
9ee6e8bb
PB
3117 case 20: /* fshto */
3118 if (!arm_feature(env, ARM_FEATURE_VFP3))
3119 return 1;
644ad806 3120 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3121 break;
3122 case 21: /* fslto */
3123 if (!arm_feature(env, ARM_FEATURE_VFP3))
3124 return 1;
644ad806 3125 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3126 break;
3127 case 22: /* fuhto */
3128 if (!arm_feature(env, ARM_FEATURE_VFP3))
3129 return 1;
644ad806 3130 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3131 break;
3132 case 23: /* fulto */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
644ad806 3135 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3136 break;
b7bcbe95
FB
3137 case 24: /* ftoui */
3138 gen_vfp_toui(dp);
3139 break;
3140 case 25: /* ftouiz */
3141 gen_vfp_touiz(dp);
3142 break;
3143 case 26: /* ftosi */
3144 gen_vfp_tosi(dp);
3145 break;
3146 case 27: /* ftosiz */
3147 gen_vfp_tosiz(dp);
3148 break;
9ee6e8bb
PB
3149 case 28: /* ftosh */
3150 if (!arm_feature(env, ARM_FEATURE_VFP3))
3151 return 1;
644ad806 3152 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3153 break;
3154 case 29: /* ftosl */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
644ad806 3157 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3158 break;
3159 case 30: /* ftouh */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
644ad806 3162 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3163 break;
3164 case 31: /* ftoul */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
644ad806 3167 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3168 break;
b7bcbe95
FB
3169 default: /* undefined */
3170 printf ("rn:%d\n", rn);
3171 return 1;
3172 }
3173 break;
3174 default: /* undefined */
3175 printf ("op:%d\n", op);
3176 return 1;
3177 }
3178
3179 /* Write back the result. */
3180 if (op == 15 && (rn >= 8 && rn <= 11))
3181 ; /* Comparison, do nothing. */
3182 else if (op == 15 && rn > 17)
3183 /* Integer result. */
3184 gen_mov_vreg_F0(0, rd);
3185 else if (op == 15 && rn == 15)
3186 /* conversion */
3187 gen_mov_vreg_F0(!dp, rd);
3188 else
3189 gen_mov_vreg_F0(dp, rd);
3190
3191 /* break out of the loop if we have finished */
3192 if (veclen == 0)
3193 break;
3194
3195 if (op == 15 && delta_m == 0) {
3196 /* single source one-many */
3197 while (veclen--) {
3198 rd = ((rd + delta_d) & (bank_mask - 1))
3199 | (rd & bank_mask);
3200 gen_mov_vreg_F0(dp, rd);
3201 }
3202 break;
3203 }
3204 /* Setup the next operands. */
3205 veclen--;
3206 rd = ((rd + delta_d) & (bank_mask - 1))
3207 | (rd & bank_mask);
3208
3209 if (op == 15) {
3210 /* One source operand. */
3211 rm = ((rm + delta_m) & (bank_mask - 1))
3212 | (rm & bank_mask);
3213 gen_mov_F0_vreg(dp, rm);
3214 } else {
3215 /* Two source operands. */
3216 rn = ((rn + delta_d) & (bank_mask - 1))
3217 | (rn & bank_mask);
3218 gen_mov_F0_vreg(dp, rn);
3219 if (delta_m) {
3220 rm = ((rm + delta_m) & (bank_mask - 1))
3221 | (rm & bank_mask);
3222 gen_mov_F1_vreg(dp, rm);
3223 }
3224 }
3225 }
3226 }
3227 break;
3228 case 0xc:
3229 case 0xd:
9ee6e8bb 3230 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3231 /* two-register transfer */
3232 rn = (insn >> 16) & 0xf;
3233 rd = (insn >> 12) & 0xf;
3234 if (dp) {
9ee6e8bb
PB
3235 VFP_DREG_M(rm, insn);
3236 } else {
3237 rm = VFP_SREG_M(insn);
3238 }
b7bcbe95 3239
18c9b560 3240 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3241 /* vfp->arm */
3242 if (dp) {
4373f3ce
PB
3243 gen_mov_F0_vreg(0, rm * 2);
3244 tmp = gen_vfp_mrs();
3245 store_reg(s, rd, tmp);
3246 gen_mov_F0_vreg(0, rm * 2 + 1);
3247 tmp = gen_vfp_mrs();
3248 store_reg(s, rn, tmp);
b7bcbe95
FB
3249 } else {
3250 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3251 tmp = gen_vfp_mrs();
3252 store_reg(s, rn, tmp);
b7bcbe95 3253 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3254 tmp = gen_vfp_mrs();
3255 store_reg(s, rd, tmp);
b7bcbe95
FB
3256 }
3257 } else {
3258 /* arm->vfp */
3259 if (dp) {
4373f3ce
PB
3260 tmp = load_reg(s, rd);
3261 gen_vfp_msr(tmp);
3262 gen_mov_vreg_F0(0, rm * 2);
3263 tmp = load_reg(s, rn);
3264 gen_vfp_msr(tmp);
3265 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3266 } else {
4373f3ce
PB
3267 tmp = load_reg(s, rn);
3268 gen_vfp_msr(tmp);
b7bcbe95 3269 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3270 tmp = load_reg(s, rd);
3271 gen_vfp_msr(tmp);
b7bcbe95
FB
3272 gen_mov_vreg_F0(0, rm + 1);
3273 }
3274 }
3275 } else {
3276 /* Load/store */
3277 rn = (insn >> 16) & 0xf;
3278 if (dp)
9ee6e8bb 3279 VFP_DREG_D(rd, insn);
b7bcbe95 3280 else
9ee6e8bb
PB
3281 rd = VFP_SREG_D(insn);
3282 if (s->thumb && rn == 15) {
312eea9f
FN
3283 addr = new_tmp();
3284 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3285 } else {
312eea9f 3286 addr = load_reg(s, rn);
9ee6e8bb 3287 }
b7bcbe95
FB
3288 if ((insn & 0x01200000) == 0x01000000) {
3289 /* Single load/store */
3290 offset = (insn & 0xff) << 2;
3291 if ((insn & (1 << 23)) == 0)
3292 offset = -offset;
312eea9f 3293 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3294 if (insn & (1 << 20)) {
312eea9f 3295 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3296 gen_mov_vreg_F0(dp, rd);
3297 } else {
3298 gen_mov_F0_vreg(dp, rd);
312eea9f 3299 gen_vfp_st(s, dp, addr);
b7bcbe95 3300 }
312eea9f 3301 dead_tmp(addr);
b7bcbe95
FB
3302 } else {
3303 /* load/store multiple */
3304 if (dp)
3305 n = (insn >> 1) & 0x7f;
3306 else
3307 n = insn & 0xff;
3308
3309 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3310 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3311
3312 if (dp)
3313 offset = 8;
3314 else
3315 offset = 4;
3316 for (i = 0; i < n; i++) {
18c9b560 3317 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3318 /* load */
312eea9f 3319 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3320 gen_mov_vreg_F0(dp, rd + i);
3321 } else {
3322 /* store */
3323 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3324 gen_vfp_st(s, dp, addr);
b7bcbe95 3325 }
312eea9f 3326 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3327 }
3328 if (insn & (1 << 21)) {
3329 /* writeback */
3330 if (insn & (1 << 24))
3331 offset = -offset * n;
3332 else if (dp && (insn & 1))
3333 offset = 4;
3334 else
3335 offset = 0;
3336
3337 if (offset != 0)
312eea9f
FN
3338 tcg_gen_addi_i32(addr, addr, offset);
3339 store_reg(s, rn, addr);
3340 } else {
3341 dead_tmp(addr);
b7bcbe95
FB
3342 }
3343 }
3344 }
3345 break;
3346 default:
3347 /* Should never happen. */
3348 return 1;
3349 }
3350 return 0;
3351}
3352
6e256c93 3353static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3354{
6e256c93
FB
3355 TranslationBlock *tb;
3356
3357 tb = s->tb;
3358 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3359 tcg_gen_goto_tb(n);
8984bd2e 3360 gen_set_pc_im(dest);
57fec1fe 3361 tcg_gen_exit_tb((long)tb + n);
6e256c93 3362 } else {
8984bd2e 3363 gen_set_pc_im(dest);
57fec1fe 3364 tcg_gen_exit_tb(0);
6e256c93 3365 }
c53be334
FB
3366}
3367
8aaca4c0
FB
3368static inline void gen_jmp (DisasContext *s, uint32_t dest)
3369{
551bd27f 3370 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3371 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3372 if (s->thumb)
d9ba4830
PB
3373 dest |= 1;
3374 gen_bx_im(s, dest);
8aaca4c0 3375 } else {
6e256c93 3376 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3377 s->is_jmp = DISAS_TB_JUMP;
3378 }
3379}
3380
d9ba4830 3381static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3382{
ee097184 3383 if (x)
d9ba4830 3384 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3385 else
d9ba4830 3386 gen_sxth(t0);
ee097184 3387 if (y)
d9ba4830 3388 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3389 else
d9ba4830
PB
3390 gen_sxth(t1);
3391 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3392}
3393
3394/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3395static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3396 uint32_t mask;
3397
3398 mask = 0;
3399 if (flags & (1 << 0))
3400 mask |= 0xff;
3401 if (flags & (1 << 1))
3402 mask |= 0xff00;
3403 if (flags & (1 << 2))
3404 mask |= 0xff0000;
3405 if (flags & (1 << 3))
3406 mask |= 0xff000000;
9ee6e8bb 3407
2ae23e75 3408 /* Mask out undefined bits. */
9ee6e8bb
PB
3409 mask &= ~CPSR_RESERVED;
3410 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3411 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3412 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3413 mask &= ~CPSR_IT;
9ee6e8bb 3414 /* Mask out execution state bits. */
2ae23e75 3415 if (!spsr)
e160c51c 3416 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3417 /* Mask out privileged bits. */
3418 if (IS_USER(s))
9ee6e8bb 3419 mask &= CPSR_USER;
b5ff1b31
FB
3420 return mask;
3421}
3422
2fbac54b
FN
3423/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3424static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3425{
d9ba4830 3426 TCGv tmp;
b5ff1b31
FB
3427 if (spsr) {
3428 /* ??? This is also undefined in system mode. */
3429 if (IS_USER(s))
3430 return 1;
d9ba4830
PB
3431
3432 tmp = load_cpu_field(spsr);
3433 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3434 tcg_gen_andi_i32(t0, t0, mask);
3435 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3436 store_cpu_field(tmp, spsr);
b5ff1b31 3437 } else {
2fbac54b 3438 gen_set_cpsr(t0, mask);
b5ff1b31 3439 }
2fbac54b 3440 dead_tmp(t0);
b5ff1b31
FB
3441 gen_lookup_tb(s);
3442 return 0;
3443}
3444
2fbac54b
FN
3445/* Returns nonzero if access to the PSR is not permitted. */
3446static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3447{
3448 TCGv tmp;
3449 tmp = new_tmp();
3450 tcg_gen_movi_i32(tmp, val);
3451 return gen_set_psr(s, mask, spsr, tmp);
3452}
3453
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    /* Jump to the return address...  */
    store_reg(s, 15, pc);
    /* ...and restore CPSR (all bits) from the banked SPSR.  */
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    dead_tmp(tmp);
    /* CPSR changed: end the TB so the new state takes effect.  */
    s->is_jmp = DISAS_UPDATE;
}
3464
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    /* Restore the full CPSR first, then jump to the saved address.  */
    gen_set_cpsr(cpsr, 0xffffffff);
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    /* CPSR changed: end the TB so the new state takes effect.  */
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3473
9ee6e8bb
PB
3474static inline void
3475gen_set_condexec (DisasContext *s)
3476{
3477 if (s->condexec_mask) {
8f01245e
PB
3478 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3479 TCGv tmp = new_tmp();
3480 tcg_gen_movi_i32(tmp, val);
d9ba4830 3481 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3482 }
3483}
3b46e624 3484
9ee6e8bb
PB
3485static void gen_nop_hint(DisasContext *s, int val)
3486{
3487 switch (val) {
3488 case 3: /* wfi */
8984bd2e 3489 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3490 s->is_jmp = DISAS_WFI;
3491 break;
3492 case 2: /* wfe */
3493 case 4: /* sev */
3494 /* TODO: Implement SEV and WFE. May help SMP performance. */
3495 default: /* nop */
3496 break;
3497 }
3498}
99c475ab 3499
/* Shorthand operand triple "V0, V0, V1" for in-place 64-bit Neon ops.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3501
dd8fbd78 3502static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3503{
3504 switch (size) {
dd8fbd78
FN
3505 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3506 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3507 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3508 default: return 1;
3509 }
3510 return 0;
3511}
3512
dd8fbd78 3513static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3514{
3515 switch (size) {
dd8fbd78
FN
3516 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3517 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3518 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3519 default: return;
3520 }
3521}
3522
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)

/* Dispatch to the signed/unsigned 8/16/32-bit variant of a Neon helper
   that takes cpu_env, selected by the surrounding 'size' and 'u'
   variables.  Operates in place on tmp with second operand tmp2; falls
   out of the enclosing function with 'return 1' on an invalid size.  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3580
/* Read back a 32-bit value previously spilled to the per-CPU VFP
   scratch area, returning it in a fresh temporary (caller frees).  */
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
3587
/* Spill a 32-bit value to the per-CPU VFP scratch area and release the
   temporary (consumes var).  */
static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    dead_tmp(var);
}
3593
dd8fbd78 3594static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3595{
dd8fbd78 3596 TCGv tmp;
9ee6e8bb 3597 if (size == 1) {
dd8fbd78 3598 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3599 } else {
dd8fbd78
FN
3600 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3601 if (reg & 1) {
3602 gen_neon_dup_low16(tmp);
3603 } else {
3604 gen_neon_dup_high16(tmp);
3605 }
9ee6e8bb 3606 }
dd8fbd78 3607 return tmp;
9ee6e8bb
PB
3608}
3609
/* Deinterleave (unzip) the eight bytes of the pair {t1:t0}: on exit t0
   holds the even-numbered input bytes and t1 the odd-numbered ones.  */
static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
{
    TCGv rd, rm, tmp;

    rd = new_tmp();
    rm = new_tmp();
    tmp = new_tmp();

    /* rd = {t1.b2, t1.b0, t0.b2, t0.b0} -- the even input bytes.  */
    tcg_gen_andi_i32(rd, t0, 0xff);
    tcg_gen_shri_i32(tmp, t0, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
    tcg_gen_or_i32(rd, rd, tmp);

    /* rm = {t1.b3, t1.b1, t0.b3, t0.b1} -- the odd input bytes.  */
    tcg_gen_shri_i32(rm, t0, 8);
    tcg_gen_andi_i32(rm, rm, 0xff);
    tcg_gen_shri_i32(tmp, t0, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_andi_i32(tmp, t1, 0xff000000);
    tcg_gen_or_i32(t1, rm, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rm);
    dead_tmp(rd);
}
3645
3646static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3647{
3648 TCGv rd, rm, tmp;
3649
3650 rd = new_tmp();
3651 rm = new_tmp();
3652 tmp = new_tmp();
3653
3654 tcg_gen_andi_i32(rd, t0, 0xff);
3655 tcg_gen_shli_i32(tmp, t1, 8);
3656 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3657 tcg_gen_or_i32(rd, rd, tmp);
3658 tcg_gen_shli_i32(tmp, t0, 16);
3659 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3660 tcg_gen_or_i32(rd, rd, tmp);
3661 tcg_gen_shli_i32(tmp, t1, 24);
3662 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3663 tcg_gen_or_i32(rd, rd, tmp);
3664
3665 tcg_gen_andi_i32(rm, t1, 0xff000000);
3666 tcg_gen_shri_i32(tmp, t0, 8);
3667 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3668 tcg_gen_or_i32(rm, rm, tmp);
3669 tcg_gen_shri_i32(tmp, t1, 8);
3670 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3671 tcg_gen_or_i32(rm, rm, tmp);
3672 tcg_gen_shri_i32(tmp, t0, 16);
3673 tcg_gen_andi_i32(tmp, tmp, 0xff);
3674 tcg_gen_or_i32(t1, rm, tmp);
3675 tcg_gen_mov_i32(t0, rd);
3676
3677 dead_tmp(tmp);
3678 dead_tmp(rm);
3679 dead_tmp(rd);
3680}
3681
3682static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3683{
3684 TCGv tmp, tmp2;
3685
3686 tmp = new_tmp();
3687 tmp2 = new_tmp();
3688
3689 tcg_gen_andi_i32(tmp, t0, 0xffff);
3690 tcg_gen_shli_i32(tmp2, t1, 16);
3691 tcg_gen_or_i32(tmp, tmp, tmp2);
3692 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3693 tcg_gen_shri_i32(tmp2, t0, 16);
3694 tcg_gen_or_i32(t1, t1, tmp2);
3695 tcg_gen_mov_i32(t0, tmp);
3696
3697 dead_tmp(tmp2);
3698 dead_tmp(tmp);
3699}
3700
/* Unzip the contents of register 'reg' into the scratch slots starting
   at 'tmp', element width per 'size'.
   NOTE(review): the loop condition is n < q + 1 with n += 2, so it
   executes exactly once for both q=0 and q=1 -- verify against the
   architectural quad-word VUZP behavior.  */
static void gen_neon_unzip(int reg, int q, int tmp, int size)
{
    int n;
    TCGv t0, t1;

    for (n = 0; n < q + 1; n += 2) {
        t0 = neon_load_reg(reg, n);
        t1 = neon_load_reg(reg, n + 1);
        switch (size) {
        case 0: gen_neon_unzip_u8(t0, t1); break;
        case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same.  */
        case 2: /* no-op */; break;
        default: abort();
        }
        /* Results go to the scratch area; the caller reads them back.  */
        neon_store_scratch(tmp + n, t0);
        neon_store_scratch(tmp + n + 1, t1);
    }
}
3719
/* Transpose step on byte elements (one 32-bit pass of VTRN.8).
   On exit: t0 = {t0.b2, t1.b2, t0.b0, t1.b0} and
            t1 = {t0.b3, t1.b3, t0.b1, t1.b1} (high:low).
   NOTE(review): which operand is architecturally "d" vs "m" depends on
   the caller's order -- verify against the VTRN translation.  */
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = new_tmp();
    tmp = new_tmp();

    /* rd = odd bytes of t0 merged with even bytes of t1.  */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = odd bytes of t1 merged with even-position bytes of t0.  */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rd);
}
3741
3742static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3743{
3744 TCGv rd, tmp;
3745
3746 rd = new_tmp();
3747 tmp = new_tmp();
3748
3749 tcg_gen_shli_i32(rd, t0, 16);
3750 tcg_gen_andi_i32(tmp, t1, 0xffff);
3751 tcg_gen_or_i32(rd, rd, tmp);
3752 tcg_gen_shri_i32(t1, t1, 16);
3753 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3754 tcg_gen_or_i32(t1, t1, tmp);
3755 tcg_gen_mov_i32(t0, rd);
3756
3757 dead_tmp(tmp);
3758 dead_tmp(rd);
3759}
3760
3761
/* Register layout for the NEON "load/store multiple elements" forms,
   indexed by insn bits [11:8]: how many D registers are accessed, the
   interleave factor between them, and the register-number spacing.  */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3779
3780/* Translate a NEON load/store element instruction. Return nonzero if the
3781 instruction is invalid. */
3782static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3783{
3784 int rd, rn, rm;
3785 int op;
3786 int nregs;
3787 int interleave;
84496233 3788 int spacing;
9ee6e8bb
PB
3789 int stride;
3790 int size;
3791 int reg;
3792 int pass;
3793 int load;
3794 int shift;
9ee6e8bb 3795 int n;
1b2b1e54 3796 TCGv addr;
b0109805 3797 TCGv tmp;
8f8e3aa4 3798 TCGv tmp2;
84496233 3799 TCGv_i64 tmp64;
9ee6e8bb
PB
3800
3801 if (!vfp_enabled(env))
3802 return 1;
3803 VFP_DREG_D(rd, insn);
3804 rn = (insn >> 16) & 0xf;
3805 rm = insn & 0xf;
3806 load = (insn & (1 << 21)) != 0;
1b2b1e54 3807 addr = new_tmp();
9ee6e8bb
PB
3808 if ((insn & (1 << 23)) == 0) {
3809 /* Load store all elements. */
3810 op = (insn >> 8) & 0xf;
3811 size = (insn >> 6) & 3;
84496233 3812 if (op > 10)
9ee6e8bb
PB
3813 return 1;
3814 nregs = neon_ls_element_type[op].nregs;
3815 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3816 spacing = neon_ls_element_type[op].spacing;
3817 if (size == 3 && (interleave | spacing) != 1)
3818 return 1;
dcc65026 3819 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3820 stride = (1 << size) * interleave;
3821 for (reg = 0; reg < nregs; reg++) {
3822 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3823 load_reg_var(s, addr, rn);
3824 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3825 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3826 load_reg_var(s, addr, rn);
3827 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3828 }
84496233
JR
3829 if (size == 3) {
3830 if (load) {
3831 tmp64 = gen_ld64(addr, IS_USER(s));
3832 neon_store_reg64(tmp64, rd);
3833 tcg_temp_free_i64(tmp64);
3834 } else {
3835 tmp64 = tcg_temp_new_i64();
3836 neon_load_reg64(tmp64, rd);
3837 gen_st64(tmp64, addr, IS_USER(s));
3838 }
3839 tcg_gen_addi_i32(addr, addr, stride);
3840 } else {
3841 for (pass = 0; pass < 2; pass++) {
3842 if (size == 2) {
3843 if (load) {
3844 tmp = gen_ld32(addr, IS_USER(s));
3845 neon_store_reg(rd, pass, tmp);
3846 } else {
3847 tmp = neon_load_reg(rd, pass);
3848 gen_st32(tmp, addr, IS_USER(s));
3849 }
1b2b1e54 3850 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3851 } else if (size == 1) {
3852 if (load) {
3853 tmp = gen_ld16u(addr, IS_USER(s));
3854 tcg_gen_addi_i32(addr, addr, stride);
3855 tmp2 = gen_ld16u(addr, IS_USER(s));
3856 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3857 tcg_gen_shli_i32(tmp2, tmp2, 16);
3858 tcg_gen_or_i32(tmp, tmp, tmp2);
84496233
JR
3859 dead_tmp(tmp2);
3860 neon_store_reg(rd, pass, tmp);
3861 } else {
3862 tmp = neon_load_reg(rd, pass);
3863 tmp2 = new_tmp();
3864 tcg_gen_shri_i32(tmp2, tmp, 16);
3865 gen_st16(tmp, addr, IS_USER(s));
3866 tcg_gen_addi_i32(addr, addr, stride);
3867 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3868 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3869 }
84496233
JR
3870 } else /* size == 0 */ {
3871 if (load) {
3872 TCGV_UNUSED(tmp2);
3873 for (n = 0; n < 4; n++) {
3874 tmp = gen_ld8u(addr, IS_USER(s));
3875 tcg_gen_addi_i32(addr, addr, stride);
3876 if (n == 0) {
3877 tmp2 = tmp;
3878 } else {
41ba8341
PB
3879 tcg_gen_shli_i32(tmp, tmp, n * 8);
3880 tcg_gen_or_i32(tmp2, tmp2, tmp);
84496233
JR
3881 dead_tmp(tmp);
3882 }
9ee6e8bb 3883 }
84496233
JR
3884 neon_store_reg(rd, pass, tmp2);
3885 } else {
3886 tmp2 = neon_load_reg(rd, pass);
3887 for (n = 0; n < 4; n++) {
3888 tmp = new_tmp();
3889 if (n == 0) {
3890 tcg_gen_mov_i32(tmp, tmp2);
3891 } else {
3892 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3893 }
3894 gen_st8(tmp, addr, IS_USER(s));
3895 tcg_gen_addi_i32(addr, addr, stride);
3896 }
3897 dead_tmp(tmp2);
9ee6e8bb
PB
3898 }
3899 }
3900 }
3901 }
84496233 3902 rd += spacing;
9ee6e8bb
PB
3903 }
3904 stride = nregs * 8;
3905 } else {
3906 size = (insn >> 10) & 3;
3907 if (size == 3) {
3908 /* Load single element to all lanes. */
3909 if (!load)
3910 return 1;
3911 size = (insn >> 6) & 3;
3912 nregs = ((insn >> 8) & 3) + 1;
3913 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3914 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3915 for (reg = 0; reg < nregs; reg++) {
3916 switch (size) {
3917 case 0:
1b2b1e54 3918 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3919 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3920 break;
3921 case 1:
1b2b1e54 3922 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3923 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3924 break;
3925 case 2:
1b2b1e54 3926 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3927 break;
3928 case 3:
3929 return 1;
a50f5b91
PB
3930 default: /* Avoid compiler warnings. */
3931 abort();
99c475ab 3932 }
1b2b1e54 3933 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3934 tmp2 = new_tmp();
3935 tcg_gen_mov_i32(tmp2, tmp);
3936 neon_store_reg(rd, 0, tmp2);
3018f259 3937 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3938 rd += stride;
3939 }
3940 stride = (1 << size) * nregs;
3941 } else {
3942 /* Single element. */
3943 pass = (insn >> 7) & 1;
3944 switch (size) {
3945 case 0:
3946 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3947 stride = 1;
3948 break;
3949 case 1:
3950 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3951 stride = (insn & (1 << 5)) ? 2 : 1;
3952 break;
3953 case 2:
3954 shift = 0;
9ee6e8bb
PB
3955 stride = (insn & (1 << 6)) ? 2 : 1;
3956 break;
3957 default:
3958 abort();
3959 }
3960 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3961 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3962 for (reg = 0; reg < nregs; reg++) {
3963 if (load) {
9ee6e8bb
PB
3964 switch (size) {
3965 case 0:
1b2b1e54 3966 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3967 break;
3968 case 1:
1b2b1e54 3969 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3970 break;
3971 case 2:
1b2b1e54 3972 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3973 break;
a50f5b91
PB
3974 default: /* Avoid compiler warnings. */
3975 abort();
9ee6e8bb
PB
3976 }
3977 if (size != 2) {
8f8e3aa4
PB
3978 tmp2 = neon_load_reg(rd, pass);
3979 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3980 dead_tmp(tmp2);
9ee6e8bb 3981 }
8f8e3aa4 3982 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3983 } else { /* Store */
8f8e3aa4
PB
3984 tmp = neon_load_reg(rd, pass);
3985 if (shift)
3986 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3987 switch (size) {
3988 case 0:
1b2b1e54 3989 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3990 break;
3991 case 1:
1b2b1e54 3992 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3993 break;
3994 case 2:
1b2b1e54 3995 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3996 break;
99c475ab 3997 }
99c475ab 3998 }
9ee6e8bb 3999 rd += stride;
1b2b1e54 4000 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4001 }
9ee6e8bb 4002 stride = nregs * (1 << size);
99c475ab 4003 }
9ee6e8bb 4004 }
1b2b1e54 4005 dead_tmp(addr);
9ee6e8bb 4006 if (rm != 15) {
b26eefb6
PB
4007 TCGv base;
4008
4009 base = load_reg(s, rn);
9ee6e8bb 4010 if (rm == 13) {
b26eefb6 4011 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4012 } else {
b26eefb6
PB
4013 TCGv index;
4014 index = load_reg(s, rm);
4015 tcg_gen_add_i32(base, base, index);
4016 dead_tmp(index);
9ee6e8bb 4017 }
b26eefb6 4018 store_reg(s, rn, base);
9ee6e8bb
PB
4019 }
4020 return 0;
4021}
3b46e624 4022
8f8e3aa4
PB
4023/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4024static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4025{
4026 tcg_gen_and_i32(t, t, c);
f669df27 4027 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4028 tcg_gen_or_i32(dest, t, f);
4029}
4030
a7812ae4 4031static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4032{
4033 switch (size) {
4034 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4035 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4036 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4037 default: abort();
4038 }
4039}
4040
a7812ae4 4041static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4042{
4043 switch (size) {
4044 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4045 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4046 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4047 default: abort();
4048 }
4049}
4050
a7812ae4 4051static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4052{
4053 switch (size) {
4054 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4055 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4056 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4057 default: abort();
4058 }
4059}
4060
4061static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4062 int q, int u)
4063{
4064 if (q) {
4065 if (u) {
4066 switch (size) {
4067 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4068 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4069 default: abort();
4070 }
4071 } else {
4072 switch (size) {
4073 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4074 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4075 default: abort();
4076 }
4077 }
4078 } else {
4079 if (u) {
4080 switch (size) {
4081 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4082 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4083 default: abort();
4084 }
4085 } else {
4086 switch (size) {
4087 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4088 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4089 default: abort();
4090 }
4091 }
4092 }
4093}
4094
a7812ae4 4095static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4096{
4097 if (u) {
4098 switch (size) {
4099 case 0: gen_helper_neon_widen_u8(dest, src); break;
4100 case 1: gen_helper_neon_widen_u16(dest, src); break;
4101 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4102 default: abort();
4103 }
4104 } else {
4105 switch (size) {
4106 case 0: gen_helper_neon_widen_s8(dest, src); break;
4107 case 1: gen_helper_neon_widen_s16(dest, src); break;
4108 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4109 default: abort();
4110 }
4111 }
4112 dead_tmp(src);
4113}
4114
4115static inline void gen_neon_addl(int size)
4116{
4117 switch (size) {
4118 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4119 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4120 case 2: tcg_gen_add_i64(CPU_V001); break;
4121 default: abort();
4122 }
4123}
4124
4125static inline void gen_neon_subl(int size)
4126{
4127 switch (size) {
4128 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4129 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4130 case 2: tcg_gen_sub_i64(CPU_V001); break;
4131 default: abort();
4132 }
4133}
4134
a7812ae4 4135static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4136{
4137 switch (size) {
4138 case 0: gen_helper_neon_negl_u16(var, var); break;
4139 case 1: gen_helper_neon_negl_u32(var, var); break;
4140 case 2: gen_helper_neon_negl_u64(var, var); break;
4141 default: abort();
4142 }
4143}
4144
a7812ae4 4145static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4146{
4147 switch (size) {
4148 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4149 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4150 default: abort();
4151 }
4152}
4153
a7812ae4 4154static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4155{
a7812ae4 4156 TCGv_i64 tmp;
ad69471c
PB
4157
4158 switch ((size << 1) | u) {
4159 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4160 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4161 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4162 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4163 case 4:
4164 tmp = gen_muls_i64_i32(a, b);
4165 tcg_gen_mov_i64(dest, tmp);
4166 break;
4167 case 5:
4168 tmp = gen_mulu_i64_i32(a, b);
4169 tcg_gen_mov_i64(dest, tmp);
4170 break;
4171 default: abort();
4172 }
ad69471c
PB
4173}
4174
9ee6e8bb
PB
4175/* Translate a NEON data processing instruction. Return nonzero if the
4176 instruction is invalid.
ad69471c
PB
4177 We process data in a mixture of 32-bit and 64-bit chunks.
4178 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4179
9ee6e8bb
PB
4180static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4181{
4182 int op;
4183 int q;
4184 int rd, rn, rm;
4185 int size;
4186 int shift;
4187 int pass;
4188 int count;
4189 int pairwise;
4190 int u;
4191 int n;
ca9a32e4 4192 uint32_t imm, mask;
b75263d6 4193 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4194 TCGv_i64 tmp64;
9ee6e8bb
PB
4195
4196 if (!vfp_enabled(env))
4197 return 1;
4198 q = (insn & (1 << 6)) != 0;
4199 u = (insn >> 24) & 1;
4200 VFP_DREG_D(rd, insn);
4201 VFP_DREG_N(rn, insn);
4202 VFP_DREG_M(rm, insn);
4203 size = (insn >> 20) & 3;
4204 if ((insn & (1 << 23)) == 0) {
4205 /* Three register same length. */
4206 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4207 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4208 || op == 10 || op == 11 || op == 16)) {
4209 /* 64-bit element instructions. */
9ee6e8bb 4210 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4211 neon_load_reg64(cpu_V0, rn + pass);
4212 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4213 switch (op) {
4214 case 1: /* VQADD */
4215 if (u) {
ad69471c 4216 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4217 } else {
ad69471c 4218 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4219 }
9ee6e8bb
PB
4220 break;
4221 case 5: /* VQSUB */
4222 if (u) {
ad69471c
PB
4223 gen_helper_neon_sub_saturate_u64(CPU_V001);
4224 } else {
4225 gen_helper_neon_sub_saturate_s64(CPU_V001);
4226 }
4227 break;
4228 case 8: /* VSHL */
4229 if (u) {
4230 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4231 } else {
4232 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4233 }
4234 break;
4235 case 9: /* VQSHL */
4236 if (u) {
4237 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4238 cpu_V0, cpu_V0);
4239 } else {
4240 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4241 cpu_V1, cpu_V0);
4242 }
4243 break;
4244 case 10: /* VRSHL */
4245 if (u) {
4246 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4247 } else {
ad69471c
PB
4248 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4249 }
4250 break;
4251 case 11: /* VQRSHL */
4252 if (u) {
4253 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4254 cpu_V1, cpu_V0);
4255 } else {
4256 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4257 cpu_V1, cpu_V0);
1e8d4eec 4258 }
9ee6e8bb
PB
4259 break;
4260 case 16:
4261 if (u) {
ad69471c 4262 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4263 } else {
ad69471c 4264 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4265 }
4266 break;
4267 default:
4268 abort();
2c0262af 4269 }
ad69471c 4270 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4271 }
9ee6e8bb 4272 return 0;
2c0262af 4273 }
9ee6e8bb
PB
4274 switch (op) {
4275 case 8: /* VSHL */
4276 case 9: /* VQSHL */
4277 case 10: /* VRSHL */
ad69471c 4278 case 11: /* VQRSHL */
9ee6e8bb 4279 {
ad69471c
PB
4280 int rtmp;
4281 /* Shift instruction operands are reversed. */
4282 rtmp = rn;
9ee6e8bb 4283 rn = rm;
ad69471c 4284 rm = rtmp;
9ee6e8bb
PB
4285 pairwise = 0;
4286 }
2c0262af 4287 break;
9ee6e8bb
PB
4288 case 20: /* VPMAX */
4289 case 21: /* VPMIN */
4290 case 23: /* VPADD */
4291 pairwise = 1;
2c0262af 4292 break;
9ee6e8bb
PB
4293 case 26: /* VPADD (float) */
4294 pairwise = (u && size < 2);
2c0262af 4295 break;
9ee6e8bb
PB
4296 case 30: /* VPMIN/VPMAX (float) */
4297 pairwise = u;
2c0262af 4298 break;
9ee6e8bb
PB
4299 default:
4300 pairwise = 0;
2c0262af 4301 break;
9ee6e8bb 4302 }
dd8fbd78 4303
9ee6e8bb
PB
4304 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4305
4306 if (pairwise) {
4307 /* Pairwise. */
4308 if (q)
4309 n = (pass & 1) * 2;
2c0262af 4310 else
9ee6e8bb
PB
4311 n = 0;
4312 if (pass < q + 1) {
dd8fbd78
FN
4313 tmp = neon_load_reg(rn, n);
4314 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4315 } else {
dd8fbd78
FN
4316 tmp = neon_load_reg(rm, n);
4317 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4318 }
4319 } else {
4320 /* Elementwise. */
dd8fbd78
FN
4321 tmp = neon_load_reg(rn, pass);
4322 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4323 }
4324 switch (op) {
4325 case 0: /* VHADD */
4326 GEN_NEON_INTEGER_OP(hadd);
4327 break;
4328 case 1: /* VQADD */
ad69471c 4329 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4330 break;
9ee6e8bb
PB
4331 case 2: /* VRHADD */
4332 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4333 break;
9ee6e8bb
PB
4334 case 3: /* Logic ops. */
4335 switch ((u << 2) | size) {
4336 case 0: /* VAND */
dd8fbd78 4337 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4338 break;
4339 case 1: /* BIC */
f669df27 4340 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4341 break;
4342 case 2: /* VORR */
dd8fbd78 4343 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4344 break;
4345 case 3: /* VORN */
f669df27 4346 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4347 break;
4348 case 4: /* VEOR */
dd8fbd78 4349 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4350 break;
4351 case 5: /* VBSL */
dd8fbd78
FN
4352 tmp3 = neon_load_reg(rd, pass);
4353 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4354 dead_tmp(tmp3);
9ee6e8bb
PB
4355 break;
4356 case 6: /* VBIT */
dd8fbd78
FN
4357 tmp3 = neon_load_reg(rd, pass);
4358 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4359 dead_tmp(tmp3);
9ee6e8bb
PB
4360 break;
4361 case 7: /* VBIF */
dd8fbd78
FN
4362 tmp3 = neon_load_reg(rd, pass);
4363 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4364 dead_tmp(tmp3);
9ee6e8bb 4365 break;
2c0262af
FB
4366 }
4367 break;
9ee6e8bb
PB
4368 case 4: /* VHSUB */
4369 GEN_NEON_INTEGER_OP(hsub);
4370 break;
4371 case 5: /* VQSUB */
ad69471c 4372 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4373 break;
9ee6e8bb
PB
4374 case 6: /* VCGT */
4375 GEN_NEON_INTEGER_OP(cgt);
4376 break;
4377 case 7: /* VCGE */
4378 GEN_NEON_INTEGER_OP(cge);
4379 break;
4380 case 8: /* VSHL */
ad69471c 4381 GEN_NEON_INTEGER_OP(shl);
2c0262af 4382 break;
9ee6e8bb 4383 case 9: /* VQSHL */
ad69471c 4384 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4385 break;
9ee6e8bb 4386 case 10: /* VRSHL */
ad69471c 4387 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4388 break;
9ee6e8bb 4389 case 11: /* VQRSHL */
ad69471c 4390 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4391 break;
4392 case 12: /* VMAX */
4393 GEN_NEON_INTEGER_OP(max);
4394 break;
4395 case 13: /* VMIN */
4396 GEN_NEON_INTEGER_OP(min);
4397 break;
4398 case 14: /* VABD */
4399 GEN_NEON_INTEGER_OP(abd);
4400 break;
4401 case 15: /* VABA */
4402 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4403 dead_tmp(tmp2);
4404 tmp2 = neon_load_reg(rd, pass);
4405 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4406 break;
4407 case 16:
4408 if (!u) { /* VADD */
dd8fbd78 4409 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4410 return 1;
4411 } else { /* VSUB */
4412 switch (size) {
dd8fbd78
FN
4413 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4414 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4415 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4416 default: return 1;
4417 }
4418 }
4419 break;
4420 case 17:
4421 if (!u) { /* VTST */
4422 switch (size) {
dd8fbd78
FN
4423 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4424 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4425 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4426 default: return 1;
4427 }
4428 } else { /* VCEQ */
4429 switch (size) {
dd8fbd78
FN
4430 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4431 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4432 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4433 default: return 1;
4434 }
4435 }
4436 break;
4437 case 18: /* Multiply. */
4438 switch (size) {
dd8fbd78
FN
4439 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4440 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4441 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4442 default: return 1;
4443 }
dd8fbd78
FN
4444 dead_tmp(tmp2);
4445 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4446 if (u) { /* VMLS */
dd8fbd78 4447 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4448 } else { /* VMLA */
dd8fbd78 4449 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4450 }
4451 break;
4452 case 19: /* VMUL */
4453 if (u) { /* polynomial */
dd8fbd78 4454 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4455 } else { /* Integer */
4456 switch (size) {
dd8fbd78
FN
4457 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4458 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4459 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4460 default: return 1;
4461 }
4462 }
4463 break;
4464 case 20: /* VPMAX */
4465 GEN_NEON_INTEGER_OP(pmax);
4466 break;
4467 case 21: /* VPMIN */
4468 GEN_NEON_INTEGER_OP(pmin);
4469 break;
4470 case 22: /* Multiply high. */
4471 if (!u) { /* VQDMULH */
4472 switch (size) {
dd8fbd78
FN
4473 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4474 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4475 default: return 1;
4476 }
4477 } else { /* VQRDMULH */
4478 switch (size) {
dd8fbd78
FN
4479 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4480 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4481 default: return 1;
4482 }
4483 }
4484 break;
4485 case 23: /* VPADD */
4486 if (u)
4487 return 1;
4488 switch (size) {
dd8fbd78
FN
4489 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4490 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4491 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4492 default: return 1;
4493 }
4494 break;
4495 case 26: /* Floating point arithmetic. */
4496 switch ((u << 2) | size) {
4497 case 0: /* VADD */
dd8fbd78 4498 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4499 break;
4500 case 2: /* VSUB */
dd8fbd78 4501 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4502 break;
4503 case 4: /* VPADD */
dd8fbd78 4504 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4505 break;
4506 case 6: /* VABD */
dd8fbd78 4507 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4508 break;
4509 default:
4510 return 1;
4511 }
4512 break;
4513 case 27: /* Float multiply. */
dd8fbd78 4514 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4515 if (!u) {
dd8fbd78
FN
4516 dead_tmp(tmp2);
4517 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4518 if (size == 0) {
dd8fbd78 4519 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4520 } else {
dd8fbd78 4521 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4522 }
4523 }
4524 break;
4525 case 28: /* Float compare. */
4526 if (!u) {
dd8fbd78 4527 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4528 } else {
9ee6e8bb 4529 if (size == 0)
dd8fbd78 4530 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4531 else
dd8fbd78 4532 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4533 }
2c0262af 4534 break;
9ee6e8bb
PB
4535 case 29: /* Float compare absolute. */
4536 if (!u)
4537 return 1;
4538 if (size == 0)
dd8fbd78 4539 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4540 else
dd8fbd78 4541 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4542 break;
9ee6e8bb
PB
4543 case 30: /* Float min/max. */
4544 if (size == 0)
dd8fbd78 4545 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4546 else
dd8fbd78 4547 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4548 break;
4549 case 31:
4550 if (size == 0)
dd8fbd78 4551 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4552 else
dd8fbd78 4553 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4554 break;
9ee6e8bb
PB
4555 default:
4556 abort();
2c0262af 4557 }
dd8fbd78
FN
4558 dead_tmp(tmp2);
4559
9ee6e8bb
PB
4560 /* Save the result. For elementwise operations we can put it
4561 straight into the destination register. For pairwise operations
4562 we have to be careful to avoid clobbering the source operands. */
4563 if (pairwise && rd == rm) {
dd8fbd78 4564 neon_store_scratch(pass, tmp);
9ee6e8bb 4565 } else {
dd8fbd78 4566 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4567 }
4568
4569 } /* for pass */
4570 if (pairwise && rd == rm) {
4571 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4572 tmp = neon_load_scratch(pass);
4573 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4574 }
4575 }
ad69471c 4576 /* End of 3 register same size operations. */
9ee6e8bb
PB
4577 } else if (insn & (1 << 4)) {
4578 if ((insn & 0x00380080) != 0) {
4579 /* Two registers and shift. */
4580 op = (insn >> 8) & 0xf;
4581 if (insn & (1 << 7)) {
4582 /* 64-bit shift. */
4583 size = 3;
4584 } else {
4585 size = 2;
4586 while ((insn & (1 << (size + 19))) == 0)
4587 size--;
4588 }
4589 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4590 /* To avoid excessive duplication of ops we implement shift
4591 by immediate using the variable shift operations. */
4592 if (op < 8) {
4593 /* Shift by immediate:
4594 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4595 /* Right shifts are encoded as N - shift, where N is the
4596 element size in bits. */
4597 if (op <= 4)
4598 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4599 if (size == 3) {
4600 count = q + 1;
4601 } else {
4602 count = q ? 4: 2;
4603 }
4604 switch (size) {
4605 case 0:
4606 imm = (uint8_t) shift;
4607 imm |= imm << 8;
4608 imm |= imm << 16;
4609 break;
4610 case 1:
4611 imm = (uint16_t) shift;
4612 imm |= imm << 16;
4613 break;
4614 case 2:
4615 case 3:
4616 imm = shift;
4617 break;
4618 default:
4619 abort();
4620 }
4621
4622 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4623 if (size == 3) {
4624 neon_load_reg64(cpu_V0, rm + pass);
4625 tcg_gen_movi_i64(cpu_V1, imm);
4626 switch (op) {
4627 case 0: /* VSHR */
4628 case 1: /* VSRA */
4629 if (u)
4630 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4631 else
ad69471c 4632 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4633 break;
ad69471c
PB
4634 case 2: /* VRSHR */
4635 case 3: /* VRSRA */
4636 if (u)
4637 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4638 else
ad69471c 4639 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4640 break;
ad69471c
PB
4641 case 4: /* VSRI */
4642 if (!u)
4643 return 1;
4644 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4645 break;
4646 case 5: /* VSHL, VSLI */
4647 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4648 break;
4649 case 6: /* VQSHL */
4650 if (u)
4651 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4652 else
ad69471c
PB
4653 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4654 break;
4655 case 7: /* VQSHLU */
4656 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4657 break;
9ee6e8bb 4658 }
ad69471c
PB
4659 if (op == 1 || op == 3) {
4660 /* Accumulate. */
4661 neon_load_reg64(cpu_V0, rd + pass);
4662 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4663 } else if (op == 4 || (op == 5 && u)) {
4664 /* Insert */
4665 cpu_abort(env, "VS[LR]I.64 not implemented");
4666 }
4667 neon_store_reg64(cpu_V0, rd + pass);
4668 } else { /* size < 3 */
4669 /* Operands in T0 and T1. */
dd8fbd78
FN
4670 tmp = neon_load_reg(rm, pass);
4671 tmp2 = new_tmp();
4672 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4673 switch (op) {
4674 case 0: /* VSHR */
4675 case 1: /* VSRA */
4676 GEN_NEON_INTEGER_OP(shl);
4677 break;
4678 case 2: /* VRSHR */
4679 case 3: /* VRSRA */
4680 GEN_NEON_INTEGER_OP(rshl);
4681 break;
4682 case 4: /* VSRI */
4683 if (!u)
4684 return 1;
4685 GEN_NEON_INTEGER_OP(shl);
4686 break;
4687 case 5: /* VSHL, VSLI */
4688 switch (size) {
dd8fbd78
FN
4689 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4690 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4691 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4692 default: return 1;
4693 }
4694 break;
4695 case 6: /* VQSHL */
4696 GEN_NEON_INTEGER_OP_ENV(qshl);
4697 break;
4698 case 7: /* VQSHLU */
4699 switch (size) {
dd8fbd78
FN
4700 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4701 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4702 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4703 default: return 1;
4704 }
4705 break;
4706 }
dd8fbd78 4707 dead_tmp(tmp2);
ad69471c
PB
4708
4709 if (op == 1 || op == 3) {
4710 /* Accumulate. */
dd8fbd78
FN
4711 tmp2 = neon_load_reg(rd, pass);
4712 gen_neon_add(size, tmp2, tmp);
4713 dead_tmp(tmp2);
ad69471c
PB
4714 } else if (op == 4 || (op == 5 && u)) {
4715 /* Insert */
4716 switch (size) {
4717 case 0:
4718 if (op == 4)
ca9a32e4 4719 mask = 0xff >> -shift;
ad69471c 4720 else
ca9a32e4
JR
4721 mask = (uint8_t)(0xff << shift);
4722 mask |= mask << 8;
4723 mask |= mask << 16;
ad69471c
PB
4724 break;
4725 case 1:
4726 if (op == 4)
ca9a32e4 4727 mask = 0xffff >> -shift;
ad69471c 4728 else
ca9a32e4
JR
4729 mask = (uint16_t)(0xffff << shift);
4730 mask |= mask << 16;
ad69471c
PB
4731 break;
4732 case 2:
ca9a32e4
JR
4733 if (shift < -31 || shift > 31) {
4734 mask = 0;
4735 } else {
4736 if (op == 4)
4737 mask = 0xffffffffu >> -shift;
4738 else
4739 mask = 0xffffffffu << shift;
4740 }
ad69471c
PB
4741 break;
4742 default:
4743 abort();
4744 }
dd8fbd78 4745 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4746 tcg_gen_andi_i32(tmp, tmp, mask);
4747 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78
FN
4748 tcg_gen_or_i32(tmp, tmp, tmp2);
4749 dead_tmp(tmp2);
ad69471c 4750 }
dd8fbd78 4751 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4752 }
4753 } /* for pass */
4754 } else if (op < 10) {
ad69471c 4755 /* Shift by immediate and narrow:
9ee6e8bb
PB
4756 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4757 shift = shift - (1 << (size + 3));
4758 size++;
9ee6e8bb
PB
4759 switch (size) {
4760 case 1:
ad69471c 4761 imm = (uint16_t)shift;
9ee6e8bb 4762 imm |= imm << 16;
ad69471c 4763 tmp2 = tcg_const_i32(imm);
a7812ae4 4764 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4765 break;
4766 case 2:
ad69471c
PB
4767 imm = (uint32_t)shift;
4768 tmp2 = tcg_const_i32(imm);
a7812ae4 4769 TCGV_UNUSED_I64(tmp64);
4cc633c3 4770 break;
9ee6e8bb 4771 case 3:
a7812ae4
PB
4772 tmp64 = tcg_const_i64(shift);
4773 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4774 break;
4775 default:
4776 abort();
4777 }
4778
ad69471c
PB
4779 for (pass = 0; pass < 2; pass++) {
4780 if (size == 3) {
4781 neon_load_reg64(cpu_V0, rm + pass);
4782 if (q) {
4783 if (u)
a7812ae4 4784 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4785 else
a7812ae4 4786 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4787 } else {
4788 if (u)
a7812ae4 4789 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4790 else
a7812ae4 4791 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4792 }
2c0262af 4793 } else {
ad69471c
PB
4794 tmp = neon_load_reg(rm + pass, 0);
4795 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4796 tmp3 = neon_load_reg(rm + pass, 1);
4797 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4798 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4799 dead_tmp(tmp);
36aa55dc 4800 dead_tmp(tmp3);
9ee6e8bb 4801 }
ad69471c
PB
4802 tmp = new_tmp();
4803 if (op == 8 && !u) {
4804 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4805 } else {
ad69471c
PB
4806 if (op == 8)
4807 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4808 else
ad69471c
PB
4809 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4810 }
2301db49 4811 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4812 } /* for pass */
b75263d6
JR
4813 if (size == 3) {
4814 tcg_temp_free_i64(tmp64);
2301db49
JR
4815 } else {
4816 dead_tmp(tmp2);
b75263d6 4817 }
9ee6e8bb
PB
4818 } else if (op == 10) {
4819 /* VSHLL */
ad69471c 4820 if (q || size == 3)
9ee6e8bb 4821 return 1;
ad69471c
PB
4822 tmp = neon_load_reg(rm, 0);
4823 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4824 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4825 if (pass == 1)
4826 tmp = tmp2;
4827
4828 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4829
9ee6e8bb
PB
4830 if (shift != 0) {
4831 /* The shift is less than the width of the source
ad69471c
PB
4832 type, so we can just shift the whole register. */
4833 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4834 if (size < 2 || !u) {
4835 uint64_t imm64;
4836 if (size == 0) {
4837 imm = (0xffu >> (8 - shift));
4838 imm |= imm << 16;
4839 } else {
4840 imm = 0xffff >> (16 - shift);
9ee6e8bb 4841 }
ad69471c
PB
4842 imm64 = imm | (((uint64_t)imm) << 32);
4843 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4844 }
4845 }
ad69471c 4846 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4847 }
4848 } else if (op == 15 || op == 16) {
4849 /* VCVT fixed-point. */
4850 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4851 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4852 if (op & 1) {
4853 if (u)
4373f3ce 4854 gen_vfp_ulto(0, shift);
9ee6e8bb 4855 else
4373f3ce 4856 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4857 } else {
4858 if (u)
4373f3ce 4859 gen_vfp_toul(0, shift);
9ee6e8bb 4860 else
4373f3ce 4861 gen_vfp_tosl(0, shift);
2c0262af 4862 }
4373f3ce 4863 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4864 }
4865 } else {
9ee6e8bb
PB
4866 return 1;
4867 }
4868 } else { /* (insn & 0x00380080) == 0 */
4869 int invert;
4870
4871 op = (insn >> 8) & 0xf;
4872 /* One register and immediate. */
4873 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4874 invert = (insn & (1 << 5)) != 0;
4875 switch (op) {
4876 case 0: case 1:
4877 /* no-op */
4878 break;
4879 case 2: case 3:
4880 imm <<= 8;
4881 break;
4882 case 4: case 5:
4883 imm <<= 16;
4884 break;
4885 case 6: case 7:
4886 imm <<= 24;
4887 break;
4888 case 8: case 9:
4889 imm |= imm << 16;
4890 break;
4891 case 10: case 11:
4892 imm = (imm << 8) | (imm << 24);
4893 break;
4894 case 12:
8e31209e 4895 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
4896 break;
4897 case 13:
4898 imm = (imm << 16) | 0xffff;
4899 break;
4900 case 14:
4901 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4902 if (invert)
4903 imm = ~imm;
4904 break;
4905 case 15:
4906 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4907 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4908 break;
4909 }
4910 if (invert)
4911 imm = ~imm;
4912
9ee6e8bb
PB
4913 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4914 if (op & 1 && op < 12) {
ad69471c 4915 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4916 if (invert) {
4917 /* The immediate value has already been inverted, so
4918 BIC becomes AND. */
ad69471c 4919 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4920 } else {
ad69471c 4921 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4922 }
9ee6e8bb 4923 } else {
ad69471c
PB
4924 /* VMOV, VMVN. */
4925 tmp = new_tmp();
9ee6e8bb 4926 if (op == 14 && invert) {
ad69471c
PB
4927 uint32_t val;
4928 val = 0;
9ee6e8bb
PB
4929 for (n = 0; n < 4; n++) {
4930 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4931 val |= 0xff << (n * 8);
9ee6e8bb 4932 }
ad69471c
PB
4933 tcg_gen_movi_i32(tmp, val);
4934 } else {
4935 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4936 }
9ee6e8bb 4937 }
ad69471c 4938 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4939 }
4940 }
e4b3861d 4941 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4942 if (size != 3) {
4943 op = (insn >> 8) & 0xf;
4944 if ((insn & (1 << 6)) == 0) {
4945 /* Three registers of different lengths. */
4946 int src1_wide;
4947 int src2_wide;
4948 int prewiden;
4949 /* prewiden, src1_wide, src2_wide */
4950 static const int neon_3reg_wide[16][3] = {
4951 {1, 0, 0}, /* VADDL */
4952 {1, 1, 0}, /* VADDW */
4953 {1, 0, 0}, /* VSUBL */
4954 {1, 1, 0}, /* VSUBW */
4955 {0, 1, 1}, /* VADDHN */
4956 {0, 0, 0}, /* VABAL */
4957 {0, 1, 1}, /* VSUBHN */
4958 {0, 0, 0}, /* VABDL */
4959 {0, 0, 0}, /* VMLAL */
4960 {0, 0, 0}, /* VQDMLAL */
4961 {0, 0, 0}, /* VMLSL */
4962 {0, 0, 0}, /* VQDMLSL */
4963 {0, 0, 0}, /* Integer VMULL */
4964 {0, 0, 0}, /* VQDMULL */
4965 {0, 0, 0} /* Polynomial VMULL */
4966 };
4967
4968 prewiden = neon_3reg_wide[op][0];
4969 src1_wide = neon_3reg_wide[op][1];
4970 src2_wide = neon_3reg_wide[op][2];
4971
ad69471c
PB
4972 if (size == 0 && (op == 9 || op == 11 || op == 13))
4973 return 1;
4974
9ee6e8bb
PB
4975 /* Avoid overlapping operands. Wide source operands are
4976 always aligned so will never overlap with wide
4977 destinations in problematic ways. */
8f8e3aa4 4978 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4979 tmp = neon_load_reg(rm, 1);
4980 neon_store_scratch(2, tmp);
8f8e3aa4 4981 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4982 tmp = neon_load_reg(rn, 1);
4983 neon_store_scratch(2, tmp);
9ee6e8bb 4984 }
a50f5b91 4985 TCGV_UNUSED(tmp3);
9ee6e8bb 4986 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4987 if (src1_wide) {
4988 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4989 TCGV_UNUSED(tmp);
9ee6e8bb 4990 } else {
ad69471c 4991 if (pass == 1 && rd == rn) {
dd8fbd78 4992 tmp = neon_load_scratch(2);
9ee6e8bb 4993 } else {
ad69471c
PB
4994 tmp = neon_load_reg(rn, pass);
4995 }
4996 if (prewiden) {
4997 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4998 }
4999 }
ad69471c
PB
5000 if (src2_wide) {
5001 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5002 TCGV_UNUSED(tmp2);
9ee6e8bb 5003 } else {
ad69471c 5004 if (pass == 1 && rd == rm) {
dd8fbd78 5005 tmp2 = neon_load_scratch(2);
9ee6e8bb 5006 } else {
ad69471c
PB
5007 tmp2 = neon_load_reg(rm, pass);
5008 }
5009 if (prewiden) {
5010 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5011 }
9ee6e8bb
PB
5012 }
5013 switch (op) {
5014 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5015 gen_neon_addl(size);
9ee6e8bb 5016 break;
79b0e534 5017 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5018 gen_neon_subl(size);
9ee6e8bb
PB
5019 break;
5020 case 5: case 7: /* VABAL, VABDL */
5021 switch ((size << 1) | u) {
ad69471c
PB
5022 case 0:
5023 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5024 break;
5025 case 1:
5026 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5027 break;
5028 case 2:
5029 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5030 break;
5031 case 3:
5032 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5033 break;
5034 case 4:
5035 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5036 break;
5037 case 5:
5038 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5039 break;
9ee6e8bb
PB
5040 default: abort();
5041 }
ad69471c
PB
5042 dead_tmp(tmp2);
5043 dead_tmp(tmp);
9ee6e8bb
PB
5044 break;
5045 case 8: case 9: case 10: case 11: case 12: case 13:
5046 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5047 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
5048 dead_tmp(tmp2);
5049 dead_tmp(tmp);
9ee6e8bb
PB
5050 break;
5051 case 14: /* Polynomial VMULL */
5052 cpu_abort(env, "Polynomial VMULL not implemented");
5053
5054 default: /* 15 is RESERVED. */
5055 return 1;
5056 }
5057 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5058 /* Accumulate. */
5059 if (op == 10 || op == 11) {
ad69471c 5060 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
5061 }
5062
9ee6e8bb 5063 if (op != 13) {
ad69471c 5064 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
5065 }
5066
5067 switch (op) {
5068 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 5069 gen_neon_addl(size);
9ee6e8bb
PB
5070 break;
5071 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5072 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5073 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5074 break;
9ee6e8bb
PB
5075 /* Fall through. */
5076 case 13: /* VQDMULL */
ad69471c 5077 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5078 break;
5079 default:
5080 abort();
5081 }
ad69471c 5082 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5083 } else if (op == 4 || op == 6) {
5084 /* Narrowing operation. */
ad69471c 5085 tmp = new_tmp();
79b0e534 5086 if (!u) {
9ee6e8bb 5087 switch (size) {
ad69471c
PB
5088 case 0:
5089 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5090 break;
5091 case 1:
5092 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5093 break;
5094 case 2:
5095 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5096 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5097 break;
9ee6e8bb
PB
5098 default: abort();
5099 }
5100 } else {
5101 switch (size) {
ad69471c
PB
5102 case 0:
5103 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5104 break;
5105 case 1:
5106 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5107 break;
5108 case 2:
5109 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5110 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5111 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5112 break;
9ee6e8bb
PB
5113 default: abort();
5114 }
5115 }
ad69471c
PB
5116 if (pass == 0) {
5117 tmp3 = tmp;
5118 } else {
5119 neon_store_reg(rd, 0, tmp3);
5120 neon_store_reg(rd, 1, tmp);
5121 }
9ee6e8bb
PB
5122 } else {
5123 /* Write back the result. */
ad69471c 5124 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5125 }
5126 }
5127 } else {
5128 /* Two registers and a scalar. */
5129 switch (op) {
5130 case 0: /* Integer VMLA scalar */
5131 case 1: /* Float VMLA scalar */
5132 case 4: /* Integer VMLS scalar */
5133 case 5: /* Floating point VMLS scalar */
5134 case 8: /* Integer VMUL scalar */
5135 case 9: /* Floating point VMUL scalar */
5136 case 12: /* VQDMULH scalar */
5137 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5138 tmp = neon_get_scalar(size, rm);
5139 neon_store_scratch(0, tmp);
9ee6e8bb 5140 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5141 tmp = neon_load_scratch(0);
5142 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5143 if (op == 12) {
5144 if (size == 1) {
dd8fbd78 5145 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5146 } else {
dd8fbd78 5147 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5148 }
5149 } else if (op == 13) {
5150 if (size == 1) {
dd8fbd78 5151 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5152 } else {
dd8fbd78 5153 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5154 }
5155 } else if (op & 1) {
dd8fbd78 5156 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5157 } else {
5158 switch (size) {
dd8fbd78
FN
5159 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5160 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5161 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5162 default: return 1;
5163 }
5164 }
dd8fbd78 5165 dead_tmp(tmp2);
9ee6e8bb
PB
5166 if (op < 8) {
5167 /* Accumulate. */
dd8fbd78 5168 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5169 switch (op) {
5170 case 0:
dd8fbd78 5171 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5172 break;
5173 case 1:
dd8fbd78 5174 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5175 break;
5176 case 4:
dd8fbd78 5177 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5178 break;
5179 case 5:
dd8fbd78 5180 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5181 break;
5182 default:
5183 abort();
5184 }
dd8fbd78 5185 dead_tmp(tmp2);
9ee6e8bb 5186 }
dd8fbd78 5187 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5188 }
5189 break;
5190 case 2: /* VMLAL scalar */
5191 case 3: /* VQDMLAL scalar */
5192 case 6: /* VMLSL scalar */
5193 case 7: /* VQDMLSL scalar */
5194 case 10: /* VMULL scalar */
5195 case 11: /* VQDMULL scalar */
ad69471c
PB
5196 if (size == 0 && (op == 3 || op == 7 || op == 11))
5197 return 1;
5198
dd8fbd78
FN
5199 tmp2 = neon_get_scalar(size, rm);
5200 tmp3 = neon_load_reg(rn, 1);
ad69471c 5201
9ee6e8bb 5202 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5203 if (pass == 0) {
5204 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5205 } else {
dd8fbd78 5206 tmp = tmp3;
9ee6e8bb 5207 }
ad69471c 5208 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5209 dead_tmp(tmp);
9ee6e8bb 5210 if (op == 6 || op == 7) {
ad69471c
PB
5211 gen_neon_negl(cpu_V0, size);
5212 }
5213 if (op != 11) {
5214 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5215 }
9ee6e8bb
PB
5216 switch (op) {
5217 case 2: case 6:
ad69471c 5218 gen_neon_addl(size);
9ee6e8bb
PB
5219 break;
5220 case 3: case 7:
ad69471c
PB
5221 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5222 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5223 break;
5224 case 10:
5225 /* no-op */
5226 break;
5227 case 11:
ad69471c 5228 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5229 break;
5230 default:
5231 abort();
5232 }
ad69471c 5233 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5234 }
dd8fbd78
FN
5235
5236 dead_tmp(tmp2);
5237
9ee6e8bb
PB
5238 break;
5239 default: /* 14 and 15 are RESERVED */
5240 return 1;
5241 }
5242 }
5243 } else { /* size == 3 */
5244 if (!u) {
5245 /* Extract. */
9ee6e8bb 5246 imm = (insn >> 8) & 0xf;
ad69471c
PB
5247
5248 if (imm > 7 && !q)
5249 return 1;
5250
5251 if (imm == 0) {
5252 neon_load_reg64(cpu_V0, rn);
5253 if (q) {
5254 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5255 }
ad69471c
PB
5256 } else if (imm == 8) {
5257 neon_load_reg64(cpu_V0, rn + 1);
5258 if (q) {
5259 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5260 }
ad69471c 5261 } else if (q) {
a7812ae4 5262 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5263 if (imm < 8) {
5264 neon_load_reg64(cpu_V0, rn);
a7812ae4 5265 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5266 } else {
5267 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5268 neon_load_reg64(tmp64, rm);
ad69471c
PB
5269 }
5270 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5271 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5272 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5273 if (imm < 8) {
5274 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5275 } else {
ad69471c
PB
5276 neon_load_reg64(cpu_V1, rm + 1);
5277 imm -= 8;
9ee6e8bb 5278 }
ad69471c 5279 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5280 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5281 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5282 tcg_temp_free_i64(tmp64);
ad69471c 5283 } else {
a7812ae4 5284 /* BUGFIX */
ad69471c 5285 neon_load_reg64(cpu_V0, rn);
a7812ae4 5286 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5287 neon_load_reg64(cpu_V1, rm);
a7812ae4 5288 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5289 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5290 }
5291 neon_store_reg64(cpu_V0, rd);
5292 if (q) {
5293 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5294 }
5295 } else if ((insn & (1 << 11)) == 0) {
5296 /* Two register misc. */
5297 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5298 size = (insn >> 18) & 3;
5299 switch (op) {
5300 case 0: /* VREV64 */
5301 if (size == 3)
5302 return 1;
5303 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5304 tmp = neon_load_reg(rm, pass * 2);
5305 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5306 switch (size) {
dd8fbd78
FN
5307 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5308 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5309 case 2: /* no-op */ break;
5310 default: abort();
5311 }
dd8fbd78 5312 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5313 if (size == 2) {
dd8fbd78 5314 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5315 } else {
9ee6e8bb 5316 switch (size) {
dd8fbd78
FN
5317 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5318 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5319 default: abort();
5320 }
dd8fbd78 5321 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5322 }
5323 }
5324 break;
5325 case 4: case 5: /* VPADDL */
5326 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5327 if (size == 3)
5328 return 1;
ad69471c
PB
5329 for (pass = 0; pass < q + 1; pass++) {
5330 tmp = neon_load_reg(rm, pass * 2);
5331 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5332 tmp = neon_load_reg(rm, pass * 2 + 1);
5333 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5334 switch (size) {
5335 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5336 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5337 case 2: tcg_gen_add_i64(CPU_V001); break;
5338 default: abort();
5339 }
9ee6e8bb
PB
5340 if (op >= 12) {
5341 /* Accumulate. */
ad69471c
PB
5342 neon_load_reg64(cpu_V1, rd + pass);
5343 gen_neon_addl(size);
9ee6e8bb 5344 }
ad69471c 5345 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5346 }
5347 break;
5348 case 33: /* VTRN */
5349 if (size == 2) {
5350 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5351 tmp = neon_load_reg(rm, n);
5352 tmp2 = neon_load_reg(rd, n + 1);
5353 neon_store_reg(rm, n, tmp2);
5354 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5355 }
5356 } else {
5357 goto elementwise;
5358 }
5359 break;
5360 case 34: /* VUZP */
5361 /* Reg Before After
5362 Rd A3 A2 A1 A0 B2 B0 A2 A0
5363 Rm B3 B2 B1 B0 B3 B1 A3 A1
5364 */
5365 if (size == 3)
5366 return 1;
5367 gen_neon_unzip(rd, q, 0, size);
5368 gen_neon_unzip(rm, q, 4, size);
5369 if (q) {
5370 static int unzip_order_q[8] =
5371 {0, 2, 4, 6, 1, 3, 5, 7};
5372 for (n = 0; n < 8; n++) {
5373 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5374 tmp = neon_load_scratch(unzip_order_q[n]);
5375 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5376 }
5377 } else {
5378 static int unzip_order[4] =
5379 {0, 4, 1, 5};
5380 for (n = 0; n < 4; n++) {
5381 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5382 tmp = neon_load_scratch(unzip_order[n]);
5383 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5384 }
5385 }
5386 break;
5387 case 35: /* VZIP */
5388 /* Reg Before After
5389 Rd A3 A2 A1 A0 B1 A1 B0 A0
5390 Rm B3 B2 B1 B0 B3 A3 B2 A2
5391 */
5392 if (size == 3)
5393 return 1;
5394 count = (q ? 4 : 2);
5395 for (n = 0; n < count; n++) {
dd8fbd78
FN
5396 tmp = neon_load_reg(rd, n);
5397 tmp2 = neon_load_reg(rd, n);
9ee6e8bb 5398 switch (size) {
dd8fbd78
FN
5399 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5400 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5401 case 2: /* no-op */; break;
5402 default: abort();
5403 }
dd8fbd78
FN
5404 neon_store_scratch(n * 2, tmp);
5405 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5406 }
5407 for (n = 0; n < count * 2; n++) {
5408 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5409 tmp = neon_load_scratch(n);
5410 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5411 }
5412 break;
5413 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5414 if (size == 3)
5415 return 1;
a50f5b91 5416 TCGV_UNUSED(tmp2);
9ee6e8bb 5417 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5418 neon_load_reg64(cpu_V0, rm + pass);
5419 tmp = new_tmp();
9ee6e8bb 5420 if (op == 36 && q == 0) {
ad69471c 5421 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5422 } else if (q) {
ad69471c 5423 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5424 } else {
ad69471c
PB
5425 gen_neon_narrow_sats(size, tmp, cpu_V0);
5426 }
5427 if (pass == 0) {
5428 tmp2 = tmp;
5429 } else {
5430 neon_store_reg(rd, 0, tmp2);
5431 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5432 }
9ee6e8bb
PB
5433 }
5434 break;
5435 case 38: /* VSHLL */
ad69471c 5436 if (q || size == 3)
9ee6e8bb 5437 return 1;
ad69471c
PB
5438 tmp = neon_load_reg(rm, 0);
5439 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5440 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5441 if (pass == 1)
5442 tmp = tmp2;
5443 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5444 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5445 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5446 }
5447 break;
60011498
PB
5448 case 44: /* VCVT.F16.F32 */
5449 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5450 return 1;
5451 tmp = new_tmp();
5452 tmp2 = new_tmp();
5453 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5454 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5455 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5456 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5457 tcg_gen_shli_i32(tmp2, tmp2, 16);
5458 tcg_gen_or_i32(tmp2, tmp2, tmp);
5459 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5460 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5461 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5462 neon_store_reg(rd, 0, tmp2);
5463 tmp2 = new_tmp();
5464 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5465 tcg_gen_shli_i32(tmp2, tmp2, 16);
5466 tcg_gen_or_i32(tmp2, tmp2, tmp);
5467 neon_store_reg(rd, 1, tmp2);
5468 dead_tmp(tmp);
5469 break;
5470 case 46: /* VCVT.F32.F16 */
5471 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5472 return 1;
5473 tmp3 = new_tmp();
5474 tmp = neon_load_reg(rm, 0);
5475 tmp2 = neon_load_reg(rm, 1);
5476 tcg_gen_ext16u_i32(tmp3, tmp);
5477 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5478 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5479 tcg_gen_shri_i32(tmp3, tmp, 16);
5480 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5481 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5482 dead_tmp(tmp);
5483 tcg_gen_ext16u_i32(tmp3, tmp2);
5484 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5485 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5486 tcg_gen_shri_i32(tmp3, tmp2, 16);
5487 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5488 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5489 dead_tmp(tmp2);
5490 dead_tmp(tmp3);
5491 break;
9ee6e8bb
PB
5492 default:
5493 elementwise:
5494 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5495 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5496 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5497 neon_reg_offset(rm, pass));
dd8fbd78 5498 TCGV_UNUSED(tmp);
9ee6e8bb 5499 } else {
dd8fbd78 5500 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5501 }
5502 switch (op) {
5503 case 1: /* VREV32 */
5504 switch (size) {
dd8fbd78
FN
5505 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5506 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5507 default: return 1;
5508 }
5509 break;
5510 case 2: /* VREV16 */
5511 if (size != 0)
5512 return 1;
dd8fbd78 5513 gen_rev16(tmp);
9ee6e8bb 5514 break;
9ee6e8bb
PB
5515 case 8: /* CLS */
5516 switch (size) {
dd8fbd78
FN
5517 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5518 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5519 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5520 default: return 1;
5521 }
5522 break;
5523 case 9: /* CLZ */
5524 switch (size) {
dd8fbd78
FN
5525 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5526 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5527 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5528 default: return 1;
5529 }
5530 break;
5531 case 10: /* CNT */
5532 if (size != 0)
5533 return 1;
dd8fbd78 5534 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5535 break;
5536 case 11: /* VNOT */
5537 if (size != 0)
5538 return 1;
dd8fbd78 5539 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5540 break;
5541 case 14: /* VQABS */
5542 switch (size) {
dd8fbd78
FN
5543 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5544 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5545 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5546 default: return 1;
5547 }
5548 break;
5549 case 15: /* VQNEG */
5550 switch (size) {
dd8fbd78
FN
5551 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5552 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5553 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5554 default: return 1;
5555 }
5556 break;
5557 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5558 tmp2 = tcg_const_i32(0);
9ee6e8bb 5559 switch(size) {
dd8fbd78
FN
5560 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5561 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5562 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5563 default: return 1;
5564 }
dd8fbd78 5565 tcg_temp_free(tmp2);
9ee6e8bb 5566 if (op == 19)
dd8fbd78 5567 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5568 break;
5569 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5570 tmp2 = tcg_const_i32(0);
9ee6e8bb 5571 switch(size) {
dd8fbd78
FN
5572 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5573 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5574 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5575 default: return 1;
5576 }
dd8fbd78 5577 tcg_temp_free(tmp2);
9ee6e8bb 5578 if (op == 20)
dd8fbd78 5579 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5580 break;
5581 case 18: /* VCEQ #0 */
dd8fbd78 5582 tmp2 = tcg_const_i32(0);
9ee6e8bb 5583 switch(size) {
dd8fbd78
FN
5584 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5585 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5586 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5587 default: return 1;
5588 }
dd8fbd78 5589 tcg_temp_free(tmp2);
9ee6e8bb
PB
5590 break;
5591 case 22: /* VABS */
5592 switch(size) {
dd8fbd78
FN
5593 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5594 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5595 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5596 default: return 1;
5597 }
5598 break;
5599 case 23: /* VNEG */
ad69471c
PB
5600 if (size == 3)
5601 return 1;
dd8fbd78
FN
5602 tmp2 = tcg_const_i32(0);
5603 gen_neon_rsb(size, tmp, tmp2);
5604 tcg_temp_free(tmp2);
9ee6e8bb
PB
5605 break;
5606 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5607 tmp2 = tcg_const_i32(0);
5608 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5609 tcg_temp_free(tmp2);
9ee6e8bb 5610 if (op == 27)
dd8fbd78 5611 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5612 break;
5613 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5614 tmp2 = tcg_const_i32(0);
5615 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5616 tcg_temp_free(tmp2);
9ee6e8bb 5617 if (op == 28)
dd8fbd78 5618 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5619 break;
5620 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5621 tmp2 = tcg_const_i32(0);
5622 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5623 tcg_temp_free(tmp2);
9ee6e8bb
PB
5624 break;
5625 case 30: /* Float VABS */
4373f3ce 5626 gen_vfp_abs(0);
9ee6e8bb
PB
5627 break;
5628 case 31: /* Float VNEG */
4373f3ce 5629 gen_vfp_neg(0);
9ee6e8bb
PB
5630 break;
5631 case 32: /* VSWP */
dd8fbd78
FN
5632 tmp2 = neon_load_reg(rd, pass);
5633 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5634 break;
5635 case 33: /* VTRN */
dd8fbd78 5636 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5637 switch (size) {
dd8fbd78
FN
5638 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5639 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5640 case 2: abort();
5641 default: return 1;
5642 }
dd8fbd78 5643 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5644 break;
5645 case 56: /* Integer VRECPE */
dd8fbd78 5646 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5647 break;
5648 case 57: /* Integer VRSQRTE */
dd8fbd78 5649 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5650 break;
5651 case 58: /* Float VRECPE */
4373f3ce 5652 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5653 break;
5654 case 59: /* Float VRSQRTE */
4373f3ce 5655 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5656 break;
5657 case 60: /* VCVT.F32.S32 */
4373f3ce 5658 gen_vfp_tosiz(0);
9ee6e8bb
PB
5659 break;
5660 case 61: /* VCVT.F32.U32 */
4373f3ce 5661 gen_vfp_touiz(0);
9ee6e8bb
PB
5662 break;
5663 case 62: /* VCVT.S32.F32 */
4373f3ce 5664 gen_vfp_sito(0);
9ee6e8bb
PB
5665 break;
5666 case 63: /* VCVT.U32.F32 */
4373f3ce 5667 gen_vfp_uito(0);
9ee6e8bb
PB
5668 break;
5669 default:
5670 /* Reserved: 21, 29, 39-56 */
5671 return 1;
5672 }
5673 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5674 tcg_gen_st_f32(cpu_F0s, cpu_env,
5675 neon_reg_offset(rd, pass));
9ee6e8bb 5676 } else {
dd8fbd78 5677 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5678 }
5679 }
5680 break;
5681 }
5682 } else if ((insn & (1 << 10)) == 0) {
5683 /* VTBL, VTBX. */
3018f259 5684 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5685 if (insn & (1 << 6)) {
8f8e3aa4 5686 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5687 } else {
8f8e3aa4
PB
5688 tmp = new_tmp();
5689 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5690 }
8f8e3aa4 5691 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5692 tmp4 = tcg_const_i32(rn);
5693 tmp5 = tcg_const_i32(n);
5694 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
3018f259 5695 dead_tmp(tmp);
9ee6e8bb 5696 if (insn & (1 << 6)) {
8f8e3aa4 5697 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5698 } else {
8f8e3aa4
PB
5699 tmp = new_tmp();
5700 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5701 }
8f8e3aa4 5702 tmp3 = neon_load_reg(rm, 1);
b75263d6 5703 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5704 tcg_temp_free_i32(tmp5);
5705 tcg_temp_free_i32(tmp4);
8f8e3aa4 5706 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5707 neon_store_reg(rd, 1, tmp3);
5708 dead_tmp(tmp);
9ee6e8bb
PB
5709 } else if ((insn & 0x380) == 0) {
5710 /* VDUP */
5711 if (insn & (1 << 19)) {
dd8fbd78 5712 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5713 } else {
dd8fbd78 5714 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5715 }
5716 if (insn & (1 << 16)) {
dd8fbd78 5717 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5718 } else if (insn & (1 << 17)) {
5719 if ((insn >> 18) & 1)
dd8fbd78 5720 gen_neon_dup_high16(tmp);
9ee6e8bb 5721 else
dd8fbd78 5722 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5723 }
5724 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5725 tmp2 = new_tmp();
5726 tcg_gen_mov_i32(tmp2, tmp);
5727 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5728 }
dd8fbd78 5729 dead_tmp(tmp);
9ee6e8bb
PB
5730 } else {
5731 return 1;
5732 }
5733 }
5734 }
5735 return 0;
5736}
5737
fe1479c3
PB
5738static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5739{
5740 int crn = (insn >> 16) & 0xf;
5741 int crm = insn & 0xf;
5742 int op1 = (insn >> 21) & 7;
5743 int op2 = (insn >> 5) & 7;
5744 int rt = (insn >> 12) & 0xf;
5745 TCGv tmp;
5746
5747 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5748 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5749 /* TEECR */
5750 if (IS_USER(s))
5751 return 1;
5752 tmp = load_cpu_field(teecr);
5753 store_reg(s, rt, tmp);
5754 return 0;
5755 }
5756 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5757 /* TEEHBR */
5758 if (IS_USER(s) && (env->teecr & 1))
5759 return 1;
5760 tmp = load_cpu_field(teehbr);
5761 store_reg(s, rt, tmp);
5762 return 0;
5763 }
5764 }
5765 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5766 op1, crn, crm, op2);
5767 return 1;
5768}
5769
5770static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5771{
5772 int crn = (insn >> 16) & 0xf;
5773 int crm = insn & 0xf;
5774 int op1 = (insn >> 21) & 7;
5775 int op2 = (insn >> 5) & 7;
5776 int rt = (insn >> 12) & 0xf;
5777 TCGv tmp;
5778
5779 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5780 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5781 /* TEECR */
5782 if (IS_USER(s))
5783 return 1;
5784 tmp = load_reg(s, rt);
5785 gen_helper_set_teecr(cpu_env, tmp);
5786 dead_tmp(tmp);
5787 return 0;
5788 }
5789 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5790 /* TEEHBR */
5791 if (IS_USER(s) && (env->teecr & 1))
5792 return 1;
5793 tmp = load_reg(s, rt);
5794 store_cpu_field(tmp, teehbr);
5795 return 0;
5796 }
5797 }
5798 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5799 op1, crn, crm, op2);
5800 return 1;
5801}
5802
9ee6e8bb
PB
/* Dispatch a generic coprocessor instruction to the decoder for the
 * coprocessor it targets (bits [11:8] of the instruction).
 * Returns 0 if the instruction was handled, nonzero to make the caller
 * raise an undefined-instruction exception.
 */
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    /* On XScale, the CPAR register gates access to coprocessors 0-13;
       a clear bit means access is denied (cp14/cp15 are always allowed,
       hence the 0x3fff mask). */
    if (arm_feature(env, ARM_FEATURE_XSCALE)
	    && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
	return 1;

    switch (cpnum) {
      case 0:
      case 1:
        /* cp0/cp1: iwMMXt on cores that have it, XScale DSP otherwise. */
	if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
	    return disas_iwmmxt_insn(env, s, insn);
	} else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
	    return disas_dsp_insn(env, s, insn);
	}
	return 1;
    case 10:
    case 11:
        /* cp10/cp11: VFP. */
	return disas_vfp_insn (env, s, insn);
    case 14:
        /* Coprocessors 7-15 are architecturally reserved by ARM.
           Unfortunately Intel decided to ignore this.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            goto board;
        /* Bit 20 distinguishes MRC (read) from MCR (write). */
        if (insn & (1 << 20))
            return disas_cp14_read(env, s, insn);
        else
            return disas_cp14_write(env, s, insn);
    case 15:
	return disas_cp15_insn (env, s, insn);
    default:
    board:
	/* Unknown coprocessor.  See if the board has hooked it.  */
	return disas_cp_insn (env, s, insn);
    }
}
5841
5e3f878a
PB
5842
5843/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5844static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5845{
5846 TCGv tmp;
5847 tmp = new_tmp();
5848 tcg_gen_trunc_i64_i32(tmp, val);
5849 store_reg(s, rlow, tmp);
5850 tmp = new_tmp();
5851 tcg_gen_shri_i64(val, val, 32);
5852 tcg_gen_trunc_i64_i32(tmp, val);
5853 store_reg(s, rhigh, tmp);
5854}
5855
5856/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5857static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5858{
a7812ae4 5859 TCGv_i64 tmp;
5e3f878a
PB
5860 TCGv tmp2;
5861
36aa55dc 5862 /* Load value and extend to 64 bits. */
a7812ae4 5863 tmp = tcg_temp_new_i64();
5e3f878a
PB
5864 tmp2 = load_reg(s, rlow);
5865 tcg_gen_extu_i32_i64(tmp, tmp2);
5866 dead_tmp(tmp2);
5867 tcg_gen_add_i64(val, val, tmp);
b75263d6 5868 tcg_temp_free_i64(tmp);
5e3f878a
PB
5869}
5870
5871/* load and add a 64-bit value from a register pair. */
a7812ae4 5872static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5873{
a7812ae4 5874 TCGv_i64 tmp;
36aa55dc
PB
5875 TCGv tmpl;
5876 TCGv tmph;
5e3f878a
PB
5877
5878 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5879 tmpl = load_reg(s, rlow);
5880 tmph = load_reg(s, rhigh);
a7812ae4 5881 tmp = tcg_temp_new_i64();
36aa55dc
PB
5882 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5883 dead_tmp(tmpl);
5884 dead_tmp(tmph);
5e3f878a 5885 tcg_gen_add_i64(val, val, tmp);
b75263d6 5886 tcg_temp_free_i64(tmp);
5e3f878a
PB
5887}
5888
5889/* Set N and Z flags from a 64-bit value. */
a7812ae4 5890static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5891{
5892 TCGv tmp = new_tmp();
5893 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5894 gen_logic_CC(tmp);
5895 dead_tmp(tmp);
5e3f878a
PB
5896}
5897
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This is sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    /* size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword (LDREXD). */
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Record the loaded value for the matching STREX *before* store_reg,
       which consumes (frees) tmp. */
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        /* LDREXD: second word comes from addr+4 into rt2; its value is
           tracked separately in cpu_exclusive_high. */
        TCGv tmp2 = new_tmp();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        dead_tmp(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    /* Arm the exclusive monitor on this address. */
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
5938
/* CLREX: clear the exclusive monitor.  -1 can never match a real
   (word-aligned) monitored address, so any subsequent STREX fails.  */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
5943
#ifdef CONFIG_USER_ONLY
/* User-mode emulation: other threads may race with us, so the
   compare-and-store cannot be done inline.  Record the address and the
   operand registers, then raise EXCP_STREX so the cpu loop performs the
   whole operation atomically on the host.  */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    /* Pack size and the three register numbers for the helper:
       bits [3:0]=size, [7:4]=rd, [11:8]=rt, [15:12]=rt2. */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_set_condexec(s);
    /* Point the PC back at this instruction so it can be restarted. */
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_STREX);
    s->is_jmp = DISAS_JUMP;
}
#else
/* System emulation: only one CPU runs at a time, so the check-and-store
   sequence below is effectively atomic.  */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Monitor must still be armed on this exact address. */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Memory must still hold the value seen by the LDREX. */
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    dead_tmp(tmp);
    if (size == 3) {
        /* STREXD: also re-check the second word at addr+4. */
        TCGv tmp2 = new_tmp();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        dead_tmp(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        dead_tmp(tmp);
    }
    /* All checks passed: perform the store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    /* Rd = 0 on success, 1 on failure. */
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the monitor is now disarmed. */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
6025
9ee6e8bb
PB
6026static void disas_arm_insn(CPUState * env, DisasContext *s)
6027{
6028 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6029 TCGv tmp;
3670669c 6030 TCGv tmp2;
6ddbc6e4 6031 TCGv tmp3;
b0109805 6032 TCGv addr;
a7812ae4 6033 TCGv_i64 tmp64;
9ee6e8bb
PB
6034
6035 insn = ldl_code(s->pc);
6036 s->pc += 4;
6037
6038 /* M variants do not implement ARM mode. */
6039 if (IS_M(env))
6040 goto illegal_op;
6041 cond = insn >> 28;
6042 if (cond == 0xf){
6043 /* Unconditional instructions. */
6044 if (((insn >> 25) & 7) == 1) {
6045 /* NEON Data processing. */
6046 if (!arm_feature(env, ARM_FEATURE_NEON))
6047 goto illegal_op;
6048
6049 if (disas_neon_data_insn(env, s, insn))
6050 goto illegal_op;
6051 return;
6052 }
6053 if ((insn & 0x0f100000) == 0x04000000) {
6054 /* NEON load/store. */
6055 if (!arm_feature(env, ARM_FEATURE_NEON))
6056 goto illegal_op;
6057
6058 if (disas_neon_ls_insn(env, s, insn))
6059 goto illegal_op;
6060 return;
6061 }
6062 if ((insn & 0x0d70f000) == 0x0550f000)
6063 return; /* PLD */
6064 else if ((insn & 0x0ffffdff) == 0x01010000) {
6065 ARCH(6);
6066 /* setend */
6067 if (insn & (1 << 9)) {
6068 /* BE8 mode not implemented. */
6069 goto illegal_op;
6070 }
6071 return;
6072 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6073 switch ((insn >> 4) & 0xf) {
6074 case 1: /* clrex */
6075 ARCH(6K);
426f5abc 6076 gen_clrex(s);
9ee6e8bb
PB
6077 return;
6078 case 4: /* dsb */
6079 case 5: /* dmb */
6080 case 6: /* isb */
6081 ARCH(7);
6082 /* We don't emulate caches so these are a no-op. */
6083 return;
6084 default:
6085 goto illegal_op;
6086 }
6087 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6088 /* srs */
c67b6b71 6089 int32_t offset;
9ee6e8bb
PB
6090 if (IS_USER(s))
6091 goto illegal_op;
6092 ARCH(6);
6093 op1 = (insn & 0x1f);
6094 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 6095 addr = load_reg(s, 13);
9ee6e8bb 6096 } else {
b0109805 6097 addr = new_tmp();
b75263d6
JR
6098 tmp = tcg_const_i32(op1);
6099 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6100 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6101 }
6102 i = (insn >> 23) & 3;
6103 switch (i) {
6104 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6105 case 1: offset = 0; break; /* IA */
6106 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6107 case 3: offset = 4; break; /* IB */
6108 default: abort();
6109 }
6110 if (offset)
b0109805
PB
6111 tcg_gen_addi_i32(addr, addr, offset);
6112 tmp = load_reg(s, 14);
6113 gen_st32(tmp, addr, 0);
c67b6b71 6114 tmp = load_cpu_field(spsr);
b0109805
PB
6115 tcg_gen_addi_i32(addr, addr, 4);
6116 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6117 if (insn & (1 << 21)) {
6118 /* Base writeback. */
6119 switch (i) {
6120 case 0: offset = -8; break;
c67b6b71
FN
6121 case 1: offset = 4; break;
6122 case 2: offset = -4; break;
9ee6e8bb
PB
6123 case 3: offset = 0; break;
6124 default: abort();
6125 }
6126 if (offset)
c67b6b71 6127 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 6128 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 6129 store_reg(s, 13, addr);
9ee6e8bb 6130 } else {
b75263d6
JR
6131 tmp = tcg_const_i32(op1);
6132 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6133 tcg_temp_free_i32(tmp);
c67b6b71 6134 dead_tmp(addr);
9ee6e8bb 6135 }
b0109805
PB
6136 } else {
6137 dead_tmp(addr);
9ee6e8bb 6138 }
a990f58f 6139 return;
ea825eee 6140 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6141 /* rfe */
c67b6b71 6142 int32_t offset;
9ee6e8bb
PB
6143 if (IS_USER(s))
6144 goto illegal_op;
6145 ARCH(6);
6146 rn = (insn >> 16) & 0xf;
b0109805 6147 addr = load_reg(s, rn);
9ee6e8bb
PB
6148 i = (insn >> 23) & 3;
6149 switch (i) {
b0109805 6150 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6151 case 1: offset = 0; break; /* IA */
6152 case 2: offset = -8; break; /* DB */
b0109805 6153 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6154 default: abort();
6155 }
6156 if (offset)
b0109805
PB
6157 tcg_gen_addi_i32(addr, addr, offset);
6158 /* Load PC into tmp and CPSR into tmp2. */
6159 tmp = gen_ld32(addr, 0);
6160 tcg_gen_addi_i32(addr, addr, 4);
6161 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6162 if (insn & (1 << 21)) {
6163 /* Base writeback. */
6164 switch (i) {
b0109805 6165 case 0: offset = -8; break;
c67b6b71
FN
6166 case 1: offset = 4; break;
6167 case 2: offset = -4; break;
b0109805 6168 case 3: offset = 0; break;
9ee6e8bb
PB
6169 default: abort();
6170 }
6171 if (offset)
b0109805
PB
6172 tcg_gen_addi_i32(addr, addr, offset);
6173 store_reg(s, rn, addr);
6174 } else {
6175 dead_tmp(addr);
9ee6e8bb 6176 }
b0109805 6177 gen_rfe(s, tmp, tmp2);
c67b6b71 6178 return;
9ee6e8bb
PB
6179 } else if ((insn & 0x0e000000) == 0x0a000000) {
6180 /* branch link and change to thumb (blx <offset>) */
6181 int32_t offset;
6182
6183 val = (uint32_t)s->pc;
d9ba4830
PB
6184 tmp = new_tmp();
6185 tcg_gen_movi_i32(tmp, val);
6186 store_reg(s, 14, tmp);
9ee6e8bb
PB
6187 /* Sign-extend the 24-bit offset */
6188 offset = (((int32_t)insn) << 8) >> 8;
6189 /* offset * 4 + bit24 * 2 + (thumb bit) */
6190 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6191 /* pipeline offset */
6192 val += 4;
d9ba4830 6193 gen_bx_im(s, val);
9ee6e8bb
PB
6194 return;
6195 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6196 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6197 /* iWMMXt register transfer. */
6198 if (env->cp15.c15_cpar & (1 << 1))
6199 if (!disas_iwmmxt_insn(env, s, insn))
6200 return;
6201 }
6202 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6203 /* Coprocessor double register transfer. */
6204 } else if ((insn & 0x0f000010) == 0x0e000010) {
6205 /* Additional coprocessor register transfer. */
7997d92f 6206 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6207 uint32_t mask;
6208 uint32_t val;
6209 /* cps (privileged) */
6210 if (IS_USER(s))
6211 return;
6212 mask = val = 0;
6213 if (insn & (1 << 19)) {
6214 if (insn & (1 << 8))
6215 mask |= CPSR_A;
6216 if (insn & (1 << 7))
6217 mask |= CPSR_I;
6218 if (insn & (1 << 6))
6219 mask |= CPSR_F;
6220 if (insn & (1 << 18))
6221 val |= mask;
6222 }
7997d92f 6223 if (insn & (1 << 17)) {
9ee6e8bb
PB
6224 mask |= CPSR_M;
6225 val |= (insn & 0x1f);
6226 }
6227 if (mask) {
2fbac54b 6228 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6229 }
6230 return;
6231 }
6232 goto illegal_op;
6233 }
6234 if (cond != 0xe) {
6235 /* if not always execute, we generate a conditional jump to
6236 next instruction */
6237 s->condlabel = gen_new_label();
d9ba4830 6238 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6239 s->condjmp = 1;
6240 }
6241 if ((insn & 0x0f900000) == 0x03000000) {
6242 if ((insn & (1 << 21)) == 0) {
6243 ARCH(6T2);
6244 rd = (insn >> 12) & 0xf;
6245 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6246 if ((insn & (1 << 22)) == 0) {
6247 /* MOVW */
5e3f878a
PB
6248 tmp = new_tmp();
6249 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6250 } else {
6251 /* MOVT */
5e3f878a 6252 tmp = load_reg(s, rd);
86831435 6253 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6254 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6255 }
5e3f878a 6256 store_reg(s, rd, tmp);
9ee6e8bb
PB
6257 } else {
6258 if (((insn >> 12) & 0xf) != 0xf)
6259 goto illegal_op;
6260 if (((insn >> 16) & 0xf) == 0) {
6261 gen_nop_hint(s, insn & 0xff);
6262 } else {
6263 /* CPSR = immediate */
6264 val = insn & 0xff;
6265 shift = ((insn >> 8) & 0xf) * 2;
6266 if (shift)
6267 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6268 i = ((insn & (1 << 22)) != 0);
2fbac54b 6269 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6270 goto illegal_op;
6271 }
6272 }
6273 } else if ((insn & 0x0f900000) == 0x01000000
6274 && (insn & 0x00000090) != 0x00000090) {
6275 /* miscellaneous instructions */
6276 op1 = (insn >> 21) & 3;
6277 sh = (insn >> 4) & 0xf;
6278 rm = insn & 0xf;
6279 switch (sh) {
6280 case 0x0: /* move program status register */
6281 if (op1 & 1) {
6282 /* PSR = reg */
2fbac54b 6283 tmp = load_reg(s, rm);
9ee6e8bb 6284 i = ((op1 & 2) != 0);
2fbac54b 6285 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6286 goto illegal_op;
6287 } else {
6288 /* reg = PSR */
6289 rd = (insn >> 12) & 0xf;
6290 if (op1 & 2) {
6291 if (IS_USER(s))
6292 goto illegal_op;
d9ba4830 6293 tmp = load_cpu_field(spsr);
9ee6e8bb 6294 } else {
d9ba4830
PB
6295 tmp = new_tmp();
6296 gen_helper_cpsr_read(tmp);
9ee6e8bb 6297 }
d9ba4830 6298 store_reg(s, rd, tmp);
9ee6e8bb
PB
6299 }
6300 break;
6301 case 0x1:
6302 if (op1 == 1) {
6303 /* branch/exchange thumb (bx). */
d9ba4830
PB
6304 tmp = load_reg(s, rm);
6305 gen_bx(s, tmp);
9ee6e8bb
PB
6306 } else if (op1 == 3) {
6307 /* clz */
6308 rd = (insn >> 12) & 0xf;
1497c961
PB
6309 tmp = load_reg(s, rm);
6310 gen_helper_clz(tmp, tmp);
6311 store_reg(s, rd, tmp);
9ee6e8bb
PB
6312 } else {
6313 goto illegal_op;
6314 }
6315 break;
6316 case 0x2:
6317 if (op1 == 1) {
6318 ARCH(5J); /* bxj */
6319 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6320 tmp = load_reg(s, rm);
6321 gen_bx(s, tmp);
9ee6e8bb
PB
6322 } else {
6323 goto illegal_op;
6324 }
6325 break;
6326 case 0x3:
6327 if (op1 != 1)
6328 goto illegal_op;
6329
6330 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6331 tmp = load_reg(s, rm);
6332 tmp2 = new_tmp();
6333 tcg_gen_movi_i32(tmp2, s->pc);
6334 store_reg(s, 14, tmp2);
6335 gen_bx(s, tmp);
9ee6e8bb
PB
6336 break;
6337 case 0x5: /* saturating add/subtract */
6338 rd = (insn >> 12) & 0xf;
6339 rn = (insn >> 16) & 0xf;
b40d0353 6340 tmp = load_reg(s, rm);
5e3f878a 6341 tmp2 = load_reg(s, rn);
9ee6e8bb 6342 if (op1 & 2)
5e3f878a 6343 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6344 if (op1 & 1)
5e3f878a 6345 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6346 else
5e3f878a
PB
6347 gen_helper_add_saturate(tmp, tmp, tmp2);
6348 dead_tmp(tmp2);
6349 store_reg(s, rd, tmp);
9ee6e8bb 6350 break;
49e14940
AL
6351 case 7:
6352 /* SMC instruction (op1 == 3)
6353 and undefined instructions (op1 == 0 || op1 == 2)
6354 will trap */
6355 if (op1 != 1) {
6356 goto illegal_op;
6357 }
6358 /* bkpt */
9ee6e8bb 6359 gen_set_condexec(s);
5e3f878a 6360 gen_set_pc_im(s->pc - 4);
d9ba4830 6361 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6362 s->is_jmp = DISAS_JUMP;
6363 break;
6364 case 0x8: /* signed multiply */
6365 case 0xa:
6366 case 0xc:
6367 case 0xe:
6368 rs = (insn >> 8) & 0xf;
6369 rn = (insn >> 12) & 0xf;
6370 rd = (insn >> 16) & 0xf;
6371 if (op1 == 1) {
6372 /* (32 * 16) >> 16 */
5e3f878a
PB
6373 tmp = load_reg(s, rm);
6374 tmp2 = load_reg(s, rs);
9ee6e8bb 6375 if (sh & 4)
5e3f878a 6376 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6377 else
5e3f878a 6378 gen_sxth(tmp2);
a7812ae4
PB
6379 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6380 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6381 tmp = new_tmp();
a7812ae4 6382 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6383 tcg_temp_free_i64(tmp64);
9ee6e8bb 6384 if ((sh & 2) == 0) {
5e3f878a
PB
6385 tmp2 = load_reg(s, rn);
6386 gen_helper_add_setq(tmp, tmp, tmp2);
6387 dead_tmp(tmp2);
9ee6e8bb 6388 }
5e3f878a 6389 store_reg(s, rd, tmp);
9ee6e8bb
PB
6390 } else {
6391 /* 16 * 16 */
5e3f878a
PB
6392 tmp = load_reg(s, rm);
6393 tmp2 = load_reg(s, rs);
6394 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6395 dead_tmp(tmp2);
9ee6e8bb 6396 if (op1 == 2) {
a7812ae4
PB
6397 tmp64 = tcg_temp_new_i64();
6398 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6399 dead_tmp(tmp);
a7812ae4
PB
6400 gen_addq(s, tmp64, rn, rd);
6401 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6402 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6403 } else {
6404 if (op1 == 0) {
5e3f878a
PB
6405 tmp2 = load_reg(s, rn);
6406 gen_helper_add_setq(tmp, tmp, tmp2);
6407 dead_tmp(tmp2);
9ee6e8bb 6408 }
5e3f878a 6409 store_reg(s, rd, tmp);
9ee6e8bb
PB
6410 }
6411 }
6412 break;
6413 default:
6414 goto illegal_op;
6415 }
6416 } else if (((insn & 0x0e000000) == 0 &&
6417 (insn & 0x00000090) != 0x90) ||
6418 ((insn & 0x0e000000) == (1 << 25))) {
6419 int set_cc, logic_cc, shiftop;
6420
6421 op1 = (insn >> 21) & 0xf;
6422 set_cc = (insn >> 20) & 1;
6423 logic_cc = table_logic_cc[op1] & set_cc;
6424
6425 /* data processing instruction */
6426 if (insn & (1 << 25)) {
6427 /* immediate operand */
6428 val = insn & 0xff;
6429 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6430 if (shift) {
9ee6e8bb 6431 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6432 }
6433 tmp2 = new_tmp();
6434 tcg_gen_movi_i32(tmp2, val);
6435 if (logic_cc && shift) {
6436 gen_set_CF_bit31(tmp2);
6437 }
9ee6e8bb
PB
6438 } else {
6439 /* register */
6440 rm = (insn) & 0xf;
e9bb4aa9 6441 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6442 shiftop = (insn >> 5) & 3;
6443 if (!(insn & (1 << 4))) {
6444 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6445 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6446 } else {
6447 rs = (insn >> 8) & 0xf;
8984bd2e 6448 tmp = load_reg(s, rs);
e9bb4aa9 6449 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6450 }
6451 }
6452 if (op1 != 0x0f && op1 != 0x0d) {
6453 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6454 tmp = load_reg(s, rn);
6455 } else {
6456 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6457 }
6458 rd = (insn >> 12) & 0xf;
6459 switch(op1) {
6460 case 0x00:
e9bb4aa9
JR
6461 tcg_gen_and_i32(tmp, tmp, tmp2);
6462 if (logic_cc) {
6463 gen_logic_CC(tmp);
6464 }
21aeb343 6465 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6466 break;
6467 case 0x01:
e9bb4aa9
JR
6468 tcg_gen_xor_i32(tmp, tmp, tmp2);
6469 if (logic_cc) {
6470 gen_logic_CC(tmp);
6471 }
21aeb343 6472 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6473 break;
6474 case 0x02:
6475 if (set_cc && rd == 15) {
6476 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6477 if (IS_USER(s)) {
9ee6e8bb 6478 goto illegal_op;
e9bb4aa9
JR
6479 }
6480 gen_helper_sub_cc(tmp, tmp, tmp2);
6481 gen_exception_return(s, tmp);
9ee6e8bb 6482 } else {
e9bb4aa9
JR
6483 if (set_cc) {
6484 gen_helper_sub_cc(tmp, tmp, tmp2);
6485 } else {
6486 tcg_gen_sub_i32(tmp, tmp, tmp2);
6487 }
21aeb343 6488 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6489 }
6490 break;
6491 case 0x03:
e9bb4aa9
JR
6492 if (set_cc) {
6493 gen_helper_sub_cc(tmp, tmp2, tmp);
6494 } else {
6495 tcg_gen_sub_i32(tmp, tmp2, tmp);
6496 }
21aeb343 6497 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6498 break;
6499 case 0x04:
e9bb4aa9
JR
6500 if (set_cc) {
6501 gen_helper_add_cc(tmp, tmp, tmp2);
6502 } else {
6503 tcg_gen_add_i32(tmp, tmp, tmp2);
6504 }
21aeb343 6505 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6506 break;
6507 case 0x05:
e9bb4aa9
JR
6508 if (set_cc) {
6509 gen_helper_adc_cc(tmp, tmp, tmp2);
6510 } else {
6511 gen_add_carry(tmp, tmp, tmp2);
6512 }
21aeb343 6513 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6514 break;
6515 case 0x06:
e9bb4aa9
JR
6516 if (set_cc) {
6517 gen_helper_sbc_cc(tmp, tmp, tmp2);
6518 } else {
6519 gen_sub_carry(tmp, tmp, tmp2);
6520 }
21aeb343 6521 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6522 break;
6523 case 0x07:
e9bb4aa9
JR
6524 if (set_cc) {
6525 gen_helper_sbc_cc(tmp, tmp2, tmp);
6526 } else {
6527 gen_sub_carry(tmp, tmp2, tmp);
6528 }
21aeb343 6529 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6530 break;
6531 case 0x08:
6532 if (set_cc) {
e9bb4aa9
JR
6533 tcg_gen_and_i32(tmp, tmp, tmp2);
6534 gen_logic_CC(tmp);
9ee6e8bb 6535 }
e9bb4aa9 6536 dead_tmp(tmp);
9ee6e8bb
PB
6537 break;
6538 case 0x09:
6539 if (set_cc) {
e9bb4aa9
JR
6540 tcg_gen_xor_i32(tmp, tmp, tmp2);
6541 gen_logic_CC(tmp);
9ee6e8bb 6542 }
e9bb4aa9 6543 dead_tmp(tmp);
9ee6e8bb
PB
6544 break;
6545 case 0x0a:
6546 if (set_cc) {
e9bb4aa9 6547 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6548 }
e9bb4aa9 6549 dead_tmp(tmp);
9ee6e8bb
PB
6550 break;
6551 case 0x0b:
6552 if (set_cc) {
e9bb4aa9 6553 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6554 }
e9bb4aa9 6555 dead_tmp(tmp);
9ee6e8bb
PB
6556 break;
6557 case 0x0c:
e9bb4aa9
JR
6558 tcg_gen_or_i32(tmp, tmp, tmp2);
6559 if (logic_cc) {
6560 gen_logic_CC(tmp);
6561 }
21aeb343 6562 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6563 break;
6564 case 0x0d:
6565 if (logic_cc && rd == 15) {
6566 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6567 if (IS_USER(s)) {
9ee6e8bb 6568 goto illegal_op;
e9bb4aa9
JR
6569 }
6570 gen_exception_return(s, tmp2);
9ee6e8bb 6571 } else {
e9bb4aa9
JR
6572 if (logic_cc) {
6573 gen_logic_CC(tmp2);
6574 }
21aeb343 6575 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6576 }
6577 break;
6578 case 0x0e:
f669df27 6579 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6580 if (logic_cc) {
6581 gen_logic_CC(tmp);
6582 }
21aeb343 6583 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6584 break;
6585 default:
6586 case 0x0f:
e9bb4aa9
JR
6587 tcg_gen_not_i32(tmp2, tmp2);
6588 if (logic_cc) {
6589 gen_logic_CC(tmp2);
6590 }
21aeb343 6591 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6592 break;
6593 }
e9bb4aa9
JR
6594 if (op1 != 0x0f && op1 != 0x0d) {
6595 dead_tmp(tmp2);
6596 }
9ee6e8bb
PB
6597 } else {
6598 /* other instructions */
6599 op1 = (insn >> 24) & 0xf;
6600 switch(op1) {
6601 case 0x0:
6602 case 0x1:
6603 /* multiplies, extra load/stores */
6604 sh = (insn >> 5) & 3;
6605 if (sh == 0) {
6606 if (op1 == 0x0) {
6607 rd = (insn >> 16) & 0xf;
6608 rn = (insn >> 12) & 0xf;
6609 rs = (insn >> 8) & 0xf;
6610 rm = (insn) & 0xf;
6611 op1 = (insn >> 20) & 0xf;
6612 switch (op1) {
6613 case 0: case 1: case 2: case 3: case 6:
6614 /* 32 bit mul */
5e3f878a
PB
6615 tmp = load_reg(s, rs);
6616 tmp2 = load_reg(s, rm);
6617 tcg_gen_mul_i32(tmp, tmp, tmp2);
6618 dead_tmp(tmp2);
9ee6e8bb
PB
6619 if (insn & (1 << 22)) {
6620 /* Subtract (mls) */
6621 ARCH(6T2);
5e3f878a
PB
6622 tmp2 = load_reg(s, rn);
6623 tcg_gen_sub_i32(tmp, tmp2, tmp);
6624 dead_tmp(tmp2);
9ee6e8bb
PB
6625 } else if (insn & (1 << 21)) {
6626 /* Add */
5e3f878a
PB
6627 tmp2 = load_reg(s, rn);
6628 tcg_gen_add_i32(tmp, tmp, tmp2);
6629 dead_tmp(tmp2);
9ee6e8bb
PB
6630 }
6631 if (insn & (1 << 20))
5e3f878a
PB
6632 gen_logic_CC(tmp);
6633 store_reg(s, rd, tmp);
9ee6e8bb
PB
6634 break;
6635 default:
6636 /* 64 bit mul */
5e3f878a
PB
6637 tmp = load_reg(s, rs);
6638 tmp2 = load_reg(s, rm);
9ee6e8bb 6639 if (insn & (1 << 22))
a7812ae4 6640 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6641 else
a7812ae4 6642 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6643 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6644 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6645 if (!(insn & (1 << 23))) { /* double accumulate */
6646 ARCH(6);
a7812ae4
PB
6647 gen_addq_lo(s, tmp64, rn);
6648 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6649 }
6650 if (insn & (1 << 20))
a7812ae4
PB
6651 gen_logicq_cc(tmp64);
6652 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6653 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6654 break;
6655 }
6656 } else {
6657 rn = (insn >> 16) & 0xf;
6658 rd = (insn >> 12) & 0xf;
6659 if (insn & (1 << 23)) {
6660 /* load/store exclusive */
86753403
PB
6661 op1 = (insn >> 21) & 0x3;
6662 if (op1)
a47f43d2 6663 ARCH(6K);
86753403
PB
6664 else
6665 ARCH(6);
3174f8e9 6666 addr = tcg_temp_local_new_i32();
98a46317 6667 load_reg_var(s, addr, rn);
9ee6e8bb 6668 if (insn & (1 << 20)) {
86753403
PB
6669 switch (op1) {
6670 case 0: /* ldrex */
426f5abc 6671 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6672 break;
6673 case 1: /* ldrexd */
426f5abc 6674 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6675 break;
6676 case 2: /* ldrexb */
426f5abc 6677 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6678 break;
6679 case 3: /* ldrexh */
426f5abc 6680 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6681 break;
6682 default:
6683 abort();
6684 }
9ee6e8bb
PB
6685 } else {
6686 rm = insn & 0xf;
86753403
PB
6687 switch (op1) {
6688 case 0: /* strex */
426f5abc 6689 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6690 break;
6691 case 1: /* strexd */
502e64fe 6692 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6693 break;
6694 case 2: /* strexb */
426f5abc 6695 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6696 break;
6697 case 3: /* strexh */
426f5abc 6698 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6699 break;
6700 default:
6701 abort();
6702 }
9ee6e8bb 6703 }
3174f8e9 6704 tcg_temp_free(addr);
9ee6e8bb
PB
6705 } else {
6706 /* SWP instruction */
6707 rm = (insn) & 0xf;
6708
8984bd2e
PB
6709 /* ??? This is not really atomic. However we know
6710 we never have multiple CPUs running in parallel,
6711 so it is good enough. */
6712 addr = load_reg(s, rn);
6713 tmp = load_reg(s, rm);
9ee6e8bb 6714 if (insn & (1 << 22)) {
8984bd2e
PB
6715 tmp2 = gen_ld8u(addr, IS_USER(s));
6716 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6717 } else {
8984bd2e
PB
6718 tmp2 = gen_ld32(addr, IS_USER(s));
6719 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6720 }
8984bd2e
PB
6721 dead_tmp(addr);
6722 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6723 }
6724 }
6725 } else {
6726 int address_offset;
6727 int load;
6728 /* Misc load/store */
6729 rn = (insn >> 16) & 0xf;
6730 rd = (insn >> 12) & 0xf;
b0109805 6731 addr = load_reg(s, rn);
9ee6e8bb 6732 if (insn & (1 << 24))
b0109805 6733 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6734 address_offset = 0;
6735 if (insn & (1 << 20)) {
6736 /* load */
6737 switch(sh) {
6738 case 1:
b0109805 6739 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6740 break;
6741 case 2:
b0109805 6742 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6743 break;
6744 default:
6745 case 3:
b0109805 6746 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6747 break;
6748 }
6749 load = 1;
6750 } else if (sh & 2) {
6751 /* doubleword */
6752 if (sh & 1) {
6753 /* store */
b0109805
PB
6754 tmp = load_reg(s, rd);
6755 gen_st32(tmp, addr, IS_USER(s));
6756 tcg_gen_addi_i32(addr, addr, 4);
6757 tmp = load_reg(s, rd + 1);
6758 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6759 load = 0;
6760 } else {
6761 /* load */
b0109805
PB
6762 tmp = gen_ld32(addr, IS_USER(s));
6763 store_reg(s, rd, tmp);
6764 tcg_gen_addi_i32(addr, addr, 4);
6765 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6766 rd++;
6767 load = 1;
6768 }
6769 address_offset = -4;
6770 } else {
6771 /* store */
b0109805
PB
6772 tmp = load_reg(s, rd);
6773 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6774 load = 0;
6775 }
6776 /* Perform base writeback before the loaded value to
6777 ensure correct behavior with overlapping index registers.
6778 ldrd with base writeback is is undefined if the
6779 destination and index registers overlap. */
6780 if (!(insn & (1 << 24))) {
b0109805
PB
6781 gen_add_datah_offset(s, insn, address_offset, addr);
6782 store_reg(s, rn, addr);
9ee6e8bb
PB
6783 } else if (insn & (1 << 21)) {
6784 if (address_offset)
b0109805
PB
6785 tcg_gen_addi_i32(addr, addr, address_offset);
6786 store_reg(s, rn, addr);
6787 } else {
6788 dead_tmp(addr);
9ee6e8bb
PB
6789 }
6790 if (load) {
6791 /* Complete the load. */
b0109805 6792 store_reg(s, rd, tmp);
9ee6e8bb
PB
6793 }
6794 }
6795 break;
6796 case 0x4:
6797 case 0x5:
6798 goto do_ldst;
6799 case 0x6:
6800 case 0x7:
6801 if (insn & (1 << 4)) {
6802 ARCH(6);
6803 /* Armv6 Media instructions. */
6804 rm = insn & 0xf;
6805 rn = (insn >> 16) & 0xf;
2c0262af 6806 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6807 rs = (insn >> 8) & 0xf;
6808 switch ((insn >> 23) & 3) {
6809 case 0: /* Parallel add/subtract. */
6810 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6811 tmp = load_reg(s, rn);
6812 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6813 sh = (insn >> 5) & 7;
6814 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6815 goto illegal_op;
6ddbc6e4
PB
6816 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6817 dead_tmp(tmp2);
6818 store_reg(s, rd, tmp);
9ee6e8bb
PB
6819 break;
6820 case 1:
6821 if ((insn & 0x00700020) == 0) {
6c95676b 6822 /* Halfword pack. */
3670669c
PB
6823 tmp = load_reg(s, rn);
6824 tmp2 = load_reg(s, rm);
9ee6e8bb 6825 shift = (insn >> 7) & 0x1f;
3670669c
PB
6826 if (insn & (1 << 6)) {
6827 /* pkhtb */
22478e79
AZ
6828 if (shift == 0)
6829 shift = 31;
6830 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6831 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6832 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6833 } else {
6834 /* pkhbt */
22478e79
AZ
6835 if (shift)
6836 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6837 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6838 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6839 }
6840 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6841 dead_tmp(tmp2);
3670669c 6842 store_reg(s, rd, tmp);
9ee6e8bb
PB
6843 } else if ((insn & 0x00200020) == 0x00200000) {
6844 /* [us]sat */
6ddbc6e4 6845 tmp = load_reg(s, rm);
9ee6e8bb
PB
6846 shift = (insn >> 7) & 0x1f;
6847 if (insn & (1 << 6)) {
6848 if (shift == 0)
6849 shift = 31;
6ddbc6e4 6850 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6851 } else {
6ddbc6e4 6852 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6853 }
6854 sh = (insn >> 16) & 0x1f;
6855 if (sh != 0) {
b75263d6 6856 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6857 if (insn & (1 << 22))
b75263d6 6858 gen_helper_usat(tmp, tmp, tmp2);
9ee6e8bb 6859 else
b75263d6
JR
6860 gen_helper_ssat(tmp, tmp, tmp2);
6861 tcg_temp_free_i32(tmp2);
9ee6e8bb 6862 }
6ddbc6e4 6863 store_reg(s, rd, tmp);
9ee6e8bb
PB
6864 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6865 /* [us]sat16 */
6ddbc6e4 6866 tmp = load_reg(s, rm);
9ee6e8bb
PB
6867 sh = (insn >> 16) & 0x1f;
6868 if (sh != 0) {
b75263d6 6869 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6870 if (insn & (1 << 22))
b75263d6 6871 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 6872 else
b75263d6
JR
6873 gen_helper_ssat16(tmp, tmp, tmp2);
6874 tcg_temp_free_i32(tmp2);
9ee6e8bb 6875 }
6ddbc6e4 6876 store_reg(s, rd, tmp);
9ee6e8bb
PB
6877 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6878 /* Select bytes. */
6ddbc6e4
PB
6879 tmp = load_reg(s, rn);
6880 tmp2 = load_reg(s, rm);
6881 tmp3 = new_tmp();
6882 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6883 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6884 dead_tmp(tmp3);
6885 dead_tmp(tmp2);
6886 store_reg(s, rd, tmp);
9ee6e8bb 6887 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6888 tmp = load_reg(s, rm);
9ee6e8bb
PB
6889 shift = (insn >> 10) & 3;
6890 /* ??? In many cases it's not neccessary to do a
6891 rotate, a shift is sufficient. */
6892 if (shift != 0)
f669df27 6893 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6894 op1 = (insn >> 20) & 7;
6895 switch (op1) {
5e3f878a
PB
6896 case 0: gen_sxtb16(tmp); break;
6897 case 2: gen_sxtb(tmp); break;
6898 case 3: gen_sxth(tmp); break;
6899 case 4: gen_uxtb16(tmp); break;
6900 case 6: gen_uxtb(tmp); break;
6901 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6902 default: goto illegal_op;
6903 }
6904 if (rn != 15) {
5e3f878a 6905 tmp2 = load_reg(s, rn);
9ee6e8bb 6906 if ((op1 & 3) == 0) {
5e3f878a 6907 gen_add16(tmp, tmp2);
9ee6e8bb 6908 } else {
5e3f878a
PB
6909 tcg_gen_add_i32(tmp, tmp, tmp2);
6910 dead_tmp(tmp2);
9ee6e8bb
PB
6911 }
6912 }
6c95676b 6913 store_reg(s, rd, tmp);
9ee6e8bb
PB
6914 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6915 /* rev */
b0109805 6916 tmp = load_reg(s, rm);
9ee6e8bb
PB
6917 if (insn & (1 << 22)) {
6918 if (insn & (1 << 7)) {
b0109805 6919 gen_revsh(tmp);
9ee6e8bb
PB
6920 } else {
6921 ARCH(6T2);
b0109805 6922 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6923 }
6924 } else {
6925 if (insn & (1 << 7))
b0109805 6926 gen_rev16(tmp);
9ee6e8bb 6927 else
66896cb8 6928 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6929 }
b0109805 6930 store_reg(s, rd, tmp);
9ee6e8bb
PB
6931 } else {
6932 goto illegal_op;
6933 }
6934 break;
6935 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6936 tmp = load_reg(s, rm);
6937 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6938 if (insn & (1 << 20)) {
6939 /* Signed multiply most significant [accumulate]. */
a7812ae4 6940 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6941 if (insn & (1 << 5))
a7812ae4
PB
6942 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6943 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6944 tmp = new_tmp();
a7812ae4 6945 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6946 tcg_temp_free_i64(tmp64);
955a7dd5
AZ
6947 if (rd != 15) {
6948 tmp2 = load_reg(s, rd);
9ee6e8bb 6949 if (insn & (1 << 6)) {
5e3f878a 6950 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6951 } else {
5e3f878a 6952 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6953 }
5e3f878a 6954 dead_tmp(tmp2);
9ee6e8bb 6955 }
955a7dd5 6956 store_reg(s, rn, tmp);
9ee6e8bb
PB
6957 } else {
6958 if (insn & (1 << 5))
5e3f878a
PB
6959 gen_swap_half(tmp2);
6960 gen_smul_dual(tmp, tmp2);
6961 /* This addition cannot overflow. */
6962 if (insn & (1 << 6)) {
6963 tcg_gen_sub_i32(tmp, tmp, tmp2);
6964 } else {
6965 tcg_gen_add_i32(tmp, tmp, tmp2);
6966 }
6967 dead_tmp(tmp2);
9ee6e8bb 6968 if (insn & (1 << 22)) {
5e3f878a 6969 /* smlald, smlsld */
a7812ae4
PB
6970 tmp64 = tcg_temp_new_i64();
6971 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6972 dead_tmp(tmp);
a7812ae4
PB
6973 gen_addq(s, tmp64, rd, rn);
6974 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 6975 tcg_temp_free_i64(tmp64);
9ee6e8bb 6976 } else {
5e3f878a 6977 /* smuad, smusd, smlad, smlsd */
22478e79 6978 if (rd != 15)
9ee6e8bb 6979 {
22478e79 6980 tmp2 = load_reg(s, rd);
5e3f878a
PB
6981 gen_helper_add_setq(tmp, tmp, tmp2);
6982 dead_tmp(tmp2);
9ee6e8bb 6983 }
22478e79 6984 store_reg(s, rn, tmp);
9ee6e8bb
PB
6985 }
6986 }
6987 break;
6988 case 3:
6989 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6990 switch (op1) {
6991 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6992 ARCH(6);
6993 tmp = load_reg(s, rm);
6994 tmp2 = load_reg(s, rs);
6995 gen_helper_usad8(tmp, tmp, tmp2);
6996 dead_tmp(tmp2);
ded9d295
AZ
6997 if (rd != 15) {
6998 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6999 tcg_gen_add_i32(tmp, tmp, tmp2);
7000 dead_tmp(tmp2);
9ee6e8bb 7001 }
ded9d295 7002 store_reg(s, rn, tmp);
9ee6e8bb
PB
7003 break;
7004 case 0x20: case 0x24: case 0x28: case 0x2c:
7005 /* Bitfield insert/clear. */
7006 ARCH(6T2);
7007 shift = (insn >> 7) & 0x1f;
7008 i = (insn >> 16) & 0x1f;
7009 i = i + 1 - shift;
7010 if (rm == 15) {
5e3f878a
PB
7011 tmp = new_tmp();
7012 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7013 } else {
5e3f878a 7014 tmp = load_reg(s, rm);
9ee6e8bb
PB
7015 }
7016 if (i != 32) {
5e3f878a 7017 tmp2 = load_reg(s, rd);
8f8e3aa4 7018 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 7019 dead_tmp(tmp2);
9ee6e8bb 7020 }
5e3f878a 7021 store_reg(s, rd, tmp);
9ee6e8bb
PB
7022 break;
7023 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7024 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7025 ARCH(6T2);
5e3f878a 7026 tmp = load_reg(s, rm);
9ee6e8bb
PB
7027 shift = (insn >> 7) & 0x1f;
7028 i = ((insn >> 16) & 0x1f) + 1;
7029 if (shift + i > 32)
7030 goto illegal_op;
7031 if (i < 32) {
7032 if (op1 & 0x20) {
5e3f878a 7033 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7034 } else {
5e3f878a 7035 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7036 }
7037 }
5e3f878a 7038 store_reg(s, rd, tmp);
9ee6e8bb
PB
7039 break;
7040 default:
7041 goto illegal_op;
7042 }
7043 break;
7044 }
7045 break;
7046 }
7047 do_ldst:
7048 /* Check for undefined extension instructions
7049 * per the ARM Bible IE:
7050 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7051 */
7052 sh = (0xf << 20) | (0xf << 4);
7053 if (op1 == 0x7 && ((insn & sh) == sh))
7054 {
7055 goto illegal_op;
7056 }
7057 /* load/store byte/word */
7058 rn = (insn >> 16) & 0xf;
7059 rd = (insn >> 12) & 0xf;
b0109805 7060 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7061 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7062 if (insn & (1 << 24))
b0109805 7063 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7064 if (insn & (1 << 20)) {
7065 /* load */
9ee6e8bb 7066 if (insn & (1 << 22)) {
b0109805 7067 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7068 } else {
b0109805 7069 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7070 }
9ee6e8bb
PB
7071 } else {
7072 /* store */
b0109805 7073 tmp = load_reg(s, rd);
9ee6e8bb 7074 if (insn & (1 << 22))
b0109805 7075 gen_st8(tmp, tmp2, i);
9ee6e8bb 7076 else
b0109805 7077 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7078 }
7079 if (!(insn & (1 << 24))) {
b0109805
PB
7080 gen_add_data_offset(s, insn, tmp2);
7081 store_reg(s, rn, tmp2);
7082 } else if (insn & (1 << 21)) {
7083 store_reg(s, rn, tmp2);
7084 } else {
7085 dead_tmp(tmp2);
9ee6e8bb
PB
7086 }
7087 if (insn & (1 << 20)) {
7088 /* Complete the load. */
7089 if (rd == 15)
b0109805 7090 gen_bx(s, tmp);
9ee6e8bb 7091 else
b0109805 7092 store_reg(s, rd, tmp);
9ee6e8bb
PB
7093 }
7094 break;
7095 case 0x08:
7096 case 0x09:
7097 {
7098 int j, n, user, loaded_base;
b0109805 7099 TCGv loaded_var;
9ee6e8bb
PB
7100 /* load/store multiple words */
7101 /* XXX: store correct base if write back */
7102 user = 0;
7103 if (insn & (1 << 22)) {
7104 if (IS_USER(s))
7105 goto illegal_op; /* only usable in supervisor mode */
7106
7107 if ((insn & (1 << 15)) == 0)
7108 user = 1;
7109 }
7110 rn = (insn >> 16) & 0xf;
b0109805 7111 addr = load_reg(s, rn);
9ee6e8bb
PB
7112
7113 /* compute total size */
7114 loaded_base = 0;
a50f5b91 7115 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7116 n = 0;
7117 for(i=0;i<16;i++) {
7118 if (insn & (1 << i))
7119 n++;
7120 }
7121 /* XXX: test invalid n == 0 case ? */
7122 if (insn & (1 << 23)) {
7123 if (insn & (1 << 24)) {
7124 /* pre increment */
b0109805 7125 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7126 } else {
7127 /* post increment */
7128 }
7129 } else {
7130 if (insn & (1 << 24)) {
7131 /* pre decrement */
b0109805 7132 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7133 } else {
7134 /* post decrement */
7135 if (n != 1)
b0109805 7136 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7137 }
7138 }
7139 j = 0;
7140 for(i=0;i<16;i++) {
7141 if (insn & (1 << i)) {
7142 if (insn & (1 << 20)) {
7143 /* load */
b0109805 7144 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7145 if (i == 15) {
b0109805 7146 gen_bx(s, tmp);
9ee6e8bb 7147 } else if (user) {
b75263d6
JR
7148 tmp2 = tcg_const_i32(i);
7149 gen_helper_set_user_reg(tmp2, tmp);
7150 tcg_temp_free_i32(tmp2);
b0109805 7151 dead_tmp(tmp);
9ee6e8bb 7152 } else if (i == rn) {
b0109805 7153 loaded_var = tmp;
9ee6e8bb
PB
7154 loaded_base = 1;
7155 } else {
b0109805 7156 store_reg(s, i, tmp);
9ee6e8bb
PB
7157 }
7158 } else {
7159 /* store */
7160 if (i == 15) {
7161 /* special case: r15 = PC + 8 */
7162 val = (long)s->pc + 4;
b0109805
PB
7163 tmp = new_tmp();
7164 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7165 } else if (user) {
b0109805 7166 tmp = new_tmp();
b75263d6
JR
7167 tmp2 = tcg_const_i32(i);
7168 gen_helper_get_user_reg(tmp, tmp2);
7169 tcg_temp_free_i32(tmp2);
9ee6e8bb 7170 } else {
b0109805 7171 tmp = load_reg(s, i);
9ee6e8bb 7172 }
b0109805 7173 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7174 }
7175 j++;
7176 /* no need to add after the last transfer */
7177 if (j != n)
b0109805 7178 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7179 }
7180 }
7181 if (insn & (1 << 21)) {
7182 /* write back */
7183 if (insn & (1 << 23)) {
7184 if (insn & (1 << 24)) {
7185 /* pre increment */
7186 } else {
7187 /* post increment */
b0109805 7188 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7189 }
7190 } else {
7191 if (insn & (1 << 24)) {
7192 /* pre decrement */
7193 if (n != 1)
b0109805 7194 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7195 } else {
7196 /* post decrement */
b0109805 7197 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7198 }
7199 }
b0109805
PB
7200 store_reg(s, rn, addr);
7201 } else {
7202 dead_tmp(addr);
9ee6e8bb
PB
7203 }
7204 if (loaded_base) {
b0109805 7205 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7206 }
7207 if ((insn & (1 << 22)) && !user) {
7208 /* Restore CPSR from SPSR. */
d9ba4830
PB
7209 tmp = load_cpu_field(spsr);
7210 gen_set_cpsr(tmp, 0xffffffff);
7211 dead_tmp(tmp);
9ee6e8bb
PB
7212 s->is_jmp = DISAS_UPDATE;
7213 }
7214 }
7215 break;
7216 case 0xa:
7217 case 0xb:
7218 {
7219 int32_t offset;
7220
7221 /* branch (and link) */
7222 val = (int32_t)s->pc;
7223 if (insn & (1 << 24)) {
5e3f878a
PB
7224 tmp = new_tmp();
7225 tcg_gen_movi_i32(tmp, val);
7226 store_reg(s, 14, tmp);
9ee6e8bb
PB
7227 }
7228 offset = (((int32_t)insn << 8) >> 8);
7229 val += (offset << 2) + 4;
7230 gen_jmp(s, val);
7231 }
7232 break;
7233 case 0xc:
7234 case 0xd:
7235 case 0xe:
7236 /* Coprocessor. */
7237 if (disas_coproc_insn(env, s, insn))
7238 goto illegal_op;
7239 break;
7240 case 0xf:
7241 /* swi */
5e3f878a 7242 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7243 s->is_jmp = DISAS_SWI;
7244 break;
7245 default:
7246 illegal_op:
7247 gen_set_condexec(s);
5e3f878a 7248 gen_set_pc_im(s->pc - 4);
d9ba4830 7249 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7250 s->is_jmp = DISAS_JUMP;
7251 break;
7252 }
7253 }
7254}
7255
/*
 * Report whether a Thumb-2 data-processing opcode is a logical operation.
 *
 * Opcodes 0..7 (AND, BIC, ORR, ORN, EOR, ...) are the logical group and
 * update the carry flag from the shifter output; opcodes 8 and above are
 * the arithmetic group.  Returns nonzero (true) for the logical group.
 */
static int
thumb2_logic_op(int op)
{
    if (op < 8) {
        return 1;
    }
    return 0;
}
7262
7263/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7264 then set condition code flags based on the result of the operation.
7265 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7266 to the high bit of T1.
7267 Returns zero if the opcode is valid. */
7268
7269static int
396e467c 7270gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7271{
7272 int logic_cc;
7273
7274 logic_cc = 0;
7275 switch (op) {
7276 case 0: /* and */
396e467c 7277 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7278 logic_cc = conds;
7279 break;
7280 case 1: /* bic */
f669df27 7281 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7282 logic_cc = conds;
7283 break;
7284 case 2: /* orr */
396e467c 7285 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7286 logic_cc = conds;
7287 break;
7288 case 3: /* orn */
396e467c
FN
7289 tcg_gen_not_i32(t1, t1);
7290 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7291 logic_cc = conds;
7292 break;
7293 case 4: /* eor */
396e467c 7294 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7295 logic_cc = conds;
7296 break;
7297 case 8: /* add */
7298 if (conds)
396e467c 7299 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7300 else
396e467c 7301 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7302 break;
7303 case 10: /* adc */
7304 if (conds)
396e467c 7305 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7306 else
396e467c 7307 gen_adc(t0, t1);
9ee6e8bb
PB
7308 break;
7309 case 11: /* sbc */
7310 if (conds)
396e467c 7311 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7312 else
396e467c 7313 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7314 break;
7315 case 13: /* sub */
7316 if (conds)
396e467c 7317 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7318 else
396e467c 7319 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7320 break;
7321 case 14: /* rsb */
7322 if (conds)
396e467c 7323 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7324 else
396e467c 7325 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7326 break;
7327 default: /* 5, 6, 7, 9, 12, 15. */
7328 return 1;
7329 }
7330 if (logic_cc) {
396e467c 7331 gen_logic_CC(t0);
9ee6e8bb 7332 if (shifter_out)
396e467c 7333 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7334 }
7335 return 0;
7336}
7337
7338/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7339 is not legal. */
7340static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7341{
b0109805 7342 uint32_t insn, imm, shift, offset;
9ee6e8bb 7343 uint32_t rd, rn, rm, rs;
b26eefb6 7344 TCGv tmp;
6ddbc6e4
PB
7345 TCGv tmp2;
7346 TCGv tmp3;
b0109805 7347 TCGv addr;
a7812ae4 7348 TCGv_i64 tmp64;
9ee6e8bb
PB
7349 int op;
7350 int shiftop;
7351 int conds;
7352 int logic_cc;
7353
7354 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7355 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7356 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7357 16-bit instructions to get correct prefetch abort behavior. */
7358 insn = insn_hw1;
7359 if ((insn & (1 << 12)) == 0) {
7360 /* Second half of blx. */
7361 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7362 tmp = load_reg(s, 14);
7363 tcg_gen_addi_i32(tmp, tmp, offset);
7364 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7365
d9ba4830 7366 tmp2 = new_tmp();
b0109805 7367 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7368 store_reg(s, 14, tmp2);
7369 gen_bx(s, tmp);
9ee6e8bb
PB
7370 return 0;
7371 }
7372 if (insn & (1 << 11)) {
7373 /* Second half of bl. */
7374 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7375 tmp = load_reg(s, 14);
6a0d8a1d 7376 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7377
d9ba4830 7378 tmp2 = new_tmp();
b0109805 7379 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7380 store_reg(s, 14, tmp2);
7381 gen_bx(s, tmp);
9ee6e8bb
PB
7382 return 0;
7383 }
7384 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7385 /* Instruction spans a page boundary. Implement it as two
7386 16-bit instructions in case the second half causes an
7387 prefetch abort. */
7388 offset = ((int32_t)insn << 21) >> 9;
396e467c 7389 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7390 return 0;
7391 }
7392 /* Fall through to 32-bit decode. */
7393 }
7394
7395 insn = lduw_code(s->pc);
7396 s->pc += 2;
7397 insn |= (uint32_t)insn_hw1 << 16;
7398
7399 if ((insn & 0xf800e800) != 0xf000e800) {
7400 ARCH(6T2);
7401 }
7402
7403 rn = (insn >> 16) & 0xf;
7404 rs = (insn >> 12) & 0xf;
7405 rd = (insn >> 8) & 0xf;
7406 rm = insn & 0xf;
7407 switch ((insn >> 25) & 0xf) {
7408 case 0: case 1: case 2: case 3:
7409 /* 16-bit instructions. Should never happen. */
7410 abort();
7411 case 4:
7412 if (insn & (1 << 22)) {
7413 /* Other load/store, table branch. */
7414 if (insn & 0x01200000) {
7415 /* Load/store doubleword. */
7416 if (rn == 15) {
b0109805
PB
7417 addr = new_tmp();
7418 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7419 } else {
b0109805 7420 addr = load_reg(s, rn);
9ee6e8bb
PB
7421 }
7422 offset = (insn & 0xff) * 4;
7423 if ((insn & (1 << 23)) == 0)
7424 offset = -offset;
7425 if (insn & (1 << 24)) {
b0109805 7426 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7427 offset = 0;
7428 }
7429 if (insn & (1 << 20)) {
7430 /* ldrd */
b0109805
PB
7431 tmp = gen_ld32(addr, IS_USER(s));
7432 store_reg(s, rs, tmp);
7433 tcg_gen_addi_i32(addr, addr, 4);
7434 tmp = gen_ld32(addr, IS_USER(s));
7435 store_reg(s, rd, tmp);
9ee6e8bb
PB
7436 } else {
7437 /* strd */
b0109805
PB
7438 tmp = load_reg(s, rs);
7439 gen_st32(tmp, addr, IS_USER(s));
7440 tcg_gen_addi_i32(addr, addr, 4);
7441 tmp = load_reg(s, rd);
7442 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7443 }
7444 if (insn & (1 << 21)) {
7445 /* Base writeback. */
7446 if (rn == 15)
7447 goto illegal_op;
b0109805
PB
7448 tcg_gen_addi_i32(addr, addr, offset - 4);
7449 store_reg(s, rn, addr);
7450 } else {
7451 dead_tmp(addr);
9ee6e8bb
PB
7452 }
7453 } else if ((insn & (1 << 23)) == 0) {
7454 /* Load/store exclusive word. */
3174f8e9 7455 addr = tcg_temp_local_new();
98a46317 7456 load_reg_var(s, addr, rn);
426f5abc 7457 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7458 if (insn & (1 << 20)) {
426f5abc 7459 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7460 } else {
426f5abc 7461 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7462 }
3174f8e9 7463 tcg_temp_free(addr);
9ee6e8bb
PB
7464 } else if ((insn & (1 << 6)) == 0) {
7465 /* Table Branch. */
7466 if (rn == 15) {
b0109805
PB
7467 addr = new_tmp();
7468 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7469 } else {
b0109805 7470 addr = load_reg(s, rn);
9ee6e8bb 7471 }
b26eefb6 7472 tmp = load_reg(s, rm);
b0109805 7473 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7474 if (insn & (1 << 4)) {
7475 /* tbh */
b0109805 7476 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7477 dead_tmp(tmp);
b0109805 7478 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7479 } else { /* tbb */
b26eefb6 7480 dead_tmp(tmp);
b0109805 7481 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7482 }
b0109805
PB
7483 dead_tmp(addr);
7484 tcg_gen_shli_i32(tmp, tmp, 1);
7485 tcg_gen_addi_i32(tmp, tmp, s->pc);
7486 store_reg(s, 15, tmp);
9ee6e8bb
PB
7487 } else {
7488 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7489 ARCH(7);
9ee6e8bb 7490 op = (insn >> 4) & 0x3;
426f5abc
PB
7491 if (op == 2) {
7492 goto illegal_op;
7493 }
3174f8e9 7494 addr = tcg_temp_local_new();
98a46317 7495 load_reg_var(s, addr, rn);
9ee6e8bb 7496 if (insn & (1 << 20)) {
426f5abc 7497 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7498 } else {
426f5abc 7499 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7500 }
3174f8e9 7501 tcg_temp_free(addr);
9ee6e8bb
PB
7502 }
7503 } else {
7504 /* Load/store multiple, RFE, SRS. */
7505 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7506 /* Not available in user mode. */
b0109805 7507 if (IS_USER(s))
9ee6e8bb
PB
7508 goto illegal_op;
7509 if (insn & (1 << 20)) {
7510 /* rfe */
b0109805
PB
7511 addr = load_reg(s, rn);
7512 if ((insn & (1 << 24)) == 0)
7513 tcg_gen_addi_i32(addr, addr, -8);
7514 /* Load PC into tmp and CPSR into tmp2. */
7515 tmp = gen_ld32(addr, 0);
7516 tcg_gen_addi_i32(addr, addr, 4);
7517 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7518 if (insn & (1 << 21)) {
7519 /* Base writeback. */
b0109805
PB
7520 if (insn & (1 << 24)) {
7521 tcg_gen_addi_i32(addr, addr, 4);
7522 } else {
7523 tcg_gen_addi_i32(addr, addr, -4);
7524 }
7525 store_reg(s, rn, addr);
7526 } else {
7527 dead_tmp(addr);
9ee6e8bb 7528 }
b0109805 7529 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7530 } else {
7531 /* srs */
7532 op = (insn & 0x1f);
7533 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7534 addr = load_reg(s, 13);
9ee6e8bb 7535 } else {
b0109805 7536 addr = new_tmp();
b75263d6
JR
7537 tmp = tcg_const_i32(op);
7538 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7539 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7540 }
7541 if ((insn & (1 << 24)) == 0) {
b0109805 7542 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7543 }
b0109805
PB
7544 tmp = load_reg(s, 14);
7545 gen_st32(tmp, addr, 0);
7546 tcg_gen_addi_i32(addr, addr, 4);
7547 tmp = new_tmp();
7548 gen_helper_cpsr_read(tmp);
7549 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7550 if (insn & (1 << 21)) {
7551 if ((insn & (1 << 24)) == 0) {
b0109805 7552 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7553 } else {
b0109805 7554 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7555 }
7556 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7557 store_reg(s, 13, addr);
9ee6e8bb 7558 } else {
b75263d6
JR
7559 tmp = tcg_const_i32(op);
7560 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7561 tcg_temp_free_i32(tmp);
9ee6e8bb 7562 }
b0109805
PB
7563 } else {
7564 dead_tmp(addr);
9ee6e8bb
PB
7565 }
7566 }
7567 } else {
7568 int i;
7569 /* Load/store multiple. */
b0109805 7570 addr = load_reg(s, rn);
9ee6e8bb
PB
7571 offset = 0;
7572 for (i = 0; i < 16; i++) {
7573 if (insn & (1 << i))
7574 offset += 4;
7575 }
7576 if (insn & (1 << 24)) {
b0109805 7577 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7578 }
7579
7580 for (i = 0; i < 16; i++) {
7581 if ((insn & (1 << i)) == 0)
7582 continue;
7583 if (insn & (1 << 20)) {
7584 /* Load. */
b0109805 7585 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7586 if (i == 15) {
b0109805 7587 gen_bx(s, tmp);
9ee6e8bb 7588 } else {
b0109805 7589 store_reg(s, i, tmp);
9ee6e8bb
PB
7590 }
7591 } else {
7592 /* Store. */
b0109805
PB
7593 tmp = load_reg(s, i);
7594 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7595 }
b0109805 7596 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7597 }
7598 if (insn & (1 << 21)) {
7599 /* Base register writeback. */
7600 if (insn & (1 << 24)) {
b0109805 7601 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7602 }
7603 /* Fault if writeback register is in register list. */
7604 if (insn & (1 << rn))
7605 goto illegal_op;
b0109805
PB
7606 store_reg(s, rn, addr);
7607 } else {
7608 dead_tmp(addr);
9ee6e8bb
PB
7609 }
7610 }
7611 }
7612 break;
2af9ab77
JB
7613 case 5:
7614
9ee6e8bb 7615 op = (insn >> 21) & 0xf;
2af9ab77
JB
7616 if (op == 6) {
7617 /* Halfword pack. */
7618 tmp = load_reg(s, rn);
7619 tmp2 = load_reg(s, rm);
7620 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7621 if (insn & (1 << 5)) {
7622 /* pkhtb */
7623 if (shift == 0)
7624 shift = 31;
7625 tcg_gen_sari_i32(tmp2, tmp2, shift);
7626 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7627 tcg_gen_ext16u_i32(tmp2, tmp2);
7628 } else {
7629 /* pkhbt */
7630 if (shift)
7631 tcg_gen_shli_i32(tmp2, tmp2, shift);
7632 tcg_gen_ext16u_i32(tmp, tmp);
7633 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7634 }
7635 tcg_gen_or_i32(tmp, tmp, tmp2);
7636 dead_tmp(tmp2);
3174f8e9
FN
7637 store_reg(s, rd, tmp);
7638 } else {
2af9ab77
JB
7639 /* Data processing register constant shift. */
7640 if (rn == 15) {
7641 tmp = new_tmp();
7642 tcg_gen_movi_i32(tmp, 0);
7643 } else {
7644 tmp = load_reg(s, rn);
7645 }
7646 tmp2 = load_reg(s, rm);
7647
7648 shiftop = (insn >> 4) & 3;
7649 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7650 conds = (insn & (1 << 20)) != 0;
7651 logic_cc = (conds && thumb2_logic_op(op));
7652 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7653 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7654 goto illegal_op;
7655 dead_tmp(tmp2);
7656 if (rd != 15) {
7657 store_reg(s, rd, tmp);
7658 } else {
7659 dead_tmp(tmp);
7660 }
3174f8e9 7661 }
9ee6e8bb
PB
7662 break;
7663 case 13: /* Misc data processing. */
7664 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7665 if (op < 4 && (insn & 0xf000) != 0xf000)
7666 goto illegal_op;
7667 switch (op) {
7668 case 0: /* Register controlled shift. */
8984bd2e
PB
7669 tmp = load_reg(s, rn);
7670 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7671 if ((insn & 0x70) != 0)
7672 goto illegal_op;
7673 op = (insn >> 21) & 3;
8984bd2e
PB
7674 logic_cc = (insn & (1 << 20)) != 0;
7675 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7676 if (logic_cc)
7677 gen_logic_CC(tmp);
21aeb343 7678 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7679 break;
7680 case 1: /* Sign/zero extend. */
5e3f878a 7681 tmp = load_reg(s, rm);
9ee6e8bb
PB
7682 shift = (insn >> 4) & 3;
7683 /* ??? In many cases it's not neccessary to do a
7684 rotate, a shift is sufficient. */
7685 if (shift != 0)
f669df27 7686 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7687 op = (insn >> 20) & 7;
7688 switch (op) {
5e3f878a
PB
7689 case 0: gen_sxth(tmp); break;
7690 case 1: gen_uxth(tmp); break;
7691 case 2: gen_sxtb16(tmp); break;
7692 case 3: gen_uxtb16(tmp); break;
7693 case 4: gen_sxtb(tmp); break;
7694 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7695 default: goto illegal_op;
7696 }
7697 if (rn != 15) {
5e3f878a 7698 tmp2 = load_reg(s, rn);
9ee6e8bb 7699 if ((op >> 1) == 1) {
5e3f878a 7700 gen_add16(tmp, tmp2);
9ee6e8bb 7701 } else {
5e3f878a
PB
7702 tcg_gen_add_i32(tmp, tmp, tmp2);
7703 dead_tmp(tmp2);
9ee6e8bb
PB
7704 }
7705 }
5e3f878a 7706 store_reg(s, rd, tmp);
9ee6e8bb
PB
7707 break;
7708 case 2: /* SIMD add/subtract. */
7709 op = (insn >> 20) & 7;
7710 shift = (insn >> 4) & 7;
7711 if ((op & 3) == 3 || (shift & 3) == 3)
7712 goto illegal_op;
6ddbc6e4
PB
7713 tmp = load_reg(s, rn);
7714 tmp2 = load_reg(s, rm);
7715 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7716 dead_tmp(tmp2);
7717 store_reg(s, rd, tmp);
9ee6e8bb
PB
7718 break;
7719 case 3: /* Other data processing. */
7720 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7721 if (op < 4) {
7722 /* Saturating add/subtract. */
d9ba4830
PB
7723 tmp = load_reg(s, rn);
7724 tmp2 = load_reg(s, rm);
9ee6e8bb 7725 if (op & 1)
4809c612
JB
7726 gen_helper_double_saturate(tmp, tmp);
7727 if (op & 2)
d9ba4830 7728 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7729 else
d9ba4830
PB
7730 gen_helper_add_saturate(tmp, tmp, tmp2);
7731 dead_tmp(tmp2);
9ee6e8bb 7732 } else {
d9ba4830 7733 tmp = load_reg(s, rn);
9ee6e8bb
PB
7734 switch (op) {
7735 case 0x0a: /* rbit */
d9ba4830 7736 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7737 break;
7738 case 0x08: /* rev */
66896cb8 7739 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7740 break;
7741 case 0x09: /* rev16 */
d9ba4830 7742 gen_rev16(tmp);
9ee6e8bb
PB
7743 break;
7744 case 0x0b: /* revsh */
d9ba4830 7745 gen_revsh(tmp);
9ee6e8bb
PB
7746 break;
7747 case 0x10: /* sel */
d9ba4830 7748 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7749 tmp3 = new_tmp();
7750 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7751 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7752 dead_tmp(tmp3);
d9ba4830 7753 dead_tmp(tmp2);
9ee6e8bb
PB
7754 break;
7755 case 0x18: /* clz */
d9ba4830 7756 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7757 break;
7758 default:
7759 goto illegal_op;
7760 }
7761 }
d9ba4830 7762 store_reg(s, rd, tmp);
9ee6e8bb
PB
7763 break;
7764 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7765 op = (insn >> 4) & 0xf;
d9ba4830
PB
7766 tmp = load_reg(s, rn);
7767 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7768 switch ((insn >> 20) & 7) {
7769 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7770 tcg_gen_mul_i32(tmp, tmp, tmp2);
7771 dead_tmp(tmp2);
9ee6e8bb 7772 if (rs != 15) {
d9ba4830 7773 tmp2 = load_reg(s, rs);
9ee6e8bb 7774 if (op)
d9ba4830 7775 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7776 else
d9ba4830
PB
7777 tcg_gen_add_i32(tmp, tmp, tmp2);
7778 dead_tmp(tmp2);
9ee6e8bb 7779 }
9ee6e8bb
PB
7780 break;
7781 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7782 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7783 dead_tmp(tmp2);
9ee6e8bb 7784 if (rs != 15) {
d9ba4830
PB
7785 tmp2 = load_reg(s, rs);
7786 gen_helper_add_setq(tmp, tmp, tmp2);
7787 dead_tmp(tmp2);
9ee6e8bb 7788 }
9ee6e8bb
PB
7789 break;
7790 case 2: /* Dual multiply add. */
7791 case 4: /* Dual multiply subtract. */
7792 if (op)
d9ba4830
PB
7793 gen_swap_half(tmp2);
7794 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7795 /* This addition cannot overflow. */
7796 if (insn & (1 << 22)) {
d9ba4830 7797 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7798 } else {
d9ba4830 7799 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7800 }
d9ba4830 7801 dead_tmp(tmp2);
9ee6e8bb
PB
7802 if (rs != 15)
7803 {
d9ba4830
PB
7804 tmp2 = load_reg(s, rs);
7805 gen_helper_add_setq(tmp, tmp, tmp2);
7806 dead_tmp(tmp2);
9ee6e8bb 7807 }
9ee6e8bb
PB
7808 break;
7809 case 3: /* 32 * 16 -> 32msb */
7810 if (op)
d9ba4830 7811 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7812 else
d9ba4830 7813 gen_sxth(tmp2);
a7812ae4
PB
7814 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7815 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7816 tmp = new_tmp();
a7812ae4 7817 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7818 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7819 if (rs != 15)
7820 {
d9ba4830
PB
7821 tmp2 = load_reg(s, rs);
7822 gen_helper_add_setq(tmp, tmp, tmp2);
7823 dead_tmp(tmp2);
9ee6e8bb 7824 }
9ee6e8bb
PB
7825 break;
7826 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7827 gen_imull(tmp, tmp2);
7828 if (insn & (1 << 5)) {
7829 gen_roundqd(tmp, tmp2);
7830 dead_tmp(tmp2);
7831 } else {
7832 dead_tmp(tmp);
7833 tmp = tmp2;
7834 }
9ee6e8bb 7835 if (rs != 15) {
d9ba4830 7836 tmp2 = load_reg(s, rs);
9ee6e8bb 7837 if (insn & (1 << 21)) {
d9ba4830 7838 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7839 } else {
d9ba4830 7840 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7841 }
d9ba4830 7842 dead_tmp(tmp2);
2c0262af 7843 }
9ee6e8bb
PB
7844 break;
7845 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7846 gen_helper_usad8(tmp, tmp, tmp2);
7847 dead_tmp(tmp2);
9ee6e8bb 7848 if (rs != 15) {
d9ba4830
PB
7849 tmp2 = load_reg(s, rs);
7850 tcg_gen_add_i32(tmp, tmp, tmp2);
7851 dead_tmp(tmp2);
5fd46862 7852 }
9ee6e8bb 7853 break;
2c0262af 7854 }
d9ba4830 7855 store_reg(s, rd, tmp);
2c0262af 7856 break;
9ee6e8bb
PB
7857 case 6: case 7: /* 64-bit multiply, Divide. */
7858 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7859 tmp = load_reg(s, rn);
7860 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7861 if ((op & 0x50) == 0x10) {
7862 /* sdiv, udiv */
7863 if (!arm_feature(env, ARM_FEATURE_DIV))
7864 goto illegal_op;
7865 if (op & 0x20)
5e3f878a 7866 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7867 else
5e3f878a
PB
7868 gen_helper_sdiv(tmp, tmp, tmp2);
7869 dead_tmp(tmp2);
7870 store_reg(s, rd, tmp);
9ee6e8bb
PB
7871 } else if ((op & 0xe) == 0xc) {
7872 /* Dual multiply accumulate long. */
7873 if (op & 1)
5e3f878a
PB
7874 gen_swap_half(tmp2);
7875 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7876 if (op & 0x10) {
5e3f878a 7877 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7878 } else {
5e3f878a 7879 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7880 }
5e3f878a 7881 dead_tmp(tmp2);
a7812ae4
PB
7882 /* BUGFIX */
7883 tmp64 = tcg_temp_new_i64();
7884 tcg_gen_ext_i32_i64(tmp64, tmp);
7885 dead_tmp(tmp);
7886 gen_addq(s, tmp64, rs, rd);
7887 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7888 tcg_temp_free_i64(tmp64);
2c0262af 7889 } else {
9ee6e8bb
PB
7890 if (op & 0x20) {
7891 /* Unsigned 64-bit multiply */
a7812ae4 7892 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7893 } else {
9ee6e8bb
PB
7894 if (op & 8) {
7895 /* smlalxy */
5e3f878a
PB
7896 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7897 dead_tmp(tmp2);
a7812ae4
PB
7898 tmp64 = tcg_temp_new_i64();
7899 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7900 dead_tmp(tmp);
9ee6e8bb
PB
7901 } else {
7902 /* Signed 64-bit multiply */
a7812ae4 7903 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7904 }
b5ff1b31 7905 }
9ee6e8bb
PB
7906 if (op & 4) {
7907 /* umaal */
a7812ae4
PB
7908 gen_addq_lo(s, tmp64, rs);
7909 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7910 } else if (op & 0x40) {
7911 /* 64-bit accumulate. */
a7812ae4 7912 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7913 }
a7812ae4 7914 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7915 tcg_temp_free_i64(tmp64);
5fd46862 7916 }
2c0262af 7917 break;
9ee6e8bb
PB
7918 }
7919 break;
7920 case 6: case 7: case 14: case 15:
7921 /* Coprocessor. */
7922 if (((insn >> 24) & 3) == 3) {
7923 /* Translate into the equivalent ARM encoding. */
7924 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7925 if (disas_neon_data_insn(env, s, insn))
7926 goto illegal_op;
7927 } else {
7928 if (insn & (1 << 28))
7929 goto illegal_op;
7930 if (disas_coproc_insn (env, s, insn))
7931 goto illegal_op;
7932 }
7933 break;
7934 case 8: case 9: case 10: case 11:
7935 if (insn & (1 << 15)) {
7936 /* Branches, misc control. */
7937 if (insn & 0x5000) {
7938 /* Unconditional branch. */
7939 /* signextend(hw1[10:0]) -> offset[:12]. */
7940 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7941 /* hw1[10:0] -> offset[11:1]. */
7942 offset |= (insn & 0x7ff) << 1;
7943 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7944 offset[24:22] already have the same value because of the
7945 sign extension above. */
7946 offset ^= ((~insn) & (1 << 13)) << 10;
7947 offset ^= ((~insn) & (1 << 11)) << 11;
7948
9ee6e8bb
PB
7949 if (insn & (1 << 14)) {
7950 /* Branch and link. */
3174f8e9 7951 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7952 }
3b46e624 7953
b0109805 7954 offset += s->pc;
9ee6e8bb
PB
7955 if (insn & (1 << 12)) {
7956 /* b/bl */
b0109805 7957 gen_jmp(s, offset);
9ee6e8bb
PB
7958 } else {
7959 /* blx */
b0109805
PB
7960 offset &= ~(uint32_t)2;
7961 gen_bx_im(s, offset);
2c0262af 7962 }
9ee6e8bb
PB
7963 } else if (((insn >> 23) & 7) == 7) {
7964 /* Misc control */
7965 if (insn & (1 << 13))
7966 goto illegal_op;
7967
7968 if (insn & (1 << 26)) {
7969 /* Secure monitor call (v6Z) */
7970 goto illegal_op; /* not implemented. */
2c0262af 7971 } else {
9ee6e8bb
PB
7972 op = (insn >> 20) & 7;
7973 switch (op) {
7974 case 0: /* msr cpsr. */
7975 if (IS_M(env)) {
8984bd2e
PB
7976 tmp = load_reg(s, rn);
7977 addr = tcg_const_i32(insn & 0xff);
7978 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6
JR
7979 tcg_temp_free_i32(addr);
7980 dead_tmp(tmp);
9ee6e8bb
PB
7981 gen_lookup_tb(s);
7982 break;
7983 }
7984 /* fall through */
7985 case 1: /* msr spsr. */
7986 if (IS_M(env))
7987 goto illegal_op;
2fbac54b
FN
7988 tmp = load_reg(s, rn);
7989 if (gen_set_psr(s,
9ee6e8bb 7990 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7991 op == 1, tmp))
9ee6e8bb
PB
7992 goto illegal_op;
7993 break;
7994 case 2: /* cps, nop-hint. */
7995 if (((insn >> 8) & 7) == 0) {
7996 gen_nop_hint(s, insn & 0xff);
7997 }
7998 /* Implemented as NOP in user mode. */
7999 if (IS_USER(s))
8000 break;
8001 offset = 0;
8002 imm = 0;
8003 if (insn & (1 << 10)) {
8004 if (insn & (1 << 7))
8005 offset |= CPSR_A;
8006 if (insn & (1 << 6))
8007 offset |= CPSR_I;
8008 if (insn & (1 << 5))
8009 offset |= CPSR_F;
8010 if (insn & (1 << 9))
8011 imm = CPSR_A | CPSR_I | CPSR_F;
8012 }
8013 if (insn & (1 << 8)) {
8014 offset |= 0x1f;
8015 imm |= (insn & 0x1f);
8016 }
8017 if (offset) {
2fbac54b 8018 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8019 }
8020 break;
8021 case 3: /* Special control operations. */
426f5abc 8022 ARCH(7);
9ee6e8bb
PB
8023 op = (insn >> 4) & 0xf;
8024 switch (op) {
8025 case 2: /* clrex */
426f5abc 8026 gen_clrex(s);
9ee6e8bb
PB
8027 break;
8028 case 4: /* dsb */
8029 case 5: /* dmb */
8030 case 6: /* isb */
8031 /* These execute as NOPs. */
9ee6e8bb
PB
8032 break;
8033 default:
8034 goto illegal_op;
8035 }
8036 break;
8037 case 4: /* bxj */
8038 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8039 tmp = load_reg(s, rn);
8040 gen_bx(s, tmp);
9ee6e8bb
PB
8041 break;
8042 case 5: /* Exception return. */
b8b45b68
RV
8043 if (IS_USER(s)) {
8044 goto illegal_op;
8045 }
8046 if (rn != 14 || rd != 15) {
8047 goto illegal_op;
8048 }
8049 tmp = load_reg(s, rn);
8050 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8051 gen_exception_return(s, tmp);
8052 break;
9ee6e8bb 8053 case 6: /* mrs cpsr. */
8984bd2e 8054 tmp = new_tmp();
9ee6e8bb 8055 if (IS_M(env)) {
8984bd2e
PB
8056 addr = tcg_const_i32(insn & 0xff);
8057 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8058 tcg_temp_free_i32(addr);
9ee6e8bb 8059 } else {
8984bd2e 8060 gen_helper_cpsr_read(tmp);
9ee6e8bb 8061 }
8984bd2e 8062 store_reg(s, rd, tmp);
9ee6e8bb
PB
8063 break;
8064 case 7: /* mrs spsr. */
8065 /* Not accessible in user mode. */
8066 if (IS_USER(s) || IS_M(env))
8067 goto illegal_op;
d9ba4830
PB
8068 tmp = load_cpu_field(spsr);
8069 store_reg(s, rd, tmp);
9ee6e8bb 8070 break;
2c0262af
FB
8071 }
8072 }
9ee6e8bb
PB
8073 } else {
8074 /* Conditional branch. */
8075 op = (insn >> 22) & 0xf;
8076 /* Generate a conditional jump to next instruction. */
8077 s->condlabel = gen_new_label();
d9ba4830 8078 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8079 s->condjmp = 1;
8080
8081 /* offset[11:1] = insn[10:0] */
8082 offset = (insn & 0x7ff) << 1;
8083 /* offset[17:12] = insn[21:16]. */
8084 offset |= (insn & 0x003f0000) >> 4;
8085 /* offset[31:20] = insn[26]. */
8086 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8087 /* offset[18] = insn[13]. */
8088 offset |= (insn & (1 << 13)) << 5;
8089 /* offset[19] = insn[11]. */
8090 offset |= (insn & (1 << 11)) << 8;
8091
8092 /* jump to the offset */
b0109805 8093 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8094 }
8095 } else {
8096 /* Data processing immediate. */
8097 if (insn & (1 << 25)) {
8098 if (insn & (1 << 24)) {
8099 if (insn & (1 << 20))
8100 goto illegal_op;
8101 /* Bitfield/Saturate. */
8102 op = (insn >> 21) & 7;
8103 imm = insn & 0x1f;
8104 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
8105 if (rn == 15) {
8106 tmp = new_tmp();
8107 tcg_gen_movi_i32(tmp, 0);
8108 } else {
8109 tmp = load_reg(s, rn);
8110 }
9ee6e8bb
PB
8111 switch (op) {
8112 case 2: /* Signed bitfield extract. */
8113 imm++;
8114 if (shift + imm > 32)
8115 goto illegal_op;
8116 if (imm < 32)
6ddbc6e4 8117 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8118 break;
8119 case 6: /* Unsigned bitfield extract. */
8120 imm++;
8121 if (shift + imm > 32)
8122 goto illegal_op;
8123 if (imm < 32)
6ddbc6e4 8124 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8125 break;
8126 case 3: /* Bitfield insert/clear. */
8127 if (imm < shift)
8128 goto illegal_op;
8129 imm = imm + 1 - shift;
8130 if (imm != 32) {
6ddbc6e4 8131 tmp2 = load_reg(s, rd);
8f8e3aa4 8132 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 8133 dead_tmp(tmp2);
9ee6e8bb
PB
8134 }
8135 break;
8136 case 7:
8137 goto illegal_op;
8138 default: /* Saturate. */
9ee6e8bb
PB
8139 if (shift) {
8140 if (op & 1)
6ddbc6e4 8141 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8142 else
6ddbc6e4 8143 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8144 }
6ddbc6e4 8145 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8146 if (op & 4) {
8147 /* Unsigned. */
9ee6e8bb 8148 if ((op & 1) && shift == 0)
6ddbc6e4 8149 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8150 else
6ddbc6e4 8151 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8152 } else {
9ee6e8bb 8153 /* Signed. */
9ee6e8bb 8154 if ((op & 1) && shift == 0)
6ddbc6e4 8155 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8156 else
6ddbc6e4 8157 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8158 }
b75263d6 8159 tcg_temp_free_i32(tmp2);
9ee6e8bb 8160 break;
2c0262af 8161 }
6ddbc6e4 8162 store_reg(s, rd, tmp);
9ee6e8bb
PB
8163 } else {
8164 imm = ((insn & 0x04000000) >> 15)
8165 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8166 if (insn & (1 << 22)) {
8167 /* 16-bit immediate. */
8168 imm |= (insn >> 4) & 0xf000;
8169 if (insn & (1 << 23)) {
8170 /* movt */
5e3f878a 8171 tmp = load_reg(s, rd);
86831435 8172 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8173 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8174 } else {
9ee6e8bb 8175 /* movw */
5e3f878a
PB
8176 tmp = new_tmp();
8177 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8178 }
8179 } else {
9ee6e8bb
PB
8180 /* Add/sub 12-bit immediate. */
8181 if (rn == 15) {
b0109805 8182 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8183 if (insn & (1 << 23))
b0109805 8184 offset -= imm;
9ee6e8bb 8185 else
b0109805 8186 offset += imm;
5e3f878a
PB
8187 tmp = new_tmp();
8188 tcg_gen_movi_i32(tmp, offset);
2c0262af 8189 } else {
5e3f878a 8190 tmp = load_reg(s, rn);
9ee6e8bb 8191 if (insn & (1 << 23))
5e3f878a 8192 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8193 else
5e3f878a 8194 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8195 }
9ee6e8bb 8196 }
5e3f878a 8197 store_reg(s, rd, tmp);
191abaa2 8198 }
9ee6e8bb
PB
8199 } else {
8200 int shifter_out = 0;
8201 /* modified 12-bit immediate. */
8202 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8203 imm = (insn & 0xff);
8204 switch (shift) {
8205 case 0: /* XY */
8206 /* Nothing to do. */
8207 break;
8208 case 1: /* 00XY00XY */
8209 imm |= imm << 16;
8210 break;
8211 case 2: /* XY00XY00 */
8212 imm |= imm << 16;
8213 imm <<= 8;
8214 break;
8215 case 3: /* XYXYXYXY */
8216 imm |= imm << 16;
8217 imm |= imm << 8;
8218 break;
8219 default: /* Rotated constant. */
8220 shift = (shift << 1) | (imm >> 7);
8221 imm |= 0x80;
8222 imm = imm << (32 - shift);
8223 shifter_out = 1;
8224 break;
b5ff1b31 8225 }
3174f8e9
FN
8226 tmp2 = new_tmp();
8227 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8228 rn = (insn >> 16) & 0xf;
3174f8e9
FN
8229 if (rn == 15) {
8230 tmp = new_tmp();
8231 tcg_gen_movi_i32(tmp, 0);
8232 } else {
8233 tmp = load_reg(s, rn);
8234 }
9ee6e8bb
PB
8235 op = (insn >> 21) & 0xf;
8236 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8237 shifter_out, tmp, tmp2))
9ee6e8bb 8238 goto illegal_op;
3174f8e9 8239 dead_tmp(tmp2);
9ee6e8bb
PB
8240 rd = (insn >> 8) & 0xf;
8241 if (rd != 15) {
3174f8e9
FN
8242 store_reg(s, rd, tmp);
8243 } else {
8244 dead_tmp(tmp);
2c0262af 8245 }
2c0262af 8246 }
9ee6e8bb
PB
8247 }
8248 break;
8249 case 12: /* Load/store single data item. */
8250 {
8251 int postinc = 0;
8252 int writeback = 0;
b0109805 8253 int user;
9ee6e8bb
PB
8254 if ((insn & 0x01100000) == 0x01000000) {
8255 if (disas_neon_ls_insn(env, s, insn))
c1713132 8256 goto illegal_op;
9ee6e8bb
PB
8257 break;
8258 }
b0109805 8259 user = IS_USER(s);
9ee6e8bb 8260 if (rn == 15) {
b0109805 8261 addr = new_tmp();
9ee6e8bb
PB
8262 /* PC relative. */
8263 /* s->pc has already been incremented by 4. */
8264 imm = s->pc & 0xfffffffc;
8265 if (insn & (1 << 23))
8266 imm += insn & 0xfff;
8267 else
8268 imm -= insn & 0xfff;
b0109805 8269 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8270 } else {
b0109805 8271 addr = load_reg(s, rn);
9ee6e8bb
PB
8272 if (insn & (1 << 23)) {
8273 /* Positive offset. */
8274 imm = insn & 0xfff;
b0109805 8275 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8276 } else {
8277 op = (insn >> 8) & 7;
8278 imm = insn & 0xff;
8279 switch (op) {
8280 case 0: case 8: /* Shifted Register. */
8281 shift = (insn >> 4) & 0xf;
8282 if (shift > 3)
18c9b560 8283 goto illegal_op;
b26eefb6 8284 tmp = load_reg(s, rm);
9ee6e8bb 8285 if (shift)
b26eefb6 8286 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8287 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8288 dead_tmp(tmp);
9ee6e8bb
PB
8289 break;
8290 case 4: /* Negative offset. */
b0109805 8291 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8292 break;
8293 case 6: /* User privilege. */
b0109805
PB
8294 tcg_gen_addi_i32(addr, addr, imm);
8295 user = 1;
9ee6e8bb
PB
8296 break;
8297 case 1: /* Post-decrement. */
8298 imm = -imm;
8299 /* Fall through. */
8300 case 3: /* Post-increment. */
9ee6e8bb
PB
8301 postinc = 1;
8302 writeback = 1;
8303 break;
8304 case 5: /* Pre-decrement. */
8305 imm = -imm;
8306 /* Fall through. */
8307 case 7: /* Pre-increment. */
b0109805 8308 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8309 writeback = 1;
8310 break;
8311 default:
b7bcbe95 8312 goto illegal_op;
9ee6e8bb
PB
8313 }
8314 }
8315 }
8316 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8317 if (insn & (1 << 20)) {
8318 /* Load. */
8319 if (rs == 15 && op != 2) {
8320 if (op & 2)
b5ff1b31 8321 goto illegal_op;
9ee6e8bb
PB
8322 /* Memory hint. Implemented as NOP. */
8323 } else {
8324 switch (op) {
b0109805
PB
8325 case 0: tmp = gen_ld8u(addr, user); break;
8326 case 4: tmp = gen_ld8s(addr, user); break;
8327 case 1: tmp = gen_ld16u(addr, user); break;
8328 case 5: tmp = gen_ld16s(addr, user); break;
8329 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8330 default: goto illegal_op;
8331 }
8332 if (rs == 15) {
b0109805 8333 gen_bx(s, tmp);
9ee6e8bb 8334 } else {
b0109805 8335 store_reg(s, rs, tmp);
9ee6e8bb
PB
8336 }
8337 }
8338 } else {
8339 /* Store. */
8340 if (rs == 15)
b7bcbe95 8341 goto illegal_op;
b0109805 8342 tmp = load_reg(s, rs);
9ee6e8bb 8343 switch (op) {
b0109805
PB
8344 case 0: gen_st8(tmp, addr, user); break;
8345 case 1: gen_st16(tmp, addr, user); break;
8346 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8347 default: goto illegal_op;
b7bcbe95 8348 }
2c0262af 8349 }
9ee6e8bb 8350 if (postinc)
b0109805
PB
8351 tcg_gen_addi_i32(addr, addr, imm);
8352 if (writeback) {
8353 store_reg(s, rn, addr);
8354 } else {
8355 dead_tmp(addr);
8356 }
9ee6e8bb
PB
8357 }
8358 break;
8359 default:
8360 goto illegal_op;
2c0262af 8361 }
9ee6e8bb
PB
8362 return 0;
8363illegal_op:
8364 return 1;
2c0262af
FB
8365}
8366
9ee6e8bb 8367static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8368{
8369 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8370 int32_t offset;
8371 int i;
b26eefb6 8372 TCGv tmp;
d9ba4830 8373 TCGv tmp2;
b0109805 8374 TCGv addr;
99c475ab 8375
9ee6e8bb
PB
8376 if (s->condexec_mask) {
8377 cond = s->condexec_cond;
bedd2912
JB
8378 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8379 s->condlabel = gen_new_label();
8380 gen_test_cc(cond ^ 1, s->condlabel);
8381 s->condjmp = 1;
8382 }
9ee6e8bb
PB
8383 }
8384
b5ff1b31 8385 insn = lduw_code(s->pc);
99c475ab 8386 s->pc += 2;
b5ff1b31 8387
99c475ab
FB
8388 switch (insn >> 12) {
8389 case 0: case 1:
396e467c 8390
99c475ab
FB
8391 rd = insn & 7;
8392 op = (insn >> 11) & 3;
8393 if (op == 3) {
8394 /* add/subtract */
8395 rn = (insn >> 3) & 7;
396e467c 8396 tmp = load_reg(s, rn);
99c475ab
FB
8397 if (insn & (1 << 10)) {
8398 /* immediate */
396e467c
FN
8399 tmp2 = new_tmp();
8400 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8401 } else {
8402 /* reg */
8403 rm = (insn >> 6) & 7;
396e467c 8404 tmp2 = load_reg(s, rm);
99c475ab 8405 }
9ee6e8bb
PB
8406 if (insn & (1 << 9)) {
8407 if (s->condexec_mask)
396e467c 8408 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8409 else
396e467c 8410 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8411 } else {
8412 if (s->condexec_mask)
396e467c 8413 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8414 else
396e467c 8415 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8416 }
396e467c
FN
8417 dead_tmp(tmp2);
8418 store_reg(s, rd, tmp);
99c475ab
FB
8419 } else {
8420 /* shift immediate */
8421 rm = (insn >> 3) & 7;
8422 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8423 tmp = load_reg(s, rm);
8424 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8425 if (!s->condexec_mask)
8426 gen_logic_CC(tmp);
8427 store_reg(s, rd, tmp);
99c475ab
FB
8428 }
8429 break;
8430 case 2: case 3:
8431 /* arithmetic large immediate */
8432 op = (insn >> 11) & 3;
8433 rd = (insn >> 8) & 0x7;
396e467c
FN
8434 if (op == 0) { /* mov */
8435 tmp = new_tmp();
8436 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8437 if (!s->condexec_mask)
396e467c
FN
8438 gen_logic_CC(tmp);
8439 store_reg(s, rd, tmp);
8440 } else {
8441 tmp = load_reg(s, rd);
8442 tmp2 = new_tmp();
8443 tcg_gen_movi_i32(tmp2, insn & 0xff);
8444 switch (op) {
8445 case 1: /* cmp */
8446 gen_helper_sub_cc(tmp, tmp, tmp2);
8447 dead_tmp(tmp);
8448 dead_tmp(tmp2);
8449 break;
8450 case 2: /* add */
8451 if (s->condexec_mask)
8452 tcg_gen_add_i32(tmp, tmp, tmp2);
8453 else
8454 gen_helper_add_cc(tmp, tmp, tmp2);
8455 dead_tmp(tmp2);
8456 store_reg(s, rd, tmp);
8457 break;
8458 case 3: /* sub */
8459 if (s->condexec_mask)
8460 tcg_gen_sub_i32(tmp, tmp, tmp2);
8461 else
8462 gen_helper_sub_cc(tmp, tmp, tmp2);
8463 dead_tmp(tmp2);
8464 store_reg(s, rd, tmp);
8465 break;
8466 }
99c475ab 8467 }
99c475ab
FB
8468 break;
8469 case 4:
8470 if (insn & (1 << 11)) {
8471 rd = (insn >> 8) & 7;
5899f386
FB
8472 /* load pc-relative. Bit 1 of PC is ignored. */
8473 val = s->pc + 2 + ((insn & 0xff) * 4);
8474 val &= ~(uint32_t)2;
b0109805
PB
8475 addr = new_tmp();
8476 tcg_gen_movi_i32(addr, val);
8477 tmp = gen_ld32(addr, IS_USER(s));
8478 dead_tmp(addr);
8479 store_reg(s, rd, tmp);
99c475ab
FB
8480 break;
8481 }
8482 if (insn & (1 << 10)) {
8483 /* data processing extended or blx */
8484 rd = (insn & 7) | ((insn >> 4) & 8);
8485 rm = (insn >> 3) & 0xf;
8486 op = (insn >> 8) & 3;
8487 switch (op) {
8488 case 0: /* add */
396e467c
FN
8489 tmp = load_reg(s, rd);
8490 tmp2 = load_reg(s, rm);
8491 tcg_gen_add_i32(tmp, tmp, tmp2);
8492 dead_tmp(tmp2);
8493 store_reg(s, rd, tmp);
99c475ab
FB
8494 break;
8495 case 1: /* cmp */
396e467c
FN
8496 tmp = load_reg(s, rd);
8497 tmp2 = load_reg(s, rm);
8498 gen_helper_sub_cc(tmp, tmp, tmp2);
8499 dead_tmp(tmp2);
8500 dead_tmp(tmp);
99c475ab
FB
8501 break;
8502 case 2: /* mov/cpy */
396e467c
FN
8503 tmp = load_reg(s, rm);
8504 store_reg(s, rd, tmp);
99c475ab
FB
8505 break;
8506 case 3:/* branch [and link] exchange thumb register */
b0109805 8507 tmp = load_reg(s, rm);
99c475ab
FB
8508 if (insn & (1 << 7)) {
8509 val = (uint32_t)s->pc | 1;
b0109805
PB
8510 tmp2 = new_tmp();
8511 tcg_gen_movi_i32(tmp2, val);
8512 store_reg(s, 14, tmp2);
99c475ab 8513 }
d9ba4830 8514 gen_bx(s, tmp);
99c475ab
FB
8515 break;
8516 }
8517 break;
8518 }
8519
8520 /* data processing register */
8521 rd = insn & 7;
8522 rm = (insn >> 3) & 7;
8523 op = (insn >> 6) & 0xf;
8524 if (op == 2 || op == 3 || op == 4 || op == 7) {
8525 /* the shift/rotate ops want the operands backwards */
8526 val = rm;
8527 rm = rd;
8528 rd = val;
8529 val = 1;
8530 } else {
8531 val = 0;
8532 }
8533
396e467c
FN
8534 if (op == 9) { /* neg */
8535 tmp = new_tmp();
8536 tcg_gen_movi_i32(tmp, 0);
8537 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8538 tmp = load_reg(s, rd);
8539 } else {
8540 TCGV_UNUSED(tmp);
8541 }
99c475ab 8542
396e467c 8543 tmp2 = load_reg(s, rm);
5899f386 8544 switch (op) {
99c475ab 8545 case 0x0: /* and */
396e467c 8546 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8547 if (!s->condexec_mask)
396e467c 8548 gen_logic_CC(tmp);
99c475ab
FB
8549 break;
8550 case 0x1: /* eor */
396e467c 8551 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8552 if (!s->condexec_mask)
396e467c 8553 gen_logic_CC(tmp);
99c475ab
FB
8554 break;
8555 case 0x2: /* lsl */
9ee6e8bb 8556 if (s->condexec_mask) {
396e467c 8557 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8558 } else {
396e467c
FN
8559 gen_helper_shl_cc(tmp2, tmp2, tmp);
8560 gen_logic_CC(tmp2);
9ee6e8bb 8561 }
99c475ab
FB
8562 break;
8563 case 0x3: /* lsr */
9ee6e8bb 8564 if (s->condexec_mask) {
396e467c 8565 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8566 } else {
396e467c
FN
8567 gen_helper_shr_cc(tmp2, tmp2, tmp);
8568 gen_logic_CC(tmp2);
9ee6e8bb 8569 }
99c475ab
FB
8570 break;
8571 case 0x4: /* asr */
9ee6e8bb 8572 if (s->condexec_mask) {
396e467c 8573 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8574 } else {
396e467c
FN
8575 gen_helper_sar_cc(tmp2, tmp2, tmp);
8576 gen_logic_CC(tmp2);
9ee6e8bb 8577 }
99c475ab
FB
8578 break;
8579 case 0x5: /* adc */
9ee6e8bb 8580 if (s->condexec_mask)
396e467c 8581 gen_adc(tmp, tmp2);
9ee6e8bb 8582 else
396e467c 8583 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8584 break;
8585 case 0x6: /* sbc */
9ee6e8bb 8586 if (s->condexec_mask)
396e467c 8587 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8588 else
396e467c 8589 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8590 break;
8591 case 0x7: /* ror */
9ee6e8bb 8592 if (s->condexec_mask) {
f669df27
AJ
8593 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8594 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8595 } else {
396e467c
FN
8596 gen_helper_ror_cc(tmp2, tmp2, tmp);
8597 gen_logic_CC(tmp2);
9ee6e8bb 8598 }
99c475ab
FB
8599 break;
8600 case 0x8: /* tst */
396e467c
FN
8601 tcg_gen_and_i32(tmp, tmp, tmp2);
8602 gen_logic_CC(tmp);
99c475ab 8603 rd = 16;
5899f386 8604 break;
99c475ab 8605 case 0x9: /* neg */
9ee6e8bb 8606 if (s->condexec_mask)
396e467c 8607 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8608 else
396e467c 8609 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8610 break;
8611 case 0xa: /* cmp */
396e467c 8612 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8613 rd = 16;
8614 break;
8615 case 0xb: /* cmn */
396e467c 8616 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8617 rd = 16;
8618 break;
8619 case 0xc: /* orr */
396e467c 8620 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8621 if (!s->condexec_mask)
396e467c 8622 gen_logic_CC(tmp);
99c475ab
FB
8623 break;
8624 case 0xd: /* mul */
7b2919a0 8625 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8626 if (!s->condexec_mask)
396e467c 8627 gen_logic_CC(tmp);
99c475ab
FB
8628 break;
8629 case 0xe: /* bic */
f669df27 8630 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8631 if (!s->condexec_mask)
396e467c 8632 gen_logic_CC(tmp);
99c475ab
FB
8633 break;
8634 case 0xf: /* mvn */
396e467c 8635 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8636 if (!s->condexec_mask)
396e467c 8637 gen_logic_CC(tmp2);
99c475ab 8638 val = 1;
5899f386 8639 rm = rd;
99c475ab
FB
8640 break;
8641 }
8642 if (rd != 16) {
396e467c
FN
8643 if (val) {
8644 store_reg(s, rm, tmp2);
8645 if (op != 0xf)
8646 dead_tmp(tmp);
8647 } else {
8648 store_reg(s, rd, tmp);
8649 dead_tmp(tmp2);
8650 }
8651 } else {
8652 dead_tmp(tmp);
8653 dead_tmp(tmp2);
99c475ab
FB
8654 }
8655 break;
8656
8657 case 5:
8658 /* load/store register offset. */
8659 rd = insn & 7;
8660 rn = (insn >> 3) & 7;
8661 rm = (insn >> 6) & 7;
8662 op = (insn >> 9) & 7;
b0109805 8663 addr = load_reg(s, rn);
b26eefb6 8664 tmp = load_reg(s, rm);
b0109805 8665 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8666 dead_tmp(tmp);
99c475ab
FB
8667
8668 if (op < 3) /* store */
b0109805 8669 tmp = load_reg(s, rd);
99c475ab
FB
8670
8671 switch (op) {
8672 case 0: /* str */
b0109805 8673 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8674 break;
8675 case 1: /* strh */
b0109805 8676 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8677 break;
8678 case 2: /* strb */
b0109805 8679 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8680 break;
8681 case 3: /* ldrsb */
b0109805 8682 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8683 break;
8684 case 4: /* ldr */
b0109805 8685 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8686 break;
8687 case 5: /* ldrh */
b0109805 8688 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8689 break;
8690 case 6: /* ldrb */
b0109805 8691 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8692 break;
8693 case 7: /* ldrsh */
b0109805 8694 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8695 break;
8696 }
8697 if (op >= 3) /* load */
b0109805
PB
8698 store_reg(s, rd, tmp);
8699 dead_tmp(addr);
99c475ab
FB
8700 break;
8701
8702 case 6:
8703 /* load/store word immediate offset */
8704 rd = insn & 7;
8705 rn = (insn >> 3) & 7;
b0109805 8706 addr = load_reg(s, rn);
99c475ab 8707 val = (insn >> 4) & 0x7c;
b0109805 8708 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8709
8710 if (insn & (1 << 11)) {
8711 /* load */
b0109805
PB
8712 tmp = gen_ld32(addr, IS_USER(s));
8713 store_reg(s, rd, tmp);
99c475ab
FB
8714 } else {
8715 /* store */
b0109805
PB
8716 tmp = load_reg(s, rd);
8717 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8718 }
b0109805 8719 dead_tmp(addr);
99c475ab
FB
8720 break;
8721
8722 case 7:
8723 /* load/store byte immediate offset */
8724 rd = insn & 7;
8725 rn = (insn >> 3) & 7;
b0109805 8726 addr = load_reg(s, rn);
99c475ab 8727 val = (insn >> 6) & 0x1f;
b0109805 8728 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8729
8730 if (insn & (1 << 11)) {
8731 /* load */
b0109805
PB
8732 tmp = gen_ld8u(addr, IS_USER(s));
8733 store_reg(s, rd, tmp);
99c475ab
FB
8734 } else {
8735 /* store */
b0109805
PB
8736 tmp = load_reg(s, rd);
8737 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8738 }
b0109805 8739 dead_tmp(addr);
99c475ab
FB
8740 break;
8741
8742 case 8:
8743 /* load/store halfword immediate offset */
8744 rd = insn & 7;
8745 rn = (insn >> 3) & 7;
b0109805 8746 addr = load_reg(s, rn);
99c475ab 8747 val = (insn >> 5) & 0x3e;
b0109805 8748 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8749
8750 if (insn & (1 << 11)) {
8751 /* load */
b0109805
PB
8752 tmp = gen_ld16u(addr, IS_USER(s));
8753 store_reg(s, rd, tmp);
99c475ab
FB
8754 } else {
8755 /* store */
b0109805
PB
8756 tmp = load_reg(s, rd);
8757 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8758 }
b0109805 8759 dead_tmp(addr);
99c475ab
FB
8760 break;
8761
8762 case 9:
8763 /* load/store from stack */
8764 rd = (insn >> 8) & 7;
b0109805 8765 addr = load_reg(s, 13);
99c475ab 8766 val = (insn & 0xff) * 4;
b0109805 8767 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8768
8769 if (insn & (1 << 11)) {
8770 /* load */
b0109805
PB
8771 tmp = gen_ld32(addr, IS_USER(s));
8772 store_reg(s, rd, tmp);
99c475ab
FB
8773 } else {
8774 /* store */
b0109805
PB
8775 tmp = load_reg(s, rd);
8776 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8777 }
b0109805 8778 dead_tmp(addr);
99c475ab
FB
8779 break;
8780
8781 case 10:
8782 /* add to high reg */
8783 rd = (insn >> 8) & 7;
5899f386
FB
8784 if (insn & (1 << 11)) {
8785 /* SP */
5e3f878a 8786 tmp = load_reg(s, 13);
5899f386
FB
8787 } else {
8788 /* PC. bit 1 is ignored. */
5e3f878a
PB
8789 tmp = new_tmp();
8790 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8791 }
99c475ab 8792 val = (insn & 0xff) * 4;
5e3f878a
PB
8793 tcg_gen_addi_i32(tmp, tmp, val);
8794 store_reg(s, rd, tmp);
99c475ab
FB
8795 break;
8796
8797 case 11:
8798 /* misc */
8799 op = (insn >> 8) & 0xf;
8800 switch (op) {
8801 case 0:
8802 /* adjust stack pointer */
b26eefb6 8803 tmp = load_reg(s, 13);
99c475ab
FB
8804 val = (insn & 0x7f) * 4;
8805 if (insn & (1 << 7))
6a0d8a1d 8806 val = -(int32_t)val;
b26eefb6
PB
8807 tcg_gen_addi_i32(tmp, tmp, val);
8808 store_reg(s, 13, tmp);
99c475ab
FB
8809 break;
8810
9ee6e8bb
PB
8811 case 2: /* sign/zero extend. */
8812 ARCH(6);
8813 rd = insn & 7;
8814 rm = (insn >> 3) & 7;
b0109805 8815 tmp = load_reg(s, rm);
9ee6e8bb 8816 switch ((insn >> 6) & 3) {
b0109805
PB
8817 case 0: gen_sxth(tmp); break;
8818 case 1: gen_sxtb(tmp); break;
8819 case 2: gen_uxth(tmp); break;
8820 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8821 }
b0109805 8822 store_reg(s, rd, tmp);
9ee6e8bb 8823 break;
99c475ab
FB
8824 case 4: case 5: case 0xc: case 0xd:
8825 /* push/pop */
b0109805 8826 addr = load_reg(s, 13);
5899f386
FB
8827 if (insn & (1 << 8))
8828 offset = 4;
99c475ab 8829 else
5899f386
FB
8830 offset = 0;
8831 for (i = 0; i < 8; i++) {
8832 if (insn & (1 << i))
8833 offset += 4;
8834 }
8835 if ((insn & (1 << 11)) == 0) {
b0109805 8836 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8837 }
99c475ab
FB
8838 for (i = 0; i < 8; i++) {
8839 if (insn & (1 << i)) {
8840 if (insn & (1 << 11)) {
8841 /* pop */
b0109805
PB
8842 tmp = gen_ld32(addr, IS_USER(s));
8843 store_reg(s, i, tmp);
99c475ab
FB
8844 } else {
8845 /* push */
b0109805
PB
8846 tmp = load_reg(s, i);
8847 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8848 }
5899f386 8849 /* advance to the next address. */
b0109805 8850 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8851 }
8852 }
a50f5b91 8853 TCGV_UNUSED(tmp);
99c475ab
FB
8854 if (insn & (1 << 8)) {
8855 if (insn & (1 << 11)) {
8856 /* pop pc */
b0109805 8857 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8858 /* don't set the pc until the rest of the instruction
8859 has completed */
8860 } else {
8861 /* push lr */
b0109805
PB
8862 tmp = load_reg(s, 14);
8863 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8864 }
b0109805 8865 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8866 }
5899f386 8867 if ((insn & (1 << 11)) == 0) {
b0109805 8868 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8869 }
99c475ab 8870 /* write back the new stack pointer */
b0109805 8871 store_reg(s, 13, addr);
99c475ab
FB
8872 /* set the new PC value */
8873 if ((insn & 0x0900) == 0x0900)
b0109805 8874 gen_bx(s, tmp);
99c475ab
FB
8875 break;
8876
9ee6e8bb
PB
8877 case 1: case 3: case 9: case 11: /* czb */
8878 rm = insn & 7;
d9ba4830 8879 tmp = load_reg(s, rm);
9ee6e8bb
PB
8880 s->condlabel = gen_new_label();
8881 s->condjmp = 1;
8882 if (insn & (1 << 11))
cb63669a 8883 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8884 else
cb63669a 8885 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8886 dead_tmp(tmp);
9ee6e8bb
PB
8887 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8888 val = (uint32_t)s->pc + 2;
8889 val += offset;
8890 gen_jmp(s, val);
8891 break;
8892
8893 case 15: /* IT, nop-hint. */
8894 if ((insn & 0xf) == 0) {
8895 gen_nop_hint(s, (insn >> 4) & 0xf);
8896 break;
8897 }
8898 /* If Then. */
8899 s->condexec_cond = (insn >> 4) & 0xe;
8900 s->condexec_mask = insn & 0x1f;
8901 /* No actual code generated for this insn, just setup state. */
8902 break;
8903
06c949e6 8904 case 0xe: /* bkpt */
9ee6e8bb 8905 gen_set_condexec(s);
5e3f878a 8906 gen_set_pc_im(s->pc - 2);
d9ba4830 8907 gen_exception(EXCP_BKPT);
06c949e6
PB
8908 s->is_jmp = DISAS_JUMP;
8909 break;
8910
9ee6e8bb
PB
8911 case 0xa: /* rev */
8912 ARCH(6);
8913 rn = (insn >> 3) & 0x7;
8914 rd = insn & 0x7;
b0109805 8915 tmp = load_reg(s, rn);
9ee6e8bb 8916 switch ((insn >> 6) & 3) {
66896cb8 8917 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8918 case 1: gen_rev16(tmp); break;
8919 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8920 default: goto illegal_op;
8921 }
b0109805 8922 store_reg(s, rd, tmp);
9ee6e8bb
PB
8923 break;
8924
8925 case 6: /* cps */
8926 ARCH(6);
8927 if (IS_USER(s))
8928 break;
8929 if (IS_M(env)) {
8984bd2e 8930 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8931 /* PRIMASK */
8984bd2e
PB
8932 if (insn & 1) {
8933 addr = tcg_const_i32(16);
8934 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8935 tcg_temp_free_i32(addr);
8984bd2e 8936 }
9ee6e8bb 8937 /* FAULTMASK */
8984bd2e
PB
8938 if (insn & 2) {
8939 addr = tcg_const_i32(17);
8940 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8941 tcg_temp_free_i32(addr);
8984bd2e 8942 }
b75263d6 8943 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8944 gen_lookup_tb(s);
8945 } else {
8946 if (insn & (1 << 4))
8947 shift = CPSR_A | CPSR_I | CPSR_F;
8948 else
8949 shift = 0;
fa26df03 8950 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
8951 }
8952 break;
8953
99c475ab
FB
8954 default:
8955 goto undef;
8956 }
8957 break;
8958
8959 case 12:
8960 /* load/store multiple */
8961 rn = (insn >> 8) & 0x7;
b0109805 8962 addr = load_reg(s, rn);
99c475ab
FB
8963 for (i = 0; i < 8; i++) {
8964 if (insn & (1 << i)) {
99c475ab
FB
8965 if (insn & (1 << 11)) {
8966 /* load */
b0109805
PB
8967 tmp = gen_ld32(addr, IS_USER(s));
8968 store_reg(s, i, tmp);
99c475ab
FB
8969 } else {
8970 /* store */
b0109805
PB
8971 tmp = load_reg(s, i);
8972 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8973 }
5899f386 8974 /* advance to the next address */
b0109805 8975 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8976 }
8977 }
5899f386 8978 /* Base register writeback. */
b0109805
PB
8979 if ((insn & (1 << rn)) == 0) {
8980 store_reg(s, rn, addr);
8981 } else {
8982 dead_tmp(addr);
8983 }
99c475ab
FB
8984 break;
8985
8986 case 13:
8987 /* conditional branch or swi */
8988 cond = (insn >> 8) & 0xf;
8989 if (cond == 0xe)
8990 goto undef;
8991
8992 if (cond == 0xf) {
8993 /* swi */
9ee6e8bb 8994 gen_set_condexec(s);
422ebf69 8995 gen_set_pc_im(s->pc);
9ee6e8bb 8996 s->is_jmp = DISAS_SWI;
99c475ab
FB
8997 break;
8998 }
8999 /* generate a conditional jump to next instruction */
e50e6a20 9000 s->condlabel = gen_new_label();
d9ba4830 9001 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9002 s->condjmp = 1;
99c475ab
FB
9003
9004 /* jump to the offset */
5899f386 9005 val = (uint32_t)s->pc + 2;
99c475ab 9006 offset = ((int32_t)insn << 24) >> 24;
5899f386 9007 val += offset << 1;
8aaca4c0 9008 gen_jmp(s, val);
99c475ab
FB
9009 break;
9010
9011 case 14:
358bf29e 9012 if (insn & (1 << 11)) {
9ee6e8bb
PB
9013 if (disas_thumb2_insn(env, s, insn))
9014 goto undef32;
358bf29e
PB
9015 break;
9016 }
9ee6e8bb 9017 /* unconditional branch */
99c475ab
FB
9018 val = (uint32_t)s->pc;
9019 offset = ((int32_t)insn << 21) >> 21;
9020 val += (offset << 1) + 2;
8aaca4c0 9021 gen_jmp(s, val);
99c475ab
FB
9022 break;
9023
9024 case 15:
9ee6e8bb 9025 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9026 goto undef32;
9ee6e8bb 9027 break;
99c475ab
FB
9028 }
9029 return;
9ee6e8bb
PB
9030undef32:
9031 gen_set_condexec(s);
5e3f878a 9032 gen_set_pc_im(s->pc - 4);
d9ba4830 9033 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
9034 s->is_jmp = DISAS_JUMP;
9035 return;
9036illegal_op:
99c475ab 9037undef:
9ee6e8bb 9038 gen_set_condexec(s);
5e3f878a 9039 gen_set_pc_im(s->pc - 2);
d9ba4830 9040 gen_exception(EXCP_UDEF);
99c475ab
FB
9041 s->is_jmp = DISAS_JUMP;
9042}
9043
2c0262af
FB
9044/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9045 basic block 'tb'. If search_pc is TRUE, also generate PC
9046 information for each intermediate instruction. */
2cfc5f17
TS
9047static inline void gen_intermediate_code_internal(CPUState *env,
9048 TranslationBlock *tb,
9049 int search_pc)
2c0262af
FB
9050{
9051 DisasContext dc1, *dc = &dc1;
a1d1bb31 9052 CPUBreakpoint *bp;
2c0262af
FB
9053 uint16_t *gen_opc_end;
9054 int j, lj;
0fa85d43 9055 target_ulong pc_start;
b5ff1b31 9056 uint32_t next_page_start;
2e70f6ef
PB
9057 int num_insns;
9058 int max_insns;
3b46e624 9059
2c0262af 9060 /* generate intermediate code */
b26eefb6 9061 num_temps = 0;
b26eefb6 9062
0fa85d43 9063 pc_start = tb->pc;
3b46e624 9064
2c0262af
FB
9065 dc->tb = tb;
9066
2c0262af 9067 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9068
9069 dc->is_jmp = DISAS_NEXT;
9070 dc->pc = pc_start;
8aaca4c0 9071 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9072 dc->condjmp = 0;
5899f386 9073 dc->thumb = env->thumb;
9ee6e8bb
PB
9074 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
9075 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 9076#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
9077 if (IS_M(env)) {
9078 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
9079 } else {
9080 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
9081 }
b5ff1b31 9082#endif
a7812ae4
PB
9083 cpu_F0s = tcg_temp_new_i32();
9084 cpu_F1s = tcg_temp_new_i32();
9085 cpu_F0d = tcg_temp_new_i64();
9086 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9087 cpu_V0 = cpu_F0d;
9088 cpu_V1 = cpu_F1d;
e677137d 9089 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9090 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9091 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9092 lj = -1;
2e70f6ef
PB
9093 num_insns = 0;
9094 max_insns = tb->cflags & CF_COUNT_MASK;
9095 if (max_insns == 0)
9096 max_insns = CF_COUNT_MASK;
9097
9098 gen_icount_start();
9ee6e8bb
PB
9099 /* Reset the conditional execution bits immediately. This avoids
9100 complications trying to do it at the end of the block. */
9101 if (env->condexec_bits)
8f01245e
PB
9102 {
9103 TCGv tmp = new_tmp();
9104 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9105 store_cpu_field(tmp, condexec_bits);
8f01245e 9106 }
2c0262af 9107 do {
fbb4a2e3
PB
9108#ifdef CONFIG_USER_ONLY
9109 /* Intercept jump to the magic kernel page. */
9110 if (dc->pc >= 0xffff0000) {
9111 /* We always get here via a jump, so know we are not in a
9112 conditional execution block. */
9113 gen_exception(EXCP_KERNEL_TRAP);
9114 dc->is_jmp = DISAS_UPDATE;
9115 break;
9116 }
9117#else
9ee6e8bb
PB
9118 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9119 /* We always get here via a jump, so know we are not in a
9120 conditional execution block. */
d9ba4830 9121 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9122 dc->is_jmp = DISAS_UPDATE;
9123 break;
9ee6e8bb
PB
9124 }
9125#endif
9126
72cf2d4f
BS
9127 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9128 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9129 if (bp->pc == dc->pc) {
9ee6e8bb 9130 gen_set_condexec(dc);
5e3f878a 9131 gen_set_pc_im(dc->pc);
d9ba4830 9132 gen_exception(EXCP_DEBUG);
1fddef4b 9133 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9134 /* Advance PC so that clearing the breakpoint will
9135 invalidate this TB. */
9136 dc->pc += 2;
9137 goto done_generating;
1fddef4b
FB
9138 break;
9139 }
9140 }
9141 }
2c0262af
FB
9142 if (search_pc) {
9143 j = gen_opc_ptr - gen_opc_buf;
9144 if (lj < j) {
9145 lj++;
9146 while (lj < j)
9147 gen_opc_instr_start[lj++] = 0;
9148 }
0fa85d43 9149 gen_opc_pc[lj] = dc->pc;
2c0262af 9150 gen_opc_instr_start[lj] = 1;
2e70f6ef 9151 gen_opc_icount[lj] = num_insns;
2c0262af 9152 }
e50e6a20 9153
2e70f6ef
PB
9154 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9155 gen_io_start();
9156
9ee6e8bb
PB
9157 if (env->thumb) {
9158 disas_thumb_insn(env, dc);
9159 if (dc->condexec_mask) {
9160 dc->condexec_cond = (dc->condexec_cond & 0xe)
9161 | ((dc->condexec_mask >> 4) & 1);
9162 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9163 if (dc->condexec_mask == 0) {
9164 dc->condexec_cond = 0;
9165 }
9166 }
9167 } else {
9168 disas_arm_insn(env, dc);
9169 }
b26eefb6
PB
9170 if (num_temps) {
9171 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9172 num_temps = 0;
9173 }
e50e6a20
FB
9174
9175 if (dc->condjmp && !dc->is_jmp) {
9176 gen_set_label(dc->condlabel);
9177 dc->condjmp = 0;
9178 }
aaf2d97d 9179 /* Translation stops when a conditional branch is encountered.
e50e6a20 9180 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9181 * Also stop translation when a page boundary is reached. This
bf20dc07 9182 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9183 num_insns ++;
1fddef4b
FB
9184 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9185 !env->singlestep_enabled &&
1b530a6d 9186 !singlestep &&
2e70f6ef
PB
9187 dc->pc < next_page_start &&
9188 num_insns < max_insns);
9189
9190 if (tb->cflags & CF_LAST_IO) {
9191 if (dc->condjmp) {
9192 /* FIXME: This can theoretically happen with self-modifying
9193 code. */
9194 cpu_abort(env, "IO on conditional branch instruction");
9195 }
9196 gen_io_end();
9197 }
9ee6e8bb 9198
b5ff1b31 9199 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9200 instruction was a conditional branch or trap, and the PC has
9201 already been written. */
551bd27f 9202 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9203 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9204 if (dc->condjmp) {
9ee6e8bb
PB
9205 gen_set_condexec(dc);
9206 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9207 gen_exception(EXCP_SWI);
9ee6e8bb 9208 } else {
d9ba4830 9209 gen_exception(EXCP_DEBUG);
9ee6e8bb 9210 }
e50e6a20
FB
9211 gen_set_label(dc->condlabel);
9212 }
9213 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9214 gen_set_pc_im(dc->pc);
e50e6a20 9215 dc->condjmp = 0;
8aaca4c0 9216 }
9ee6e8bb
PB
9217 gen_set_condexec(dc);
9218 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9219 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9220 } else {
9221 /* FIXME: Single stepping a WFI insn will not halt
9222 the CPU. */
d9ba4830 9223 gen_exception(EXCP_DEBUG);
9ee6e8bb 9224 }
8aaca4c0 9225 } else {
9ee6e8bb
PB
9226 /* While branches must always occur at the end of an IT block,
9227 there are a few other things that can cause us to terminate
9228 the TB in the middel of an IT block:
9229 - Exception generating instructions (bkpt, swi, undefined).
9230 - Page boundaries.
9231 - Hardware watchpoints.
9232 Hardware breakpoints have already been handled and skip this code.
9233 */
9234 gen_set_condexec(dc);
8aaca4c0 9235 switch(dc->is_jmp) {
8aaca4c0 9236 case DISAS_NEXT:
6e256c93 9237 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9238 break;
9239 default:
9240 case DISAS_JUMP:
9241 case DISAS_UPDATE:
9242 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9243 tcg_gen_exit_tb(0);
8aaca4c0
FB
9244 break;
9245 case DISAS_TB_JUMP:
9246 /* nothing more to generate */
9247 break;
9ee6e8bb 9248 case DISAS_WFI:
d9ba4830 9249 gen_helper_wfi();
9ee6e8bb
PB
9250 break;
9251 case DISAS_SWI:
d9ba4830 9252 gen_exception(EXCP_SWI);
9ee6e8bb 9253 break;
8aaca4c0 9254 }
e50e6a20
FB
9255 if (dc->condjmp) {
9256 gen_set_label(dc->condlabel);
9ee6e8bb 9257 gen_set_condexec(dc);
6e256c93 9258 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9259 dc->condjmp = 0;
9260 }
2c0262af 9261 }
2e70f6ef 9262
9ee6e8bb 9263done_generating:
2e70f6ef 9264 gen_icount_end(tb, num_insns);
2c0262af
FB
9265 *gen_opc_ptr = INDEX_op_end;
9266
9267#ifdef DEBUG_DISAS
8fec2b8c 9268 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9269 qemu_log("----------------\n");
9270 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9271 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
9272 qemu_log("\n");
2c0262af
FB
9273 }
9274#endif
b5ff1b31
FB
9275 if (search_pc) {
9276 j = gen_opc_ptr - gen_opc_buf;
9277 lj++;
9278 while (lj <= j)
9279 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9280 } else {
2c0262af 9281 tb->size = dc->pc - pc_start;
2e70f6ef 9282 tb->icount = num_insns;
b5ff1b31 9283 }
2c0262af
FB
9284}
9285
/* Public entry point: translate basic block 'tb' without recording
   per-op PC information (search_pc == 0).  */
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
9290
/* Public entry point: translate basic block 'tb' and also record
   per-op PC information (search_pc == 1), for use by gen_pc_load().  */
void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
9295
b5ff1b31
FB
/* Human-readable names for the 16 encodings of the CPSR mode field,
   indexed by (psr & 0xf); "???" marks encodings with no defined name.
   Fully const-qualified so the pointer table itself is read-only;
   it is only ever read (see cpu_dump_state), so this is compatible.  */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 9300
/* Dump the ARM CPU state (r0-r15 and the CPSR flags/mode) to 'f'
   through the 'cpu_fprintf' callback.  'flags' is currently unused.
   The VFP register dump is compiled out (#if 0).  */
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    /* Only needed by the disabled VFP dump below.  */
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    /* Core registers, four per output line.  */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    /* NZCV flags, Thumb ('T') vs ARM ('A') state, mode name, and
       32- vs 26-bit mode from PSR bit 4.  */
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    /* Disabled VFP register dump.  */
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
a6b025d3 9352
d2856f1a
AJ
/* Restore the guest PC (r15) from the per-op PC table recorded during
   a search_pc translation: copies gen_opc_pc[pc_pos] into env->regs[15].
   'tb', 'searched_pc' and 'puc' are unused by this target.  */
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}