/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

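/* Note: new_tmp()/dead_tmp() only wrap tcg_temp_new_i32()/tcg_temp_free();
   num_temps appears to act as a simple balance counter so that a translation
   path which forgets to release a temporary can be detected.  */
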
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

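/* Reading r15 above yields the architectural PC value: the address of the
   current instruction plus 8 in ARM state or plus 4 in Thumb state.  Since
   s->pc has already been advanced past the current instruction during
   decode, only one further instruction width (4 or 2) needs to be added.  */
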
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

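/* The xor/sub pair above is the usual branch-free sign-extension trick:
   for a width-bit field x, ((x ^ signbit) - signbit) with
   signbit = 1 << (width - 1) propagates the field's top bit through the
   upper bits of the 32-bit result.  */
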
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
    tcg_temp_free_i64(tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

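/* Clearing bit 15 of both operands before the add keeps a carry out of the
   low halfword from rippling into the high halfword; the final xor with the
   saved bit-15 difference restores the correct (carry-discarded) top bit of
   the low halfword.  */
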
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
};

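/* The shift == 0 special cases implement the ARM immediate shift encodings:
   LSR #0 and ASR #0 in the instruction actually mean LSR #32 and ASR #32,
   and ROR #0 encodes RRX (rotate right by one through the carry flag),
   which is why case 3 shifts in the saved CF bit from the top.  */
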
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

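/* op1 selects the operation flavour via the helper prefix: s/u are the
   signed/unsigned GE-flag-setting forms (they receive a pointer to env->GE),
   q/uq the signed/unsigned saturating forms, and sh/uh the signed/unsigned
   halving forms; op2 then picks the actual add/sub/exchange operation.
   The Thumb-2 variant below differs only in the numeric encoding of
   op1 and op2.  */
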
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

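/* The branches above rely on how this translator stores the flags in
   CPUState: ZF holds a value that is zero exactly when the Z flag is set,
   NF and VF hold their flag in bit 31 (hence the signed LT/GE compares
   against zero), and CF holds 0 or 1.  */
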
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

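/* In both helpers above, bit 23 of the instruction is the U (up/down) bit
   that selects whether the offset is added to or subtracted from the base.
   The word/byte form uses bit 25 to choose between a 12-bit immediate and a
   shifted register offset; the halfword/doubleword form uses bit 22 to
   choose between a split 8-bit immediate and a plain register offset.  */
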
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

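/* Naming convention for the conversion helpers above: "uito"/"sito" convert
   an unsigned/signed integer to floating point, "toui"/"tosi" convert the
   other way, and a trailing "z" means round-towards-zero.  VFP_GEN_FIX
   instantiates the fixed-point variants, which take the number of
   fractional bits as an extra shift operand.  */
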
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

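/* Register file layout assumed above: double-precision register d<n> and the
   single-precision pair s<2n+1>:s<2n> share storage, so an odd-numbered
   single lives in the upper word of the corresponding CPU_DoubleU and an
   even-numbered one in the lower word.  neon_reg_offset() views a 64-bit
   NEON register as two such 32-bit pieces (sreg = reg * 2 + n).  */
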
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

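/* gen_iwmmxt_address() handles the coprocessor addressing modes: bit 24
   selects pre-indexed vs post-indexed/unindexed, bit 23 is the up/down bit
   for the scaled 8-bit offset, and bit 21 requests base register writeback.
   The remaining unindexed form is only accepted with the up bit set.  */
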
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {             /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {              /* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {         /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                        /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {         /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                        /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {              /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {         /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                        /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {         /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                        /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                     /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                     /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                     /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                     /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                     /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                     /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                         /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
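    /* For TINSR above, tmp2/tmp3 passed to the insr helper are the field mask
       (0xff/0xffff/0xffffffff) and the bit offset of the selected byte,
       halfword or word lane within the 64-bit wRd register.  */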
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
2019 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2020 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2021 if (((insn >> 22) & 3) == 0)
2022 return 1;
18c9b560
AZ
2023 wrd = (insn >> 12) & 0xf;
2024 rd0 = (insn >> 16) & 0xf;
2025 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2026 tmp = new_tmp();
2027 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2028 dead_tmp(tmp);
18c9b560 2029 return 1;
da6b5335 2030 }
18c9b560 2031 switch ((insn >> 22) & 3) {
18c9b560 2032 case 1:
da6b5335 2033 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2034 break;
2035 case 2:
da6b5335 2036 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2037 break;
2038 case 3:
da6b5335 2039 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2040 break;
2041 }
da6b5335 2042 dead_tmp(tmp);
18c9b560
AZ
2043 gen_op_iwmmxt_movq_wRn_M0(wrd);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2046 break;
2047 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2048 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2049 if (((insn >> 22) & 3) == 0)
2050 return 1;
18c9b560
AZ
2051 wrd = (insn >> 12) & 0xf;
2052 rd0 = (insn >> 16) & 0xf;
2053 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2054 tmp = new_tmp();
18c9b560 2055 switch ((insn >> 22) & 3) {
18c9b560 2056 case 1:
da6b5335
FN
2057 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2058 dead_tmp(tmp);
18c9b560 2059 return 1;
da6b5335
FN
2060 }
2061 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2062 break;
2063 case 2:
da6b5335
FN
2064 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2065 dead_tmp(tmp);
18c9b560 2066 return 1;
da6b5335
FN
2067 }
2068 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2069 break;
2070 case 3:
da6b5335
FN
2071 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2072 dead_tmp(tmp);
18c9b560 2073 return 1;
da6b5335
FN
2074 }
2075 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2076 break;
2077 }
da6b5335 2078 dead_tmp(tmp);
18c9b560
AZ
2079 gen_op_iwmmxt_movq_wRn_M0(wrd);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2082 break;
2083 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2084 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2085 wrd = (insn >> 12) & 0xf;
2086 rd0 = (insn >> 16) & 0xf;
2087 rd1 = (insn >> 0) & 0xf;
2088 gen_op_iwmmxt_movq_M0_wRn(rd0);
2089 switch ((insn >> 22) & 3) {
2090 case 0:
2091 if (insn & (1 << 21))
2092 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2093 else
2094 gen_op_iwmmxt_minub_M0_wRn(rd1);
2095 break;
2096 case 1:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2099 else
2100 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2101 break;
2102 case 2:
2103 if (insn & (1 << 21))
2104 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2105 else
2106 gen_op_iwmmxt_minul_M0_wRn(rd1);
2107 break;
2108 case 3:
2109 return 1;
2110 }
2111 gen_op_iwmmxt_movq_wRn_M0(wrd);
2112 gen_op_iwmmxt_set_mup();
2113 break;
2114 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2115 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2116 wrd = (insn >> 12) & 0xf;
2117 rd0 = (insn >> 16) & 0xf;
2118 rd1 = (insn >> 0) & 0xf;
2119 gen_op_iwmmxt_movq_M0_wRn(rd0);
2120 switch ((insn >> 22) & 3) {
2121 case 0:
2122 if (insn & (1 << 21))
2123 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2124 else
2125 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2126 break;
2127 case 1:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2132 break;
2133 case 2:
2134 if (insn & (1 << 21))
2135 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2136 else
2137 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2138 break;
2139 case 3:
2140 return 1;
2141 }
2142 gen_op_iwmmxt_movq_wRn_M0(wrd);
2143 gen_op_iwmmxt_set_mup();
2144 break;
2145 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2146 case 0x402: case 0x502: case 0x602: case 0x702:
2147 wrd = (insn >> 12) & 0xf;
2148 rd0 = (insn >> 16) & 0xf;
2149 rd1 = (insn >> 0) & 0xf;
2150 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2151 tmp = tcg_const_i32((insn >> 20) & 3);
2152 iwmmxt_load_reg(cpu_V1, rd1);
2153 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2154 tcg_temp_free(tmp);
18c9b560
AZ
2155 gen_op_iwmmxt_movq_wRn_M0(wrd);
2156 gen_op_iwmmxt_set_mup();
2157 break;
2158 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2159 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2160 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2161 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2162 wrd = (insn >> 12) & 0xf;
2163 rd0 = (insn >> 16) & 0xf;
2164 rd1 = (insn >> 0) & 0xf;
2165 gen_op_iwmmxt_movq_M0_wRn(rd0);
2166 switch ((insn >> 20) & 0xf) {
2167 case 0x0:
2168 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2169 break;
2170 case 0x1:
2171 gen_op_iwmmxt_subub_M0_wRn(rd1);
2172 break;
2173 case 0x3:
2174 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2175 break;
2176 case 0x4:
2177 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2178 break;
2179 case 0x5:
2180 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2181 break;
2182 case 0x7:
2183 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2184 break;
2185 case 0x8:
2186 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2187 break;
2188 case 0x9:
2189 gen_op_iwmmxt_subul_M0_wRn(rd1);
2190 break;
2191 case 0xb:
2192 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2193 break;
2194 default:
2195 return 1;
2196 }
2197 gen_op_iwmmxt_movq_wRn_M0(wrd);
2198 gen_op_iwmmxt_set_mup();
2199 gen_op_iwmmxt_set_cup();
2200 break;
2201 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2202 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2203 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2204 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2205 wrd = (insn >> 12) & 0xf;
2206 rd0 = (insn >> 16) & 0xf;
2207 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2208 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2209 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2210 tcg_temp_free(tmp);
18c9b560
AZ
2211 gen_op_iwmmxt_movq_wRn_M0(wrd);
2212 gen_op_iwmmxt_set_mup();
2213 gen_op_iwmmxt_set_cup();
2214 break;
2215 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2216 case 0x418: case 0x518: case 0x618: case 0x718:
2217 case 0x818: case 0x918: case 0xa18: case 0xb18:
2218 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2219 wrd = (insn >> 12) & 0xf;
2220 rd0 = (insn >> 16) & 0xf;
2221 rd1 = (insn >> 0) & 0xf;
2222 gen_op_iwmmxt_movq_M0_wRn(rd0);
2223 switch ((insn >> 20) & 0xf) {
2224 case 0x0:
2225 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2226 break;
2227 case 0x1:
2228 gen_op_iwmmxt_addub_M0_wRn(rd1);
2229 break;
2230 case 0x3:
2231 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2232 break;
2233 case 0x4:
2234 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2235 break;
2236 case 0x5:
2237 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2238 break;
2239 case 0x7:
2240 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2241 break;
2242 case 0x8:
2243 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2244 break;
2245 case 0x9:
2246 gen_op_iwmmxt_addul_M0_wRn(rd1);
2247 break;
2248 case 0xb:
2249 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2250 break;
2251 default:
2252 return 1;
2253 }
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2257 break;
2258 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2259 case 0x408: case 0x508: case 0x608: case 0x708:
2260 case 0x808: case 0x908: case 0xa08: case 0xb08:
2261 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2262 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2263 return 1;
18c9b560
AZ
2264 wrd = (insn >> 12) & 0xf;
2265 rd0 = (insn >> 16) & 0xf;
2266 rd1 = (insn >> 0) & 0xf;
2267 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2268 switch ((insn >> 22) & 3) {
18c9b560
AZ
2269 case 1:
2270 if (insn & (1 << 21))
2271 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2272 else
2273 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2274 break;
2275 case 2:
2276 if (insn & (1 << 21))
2277 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2278 else
2279 gen_op_iwmmxt_packul_M0_wRn(rd1);
2280 break;
2281 case 3:
2282 if (insn & (1 << 21))
2283 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2284 else
2285 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2286 break;
2287 }
2288 gen_op_iwmmxt_movq_wRn_M0(wrd);
2289 gen_op_iwmmxt_set_mup();
2290 gen_op_iwmmxt_set_cup();
2291 break;
2292 case 0x201: case 0x203: case 0x205: case 0x207:
2293 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2294 case 0x211: case 0x213: case 0x215: case 0x217:
2295 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2296 wrd = (insn >> 5) & 0xf;
2297 rd0 = (insn >> 12) & 0xf;
2298 rd1 = (insn >> 0) & 0xf;
2299 if (rd0 == 0xf || rd1 == 0xf)
2300 return 1;
2301 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2302 tmp = load_reg(s, rd0);
2303 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2304 switch ((insn >> 16) & 0xf) {
2305 case 0x0: /* TMIA */
da6b5335 2306 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2307 break;
2308 case 0x8: /* TMIAPH */
da6b5335 2309 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2310 break;
2311 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2312 if (insn & (1 << 16))
da6b5335 2313 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2314 if (insn & (1 << 17))
da6b5335
FN
2315 tcg_gen_shri_i32(tmp2, tmp2, 16);
2316 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2317 break;
2318 default:
da6b5335
FN
2319 dead_tmp(tmp2);
2320 dead_tmp(tmp);
18c9b560
AZ
2321 return 1;
2322 }
da6b5335
FN
2323 dead_tmp(tmp2);
2324 dead_tmp(tmp);
18c9b560
AZ
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 break;
2328 default:
2329 return 1;
2330 }
2331
2332 return 0;
2333}
2334
2335/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2336 (i.e. an undefined instruction). */
2337static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2338{
2339 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2340 TCGv tmp, tmp2;
18c9b560
AZ
2341
2342 if ((insn & 0x0ff00f10) == 0x0e200010) {
2343 /* Multiply with Internal Accumulate Format */
2344 rd0 = (insn >> 12) & 0xf;
2345 rd1 = insn & 0xf;
2346 acc = (insn >> 5) & 7;
2347
2348 if (acc != 0)
2349 return 1;
2350
3a554c0f
FN
2351 tmp = load_reg(s, rd0);
2352 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2353 switch ((insn >> 16) & 0xf) {
2354 case 0x0: /* MIA */
3a554c0f 2355 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2356 break;
2357 case 0x8: /* MIAPH */
3a554c0f 2358 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2359 break;
2360 case 0xc: /* MIABB */
2361 case 0xd: /* MIABT */
2362 case 0xe: /* MIATB */
2363 case 0xf: /* MIATT */
18c9b560 2364 if (insn & (1 << 16))
3a554c0f 2365 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2366 if (insn & (1 << 17))
3a554c0f
FN
2367 tcg_gen_shri_i32(tmp2, tmp2, 16);
2368 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2369 break;
2370 default:
2371 return 1;
2372 }
3a554c0f
FN
2373 dead_tmp(tmp2);
2374 dead_tmp(tmp);
18c9b560
AZ
2375
2376 gen_op_iwmmxt_movq_wRn_M0(acc);
2377 return 0;
2378 }
2379
2380 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2381 /* Internal Accumulator Access Format */
2382 rdhi = (insn >> 16) & 0xf;
2383 rdlo = (insn >> 12) & 0xf;
2384 acc = insn & 7;
2385
2386 if (acc != 0)
2387 return 1;
2388
2389 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2390 iwmmxt_load_reg(cpu_V0, acc);
2391 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2392 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2393 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2394 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2395 } else { /* MAR */
3a554c0f
FN
2396 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2397 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2398 }
2399 return 0;
2400 }
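    /* Added commentary, not part of the original source: the XScale internal
     * accumulator is 40 bits wide, so the MRA path above masks the high
     * destination register with (1 << (40 - 32)) - 1 == 0xff and only bits
     * [39:32] end up in rdhi; the MAR path simply concatenates rdlo/rdhi back
     * into a 64-bit temporary before storing it to the accumulator. */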
2401
2402 return 1;
2403}
2404
c1713132
AZ
2405/* Disassemble system coprocessor instruction. Return nonzero if
2406 the instruction is not defined. */
2407static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2408{
b75263d6 2409 TCGv tmp, tmp2;
c1713132
AZ
2410 uint32_t rd = (insn >> 12) & 0xf;
2411 uint32_t cp = (insn >> 8) & 0xf;
2412 if (IS_USER(s)) {
2413 return 1;
2414 }
2415
18c9b560 2416 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2417 if (!env->cp[cp].cp_read)
2418 return 1;
8984bd2e
PB
2419 gen_set_pc_im(s->pc);
2420 tmp = new_tmp();
b75263d6
JR
2421 tmp2 = tcg_const_i32(insn);
2422 gen_helper_get_cp(tmp, cpu_env, tmp2);
2423 tcg_temp_free(tmp2);
8984bd2e 2424 store_reg(s, rd, tmp);
c1713132
AZ
2425 } else {
2426 if (!env->cp[cp].cp_write)
2427 return 1;
8984bd2e
PB
2428 gen_set_pc_im(s->pc);
2429 tmp = load_reg(s, rd);
b75263d6
JR
2430 tmp2 = tcg_const_i32(insn);
2431 gen_helper_set_cp(cpu_env, tmp2, tmp);
2432 tcg_temp_free(tmp2);
a60de947 2433 dead_tmp(tmp);
c1713132
AZ
2434 }
2435 return 0;
2436}
2437
9ee6e8bb
PB
2438static int cp15_user_ok(uint32_t insn)
2439{
2440 int cpn = (insn >> 16) & 0xf;
2441 int cpm = insn & 0xf;
2442 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2443
2444 if (cpn == 13 && cpm == 0) {
2445 /* TLS register. */
2446 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2447 return 1;
2448 }
2449 if (cpn == 7) {
2450 /* ISB, DSB, DMB. */
2451 if ((cpm == 5 && op == 4)
2452 || (cpm == 10 && (op == 4 || op == 5)))
2453 return 1;
2454 }
2455 return 0;
2456}
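/* Illustrative examples, added commentary not in the original code: with
 * op == opc2 | (opc1 << 3), a user-mode "MRC p15, 0, Rd, c13, c0, 2"
 * (thread ID / TLS register) is accepted, as is "MRC p15, 0, Rd, c13, c0, 3",
 * but an MCR to c13, c0, 3 is rejected because that encoding is only allowed
 * for reads. For cpn == 7, the accepted encodings correspond to the ISB
 * (c7, c5, 4), DSB (c7, c10, 4) and DMB (c7, c10, 5) barriers. */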
2457
b5ff1b31
FB
2458/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2459 the instruction is not defined. */
a90b7318 2460static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2461{
2462 uint32_t rd;
b75263d6 2463 TCGv tmp, tmp2;
b5ff1b31 2464
9ee6e8bb
PB
2465 /* M profile cores use memory mapped registers instead of cp15. */
2466 if (arm_feature(env, ARM_FEATURE_M))
2467 return 1;
2468
2469 if ((insn & (1 << 25)) == 0) {
2470 if (insn & (1 << 20)) {
2471 /* mrrc */
2472 return 1;
2473 }
2474 /* mcrr. Used for block cache operations, so implement as no-op. */
2475 return 0;
2476 }
2477 if ((insn & (1 << 4)) == 0) {
2478 /* cdp */
2479 return 1;
2480 }
2481 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2482 return 1;
2483 }
9332f9da
FB
2484 if ((insn & 0x0fff0fff) == 0x0e070f90
2485 || (insn & 0x0fff0fff) == 0x0e070f58) {
2486 /* Wait for interrupt. */
8984bd2e 2487 gen_set_pc_im(s->pc);
9ee6e8bb 2488 s->is_jmp = DISAS_WFI;
9332f9da
FB
2489 return 0;
2490 }
b5ff1b31 2491 rd = (insn >> 12) & 0xf;
b75263d6 2492 tmp2 = tcg_const_i32(insn);
18c9b560 2493 if (insn & ARM_CP_RW_BIT) {
8984bd2e 2494 tmp = new_tmp();
b75263d6 2495 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2496 /* If the destination register is r15 then the condition codes are set. */
2497 if (rd != 15)
8984bd2e
PB
2498 store_reg(s, rd, tmp);
2499 else
2500 dead_tmp(tmp);
b5ff1b31 2501 } else {
8984bd2e 2502 tmp = load_reg(s, rd);
b75263d6 2503 gen_helper_set_cp15(cpu_env, tmp2, tmp);
8984bd2e 2504 dead_tmp(tmp);
a90b7318
AZ
2505 /* Normally we would always end the TB here, but Linux
2506 * arch/arm/mach-pxa/sleep.S expects two instructions following
2507 * an MMU enable to execute from cache. Imitate this behaviour. */
2508 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2509 (insn & 0x0fff0fff) != 0x0e010f10)
2510 gen_lookup_tb(s);
b5ff1b31 2511 }
b75263d6 2512 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2513 return 0;
2514}
2515
9ee6e8bb
PB
2516#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2517#define VFP_SREG(insn, bigbit, smallbit) \
2518 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2519#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2520 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2521 reg = (((insn) >> (bigbit)) & 0x0f) \
2522 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2523 } else { \
2524 if (insn & (1 << (smallbit))) \
2525 return 1; \
2526 reg = ((insn) >> (bigbit)) & 0x0f; \
2527 }} while (0)
2528
2529#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2530#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2531#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2532#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2533#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2534#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
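/* Worked example, added commentary: for the D/Vd position, VFP_SREG_D forms a
 * single-precision register number from bits [15:12] of the instruction with
 * bit 22 as the low bit (S0-S31), while VFP_DREG_D uses bits [15:12] plus
 * bit 22 as bit 4 of the index, giving D0-D31 on VFP3 cores; on pre-VFP3
 * cores that extra bit must be zero, so only D0-D15 are encodable and any
 * other value is treated as an undefined instruction. */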
2535
4373f3ce
PB
2536/* Move between integer and VFP cores. */
2537static TCGv gen_vfp_mrs(void)
2538{
2539 TCGv tmp = new_tmp();
2540 tcg_gen_mov_i32(tmp, cpu_F0s);
2541 return tmp;
2542}
2543
2544static void gen_vfp_msr(TCGv tmp)
2545{
2546 tcg_gen_mov_i32(cpu_F0s, tmp);
2547 dead_tmp(tmp);
2548}
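/* Added note: gen_vfp_mrs and gen_vfp_msr shuttle a 32-bit value between the
 * working VFP register cpu_F0s and an integer temporary; callers first load
 * the source VFP register into F0 with gen_mov_F0_vreg and move results back
 * with gen_mov_vreg_F0, as seen in the transfer paths below. */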
2549
9ee6e8bb
PB
2550static inline int
2551vfp_enabled(CPUState * env)
2552{
2553 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2554}
2555
ad69471c
PB
2556static void gen_neon_dup_u8(TCGv var, int shift)
2557{
2558 TCGv tmp = new_tmp();
2559 if (shift)
2560 tcg_gen_shri_i32(var, var, shift);
86831435 2561 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2562 tcg_gen_shli_i32(tmp, var, 8);
2563 tcg_gen_or_i32(var, var, tmp);
2564 tcg_gen_shli_i32(tmp, var, 16);
2565 tcg_gen_or_i32(var, var, tmp);
2566 dead_tmp(tmp);
2567}
2568
2569static void gen_neon_dup_low16(TCGv var)
2570{
2571 TCGv tmp = new_tmp();
86831435 2572 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2573 tcg_gen_shli_i32(tmp, var, 16);
2574 tcg_gen_or_i32(var, var, tmp);
2575 dead_tmp(tmp);
2576}
2577
2578static void gen_neon_dup_high16(TCGv var)
2579{
2580 TCGv tmp = new_tmp();
2581 tcg_gen_andi_i32(var, var, 0xffff0000);
2582 tcg_gen_shri_i32(tmp, var, 16);
2583 tcg_gen_or_i32(var, var, tmp);
2584 dead_tmp(tmp);
2585}
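/* Worked example, added commentary: gen_neon_dup_u8(var, 0) turns 0x000000ab
 * into 0xabababab, gen_neon_dup_low16 turns 0x0000cdab into 0xcdabcdab, and
 * gen_neon_dup_high16 turns 0xcdab0000 into 0xcdabcdab, i.e. each helper
 * replicates one element across the whole 32-bit lane. */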
2586
b7bcbe95
FB
2587/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2588 (i.e. an undefined instruction). */
2589static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2590{
2591 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2592 int dp, veclen;
312eea9f 2593 TCGv addr;
4373f3ce 2594 TCGv tmp;
ad69471c 2595 TCGv tmp2;
b7bcbe95 2596
40f137e1
PB
2597 if (!arm_feature(env, ARM_FEATURE_VFP))
2598 return 1;
2599
9ee6e8bb
PB
2600 if (!vfp_enabled(env)) {
2601 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2602 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2603 return 1;
2604 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2605 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2606 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2607 return 1;
2608 }
b7bcbe95
FB
2609 dp = ((insn & 0xf00) == 0xb00);
2610 switch ((insn >> 24) & 0xf) {
2611 case 0xe:
2612 if (insn & (1 << 4)) {
2613 /* single register transfer */
b7bcbe95
FB
2614 rd = (insn >> 12) & 0xf;
2615 if (dp) {
9ee6e8bb
PB
2616 int size;
2617 int pass;
2618
2619 VFP_DREG_N(rn, insn);
2620 if (insn & 0xf)
b7bcbe95 2621 return 1;
9ee6e8bb
PB
2622 if (insn & 0x00c00060
2623 && !arm_feature(env, ARM_FEATURE_NEON))
2624 return 1;
2625
2626 pass = (insn >> 21) & 1;
2627 if (insn & (1 << 22)) {
2628 size = 0;
2629 offset = ((insn >> 5) & 3) * 8;
2630 } else if (insn & (1 << 5)) {
2631 size = 1;
2632 offset = (insn & (1 << 6)) ? 16 : 0;
2633 } else {
2634 size = 2;
2635 offset = 0;
2636 }
18c9b560 2637 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2638 /* vfp->arm */
ad69471c 2639 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2640 switch (size) {
2641 case 0:
9ee6e8bb 2642 if (offset)
ad69471c 2643 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2644 if (insn & (1 << 23))
ad69471c 2645 gen_uxtb(tmp);
9ee6e8bb 2646 else
ad69471c 2647 gen_sxtb(tmp);
9ee6e8bb
PB
2648 break;
2649 case 1:
9ee6e8bb
PB
2650 if (insn & (1 << 23)) {
2651 if (offset) {
ad69471c 2652 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2653 } else {
ad69471c 2654 gen_uxth(tmp);
9ee6e8bb
PB
2655 }
2656 } else {
2657 if (offset) {
ad69471c 2658 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2659 } else {
ad69471c 2660 gen_sxth(tmp);
9ee6e8bb
PB
2661 }
2662 }
2663 break;
2664 case 2:
9ee6e8bb
PB
2665 break;
2666 }
ad69471c 2667 store_reg(s, rd, tmp);
b7bcbe95
FB
2668 } else {
2669 /* arm->vfp */
ad69471c 2670 tmp = load_reg(s, rd);
9ee6e8bb
PB
2671 if (insn & (1 << 23)) {
2672 /* VDUP */
2673 if (size == 0) {
ad69471c 2674 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2675 } else if (size == 1) {
ad69471c 2676 gen_neon_dup_low16(tmp);
9ee6e8bb 2677 }
cbbccffc
PB
2678 for (n = 0; n <= pass * 2; n++) {
2679 tmp2 = new_tmp();
2680 tcg_gen_mov_i32(tmp2, tmp);
2681 neon_store_reg(rn, n, tmp2);
2682 }
2683 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2684 } else {
2685 /* VMOV */
2686 switch (size) {
2687 case 0:
ad69471c
PB
2688 tmp2 = neon_load_reg(rn, pass);
2689 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2690 dead_tmp(tmp2);
9ee6e8bb
PB
2691 break;
2692 case 1:
ad69471c
PB
2693 tmp2 = neon_load_reg(rn, pass);
2694 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2695 dead_tmp(tmp2);
9ee6e8bb
PB
2696 break;
2697 case 2:
9ee6e8bb
PB
2698 break;
2699 }
ad69471c 2700 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2701 }
b7bcbe95 2702 }
9ee6e8bb
PB
2703 } else { /* !dp */
2704 if ((insn & 0x6f) != 0x00)
2705 return 1;
2706 rn = VFP_SREG_N(insn);
18c9b560 2707 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2708 /* vfp->arm */
2709 if (insn & (1 << 21)) {
2710 /* system register */
40f137e1 2711 rn >>= 1;
9ee6e8bb 2712
b7bcbe95 2713 switch (rn) {
40f137e1 2714 case ARM_VFP_FPSID:
4373f3ce 2715 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2716 VFP3 restricts all id registers to privileged
2717 accesses. */
2718 if (IS_USER(s)
2719 && arm_feature(env, ARM_FEATURE_VFP3))
2720 return 1;
4373f3ce 2721 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2722 break;
40f137e1 2723 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2724 if (IS_USER(s))
2725 return 1;
4373f3ce 2726 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2727 break;
40f137e1
PB
2728 case ARM_VFP_FPINST:
2729 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2730 /* Not present in VFP3. */
2731 if (IS_USER(s)
2732 || arm_feature(env, ARM_FEATURE_VFP3))
2733 return 1;
4373f3ce 2734 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2735 break;
40f137e1 2736 case ARM_VFP_FPSCR:
601d70b9 2737 if (rd == 15) {
4373f3ce
PB
2738 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2739 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2740 } else {
2741 tmp = new_tmp();
2742 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2743 }
b7bcbe95 2744 break;
9ee6e8bb
PB
2745 case ARM_VFP_MVFR0:
2746 case ARM_VFP_MVFR1:
2747 if (IS_USER(s)
2748 || !arm_feature(env, ARM_FEATURE_VFP3))
2749 return 1;
4373f3ce 2750 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2751 break;
b7bcbe95
FB
2752 default:
2753 return 1;
2754 }
2755 } else {
2756 gen_mov_F0_vreg(0, rn);
4373f3ce 2757 tmp = gen_vfp_mrs();
b7bcbe95
FB
2758 }
2759 if (rd == 15) {
b5ff1b31 2760 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2761 gen_set_nzcv(tmp);
2762 dead_tmp(tmp);
2763 } else {
2764 store_reg(s, rd, tmp);
2765 }
b7bcbe95
FB
2766 } else {
2767 /* arm->vfp */
4373f3ce 2768 tmp = load_reg(s, rd);
b7bcbe95 2769 if (insn & (1 << 21)) {
40f137e1 2770 rn >>= 1;
b7bcbe95
FB
2771 /* system register */
2772 switch (rn) {
40f137e1 2773 case ARM_VFP_FPSID:
9ee6e8bb
PB
2774 case ARM_VFP_MVFR0:
2775 case ARM_VFP_MVFR1:
b7bcbe95
FB
2776 /* Writes are ignored. */
2777 break;
40f137e1 2778 case ARM_VFP_FPSCR:
4373f3ce
PB
2779 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2780 dead_tmp(tmp);
b5ff1b31 2781 gen_lookup_tb(s);
b7bcbe95 2782 break;
40f137e1 2783 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2784 if (IS_USER(s))
2785 return 1;
71b3c3de
JR
2786 /* TODO: VFP subarchitecture support.
2787 * For now, keep the EN bit only */
2788 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2789 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2790 gen_lookup_tb(s);
2791 break;
2792 case ARM_VFP_FPINST:
2793 case ARM_VFP_FPINST2:
4373f3ce 2794 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2795 break;
b7bcbe95
FB
2796 default:
2797 return 1;
2798 }
2799 } else {
4373f3ce 2800 gen_vfp_msr(tmp);
b7bcbe95
FB
2801 gen_mov_vreg_F0(0, rn);
2802 }
2803 }
2804 }
2805 } else {
2806 /* data processing */
2807 /* The opcode is in bits 23, 21, 20 and 6. */
2808 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2809 if (dp) {
2810 if (op == 15) {
2811 /* rn is opcode */
2812 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2813 } else {
2814 /* rn is register number */
9ee6e8bb 2815 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2816 }
2817
2818 if (op == 15 && (rn == 15 || rn > 17)) {
2819 /* Integer or single precision destination. */
9ee6e8bb 2820 rd = VFP_SREG_D(insn);
b7bcbe95 2821 } else {
9ee6e8bb 2822 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2823 }
2824
2825 if (op == 15 && (rn == 16 || rn == 17)) {
2826 /* Integer source. */
2827 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2828 } else {
9ee6e8bb 2829 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2830 }
2831 } else {
9ee6e8bb 2832 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2833 if (op == 15 && rn == 15) {
2834 /* Double precision destination. */
9ee6e8bb
PB
2835 VFP_DREG_D(rd, insn);
2836 } else {
2837 rd = VFP_SREG_D(insn);
2838 }
2839 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2840 }
2841
2842 veclen = env->vfp.vec_len;
2843 if (op == 15 && rn > 3)
2844 veclen = 0;
2845
2846 /* Shut up compiler warnings. */
2847 delta_m = 0;
2848 delta_d = 0;
2849 bank_mask = 0;
3b46e624 2850
b7bcbe95
FB
2851 if (veclen > 0) {
2852 if (dp)
2853 bank_mask = 0xc;
2854 else
2855 bank_mask = 0x18;
2856
2857 /* Figure out what type of vector operation this is. */
2858 if ((rd & bank_mask) == 0) {
2859 /* scalar */
2860 veclen = 0;
2861 } else {
2862 if (dp)
2863 delta_d = (env->vfp.vec_stride >> 1) + 1;
2864 else
2865 delta_d = env->vfp.vec_stride + 1;
2866
2867 if ((rm & bank_mask) == 0) {
2868 /* mixed scalar/vector */
2869 delta_m = 0;
2870 } else {
2871 /* vector */
2872 delta_m = delta_d;
2873 }
2874 }
2875 }
2876
2877 /* Load the initial operands. */
2878 if (op == 15) {
2879 switch (rn) {
2880 case 16:
2881 case 17:
2882 /* Integer source */
2883 gen_mov_F0_vreg(0, rm);
2884 break;
2885 case 8:
2886 case 9:
2887 /* Compare */
2888 gen_mov_F0_vreg(dp, rd);
2889 gen_mov_F1_vreg(dp, rm);
2890 break;
2891 case 10:
2892 case 11:
2893 /* Compare with zero */
2894 gen_mov_F0_vreg(dp, rd);
2895 gen_vfp_F1_ld0(dp);
2896 break;
9ee6e8bb
PB
2897 case 20:
2898 case 21:
2899 case 22:
2900 case 23:
644ad806
PB
2901 case 28:
2902 case 29:
2903 case 30:
2904 case 31:
9ee6e8bb
PB
2905 /* Source and destination the same. */
2906 gen_mov_F0_vreg(dp, rd);
2907 break;
b7bcbe95
FB
2908 default:
2909 /* One source operand. */
2910 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2911 break;
b7bcbe95
FB
2912 }
2913 } else {
2914 /* Two source operands. */
2915 gen_mov_F0_vreg(dp, rn);
2916 gen_mov_F1_vreg(dp, rm);
2917 }
2918
2919 for (;;) {
2920 /* Perform the calculation. */
2921 switch (op) {
2922 case 0: /* mac: fd + (fn * fm) */
2923 gen_vfp_mul(dp);
2924 gen_mov_F1_vreg(dp, rd);
2925 gen_vfp_add(dp);
2926 break;
2927 case 1: /* nmac: fd - (fn * fm) */
2928 gen_vfp_mul(dp);
2929 gen_vfp_neg(dp);
2930 gen_mov_F1_vreg(dp, rd);
2931 gen_vfp_add(dp);
2932 break;
2933 case 2: /* msc: -fd + (fn * fm) */
2934 gen_vfp_mul(dp);
2935 gen_mov_F1_vreg(dp, rd);
2936 gen_vfp_sub(dp);
2937 break;
2938 case 3: /* nmsc: -fd - (fn * fm) */
2939 gen_vfp_mul(dp);
b7bcbe95 2940 gen_vfp_neg(dp);
c9fb531a
PB
2941 gen_mov_F1_vreg(dp, rd);
2942 gen_vfp_sub(dp);
b7bcbe95
FB
2943 break;
2944 case 4: /* mul: fn * fm */
2945 gen_vfp_mul(dp);
2946 break;
2947 case 5: /* nmul: -(fn * fm) */
2948 gen_vfp_mul(dp);
2949 gen_vfp_neg(dp);
2950 break;
2951 case 6: /* add: fn + fm */
2952 gen_vfp_add(dp);
2953 break;
2954 case 7: /* sub: fn - fm */
2955 gen_vfp_sub(dp);
2956 break;
2957 case 8: /* div: fn / fm */
2958 gen_vfp_div(dp);
2959 break;
9ee6e8bb
PB
2960 case 14: /* fconst */
2961 if (!arm_feature(env, ARM_FEATURE_VFP3))
2962 return 1;
2963
2964 n = (insn << 12) & 0x80000000;
2965 i = ((insn >> 12) & 0x70) | (insn & 0xf);
2966 if (dp) {
2967 if (i & 0x40)
2968 i |= 0x3f80;
2969 else
2970 i |= 0x4000;
2971 n |= i << 16;
4373f3ce 2972 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
2973 } else {
2974 if (i & 0x40)
2975 i |= 0x780;
2976 else
2977 i |= 0x800;
2978 n |= i << 19;
5b340b51 2979 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 2980 }
9ee6e8bb 2981 break;
b7bcbe95
FB
2982 case 15: /* extension space */
2983 switch (rn) {
2984 case 0: /* cpy */
2985 /* no-op */
2986 break;
2987 case 1: /* abs */
2988 gen_vfp_abs(dp);
2989 break;
2990 case 2: /* neg */
2991 gen_vfp_neg(dp);
2992 break;
2993 case 3: /* sqrt */
2994 gen_vfp_sqrt(dp);
2995 break;
60011498
PB
2996 case 4: /* vcvtb.f32.f16 */
2997 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
2998 return 1;
2999 tmp = gen_vfp_mrs();
3000 tcg_gen_ext16u_i32(tmp, tmp);
3001 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3002 dead_tmp(tmp);
3003 break;
3004 case 5: /* vcvtt.f32.f16 */
3005 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3006 return 1;
3007 tmp = gen_vfp_mrs();
3008 tcg_gen_shri_i32(tmp, tmp, 16);
3009 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3010 dead_tmp(tmp);
3011 break;
3012 case 6: /* vcvtb.f16.f32 */
3013 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3014 return 1;
3015 tmp = new_tmp();
3016 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3017 gen_mov_F0_vreg(0, rd);
3018 tmp2 = gen_vfp_mrs();
3019 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3020 tcg_gen_or_i32(tmp, tmp, tmp2);
3021 dead_tmp(tmp2);
3022 gen_vfp_msr(tmp);
3023 break;
3024 case 7: /* vcvtt.f16.f32 */
3025 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3026 return 1;
3027 tmp = new_tmp();
3028 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3029 tcg_gen_shli_i32(tmp, tmp, 16);
3030 gen_mov_F0_vreg(0, rd);
3031 tmp2 = gen_vfp_mrs();
3032 tcg_gen_ext16u_i32(tmp2, tmp2);
3033 tcg_gen_or_i32(tmp, tmp, tmp2);
3034 dead_tmp(tmp2);
3035 gen_vfp_msr(tmp);
3036 break;
b7bcbe95
FB
3037 case 8: /* cmp */
3038 gen_vfp_cmp(dp);
3039 break;
3040 case 9: /* cmpe */
3041 gen_vfp_cmpe(dp);
3042 break;
3043 case 10: /* cmpz */
3044 gen_vfp_cmp(dp);
3045 break;
3046 case 11: /* cmpez */
3047 gen_vfp_F1_ld0(dp);
3048 gen_vfp_cmpe(dp);
3049 break;
3050 case 15: /* single<->double conversion */
3051 if (dp)
4373f3ce 3052 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3053 else
4373f3ce 3054 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3055 break;
3056 case 16: /* fuito */
3057 gen_vfp_uito(dp);
3058 break;
3059 case 17: /* fsito */
3060 gen_vfp_sito(dp);
3061 break;
9ee6e8bb
PB
3062 case 20: /* fshto */
3063 if (!arm_feature(env, ARM_FEATURE_VFP3))
3064 return 1;
644ad806 3065 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3066 break;
3067 case 21: /* fslto */
3068 if (!arm_feature(env, ARM_FEATURE_VFP3))
3069 return 1;
644ad806 3070 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3071 break;
3072 case 22: /* fuhto */
3073 if (!arm_feature(env, ARM_FEATURE_VFP3))
3074 return 1;
644ad806 3075 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3076 break;
3077 case 23: /* fulto */
3078 if (!arm_feature(env, ARM_FEATURE_VFP3))
3079 return 1;
644ad806 3080 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3081 break;
b7bcbe95
FB
3082 case 24: /* ftoui */
3083 gen_vfp_toui(dp);
3084 break;
3085 case 25: /* ftouiz */
3086 gen_vfp_touiz(dp);
3087 break;
3088 case 26: /* ftosi */
3089 gen_vfp_tosi(dp);
3090 break;
3091 case 27: /* ftosiz */
3092 gen_vfp_tosiz(dp);
3093 break;
9ee6e8bb
PB
3094 case 28: /* ftosh */
3095 if (!arm_feature(env, ARM_FEATURE_VFP3))
3096 return 1;
644ad806 3097 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3098 break;
3099 case 29: /* ftosl */
3100 if (!arm_feature(env, ARM_FEATURE_VFP3))
3101 return 1;
644ad806 3102 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3103 break;
3104 case 30: /* ftouh */
3105 if (!arm_feature(env, ARM_FEATURE_VFP3))
3106 return 1;
644ad806 3107 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3108 break;
3109 case 31: /* ftoul */
3110 if (!arm_feature(env, ARM_FEATURE_VFP3))
3111 return 1;
644ad806 3112 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3113 break;
b7bcbe95
FB
3114 default: /* undefined */
3115 printf ("rn:%d\n", rn);
3116 return 1;
3117 }
3118 break;
3119 default: /* undefined */
3120 printf ("op:%d\n", op);
3121 return 1;
3122 }
3123
3124 /* Write back the result. */
3125 if (op == 15 && (rn >= 8 && rn <= 11))
3126 ; /* Comparison, do nothing. */
3127 else if (op == 15 && rn > 17)
3128 /* Integer result. */
3129 gen_mov_vreg_F0(0, rd);
3130 else if (op == 15 && rn == 15)
3131 /* conversion */
3132 gen_mov_vreg_F0(!dp, rd);
3133 else
3134 gen_mov_vreg_F0(dp, rd);
3135
3136 /* break out of the loop if we have finished */
3137 if (veclen == 0)
3138 break;
3139
3140 if (op == 15 && delta_m == 0) {
3141 /* single source one-many */
3142 while (veclen--) {
3143 rd = ((rd + delta_d) & (bank_mask - 1))
3144 | (rd & bank_mask);
3145 gen_mov_vreg_F0(dp, rd);
3146 }
3147 break;
3148 }
3149 /* Setup the next operands. */
3150 veclen--;
3151 rd = ((rd + delta_d) & (bank_mask - 1))
3152 | (rd & bank_mask);
3153
3154 if (op == 15) {
3155 /* One source operand. */
3156 rm = ((rm + delta_m) & (bank_mask - 1))
3157 | (rm & bank_mask);
3158 gen_mov_F0_vreg(dp, rm);
3159 } else {
3160 /* Two source operands. */
3161 rn = ((rn + delta_d) & (bank_mask - 1))
3162 | (rn & bank_mask);
3163 gen_mov_F0_vreg(dp, rn);
3164 if (delta_m) {
3165 rm = ((rm + delta_m) & (bank_mask - 1))
3166 | (rm & bank_mask);
3167 gen_mov_F1_vreg(dp, rm);
3168 }
3169 }
3170 }
3171 }
3172 break;
3173 case 0xc:
3174 case 0xd:
9ee6e8bb 3175 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3176 /* two-register transfer */
3177 rn = (insn >> 16) & 0xf;
3178 rd = (insn >> 12) & 0xf;
3179 if (dp) {
9ee6e8bb
PB
3180 VFP_DREG_M(rm, insn);
3181 } else {
3182 rm = VFP_SREG_M(insn);
3183 }
b7bcbe95 3184
18c9b560 3185 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3186 /* vfp->arm */
3187 if (dp) {
4373f3ce
PB
3188 gen_mov_F0_vreg(0, rm * 2);
3189 tmp = gen_vfp_mrs();
3190 store_reg(s, rd, tmp);
3191 gen_mov_F0_vreg(0, rm * 2 + 1);
3192 tmp = gen_vfp_mrs();
3193 store_reg(s, rn, tmp);
b7bcbe95
FB
3194 } else {
3195 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3196 tmp = gen_vfp_mrs();
3197 store_reg(s, rn, tmp);
b7bcbe95 3198 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3199 tmp = gen_vfp_mrs();
3200 store_reg(s, rd, tmp);
b7bcbe95
FB
3201 }
3202 } else {
3203 /* arm->vfp */
3204 if (dp) {
4373f3ce
PB
3205 tmp = load_reg(s, rd);
3206 gen_vfp_msr(tmp);
3207 gen_mov_vreg_F0(0, rm * 2);
3208 tmp = load_reg(s, rn);
3209 gen_vfp_msr(tmp);
3210 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3211 } else {
4373f3ce
PB
3212 tmp = load_reg(s, rn);
3213 gen_vfp_msr(tmp);
b7bcbe95 3214 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3215 tmp = load_reg(s, rd);
3216 gen_vfp_msr(tmp);
b7bcbe95
FB
3217 gen_mov_vreg_F0(0, rm + 1);
3218 }
3219 }
3220 } else {
3221 /* Load/store */
3222 rn = (insn >> 16) & 0xf;
3223 if (dp)
9ee6e8bb 3224 VFP_DREG_D(rd, insn);
b7bcbe95 3225 else
9ee6e8bb
PB
3226 rd = VFP_SREG_D(insn);
3227 if (s->thumb && rn == 15) {
312eea9f
FN
3228 addr = new_tmp();
3229 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3230 } else {
312eea9f 3231 addr = load_reg(s, rn);
9ee6e8bb 3232 }
b7bcbe95
FB
3233 if ((insn & 0x01200000) == 0x01000000) {
3234 /* Single load/store */
3235 offset = (insn & 0xff) << 2;
3236 if ((insn & (1 << 23)) == 0)
3237 offset = -offset;
312eea9f 3238 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3239 if (insn & (1 << 20)) {
312eea9f 3240 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3241 gen_mov_vreg_F0(dp, rd);
3242 } else {
3243 gen_mov_F0_vreg(dp, rd);
312eea9f 3244 gen_vfp_st(s, dp, addr);
b7bcbe95 3245 }
312eea9f 3246 dead_tmp(addr);
b7bcbe95
FB
3247 } else {
3248 /* load/store multiple */
3249 if (dp)
3250 n = (insn >> 1) & 0x7f;
3251 else
3252 n = insn & 0xff;
3253
3254 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3255 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3256
3257 if (dp)
3258 offset = 8;
3259 else
3260 offset = 4;
3261 for (i = 0; i < n; i++) {
18c9b560 3262 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3263 /* load */
312eea9f 3264 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3265 gen_mov_vreg_F0(dp, rd + i);
3266 } else {
3267 /* store */
3268 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3269 gen_vfp_st(s, dp, addr);
b7bcbe95 3270 }
312eea9f 3271 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3272 }
3273 if (insn & (1 << 21)) {
3274 /* writeback */
3275 if (insn & (1 << 24))
3276 offset = -offset * n;
3277 else if (dp && (insn & 1))
3278 offset = 4;
3279 else
3280 offset = 0;
3281
3282 if (offset != 0)
312eea9f
FN
3283 tcg_gen_addi_i32(addr, addr, offset);
3284 store_reg(s, rn, addr);
3285 } else {
3286 dead_tmp(addr);
b7bcbe95
FB
3287 }
3288 }
3289 }
3290 break;
3291 default:
3292 /* Should never happen. */
3293 return 1;
3294 }
3295 return 0;
3296}
3297
6e256c93 3298static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3299{
6e256c93
FB
3300 TranslationBlock *tb;
3301
3302 tb = s->tb;
3303 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3304 tcg_gen_goto_tb(n);
8984bd2e 3305 gen_set_pc_im(dest);
57fec1fe 3306 tcg_gen_exit_tb((long)tb + n);
6e256c93 3307 } else {
8984bd2e 3308 gen_set_pc_im(dest);
57fec1fe 3309 tcg_gen_exit_tb(0);
6e256c93 3310 }
c53be334
FB
3311}
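/* Added note: gen_goto_tb only uses direct block chaining
 * (tcg_gen_goto_tb plus tcg_gen_exit_tb((long)tb + n)) when the destination
 * address lies in the same guest page as the current TB, so chained jumps
 * never cross a page boundary; otherwise the new PC is written back and we
 * return to the main loop with tcg_gen_exit_tb(0). */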
3312
8aaca4c0
FB
3313static inline void gen_jmp (DisasContext *s, uint32_t dest)
3314{
551bd27f 3315 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3316 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3317 if (s->thumb)
d9ba4830
PB
3318 dest |= 1;
3319 gen_bx_im(s, dest);
8aaca4c0 3320 } else {
6e256c93 3321 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3322 s->is_jmp = DISAS_TB_JUMP;
3323 }
3324}
3325
d9ba4830 3326static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3327{
ee097184 3328 if (x)
d9ba4830 3329 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3330 else
d9ba4830 3331 gen_sxth(t0);
ee097184 3332 if (y)
d9ba4830 3333 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3334 else
d9ba4830
PB
3335 gen_sxth(t1);
3336 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3337}
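/* Added note: gen_mulxy implements the 16x16->32 signed multiply used by the
 * halfword multiply forms: each operand is reduced to a signed halfword,
 * either the top half (arithmetic shift right by 16 when x or y is set) or
 * the bottom half (sign extension otherwise), before the 32-bit multiply. */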
3338
3339/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3340static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3341 uint32_t mask;
3342
3343 mask = 0;
3344 if (flags & (1 << 0))
3345 mask |= 0xff;
3346 if (flags & (1 << 1))
3347 mask |= 0xff00;
3348 if (flags & (1 << 2))
3349 mask |= 0xff0000;
3350 if (flags & (1 << 3))
3351 mask |= 0xff000000;
9ee6e8bb 3352
2ae23e75 3353 /* Mask out undefined bits. */
9ee6e8bb
PB
3354 mask &= ~CPSR_RESERVED;
3355 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3356 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3357 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3358 mask &= ~CPSR_IT;
9ee6e8bb 3359 /* Mask out execution state bits. */
2ae23e75 3360 if (!spsr)
e160c51c 3361 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3362 /* Mask out privileged bits. */
3363 if (IS_USER(s))
9ee6e8bb 3364 mask &= CPSR_USER;
b5ff1b31
FB
3365 return mask;
3366}
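/* Worked example, added commentary: the four low flag bits of an MSR
 * instruction each select one byte of the PSR -- bit 0 -> 0x000000ff
 * (control), bit 1 -> 0x0000ff00 (extension), bit 2 -> 0x00ff0000 (status),
 * bit 3 -> 0xff000000 (flags) -- after which reserved, execution-state and
 * privileged bits are masked out according to the architecture version and
 * the current privilege level. */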
3367
2fbac54b
FN
3368/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3369static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3370{
d9ba4830 3371 TCGv tmp;
b5ff1b31
FB
3372 if (spsr) {
3373 /* ??? This is also undefined in system mode. */
3374 if (IS_USER(s))
3375 return 1;
d9ba4830
PB
3376
3377 tmp = load_cpu_field(spsr);
3378 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3379 tcg_gen_andi_i32(t0, t0, mask);
3380 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3381 store_cpu_field(tmp, spsr);
b5ff1b31 3382 } else {
2fbac54b 3383 gen_set_cpsr(t0, mask);
b5ff1b31 3384 }
2fbac54b 3385 dead_tmp(t0);
b5ff1b31
FB
3386 gen_lookup_tb(s);
3387 return 0;
3388}
3389
2fbac54b
FN
3390/* Returns nonzero if access to the PSR is not permitted. */
3391static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3392{
3393 TCGv tmp;
3394 tmp = new_tmp();
3395 tcg_gen_movi_i32(tmp, val);
3396 return gen_set_psr(s, mask, spsr, tmp);
3397}
3398
e9bb4aa9
JR
3399/* Generate an old-style exception return. Marks pc as dead. */
3400static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3401{
d9ba4830 3402 TCGv tmp;
e9bb4aa9 3403 store_reg(s, 15, pc);
d9ba4830
PB
3404 tmp = load_cpu_field(spsr);
3405 gen_set_cpsr(tmp, 0xffffffff);
3406 dead_tmp(tmp);
b5ff1b31
FB
3407 s->is_jmp = DISAS_UPDATE;
3408}
3409
b0109805
PB
3410/* Generate a v6 exception return. Marks both values as dead. */
3411static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3412{
b0109805
PB
3413 gen_set_cpsr(cpsr, 0xffffffff);
3414 dead_tmp(cpsr);
3415 store_reg(s, 15, pc);
9ee6e8bb
PB
3416 s->is_jmp = DISAS_UPDATE;
3417}
3b46e624 3418
9ee6e8bb
PB
3419static inline void
3420gen_set_condexec (DisasContext *s)
3421{
3422 if (s->condexec_mask) {
8f01245e
PB
3423 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3424 TCGv tmp = new_tmp();
3425 tcg_gen_movi_i32(tmp, val);
d9ba4830 3426 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3427 }
3428}
3b46e624 3429
9ee6e8bb
PB
3430static void gen_nop_hint(DisasContext *s, int val)
3431{
3432 switch (val) {
3433 case 3: /* wfi */
8984bd2e 3434 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3435 s->is_jmp = DISAS_WFI;
3436 break;
3437 case 2: /* wfe */
3438 case 4: /* sev */
3439 /* TODO: Implement SEV and WFE. May help SMP performance. */
3440 default: /* nop */
3441 break;
3442 }
3443}
99c475ab 3444
ad69471c 3445#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3446
dd8fbd78 3447static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3448{
3449 switch (size) {
dd8fbd78
FN
3450 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3451 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3452 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3453 default: return 1;
3454 }
3455 return 0;
3456}
3457
dd8fbd78 3458static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3459{
3460 switch (size) {
dd8fbd78
FN
3461 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3462 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3463 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3464 default: return;
3465 }
3466}
3467
3468/* 32-bit pairwise ops end up the same as the elementwise versions. */
3469#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3470#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3471#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3472#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3473
3474/* FIXME: This is wrong. They set the wrong overflow bit. */
3475#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3476#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3477#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3478#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3479
3480#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3481 switch ((size << 1) | u) { \
3482 case 0: \
dd8fbd78 3483 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3484 break; \
3485 case 1: \
dd8fbd78 3486 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3487 break; \
3488 case 2: \
dd8fbd78 3489 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3490 break; \
3491 case 3: \
dd8fbd78 3492 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3493 break; \
3494 case 4: \
dd8fbd78 3495 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3496 break; \
3497 case 5: \
dd8fbd78 3498 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3499 break; \
3500 default: return 1; \
3501 }} while (0)
9ee6e8bb
PB
3502
3503#define GEN_NEON_INTEGER_OP(name) do { \
3504 switch ((size << 1) | u) { \
ad69471c 3505 case 0: \
dd8fbd78 3506 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3507 break; \
3508 case 1: \
dd8fbd78 3509 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3510 break; \
3511 case 2: \
dd8fbd78 3512 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3513 break; \
3514 case 3: \
dd8fbd78 3515 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3516 break; \
3517 case 4: \
dd8fbd78 3518 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3519 break; \
3520 case 5: \
dd8fbd78 3521 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3522 break; \
9ee6e8bb
PB
3523 default: return 1; \
3524 }} while (0)
3525
dd8fbd78 3526static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3527{
dd8fbd78
FN
3528 TCGv tmp = new_tmp();
3529 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3530 return tmp;
9ee6e8bb
PB
3531}
3532
dd8fbd78 3533static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3534{
dd8fbd78
FN
3535 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3536 dead_tmp(var);
9ee6e8bb
PB
3537}
3538
dd8fbd78 3539static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3540{
dd8fbd78 3541 TCGv tmp;
9ee6e8bb 3542 if (size == 1) {
dd8fbd78 3543 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3544 } else {
dd8fbd78
FN
3545 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3546 if (reg & 1) {
3547 gen_neon_dup_low16(tmp);
3548 } else {
3549 gen_neon_dup_high16(tmp);
3550 }
9ee6e8bb 3551 }
dd8fbd78 3552 return tmp;
9ee6e8bb
PB
3553}
3554
19457615
FN
3555static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3556{
3557 TCGv rd, rm, tmp;
3558
3559 rd = new_tmp();
3560 rm = new_tmp();
3561 tmp = new_tmp();
3562
3563 tcg_gen_andi_i32(rd, t0, 0xff);
3564 tcg_gen_shri_i32(tmp, t0, 8);
3565 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3566 tcg_gen_or_i32(rd, rd, tmp);
3567 tcg_gen_shli_i32(tmp, t1, 16);
3568 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3569 tcg_gen_or_i32(rd, rd, tmp);
3570 tcg_gen_shli_i32(tmp, t1, 8);
3571 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3572 tcg_gen_or_i32(rd, rd, tmp);
3573
3574 tcg_gen_shri_i32(rm, t0, 8);
3575 tcg_gen_andi_i32(rm, rm, 0xff);
3576 tcg_gen_shri_i32(tmp, t0, 16);
3577 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3578 tcg_gen_or_i32(rm, rm, tmp);
3579 tcg_gen_shli_i32(tmp, t1, 8);
3580 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3581 tcg_gen_or_i32(rm, rm, tmp);
3582 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3583 tcg_gen_or_i32(t1, rm, tmp);
3584 tcg_gen_mov_i32(t0, rd);
3585
3586 dead_tmp(tmp);
3587 dead_tmp(rm);
3588 dead_tmp(rd);
3589}
3590
3591static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3592{
3593 TCGv rd, rm, tmp;
3594
3595 rd = new_tmp();
3596 rm = new_tmp();
3597 tmp = new_tmp();
3598
3599 tcg_gen_andi_i32(rd, t0, 0xff);
3600 tcg_gen_shli_i32(tmp, t1, 8);
3601 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3602 tcg_gen_or_i32(rd, rd, tmp);
3603 tcg_gen_shli_i32(tmp, t0, 16);
3604 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3605 tcg_gen_or_i32(rd, rd, tmp);
3606 tcg_gen_shli_i32(tmp, t1, 24);
3607 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3608 tcg_gen_or_i32(rd, rd, tmp);
3609
3610 tcg_gen_andi_i32(rm, t1, 0xff000000);
3611 tcg_gen_shri_i32(tmp, t0, 8);
3612 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3613 tcg_gen_or_i32(rm, rm, tmp);
3614 tcg_gen_shri_i32(tmp, t1, 8);
3615 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3616 tcg_gen_or_i32(rm, rm, tmp);
3617 tcg_gen_shri_i32(tmp, t0, 16);
3618 tcg_gen_andi_i32(tmp, tmp, 0xff);
3619 tcg_gen_or_i32(t1, rm, tmp);
3620 tcg_gen_mov_i32(t0, rd);
3621
3622 dead_tmp(tmp);
3623 dead_tmp(rm);
3624 dead_tmp(rd);
3625}
3626
3627static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3628{
3629 TCGv tmp, tmp2;
3630
3631 tmp = new_tmp();
3632 tmp2 = new_tmp();
3633
3634 tcg_gen_andi_i32(tmp, t0, 0xffff);
3635 tcg_gen_shli_i32(tmp2, t1, 16);
3636 tcg_gen_or_i32(tmp, tmp, tmp2);
3637 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3638 tcg_gen_shri_i32(tmp2, t0, 16);
3639 tcg_gen_or_i32(t1, t1, tmp2);
3640 tcg_gen_mov_i32(t0, tmp);
3641
3642 dead_tmp(tmp2);
3643 dead_tmp(tmp);
3644}
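/* Worked example, added commentary: with t0 holding halfwords {a1, a0} and
 * t1 holding {b1, b0} (low element last in this notation), gen_neon_zip_u16
 * leaves t0 = {b0, a0} and t1 = {b1, a1}, i.e. the two inputs are interleaved
 * element by element. */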
3645
9ee6e8bb
PB
3646static void gen_neon_unzip(int reg, int q, int tmp, int size)
3647{
3648 int n;
dd8fbd78 3649 TCGv t0, t1;
9ee6e8bb
PB
3650
3651 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3652 t0 = neon_load_reg(reg, n);
3653 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3654 switch (size) {
dd8fbd78
FN
3655 case 0: gen_neon_unzip_u8(t0, t1); break;
3656 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3657 case 2: /* no-op */; break;
3658 default: abort();
3659 }
dd8fbd78
FN
3660 neon_store_scratch(tmp + n, t0);
3661 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3662 }
3663}
3664
19457615
FN
3665static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3666{
3667 TCGv rd, tmp;
3668
3669 rd = new_tmp();
3670 tmp = new_tmp();
3671
3672 tcg_gen_shli_i32(rd, t0, 8);
3673 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3674 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3675 tcg_gen_or_i32(rd, rd, tmp);
3676
3677 tcg_gen_shri_i32(t1, t1, 8);
3678 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3679 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3680 tcg_gen_or_i32(t1, t1, tmp);
3681 tcg_gen_mov_i32(t0, rd);
3682
3683 dead_tmp(tmp);
3684 dead_tmp(rd);
3685}
3686
3687static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3688{
3689 TCGv rd, tmp;
3690
3691 rd = new_tmp();
3692 tmp = new_tmp();
3693
3694 tcg_gen_shli_i32(rd, t0, 16);
3695 tcg_gen_andi_i32(tmp, t1, 0xffff);
3696 tcg_gen_or_i32(rd, rd, tmp);
3697 tcg_gen_shri_i32(t1, t1, 16);
3698 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3699 tcg_gen_or_i32(t1, t1, tmp);
3700 tcg_gen_mov_i32(t0, rd);
3701
3702 dead_tmp(tmp);
3703 dead_tmp(rd);
3704}
3705
3706
9ee6e8bb
PB
3707static struct {
3708 int nregs;
3709 int interleave;
3710 int spacing;
3711} neon_ls_element_type[11] = {
3712 {4, 4, 1},
3713 {4, 4, 2},
3714 {4, 1, 1},
3715 {4, 2, 1},
3716 {3, 3, 1},
3717 {3, 3, 2},
3718 {3, 1, 1},
3719 {1, 1, 1},
3720 {2, 2, 1},
3721 {2, 2, 2},
3722 {2, 1, 1}
3723};
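/* Added note: this table is indexed by the op field (bits [11:8]) of a NEON
 * "load/store all elements" instruction and gives, for each form, how many D
 * registers are accessed, how elements from different registers interleave in
 * memory, and the spacing between the registers; op values above 10 are
 * rejected as undefined in disas_neon_ls_insn below. */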
3724
3725/* Translate a NEON load/store element instruction. Return nonzero if the
3726 instruction is invalid. */
3727static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3728{
3729 int rd, rn, rm;
3730 int op;
3731 int nregs;
3732 int interleave;
84496233 3733 int spacing;
9ee6e8bb
PB
3734 int stride;
3735 int size;
3736 int reg;
3737 int pass;
3738 int load;
3739 int shift;
9ee6e8bb 3740 int n;
1b2b1e54 3741 TCGv addr;
b0109805 3742 TCGv tmp;
8f8e3aa4 3743 TCGv tmp2;
84496233 3744 TCGv_i64 tmp64;
9ee6e8bb
PB
3745
3746 if (!vfp_enabled(env))
3747 return 1;
3748 VFP_DREG_D(rd, insn);
3749 rn = (insn >> 16) & 0xf;
3750 rm = insn & 0xf;
3751 load = (insn & (1 << 21)) != 0;
1b2b1e54 3752 addr = new_tmp();
9ee6e8bb
PB
3753 if ((insn & (1 << 23)) == 0) {
3754 /* Load store all elements. */
3755 op = (insn >> 8) & 0xf;
3756 size = (insn >> 6) & 3;
84496233 3757 if (op > 10)
9ee6e8bb
PB
3758 return 1;
3759 nregs = neon_ls_element_type[op].nregs;
3760 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3761 spacing = neon_ls_element_type[op].spacing;
3762 if (size == 3 && (interleave | spacing) != 1)
3763 return 1;
dcc65026 3764 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3765 stride = (1 << size) * interleave;
3766 for (reg = 0; reg < nregs; reg++) {
3767 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3768 load_reg_var(s, addr, rn);
3769 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3770 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3771 load_reg_var(s, addr, rn);
3772 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3773 }
84496233
JR
3774 if (size == 3) {
3775 if (load) {
3776 tmp64 = gen_ld64(addr, IS_USER(s));
3777 neon_store_reg64(tmp64, rd);
3778 tcg_temp_free_i64(tmp64);
3779 } else {
3780 tmp64 = tcg_temp_new_i64();
3781 neon_load_reg64(tmp64, rd);
3782 gen_st64(tmp64, addr, IS_USER(s));
3783 }
3784 tcg_gen_addi_i32(addr, addr, stride);
3785 } else {
3786 for (pass = 0; pass < 2; pass++) {
3787 if (size == 2) {
3788 if (load) {
3789 tmp = gen_ld32(addr, IS_USER(s));
3790 neon_store_reg(rd, pass, tmp);
3791 } else {
3792 tmp = neon_load_reg(rd, pass);
3793 gen_st32(tmp, addr, IS_USER(s));
3794 }
1b2b1e54 3795 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3796 } else if (size == 1) {
3797 if (load) {
3798 tmp = gen_ld16u(addr, IS_USER(s));
3799 tcg_gen_addi_i32(addr, addr, stride);
3800 tmp2 = gen_ld16u(addr, IS_USER(s));
3801 tcg_gen_addi_i32(addr, addr, stride);
3802 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3803 dead_tmp(tmp2);
3804 neon_store_reg(rd, pass, tmp);
3805 } else {
3806 tmp = neon_load_reg(rd, pass);
3807 tmp2 = new_tmp();
3808 tcg_gen_shri_i32(tmp2, tmp, 16);
3809 gen_st16(tmp, addr, IS_USER(s));
3810 tcg_gen_addi_i32(addr, addr, stride);
3811 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3812 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3813 }
84496233
JR
3814 } else /* size == 0 */ {
3815 if (load) {
3816 TCGV_UNUSED(tmp2);
3817 for (n = 0; n < 4; n++) {
3818 tmp = gen_ld8u(addr, IS_USER(s));
3819 tcg_gen_addi_i32(addr, addr, stride);
3820 if (n == 0) {
3821 tmp2 = tmp;
3822 } else {
3823 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3824 dead_tmp(tmp);
3825 }
9ee6e8bb 3826 }
84496233
JR
3827 neon_store_reg(rd, pass, tmp2);
3828 } else {
3829 tmp2 = neon_load_reg(rd, pass);
3830 for (n = 0; n < 4; n++) {
3831 tmp = new_tmp();
3832 if (n == 0) {
3833 tcg_gen_mov_i32(tmp, tmp2);
3834 } else {
3835 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3836 }
3837 gen_st8(tmp, addr, IS_USER(s));
3838 tcg_gen_addi_i32(addr, addr, stride);
3839 }
3840 dead_tmp(tmp2);
9ee6e8bb
PB
3841 }
3842 }
3843 }
3844 }
84496233 3845 rd += spacing;
9ee6e8bb
PB
3846 }
3847 stride = nregs * 8;
3848 } else {
3849 size = (insn >> 10) & 3;
3850 if (size == 3) {
3851 /* Load single element to all lanes. */
3852 if (!load)
3853 return 1;
3854 size = (insn >> 6) & 3;
3855 nregs = ((insn >> 8) & 3) + 1;
3856 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3857 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3858 for (reg = 0; reg < nregs; reg++) {
3859 switch (size) {
3860 case 0:
1b2b1e54 3861 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3862 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3863 break;
3864 case 1:
1b2b1e54 3865 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3866 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3867 break;
3868 case 2:
1b2b1e54 3869 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3870 break;
3871 case 3:
3872 return 1;
a50f5b91
PB
3873 default: /* Avoid compiler warnings. */
3874 abort();
99c475ab 3875 }
1b2b1e54 3876 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3877 tmp2 = new_tmp();
3878 tcg_gen_mov_i32(tmp2, tmp);
3879 neon_store_reg(rd, 0, tmp2);
3018f259 3880 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3881 rd += stride;
3882 }
3883 stride = (1 << size) * nregs;
3884 } else {
3885 /* Single element. */
3886 pass = (insn >> 7) & 1;
3887 switch (size) {
3888 case 0:
3889 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3890 stride = 1;
3891 break;
3892 case 1:
3893 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3894 stride = (insn & (1 << 5)) ? 2 : 1;
3895 break;
3896 case 2:
3897 shift = 0;
9ee6e8bb
PB
3898 stride = (insn & (1 << 6)) ? 2 : 1;
3899 break;
3900 default:
3901 abort();
3902 }
3903 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3904 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3905 for (reg = 0; reg < nregs; reg++) {
3906 if (load) {
9ee6e8bb
PB
3907 switch (size) {
3908 case 0:
1b2b1e54 3909 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3910 break;
3911 case 1:
1b2b1e54 3912 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3913 break;
3914 case 2:
1b2b1e54 3915 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3916 break;
a50f5b91
PB
3917 default: /* Avoid compiler warnings. */
3918 abort();
9ee6e8bb
PB
3919 }
3920 if (size != 2) {
8f8e3aa4
PB
3921 tmp2 = neon_load_reg(rd, pass);
3922 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3923 dead_tmp(tmp2);
9ee6e8bb 3924 }
8f8e3aa4 3925 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3926 } else { /* Store */
8f8e3aa4
PB
3927 tmp = neon_load_reg(rd, pass);
3928 if (shift)
3929 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3930 switch (size) {
3931 case 0:
1b2b1e54 3932 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3933 break;
3934 case 1:
1b2b1e54 3935 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3936 break;
3937 case 2:
1b2b1e54 3938 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3939 break;
99c475ab 3940 }
99c475ab 3941 }
9ee6e8bb 3942 rd += stride;
1b2b1e54 3943 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 3944 }
9ee6e8bb 3945 stride = nregs * (1 << size);
99c475ab 3946 }
9ee6e8bb 3947 }
1b2b1e54 3948 dead_tmp(addr);
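/* Base register writeback for the element/structure load/store: rm == 15
   means no writeback, rm == 13 post-increments rn by the number of bytes
   transferred (stride), and any other rm post-increments rn by that
   register's value. */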
9ee6e8bb 3949 if (rm != 15) {
b26eefb6
PB
3950 TCGv base;
3951
3952 base = load_reg(s, rn);
9ee6e8bb 3953 if (rm == 13) {
b26eefb6 3954 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3955 } else {
b26eefb6
PB
3956 TCGv index;
3957 index = load_reg(s, rm);
3958 tcg_gen_add_i32(base, base, index);
3959 dead_tmp(index);
9ee6e8bb 3960 }
b26eefb6 3961 store_reg(s, rn, base);
9ee6e8bb
PB
3962 }
3963 return 0;
3964}
3b46e624 3965
8f8e3aa4
PB
3966/* Bitwise select. dest = c ? t : f. Clobbers t and f. */
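/* VBSL, VBIT and VBIF are all implemented with this helper; they differ
   only in which source register supplies the select mask c and which two
   values are selected between (see the logic-op cases in
   disas_neon_data_insn below). */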
3967static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3968{
3969 tcg_gen_and_i32(t, t, c);
f669df27 3970 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
3971 tcg_gen_or_i32(dest, t, f);
3972}
3973
a7812ae4 3974static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3975{
3976 switch (size) {
3977 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3978 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3979 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3980 default: abort();
3981 }
3982}
3983
a7812ae4 3984static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3985{
3986 switch (size) {
3987 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3988 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3989 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3990 default: abort();
3991 }
3992}
3993
a7812ae4 3994static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3995{
3996 switch (size) {
3997 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3998 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3999 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4000 default: abort();
4001 }
4002}
4003
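/* Shift helper for the shift-and-narrow operations: q selects the rounding
   shift helpers and u the unsigned ones. Only 16-bit and 32-bit element
   sizes are expected here; 64-bit elements are shifted with the i64
   helpers by the caller. */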
4004static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4005 int q, int u)
4006{
4007 if (q) {
4008 if (u) {
4009 switch (size) {
4010 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4011 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4012 default: abort();
4013 }
4014 } else {
4015 switch (size) {
4016 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4017 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4018 default: abort();
4019 }
4020 }
4021 } else {
4022 if (u) {
4023 switch (size) {
4024 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4025 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4026 default: abort();
4027 }
4028 } else {
4029 switch (size) {
4030 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4031 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4032 default: abort();
4033 }
4034 }
4035 }
4036}
4037
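/* Widen a 32-bit source to 64 bits, signed or unsigned according to u.
   Note that the source temporary is freed here, so callers must not use
   it again. */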
a7812ae4 4038static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4039{
4040 if (u) {
4041 switch (size) {
4042 case 0: gen_helper_neon_widen_u8(dest, src); break;
4043 case 1: gen_helper_neon_widen_u16(dest, src); break;
4044 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4045 default: abort();
4046 }
4047 } else {
4048 switch (size) {
4049 case 0: gen_helper_neon_widen_s8(dest, src); break;
4050 case 1: gen_helper_neon_widen_s16(dest, src); break;
4051 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4052 default: abort();
4053 }
4054 }
4055 dead_tmp(src);
4056}
4057
4058static inline void gen_neon_addl(int size)
4059{
4060 switch (size) {
4061 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4062 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4063 case 2: tcg_gen_add_i64(CPU_V001); break;
4064 default: abort();
4065 }
4066}
4067
4068static inline void gen_neon_subl(int size)
4069{
4070 switch (size) {
4071 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4072 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4073 case 2: tcg_gen_sub_i64(CPU_V001); break;
4074 default: abort();
4075 }
4076}
4077
a7812ae4 4078static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4079{
4080 switch (size) {
4081 case 0: gen_helper_neon_negl_u16(var, var); break;
4082 case 1: gen_helper_neon_negl_u32(var, var); break;
4083 case 2: gen_helper_neon_negl_u64(var, var); break;
4084 default: abort();
4085 }
4086}
4087
a7812ae4 4088static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4089{
4090 switch (size) {
4091 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4092 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4093 default: abort();
4094 }
4095}
4096
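/* Widening multiply: the index (size << 1) | u selects the helper. The
   32x32->64 cases (size == 2) reuse the generic signed/unsigned 64-bit
   multiply helpers instead of a Neon-specific one. */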
a7812ae4 4097static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4098{
a7812ae4 4099 TCGv_i64 tmp;
ad69471c
PB
4100
4101 switch ((size << 1) | u) {
4102 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4103 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4104 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4105 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4106 case 4:
4107 tmp = gen_muls_i64_i32(a, b);
4108 tcg_gen_mov_i64(dest, tmp);
4109 break;
4110 case 5:
4111 tmp = gen_mulu_i64_i32(a, b);
4112 tcg_gen_mov_i64(dest, tmp);
4113 break;
4114 default: abort();
4115 }
ad69471c
PB
4116}
4117
9ee6e8bb
PB
4118/* Translate a NEON data processing instruction. Return nonzero if the
4119 instruction is invalid.
ad69471c
PB
4120 We process data in a mixture of 32-bit and 64-bit chunks.
4121 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4122
9ee6e8bb
PB
4123static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4124{
4125 int op;
4126 int q;
4127 int rd, rn, rm;
4128 int size;
4129 int shift;
4130 int pass;
4131 int count;
4132 int pairwise;
4133 int u;
4134 int n;
ca9a32e4 4135 uint32_t imm, mask;
b75263d6 4136 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4137 TCGv_i64 tmp64;
9ee6e8bb
PB
4138
4139 if (!vfp_enabled(env))
4140 return 1;
4141 q = (insn & (1 << 6)) != 0;
4142 u = (insn >> 24) & 1;
4143 VFP_DREG_D(rd, insn);
4144 VFP_DREG_N(rn, insn);
4145 VFP_DREG_M(rm, insn);
4146 size = (insn >> 20) & 3;
4147 if ((insn & (1 << 23)) == 0) {
4148 /* Three register same length. */
4149 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4150 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4151 || op == 10 || op == 11 || op == 16)) {
4152 /* 64-bit element instructions. */
9ee6e8bb 4153 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4154 neon_load_reg64(cpu_V0, rn + pass);
4155 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4156 switch (op) {
4157 case 1: /* VQADD */
4158 if (u) {
ad69471c 4159 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4160 } else {
ad69471c 4161 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4162 }
9ee6e8bb
PB
4163 break;
4164 case 5: /* VQSUB */
4165 if (u) {
ad69471c
PB
4166 gen_helper_neon_sub_saturate_u64(CPU_V001);
4167 } else {
4168 gen_helper_neon_sub_saturate_s64(CPU_V001);
4169 }
4170 break;
4171 case 8: /* VSHL */
4172 if (u) {
4173 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4174 } else {
4175 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4176 }
4177 break;
4178 case 9: /* VQSHL */
4179 if (u) {
4180 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4181 cpu_V1, cpu_V0);
4182 } else {
4183 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4184 cpu_V1, cpu_V0);
4185 }
4186 break;
4187 case 10: /* VRSHL */
4188 if (u) {
4189 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4190 } else {
ad69471c
PB
4191 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4192 }
4193 break;
4194 case 11: /* VQRSHL */
4195 if (u) {
4196 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4197 cpu_V1, cpu_V0);
4198 } else {
4199 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4200 cpu_V1, cpu_V0);
1e8d4eec 4201 }
9ee6e8bb
PB
4202 break;
4203 case 16:
4204 if (u) {
ad69471c 4205 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4206 } else {
ad69471c 4207 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4208 }
4209 break;
4210 default:
4211 abort();
2c0262af 4212 }
ad69471c 4213 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4214 }
9ee6e8bb 4215 return 0;
2c0262af 4216 }
9ee6e8bb
PB
4217 switch (op) {
4218 case 8: /* VSHL */
4219 case 9: /* VQSHL */
4220 case 10: /* VRSHL */
ad69471c 4221 case 11: /* VQRSHL */
9ee6e8bb 4222 {
ad69471c
PB
4223 int rtmp;
4224 /* Shift instruction operands are reversed. */
4225 rtmp = rn;
9ee6e8bb 4226 rn = rm;
ad69471c 4227 rm = rtmp;
9ee6e8bb
PB
4228 pairwise = 0;
4229 }
2c0262af 4230 break;
9ee6e8bb
PB
4231 case 20: /* VPMAX */
4232 case 21: /* VPMIN */
4233 case 23: /* VPADD */
4234 pairwise = 1;
2c0262af 4235 break;
9ee6e8bb
PB
4236 case 26: /* VPADD (float) */
4237 pairwise = (u && size < 2);
2c0262af 4238 break;
9ee6e8bb
PB
4239 case 30: /* VPMIN/VPMAX (float) */
4240 pairwise = u;
2c0262af 4241 break;
9ee6e8bb
PB
4242 default:
4243 pairwise = 0;
2c0262af 4244 break;
9ee6e8bb 4245 }
dd8fbd78 4246
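/* For pairwise operations each result element is formed from a pair of
   adjacent source elements, so when rd overlaps rm the results are staged
   in the scratch area and copied back after the pass loop. */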
9ee6e8bb
PB
4247 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4248
4249 if (pairwise) {
4250 /* Pairwise. */
4251 if (q)
4252 n = (pass & 1) * 2;
2c0262af 4253 else
9ee6e8bb
PB
4254 n = 0;
4255 if (pass < q + 1) {
dd8fbd78
FN
4256 tmp = neon_load_reg(rn, n);
4257 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4258 } else {
dd8fbd78
FN
4259 tmp = neon_load_reg(rm, n);
4260 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4261 }
4262 } else {
4263 /* Elementwise. */
dd8fbd78
FN
4264 tmp = neon_load_reg(rn, pass);
4265 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4266 }
4267 switch (op) {
4268 case 0: /* VHADD */
4269 GEN_NEON_INTEGER_OP(hadd);
4270 break;
4271 case 1: /* VQADD */
ad69471c 4272 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4273 break;
9ee6e8bb
PB
4274 case 2: /* VRHADD */
4275 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4276 break;
9ee6e8bb
PB
4277 case 3: /* Logic ops. */
4278 switch ((u << 2) | size) {
4279 case 0: /* VAND */
dd8fbd78 4280 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4281 break;
4282 case 1: /* BIC */
f669df27 4283 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4284 break;
4285 case 2: /* VORR */
dd8fbd78 4286 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4287 break;
4288 case 3: /* VORN */
f669df27 4289 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4290 break;
4291 case 4: /* VEOR */
dd8fbd78 4292 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4293 break;
4294 case 5: /* VBSL */
dd8fbd78
FN
4295 tmp3 = neon_load_reg(rd, pass);
4296 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4297 dead_tmp(tmp3);
9ee6e8bb
PB
4298 break;
4299 case 6: /* VBIT */
dd8fbd78
FN
4300 tmp3 = neon_load_reg(rd, pass);
4301 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4302 dead_tmp(tmp3);
9ee6e8bb
PB
4303 break;
4304 case 7: /* VBIF */
dd8fbd78
FN
4305 tmp3 = neon_load_reg(rd, pass);
4306 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4307 dead_tmp(tmp3);
9ee6e8bb 4308 break;
2c0262af
FB
4309 }
4310 break;
9ee6e8bb
PB
4311 case 4: /* VHSUB */
4312 GEN_NEON_INTEGER_OP(hsub);
4313 break;
4314 case 5: /* VQSUB */
ad69471c 4315 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4316 break;
9ee6e8bb
PB
4317 case 6: /* VCGT */
4318 GEN_NEON_INTEGER_OP(cgt);
4319 break;
4320 case 7: /* VCGE */
4321 GEN_NEON_INTEGER_OP(cge);
4322 break;
4323 case 8: /* VSHL */
ad69471c 4324 GEN_NEON_INTEGER_OP(shl);
2c0262af 4325 break;
9ee6e8bb 4326 case 9: /* VQSHL */
ad69471c 4327 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4328 break;
9ee6e8bb 4329 case 10: /* VRSHL */
ad69471c 4330 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4331 break;
9ee6e8bb 4332 case 11: /* VQRSHL */
ad69471c 4333 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4334 break;
4335 case 12: /* VMAX */
4336 GEN_NEON_INTEGER_OP(max);
4337 break;
4338 case 13: /* VMIN */
4339 GEN_NEON_INTEGER_OP(min);
4340 break;
4341 case 14: /* VABD */
4342 GEN_NEON_INTEGER_OP(abd);
4343 break;
4344 case 15: /* VABA */
4345 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4346 dead_tmp(tmp2);
4347 tmp2 = neon_load_reg(rd, pass);
4348 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4349 break;
4350 case 16:
4351 if (!u) { /* VADD */
dd8fbd78 4352 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4353 return 1;
4354 } else { /* VSUB */
4355 switch (size) {
dd8fbd78
FN
4356 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4357 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4358 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4359 default: return 1;
4360 }
4361 }
4362 break;
4363 case 17:
4364 if (!u) { /* VTST */
4365 switch (size) {
dd8fbd78
FN
4366 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4367 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4368 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4369 default: return 1;
4370 }
4371 } else { /* VCEQ */
4372 switch (size) {
dd8fbd78
FN
4373 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4374 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4375 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4376 default: return 1;
4377 }
4378 }
4379 break;
4380 case 18: /* Multiply. */
4381 switch (size) {
dd8fbd78
FN
4382 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4383 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4384 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4385 default: return 1;
4386 }
dd8fbd78
FN
4387 dead_tmp(tmp2);
4388 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4389 if (u) { /* VMLS */
dd8fbd78 4390 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4391 } else { /* VMLA */
dd8fbd78 4392 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4393 }
4394 break;
4395 case 19: /* VMUL */
4396 if (u) { /* polynomial */
dd8fbd78 4397 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4398 } else { /* Integer */
4399 switch (size) {
dd8fbd78
FN
4400 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4401 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4402 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4403 default: return 1;
4404 }
4405 }
4406 break;
4407 case 20: /* VPMAX */
4408 GEN_NEON_INTEGER_OP(pmax);
4409 break;
4410 case 21: /* VPMIN */
4411 GEN_NEON_INTEGER_OP(pmin);
4412 break;
4413 case 22: /* Multiply high. */
4414 if (!u) { /* VQDMULH */
4415 switch (size) {
dd8fbd78
FN
4416 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4417 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4418 default: return 1;
4419 }
4420 } else { /* VQRDMULH */
4421 switch (size) {
dd8fbd78
FN
4422 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4423 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4424 default: return 1;
4425 }
4426 }
4427 break;
4428 case 23: /* VPADD */
4429 if (u)
4430 return 1;
4431 switch (size) {
dd8fbd78
FN
4432 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4433 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4434 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4435 default: return 1;
4436 }
4437 break;
4438 case 26: /* Floating point arithmetic. */
4439 switch ((u << 2) | size) {
4440 case 0: /* VADD */
dd8fbd78 4441 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4442 break;
4443 case 2: /* VSUB */
dd8fbd78 4444 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4445 break;
4446 case 4: /* VPADD */
dd8fbd78 4447 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4448 break;
4449 case 6: /* VABD */
dd8fbd78 4450 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4451 break;
4452 default:
4453 return 1;
4454 }
4455 break;
4456 case 27: /* Float multiply. */
dd8fbd78 4457 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4458 if (!u) {
dd8fbd78
FN
4459 dead_tmp(tmp2);
4460 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4461 if (size == 0) {
dd8fbd78 4462 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4463 } else {
dd8fbd78 4464 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4465 }
4466 }
4467 break;
4468 case 28: /* Float compare. */
4469 if (!u) {
dd8fbd78 4470 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4471 } else {
9ee6e8bb 4472 if (size == 0)
dd8fbd78 4473 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4474 else
dd8fbd78 4475 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4476 }
2c0262af 4477 break;
9ee6e8bb
PB
4478 case 29: /* Float compare absolute. */
4479 if (!u)
4480 return 1;
4481 if (size == 0)
dd8fbd78 4482 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4483 else
dd8fbd78 4484 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4485 break;
9ee6e8bb
PB
4486 case 30: /* Float min/max. */
4487 if (size == 0)
dd8fbd78 4488 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4489 else
dd8fbd78 4490 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4491 break;
4492 case 31:
4493 if (size == 0)
dd8fbd78 4494 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4495 else
dd8fbd78 4496 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4497 break;
9ee6e8bb
PB
4498 default:
4499 abort();
2c0262af 4500 }
dd8fbd78
FN
4501 dead_tmp(tmp2);
4502
9ee6e8bb
PB
4503 /* Save the result. For elementwise operations we can put it
4504 straight into the destination register. For pairwise operations
4505 we have to be careful to avoid clobbering the source operands. */
4506 if (pairwise && rd == rm) {
dd8fbd78 4507 neon_store_scratch(pass, tmp);
9ee6e8bb 4508 } else {
dd8fbd78 4509 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4510 }
4511
4512 } /* for pass */
4513 if (pairwise && rd == rm) {
4514 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4515 tmp = neon_load_scratch(pass);
4516 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4517 }
4518 }
ad69471c 4519 /* End of 3 register same size operations. */
9ee6e8bb
PB
4520 } else if (insn & (1 << 4)) {
4521 if ((insn & 0x00380080) != 0) {
4522 /* Two registers and shift. */
4523 op = (insn >> 8) & 0xf;
4524 if (insn & (1 << 7)) {
4525 /* 64-bit shift. */
4526 size = 3;
4527 } else {
4528 size = 2;
4529 while ((insn & (1 << (size + 19))) == 0)
4530 size--;
4531 }
4532 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4533 /* To avoid excessive duplication of ops we implement shift
4534 by immediate using the variable shift operations. */
4535 if (op < 8) {
4536 /* Shift by immediate:
4537 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4538 /* Right shifts are encoded as N - shift, where N is the
4539 element size in bits. */
4540 if (op <= 4)
4541 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4542 if (size == 3) {
4543 count = q + 1;
4544 } else {
4545 count = q ? 4: 2;
4546 }
4547 switch (size) {
4548 case 0:
4549 imm = (uint8_t) shift;
4550 imm |= imm << 8;
4551 imm |= imm << 16;
4552 break;
4553 case 1:
4554 imm = (uint16_t) shift;
4555 imm |= imm << 16;
4556 break;
4557 case 2:
4558 case 3:
4559 imm = shift;
4560 break;
4561 default:
4562 abort();
4563 }
4564
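/* The shift count is replicated into every element lane of a 32-bit word
   so that the variable-shift helpers below see a per-element count. */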
4565 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4566 if (size == 3) {
4567 neon_load_reg64(cpu_V0, rm + pass);
4568 tcg_gen_movi_i64(cpu_V1, imm);
4569 switch (op) {
4570 case 0: /* VSHR */
4571 case 1: /* VSRA */
4572 if (u)
4573 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4574 else
ad69471c 4575 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4576 break;
ad69471c
PB
4577 case 2: /* VRSHR */
4578 case 3: /* VRSRA */
4579 if (u)
4580 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4581 else
ad69471c 4582 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4583 break;
ad69471c
PB
4584 case 4: /* VSRI */
4585 if (!u)
4586 return 1;
4587 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4588 break;
4589 case 5: /* VSHL, VSLI */
4590 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4591 break;
4592 case 6: /* VQSHL */
4593 if (u)
4594 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4595 else
ad69471c
PB
4596 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4597 break;
4598 case 7: /* VQSHLU */
4599 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4600 break;
9ee6e8bb 4601 }
ad69471c
PB
4602 if (op == 1 || op == 3) {
4603 /* Accumulate. */
4604 neon_load_reg64(cpu_V0, rd + pass);
4605 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4606 } else if (op == 4 || (op == 5 && u)) {
4607 /* Insert */
4608 cpu_abort(env, "VS[LR]I.64 not implemented");
4609 }
4610 neon_store_reg64(cpu_V0, rd + pass);
4611 } else { /* size < 3 */
4612 /* Operands in T0 and T1. */
dd8fbd78
FN
4613 tmp = neon_load_reg(rm, pass);
4614 tmp2 = new_tmp();
4615 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4616 switch (op) {
4617 case 0: /* VSHR */
4618 case 1: /* VSRA */
4619 GEN_NEON_INTEGER_OP(shl);
4620 break;
4621 case 2: /* VRSHR */
4622 case 3: /* VRSRA */
4623 GEN_NEON_INTEGER_OP(rshl);
4624 break;
4625 case 4: /* VSRI */
4626 if (!u)
4627 return 1;
4628 GEN_NEON_INTEGER_OP(shl);
4629 break;
4630 case 5: /* VSHL, VSLI */
4631 switch (size) {
dd8fbd78
FN
4632 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4633 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4634 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4635 default: return 1;
4636 }
4637 break;
4638 case 6: /* VQSHL */
4639 GEN_NEON_INTEGER_OP_ENV(qshl);
4640 break;
4641 case 7: /* VQSHLU */
4642 switch (size) {
dd8fbd78
FN
4643 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4644 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4645 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4646 default: return 1;
4647 }
4648 break;
4649 }
dd8fbd78 4650 dead_tmp(tmp2);
ad69471c
PB
4651
4652 if (op == 1 || op == 3) {
4653 /* Accumulate. */
dd8fbd78
FN
4654 tmp2 = neon_load_reg(rd, pass);
4655 gen_neon_add(size, tmp2, tmp);
4656 dead_tmp(tmp2);
ad69471c
PB
4657 } else if (op == 4 || (op == 5 && u)) {
4658 /* Insert */
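/* The mask covers the bits produced by the shift: for VSRI the (negative)
   count selects the low bits of each element, for VSLI the high bits.
   Bits outside the mask are taken from the existing destination value. */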
4659 switch (size) {
4660 case 0:
4661 if (op == 4)
ca9a32e4 4662 mask = 0xff >> -shift;
ad69471c 4663 else
ca9a32e4
JR
4664 mask = (uint8_t)(0xff << shift);
4665 mask |= mask << 8;
4666 mask |= mask << 16;
ad69471c
PB
4667 break;
4668 case 1:
4669 if (op == 4)
ca9a32e4 4670 mask = 0xffff >> -shift;
ad69471c 4671 else
ca9a32e4
JR
4672 mask = (uint16_t)(0xffff << shift);
4673 mask |= mask << 16;
ad69471c
PB
4674 break;
4675 case 2:
ca9a32e4
JR
4676 if (shift < -31 || shift > 31) {
4677 mask = 0;
4678 } else {
4679 if (op == 4)
4680 mask = 0xffffffffu >> -shift;
4681 else
4682 mask = 0xffffffffu << shift;
4683 }
ad69471c
PB
4684 break;
4685 default:
4686 abort();
4687 }
dd8fbd78 4688 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4689 tcg_gen_andi_i32(tmp, tmp, mask);
4690 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78
FN
4691 tcg_gen_or_i32(tmp, tmp, tmp2);
4692 dead_tmp(tmp2);
ad69471c 4693 }
dd8fbd78 4694 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4695 }
4696 } /* for pass */
4697 } else if (op < 10) {
ad69471c 4698 /* Shift by immediate and narrow:
9ee6e8bb
PB
4699 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4700 shift = shift - (1 << (size + 3));
4701 size++;
9ee6e8bb
PB
4702 switch (size) {
4703 case 1:
ad69471c 4704 imm = (uint16_t)shift;
9ee6e8bb 4705 imm |= imm << 16;
ad69471c 4706 tmp2 = tcg_const_i32(imm);
a7812ae4 4707 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4708 break;
4709 case 2:
ad69471c
PB
4710 imm = (uint32_t)shift;
4711 tmp2 = tcg_const_i32(imm);
a7812ae4 4712 TCGV_UNUSED_I64(tmp64);
4cc633c3 4713 break;
9ee6e8bb 4714 case 3:
a7812ae4
PB
4715 tmp64 = tcg_const_i64(shift);
4716 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4717 break;
4718 default:
4719 abort();
4720 }
4721
ad69471c
PB
4722 for (pass = 0; pass < 2; pass++) {
4723 if (size == 3) {
4724 neon_load_reg64(cpu_V0, rm + pass);
4725 if (q) {
4726 if (u)
a7812ae4 4727 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4728 else
a7812ae4 4729 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4730 } else {
4731 if (u)
a7812ae4 4732 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4733 else
a7812ae4 4734 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4735 }
2c0262af 4736 } else {
ad69471c
PB
4737 tmp = neon_load_reg(rm + pass, 0);
4738 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4739 tmp3 = neon_load_reg(rm + pass, 1);
4740 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4741 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4742 dead_tmp(tmp);
36aa55dc 4743 dead_tmp(tmp3);
9ee6e8bb 4744 }
ad69471c
PB
4745 tmp = new_tmp();
4746 if (op == 8 && !u) {
4747 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4748 } else {
ad69471c
PB
4749 if (op == 8)
4750 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4751 else
ad69471c
PB
4752 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4753 }
2301db49 4754 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4755 } /* for pass */
b75263d6
JR
4756 if (size == 3) {
4757 tcg_temp_free_i64(tmp64);
2301db49
JR
4758 } else {
4759 dead_tmp(tmp2);
b75263d6 4760 }
9ee6e8bb
PB
4761 } else if (op == 10) {
4762 /* VSHLL */
ad69471c 4763 if (q || size == 3)
9ee6e8bb 4764 return 1;
ad69471c
PB
4765 tmp = neon_load_reg(rm, 0);
4766 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4767 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4768 if (pass == 1)
4769 tmp = tmp2;
4770
4771 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4772
9ee6e8bb
PB
4773 if (shift != 0) {
4774 /* The shift is less than the width of the source
ad69471c
PB
4775 type, so we can just shift the whole register. */
4776 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4777 if (size < 2 || !u) {
4778 uint64_t imm64;
4779 if (size == 0) {
4780 imm = (0xffu >> (8 - shift));
4781 imm |= imm << 16;
4782 } else {
4783 imm = 0xffff >> (16 - shift);
9ee6e8bb 4784 }
ad69471c
PB
4785 imm64 = imm | (((uint64_t)imm) << 32);
4786 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4787 }
4788 }
ad69471c 4789 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4790 }
4791 } else if (op == 15 || op == 16) {
4792 /* VCVT fixed-point. */
4793 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4794 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4795 if (op & 1) {
4796 if (u)
4373f3ce 4797 gen_vfp_ulto(0, shift);
9ee6e8bb 4798 else
4373f3ce 4799 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4800 } else {
4801 if (u)
4373f3ce 4802 gen_vfp_toul(0, shift);
9ee6e8bb 4803 else
4373f3ce 4804 gen_vfp_tosl(0, shift);
2c0262af 4805 }
4373f3ce 4806 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4807 }
4808 } else {
9ee6e8bb
PB
4809 return 1;
4810 }
4811 } else { /* (insn & 0x00380080) == 0 */
4812 int invert;
4813
4814 op = (insn >> 8) & 0xf;
4815 /* One register and immediate. */
4816 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4817 invert = (insn & (1 << 5)) != 0;
4818 switch (op) {
4819 case 0: case 1:
4820 /* no-op */
4821 break;
4822 case 2: case 3:
4823 imm <<= 8;
4824 break;
4825 case 4: case 5:
4826 imm <<= 16;
4827 break;
4828 case 6: case 7:
4829 imm <<= 24;
4830 break;
4831 case 8: case 9:
4832 imm |= imm << 16;
4833 break;
4834 case 10: case 11:
4835 imm = (imm << 8) | (imm << 24);
4836 break;
4837 case 12:
4838 imm = (imm << 8) | 0xff;
4839 break;
4840 case 13:
4841 imm = (imm << 16) | 0xffff;
4842 break;
4843 case 14:
4844 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4845 if (invert)
4846 imm = ~imm;
4847 break;
4848 case 15:
4849 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4850 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4851 break;
4852 }
4853 if (invert)
4854 imm = ~imm;
4855
9ee6e8bb
PB
4856 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4857 if (op & 1 && op < 12) {
ad69471c 4858 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4859 if (invert) {
4860 /* The immediate value has already been inverted, so
4861 BIC becomes AND. */
ad69471c 4862 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4863 } else {
ad69471c 4864 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4865 }
9ee6e8bb 4866 } else {
ad69471c
PB
4867 /* VMOV, VMVN. */
4868 tmp = new_tmp();
9ee6e8bb 4869 if (op == 14 && invert) {
ad69471c
PB
4870 uint32_t val;
4871 val = 0;
9ee6e8bb
PB
4872 for (n = 0; n < 4; n++) {
4873 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4874 val |= 0xff << (n * 8);
9ee6e8bb 4875 }
ad69471c
PB
4876 tcg_gen_movi_i32(tmp, val);
4877 } else {
4878 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4879 }
9ee6e8bb 4880 }
ad69471c 4881 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4882 }
4883 }
e4b3861d 4884 } else { /* (insn & 0x00800010) == 0x00800000 */
9ee6e8bb
PB
4885 if (size != 3) {
4886 op = (insn >> 8) & 0xf;
4887 if ((insn & (1 << 6)) == 0) {
4888 /* Three registers of different lengths. */
4889 int src1_wide;
4890 int src2_wide;
4891 int prewiden;
4892 /* prewiden, src1_wide, src2_wide */
4893 static const int neon_3reg_wide[16][3] = {
4894 {1, 0, 0}, /* VADDL */
4895 {1, 1, 0}, /* VADDW */
4896 {1, 0, 0}, /* VSUBL */
4897 {1, 1, 0}, /* VSUBW */
4898 {0, 1, 1}, /* VADDHN */
4899 {0, 0, 0}, /* VABAL */
4900 {0, 1, 1}, /* VSUBHN */
4901 {0, 0, 0}, /* VABDL */
4902 {0, 0, 0}, /* VMLAL */
4903 {0, 0, 0}, /* VQDMLAL */
4904 {0, 0, 0}, /* VMLSL */
4905 {0, 0, 0}, /* VQDMLSL */
4906 {0, 0, 0}, /* Integer VMULL */
4907 {0, 0, 0}, /* VQDMULL */
4908 {0, 0, 0} /* Polynomial VMULL */
4909 };
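/* A wide source operand is already a 64-bit element vector: it is loaded
   with neon_load_reg64 below and is never prewidened. */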
4910
4911 prewiden = neon_3reg_wide[op][0];
4912 src1_wide = neon_3reg_wide[op][1];
4913 src2_wide = neon_3reg_wide[op][2];
4914
ad69471c
PB
4915 if (size == 0 && (op == 9 || op == 11 || op == 13))
4916 return 1;
4917
9ee6e8bb
PB
4918 /* Avoid overlapping operands. Wide source operands are
4919 always aligned so will never overlap with wide
4920 destinations in problematic ways. */
8f8e3aa4 4921 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4922 tmp = neon_load_reg(rm, 1);
4923 neon_store_scratch(2, tmp);
8f8e3aa4 4924 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4925 tmp = neon_load_reg(rn, 1);
4926 neon_store_scratch(2, tmp);
9ee6e8bb 4927 }
a50f5b91 4928 TCGV_UNUSED(tmp3);
9ee6e8bb 4929 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4930 if (src1_wide) {
4931 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4932 TCGV_UNUSED(tmp);
9ee6e8bb 4933 } else {
ad69471c 4934 if (pass == 1 && rd == rn) {
dd8fbd78 4935 tmp = neon_load_scratch(2);
9ee6e8bb 4936 } else {
ad69471c
PB
4937 tmp = neon_load_reg(rn, pass);
4938 }
4939 if (prewiden) {
4940 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4941 }
4942 }
ad69471c
PB
4943 if (src2_wide) {
4944 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4945 TCGV_UNUSED(tmp2);
9ee6e8bb 4946 } else {
ad69471c 4947 if (pass == 1 && rd == rm) {
dd8fbd78 4948 tmp2 = neon_load_scratch(2);
9ee6e8bb 4949 } else {
ad69471c
PB
4950 tmp2 = neon_load_reg(rm, pass);
4951 }
4952 if (prewiden) {
4953 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4954 }
9ee6e8bb
PB
4955 }
4956 switch (op) {
4957 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4958 gen_neon_addl(size);
9ee6e8bb
PB
4959 break;
4960 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 4961 gen_neon_subl(size);
9ee6e8bb
PB
4962 break;
4963 case 5: case 7: /* VABAL, VABDL */
4964 switch ((size << 1) | u) {
ad69471c
PB
4965 case 0:
4966 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4967 break;
4968 case 1:
4969 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4970 break;
4971 case 2:
4972 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4973 break;
4974 case 3:
4975 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4976 break;
4977 case 4:
4978 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4979 break;
4980 case 5:
4981 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4982 break;
9ee6e8bb
PB
4983 default: abort();
4984 }
ad69471c
PB
4985 dead_tmp(tmp2);
4986 dead_tmp(tmp);
9ee6e8bb
PB
4987 break;
4988 case 8: case 9: case 10: case 11: case 12: case 13:
4989 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4990 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
4991 dead_tmp(tmp2);
4992 dead_tmp(tmp);
9ee6e8bb
PB
4993 break;
4994 case 14: /* Polynomial VMULL */
4995 cpu_abort(env, "Polynomial VMULL not implemented");
4996
4997 default: /* 15 is RESERVED. */
4998 return 1;
4999 }
5000 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5001 /* Accumulate. */
5002 if (op == 10 || op == 11) {
ad69471c 5003 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
5004 }
5005
9ee6e8bb 5006 if (op != 13) {
ad69471c 5007 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
5008 }
5009
5010 switch (op) {
5011 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 5012 gen_neon_addl(size);
9ee6e8bb
PB
5013 break;
5014 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5015 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5016 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5017 break;
9ee6e8bb
PB
5018
5019 case 13: /* VQDMULL */
ad69471c 5020 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5021 break;
5022 default:
5023 abort();
5024 }
ad69471c 5025 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5026 } else if (op == 4 || op == 6) {
5027 /* Narrowing operation. */
ad69471c 5028 tmp = new_tmp();
9ee6e8bb
PB
5029 if (u) {
5030 switch (size) {
ad69471c
PB
5031 case 0:
5032 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5033 break;
5034 case 1:
5035 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5036 break;
5037 case 2:
5038 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5039 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5040 break;
9ee6e8bb
PB
5041 default: abort();
5042 }
5043 } else {
5044 switch (size) {
ad69471c
PB
5045 case 0:
5046 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5047 break;
5048 case 1:
5049 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5050 break;
5051 case 2:
5052 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5053 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5054 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5055 break;
9ee6e8bb
PB
5056 default: abort();
5057 }
5058 }
ad69471c
PB
5059 if (pass == 0) {
5060 tmp3 = tmp;
5061 } else {
5062 neon_store_reg(rd, 0, tmp3);
5063 neon_store_reg(rd, 1, tmp);
5064 }
9ee6e8bb
PB
5065 } else {
5066 /* Write back the result. */
ad69471c 5067 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5068 }
5069 }
5070 } else {
5071 /* Two registers and a scalar. */
5072 switch (op) {
5073 case 0: /* Integer VMLA scalar */
5074 case 1: /* Float VMLA scalar */
5075 case 4: /* Integer VMLS scalar */
5076 case 5: /* Floating point VMLS scalar */
5077 case 8: /* Integer VMUL scalar */
5078 case 9: /* Floating point VMUL scalar */
5079 case 12: /* VQDMULH scalar */
5080 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5081 tmp = neon_get_scalar(size, rm);
5082 neon_store_scratch(0, tmp);
9ee6e8bb 5083 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5084 tmp = neon_load_scratch(0);
5085 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5086 if (op == 12) {
5087 if (size == 1) {
dd8fbd78 5088 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5089 } else {
dd8fbd78 5090 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5091 }
5092 } else if (op == 13) {
5093 if (size == 1) {
dd8fbd78 5094 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5095 } else {
dd8fbd78 5096 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5097 }
5098 } else if (op & 1) {
dd8fbd78 5099 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5100 } else {
5101 switch (size) {
dd8fbd78
FN
5102 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5103 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5104 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5105 default: return 1;
5106 }
5107 }
dd8fbd78 5108 dead_tmp(tmp2);
9ee6e8bb
PB
5109 if (op < 8) {
5110 /* Accumulate. */
dd8fbd78 5111 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5112 switch (op) {
5113 case 0:
dd8fbd78 5114 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5115 break;
5116 case 1:
dd8fbd78 5117 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5118 break;
5119 case 4:
dd8fbd78 5120 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5121 break;
5122 case 5:
dd8fbd78 5123 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5124 break;
5125 default:
5126 abort();
5127 }
dd8fbd78 5128 dead_tmp(tmp2);
9ee6e8bb 5129 }
dd8fbd78 5130 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5131 }
5132 break;
5133 case 2: /* VMLAL sclar */
5134 case 3: /* VQDMLAL scalar */
5135 case 6: /* VMLSL scalar */
5136 case 7: /* VQDMLSL scalar */
5137 case 10: /* VMULL scalar */
5138 case 11: /* VQDMULL scalar */
ad69471c
PB
5139 if (size == 0 && (op == 3 || op == 7 || op == 11))
5140 return 1;
5141
dd8fbd78
FN
5142 tmp2 = neon_get_scalar(size, rm);
5143 tmp3 = neon_load_reg(rn, 1);
ad69471c 5144
9ee6e8bb 5145 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5146 if (pass == 0) {
5147 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5148 } else {
dd8fbd78 5149 tmp = tmp3;
9ee6e8bb 5150 }
ad69471c 5151 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5152 dead_tmp(tmp);
9ee6e8bb 5153 if (op == 6 || op == 7) {
ad69471c
PB
5154 gen_neon_negl(cpu_V0, size);
5155 }
5156 if (op != 11) {
5157 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5158 }
9ee6e8bb
PB
5159 switch (op) {
5160 case 2: case 6:
ad69471c 5161 gen_neon_addl(size);
9ee6e8bb
PB
5162 break;
5163 case 3: case 7:
ad69471c
PB
5164 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5165 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5166 break;
5167 case 10:
5168 /* no-op */
5169 break;
5170 case 11:
ad69471c 5171 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5172 break;
5173 default:
5174 abort();
5175 }
ad69471c 5176 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5177 }
dd8fbd78
FN
5178
5179 dead_tmp(tmp2);
5180
9ee6e8bb
PB
5181 break;
5182 default: /* 14 and 15 are RESERVED */
5183 return 1;
5184 }
5185 }
5186 } else { /* size == 3 */
5187 if (!u) {
5188 /* Extract. */
9ee6e8bb 5189 imm = (insn >> 8) & 0xf;
ad69471c
PB
5190 count = q + 1;
5191
5192 if (imm > 7 && !q)
5193 return 1;
5194
5195 if (imm == 0) {
5196 neon_load_reg64(cpu_V0, rn);
5197 if (q) {
5198 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5199 }
ad69471c
PB
5200 } else if (imm == 8) {
5201 neon_load_reg64(cpu_V0, rn + 1);
5202 if (q) {
5203 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5204 }
ad69471c 5205 } else if (q) {
a7812ae4 5206 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5207 if (imm < 8) {
5208 neon_load_reg64(cpu_V0, rn);
a7812ae4 5209 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5210 } else {
5211 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5212 neon_load_reg64(tmp64, rm);
ad69471c
PB
5213 }
5214 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5215 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5216 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5217 if (imm < 8) {
5218 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5219 } else {
ad69471c
PB
5220 neon_load_reg64(cpu_V1, rm + 1);
5221 imm -= 8;
9ee6e8bb 5222 }
ad69471c 5223 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5224 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5225 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5226 tcg_temp_free_i64(tmp64);
ad69471c 5227 } else {
a7812ae4 5228 /* BUGFIX */
ad69471c 5229 neon_load_reg64(cpu_V0, rn);
a7812ae4 5230 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5231 neon_load_reg64(cpu_V1, rm);
a7812ae4 5232 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5233 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5234 }
5235 neon_store_reg64(cpu_V0, rd);
5236 if (q) {
5237 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5238 }
5239 } else if ((insn & (1 << 11)) == 0) {
5240 /* Two register misc. */
5241 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5242 size = (insn >> 18) & 3;
5243 switch (op) {
5244 case 0: /* VREV64 */
5245 if (size == 3)
5246 return 1;
5247 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5248 tmp = neon_load_reg(rm, pass * 2);
5249 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5250 switch (size) {
dd8fbd78
FN
5251 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5252 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5253 case 2: /* no-op */ break;
5254 default: abort();
5255 }
dd8fbd78 5256 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5257 if (size == 2) {
dd8fbd78 5258 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5259 } else {
9ee6e8bb 5260 switch (size) {
dd8fbd78
FN
5261 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5262 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5263 default: abort();
5264 }
dd8fbd78 5265 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5266 }
5267 }
5268 break;
5269 case 4: case 5: /* VPADDL */
5270 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5271 if (size == 3)
5272 return 1;
ad69471c
PB
5273 for (pass = 0; pass < q + 1; pass++) {
5274 tmp = neon_load_reg(rm, pass * 2);
5275 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5276 tmp = neon_load_reg(rm, pass * 2 + 1);
5277 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5278 switch (size) {
5279 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5280 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5281 case 2: tcg_gen_add_i64(CPU_V001); break;
5282 default: abort();
5283 }
9ee6e8bb
PB
5284 if (op >= 12) {
5285 /* Accumulate. */
ad69471c
PB
5286 neon_load_reg64(cpu_V1, rd + pass);
5287 gen_neon_addl(size);
9ee6e8bb 5288 }
ad69471c 5289 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5290 }
5291 break;
5292 case 33: /* VTRN */
5293 if (size == 2) {
5294 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5295 tmp = neon_load_reg(rm, n);
5296 tmp2 = neon_load_reg(rd, n + 1);
5297 neon_store_reg(rm, n, tmp2);
5298 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5299 }
5300 } else {
5301 goto elementwise;
5302 }
5303 break;
5304 case 34: /* VUZP */
5305 /* Reg Before After
5306 Rd A3 A2 A1 A0 B2 B0 A2 A0
5307 Rm B3 B2 B1 B0 B3 B1 A3 A1
5308 */
5309 if (size == 3)
5310 return 1;
5311 gen_neon_unzip(rd, q, 0, size);
5312 gen_neon_unzip(rm, q, 4, size);
5313 if (q) {
5314 static int unzip_order_q[8] =
5315 {0, 2, 4, 6, 1, 3, 5, 7};
5316 for (n = 0; n < 8; n++) {
5317 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5318 tmp = neon_load_scratch(unzip_order_q[n]);
5319 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5320 }
5321 } else {
5322 static int unzip_order[4] =
5323 {0, 4, 1, 5};
5324 for (n = 0; n < 4; n++) {
5325 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5326 tmp = neon_load_scratch(unzip_order[n]);
5327 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5328 }
5329 }
5330 break;
5331 case 35: /* VZIP */
5332 /* Reg Before After
5333 Rd A3 A2 A1 A0 B1 A1 B0 A0
5334 Rm B3 B2 B1 B0 B3 A3 B2 A2
5335 */
5336 if (size == 3)
5337 return 1;
5338 count = (q ? 4 : 2);
5339 for (n = 0; n < count; n++) {
dd8fbd78
FN
5340 tmp = neon_load_reg(rd, n);
5341 tmp2 = neon_load_reg(rd, n);
9ee6e8bb 5342 switch (size) {
dd8fbd78
FN
5343 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5344 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5345 case 2: /* no-op */; break;
5346 default: abort();
5347 }
dd8fbd78
FN
5348 neon_store_scratch(n * 2, tmp);
5349 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5350 }
5351 for (n = 0; n < count * 2; n++) {
5352 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5353 tmp = neon_load_scratch(n);
5354 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5355 }
5356 break;
5357 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5358 if (size == 3)
5359 return 1;
a50f5b91 5360 TCGV_UNUSED(tmp2);
9ee6e8bb 5361 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5362 neon_load_reg64(cpu_V0, rm + pass);
5363 tmp = new_tmp();
9ee6e8bb 5364 if (op == 36 && q == 0) {
ad69471c 5365 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5366 } else if (q) {
ad69471c 5367 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5368 } else {
ad69471c
PB
5369 gen_neon_narrow_sats(size, tmp, cpu_V0);
5370 }
5371 if (pass == 0) {
5372 tmp2 = tmp;
5373 } else {
5374 neon_store_reg(rd, 0, tmp2);
5375 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5376 }
9ee6e8bb
PB
5377 }
5378 break;
5379 case 38: /* VSHLL */
ad69471c 5380 if (q || size == 3)
9ee6e8bb 5381 return 1;
ad69471c
PB
5382 tmp = neon_load_reg(rm, 0);
5383 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5384 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5385 if (pass == 1)
5386 tmp = tmp2;
5387 gen_neon_widen(cpu_V0, tmp, size, 1);
5388 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5389 }
5390 break;
60011498
PB
5391 case 44: /* VCVT.F16.F32 */
5392 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5393 return 1;
5394 tmp = new_tmp();
5395 tmp2 = new_tmp();
5396 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5397 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5398 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5399 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5400 tcg_gen_shli_i32(tmp2, tmp2, 16);
5401 tcg_gen_or_i32(tmp2, tmp2, tmp);
5402 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5403 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5404 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5405 neon_store_reg(rd, 0, tmp2);
5406 tmp2 = new_tmp();
5407 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5408 tcg_gen_shli_i32(tmp2, tmp2, 16);
5409 tcg_gen_or_i32(tmp2, tmp2, tmp);
5410 neon_store_reg(rd, 1, tmp2);
5411 dead_tmp(tmp);
5412 break;
5413 case 46: /* VCVT.F32.F16 */
5414 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5415 return 1;
5416 tmp3 = new_tmp();
5417 tmp = neon_load_reg(rm, 0);
5418 tmp2 = neon_load_reg(rm, 1);
5419 tcg_gen_ext16u_i32(tmp3, tmp);
5420 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5421 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5422 tcg_gen_shri_i32(tmp3, tmp, 16);
5423 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5424 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5425 dead_tmp(tmp);
5426 tcg_gen_ext16u_i32(tmp3, tmp2);
5427 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5428 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5429 tcg_gen_shri_i32(tmp3, tmp2, 16);
5430 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5431 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5432 dead_tmp(tmp2);
5433 dead_tmp(tmp3);
5434 break;
9ee6e8bb
PB
5435 default:
5436 elementwise:
5437 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5438 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5439 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5440 neon_reg_offset(rm, pass));
dd8fbd78 5441 TCGV_UNUSED(tmp);
9ee6e8bb 5442 } else {
dd8fbd78 5443 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5444 }
5445 switch (op) {
5446 case 1: /* VREV32 */
5447 switch (size) {
dd8fbd78
FN
5448 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5449 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5450 default: return 1;
5451 }
5452 break;
5453 case 2: /* VREV16 */
5454 if (size != 0)
5455 return 1;
dd8fbd78 5456 gen_rev16(tmp);
9ee6e8bb 5457 break;
9ee6e8bb
PB
5458 case 8: /* CLS */
5459 switch (size) {
dd8fbd78
FN
5460 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5461 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5462 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5463 default: return 1;
5464 }
5465 break;
5466 case 9: /* CLZ */
5467 switch (size) {
dd8fbd78
FN
5468 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5469 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5470 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5471 default: return 1;
5472 }
5473 break;
5474 case 10: /* CNT */
5475 if (size != 0)
5476 return 1;
dd8fbd78 5477 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5478 break;
5479 case 11: /* VNOT */
5480 if (size != 0)
5481 return 1;
dd8fbd78 5482 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5483 break;
5484 case 14: /* VQABS */
5485 switch (size) {
dd8fbd78
FN
5486 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5487 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5488 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5489 default: return 1;
5490 }
5491 break;
5492 case 15: /* VQNEG */
5493 switch (size) {
dd8fbd78
FN
5494 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5495 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5496 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5497 default: return 1;
5498 }
5499 break;
5500 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5501 tmp2 = tcg_const_i32(0);
9ee6e8bb 5502 switch(size) {
dd8fbd78
FN
5503 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5504 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5505 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5506 default: return 1;
5507 }
dd8fbd78 5508 tcg_temp_free(tmp2);
9ee6e8bb 5509 if (op == 19)
dd8fbd78 5510 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5511 break;
5512 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5513 tmp2 = tcg_const_i32(0);
9ee6e8bb 5514 switch(size) {
dd8fbd78
FN
5515 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5516 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5517 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5518 default: return 1;
5519 }
dd8fbd78 5520 tcg_temp_free(tmp2);
9ee6e8bb 5521 if (op == 20)
dd8fbd78 5522 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5523 break;
5524 case 18: /* VCEQ #0 */
dd8fbd78 5525 tmp2 = tcg_const_i32(0);
9ee6e8bb 5526 switch(size) {
dd8fbd78
FN
5527 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5528 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5529 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5530 default: return 1;
5531 }
dd8fbd78 5532 tcg_temp_free(tmp2);
9ee6e8bb
PB
5533 break;
5534 case 22: /* VABS */
5535 switch(size) {
dd8fbd78
FN
5536 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5537 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5538 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5539 default: return 1;
5540 }
5541 break;
5542 case 23: /* VNEG */
ad69471c
PB
5543 if (size == 3)
5544 return 1;
dd8fbd78
FN
5545 tmp2 = tcg_const_i32(0);
5546 gen_neon_rsb(size, tmp, tmp2);
5547 tcg_temp_free(tmp2);
9ee6e8bb
PB
5548 break;
5549 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5550 tmp2 = tcg_const_i32(0);
5551 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5552 tcg_temp_free(tmp2);
9ee6e8bb 5553 if (op == 27)
dd8fbd78 5554 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5555 break;
5556 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5557 tmp2 = tcg_const_i32(0);
5558 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5559 tcg_temp_free(tmp2);
9ee6e8bb 5560 if (op == 28)
dd8fbd78 5561 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5562 break;
5563 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5564 tmp2 = tcg_const_i32(0);
5565 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5566 tcg_temp_free(tmp2);
9ee6e8bb
PB
5567 break;
5568 case 30: /* Float VABS */
4373f3ce 5569 gen_vfp_abs(0);
9ee6e8bb
PB
5570 break;
5571 case 31: /* Float VNEG */
4373f3ce 5572 gen_vfp_neg(0);
9ee6e8bb
PB
5573 break;
5574 case 32: /* VSWP */
dd8fbd78
FN
5575 tmp2 = neon_load_reg(rd, pass);
5576 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5577 break;
5578 case 33: /* VTRN */
dd8fbd78 5579 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5580 switch (size) {
dd8fbd78
FN
5581 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5582 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5583 case 2: abort();
5584 default: return 1;
5585 }
dd8fbd78 5586 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5587 break;
5588 case 56: /* Integer VRECPE */
dd8fbd78 5589 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5590 break;
5591 case 57: /* Integer VRSQRTE */
dd8fbd78 5592 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5593 break;
5594 case 58: /* Float VRECPE */
4373f3ce 5595 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5596 break;
5597 case 59: /* Float VRSQRTE */
4373f3ce 5598 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5599 break;
5600 case 60: /* VCVT.F32.S32 */
4373f3ce 5601 gen_vfp_tosiz(0);
9ee6e8bb
PB
5602 break;
5603 case 61: /* VCVT.F32.U32 */
4373f3ce 5604 gen_vfp_touiz(0);
9ee6e8bb
PB
5605 break;
5606 case 62: /* VCVT.S32.F32 */
4373f3ce 5607 gen_vfp_sito(0);
9ee6e8bb
PB
5608 break;
5609 case 63: /* VCVT.U32.F32 */
4373f3ce 5610 gen_vfp_uito(0);
9ee6e8bb
PB
5611 break;
5612 default:
5613 /* Reserved: 21, 29, 39-56 */
5614 return 1;
5615 }
5616 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5617 tcg_gen_st_f32(cpu_F0s, cpu_env,
5618 neon_reg_offset(rd, pass));
9ee6e8bb 5619 } else {
dd8fbd78 5620 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5621 }
5622 }
5623 break;
5624 }
5625 } else if ((insn & (1 << 10)) == 0) {
5626 /* VTBL, VTBX. */
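/* n is the table size in bytes: the length field in bits [9:8] encodes
   (len + 1) list registers of 8 bytes each. */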
3018f259 5627 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5628 if (insn & (1 << 6)) {
8f8e3aa4 5629 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5630 } else {
8f8e3aa4
PB
5631 tmp = new_tmp();
5632 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5633 }
8f8e3aa4 5634 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5635 tmp4 = tcg_const_i32(rn);
5636 tmp5 = tcg_const_i32(n);
5637 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
3018f259 5638 dead_tmp(tmp);
9ee6e8bb 5639 if (insn & (1 << 6)) {
8f8e3aa4 5640 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5641 } else {
8f8e3aa4
PB
5642 tmp = new_tmp();
5643 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5644 }
8f8e3aa4 5645 tmp3 = neon_load_reg(rm, 1);
b75263d6 5646 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5647 tcg_temp_free_i32(tmp5);
5648 tcg_temp_free_i32(tmp4);
8f8e3aa4 5649 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5650 neon_store_reg(rd, 1, tmp3);
5651 dead_tmp(tmp);
9ee6e8bb
PB
5652 } else if ((insn & 0x380) == 0) {
5653 /* VDUP */
5654 if (insn & (1 << 19)) {
dd8fbd78 5655 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5656 } else {
dd8fbd78 5657 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5658 }
5659 if (insn & (1 << 16)) {
dd8fbd78 5660 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5661 } else if (insn & (1 << 17)) {
5662 if ((insn >> 18) & 1)
dd8fbd78 5663 gen_neon_dup_high16(tmp);
9ee6e8bb 5664 else
dd8fbd78 5665 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5666 }
5667 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5668 tmp2 = new_tmp();
5669 tcg_gen_mov_i32(tmp2, tmp);
5670 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5671 }
dd8fbd78 5672 dead_tmp(tmp);
9ee6e8bb
PB
5673 } else {
5674 return 1;
5675 }
5676 }
5677 }
5678 return 0;
5679}
5680
fe1479c3
PB
5681static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5682{
5683 int crn = (insn >> 16) & 0xf;
5684 int crm = insn & 0xf;
5685 int op1 = (insn >> 21) & 7;
5686 int op2 = (insn >> 5) & 7;
5687 int rt = (insn >> 12) & 0xf;
5688 TCGv tmp;
5689
5690 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5691 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5692 /* TEECR */
5693 if (IS_USER(s))
5694 return 1;
5695 tmp = load_cpu_field(teecr);
5696 store_reg(s, rt, tmp);
5697 return 0;
5698 }
5699 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5700 /* TEEHBR */
5701 if (IS_USER(s) && (env->teecr & 1))
5702 return 1;
5703 tmp = load_cpu_field(teehbr);
5704 store_reg(s, rt, tmp);
5705 return 0;
5706 }
5707 }
5708 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5709 op1, crn, crm, op2);
5710 return 1;
5711}
5712
5713static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5714{
5715 int crn = (insn >> 16) & 0xf;
5716 int crm = insn & 0xf;
5717 int op1 = (insn >> 21) & 7;
5718 int op2 = (insn >> 5) & 7;
5719 int rt = (insn >> 12) & 0xf;
5720 TCGv tmp;
5721
5722 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5723 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5724 /* TEECR */
5725 if (IS_USER(s))
5726 return 1;
5727 tmp = load_reg(s, rt);
5728 gen_helper_set_teecr(cpu_env, tmp);
5729 dead_tmp(tmp);
5730 return 0;
5731 }
5732 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5733 /* TEEHBR */
5734 if (IS_USER(s) && (env->teecr & 1))
5735 return 1;
5736 tmp = load_reg(s, rt);
5737 store_cpu_field(tmp, teehbr);
5738 return 0;
5739 }
5740 }
5741 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5742 op1, crn, crm, op2);
5743 return 1;
5744}
5745
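/* Illustrative sketch, not part of translate.c: the access rule that the two
   cp14 handlers above enforce for the ThumbEE registers.  TEECR is
   privileged-only; TEEHBR is user-accessible unless TEECR bit 0 is set
   (the handlers return 1 to flag an undefined access).  The helper name and
   its standalone form are assumptions for illustration only. */
static inline int teehbr_user_access_ok_sketch(uint32_t teecr, int is_user)
{
    return !(is_user && (teecr & 1));
}
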
9ee6e8bb
PB
5746static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5747{
5748 int cpnum;
5749
5750 cpnum = (insn >> 8) & 0xf;
5751 if (arm_feature(env, ARM_FEATURE_XSCALE)
5752 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5753 return 1;
5754
5755 switch (cpnum) {
5756 case 0:
5757 case 1:
5758 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5759 return disas_iwmmxt_insn(env, s, insn);
5760 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5761 return disas_dsp_insn(env, s, insn);
5762 }
5763 return 1;
5764 case 10:
5765 case 11:
5766 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5767 case 14:
5768 /* Coprocessors 7-15 are architecturally reserved by ARM.
5769 Unfortunately Intel decided to ignore this. */
5770 if (arm_feature(env, ARM_FEATURE_XSCALE))
5771 goto board;
5772 if (insn & (1 << 20))
5773 return disas_cp14_read(env, s, insn);
5774 else
5775 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5776 case 15:
5777 return disas_cp15_insn (env, s, insn);
5778 default:
fe1479c3 5779 board:
9ee6e8bb
PB
5780 /* Unknown coprocessor. See if the board has hooked it. */
5781 return disas_cp_insn (env, s, insn);
5782 }
5783}
5784
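/* Illustrative sketch, not part of translate.c: how disas_coproc_insn() picks
   a handler from the coprocessor number in insn bits [11:8] -- cp0/cp1 go to
   the iwMMXt/XScale DSP decoders, cp10/cp11 to disas_vfp_insn, cp14 to the
   cp14 handlers above, cp15 to disas_cp15_insn, and anything else to the
   board hook disas_cp_insn.  The helper below is an assumption for
   illustration only. */
static inline int coproc_number_sketch(uint32_t insn)
{
    return (insn >> 8) & 0xf;
}
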
5e3f878a
PB
5785
5786/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5787static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5788{
5789 TCGv tmp;
5790 tmp = new_tmp();
5791 tcg_gen_trunc_i64_i32(tmp, val);
5792 store_reg(s, rlow, tmp);
5793 tmp = new_tmp();
5794 tcg_gen_shri_i64(val, val, 32);
5795 tcg_gen_trunc_i64_i32(tmp, val);
5796 store_reg(s, rhigh, tmp);
5797}
5798
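/* Illustrative sketch, not part of translate.c: the register-pair layout that
   gen_storeq_reg() above produces, written as plain C over a hypothetical
   register file R[].  All names are assumptions for illustration only. */
static inline void storeq_reg_sketch(uint32_t *R, int rlow, int rhigh,
                                     uint64_t val)
{
    R[rlow]  = (uint32_t)val;          /* low 32 bits  */
    R[rhigh] = (uint32_t)(val >> 32);  /* high 32 bits */
}
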
5799/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5800static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5801{
a7812ae4 5802 TCGv_i64 tmp;
5e3f878a
PB
5803 TCGv tmp2;
5804
36aa55dc 5805 /* Load value and extend to 64 bits. */
a7812ae4 5806 tmp = tcg_temp_new_i64();
5e3f878a
PB
5807 tmp2 = load_reg(s, rlow);
5808 tcg_gen_extu_i32_i64(tmp, tmp2);
5809 dead_tmp(tmp2);
5810 tcg_gen_add_i64(val, val, tmp);
b75263d6 5811 tcg_temp_free_i64(tmp);
5e3f878a
PB
5812}
5813
5814/* load and add a 64-bit value from a register pair. */
a7812ae4 5815static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5816{
a7812ae4 5817 TCGv_i64 tmp;
36aa55dc
PB
5818 TCGv tmpl;
5819 TCGv tmph;
5e3f878a
PB
5820
5821 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5822 tmpl = load_reg(s, rlow);
5823 tmph = load_reg(s, rhigh);
a7812ae4 5824 tmp = tcg_temp_new_i64();
36aa55dc
PB
5825 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5826 dead_tmp(tmpl);
5827 dead_tmp(tmph);
5e3f878a 5828 tcg_gen_add_i64(val, val, tmp);
b75263d6 5829 tcg_temp_free_i64(tmp);
5e3f878a
PB
5830}
5831
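/* Illustrative sketch, not part of translate.c: the accumulation performed by
   gen_addq() above, i.e. adding the 64-bit value held in the pair rhigh:rlow,
   written as plain C over a hypothetical register file R[]. */
static inline uint64_t addq_sketch(const uint32_t *R, uint64_t val,
                                   int rlow, int rhigh)
{
    return val + (((uint64_t)R[rhigh] << 32) | R[rlow]);
}
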
5832/* Set N and Z flags from a 64-bit value. */
a7812ae4 5833static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5834{
5835 TCGv tmp = new_tmp();
5836 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5837 gen_logic_CC(tmp);
5838 dead_tmp(tmp);
5e3f878a
PB
5839}
5840
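/* Illustrative sketch, not part of translate.c: what gen_logicq_cc() needs
   from its helper -- a 32-bit value whose sign bit equals bit 63 of the
   64-bit result and which is zero exactly when the result is zero, so that
   gen_logic_CC() then sets N and Z correctly.  One way to form such a value
   (an assumption, not necessarily the helper's implementation): */
static inline uint32_t logicq_cc_sketch(uint64_t val)
{
    return (uint32_t)(val >> 32) | (val != 0);
}
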
426f5abc
PB
5841/* Load/Store exclusive instructions are implemented by remembering
5842 the value/address loaded, and seeing if these are the same
 5843    when the store is performed. This should be sufficient to implement
5844 the architecturally mandated semantics, and avoids having to monitor
5845 regular stores.
5846
5847 In system emulation mode only one CPU will be running at once, so
5848 this sequence is effectively atomic. In user emulation mode we
5849 throw an exception and handle the atomic operation elsewhere. */
5850static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5851 TCGv addr, int size)
5852{
5853 TCGv tmp;
5854
5855 switch (size) {
5856 case 0:
5857 tmp = gen_ld8u(addr, IS_USER(s));
5858 break;
5859 case 1:
5860 tmp = gen_ld16u(addr, IS_USER(s));
5861 break;
5862 case 2:
5863 case 3:
5864 tmp = gen_ld32(addr, IS_USER(s));
5865 break;
5866 default:
5867 abort();
5868 }
5869 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5870 store_reg(s, rt, tmp);
5871 if (size == 3) {
5872 tcg_gen_addi_i32(addr, addr, 4);
5873 tmp = gen_ld32(addr, IS_USER(s));
5874 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5875 store_reg(s, rt2, tmp);
5876 }
5877 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5878}
5879
5880static void gen_clrex(DisasContext *s)
5881{
5882 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5883}
5884
5885#ifdef CONFIG_USER_ONLY
5886static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5887 TCGv addr, int size)
5888{
5889 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5890 tcg_gen_movi_i32(cpu_exclusive_info,
5891 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5892 gen_set_condexec(s);
5893 gen_set_pc_im(s->pc - 4);
5894 gen_exception(EXCP_STREX);
5895 s->is_jmp = DISAS_JUMP;
5896}
5897#else
5898static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5899 TCGv addr, int size)
5900{
5901 TCGv tmp;
5902 int done_label;
5903 int fail_label;
5904
5905 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5906 [addr] = {Rt};
5907 {Rd} = 0;
5908 } else {
5909 {Rd} = 1;
5910 } */
5911 fail_label = gen_new_label();
5912 done_label = gen_new_label();
5913 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
5914 switch (size) {
5915 case 0:
5916 tmp = gen_ld8u(addr, IS_USER(s));
5917 break;
5918 case 1:
5919 tmp = gen_ld16u(addr, IS_USER(s));
5920 break;
5921 case 2:
5922 case 3:
5923 tmp = gen_ld32(addr, IS_USER(s));
5924 break;
5925 default:
5926 abort();
5927 }
5928 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
5929 dead_tmp(tmp);
5930 if (size == 3) {
5931 TCGv tmp2 = new_tmp();
5932 tcg_gen_addi_i32(tmp2, addr, 4);
 5933         tmp = gen_ld32(tmp2, IS_USER(s));
5934 dead_tmp(tmp2);
5935 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
5936 dead_tmp(tmp);
5937 }
5938 tmp = load_reg(s, rt);
5939 switch (size) {
5940 case 0:
5941 gen_st8(tmp, addr, IS_USER(s));
5942 break;
5943 case 1:
5944 gen_st16(tmp, addr, IS_USER(s));
5945 break;
5946 case 2:
5947 case 3:
5948 gen_st32(tmp, addr, IS_USER(s));
5949 break;
5950 default:
5951 abort();
5952 }
5953 if (size == 3) {
5954 tcg_gen_addi_i32(addr, addr, 4);
5955 tmp = load_reg(s, rt2);
5956 gen_st32(tmp, addr, IS_USER(s));
5957 }
5958 tcg_gen_movi_i32(cpu_R[rd], 0);
5959 tcg_gen_br(done_label);
5960 gen_set_label(fail_label);
5961 tcg_gen_movi_i32(cpu_R[rd], 1);
5962 gen_set_label(done_label);
5963 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5964}
5965#endif
5966
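/* Illustrative sketch, not part of translate.c: the run-time behaviour that
   the generated code above implements for a 32-bit STREX, written as plain C
   over hypothetical state.  All names are assumptions for illustration. */
static inline int strex_sketch(uint32_t addr, uint32_t value,
                               uint32_t exclusive_addr, uint32_t exclusive_val,
                               uint32_t *mem_word)
{
    /* Succeed only if the address still matches the one marked by the
       preceding LDREX and the word still holds the value observed then. */
    if (addr == exclusive_addr && *mem_word == exclusive_val) {
        *mem_word = value;
        return 0;    /* Rd = 0: store performed */
    }
    return 1;        /* Rd = 1: store failed */
}
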
9ee6e8bb
PB
5967static void disas_arm_insn(CPUState * env, DisasContext *s)
5968{
5969 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5970 TCGv tmp;
3670669c 5971 TCGv tmp2;
6ddbc6e4 5972 TCGv tmp3;
b0109805 5973 TCGv addr;
a7812ae4 5974 TCGv_i64 tmp64;
9ee6e8bb
PB
5975
5976 insn = ldl_code(s->pc);
5977 s->pc += 4;
5978
5979 /* M variants do not implement ARM mode. */
5980 if (IS_M(env))
5981 goto illegal_op;
5982 cond = insn >> 28;
5983 if (cond == 0xf){
5984 /* Unconditional instructions. */
5985 if (((insn >> 25) & 7) == 1) {
5986 /* NEON Data processing. */
5987 if (!arm_feature(env, ARM_FEATURE_NEON))
5988 goto illegal_op;
5989
5990 if (disas_neon_data_insn(env, s, insn))
5991 goto illegal_op;
5992 return;
5993 }
5994 if ((insn & 0x0f100000) == 0x04000000) {
5995 /* NEON load/store. */
5996 if (!arm_feature(env, ARM_FEATURE_NEON))
5997 goto illegal_op;
5998
5999 if (disas_neon_ls_insn(env, s, insn))
6000 goto illegal_op;
6001 return;
6002 }
6003 if ((insn & 0x0d70f000) == 0x0550f000)
6004 return; /* PLD */
6005 else if ((insn & 0x0ffffdff) == 0x01010000) {
6006 ARCH(6);
6007 /* setend */
6008 if (insn & (1 << 9)) {
6009 /* BE8 mode not implemented. */
6010 goto illegal_op;
6011 }
6012 return;
6013 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6014 switch ((insn >> 4) & 0xf) {
6015 case 1: /* clrex */
6016 ARCH(6K);
426f5abc 6017 gen_clrex(s);
9ee6e8bb
PB
6018 return;
6019 case 4: /* dsb */
6020 case 5: /* dmb */
6021 case 6: /* isb */
6022 ARCH(7);
6023 /* We don't emulate caches so these are a no-op. */
6024 return;
6025 default:
6026 goto illegal_op;
6027 }
6028 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6029 /* srs */
c67b6b71 6030 int32_t offset;
9ee6e8bb
PB
6031 if (IS_USER(s))
6032 goto illegal_op;
6033 ARCH(6);
6034 op1 = (insn & 0x1f);
6035 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 6036 addr = load_reg(s, 13);
9ee6e8bb 6037 } else {
b0109805 6038 addr = new_tmp();
b75263d6
JR
6039 tmp = tcg_const_i32(op1);
6040 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6041 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6042 }
6043 i = (insn >> 23) & 3;
6044 switch (i) {
6045 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6046 case 1: offset = 0; break; /* IA */
6047 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6048 case 3: offset = 4; break; /* IB */
6049 default: abort();
6050 }
6051 if (offset)
b0109805
PB
6052 tcg_gen_addi_i32(addr, addr, offset);
6053 tmp = load_reg(s, 14);
6054 gen_st32(tmp, addr, 0);
c67b6b71 6055 tmp = load_cpu_field(spsr);
b0109805
PB
6056 tcg_gen_addi_i32(addr, addr, 4);
6057 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6058 if (insn & (1 << 21)) {
6059 /* Base writeback. */
6060 switch (i) {
6061 case 0: offset = -8; break;
c67b6b71
FN
6062 case 1: offset = 4; break;
6063 case 2: offset = -4; break;
9ee6e8bb
PB
6064 case 3: offset = 0; break;
6065 default: abort();
6066 }
6067 if (offset)
c67b6b71 6068 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 6069 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 6070 store_reg(s, 13, addr);
9ee6e8bb 6071 } else {
b75263d6
JR
6072 tmp = tcg_const_i32(op1);
6073 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6074 tcg_temp_free_i32(tmp);
c67b6b71 6075 dead_tmp(addr);
9ee6e8bb 6076 }
b0109805
PB
6077 } else {
6078 dead_tmp(addr);
9ee6e8bb
PB
6079 }
6080 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
6081 /* rfe */
c67b6b71 6082 int32_t offset;
9ee6e8bb
PB
6083 if (IS_USER(s))
6084 goto illegal_op;
6085 ARCH(6);
6086 rn = (insn >> 16) & 0xf;
b0109805 6087 addr = load_reg(s, rn);
9ee6e8bb
PB
6088 i = (insn >> 23) & 3;
6089 switch (i) {
b0109805 6090 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6091 case 1: offset = 0; break; /* IA */
6092 case 2: offset = -8; break; /* DB */
b0109805 6093 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6094 default: abort();
6095 }
6096 if (offset)
b0109805
PB
6097 tcg_gen_addi_i32(addr, addr, offset);
6098 /* Load PC into tmp and CPSR into tmp2. */
6099 tmp = gen_ld32(addr, 0);
6100 tcg_gen_addi_i32(addr, addr, 4);
6101 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6102 if (insn & (1 << 21)) {
6103 /* Base writeback. */
6104 switch (i) {
b0109805 6105 case 0: offset = -8; break;
c67b6b71
FN
6106 case 1: offset = 4; break;
6107 case 2: offset = -4; break;
b0109805 6108 case 3: offset = 0; break;
9ee6e8bb
PB
6109 default: abort();
6110 }
6111 if (offset)
b0109805
PB
6112 tcg_gen_addi_i32(addr, addr, offset);
6113 store_reg(s, rn, addr);
6114 } else {
6115 dead_tmp(addr);
9ee6e8bb 6116 }
b0109805 6117 gen_rfe(s, tmp, tmp2);
c67b6b71 6118 return;
9ee6e8bb
PB
6119 } else if ((insn & 0x0e000000) == 0x0a000000) {
6120 /* branch link and change to thumb (blx <offset>) */
6121 int32_t offset;
6122
6123 val = (uint32_t)s->pc;
d9ba4830
PB
6124 tmp = new_tmp();
6125 tcg_gen_movi_i32(tmp, val);
6126 store_reg(s, 14, tmp);
9ee6e8bb
PB
6127 /* Sign-extend the 24-bit offset */
6128 offset = (((int32_t)insn) << 8) >> 8;
6129 /* offset * 4 + bit24 * 2 + (thumb bit) */
6130 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6131 /* pipeline offset */
6132 val += 4;
d9ba4830 6133 gen_bx_im(s, val);
9ee6e8bb
PB
6134 return;
6135 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6136 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6137 /* iWMMXt register transfer. */
6138 if (env->cp15.c15_cpar & (1 << 1))
6139 if (!disas_iwmmxt_insn(env, s, insn))
6140 return;
6141 }
6142 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6143 /* Coprocessor double register transfer. */
6144 } else if ((insn & 0x0f000010) == 0x0e000010) {
6145 /* Additional coprocessor register transfer. */
7997d92f 6146 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6147 uint32_t mask;
6148 uint32_t val;
6149 /* cps (privileged) */
6150 if (IS_USER(s))
6151 return;
6152 mask = val = 0;
6153 if (insn & (1 << 19)) {
6154 if (insn & (1 << 8))
6155 mask |= CPSR_A;
6156 if (insn & (1 << 7))
6157 mask |= CPSR_I;
6158 if (insn & (1 << 6))
6159 mask |= CPSR_F;
6160 if (insn & (1 << 18))
6161 val |= mask;
6162 }
7997d92f 6163 if (insn & (1 << 17)) {
9ee6e8bb
PB
6164 mask |= CPSR_M;
6165 val |= (insn & 0x1f);
6166 }
6167 if (mask) {
2fbac54b 6168 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6169 }
6170 return;
6171 }
6172 goto illegal_op;
6173 }
6174 if (cond != 0xe) {
 6175         /* if the instruction is conditional, generate a conditional jump to
 6176            the next instruction */
6177 s->condlabel = gen_new_label();
d9ba4830 6178 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6179 s->condjmp = 1;
6180 }
6181 if ((insn & 0x0f900000) == 0x03000000) {
6182 if ((insn & (1 << 21)) == 0) {
6183 ARCH(6T2);
6184 rd = (insn >> 12) & 0xf;
6185 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6186 if ((insn & (1 << 22)) == 0) {
6187 /* MOVW */
5e3f878a
PB
6188 tmp = new_tmp();
6189 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6190 } else {
6191 /* MOVT */
5e3f878a 6192 tmp = load_reg(s, rd);
86831435 6193 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6194 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6195 }
5e3f878a 6196 store_reg(s, rd, tmp);
9ee6e8bb
PB
6197 } else {
6198 if (((insn >> 12) & 0xf) != 0xf)
6199 goto illegal_op;
6200 if (((insn >> 16) & 0xf) == 0) {
6201 gen_nop_hint(s, insn & 0xff);
6202 } else {
6203 /* CPSR = immediate */
6204 val = insn & 0xff;
6205 shift = ((insn >> 8) & 0xf) * 2;
6206 if (shift)
6207 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6208 i = ((insn & (1 << 22)) != 0);
2fbac54b 6209 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6210 goto illegal_op;
6211 }
6212 }
6213 } else if ((insn & 0x0f900000) == 0x01000000
6214 && (insn & 0x00000090) != 0x00000090) {
6215 /* miscellaneous instructions */
6216 op1 = (insn >> 21) & 3;
6217 sh = (insn >> 4) & 0xf;
6218 rm = insn & 0xf;
6219 switch (sh) {
6220 case 0x0: /* move program status register */
6221 if (op1 & 1) {
6222 /* PSR = reg */
2fbac54b 6223 tmp = load_reg(s, rm);
9ee6e8bb 6224 i = ((op1 & 2) != 0);
2fbac54b 6225 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6226 goto illegal_op;
6227 } else {
6228 /* reg = PSR */
6229 rd = (insn >> 12) & 0xf;
6230 if (op1 & 2) {
6231 if (IS_USER(s))
6232 goto illegal_op;
d9ba4830 6233 tmp = load_cpu_field(spsr);
9ee6e8bb 6234 } else {
d9ba4830
PB
6235 tmp = new_tmp();
6236 gen_helper_cpsr_read(tmp);
9ee6e8bb 6237 }
d9ba4830 6238 store_reg(s, rd, tmp);
9ee6e8bb
PB
6239 }
6240 break;
6241 case 0x1:
6242 if (op1 == 1) {
6243 /* branch/exchange thumb (bx). */
d9ba4830
PB
6244 tmp = load_reg(s, rm);
6245 gen_bx(s, tmp);
9ee6e8bb
PB
6246 } else if (op1 == 3) {
6247 /* clz */
6248 rd = (insn >> 12) & 0xf;
1497c961
PB
6249 tmp = load_reg(s, rm);
6250 gen_helper_clz(tmp, tmp);
6251 store_reg(s, rd, tmp);
9ee6e8bb
PB
6252 } else {
6253 goto illegal_op;
6254 }
6255 break;
6256 case 0x2:
6257 if (op1 == 1) {
6258 ARCH(5J); /* bxj */
6259 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6260 tmp = load_reg(s, rm);
6261 gen_bx(s, tmp);
9ee6e8bb
PB
6262 } else {
6263 goto illegal_op;
6264 }
6265 break;
6266 case 0x3:
6267 if (op1 != 1)
6268 goto illegal_op;
6269
6270 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6271 tmp = load_reg(s, rm);
6272 tmp2 = new_tmp();
6273 tcg_gen_movi_i32(tmp2, s->pc);
6274 store_reg(s, 14, tmp2);
6275 gen_bx(s, tmp);
9ee6e8bb
PB
6276 break;
6277 case 0x5: /* saturating add/subtract */
6278 rd = (insn >> 12) & 0xf;
6279 rn = (insn >> 16) & 0xf;
b40d0353 6280 tmp = load_reg(s, rm);
5e3f878a 6281 tmp2 = load_reg(s, rn);
9ee6e8bb 6282 if (op1 & 2)
5e3f878a 6283 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6284 if (op1 & 1)
5e3f878a 6285 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6286 else
5e3f878a
PB
6287 gen_helper_add_saturate(tmp, tmp, tmp2);
6288 dead_tmp(tmp2);
6289 store_reg(s, rd, tmp);
9ee6e8bb
PB
6290 break;
6291 case 7: /* bkpt */
6292 gen_set_condexec(s);
5e3f878a 6293 gen_set_pc_im(s->pc - 4);
d9ba4830 6294 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6295 s->is_jmp = DISAS_JUMP;
6296 break;
6297 case 0x8: /* signed multiply */
6298 case 0xa:
6299 case 0xc:
6300 case 0xe:
6301 rs = (insn >> 8) & 0xf;
6302 rn = (insn >> 12) & 0xf;
6303 rd = (insn >> 16) & 0xf;
6304 if (op1 == 1) {
6305 /* (32 * 16) >> 16 */
5e3f878a
PB
6306 tmp = load_reg(s, rm);
6307 tmp2 = load_reg(s, rs);
9ee6e8bb 6308 if (sh & 4)
5e3f878a 6309 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6310 else
5e3f878a 6311 gen_sxth(tmp2);
a7812ae4
PB
6312 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6313 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6314 tmp = new_tmp();
a7812ae4 6315 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6316 tcg_temp_free_i64(tmp64);
9ee6e8bb 6317 if ((sh & 2) == 0) {
5e3f878a
PB
6318 tmp2 = load_reg(s, rn);
6319 gen_helper_add_setq(tmp, tmp, tmp2);
6320 dead_tmp(tmp2);
9ee6e8bb 6321 }
5e3f878a 6322 store_reg(s, rd, tmp);
9ee6e8bb
PB
6323 } else {
6324 /* 16 * 16 */
5e3f878a
PB
6325 tmp = load_reg(s, rm);
6326 tmp2 = load_reg(s, rs);
6327 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6328 dead_tmp(tmp2);
9ee6e8bb 6329 if (op1 == 2) {
a7812ae4
PB
6330 tmp64 = tcg_temp_new_i64();
6331 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6332 dead_tmp(tmp);
a7812ae4
PB
6333 gen_addq(s, tmp64, rn, rd);
6334 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6335 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6336 } else {
6337 if (op1 == 0) {
5e3f878a
PB
6338 tmp2 = load_reg(s, rn);
6339 gen_helper_add_setq(tmp, tmp, tmp2);
6340 dead_tmp(tmp2);
9ee6e8bb 6341 }
5e3f878a 6342 store_reg(s, rd, tmp);
9ee6e8bb
PB
6343 }
6344 }
6345 break;
6346 default:
6347 goto illegal_op;
6348 }
6349 } else if (((insn & 0x0e000000) == 0 &&
6350 (insn & 0x00000090) != 0x90) ||
6351 ((insn & 0x0e000000) == (1 << 25))) {
6352 int set_cc, logic_cc, shiftop;
6353
6354 op1 = (insn >> 21) & 0xf;
6355 set_cc = (insn >> 20) & 1;
6356 logic_cc = table_logic_cc[op1] & set_cc;
6357
6358 /* data processing instruction */
6359 if (insn & (1 << 25)) {
6360 /* immediate operand */
6361 val = insn & 0xff;
6362 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6363 if (shift) {
9ee6e8bb 6364 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6365 }
6366 tmp2 = new_tmp();
6367 tcg_gen_movi_i32(tmp2, val);
6368 if (logic_cc && shift) {
6369 gen_set_CF_bit31(tmp2);
6370 }
9ee6e8bb
PB
6371 } else {
6372 /* register */
6373 rm = (insn) & 0xf;
e9bb4aa9 6374 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6375 shiftop = (insn >> 5) & 3;
6376 if (!(insn & (1 << 4))) {
6377 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6378 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6379 } else {
6380 rs = (insn >> 8) & 0xf;
8984bd2e 6381 tmp = load_reg(s, rs);
e9bb4aa9 6382 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6383 }
6384 }
6385 if (op1 != 0x0f && op1 != 0x0d) {
6386 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6387 tmp = load_reg(s, rn);
6388 } else {
6389 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6390 }
6391 rd = (insn >> 12) & 0xf;
6392 switch(op1) {
6393 case 0x00:
e9bb4aa9
JR
6394 tcg_gen_and_i32(tmp, tmp, tmp2);
6395 if (logic_cc) {
6396 gen_logic_CC(tmp);
6397 }
21aeb343 6398 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6399 break;
6400 case 0x01:
e9bb4aa9
JR
6401 tcg_gen_xor_i32(tmp, tmp, tmp2);
6402 if (logic_cc) {
6403 gen_logic_CC(tmp);
6404 }
21aeb343 6405 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6406 break;
6407 case 0x02:
6408 if (set_cc && rd == 15) {
6409 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6410 if (IS_USER(s)) {
9ee6e8bb 6411 goto illegal_op;
e9bb4aa9
JR
6412 }
6413 gen_helper_sub_cc(tmp, tmp, tmp2);
6414 gen_exception_return(s, tmp);
9ee6e8bb 6415 } else {
e9bb4aa9
JR
6416 if (set_cc) {
6417 gen_helper_sub_cc(tmp, tmp, tmp2);
6418 } else {
6419 tcg_gen_sub_i32(tmp, tmp, tmp2);
6420 }
21aeb343 6421 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6422 }
6423 break;
6424 case 0x03:
e9bb4aa9
JR
6425 if (set_cc) {
6426 gen_helper_sub_cc(tmp, tmp2, tmp);
6427 } else {
6428 tcg_gen_sub_i32(tmp, tmp2, tmp);
6429 }
21aeb343 6430 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6431 break;
6432 case 0x04:
e9bb4aa9
JR
6433 if (set_cc) {
6434 gen_helper_add_cc(tmp, tmp, tmp2);
6435 } else {
6436 tcg_gen_add_i32(tmp, tmp, tmp2);
6437 }
21aeb343 6438 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6439 break;
6440 case 0x05:
e9bb4aa9
JR
6441 if (set_cc) {
6442 gen_helper_adc_cc(tmp, tmp, tmp2);
6443 } else {
6444 gen_add_carry(tmp, tmp, tmp2);
6445 }
21aeb343 6446 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6447 break;
6448 case 0x06:
e9bb4aa9
JR
6449 if (set_cc) {
6450 gen_helper_sbc_cc(tmp, tmp, tmp2);
6451 } else {
6452 gen_sub_carry(tmp, tmp, tmp2);
6453 }
21aeb343 6454 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6455 break;
6456 case 0x07:
e9bb4aa9
JR
6457 if (set_cc) {
6458 gen_helper_sbc_cc(tmp, tmp2, tmp);
6459 } else {
6460 gen_sub_carry(tmp, tmp2, tmp);
6461 }
21aeb343 6462 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6463 break;
6464 case 0x08:
6465 if (set_cc) {
e9bb4aa9
JR
6466 tcg_gen_and_i32(tmp, tmp, tmp2);
6467 gen_logic_CC(tmp);
9ee6e8bb 6468 }
e9bb4aa9 6469 dead_tmp(tmp);
9ee6e8bb
PB
6470 break;
6471 case 0x09:
6472 if (set_cc) {
e9bb4aa9
JR
6473 tcg_gen_xor_i32(tmp, tmp, tmp2);
6474 gen_logic_CC(tmp);
9ee6e8bb 6475 }
e9bb4aa9 6476 dead_tmp(tmp);
9ee6e8bb
PB
6477 break;
6478 case 0x0a:
6479 if (set_cc) {
e9bb4aa9 6480 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6481 }
e9bb4aa9 6482 dead_tmp(tmp);
9ee6e8bb
PB
6483 break;
6484 case 0x0b:
6485 if (set_cc) {
e9bb4aa9 6486 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6487 }
e9bb4aa9 6488 dead_tmp(tmp);
9ee6e8bb
PB
6489 break;
6490 case 0x0c:
e9bb4aa9
JR
6491 tcg_gen_or_i32(tmp, tmp, tmp2);
6492 if (logic_cc) {
6493 gen_logic_CC(tmp);
6494 }
21aeb343 6495 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6496 break;
6497 case 0x0d:
6498 if (logic_cc && rd == 15) {
6499 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6500 if (IS_USER(s)) {
9ee6e8bb 6501 goto illegal_op;
e9bb4aa9
JR
6502 }
6503 gen_exception_return(s, tmp2);
9ee6e8bb 6504 } else {
e9bb4aa9
JR
6505 if (logic_cc) {
6506 gen_logic_CC(tmp2);
6507 }
21aeb343 6508 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6509 }
6510 break;
6511 case 0x0e:
f669df27 6512 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6513 if (logic_cc) {
6514 gen_logic_CC(tmp);
6515 }
21aeb343 6516 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6517 break;
6518 default:
6519 case 0x0f:
e9bb4aa9
JR
6520 tcg_gen_not_i32(tmp2, tmp2);
6521 if (logic_cc) {
6522 gen_logic_CC(tmp2);
6523 }
21aeb343 6524 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6525 break;
6526 }
e9bb4aa9
JR
6527 if (op1 != 0x0f && op1 != 0x0d) {
6528 dead_tmp(tmp2);
6529 }
9ee6e8bb
PB
6530 } else {
6531 /* other instructions */
6532 op1 = (insn >> 24) & 0xf;
6533 switch(op1) {
6534 case 0x0:
6535 case 0x1:
6536 /* multiplies, extra load/stores */
6537 sh = (insn >> 5) & 3;
6538 if (sh == 0) {
6539 if (op1 == 0x0) {
6540 rd = (insn >> 16) & 0xf;
6541 rn = (insn >> 12) & 0xf;
6542 rs = (insn >> 8) & 0xf;
6543 rm = (insn) & 0xf;
6544 op1 = (insn >> 20) & 0xf;
6545 switch (op1) {
6546 case 0: case 1: case 2: case 3: case 6:
6547 /* 32 bit mul */
5e3f878a
PB
6548 tmp = load_reg(s, rs);
6549 tmp2 = load_reg(s, rm);
6550 tcg_gen_mul_i32(tmp, tmp, tmp2);
6551 dead_tmp(tmp2);
9ee6e8bb
PB
6552 if (insn & (1 << 22)) {
6553 /* Subtract (mls) */
6554 ARCH(6T2);
5e3f878a
PB
6555 tmp2 = load_reg(s, rn);
6556 tcg_gen_sub_i32(tmp, tmp2, tmp);
6557 dead_tmp(tmp2);
9ee6e8bb
PB
6558 } else if (insn & (1 << 21)) {
6559 /* Add */
5e3f878a
PB
6560 tmp2 = load_reg(s, rn);
6561 tcg_gen_add_i32(tmp, tmp, tmp2);
6562 dead_tmp(tmp2);
9ee6e8bb
PB
6563 }
6564 if (insn & (1 << 20))
5e3f878a
PB
6565 gen_logic_CC(tmp);
6566 store_reg(s, rd, tmp);
9ee6e8bb
PB
6567 break;
6568 default:
6569 /* 64 bit mul */
5e3f878a
PB
6570 tmp = load_reg(s, rs);
6571 tmp2 = load_reg(s, rm);
9ee6e8bb 6572 if (insn & (1 << 22))
a7812ae4 6573 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6574 else
a7812ae4 6575 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6576 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6577 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6578 if (!(insn & (1 << 23))) { /* double accumulate */
6579 ARCH(6);
a7812ae4
PB
6580 gen_addq_lo(s, tmp64, rn);
6581 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6582 }
6583 if (insn & (1 << 20))
a7812ae4
PB
6584 gen_logicq_cc(tmp64);
6585 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6586 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6587 break;
6588 }
6589 } else {
6590 rn = (insn >> 16) & 0xf;
6591 rd = (insn >> 12) & 0xf;
6592 if (insn & (1 << 23)) {
6593 /* load/store exclusive */
86753403
PB
6594 op1 = (insn >> 21) & 0x3;
6595 if (op1)
a47f43d2 6596 ARCH(6K);
86753403
PB
6597 else
6598 ARCH(6);
3174f8e9 6599 addr = tcg_temp_local_new_i32();
98a46317 6600 load_reg_var(s, addr, rn);
9ee6e8bb 6601 if (insn & (1 << 20)) {
86753403
PB
6602 switch (op1) {
6603 case 0: /* ldrex */
426f5abc 6604 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6605 break;
6606 case 1: /* ldrexd */
426f5abc 6607 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6608 break;
6609 case 2: /* ldrexb */
426f5abc 6610 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6611 break;
6612 case 3: /* ldrexh */
426f5abc 6613 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6614 break;
6615 default:
6616 abort();
6617 }
9ee6e8bb
PB
6618 } else {
6619 rm = insn & 0xf;
86753403
PB
6620 switch (op1) {
6621 case 0: /* strex */
426f5abc 6622 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6623 break;
6624 case 1: /* strexd */
426f5abc 6625                         gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6626 break;
6627 case 2: /* strexb */
426f5abc 6628 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6629 break;
6630 case 3: /* strexh */
426f5abc 6631 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6632 break;
6633 default:
6634 abort();
6635 }
9ee6e8bb 6636 }
3174f8e9 6637 tcg_temp_free(addr);
9ee6e8bb
PB
6638 } else {
6639 /* SWP instruction */
6640 rm = (insn) & 0xf;
6641
8984bd2e
PB
6642 /* ??? This is not really atomic. However we know
6643 we never have multiple CPUs running in parallel,
6644 so it is good enough. */
6645 addr = load_reg(s, rn);
6646 tmp = load_reg(s, rm);
9ee6e8bb 6647 if (insn & (1 << 22)) {
8984bd2e
PB
6648 tmp2 = gen_ld8u(addr, IS_USER(s));
6649 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6650 } else {
8984bd2e
PB
6651 tmp2 = gen_ld32(addr, IS_USER(s));
6652 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6653 }
8984bd2e
PB
6654 dead_tmp(addr);
6655 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6656 }
6657 }
6658 } else {
6659 int address_offset;
6660 int load;
6661 /* Misc load/store */
6662 rn = (insn >> 16) & 0xf;
6663 rd = (insn >> 12) & 0xf;
b0109805 6664 addr = load_reg(s, rn);
9ee6e8bb 6665 if (insn & (1 << 24))
b0109805 6666 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6667 address_offset = 0;
6668 if (insn & (1 << 20)) {
6669 /* load */
6670 switch(sh) {
6671 case 1:
b0109805 6672 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6673 break;
6674 case 2:
b0109805 6675 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6676 break;
6677 default:
6678 case 3:
b0109805 6679 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6680 break;
6681 }
6682 load = 1;
6683 } else if (sh & 2) {
6684 /* doubleword */
6685 if (sh & 1) {
6686 /* store */
b0109805
PB
6687 tmp = load_reg(s, rd);
6688 gen_st32(tmp, addr, IS_USER(s));
6689 tcg_gen_addi_i32(addr, addr, 4);
6690 tmp = load_reg(s, rd + 1);
6691 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6692 load = 0;
6693 } else {
6694 /* load */
b0109805
PB
6695 tmp = gen_ld32(addr, IS_USER(s));
6696 store_reg(s, rd, tmp);
6697 tcg_gen_addi_i32(addr, addr, 4);
6698 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6699 rd++;
6700 load = 1;
6701 }
6702 address_offset = -4;
6703 } else {
6704 /* store */
b0109805
PB
6705 tmp = load_reg(s, rd);
6706 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6707 load = 0;
6708 }
6709 /* Perform base writeback before the loaded value to
6710 ensure correct behavior with overlapping index registers.
 6711                    ldrd with base writeback is undefined if the
6712 destination and index registers overlap. */
6713 if (!(insn & (1 << 24))) {
b0109805
PB
6714 gen_add_datah_offset(s, insn, address_offset, addr);
6715 store_reg(s, rn, addr);
9ee6e8bb
PB
6716 } else if (insn & (1 << 21)) {
6717 if (address_offset)
b0109805
PB
6718 tcg_gen_addi_i32(addr, addr, address_offset);
6719 store_reg(s, rn, addr);
6720 } else {
6721 dead_tmp(addr);
9ee6e8bb
PB
6722 }
6723 if (load) {
6724 /* Complete the load. */
b0109805 6725 store_reg(s, rd, tmp);
9ee6e8bb
PB
6726 }
6727 }
6728 break;
6729 case 0x4:
6730 case 0x5:
6731 goto do_ldst;
6732 case 0x6:
6733 case 0x7:
6734 if (insn & (1 << 4)) {
6735 ARCH(6);
 6736                 /* ARMv6 Media instructions.  */
6737 rm = insn & 0xf;
6738 rn = (insn >> 16) & 0xf;
2c0262af 6739 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6740 rs = (insn >> 8) & 0xf;
6741 switch ((insn >> 23) & 3) {
6742 case 0: /* Parallel add/subtract. */
6743 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6744 tmp = load_reg(s, rn);
6745 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6746 sh = (insn >> 5) & 7;
6747 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6748 goto illegal_op;
6ddbc6e4
PB
6749 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6750 dead_tmp(tmp2);
6751 store_reg(s, rd, tmp);
9ee6e8bb
PB
6752 break;
6753 case 1:
6754 if ((insn & 0x00700020) == 0) {
6c95676b 6755 /* Halfword pack. */
3670669c
PB
6756 tmp = load_reg(s, rn);
6757 tmp2 = load_reg(s, rm);
9ee6e8bb 6758 shift = (insn >> 7) & 0x1f;
3670669c
PB
6759 if (insn & (1 << 6)) {
6760 /* pkhtb */
22478e79
AZ
6761 if (shift == 0)
6762 shift = 31;
6763 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6764 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6765 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6766 } else {
6767 /* pkhbt */
22478e79
AZ
6768 if (shift)
6769 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6770 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6771 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6772 }
6773 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6774 dead_tmp(tmp2);
3670669c 6775 store_reg(s, rd, tmp);
9ee6e8bb
PB
6776 } else if ((insn & 0x00200020) == 0x00200000) {
6777 /* [us]sat */
6ddbc6e4 6778 tmp = load_reg(s, rm);
9ee6e8bb
PB
6779 shift = (insn >> 7) & 0x1f;
6780 if (insn & (1 << 6)) {
6781 if (shift == 0)
6782 shift = 31;
6ddbc6e4 6783 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6784 } else {
6ddbc6e4 6785 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6786 }
6787 sh = (insn >> 16) & 0x1f;
6788 if (sh != 0) {
b75263d6 6789 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6790 if (insn & (1 << 22))
b75263d6 6791 gen_helper_usat(tmp, tmp, tmp2);
9ee6e8bb 6792 else
b75263d6
JR
6793 gen_helper_ssat(tmp, tmp, tmp2);
6794 tcg_temp_free_i32(tmp2);
9ee6e8bb 6795 }
6ddbc6e4 6796 store_reg(s, rd, tmp);
9ee6e8bb
PB
6797 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6798 /* [us]sat16 */
6ddbc6e4 6799 tmp = load_reg(s, rm);
9ee6e8bb
PB
6800 sh = (insn >> 16) & 0x1f;
6801 if (sh != 0) {
b75263d6 6802 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6803 if (insn & (1 << 22))
b75263d6 6804 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 6805 else
b75263d6
JR
6806 gen_helper_ssat16(tmp, tmp, tmp2);
6807 tcg_temp_free_i32(tmp2);
9ee6e8bb 6808 }
6ddbc6e4 6809 store_reg(s, rd, tmp);
9ee6e8bb
PB
6810 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6811 /* Select bytes. */
6ddbc6e4
PB
6812 tmp = load_reg(s, rn);
6813 tmp2 = load_reg(s, rm);
6814 tmp3 = new_tmp();
6815 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6816 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6817 dead_tmp(tmp3);
6818 dead_tmp(tmp2);
6819 store_reg(s, rd, tmp);
9ee6e8bb 6820 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6821 tmp = load_reg(s, rm);
9ee6e8bb
PB
6822 shift = (insn >> 10) & 3;
 6823                         /* ??? In many cases it's not necessary to do a
6824 rotate, a shift is sufficient. */
6825 if (shift != 0)
f669df27 6826 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6827 op1 = (insn >> 20) & 7;
6828 switch (op1) {
5e3f878a
PB
6829 case 0: gen_sxtb16(tmp); break;
6830 case 2: gen_sxtb(tmp); break;
6831 case 3: gen_sxth(tmp); break;
6832 case 4: gen_uxtb16(tmp); break;
6833 case 6: gen_uxtb(tmp); break;
6834 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6835 default: goto illegal_op;
6836 }
6837 if (rn != 15) {
5e3f878a 6838 tmp2 = load_reg(s, rn);
9ee6e8bb 6839 if ((op1 & 3) == 0) {
5e3f878a 6840 gen_add16(tmp, tmp2);
9ee6e8bb 6841 } else {
5e3f878a
PB
6842 tcg_gen_add_i32(tmp, tmp, tmp2);
6843 dead_tmp(tmp2);
9ee6e8bb
PB
6844 }
6845 }
6c95676b 6846 store_reg(s, rd, tmp);
9ee6e8bb
PB
6847 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6848 /* rev */
b0109805 6849 tmp = load_reg(s, rm);
9ee6e8bb
PB
6850 if (insn & (1 << 22)) {
6851 if (insn & (1 << 7)) {
b0109805 6852 gen_revsh(tmp);
9ee6e8bb
PB
6853 } else {
6854 ARCH(6T2);
b0109805 6855 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6856 }
6857 } else {
6858 if (insn & (1 << 7))
b0109805 6859 gen_rev16(tmp);
9ee6e8bb 6860 else
66896cb8 6861 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6862 }
b0109805 6863 store_reg(s, rd, tmp);
9ee6e8bb
PB
6864 } else {
6865 goto illegal_op;
6866 }
6867 break;
6868 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6869 tmp = load_reg(s, rm);
6870 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6871 if (insn & (1 << 20)) {
6872 /* Signed multiply most significant [accumulate]. */
a7812ae4 6873 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6874 if (insn & (1 << 5))
a7812ae4
PB
6875 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6876 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6877 tmp = new_tmp();
a7812ae4 6878 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6879 tcg_temp_free_i64(tmp64);
955a7dd5
AZ
6880 if (rd != 15) {
6881 tmp2 = load_reg(s, rd);
9ee6e8bb 6882 if (insn & (1 << 6)) {
5e3f878a 6883 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6884 } else {
5e3f878a 6885 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6886 }
5e3f878a 6887 dead_tmp(tmp2);
9ee6e8bb 6888 }
955a7dd5 6889 store_reg(s, rn, tmp);
9ee6e8bb
PB
6890 } else {
6891 if (insn & (1 << 5))
5e3f878a
PB
6892 gen_swap_half(tmp2);
6893 gen_smul_dual(tmp, tmp2);
6894 /* This addition cannot overflow. */
6895 if (insn & (1 << 6)) {
6896 tcg_gen_sub_i32(tmp, tmp, tmp2);
6897 } else {
6898 tcg_gen_add_i32(tmp, tmp, tmp2);
6899 }
6900 dead_tmp(tmp2);
9ee6e8bb 6901 if (insn & (1 << 22)) {
5e3f878a 6902 /* smlald, smlsld */
a7812ae4
PB
6903 tmp64 = tcg_temp_new_i64();
6904 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6905 dead_tmp(tmp);
a7812ae4
PB
6906 gen_addq(s, tmp64, rd, rn);
6907 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 6908 tcg_temp_free_i64(tmp64);
9ee6e8bb 6909 } else {
5e3f878a 6910 /* smuad, smusd, smlad, smlsd */
22478e79 6911 if (rd != 15)
9ee6e8bb 6912 {
22478e79 6913 tmp2 = load_reg(s, rd);
5e3f878a
PB
6914 gen_helper_add_setq(tmp, tmp, tmp2);
6915 dead_tmp(tmp2);
9ee6e8bb 6916 }
22478e79 6917 store_reg(s, rn, tmp);
9ee6e8bb
PB
6918 }
6919 }
6920 break;
6921 case 3:
6922 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6923 switch (op1) {
6924 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6925 ARCH(6);
6926 tmp = load_reg(s, rm);
6927 tmp2 = load_reg(s, rs);
6928 gen_helper_usad8(tmp, tmp, tmp2);
6929 dead_tmp(tmp2);
ded9d295
AZ
6930 if (rd != 15) {
6931 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6932 tcg_gen_add_i32(tmp, tmp, tmp2);
6933 dead_tmp(tmp2);
9ee6e8bb 6934 }
ded9d295 6935 store_reg(s, rn, tmp);
9ee6e8bb
PB
6936 break;
6937 case 0x20: case 0x24: case 0x28: case 0x2c:
6938 /* Bitfield insert/clear. */
6939 ARCH(6T2);
6940 shift = (insn >> 7) & 0x1f;
6941 i = (insn >> 16) & 0x1f;
6942 i = i + 1 - shift;
6943 if (rm == 15) {
5e3f878a
PB
6944 tmp = new_tmp();
6945 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6946 } else {
5e3f878a 6947 tmp = load_reg(s, rm);
9ee6e8bb
PB
6948 }
6949 if (i != 32) {
5e3f878a 6950 tmp2 = load_reg(s, rd);
8f8e3aa4 6951 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6952 dead_tmp(tmp2);
9ee6e8bb 6953 }
5e3f878a 6954 store_reg(s, rd, tmp);
9ee6e8bb
PB
6955 break;
6956 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6957 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6958 ARCH(6T2);
5e3f878a 6959 tmp = load_reg(s, rm);
9ee6e8bb
PB
6960 shift = (insn >> 7) & 0x1f;
6961 i = ((insn >> 16) & 0x1f) + 1;
6962 if (shift + i > 32)
6963 goto illegal_op;
6964 if (i < 32) {
6965 if (op1 & 0x20) {
5e3f878a 6966 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6967 } else {
5e3f878a 6968 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6969 }
6970 }
5e3f878a 6971 store_reg(s, rd, tmp);
9ee6e8bb
PB
6972 break;
6973 default:
6974 goto illegal_op;
6975 }
6976 break;
6977 }
6978 break;
6979 }
6980 do_ldst:
6981 /* Check for undefined extension instructions
 6982              * per the ARM Bible, i.e.:
6983 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6984 */
6985 sh = (0xf << 20) | (0xf << 4);
6986 if (op1 == 0x7 && ((insn & sh) == sh))
6987 {
6988 goto illegal_op;
6989 }
6990 /* load/store byte/word */
6991 rn = (insn >> 16) & 0xf;
6992 rd = (insn >> 12) & 0xf;
b0109805 6993 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6994 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6995 if (insn & (1 << 24))
b0109805 6996 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6997 if (insn & (1 << 20)) {
6998 /* load */
9ee6e8bb 6999 if (insn & (1 << 22)) {
b0109805 7000 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7001 } else {
b0109805 7002 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7003 }
9ee6e8bb
PB
7004 } else {
7005 /* store */
b0109805 7006 tmp = load_reg(s, rd);
9ee6e8bb 7007 if (insn & (1 << 22))
b0109805 7008 gen_st8(tmp, tmp2, i);
9ee6e8bb 7009 else
b0109805 7010 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7011 }
7012 if (!(insn & (1 << 24))) {
b0109805
PB
7013 gen_add_data_offset(s, insn, tmp2);
7014 store_reg(s, rn, tmp2);
7015 } else if (insn & (1 << 21)) {
7016 store_reg(s, rn, tmp2);
7017 } else {
7018 dead_tmp(tmp2);
9ee6e8bb
PB
7019 }
7020 if (insn & (1 << 20)) {
7021 /* Complete the load. */
7022 if (rd == 15)
b0109805 7023 gen_bx(s, tmp);
9ee6e8bb 7024 else
b0109805 7025 store_reg(s, rd, tmp);
9ee6e8bb
PB
7026 }
7027 break;
7028 case 0x08:
7029 case 0x09:
7030 {
7031 int j, n, user, loaded_base;
b0109805 7032 TCGv loaded_var;
9ee6e8bb
PB
7033 /* load/store multiple words */
7034 /* XXX: store correct base if write back */
7035 user = 0;
7036 if (insn & (1 << 22)) {
7037 if (IS_USER(s))
7038 goto illegal_op; /* only usable in supervisor mode */
7039
7040 if ((insn & (1 << 15)) == 0)
7041 user = 1;
7042 }
7043 rn = (insn >> 16) & 0xf;
b0109805 7044 addr = load_reg(s, rn);
9ee6e8bb
PB
7045
7046 /* compute total size */
7047 loaded_base = 0;
a50f5b91 7048 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7049 n = 0;
7050 for(i=0;i<16;i++) {
7051 if (insn & (1 << i))
7052 n++;
7053 }
7054 /* XXX: test invalid n == 0 case ? */
7055 if (insn & (1 << 23)) {
7056 if (insn & (1 << 24)) {
7057 /* pre increment */
b0109805 7058 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7059 } else {
7060 /* post increment */
7061 }
7062 } else {
7063 if (insn & (1 << 24)) {
7064 /* pre decrement */
b0109805 7065 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7066 } else {
7067 /* post decrement */
7068 if (n != 1)
b0109805 7069 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7070 }
7071 }
7072 j = 0;
7073 for(i=0;i<16;i++) {
7074 if (insn & (1 << i)) {
7075 if (insn & (1 << 20)) {
7076 /* load */
b0109805 7077 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7078 if (i == 15) {
b0109805 7079 gen_bx(s, tmp);
9ee6e8bb 7080 } else if (user) {
b75263d6
JR
7081 tmp2 = tcg_const_i32(i);
7082 gen_helper_set_user_reg(tmp2, tmp);
7083 tcg_temp_free_i32(tmp2);
b0109805 7084 dead_tmp(tmp);
9ee6e8bb 7085 } else if (i == rn) {
b0109805 7086 loaded_var = tmp;
9ee6e8bb
PB
7087 loaded_base = 1;
7088 } else {
b0109805 7089 store_reg(s, i, tmp);
9ee6e8bb
PB
7090 }
7091 } else {
7092 /* store */
7093 if (i == 15) {
7094 /* special case: r15 = PC + 8 */
7095 val = (long)s->pc + 4;
b0109805
PB
7096 tmp = new_tmp();
7097 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7098 } else if (user) {
b0109805 7099 tmp = new_tmp();
b75263d6
JR
7100 tmp2 = tcg_const_i32(i);
7101 gen_helper_get_user_reg(tmp, tmp2);
7102 tcg_temp_free_i32(tmp2);
9ee6e8bb 7103 } else {
b0109805 7104 tmp = load_reg(s, i);
9ee6e8bb 7105 }
b0109805 7106 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7107 }
7108 j++;
7109 /* no need to add after the last transfer */
7110 if (j != n)
b0109805 7111 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7112 }
7113 }
7114 if (insn & (1 << 21)) {
7115 /* write back */
7116 if (insn & (1 << 23)) {
7117 if (insn & (1 << 24)) {
7118 /* pre increment */
7119 } else {
7120 /* post increment */
b0109805 7121 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7122 }
7123 } else {
7124 if (insn & (1 << 24)) {
7125 /* pre decrement */
7126 if (n != 1)
b0109805 7127 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7128 } else {
7129 /* post decrement */
b0109805 7130 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7131 }
7132 }
b0109805
PB
7133 store_reg(s, rn, addr);
7134 } else {
7135 dead_tmp(addr);
9ee6e8bb
PB
7136 }
7137 if (loaded_base) {
b0109805 7138 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7139 }
7140 if ((insn & (1 << 22)) && !user) {
7141 /* Restore CPSR from SPSR. */
d9ba4830
PB
7142 tmp = load_cpu_field(spsr);
7143 gen_set_cpsr(tmp, 0xffffffff);
7144 dead_tmp(tmp);
9ee6e8bb
PB
7145 s->is_jmp = DISAS_UPDATE;
7146 }
7147 }
7148 break;
7149 case 0xa:
7150 case 0xb:
7151 {
7152 int32_t offset;
7153
7154 /* branch (and link) */
7155 val = (int32_t)s->pc;
7156 if (insn & (1 << 24)) {
5e3f878a
PB
7157 tmp = new_tmp();
7158 tcg_gen_movi_i32(tmp, val);
7159 store_reg(s, 14, tmp);
9ee6e8bb
PB
7160 }
7161 offset = (((int32_t)insn << 8) >> 8);
7162 val += (offset << 2) + 4;
7163 gen_jmp(s, val);
7164 }
7165 break;
7166 case 0xc:
7167 case 0xd:
7168 case 0xe:
7169 /* Coprocessor. */
7170 if (disas_coproc_insn(env, s, insn))
7171 goto illegal_op;
7172 break;
7173 case 0xf:
7174 /* swi */
5e3f878a 7175 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7176 s->is_jmp = DISAS_SWI;
7177 break;
7178 default:
7179 illegal_op:
7180 gen_set_condexec(s);
5e3f878a 7181 gen_set_pc_im(s->pc - 4);
d9ba4830 7182 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7183 s->is_jmp = DISAS_JUMP;
7184 break;
7185 }
7186 }
7187}
7188
7189/* Return true if this is a Thumb-2 logical op. */
7190static int
7191thumb2_logic_op(int op)
7192{
7193 return (op < 8);
7194}
7195
7196/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7197 then set condition code flags based on the result of the operation.
7198 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7199 to the high bit of T1.
7200 Returns zero if the opcode is valid. */
7201
7202static int
396e467c 7203gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7204{
7205 int logic_cc;
7206
7207 logic_cc = 0;
7208 switch (op) {
7209 case 0: /* and */
396e467c 7210 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7211 logic_cc = conds;
7212 break;
7213 case 1: /* bic */
f669df27 7214 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7215 logic_cc = conds;
7216 break;
7217 case 2: /* orr */
396e467c 7218 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7219 logic_cc = conds;
7220 break;
7221 case 3: /* orn */
396e467c
FN
7222 tcg_gen_not_i32(t1, t1);
7223 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7224 logic_cc = conds;
7225 break;
7226 case 4: /* eor */
396e467c 7227 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7228 logic_cc = conds;
7229 break;
7230 case 8: /* add */
7231 if (conds)
396e467c 7232 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7233 else
396e467c 7234 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7235 break;
7236 case 10: /* adc */
7237 if (conds)
396e467c 7238 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7239 else
396e467c 7240 gen_adc(t0, t1);
9ee6e8bb
PB
7241 break;
7242 case 11: /* sbc */
7243 if (conds)
396e467c 7244 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7245 else
396e467c 7246 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7247 break;
7248 case 13: /* sub */
7249 if (conds)
396e467c 7250 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7251 else
396e467c 7252 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7253 break;
7254 case 14: /* rsb */
7255 if (conds)
396e467c 7256 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7257 else
396e467c 7258 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7259 break;
7260 default: /* 5, 6, 7, 9, 12, 15. */
7261 return 1;
7262 }
7263 if (logic_cc) {
396e467c 7264 gen_logic_CC(t0);
9ee6e8bb 7265 if (shifter_out)
396e467c 7266 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7267 }
7268 return 0;
7269}
7270
7271/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7272 is not legal. */
7273static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7274{
b0109805 7275 uint32_t insn, imm, shift, offset;
9ee6e8bb 7276 uint32_t rd, rn, rm, rs;
b26eefb6 7277 TCGv tmp;
6ddbc6e4
PB
7278 TCGv tmp2;
7279 TCGv tmp3;
b0109805 7280 TCGv addr;
a7812ae4 7281 TCGv_i64 tmp64;
9ee6e8bb
PB
7282 int op;
7283 int shiftop;
7284 int conds;
7285 int logic_cc;
7286
7287 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7288 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7289 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7290 16-bit instructions to get correct prefetch abort behavior. */
7291 insn = insn_hw1;
7292 if ((insn & (1 << 12)) == 0) {
7293 /* Second half of blx. */
7294 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7295 tmp = load_reg(s, 14);
7296 tcg_gen_addi_i32(tmp, tmp, offset);
7297 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7298
d9ba4830 7299 tmp2 = new_tmp();
b0109805 7300 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7301 store_reg(s, 14, tmp2);
7302 gen_bx(s, tmp);
9ee6e8bb
PB
7303 return 0;
7304 }
7305 if (insn & (1 << 11)) {
7306 /* Second half of bl. */
7307 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7308 tmp = load_reg(s, 14);
6a0d8a1d 7309 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7310
d9ba4830 7311 tmp2 = new_tmp();
b0109805 7312 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7313 store_reg(s, 14, tmp2);
7314 gen_bx(s, tmp);
9ee6e8bb
PB
7315 return 0;
7316 }
7317 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7318 /* Instruction spans a page boundary. Implement it as two
 7319                16-bit instructions in case the second half causes a
7320 prefetch abort. */
7321 offset = ((int32_t)insn << 21) >> 9;
396e467c 7322 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7323 return 0;
7324 }
7325 /* Fall through to 32-bit decode. */
7326 }
7327
7328 insn = lduw_code(s->pc);
7329 s->pc += 2;
7330 insn |= (uint32_t)insn_hw1 << 16;
7331
7332 if ((insn & 0xf800e800) != 0xf000e800) {
7333 ARCH(6T2);
7334 }
7335
7336 rn = (insn >> 16) & 0xf;
7337 rs = (insn >> 12) & 0xf;
7338 rd = (insn >> 8) & 0xf;
7339 rm = insn & 0xf;
7340 switch ((insn >> 25) & 0xf) {
7341 case 0: case 1: case 2: case 3:
7342 /* 16-bit instructions. Should never happen. */
7343 abort();
7344 case 4:
7345 if (insn & (1 << 22)) {
7346 /* Other load/store, table branch. */
7347 if (insn & 0x01200000) {
7348 /* Load/store doubleword. */
7349 if (rn == 15) {
b0109805
PB
7350 addr = new_tmp();
7351 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7352 } else {
b0109805 7353 addr = load_reg(s, rn);
9ee6e8bb
PB
7354 }
7355 offset = (insn & 0xff) * 4;
7356 if ((insn & (1 << 23)) == 0)
7357 offset = -offset;
7358 if (insn & (1 << 24)) {
b0109805 7359 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7360 offset = 0;
7361 }
7362 if (insn & (1 << 20)) {
7363 /* ldrd */
b0109805
PB
7364 tmp = gen_ld32(addr, IS_USER(s));
7365 store_reg(s, rs, tmp);
7366 tcg_gen_addi_i32(addr, addr, 4);
7367 tmp = gen_ld32(addr, IS_USER(s));
7368 store_reg(s, rd, tmp);
9ee6e8bb
PB
7369 } else {
7370 /* strd */
b0109805
PB
7371 tmp = load_reg(s, rs);
7372 gen_st32(tmp, addr, IS_USER(s));
7373 tcg_gen_addi_i32(addr, addr, 4);
7374 tmp = load_reg(s, rd);
7375 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7376 }
7377 if (insn & (1 << 21)) {
7378 /* Base writeback. */
7379 if (rn == 15)
7380 goto illegal_op;
b0109805
PB
7381 tcg_gen_addi_i32(addr, addr, offset - 4);
7382 store_reg(s, rn, addr);
7383 } else {
7384 dead_tmp(addr);
9ee6e8bb
PB
7385 }
7386 } else if ((insn & (1 << 23)) == 0) {
7387 /* Load/store exclusive word. */
3174f8e9 7388 addr = tcg_temp_local_new();
98a46317 7389 load_reg_var(s, addr, rn);
426f5abc 7390 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7391 if (insn & (1 << 20)) {
426f5abc 7392 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7393 } else {
426f5abc 7394 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7395 }
3174f8e9 7396 tcg_temp_free(addr);
9ee6e8bb
PB
7397 } else if ((insn & (1 << 6)) == 0) {
7398 /* Table Branch. */
7399 if (rn == 15) {
b0109805
PB
7400 addr = new_tmp();
7401 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7402 } else {
b0109805 7403 addr = load_reg(s, rn);
9ee6e8bb 7404 }
b26eefb6 7405 tmp = load_reg(s, rm);
b0109805 7406 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7407 if (insn & (1 << 4)) {
7408 /* tbh */
b0109805 7409 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7410 dead_tmp(tmp);
b0109805 7411 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7412 } else { /* tbb */
b26eefb6 7413 dead_tmp(tmp);
b0109805 7414 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7415 }
b0109805
PB
7416 dead_tmp(addr);
7417 tcg_gen_shli_i32(tmp, tmp, 1);
7418 tcg_gen_addi_i32(tmp, tmp, s->pc);
7419 store_reg(s, 15, tmp);
9ee6e8bb
PB
7420 } else {
7421 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7422 ARCH(7);
9ee6e8bb 7423 op = (insn >> 4) & 0x3;
426f5abc
PB
7424 if (op == 2) {
7425 goto illegal_op;
7426 }
3174f8e9 7427 addr = tcg_temp_local_new();
98a46317 7428 load_reg_var(s, addr, rn);
9ee6e8bb 7429 if (insn & (1 << 20)) {
426f5abc 7430 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7431 } else {
426f5abc 7432 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7433 }
3174f8e9 7434 tcg_temp_free(addr);
9ee6e8bb
PB
7435 }
7436 } else {
7437 /* Load/store multiple, RFE, SRS. */
7438 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7439 /* Not available in user mode. */
b0109805 7440 if (IS_USER(s))
9ee6e8bb
PB
7441 goto illegal_op;
7442 if (insn & (1 << 20)) {
7443 /* rfe */
b0109805
PB
7444 addr = load_reg(s, rn);
7445 if ((insn & (1 << 24)) == 0)
7446 tcg_gen_addi_i32(addr, addr, -8);
7447 /* Load PC into tmp and CPSR into tmp2. */
7448 tmp = gen_ld32(addr, 0);
7449 tcg_gen_addi_i32(addr, addr, 4);
7450 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7451 if (insn & (1 << 21)) {
7452 /* Base writeback. */
b0109805
PB
7453 if (insn & (1 << 24)) {
7454 tcg_gen_addi_i32(addr, addr, 4);
7455 } else {
7456 tcg_gen_addi_i32(addr, addr, -4);
7457 }
7458 store_reg(s, rn, addr);
7459 } else {
7460 dead_tmp(addr);
9ee6e8bb 7461 }
b0109805 7462 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7463 } else {
7464 /* srs */
7465 op = (insn & 0x1f);
7466 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7467 addr = load_reg(s, 13);
9ee6e8bb 7468 } else {
b0109805 7469 addr = new_tmp();
b75263d6
JR
7470 tmp = tcg_const_i32(op);
7471 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7472 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7473 }
7474 if ((insn & (1 << 24)) == 0) {
b0109805 7475 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7476 }
b0109805
PB
7477 tmp = load_reg(s, 14);
7478 gen_st32(tmp, addr, 0);
7479 tcg_gen_addi_i32(addr, addr, 4);
7480 tmp = new_tmp();
7481 gen_helper_cpsr_read(tmp);
7482 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7483 if (insn & (1 << 21)) {
7484 if ((insn & (1 << 24)) == 0) {
b0109805 7485 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7486 } else {
b0109805 7487 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7488 }
7489 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7490 store_reg(s, 13, addr);
9ee6e8bb 7491 } else {
b75263d6
JR
7492 tmp = tcg_const_i32(op);
7493 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7494 tcg_temp_free_i32(tmp);
9ee6e8bb 7495 }
b0109805
PB
7496 } else {
7497 dead_tmp(addr);
9ee6e8bb
PB
7498 }
7499 }
7500 } else {
7501 int i;
7502 /* Load/store multiple. */
b0109805 7503 addr = load_reg(s, rn);
9ee6e8bb
PB
7504 offset = 0;
7505 for (i = 0; i < 16; i++) {
7506 if (insn & (1 << i))
7507 offset += 4;
7508 }
7509 if (insn & (1 << 24)) {
b0109805 7510 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7511 }
7512
7513 for (i = 0; i < 16; i++) {
7514 if ((insn & (1 << i)) == 0)
7515 continue;
7516 if (insn & (1 << 20)) {
7517 /* Load. */
b0109805 7518 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7519 if (i == 15) {
b0109805 7520 gen_bx(s, tmp);
9ee6e8bb 7521 } else {
b0109805 7522 store_reg(s, i, tmp);
9ee6e8bb
PB
7523 }
7524 } else {
7525 /* Store. */
b0109805
PB
7526 tmp = load_reg(s, i);
7527 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7528 }
b0109805 7529 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7530 }
7531 if (insn & (1 << 21)) {
7532 /* Base register writeback. */
7533 if (insn & (1 << 24)) {
b0109805 7534 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7535 }
7536 /* Fault if writeback register is in register list. */
7537 if (insn & (1 << rn))
7538 goto illegal_op;
b0109805
PB
7539 store_reg(s, rn, addr);
7540 } else {
7541 dead_tmp(addr);
9ee6e8bb
PB
7542 }
7543 }
7544 }
7545 break;
7546 case 5: /* Data processing register constant shift. */
3174f8e9
FN
7547 if (rn == 15) {
7548 tmp = new_tmp();
7549 tcg_gen_movi_i32(tmp, 0);
7550 } else {
7551 tmp = load_reg(s, rn);
7552 }
7553 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7554 op = (insn >> 21) & 0xf;
7555 shiftop = (insn >> 4) & 3;
7556 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7557 conds = (insn & (1 << 20)) != 0;
7558 logic_cc = (conds && thumb2_logic_op(op));
3174f8e9
FN
7559 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7560 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9ee6e8bb 7561 goto illegal_op;
3174f8e9
FN
7562 dead_tmp(tmp2);
7563 if (rd != 15) {
7564 store_reg(s, rd, tmp);
7565 } else {
7566 dead_tmp(tmp);
7567 }
9ee6e8bb
PB
7568 break;
7569 case 13: /* Misc data processing. */
7570 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7571 if (op < 4 && (insn & 0xf000) != 0xf000)
7572 goto illegal_op;
7573 switch (op) {
7574 case 0: /* Register controlled shift. */
8984bd2e
PB
7575 tmp = load_reg(s, rn);
7576 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7577 if ((insn & 0x70) != 0)
7578 goto illegal_op;
7579 op = (insn >> 21) & 3;
8984bd2e
PB
7580 logic_cc = (insn & (1 << 20)) != 0;
7581 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7582 if (logic_cc)
7583 gen_logic_CC(tmp);
21aeb343 7584 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7585 break;
7586 case 1: /* Sign/zero extend. */
5e3f878a 7587 tmp = load_reg(s, rm);
9ee6e8bb
PB
7588 shift = (insn >> 4) & 3;
 7589             /* ??? In many cases it's not necessary to do a
7590 rotate, a shift is sufficient. */
7591 if (shift != 0)
f669df27 7592 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7593 op = (insn >> 20) & 7;
7594 switch (op) {
5e3f878a
PB
7595 case 0: gen_sxth(tmp); break;
7596 case 1: gen_uxth(tmp); break;
7597 case 2: gen_sxtb16(tmp); break;
7598 case 3: gen_uxtb16(tmp); break;
7599 case 4: gen_sxtb(tmp); break;
7600 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7601 default: goto illegal_op;
7602 }
7603 if (rn != 15) {
5e3f878a 7604 tmp2 = load_reg(s, rn);
9ee6e8bb 7605 if ((op >> 1) == 1) {
5e3f878a 7606 gen_add16(tmp, tmp2);
9ee6e8bb 7607 } else {
5e3f878a
PB
7608 tcg_gen_add_i32(tmp, tmp, tmp2);
7609 dead_tmp(tmp2);
9ee6e8bb
PB
7610 }
7611 }
5e3f878a 7612 store_reg(s, rd, tmp);
9ee6e8bb
PB
7613 break;
7614 case 2: /* SIMD add/subtract. */
7615 op = (insn >> 20) & 7;
7616 shift = (insn >> 4) & 7;
7617 if ((op & 3) == 3 || (shift & 3) == 3)
7618 goto illegal_op;
6ddbc6e4
PB
7619 tmp = load_reg(s, rn);
7620 tmp2 = load_reg(s, rm);
7621 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7622 dead_tmp(tmp2);
7623 store_reg(s, rd, tmp);
9ee6e8bb
PB
7624 break;
7625 case 3: /* Other data processing. */
7626 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7627 if (op < 4) {
7628 /* Saturating add/subtract. */
d9ba4830
PB
7629 tmp = load_reg(s, rn);
7630 tmp2 = load_reg(s, rm);
9ee6e8bb 7631 if (op & 2)
d9ba4830 7632 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7633 if (op & 1)
d9ba4830 7634 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7635 else
d9ba4830
PB
7636 gen_helper_add_saturate(tmp, tmp, tmp2);
7637 dead_tmp(tmp2);
9ee6e8bb 7638 } else {
d9ba4830 7639 tmp = load_reg(s, rn);
9ee6e8bb
PB
7640 switch (op) {
7641 case 0x0a: /* rbit */
d9ba4830 7642 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7643 break;
7644 case 0x08: /* rev */
66896cb8 7645 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7646 break;
7647 case 0x09: /* rev16 */
d9ba4830 7648 gen_rev16(tmp);
9ee6e8bb
PB
7649 break;
7650 case 0x0b: /* revsh */
d9ba4830 7651 gen_revsh(tmp);
9ee6e8bb
PB
7652 break;
7653 case 0x10: /* sel */
d9ba4830 7654 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7655 tmp3 = new_tmp();
7656 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7657 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7658 dead_tmp(tmp3);
d9ba4830 7659 dead_tmp(tmp2);
9ee6e8bb
PB
7660 break;
7661 case 0x18: /* clz */
d9ba4830 7662 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7663 break;
7664 default:
7665 goto illegal_op;
7666 }
7667 }
d9ba4830 7668 store_reg(s, rd, tmp);
9ee6e8bb
PB
7669 break;
7670 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7671 op = (insn >> 4) & 0xf;
d9ba4830
PB
7672 tmp = load_reg(s, rn);
7673 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7674 switch ((insn >> 20) & 7) {
7675 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7676 tcg_gen_mul_i32(tmp, tmp, tmp2);
7677 dead_tmp(tmp2);
9ee6e8bb 7678 if (rs != 15) {
d9ba4830 7679 tmp2 = load_reg(s, rs);
9ee6e8bb 7680 if (op)
d9ba4830 7681 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7682 else
d9ba4830
PB
7683 tcg_gen_add_i32(tmp, tmp, tmp2);
7684 dead_tmp(tmp2);
9ee6e8bb 7685 }
9ee6e8bb
PB
7686 break;
7687 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7688 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7689 dead_tmp(tmp2);
9ee6e8bb 7690 if (rs != 15) {
d9ba4830
PB
7691 tmp2 = load_reg(s, rs);
7692 gen_helper_add_setq(tmp, tmp, tmp2);
7693 dead_tmp(tmp2);
9ee6e8bb 7694 }
9ee6e8bb
PB
7695 break;
7696 case 2: /* Dual multiply add. */
7697 case 4: /* Dual multiply subtract. */
7698 if (op)
d9ba4830
PB
7699 gen_swap_half(tmp2);
7700 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7701 /* This addition cannot overflow. */
7702 if (insn & (1 << 22)) {
d9ba4830 7703 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7704 } else {
d9ba4830 7705 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7706 }
d9ba4830 7707 dead_tmp(tmp2);
9ee6e8bb
PB
7708 if (rs != 15)
7709 {
d9ba4830
PB
7710 tmp2 = load_reg(s, rs);
7711 gen_helper_add_setq(tmp, tmp, tmp2);
7712 dead_tmp(tmp2);
9ee6e8bb 7713 }
9ee6e8bb
PB
7714 break;
7715 case 3: /* 32 * 16 -> 32msb */
7716 if (op)
d9ba4830 7717 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7718 else
d9ba4830 7719 gen_sxth(tmp2);
a7812ae4
PB
7720 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7721 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7722 tmp = new_tmp();
a7812ae4 7723 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7724 tcg_temp_free_i64(tmp64);
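                /* tmp64 holds the full 32 x 16 signed product (48
                   significant bits); shifting right by 16 and truncating
                   keeps bits [47:16], i.e. the most-significant word that
                   SMULW/SMLAW write back to the destination register.  */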
9ee6e8bb
PB
7725 if (rs != 15)
7726 {
d9ba4830
PB
7727 tmp2 = load_reg(s, rs);
7728 gen_helper_add_setq(tmp, tmp, tmp2);
7729 dead_tmp(tmp2);
9ee6e8bb 7730 }
9ee6e8bb
PB
7731 break;
7732 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7733 gen_imull(tmp, tmp2);
7734 if (insn & (1 << 5)) {
7735 gen_roundqd(tmp, tmp2);
7736 dead_tmp(tmp2);
7737 } else {
7738 dead_tmp(tmp);
7739 tmp = tmp2;
7740 }
9ee6e8bb 7741 if (rs != 15) {
d9ba4830 7742 tmp2 = load_reg(s, rs);
9ee6e8bb 7743 if (insn & (1 << 21)) {
d9ba4830 7744 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7745 } else {
d9ba4830 7746 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7747 }
d9ba4830 7748 dead_tmp(tmp2);
2c0262af 7749 }
9ee6e8bb
PB
7750 break;
7751 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7752 gen_helper_usad8(tmp, tmp, tmp2);
7753 dead_tmp(tmp2);
9ee6e8bb 7754 if (rs != 15) {
d9ba4830
PB
7755 tmp2 = load_reg(s, rs);
7756 tcg_gen_add_i32(tmp, tmp, tmp2);
7757 dead_tmp(tmp2);
5fd46862 7758 }
9ee6e8bb 7759 break;
2c0262af 7760 }
d9ba4830 7761 store_reg(s, rd, tmp);
2c0262af 7762 break;
9ee6e8bb
PB
7763 case 6: case 7: /* 64-bit multiply, Divide. */
7764 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7765 tmp = load_reg(s, rn);
7766 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7767 if ((op & 0x50) == 0x10) {
7768 /* sdiv, udiv */
7769 if (!arm_feature(env, ARM_FEATURE_DIV))
7770 goto illegal_op;
7771 if (op & 0x20)
5e3f878a 7772 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7773 else
5e3f878a
PB
7774 gen_helper_sdiv(tmp, tmp, tmp2);
7775 dead_tmp(tmp2);
7776 store_reg(s, rd, tmp);
9ee6e8bb
PB
7777 } else if ((op & 0xe) == 0xc) {
7778 /* Dual multiply accumulate long. */
7779 if (op & 1)
5e3f878a
PB
7780 gen_swap_half(tmp2);
7781 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7782 if (op & 0x10) {
5e3f878a 7783 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7784 } else {
5e3f878a 7785 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7786 }
5e3f878a 7787 dead_tmp(tmp2);
a7812ae4
PB
7788 /* BUGFIX */
7789 tmp64 = tcg_temp_new_i64();
7790 tcg_gen_ext_i32_i64(tmp64, tmp);
7791 dead_tmp(tmp);
7792 gen_addq(s, tmp64, rs, rd);
7793 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7794 tcg_temp_free_i64(tmp64);
2c0262af 7795 } else {
9ee6e8bb
PB
7796 if (op & 0x20) {
7797 /* Unsigned 64-bit multiply */
a7812ae4 7798 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7799 } else {
9ee6e8bb
PB
7800 if (op & 8) {
7801 /* smlalxy */
5e3f878a
PB
7802 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7803 dead_tmp(tmp2);
a7812ae4
PB
7804 tmp64 = tcg_temp_new_i64();
7805 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7806 dead_tmp(tmp);
9ee6e8bb
PB
7807 } else {
7808 /* Signed 64-bit multiply */
a7812ae4 7809 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7810 }
b5ff1b31 7811 }
9ee6e8bb
PB
7812 if (op & 4) {
7813 /* umaal */
a7812ae4
PB
7814 gen_addq_lo(s, tmp64, rs);
7815 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7816 } else if (op & 0x40) {
7817 /* 64-bit accumulate. */
a7812ae4 7818 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7819 }
a7812ae4 7820 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7821 tcg_temp_free_i64(tmp64);
5fd46862 7822 }
2c0262af 7823 break;
9ee6e8bb
PB
7824 }
7825 break;
7826 case 6: case 7: case 14: case 15:
7827 /* Coprocessor. */
7828 if (((insn >> 24) & 3) == 3) {
7829 /* Translate into the equivalent ARM encoding. */
7830 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7831 if (disas_neon_data_insn(env, s, insn))
7832 goto illegal_op;
7833 } else {
7834 if (insn & (1 << 28))
7835 goto illegal_op;
7836 if (disas_coproc_insn (env, s, insn))
7837 goto illegal_op;
7838 }
7839 break;
7840 case 8: case 9: case 10: case 11:
7841 if (insn & (1 << 15)) {
7842 /* Branches, misc control. */
7843 if (insn & 0x5000) {
7844 /* Unconditional branch. */
7845 /* signextend(hw1[10:0]) -> offset[:12]. */
7846 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7847 /* hw1[10:0] -> offset[11:1]. */
7848 offset |= (insn & 0x7ff) << 1;
7849 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7850 offset[24:22] already have the same value because of the
7851 sign extension above. */
7852 offset ^= ((~insn) & (1 << 13)) << 10;
7853 offset ^= ((~insn) & (1 << 11)) << 11;
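              /* After the sign extension above, offset[23] and offset[22]
                 both hold S (hw1 bit 10), so XORing with the inverted
                 J1 (insn bit 13) and J2 (insn bit 11) bits leaves
                     offset[23] = NOT(J1 EOR S),  offset[22] = NOT(J2 EOR S),
                 matching the I1/I2 terms of the 32-bit B/BL/BLX
                 immediate encoding.  */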
7854
9ee6e8bb
PB
7855 if (insn & (1 << 14)) {
7856 /* Branch and link. */
3174f8e9 7857 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7858 }
3b46e624 7859
b0109805 7860 offset += s->pc;
9ee6e8bb
PB
7861 if (insn & (1 << 12)) {
7862 /* b/bl */
b0109805 7863 gen_jmp(s, offset);
9ee6e8bb
PB
7864 } else {
7865 /* blx */
b0109805
PB
7866 offset &= ~(uint32_t)2;
7867 gen_bx_im(s, offset);
2c0262af 7868 }
9ee6e8bb
PB
7869 } else if (((insn >> 23) & 7) == 7) {
7870 /* Misc control */
7871 if (insn & (1 << 13))
7872 goto illegal_op;
7873
7874 if (insn & (1 << 26)) {
7875 /* Secure monitor call (v6Z) */
7876 goto illegal_op; /* not implemented. */
2c0262af 7877 } else {
9ee6e8bb
PB
7878 op = (insn >> 20) & 7;
7879 switch (op) {
7880 case 0: /* msr cpsr. */
7881 if (IS_M(env)) {
8984bd2e
PB
7882 tmp = load_reg(s, rn);
7883 addr = tcg_const_i32(insn & 0xff);
7884 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6
JR
7885 tcg_temp_free_i32(addr);
7886 dead_tmp(tmp);
9ee6e8bb
PB
7887 gen_lookup_tb(s);
7888 break;
7889 }
7890 /* fall through */
7891 case 1: /* msr spsr. */
7892 if (IS_M(env))
7893 goto illegal_op;
2fbac54b
FN
7894 tmp = load_reg(s, rn);
7895 if (gen_set_psr(s,
9ee6e8bb 7896 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7897 op == 1, tmp))
9ee6e8bb
PB
7898 goto illegal_op;
7899 break;
7900 case 2: /* cps, nop-hint. */
7901 if (((insn >> 8) & 7) == 0) {
7902 gen_nop_hint(s, insn & 0xff);
7903 }
7904 /* Implemented as NOP in user mode. */
7905 if (IS_USER(s))
7906 break;
7907 offset = 0;
7908 imm = 0;
7909 if (insn & (1 << 10)) {
7910 if (insn & (1 << 7))
7911 offset |= CPSR_A;
7912 if (insn & (1 << 6))
7913 offset |= CPSR_I;
7914 if (insn & (1 << 5))
7915 offset |= CPSR_F;
7916 if (insn & (1 << 9))
7917 imm = CPSR_A | CPSR_I | CPSR_F;
7918 }
7919 if (insn & (1 << 8)) {
7920 offset |= 0x1f;
7921 imm |= (insn & 0x1f);
7922 }
7923 if (offset) {
2fbac54b 7924 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
7925 }
7926 break;
7927 case 3: /* Special control operations. */
426f5abc 7928 ARCH(7);
9ee6e8bb
PB
7929 op = (insn >> 4) & 0xf;
7930 switch (op) {
7931 case 2: /* clrex */
426f5abc 7932 gen_clrex(s);
9ee6e8bb
PB
7933 break;
7934 case 4: /* dsb */
7935 case 5: /* dmb */
7936 case 6: /* isb */
7937 /* These execute as NOPs. */
9ee6e8bb
PB
7938 break;
7939 default:
7940 goto illegal_op;
7941 }
7942 break;
7943 case 4: /* bxj */
7944 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7945 tmp = load_reg(s, rn);
7946 gen_bx(s, tmp);
9ee6e8bb
PB
7947 break;
7948 case 5: /* Exception return. */
7949 /* Unpredictable in user mode. */
7950 goto illegal_op;
7951 case 6: /* mrs cpsr. */
8984bd2e 7952 tmp = new_tmp();
9ee6e8bb 7953 if (IS_M(env)) {
8984bd2e
PB
7954 addr = tcg_const_i32(insn & 0xff);
7955 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 7956 tcg_temp_free_i32(addr);
9ee6e8bb 7957 } else {
8984bd2e 7958 gen_helper_cpsr_read(tmp);
9ee6e8bb 7959 }
8984bd2e 7960 store_reg(s, rd, tmp);
9ee6e8bb
PB
7961 break;
7962 case 7: /* mrs spsr. */
7963 /* Not accessible in user mode. */
7964 if (IS_USER(s) || IS_M(env))
7965 goto illegal_op;
d9ba4830
PB
7966 tmp = load_cpu_field(spsr);
7967 store_reg(s, rd, tmp);
9ee6e8bb 7968 break;
2c0262af
FB
7969 }
7970 }
9ee6e8bb
PB
7971 } else {
7972 /* Conditional branch. */
7973 op = (insn >> 22) & 0xf;
7974 /* Generate a conditional jump to next instruction. */
7975 s->condlabel = gen_new_label();
d9ba4830 7976 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7977 s->condjmp = 1;
7978
7979 /* offset[11:1] = insn[10:0] */
7980 offset = (insn & 0x7ff) << 1;
7981 /* offset[17:12] = insn[21:16]. */
7982 offset |= (insn & 0x003f0000) >> 4;
7983 /* offset[31:20] = insn[26]. */
7984 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7985 /* offset[18] = insn[13]. */
7986 offset |= (insn & (1 << 13)) << 5;
7987 /* offset[19] = insn[11]. */
7988 offset |= (insn & (1 << 11)) << 8;
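                  /* The assembled value is SignExtend(S:J2:J1:imm6:imm11:'0'),
                     giving the +/-1MB range of the 32-bit conditional
                     branch encoding.  */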
7989
7990 /* jump to the offset */
b0109805 7991 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7992 }
7993 } else {
7994 /* Data processing immediate. */
7995 if (insn & (1 << 25)) {
7996 if (insn & (1 << 24)) {
7997 if (insn & (1 << 20))
7998 goto illegal_op;
7999 /* Bitfield/Saturate. */
8000 op = (insn >> 21) & 7;
8001 imm = insn & 0x1f;
8002 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
8003 if (rn == 15) {
8004 tmp = new_tmp();
8005 tcg_gen_movi_i32(tmp, 0);
8006 } else {
8007 tmp = load_reg(s, rn);
8008 }
9ee6e8bb
PB
8009 switch (op) {
8010 case 2: /* Signed bitfield extract. */
8011 imm++;
8012 if (shift + imm > 32)
8013 goto illegal_op;
8014 if (imm < 32)
6ddbc6e4 8015 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8016 break;
8017 case 6: /* Unsigned bitfield extract. */
8018 imm++;
8019 if (shift + imm > 32)
8020 goto illegal_op;
8021 if (imm < 32)
6ddbc6e4 8022 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8023 break;
8024 case 3: /* Bitfield insert/clear. */
8025 if (imm < shift)
8026 goto illegal_op;
8027 imm = imm + 1 - shift;
8028 if (imm != 32) {
6ddbc6e4 8029 tmp2 = load_reg(s, rd);
8f8e3aa4 8030 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 8031 dead_tmp(tmp2);
9ee6e8bb
PB
8032 }
8033 break;
8034 case 7:
8035 goto illegal_op;
8036 default: /* Saturate. */
9ee6e8bb
PB
8037 if (shift) {
8038 if (op & 1)
6ddbc6e4 8039 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8040 else
6ddbc6e4 8041 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8042 }
6ddbc6e4 8043 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8044 if (op & 4) {
8045 /* Unsigned. */
9ee6e8bb 8046 if ((op & 1) && shift == 0)
6ddbc6e4 8047 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8048 else
6ddbc6e4 8049 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8050 } else {
9ee6e8bb 8051 /* Signed. */
9ee6e8bb 8052 if ((op & 1) && shift == 0)
6ddbc6e4 8053 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8054 else
6ddbc6e4 8055 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8056 }
b75263d6 8057 tcg_temp_free_i32(tmp2);
9ee6e8bb 8058 break;
2c0262af 8059 }
6ddbc6e4 8060 store_reg(s, rd, tmp);
9ee6e8bb
PB
8061 } else {
8062 imm = ((insn & 0x04000000) >> 15)
8063 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8064 if (insn & (1 << 22)) {
8065 /* 16-bit immediate. */
8066 imm |= (insn >> 4) & 0xf000;
8067 if (insn & (1 << 23)) {
8068 /* movt */
5e3f878a 8069 tmp = load_reg(s, rd);
86831435 8070 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8071 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8072 } else {
9ee6e8bb 8073 /* movw */
5e3f878a
PB
8074 tmp = new_tmp();
8075 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8076 }
8077 } else {
9ee6e8bb
PB
8078 /* Add/sub 12-bit immediate. */
8079 if (rn == 15) {
b0109805 8080 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8081 if (insn & (1 << 23))
b0109805 8082 offset -= imm;
9ee6e8bb 8083 else
b0109805 8084 offset += imm;
5e3f878a
PB
8085 tmp = new_tmp();
8086 tcg_gen_movi_i32(tmp, offset);
2c0262af 8087 } else {
5e3f878a 8088 tmp = load_reg(s, rn);
9ee6e8bb 8089 if (insn & (1 << 23))
5e3f878a 8090 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8091 else
5e3f878a 8092 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8093 }
9ee6e8bb 8094 }
5e3f878a 8095 store_reg(s, rd, tmp);
191abaa2 8096 }
9ee6e8bb
PB
8097 } else {
8098 int shifter_out = 0;
8099 /* modified 12-bit immediate. */
8100 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8101 imm = (insn & 0xff);
8102 switch (shift) {
8103 case 0: /* XY */
8104 /* Nothing to do. */
8105 break;
8106 case 1: /* 00XY00XY */
8107 imm |= imm << 16;
8108 break;
8109 case 2: /* XY00XY00 */
8110 imm |= imm << 16;
8111 imm <<= 8;
8112 break;
8113 case 3: /* XYXYXYXY */
8114 imm |= imm << 16;
8115 imm |= imm << 8;
8116 break;
8117 default: /* Rotated constant. */
8118 shift = (shift << 1) | (imm >> 7);
8119 imm |= 0x80;
8120 imm = imm << (32 - shift);
8121 shifter_out = 1;
8122 break;
b5ff1b31 8123 }
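              /* Worked example: i:imm3 = 0b1000 and imm8 = 0x60 give
                 shift = (0b1000 << 1) | 0 = 16 and imm = 0x60 | 0x80 = 0xe0,
                 so the expanded constant is 0xe0 << (32 - 16) = 0x00e00000,
                 i.e. 0x000000e0 rotated right by 16 bits.  */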
3174f8e9
FN
8124 tmp2 = new_tmp();
8125 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8126 rn = (insn >> 16) & 0xf;
3174f8e9
FN
8127 if (rn == 15) {
8128 tmp = new_tmp();
8129 tcg_gen_movi_i32(tmp, 0);
8130 } else {
8131 tmp = load_reg(s, rn);
8132 }
9ee6e8bb
PB
8133 op = (insn >> 21) & 0xf;
8134 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8135 shifter_out, tmp, tmp2))
9ee6e8bb 8136 goto illegal_op;
3174f8e9 8137 dead_tmp(tmp2);
9ee6e8bb
PB
8138 rd = (insn >> 8) & 0xf;
8139 if (rd != 15) {
3174f8e9
FN
8140 store_reg(s, rd, tmp);
8141 } else {
8142 dead_tmp(tmp);
2c0262af 8143 }
2c0262af 8144 }
9ee6e8bb
PB
8145 }
8146 break;
8147 case 12: /* Load/store single data item. */
8148 {
8149 int postinc = 0;
8150 int writeback = 0;
b0109805 8151 int user;
9ee6e8bb
PB
8152 if ((insn & 0x01100000) == 0x01000000) {
8153 if (disas_neon_ls_insn(env, s, insn))
c1713132 8154 goto illegal_op;
9ee6e8bb
PB
8155 break;
8156 }
b0109805 8157 user = IS_USER(s);
9ee6e8bb 8158 if (rn == 15) {
b0109805 8159 addr = new_tmp();
9ee6e8bb
PB
8160 /* PC relative. */
8161 /* s->pc has already been incremented by 4. */
8162 imm = s->pc & 0xfffffffc;
8163 if (insn & (1 << 23))
8164 imm += insn & 0xfff;
8165 else
8166 imm -= insn & 0xfff;
b0109805 8167 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8168 } else {
b0109805 8169 addr = load_reg(s, rn);
9ee6e8bb
PB
8170 if (insn & (1 << 23)) {
8171 /* Positive offset. */
8172 imm = insn & 0xfff;
b0109805 8173 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8174 } else {
8175 op = (insn >> 8) & 7;
8176 imm = insn & 0xff;
8177 switch (op) {
 8178                 case 0: case 8: /* Shifted register. */
8179 shift = (insn >> 4) & 0xf;
8180 if (shift > 3)
18c9b560 8181 goto illegal_op;
b26eefb6 8182 tmp = load_reg(s, rm);
9ee6e8bb 8183 if (shift)
b26eefb6 8184 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8185 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8186 dead_tmp(tmp);
9ee6e8bb
PB
8187 break;
8188 case 4: /* Negative offset. */
b0109805 8189 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8190 break;
8191 case 6: /* User privilege. */
b0109805
PB
8192 tcg_gen_addi_i32(addr, addr, imm);
8193 user = 1;
9ee6e8bb
PB
8194 break;
8195 case 1: /* Post-decrement. */
8196 imm = -imm;
8197 /* Fall through. */
8198 case 3: /* Post-increment. */
9ee6e8bb
PB
8199 postinc = 1;
8200 writeback = 1;
8201 break;
8202 case 5: /* Pre-decrement. */
8203 imm = -imm;
8204 /* Fall through. */
8205 case 7: /* Pre-increment. */
b0109805 8206 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8207 writeback = 1;
8208 break;
8209 default:
b7bcbe95 8210 goto illegal_op;
9ee6e8bb
PB
8211 }
8212 }
8213 }
8214 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
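          /* op now encodes size in bits [1:0] and signedness in bit 2:
             0 = unsigned byte, 1 = unsigned halfword, 2 = word,
             4 = signed byte, 5 = signed halfword.  */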
8215 if (insn & (1 << 20)) {
8216 /* Load. */
8217 if (rs == 15 && op != 2) {
8218 if (op & 2)
b5ff1b31 8219 goto illegal_op;
9ee6e8bb
PB
8220 /* Memory hint. Implemented as NOP. */
8221 } else {
8222 switch (op) {
b0109805
PB
8223 case 0: tmp = gen_ld8u(addr, user); break;
8224 case 4: tmp = gen_ld8s(addr, user); break;
8225 case 1: tmp = gen_ld16u(addr, user); break;
8226 case 5: tmp = gen_ld16s(addr, user); break;
8227 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8228 default: goto illegal_op;
8229 }
8230 if (rs == 15) {
b0109805 8231 gen_bx(s, tmp);
9ee6e8bb 8232 } else {
b0109805 8233 store_reg(s, rs, tmp);
9ee6e8bb
PB
8234 }
8235 }
8236 } else {
8237 /* Store. */
8238 if (rs == 15)
b7bcbe95 8239 goto illegal_op;
b0109805 8240 tmp = load_reg(s, rs);
9ee6e8bb 8241 switch (op) {
b0109805
PB
8242 case 0: gen_st8(tmp, addr, user); break;
8243 case 1: gen_st16(tmp, addr, user); break;
8244 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8245 default: goto illegal_op;
b7bcbe95 8246 }
2c0262af 8247 }
9ee6e8bb 8248 if (postinc)
b0109805
PB
8249 tcg_gen_addi_i32(addr, addr, imm);
8250 if (writeback) {
8251 store_reg(s, rn, addr);
8252 } else {
8253 dead_tmp(addr);
8254 }
9ee6e8bb
PB
8255 }
8256 break;
8257 default:
8258 goto illegal_op;
2c0262af 8259 }
9ee6e8bb
PB
8260 return 0;
8261illegal_op:
8262 return 1;
2c0262af
FB
8263}
8264
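
/* Illustrative sketch only: a self-contained version of the Thumb-2
   "modified 12-bit immediate" expansion that the data-processing-immediate
   path above performs inline.  The helper name and its plain i:imm3:imm8
   argument are hypothetical; the translator itself never calls this
   function.  */
static inline uint32_t thumb_expand_imm_sketch(uint32_t imm12)
{
    uint32_t imm8 = imm12 & 0xff;
    uint32_t rot;

    switch ((imm12 >> 8) & 0xf) {
    case 0: /* 000000XY */
        return imm8;
    case 1: /* 00XY00XY */
        return imm8 | (imm8 << 16);
    case 2: /* XY00XY00 */
        return (imm8 << 8) | (imm8 << 24);
    case 3: /* XYXYXYXY */
        return imm8 | (imm8 << 8) | (imm8 << 16) | (imm8 << 24);
    default: /* 1abcdefg rotated right by imm12[11:7] (8 <= rot <= 31) */
        rot = (imm12 >> 7) & 0x1f;
        imm8 = 0x80 | (imm8 & 0x7f);
        return (imm8 >> rot) | (imm8 << (32 - rot));
    }
}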
9ee6e8bb 8265static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8266{
8267 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8268 int32_t offset;
8269 int i;
b26eefb6 8270 TCGv tmp;
d9ba4830 8271 TCGv tmp2;
b0109805 8272 TCGv addr;
99c475ab 8273
9ee6e8bb
PB
8274 if (s->condexec_mask) {
8275 cond = s->condexec_cond;
8276 s->condlabel = gen_new_label();
d9ba4830 8277 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8278 s->condjmp = 1;
8279 }
8280
b5ff1b31 8281 insn = lduw_code(s->pc);
99c475ab 8282 s->pc += 2;
b5ff1b31 8283
99c475ab
FB
8284 switch (insn >> 12) {
8285 case 0: case 1:
396e467c 8286
99c475ab
FB
8287 rd = insn & 7;
8288 op = (insn >> 11) & 3;
8289 if (op == 3) {
8290 /* add/subtract */
8291 rn = (insn >> 3) & 7;
396e467c 8292 tmp = load_reg(s, rn);
99c475ab
FB
8293 if (insn & (1 << 10)) {
8294 /* immediate */
396e467c
FN
8295 tmp2 = new_tmp();
8296 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8297 } else {
8298 /* reg */
8299 rm = (insn >> 6) & 7;
396e467c 8300 tmp2 = load_reg(s, rm);
99c475ab 8301 }
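            /* Inside an IT block the 16-bit ADD/SUB encodings do not set
               the flags, so the plain TCG add/sub is emitted; outside an
               IT block the flag-setting helpers are used instead.  */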
9ee6e8bb
PB
8302 if (insn & (1 << 9)) {
8303 if (s->condexec_mask)
396e467c 8304 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8305 else
396e467c 8306 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8307 } else {
8308 if (s->condexec_mask)
396e467c 8309 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8310 else
396e467c 8311 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8312 }
396e467c
FN
8313 dead_tmp(tmp2);
8314 store_reg(s, rd, tmp);
99c475ab
FB
8315 } else {
8316 /* shift immediate */
8317 rm = (insn >> 3) & 7;
8318 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8319 tmp = load_reg(s, rm);
8320 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8321 if (!s->condexec_mask)
8322 gen_logic_CC(tmp);
8323 store_reg(s, rd, tmp);
99c475ab
FB
8324 }
8325 break;
8326 case 2: case 3:
8327 /* arithmetic large immediate */
8328 op = (insn >> 11) & 3;
8329 rd = (insn >> 8) & 0x7;
396e467c
FN
8330 if (op == 0) { /* mov */
8331 tmp = new_tmp();
8332 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8333 if (!s->condexec_mask)
396e467c
FN
8334 gen_logic_CC(tmp);
8335 store_reg(s, rd, tmp);
8336 } else {
8337 tmp = load_reg(s, rd);
8338 tmp2 = new_tmp();
8339 tcg_gen_movi_i32(tmp2, insn & 0xff);
8340 switch (op) {
8341 case 1: /* cmp */
8342 gen_helper_sub_cc(tmp, tmp, tmp2);
8343 dead_tmp(tmp);
8344 dead_tmp(tmp2);
8345 break;
8346 case 2: /* add */
8347 if (s->condexec_mask)
8348 tcg_gen_add_i32(tmp, tmp, tmp2);
8349 else
8350 gen_helper_add_cc(tmp, tmp, tmp2);
8351 dead_tmp(tmp2);
8352 store_reg(s, rd, tmp);
8353 break;
8354 case 3: /* sub */
8355 if (s->condexec_mask)
8356 tcg_gen_sub_i32(tmp, tmp, tmp2);
8357 else
8358 gen_helper_sub_cc(tmp, tmp, tmp2);
8359 dead_tmp(tmp2);
8360 store_reg(s, rd, tmp);
8361 break;
8362 }
99c475ab 8363 }
99c475ab
FB
8364 break;
8365 case 4:
8366 if (insn & (1 << 11)) {
8367 rd = (insn >> 8) & 7;
5899f386
FB
8368 /* load pc-relative. Bit 1 of PC is ignored. */
8369 val = s->pc + 2 + ((insn & 0xff) * 4);
8370 val &= ~(uint32_t)2;
b0109805
PB
8371 addr = new_tmp();
8372 tcg_gen_movi_i32(addr, val);
8373 tmp = gen_ld32(addr, IS_USER(s));
8374 dead_tmp(addr);
8375 store_reg(s, rd, tmp);
99c475ab
FB
8376 break;
8377 }
8378 if (insn & (1 << 10)) {
8379 /* data processing extended or blx */
8380 rd = (insn & 7) | ((insn >> 4) & 8);
8381 rm = (insn >> 3) & 0xf;
8382 op = (insn >> 8) & 3;
8383 switch (op) {
8384 case 0: /* add */
396e467c
FN
8385 tmp = load_reg(s, rd);
8386 tmp2 = load_reg(s, rm);
8387 tcg_gen_add_i32(tmp, tmp, tmp2);
8388 dead_tmp(tmp2);
8389 store_reg(s, rd, tmp);
99c475ab
FB
8390 break;
8391 case 1: /* cmp */
396e467c
FN
8392 tmp = load_reg(s, rd);
8393 tmp2 = load_reg(s, rm);
8394 gen_helper_sub_cc(tmp, tmp, tmp2);
8395 dead_tmp(tmp2);
8396 dead_tmp(tmp);
99c475ab
FB
8397 break;
8398 case 2: /* mov/cpy */
396e467c
FN
8399 tmp = load_reg(s, rm);
8400 store_reg(s, rd, tmp);
99c475ab
FB
8401 break;
8402 case 3:/* branch [and link] exchange thumb register */
b0109805 8403 tmp = load_reg(s, rm);
99c475ab
FB
8404 if (insn & (1 << 7)) {
8405 val = (uint32_t)s->pc | 1;
b0109805
PB
8406 tmp2 = new_tmp();
8407 tcg_gen_movi_i32(tmp2, val);
8408 store_reg(s, 14, tmp2);
99c475ab 8409 }
d9ba4830 8410 gen_bx(s, tmp);
99c475ab
FB
8411 break;
8412 }
8413 break;
8414 }
8415
8416 /* data processing register */
8417 rd = insn & 7;
8418 rm = (insn >> 3) & 7;
8419 op = (insn >> 6) & 0xf;
8420 if (op == 2 || op == 3 || op == 4 || op == 7) {
8421 /* the shift/rotate ops want the operands backwards */
8422 val = rm;
8423 rm = rd;
8424 rd = val;
8425 val = 1;
8426 } else {
8427 val = 0;
8428 }
8429
396e467c
FN
8430 if (op == 9) { /* neg */
8431 tmp = new_tmp();
8432 tcg_gen_movi_i32(tmp, 0);
8433 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8434 tmp = load_reg(s, rd);
8435 } else {
8436 TCGV_UNUSED(tmp);
8437 }
99c475ab 8438
396e467c 8439 tmp2 = load_reg(s, rm);
5899f386 8440 switch (op) {
99c475ab 8441 case 0x0: /* and */
396e467c 8442 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8443 if (!s->condexec_mask)
396e467c 8444 gen_logic_CC(tmp);
99c475ab
FB
8445 break;
8446 case 0x1: /* eor */
396e467c 8447 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8448 if (!s->condexec_mask)
396e467c 8449 gen_logic_CC(tmp);
99c475ab
FB
8450 break;
8451 case 0x2: /* lsl */
9ee6e8bb 8452 if (s->condexec_mask) {
396e467c 8453 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8454 } else {
396e467c
FN
8455 gen_helper_shl_cc(tmp2, tmp2, tmp);
8456 gen_logic_CC(tmp2);
9ee6e8bb 8457 }
99c475ab
FB
8458 break;
8459 case 0x3: /* lsr */
9ee6e8bb 8460 if (s->condexec_mask) {
396e467c 8461 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8462 } else {
396e467c
FN
8463 gen_helper_shr_cc(tmp2, tmp2, tmp);
8464 gen_logic_CC(tmp2);
9ee6e8bb 8465 }
99c475ab
FB
8466 break;
8467 case 0x4: /* asr */
9ee6e8bb 8468 if (s->condexec_mask) {
396e467c 8469 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8470 } else {
396e467c
FN
8471 gen_helper_sar_cc(tmp2, tmp2, tmp);
8472 gen_logic_CC(tmp2);
9ee6e8bb 8473 }
99c475ab
FB
8474 break;
8475 case 0x5: /* adc */
9ee6e8bb 8476 if (s->condexec_mask)
396e467c 8477 gen_adc(tmp, tmp2);
9ee6e8bb 8478 else
396e467c 8479 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8480 break;
8481 case 0x6: /* sbc */
9ee6e8bb 8482 if (s->condexec_mask)
396e467c 8483 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8484 else
396e467c 8485 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8486 break;
8487 case 0x7: /* ror */
9ee6e8bb 8488 if (s->condexec_mask) {
f669df27
AJ
8489 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8490 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8491 } else {
396e467c
FN
8492 gen_helper_ror_cc(tmp2, tmp2, tmp);
8493 gen_logic_CC(tmp2);
9ee6e8bb 8494 }
99c475ab
FB
8495 break;
8496 case 0x8: /* tst */
396e467c
FN
8497 tcg_gen_and_i32(tmp, tmp, tmp2);
8498 gen_logic_CC(tmp);
99c475ab 8499 rd = 16;
5899f386 8500 break;
99c475ab 8501 case 0x9: /* neg */
9ee6e8bb 8502 if (s->condexec_mask)
396e467c 8503 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8504 else
396e467c 8505 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8506 break;
8507 case 0xa: /* cmp */
396e467c 8508 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8509 rd = 16;
8510 break;
8511 case 0xb: /* cmn */
396e467c 8512 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8513 rd = 16;
8514 break;
8515 case 0xc: /* orr */
396e467c 8516 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8517 if (!s->condexec_mask)
396e467c 8518 gen_logic_CC(tmp);
99c475ab
FB
8519 break;
8520 case 0xd: /* mul */
7b2919a0 8521 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8522 if (!s->condexec_mask)
396e467c 8523 gen_logic_CC(tmp);
99c475ab
FB
8524 break;
8525 case 0xe: /* bic */
f669df27 8526 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8527 if (!s->condexec_mask)
396e467c 8528 gen_logic_CC(tmp);
99c475ab
FB
8529 break;
8530 case 0xf: /* mvn */
396e467c 8531 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8532 if (!s->condexec_mask)
396e467c 8533 gen_logic_CC(tmp2);
99c475ab 8534 val = 1;
5899f386 8535 rm = rd;
99c475ab
FB
8536 break;
8537 }
8538 if (rd != 16) {
396e467c
FN
8539 if (val) {
8540 store_reg(s, rm, tmp2);
8541 if (op != 0xf)
8542 dead_tmp(tmp);
8543 } else {
8544 store_reg(s, rd, tmp);
8545 dead_tmp(tmp2);
8546 }
8547 } else {
8548 dead_tmp(tmp);
8549 dead_tmp(tmp2);
99c475ab
FB
8550 }
8551 break;
8552
8553 case 5:
8554 /* load/store register offset. */
8555 rd = insn & 7;
8556 rn = (insn >> 3) & 7;
8557 rm = (insn >> 6) & 7;
8558 op = (insn >> 9) & 7;
b0109805 8559 addr = load_reg(s, rn);
b26eefb6 8560 tmp = load_reg(s, rm);
b0109805 8561 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8562 dead_tmp(tmp);
99c475ab
FB
8563
8564 if (op < 3) /* store */
b0109805 8565 tmp = load_reg(s, rd);
99c475ab
FB
8566
8567 switch (op) {
8568 case 0: /* str */
b0109805 8569 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8570 break;
8571 case 1: /* strh */
b0109805 8572 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8573 break;
8574 case 2: /* strb */
b0109805 8575 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8576 break;
8577 case 3: /* ldrsb */
b0109805 8578 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8579 break;
8580 case 4: /* ldr */
b0109805 8581 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8582 break;
8583 case 5: /* ldrh */
b0109805 8584 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8585 break;
8586 case 6: /* ldrb */
b0109805 8587 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8588 break;
8589 case 7: /* ldrsh */
b0109805 8590 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8591 break;
8592 }
8593 if (op >= 3) /* load */
b0109805
PB
8594 store_reg(s, rd, tmp);
8595 dead_tmp(addr);
99c475ab
FB
8596 break;
8597
8598 case 6:
8599 /* load/store word immediate offset */
8600 rd = insn & 7;
8601 rn = (insn >> 3) & 7;
b0109805 8602 addr = load_reg(s, rn);
99c475ab 8603 val = (insn >> 4) & 0x7c;
b0109805 8604 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8605
8606 if (insn & (1 << 11)) {
8607 /* load */
b0109805
PB
8608 tmp = gen_ld32(addr, IS_USER(s));
8609 store_reg(s, rd, tmp);
99c475ab
FB
8610 } else {
8611 /* store */
b0109805
PB
8612 tmp = load_reg(s, rd);
8613 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8614 }
b0109805 8615 dead_tmp(addr);
99c475ab
FB
8616 break;
8617
8618 case 7:
8619 /* load/store byte immediate offset */
8620 rd = insn & 7;
8621 rn = (insn >> 3) & 7;
b0109805 8622 addr = load_reg(s, rn);
99c475ab 8623 val = (insn >> 6) & 0x1f;
b0109805 8624 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8625
8626 if (insn & (1 << 11)) {
8627 /* load */
b0109805
PB
8628 tmp = gen_ld8u(addr, IS_USER(s));
8629 store_reg(s, rd, tmp);
99c475ab
FB
8630 } else {
8631 /* store */
b0109805
PB
8632 tmp = load_reg(s, rd);
8633 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8634 }
b0109805 8635 dead_tmp(addr);
99c475ab
FB
8636 break;
8637
8638 case 8:
8639 /* load/store halfword immediate offset */
8640 rd = insn & 7;
8641 rn = (insn >> 3) & 7;
b0109805 8642 addr = load_reg(s, rn);
99c475ab 8643 val = (insn >> 5) & 0x3e;
b0109805 8644 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8645
8646 if (insn & (1 << 11)) {
8647 /* load */
b0109805
PB
8648 tmp = gen_ld16u(addr, IS_USER(s));
8649 store_reg(s, rd, tmp);
99c475ab
FB
8650 } else {
8651 /* store */
b0109805
PB
8652 tmp = load_reg(s, rd);
8653 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8654 }
b0109805 8655 dead_tmp(addr);
99c475ab
FB
8656 break;
8657
8658 case 9:
8659 /* load/store from stack */
8660 rd = (insn >> 8) & 7;
b0109805 8661 addr = load_reg(s, 13);
99c475ab 8662 val = (insn & 0xff) * 4;
b0109805 8663 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8664
8665 if (insn & (1 << 11)) {
8666 /* load */
b0109805
PB
8667 tmp = gen_ld32(addr, IS_USER(s));
8668 store_reg(s, rd, tmp);
99c475ab
FB
8669 } else {
8670 /* store */
b0109805
PB
8671 tmp = load_reg(s, rd);
8672 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8673 }
b0109805 8674 dead_tmp(addr);
99c475ab
FB
8675 break;
8676
8677 case 10:
8678 /* add to high reg */
8679 rd = (insn >> 8) & 7;
5899f386
FB
8680 if (insn & (1 << 11)) {
8681 /* SP */
5e3f878a 8682 tmp = load_reg(s, 13);
5899f386
FB
8683 } else {
8684 /* PC. bit 1 is ignored. */
5e3f878a
PB
8685 tmp = new_tmp();
8686 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8687 }
99c475ab 8688 val = (insn & 0xff) * 4;
5e3f878a
PB
8689 tcg_gen_addi_i32(tmp, tmp, val);
8690 store_reg(s, rd, tmp);
99c475ab
FB
8691 break;
8692
8693 case 11:
8694 /* misc */
8695 op = (insn >> 8) & 0xf;
8696 switch (op) {
8697 case 0:
8698 /* adjust stack pointer */
b26eefb6 8699 tmp = load_reg(s, 13);
99c475ab
FB
8700 val = (insn & 0x7f) * 4;
8701 if (insn & (1 << 7))
6a0d8a1d 8702 val = -(int32_t)val;
b26eefb6
PB
8703 tcg_gen_addi_i32(tmp, tmp, val);
8704 store_reg(s, 13, tmp);
99c475ab
FB
8705 break;
8706
9ee6e8bb
PB
8707 case 2: /* sign/zero extend. */
8708 ARCH(6);
8709 rd = insn & 7;
8710 rm = (insn >> 3) & 7;
b0109805 8711 tmp = load_reg(s, rm);
9ee6e8bb 8712 switch ((insn >> 6) & 3) {
b0109805
PB
8713 case 0: gen_sxth(tmp); break;
8714 case 1: gen_sxtb(tmp); break;
8715 case 2: gen_uxth(tmp); break;
8716 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8717 }
b0109805 8718 store_reg(s, rd, tmp);
9ee6e8bb 8719 break;
99c475ab
FB
8720 case 4: case 5: case 0xc: case 0xd:
8721 /* push/pop */
b0109805 8722 addr = load_reg(s, 13);
5899f386
FB
8723 if (insn & (1 << 8))
8724 offset = 4;
99c475ab 8725 else
5899f386
FB
8726 offset = 0;
8727 for (i = 0; i < 8; i++) {
8728 if (insn & (1 << i))
8729 offset += 4;
8730 }
8731 if ((insn & (1 << 11)) == 0) {
b0109805 8732 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8733 }
99c475ab
FB
8734 for (i = 0; i < 8; i++) {
8735 if (insn & (1 << i)) {
8736 if (insn & (1 << 11)) {
8737 /* pop */
b0109805
PB
8738 tmp = gen_ld32(addr, IS_USER(s));
8739 store_reg(s, i, tmp);
99c475ab
FB
8740 } else {
8741 /* push */
b0109805
PB
8742 tmp = load_reg(s, i);
8743 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8744 }
5899f386 8745 /* advance to the next address. */
b0109805 8746 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8747 }
8748 }
a50f5b91 8749 TCGV_UNUSED(tmp);
99c475ab
FB
8750 if (insn & (1 << 8)) {
8751 if (insn & (1 << 11)) {
8752 /* pop pc */
b0109805 8753 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8754 /* don't set the pc until the rest of the instruction
8755 has completed */
8756 } else {
8757 /* push lr */
b0109805
PB
8758 tmp = load_reg(s, 14);
8759 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8760 }
b0109805 8761 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8762 }
5899f386 8763 if ((insn & (1 << 11)) == 0) {
b0109805 8764 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8765 }
99c475ab 8766 /* write back the new stack pointer */
b0109805 8767 store_reg(s, 13, addr);
99c475ab
FB
8768 /* set the new PC value */
8769 if ((insn & 0x0900) == 0x0900)
b0109805 8770 gen_bx(s, tmp);
99c475ab
FB
8771 break;
8772
9ee6e8bb
PB
 8773         case 1: case 3: case 9: case 11: /* cbz/cbnz */
8774 rm = insn & 7;
d9ba4830 8775 tmp = load_reg(s, rm);
9ee6e8bb
PB
8776 s->condlabel = gen_new_label();
8777 s->condjmp = 1;
8778 if (insn & (1 << 11))
cb63669a 8779 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8780 else
cb63669a 8781 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8782 dead_tmp(tmp);
9ee6e8bb
PB
8783 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8784 val = (uint32_t)s->pc + 2;
8785 val += offset;
8786 gen_jmp(s, val);
8787 break;
8788
8789 case 15: /* IT, nop-hint. */
8790 if ((insn & 0xf) == 0) {
8791 gen_nop_hint(s, (insn >> 4) & 0xf);
8792 break;
8793 }
8794 /* If Then. */
8795 s->condexec_cond = (insn >> 4) & 0xe;
8796 s->condexec_mask = insn & 0x1f;
8797 /* No actual code generated for this insn, just setup state. */
8798 break;
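            /* condexec_cond keeps only bits [3:1] of firstcond; bit 4 of
               condexec_mask carries firstcond[0] together with the T/E
               pattern, and the translation loop below shifts the mask left
               after each instruction in the block.  */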
8799
06c949e6 8800 case 0xe: /* bkpt */
9ee6e8bb 8801 gen_set_condexec(s);
5e3f878a 8802 gen_set_pc_im(s->pc - 2);
d9ba4830 8803 gen_exception(EXCP_BKPT);
06c949e6
PB
8804 s->is_jmp = DISAS_JUMP;
8805 break;
8806
9ee6e8bb
PB
8807 case 0xa: /* rev */
8808 ARCH(6);
8809 rn = (insn >> 3) & 0x7;
8810 rd = insn & 0x7;
b0109805 8811 tmp = load_reg(s, rn);
9ee6e8bb 8812 switch ((insn >> 6) & 3) {
66896cb8 8813 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8814 case 1: gen_rev16(tmp); break;
8815 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8816 default: goto illegal_op;
8817 }
b0109805 8818 store_reg(s, rd, tmp);
9ee6e8bb
PB
8819 break;
8820
8821 case 6: /* cps */
8822 ARCH(6);
8823 if (IS_USER(s))
8824 break;
8825 if (IS_M(env)) {
8984bd2e 8826 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8827 /* PRIMASK */
8984bd2e
PB
8828 if (insn & 1) {
8829 addr = tcg_const_i32(16);
8830 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8831 tcg_temp_free_i32(addr);
8984bd2e 8832 }
9ee6e8bb 8833 /* FAULTMASK */
8984bd2e
PB
8834 if (insn & 2) {
8835 addr = tcg_const_i32(17);
8836 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8837 tcg_temp_free_i32(addr);
8984bd2e 8838 }
b75263d6 8839 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8840 gen_lookup_tb(s);
8841 } else {
8842 if (insn & (1 << 4))
8843 shift = CPSR_A | CPSR_I | CPSR_F;
8844 else
8845 shift = 0;
2fbac54b 8846 gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
9ee6e8bb
PB
8847 }
8848 break;
8849
99c475ab
FB
8850 default:
8851 goto undef;
8852 }
8853 break;
8854
8855 case 12:
8856 /* load/store multiple */
8857 rn = (insn >> 8) & 0x7;
b0109805 8858 addr = load_reg(s, rn);
99c475ab
FB
8859 for (i = 0; i < 8; i++) {
8860 if (insn & (1 << i)) {
99c475ab
FB
8861 if (insn & (1 << 11)) {
8862 /* load */
b0109805
PB
8863 tmp = gen_ld32(addr, IS_USER(s));
8864 store_reg(s, i, tmp);
99c475ab
FB
8865 } else {
8866 /* store */
b0109805
PB
8867 tmp = load_reg(s, i);
8868 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8869 }
5899f386 8870 /* advance to the next address */
b0109805 8871 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8872 }
8873 }
5899f386 8874 /* Base register writeback. */
b0109805
PB
8875 if ((insn & (1 << rn)) == 0) {
8876 store_reg(s, rn, addr);
8877 } else {
8878 dead_tmp(addr);
8879 }
99c475ab
FB
8880 break;
8881
8882 case 13:
8883 /* conditional branch or swi */
8884 cond = (insn >> 8) & 0xf;
8885 if (cond == 0xe)
8886 goto undef;
8887
8888 if (cond == 0xf) {
8889 /* swi */
9ee6e8bb 8890 gen_set_condexec(s);
422ebf69 8891 gen_set_pc_im(s->pc);
9ee6e8bb 8892 s->is_jmp = DISAS_SWI;
99c475ab
FB
8893 break;
8894 }
8895 /* generate a conditional jump to next instruction */
e50e6a20 8896 s->condlabel = gen_new_label();
d9ba4830 8897 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8898 s->condjmp = 1;
99c475ab
FB
8899
8900 /* jump to the offset */
5899f386 8901 val = (uint32_t)s->pc + 2;
99c475ab 8902 offset = ((int32_t)insn << 24) >> 24;
5899f386 8903 val += offset << 1;
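        /* s->pc already points past this 16-bit insn, so val is
           (insn address + 4) + SignExtend(imm8:'0'), the architectural
           PC-relative target of a conditional B.  */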
8aaca4c0 8904 gen_jmp(s, val);
99c475ab
FB
8905 break;
8906
8907 case 14:
358bf29e 8908 if (insn & (1 << 11)) {
9ee6e8bb
PB
8909 if (disas_thumb2_insn(env, s, insn))
8910 goto undef32;
358bf29e
PB
8911 break;
8912 }
9ee6e8bb 8913 /* unconditional branch */
99c475ab
FB
8914 val = (uint32_t)s->pc;
8915 offset = ((int32_t)insn << 21) >> 21;
8916 val += (offset << 1) + 2;
8aaca4c0 8917 gen_jmp(s, val);
99c475ab
FB
8918 break;
8919
8920 case 15:
9ee6e8bb 8921 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8922 goto undef32;
9ee6e8bb 8923 break;
99c475ab
FB
8924 }
8925 return;
9ee6e8bb
PB
8926undef32:
8927 gen_set_condexec(s);
5e3f878a 8928 gen_set_pc_im(s->pc - 4);
d9ba4830 8929 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8930 s->is_jmp = DISAS_JUMP;
8931 return;
8932illegal_op:
99c475ab 8933undef:
9ee6e8bb 8934 gen_set_condexec(s);
5e3f878a 8935 gen_set_pc_im(s->pc - 2);
d9ba4830 8936 gen_exception(EXCP_UDEF);
99c475ab
FB
8937 s->is_jmp = DISAS_JUMP;
8938}
8939
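
/* Illustrative sketch only (hypothetical helper, not called anywhere in
   this file): how the IT-block state advances after each Thumb instruction,
   mirroring the update performed in the translation loop below.  Bit 4 of
   the mask supplies the low bit of the next condition, so a "T" slot keeps
   the base condition, an "E" slot inverts it, and the block ends when the
   mask becomes zero.  */
static inline void advance_it_state_sketch(int *cond, int *mask)
{
    *cond = (*cond & 0xe) | ((*mask >> 4) & 1);
    *mask = (*mask << 1) & 0x1f;
    if (*mask == 0) {
        *cond = 0;
    }
}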
2c0262af
FB
8940/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8941 basic block 'tb'. If search_pc is TRUE, also generate PC
8942 information for each intermediate instruction. */
2cfc5f17
TS
8943static inline void gen_intermediate_code_internal(CPUState *env,
8944 TranslationBlock *tb,
8945 int search_pc)
2c0262af
FB
8946{
8947 DisasContext dc1, *dc = &dc1;
a1d1bb31 8948 CPUBreakpoint *bp;
2c0262af
FB
8949 uint16_t *gen_opc_end;
8950 int j, lj;
0fa85d43 8951 target_ulong pc_start;
b5ff1b31 8952 uint32_t next_page_start;
2e70f6ef
PB
8953 int num_insns;
8954 int max_insns;
3b46e624 8955
2c0262af 8956 /* generate intermediate code */
b26eefb6 8957 num_temps = 0;
b26eefb6 8958
0fa85d43 8959 pc_start = tb->pc;
3b46e624 8960
2c0262af
FB
8961 dc->tb = tb;
8962
2c0262af 8963 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8964
8965 dc->is_jmp = DISAS_NEXT;
8966 dc->pc = pc_start;
8aaca4c0 8967 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8968 dc->condjmp = 0;
5899f386 8969 dc->thumb = env->thumb;
9ee6e8bb
PB
8970 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8971 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 8972#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8973 if (IS_M(env)) {
8974 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8975 } else {
8976 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8977 }
b5ff1b31 8978#endif
a7812ae4
PB
8979 cpu_F0s = tcg_temp_new_i32();
8980 cpu_F1s = tcg_temp_new_i32();
8981 cpu_F0d = tcg_temp_new_i64();
8982 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8983 cpu_V0 = cpu_F0d;
8984 cpu_V1 = cpu_F1d;
e677137d 8985 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8986 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8987 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8988 lj = -1;
2e70f6ef
PB
8989 num_insns = 0;
8990 max_insns = tb->cflags & CF_COUNT_MASK;
8991 if (max_insns == 0)
8992 max_insns = CF_COUNT_MASK;
8993
8994 gen_icount_start();
9ee6e8bb
PB
8995 /* Reset the conditional execution bits immediately. This avoids
8996 complications trying to do it at the end of the block. */
8997 if (env->condexec_bits)
8f01245e
PB
8998 {
8999 TCGv tmp = new_tmp();
9000 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9001 store_cpu_field(tmp, condexec_bits);
8f01245e 9002 }
2c0262af 9003 do {
fbb4a2e3
PB
9004#ifdef CONFIG_USER_ONLY
9005 /* Intercept jump to the magic kernel page. */
9006 if (dc->pc >= 0xffff0000) {
9007 /* We always get here via a jump, so know we are not in a
9008 conditional execution block. */
9009 gen_exception(EXCP_KERNEL_TRAP);
9010 dc->is_jmp = DISAS_UPDATE;
9011 break;
9012 }
9013#else
9ee6e8bb
PB
9014 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9015 /* We always get here via a jump, so know we are not in a
9016 conditional execution block. */
d9ba4830 9017 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9018 dc->is_jmp = DISAS_UPDATE;
9019 break;
9ee6e8bb
PB
9020 }
9021#endif
9022
72cf2d4f
BS
9023 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9024 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9025 if (bp->pc == dc->pc) {
9ee6e8bb 9026 gen_set_condexec(dc);
5e3f878a 9027 gen_set_pc_im(dc->pc);
d9ba4830 9028 gen_exception(EXCP_DEBUG);
1fddef4b 9029 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9030 /* Advance PC so that clearing the breakpoint will
9031 invalidate this TB. */
9032 dc->pc += 2;
9033 goto done_generating;
1fddef4b
FB
9034 break;
9035 }
9036 }
9037 }
2c0262af
FB
9038 if (search_pc) {
9039 j = gen_opc_ptr - gen_opc_buf;
9040 if (lj < j) {
9041 lj++;
9042 while (lj < j)
9043 gen_opc_instr_start[lj++] = 0;
9044 }
0fa85d43 9045 gen_opc_pc[lj] = dc->pc;
2c0262af 9046 gen_opc_instr_start[lj] = 1;
2e70f6ef 9047 gen_opc_icount[lj] = num_insns;
2c0262af 9048 }
e50e6a20 9049
2e70f6ef
PB
9050 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9051 gen_io_start();
9052
9ee6e8bb
PB
9053 if (env->thumb) {
9054 disas_thumb_insn(env, dc);
9055 if (dc->condexec_mask) {
9056 dc->condexec_cond = (dc->condexec_cond & 0xe)
9057 | ((dc->condexec_mask >> 4) & 1);
9058 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9059 if (dc->condexec_mask == 0) {
9060 dc->condexec_cond = 0;
9061 }
9062 }
9063 } else {
9064 disas_arm_insn(env, dc);
9065 }
b26eefb6
PB
9066 if (num_temps) {
9067 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9068 num_temps = 0;
9069 }
e50e6a20
FB
9070
9071 if (dc->condjmp && !dc->is_jmp) {
9072 gen_set_label(dc->condlabel);
9073 dc->condjmp = 0;
9074 }
aaf2d97d 9075 /* Translation stops when a conditional branch is encountered.
e50e6a20 9076 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9077 * Also stop translation when a page boundary is reached. This
bf20dc07 9078 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9079 num_insns ++;
1fddef4b
FB
9080 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9081 !env->singlestep_enabled &&
1b530a6d 9082 !singlestep &&
2e70f6ef
PB
9083 dc->pc < next_page_start &&
9084 num_insns < max_insns);
9085
9086 if (tb->cflags & CF_LAST_IO) {
9087 if (dc->condjmp) {
9088 /* FIXME: This can theoretically happen with self-modifying
9089 code. */
9090 cpu_abort(env, "IO on conditional branch instruction");
9091 }
9092 gen_io_end();
9093 }
9ee6e8bb 9094
b5ff1b31 9095 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9096 instruction was a conditional branch or trap, and the PC has
9097 already been written. */
551bd27f 9098 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9099 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9100 if (dc->condjmp) {
9ee6e8bb
PB
9101 gen_set_condexec(dc);
9102 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9103 gen_exception(EXCP_SWI);
9ee6e8bb 9104 } else {
d9ba4830 9105 gen_exception(EXCP_DEBUG);
9ee6e8bb 9106 }
e50e6a20
FB
9107 gen_set_label(dc->condlabel);
9108 }
9109 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9110 gen_set_pc_im(dc->pc);
e50e6a20 9111 dc->condjmp = 0;
8aaca4c0 9112 }
9ee6e8bb
PB
9113 gen_set_condexec(dc);
9114 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9115 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9116 } else {
9117 /* FIXME: Single stepping a WFI insn will not halt
9118 the CPU. */
d9ba4830 9119 gen_exception(EXCP_DEBUG);
9ee6e8bb 9120 }
8aaca4c0 9121 } else {
9ee6e8bb
PB
9122 /* While branches must always occur at the end of an IT block,
9123 there are a few other things that can cause us to terminate
 9124        the TB in the middle of an IT block:
9125 - Exception generating instructions (bkpt, swi, undefined).
9126 - Page boundaries.
9127 - Hardware watchpoints.
9128 Hardware breakpoints have already been handled and skip this code.
9129 */
9130 gen_set_condexec(dc);
8aaca4c0 9131 switch(dc->is_jmp) {
8aaca4c0 9132 case DISAS_NEXT:
6e256c93 9133 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9134 break;
9135 default:
9136 case DISAS_JUMP:
9137 case DISAS_UPDATE:
9138 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9139 tcg_gen_exit_tb(0);
8aaca4c0
FB
9140 break;
9141 case DISAS_TB_JUMP:
9142 /* nothing more to generate */
9143 break;
9ee6e8bb 9144 case DISAS_WFI:
d9ba4830 9145 gen_helper_wfi();
9ee6e8bb
PB
9146 break;
9147 case DISAS_SWI:
d9ba4830 9148 gen_exception(EXCP_SWI);
9ee6e8bb 9149 break;
8aaca4c0 9150 }
e50e6a20
FB
9151 if (dc->condjmp) {
9152 gen_set_label(dc->condlabel);
9ee6e8bb 9153 gen_set_condexec(dc);
6e256c93 9154 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9155 dc->condjmp = 0;
9156 }
2c0262af 9157 }
2e70f6ef 9158
9ee6e8bb 9159done_generating:
2e70f6ef 9160 gen_icount_end(tb, num_insns);
2c0262af
FB
9161 *gen_opc_ptr = INDEX_op_end;
9162
9163#ifdef DEBUG_DISAS
8fec2b8c 9164 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9165 qemu_log("----------------\n");
9166 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9167 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
9168 qemu_log("\n");
2c0262af
FB
9169 }
9170#endif
b5ff1b31
FB
9171 if (search_pc) {
9172 j = gen_opc_ptr - gen_opc_buf;
9173 lj++;
9174 while (lj <= j)
9175 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9176 } else {
2c0262af 9177 tb->size = dc->pc - pc_start;
2e70f6ef 9178 tb->icount = num_insns;
b5ff1b31 9179 }
2c0262af
FB
9180}
9181
2cfc5f17 9182void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9183{
2cfc5f17 9184 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9185}
9186
2cfc5f17 9187void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9188{
2cfc5f17 9189 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9190}
9191
b5ff1b31
FB
9192static const char *cpu_mode_names[16] = {
9193 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9194 "???", "???", "???", "und", "???", "???", "???", "sys"
9195};
9ee6e8bb 9196
5fafdf24 9197void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
9198 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
9199 int flags)
2c0262af
FB
9200{
9201 int i;
06e80fc9 9202#if 0
bc380d17 9203 union {
b7bcbe95
FB
9204 uint32_t i;
9205 float s;
9206 } s0, s1;
9207 CPU_DoubleU d;
a94a6abf
PB
9208 /* ??? This assumes float64 and double have the same layout.
9209 Oh well, it's only debug dumps. */
9210 union {
9211 float64 f64;
9212 double d;
9213 } d0;
06e80fc9 9214#endif
b5ff1b31 9215 uint32_t psr;
2c0262af
FB
9216
9217 for(i=0;i<16;i++) {
7fe48483 9218 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9219 if ((i % 4) == 3)
7fe48483 9220 cpu_fprintf(f, "\n");
2c0262af 9221 else
7fe48483 9222 cpu_fprintf(f, " ");
2c0262af 9223 }
b5ff1b31 9224 psr = cpsr_read(env);
687fa640
TS
9225 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9226 psr,
b5ff1b31
FB
9227 psr & (1 << 31) ? 'N' : '-',
9228 psr & (1 << 30) ? 'Z' : '-',
9229 psr & (1 << 29) ? 'C' : '-',
9230 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9231 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9232 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9233
5e3f878a 9234#if 0
b7bcbe95 9235 for (i = 0; i < 16; i++) {
8e96005d
FB
9236 d.d = env->vfp.regs[i];
9237 s0.i = d.l.lower;
9238 s1.i = d.l.upper;
a94a6abf
PB
9239 d0.f64 = d.d;
9240 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9241 i * 2, (int)s0.i, s0.s,
a94a6abf 9242 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9243 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9244 d0.d);
b7bcbe95 9245 }
40f137e1 9246 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9247#endif
2c0262af 9248}
a6b025d3 9249
d2856f1a
AJ
9250void gen_pc_load(CPUState *env, TranslationBlock *tb,
9251 unsigned long searched_pc, int pc_pos, void *puc)
9252{
9253 env->regs[15] = gen_opc_pc[pc_pos];
9254}