[qemu.git] / target-arm / translate.c
/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
b75263d6
JR
206static inline void gen_set_cpsr(TCGv var, uint32_t mask)
207{
208 TCGv tmp_mask = tcg_const_i32(mask);
209 gen_helper_cpsr_write(var, tmp_mask);
210 tcg_temp_free_i32(tmp_mask);
211}
d9ba4830
PB
212/* Set NZCV flags from the high 4 bits of var. */
213#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
214
215static void gen_exception(int excp)
216{
217 TCGv tmp = new_tmp();
218 tcg_gen_movi_i32(tmp, excp);
219 gen_helper_exception(tmp);
220 dead_tmp(tmp);
221}
222
3670669c
PB
223static void gen_smul_dual(TCGv a, TCGv b)
224{
225 TCGv tmp1 = new_tmp();
226 TCGv tmp2 = new_tmp();
22478e79
AZ
227 tcg_gen_ext16s_i32(tmp1, a);
228 tcg_gen_ext16s_i32(tmp2, b);
3670669c
PB
229 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
230 dead_tmp(tmp2);
231 tcg_gen_sari_i32(a, a, 16);
232 tcg_gen_sari_i32(b, b, 16);
233 tcg_gen_mul_i32(b, b, a);
234 tcg_gen_mov_i32(a, tmp1);
235 dead_tmp(tmp1);
236}
237
238/* Byteswap each halfword. */
239static void gen_rev16(TCGv var)
240{
241 TCGv tmp = new_tmp();
242 tcg_gen_shri_i32(tmp, var, 8);
243 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
244 tcg_gen_shli_i32(var, var, 8);
245 tcg_gen_andi_i32(var, var, 0xff00ff00);
246 tcg_gen_or_i32(var, var, tmp);
247 dead_tmp(tmp);
248}
249
250/* Byteswap low halfword and sign extend. */
251static void gen_revsh(TCGv var)
252{
253 TCGv tmp = new_tmp();
254 tcg_gen_shri_i32(tmp, var, 8);
255 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
256 tcg_gen_shli_i32(var, var, 8);
257 tcg_gen_ext8s_i32(var, var);
258 tcg_gen_or_i32(var, var, tmp);
259 dead_tmp(tmp);
260}
261
262/* Unsigned bitfield extract. */
263static void gen_ubfx(TCGv var, int shift, uint32_t mask)
264{
265 if (shift)
266 tcg_gen_shri_i32(var, var, shift);
267 tcg_gen_andi_i32(var, var, mask);
268}
269
270/* Signed bitfield extract. */
271static void gen_sbfx(TCGv var, int shift, int width)
272{
273 uint32_t signbit;
274
275 if (shift)
276 tcg_gen_sari_i32(var, var, shift);
277 if (shift + width < 32) {
278 signbit = 1u << (width - 1);
279 tcg_gen_andi_i32(var, var, (1u << width) - 1);
280 tcg_gen_xori_i32(var, var, signbit);
281 tcg_gen_subi_i32(var, var, signbit);
282 }
283}
284
285/* Bitfield insertion. Insert val into base. Clobbers base and val. */
286static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
287{
3670669c 288 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
289 tcg_gen_shli_i32(val, val, shift);
290 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
291 tcg_gen_or_i32(dest, base, val);
292}
293
d9ba4830
PB
294/* Round the top 32 bits of a 64-bit value. */
295static void gen_roundqd(TCGv a, TCGv b)
3670669c 296{
d9ba4830
PB
297 tcg_gen_shri_i32(a, a, 31);
298 tcg_gen_add_i32(a, a, b);
3670669c
PB
299}
300
8f01245e
PB
301/* FIXME: Most targets have native widening multiplication.
302 It would be good to use that instead of a full wide multiply. */
5e3f878a 303/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 304static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 305{
a7812ae4
PB
306 TCGv_i64 tmp1 = tcg_temp_new_i64();
307 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
308
309 tcg_gen_extu_i32_i64(tmp1, a);
310 dead_tmp(a);
311 tcg_gen_extu_i32_i64(tmp2, b);
312 dead_tmp(b);
313 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 314 tcg_temp_free_i64(tmp2);
5e3f878a
PB
315 return tmp1;
316}
317
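/* Signed 32x32->64 multiply.  Marks inputs as dead.  */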
a7812ae4 318static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 319{
a7812ae4
PB
320 TCGv_i64 tmp1 = tcg_temp_new_i64();
321 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
322
323 tcg_gen_ext_i32_i64(tmp1, a);
324 dead_tmp(a);
325 tcg_gen_ext_i32_i64(tmp2, b);
326 dead_tmp(b);
327 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 328 tcg_temp_free_i64(tmp2);
5e3f878a
PB
329 return tmp1;
330}
331
8f01245e 332/* Signed 32x32->64 multiply. */
d9ba4830 333static void gen_imull(TCGv a, TCGv b)
8f01245e 334{
a7812ae4
PB
335 TCGv_i64 tmp1 = tcg_temp_new_i64();
336 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e 337
d9ba4830
PB
338 tcg_gen_ext_i32_i64(tmp1, a);
339 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 340 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 341 tcg_temp_free_i64(tmp2);
d9ba4830 342 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 343 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830 344 tcg_gen_trunc_i64_i32(b, tmp1);
b75263d6 345 tcg_temp_free_i64(tmp1);
d9ba4830 346}
d9ba4830 347
8f01245e
PB
348/* Swap low and high halfwords. */
349static void gen_swap_half(TCGv var)
350{
351 TCGv tmp = new_tmp();
352 tcg_gen_shri_i32(tmp, var, 16);
353 tcg_gen_shli_i32(var, var, 16);
354 tcg_gen_or_i32(var, var, tmp);
3670669c 355 dead_tmp(tmp);
8f01245e
PB
356}
357
b26eefb6
PB
358/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
359 tmp = (t0 ^ t1) & 0x8000;
360 t0 &= ~0x8000;
361 t1 &= ~0x8000;
362 t0 = (t0 + t1) ^ tmp;
363 */
364
365static void gen_add16(TCGv t0, TCGv t1)
366{
367 TCGv tmp = new_tmp();
368 tcg_gen_xor_i32(tmp, t0, t1);
369 tcg_gen_andi_i32(tmp, tmp, 0x8000);
370 tcg_gen_andi_i32(t0, t0, ~0x8000);
371 tcg_gen_andi_i32(t1, t1, ~0x8000);
372 tcg_gen_add_i32(t0, t0, t1);
373 tcg_gen_xor_i32(t0, t0, tmp);
374 dead_tmp(tmp);
375 dead_tmp(t1);
376}
377
9a119ff6
PB
378#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
379
b26eefb6
PB
380/* Set CF to the top bit of var. */
381static void gen_set_CF_bit31(TCGv var)
382{
383 TCGv tmp = new_tmp();
384 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 385 gen_set_CF(tmp);
b26eefb6
PB
386 dead_tmp(tmp);
387}
388
389/* Set N and Z flags from var. */
390static inline void gen_logic_CC(TCGv var)
391{
6fbe23d5
PB
392 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
393 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
394}
395
396/* T0 += T1 + CF. */
396e467c 397static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 398{
d9ba4830 399 TCGv tmp;
396e467c 400 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 401 tmp = load_cpu_field(CF);
396e467c 402 tcg_gen_add_i32(t0, t0, tmp);
b26eefb6
PB
403 dead_tmp(tmp);
404}
405
e9bb4aa9
JR
406/* dest = T0 + T1 + CF. */
407static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
408{
409 TCGv tmp;
410 tcg_gen_add_i32(dest, t0, t1);
411 tmp = load_cpu_field(CF);
412 tcg_gen_add_i32(dest, dest, tmp);
413 dead_tmp(tmp);
414}
415
3670669c
PB
416/* dest = T0 - T1 + CF - 1. */
417static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
418{
d9ba4830 419 TCGv tmp;
3670669c 420 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 421 tmp = load_cpu_field(CF);
3670669c
PB
422 tcg_gen_add_i32(dest, dest, tmp);
423 tcg_gen_subi_i32(dest, dest, 1);
424 dead_tmp(tmp);
425}
426
ad69471c
PB
427/* FIXME: Implement this natively. */
428#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
429
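/* Set the carry flag to bit "shift" of var (bit 0 when shift is 0).  */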
9a119ff6 430static void shifter_out_im(TCGv var, int shift)
b26eefb6 431{
9a119ff6
PB
432 TCGv tmp = new_tmp();
433 if (shift == 0) {
434 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 435 } else {
9a119ff6 436 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 437 if (shift != 31)
9a119ff6
PB
438 tcg_gen_andi_i32(tmp, tmp, 1);
439 }
440 gen_set_CF(tmp);
441 dead_tmp(tmp);
442}
b26eefb6 443
9a119ff6
PB
444/* Shift by immediate. Includes special handling for shift == 0. */
445static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
446{
447 switch (shiftop) {
448 case 0: /* LSL */
449 if (shift != 0) {
450 if (flags)
451 shifter_out_im(var, 32 - shift);
452 tcg_gen_shli_i32(var, var, shift);
453 }
454 break;
455 case 1: /* LSR */
456 if (shift == 0) {
457 if (flags) {
458 tcg_gen_shri_i32(var, var, 31);
459 gen_set_CF(var);
460 }
461 tcg_gen_movi_i32(var, 0);
462 } else {
463 if (flags)
464 shifter_out_im(var, shift - 1);
465 tcg_gen_shri_i32(var, var, shift);
466 }
467 break;
468 case 2: /* ASR */
469 if (shift == 0)
470 shift = 32;
471 if (flags)
472 shifter_out_im(var, shift - 1);
473 if (shift == 32)
474 shift = 31;
475 tcg_gen_sari_i32(var, var, shift);
476 break;
477 case 3: /* ROR/RRX */
478 if (shift != 0) {
479 if (flags)
480 shifter_out_im(var, shift - 1);
f669df27 481 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 482 } else {
d9ba4830 483 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
484 if (flags)
485 shifter_out_im(var, 0);
486 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
487 tcg_gen_shli_i32(tmp, tmp, 31);
488 tcg_gen_or_i32(var, var, tmp);
489 dead_tmp(tmp);
b26eefb6
PB
490 }
491 }
492};
493
8984bd2e
PB
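/* Shift var by a register-specified amount.  When "flags" is set, the
   flag-setting helper variants are used so the shifter carry-out is
   computed as well.  The shift temporary is freed.  */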
494static inline void gen_arm_shift_reg(TCGv var, int shiftop,
495 TCGv shift, int flags)
496{
497 if (flags) {
498 switch (shiftop) {
499 case 0: gen_helper_shl_cc(var, var, shift); break;
500 case 1: gen_helper_shr_cc(var, var, shift); break;
501 case 2: gen_helper_sar_cc(var, var, shift); break;
502 case 3: gen_helper_ror_cc(var, var, shift); break;
503 }
504 } else {
505 switch (shiftop) {
506 case 0: gen_helper_shl(var, var, shift); break;
507 case 1: gen_helper_shr(var, var, shift); break;
508 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
509 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
510 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
511 }
512 }
513 dead_tmp(shift);
514}
515
6ddbc6e4
PB
516#define PAS_OP(pfx) \
517 switch (op2) { \
518 case 0: gen_pas_helper(glue(pfx,add16)); break; \
519 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
520 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
521 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
522 case 4: gen_pas_helper(glue(pfx,add8)); break; \
523 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
524 }
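/* ARM-encoding parallel add/subtract: op1 selects signed/unsigned and the
   plain, saturating or halving variant; op2 selects the byte/halfword
   add/sub pattern.  */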
d9ba4830 525static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 526{
a7812ae4 527 TCGv_ptr tmp;
6ddbc6e4
PB
528
529 switch (op1) {
530#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
531 case 1:
a7812ae4 532 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
534 PAS_OP(s)
b75263d6 535 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
536 break;
537 case 5:
a7812ae4 538 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
539 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
540 PAS_OP(u)
b75263d6 541 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
542 break;
543#undef gen_pas_helper
544#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
545 case 2:
546 PAS_OP(q);
547 break;
548 case 3:
549 PAS_OP(sh);
550 break;
551 case 6:
552 PAS_OP(uq);
553 break;
554 case 7:
555 PAS_OP(uh);
556 break;
557#undef gen_pas_helper
558 }
559}
9ee6e8bb
PB
560#undef PAS_OP
561
6ddbc6e4
PB
562/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
563#define PAS_OP(pfx) \
ed89a2f1 564 switch (op1) { \
6ddbc6e4
PB
565 case 0: gen_pas_helper(glue(pfx,add8)); break; \
566 case 1: gen_pas_helper(glue(pfx,add16)); break; \
567 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
568 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
569 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
570 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
571 }
d9ba4830 572static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 573{
a7812ae4 574 TCGv_ptr tmp;
6ddbc6e4 575
ed89a2f1 576 switch (op2) {
6ddbc6e4
PB
577#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
578 case 0:
a7812ae4 579 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
581 PAS_OP(s)
b75263d6 582 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
583 break;
584 case 4:
a7812ae4 585 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
586 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
587 PAS_OP(u)
b75263d6 588 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
589 break;
590#undef gen_pas_helper
591#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
592 case 1:
593 PAS_OP(q);
594 break;
595 case 2:
596 PAS_OP(sh);
597 break;
598 case 5:
599 PAS_OP(uq);
600 break;
601 case 6:
602 PAS_OP(uh);
603 break;
604#undef gen_pas_helper
605 }
606}
9ee6e8bb
PB
607#undef PAS_OP
608
d9ba4830
PB
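/* Generate a branch to "label" if ARM condition code "cc" holds, testing
   the flag values cached in the CPU state fields.  */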
609static void gen_test_cc(int cc, int label)
610{
611 TCGv tmp;
612 TCGv tmp2;
d9ba4830
PB
613 int inv;
614
d9ba4830
PB
615 switch (cc) {
616 case 0: /* eq: Z */
6fbe23d5 617 tmp = load_cpu_field(ZF);
cb63669a 618 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
619 break;
620 case 1: /* ne: !Z */
6fbe23d5 621 tmp = load_cpu_field(ZF);
cb63669a 622 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
623 break;
624 case 2: /* cs: C */
625 tmp = load_cpu_field(CF);
cb63669a 626 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
627 break;
628 case 3: /* cc: !C */
629 tmp = load_cpu_field(CF);
cb63669a 630 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
631 break;
632 case 4: /* mi: N */
6fbe23d5 633 tmp = load_cpu_field(NF);
cb63669a 634 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
635 break;
636 case 5: /* pl: !N */
6fbe23d5 637 tmp = load_cpu_field(NF);
cb63669a 638 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
639 break;
640 case 6: /* vs: V */
641 tmp = load_cpu_field(VF);
cb63669a 642 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
643 break;
644 case 7: /* vc: !V */
645 tmp = load_cpu_field(VF);
cb63669a 646 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
647 break;
648 case 8: /* hi: C && !Z */
649 inv = gen_new_label();
650 tmp = load_cpu_field(CF);
cb63669a 651 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 652 dead_tmp(tmp);
6fbe23d5 653 tmp = load_cpu_field(ZF);
cb63669a 654 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
655 gen_set_label(inv);
656 break;
657 case 9: /* ls: !C || Z */
658 tmp = load_cpu_field(CF);
cb63669a 659 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 660 dead_tmp(tmp);
6fbe23d5 661 tmp = load_cpu_field(ZF);
cb63669a 662 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
663 break;
664 case 10: /* ge: N == V -> N ^ V == 0 */
665 tmp = load_cpu_field(VF);
6fbe23d5 666 tmp2 = load_cpu_field(NF);
d9ba4830
PB
667 tcg_gen_xor_i32(tmp, tmp, tmp2);
668 dead_tmp(tmp2);
cb63669a 669 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
670 break;
671 case 11: /* lt: N != V -> N ^ V != 0 */
672 tmp = load_cpu_field(VF);
6fbe23d5 673 tmp2 = load_cpu_field(NF);
d9ba4830
PB
674 tcg_gen_xor_i32(tmp, tmp, tmp2);
675 dead_tmp(tmp2);
cb63669a 676 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
677 break;
678 case 12: /* gt: !Z && N == V */
679 inv = gen_new_label();
6fbe23d5 680 tmp = load_cpu_field(ZF);
cb63669a 681 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
682 dead_tmp(tmp);
683 tmp = load_cpu_field(VF);
6fbe23d5 684 tmp2 = load_cpu_field(NF);
d9ba4830
PB
685 tcg_gen_xor_i32(tmp, tmp, tmp2);
686 dead_tmp(tmp2);
cb63669a 687 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
688 gen_set_label(inv);
689 break;
690 case 13: /* le: Z || N != V */
6fbe23d5 691 tmp = load_cpu_field(ZF);
cb63669a 692 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
693 dead_tmp(tmp);
694 tmp = load_cpu_field(VF);
6fbe23d5 695 tmp2 = load_cpu_field(NF);
d9ba4830
PB
696 tcg_gen_xor_i32(tmp, tmp, tmp2);
697 dead_tmp(tmp2);
cb63669a 698 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
699 break;
700 default:
701 fprintf(stderr, "Bad condition code 0x%x\n", cc);
702 abort();
703 }
704 dead_tmp(tmp);
705}
2c0262af 706
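/* Nonzero for data processing opcodes that set the N and Z flags from the
   logical result rather than from an arithmetic computation.  */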
b1d8e52e 707static const uint8_t table_logic_cc[16] = {
2c0262af
FB
708 1, /* and */
709 1, /* xor */
710 0, /* sub */
711 0, /* rsb */
712 0, /* add */
713 0, /* adc */
714 0, /* sbc */
715 0, /* rsc */
716 1, /* andl */
717 1, /* xorl */
718 0, /* cmp */
719 0, /* cmn */
720 1, /* orr */
721 1, /* mov */
722 1, /* bic */
723 1, /* mvn */
724};
3b46e624 725
d9ba4830
PB
726/* Set PC and Thumb state from an immediate address. */
727static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 728{
b26eefb6 729 TCGv tmp;
99c475ab 730
b26eefb6 731 s->is_jmp = DISAS_UPDATE;
d9ba4830 732 if (s->thumb != (addr & 1)) {
155c3eac 733 tmp = new_tmp();
d9ba4830
PB
734 tcg_gen_movi_i32(tmp, addr & 1);
735 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
155c3eac 736 dead_tmp(tmp);
d9ba4830 737 }
155c3eac 738 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
739}
740
741/* Set PC and Thumb state from var. var is marked as dead. */
742static inline void gen_bx(DisasContext *s, TCGv var)
743{
d9ba4830 744 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
745 tcg_gen_andi_i32(cpu_R[15], var, ~1);
746 tcg_gen_andi_i32(var, var, 1);
747 store_cpu_field(var, thumb);
d9ba4830
PB
748}
749
21aeb343
JR
750/* Variant of store_reg which uses branch&exchange logic when storing
751 to r15 in ARM architecture v7 and above. The source must be a temporary
752 and will be marked as dead. */
753static inline void store_reg_bx(CPUState *env, DisasContext *s,
754 int reg, TCGv var)
755{
756 if (reg == 15 && ENABLE_ARCH_7) {
757 gen_bx(s, var);
758 } else {
759 store_reg(s, reg, var);
760 }
761}
762
b0109805
PB
763static inline TCGv gen_ld8s(TCGv addr, int index)
764{
765 TCGv tmp = new_tmp();
766 tcg_gen_qemu_ld8s(tmp, addr, index);
767 return tmp;
768}
769static inline TCGv gen_ld8u(TCGv addr, int index)
770{
771 TCGv tmp = new_tmp();
772 tcg_gen_qemu_ld8u(tmp, addr, index);
773 return tmp;
774}
775static inline TCGv gen_ld16s(TCGv addr, int index)
776{
777 TCGv tmp = new_tmp();
778 tcg_gen_qemu_ld16s(tmp, addr, index);
779 return tmp;
780}
781static inline TCGv gen_ld16u(TCGv addr, int index)
782{
783 TCGv tmp = new_tmp();
784 tcg_gen_qemu_ld16u(tmp, addr, index);
785 return tmp;
786}
787static inline TCGv gen_ld32(TCGv addr, int index)
788{
789 TCGv tmp = new_tmp();
790 tcg_gen_qemu_ld32u(tmp, addr, index);
791 return tmp;
792}
84496233
JR
793static inline TCGv_i64 gen_ld64(TCGv addr, int index)
794{
795 TCGv_i64 tmp = tcg_temp_new_i64();
796 tcg_gen_qemu_ld64(tmp, addr, index);
797 return tmp;
798}
b0109805
PB
799static inline void gen_st8(TCGv val, TCGv addr, int index)
800{
801 tcg_gen_qemu_st8(val, addr, index);
802 dead_tmp(val);
803}
804static inline void gen_st16(TCGv val, TCGv addr, int index)
805{
806 tcg_gen_qemu_st16(val, addr, index);
807 dead_tmp(val);
808}
809static inline void gen_st32(TCGv val, TCGv addr, int index)
810{
811 tcg_gen_qemu_st32(val, addr, index);
812 dead_tmp(val);
813}
84496233
JR
814static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
815{
816 tcg_gen_qemu_st64(val, addr, index);
817 tcg_temp_free_i64(val);
818}
b5ff1b31 819
5e3f878a
PB
820static inline void gen_set_pc_im(uint32_t val)
821{
155c3eac 822 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
823}
824
b5ff1b31
FB
825/* Force a TB lookup after an instruction that changes the CPU state. */
826static inline void gen_lookup_tb(DisasContext *s)
827{
a6445c52 828 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
829 s->is_jmp = DISAS_UPDATE;
830}
831
b0109805
PB
832static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
833 TCGv var)
2c0262af 834{
1e8d4eec 835 int val, rm, shift, shiftop;
b26eefb6 836 TCGv offset;
2c0262af
FB
837
838 if (!(insn & (1 << 25))) {
839 /* immediate */
840 val = insn & 0xfff;
841 if (!(insn & (1 << 23)))
842 val = -val;
537730b9 843 if (val != 0)
b0109805 844 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
845 } else {
846 /* shift/register */
847 rm = (insn) & 0xf;
848 shift = (insn >> 7) & 0x1f;
1e8d4eec 849 shiftop = (insn >> 5) & 3;
b26eefb6 850 offset = load_reg(s, rm);
9a119ff6 851 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 852 if (!(insn & (1 << 23)))
b0109805 853 tcg_gen_sub_i32(var, var, offset);
2c0262af 854 else
b0109805 855 tcg_gen_add_i32(var, var, offset);
b26eefb6 856 dead_tmp(offset);
2c0262af
FB
857 }
858}
859
191f9a93 860static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 861 int extra, TCGv var)
2c0262af
FB
862{
863 int val, rm;
b26eefb6 864 TCGv offset;
3b46e624 865
2c0262af
FB
866 if (insn & (1 << 22)) {
867 /* immediate */
868 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
869 if (!(insn & (1 << 23)))
870 val = -val;
18acad92 871 val += extra;
537730b9 872 if (val != 0)
b0109805 873 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
874 } else {
875 /* register */
191f9a93 876 if (extra)
b0109805 877 tcg_gen_addi_i32(var, var, extra);
2c0262af 878 rm = (insn) & 0xf;
b26eefb6 879 offset = load_reg(s, rm);
2c0262af 880 if (!(insn & (1 << 23)))
b0109805 881 tcg_gen_sub_i32(var, var, offset);
2c0262af 882 else
b0109805 883 tcg_gen_add_i32(var, var, offset);
b26eefb6 884 dead_tmp(offset);
2c0262af
FB
885 }
886}
887
4373f3ce
PB
888#define VFP_OP2(name) \
889static inline void gen_vfp_##name(int dp) \
890{ \
891 if (dp) \
892 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
893 else \
894 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
895}
896
4373f3ce
PB
897VFP_OP2(add)
898VFP_OP2(sub)
899VFP_OP2(mul)
900VFP_OP2(div)
901
902#undef VFP_OP2
903
904static inline void gen_vfp_abs(int dp)
905{
906 if (dp)
907 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
908 else
909 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
910}
911
912static inline void gen_vfp_neg(int dp)
913{
914 if (dp)
915 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
916 else
917 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
918}
919
920static inline void gen_vfp_sqrt(int dp)
921{
922 if (dp)
923 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
924 else
925 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
926}
927
928static inline void gen_vfp_cmp(int dp)
929{
930 if (dp)
931 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
932 else
933 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
934}
935
936static inline void gen_vfp_cmpe(int dp)
937{
938 if (dp)
939 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
940 else
941 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
942}
943
944static inline void gen_vfp_F1_ld0(int dp)
945{
946 if (dp)
5b340b51 947 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 948 else
5b340b51 949 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
950}
951
952static inline void gen_vfp_uito(int dp)
953{
954 if (dp)
955 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
956 else
957 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
958}
959
960static inline void gen_vfp_sito(int dp)
961{
962 if (dp)
66230e0d 963 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 964 else
66230e0d 965 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
966}
967
968static inline void gen_vfp_toui(int dp)
969{
970 if (dp)
971 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
972 else
973 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
974}
975
976static inline void gen_vfp_touiz(int dp)
977{
978 if (dp)
979 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
980 else
981 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
982}
983
984static inline void gen_vfp_tosi(int dp)
985{
986 if (dp)
987 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
988 else
989 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
990}
991
992static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
993{
994 if (dp)
4373f3ce 995 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 996 else
4373f3ce
PB
997 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
998}
999
1000#define VFP_GEN_FIX(name) \
1001static inline void gen_vfp_##name(int dp, int shift) \
1002{ \
b75263d6 1003 TCGv tmp_shift = tcg_const_i32(shift); \
4373f3ce 1004 if (dp) \
b75263d6 1005 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
4373f3ce 1006 else \
b75263d6
JR
1007 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1008 tcg_temp_free_i32(tmp_shift); \
9ee6e8bb 1009}
4373f3ce
PB
1010VFP_GEN_FIX(tosh)
1011VFP_GEN_FIX(tosl)
1012VFP_GEN_FIX(touh)
1013VFP_GEN_FIX(toul)
1014VFP_GEN_FIX(shto)
1015VFP_GEN_FIX(slto)
1016VFP_GEN_FIX(uhto)
1017VFP_GEN_FIX(ulto)
1018#undef VFP_GEN_FIX
9ee6e8bb 1019
312eea9f 1020static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1021{
1022 if (dp)
312eea9f 1023 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1024 else
312eea9f 1025 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1026}
1027
312eea9f 1028static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1029{
1030 if (dp)
312eea9f 1031 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1032 else
312eea9f 1033 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1034}
1035
8e96005d
FB
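/* Return the offset within CPUARMState of VFP register "reg".  For single
   precision, even/odd registers map to the lower/upper half of the
   corresponding double-precision register.  */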
1036static inline long
1037vfp_reg_offset (int dp, int reg)
1038{
1039 if (dp)
1040 return offsetof(CPUARMState, vfp.regs[reg]);
1041 else if (reg & 1) {
1042 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1043 + offsetof(CPU_DoubleU, l.upper);
1044 } else {
1045 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1046 + offsetof(CPU_DoubleU, l.lower);
1047 }
1048}
9ee6e8bb
PB
1049
1050/* Return the offset of a 32-bit piece of a NEON register.
1051 zero is the least significant end of the register. */
1052static inline long
1053neon_reg_offset (int reg, int n)
1054{
1055 int sreg;
1056 sreg = reg * 2 + n;
1057 return vfp_reg_offset(0, sreg);
1058}
1059
8f8e3aa4
PB
1060static TCGv neon_load_reg(int reg, int pass)
1061{
1062 TCGv tmp = new_tmp();
1063 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1064 return tmp;
1065}
1066
1067static void neon_store_reg(int reg, int pass, TCGv var)
1068{
1069 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1070 dead_tmp(var);
1071}
1072
a7812ae4 1073static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1074{
1075 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1076}
1077
a7812ae4 1078static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1079{
1080 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1081}
1082
4373f3ce
PB
1083#define tcg_gen_ld_f32 tcg_gen_ld_i32
1084#define tcg_gen_ld_f64 tcg_gen_ld_i64
1085#define tcg_gen_st_f32 tcg_gen_st_i32
1086#define tcg_gen_st_f64 tcg_gen_st_i64
1087
b7bcbe95
FB
1088static inline void gen_mov_F0_vreg(int dp, int reg)
1089{
1090 if (dp)
4373f3ce 1091 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1092 else
4373f3ce 1093 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1094}
1095
1096static inline void gen_mov_F1_vreg(int dp, int reg)
1097{
1098 if (dp)
4373f3ce 1099 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1100 else
4373f3ce 1101 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1102}
1103
1104static inline void gen_mov_vreg_F0(int dp, int reg)
1105{
1106 if (dp)
4373f3ce 1107 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1108 else
4373f3ce 1109 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1110}
1111
18c9b560
AZ
1112#define ARM_CP_RW_BIT (1 << 20)
1113
a7812ae4 1114static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1115{
1116 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1117}
1118
a7812ae4 1119static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1120{
1121 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1122}
1123
da6b5335 1124static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1125{
da6b5335
FN
1126 TCGv var = new_tmp();
1127 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1128 return var;
e677137d
PB
1129}
1130
da6b5335 1131static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1132{
da6b5335 1133 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
d9968827 1134 dead_tmp(var);
e677137d
PB
1135}
1136
1137static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1138{
1139 iwmmxt_store_reg(cpu_M0, rn);
1140}
1141
1142static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1143{
1144 iwmmxt_load_reg(cpu_M0, rn);
1145}
1146
1147static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1148{
1149 iwmmxt_load_reg(cpu_V1, rn);
1150 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1151}
1152
1153static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1154{
1155 iwmmxt_load_reg(cpu_V1, rn);
1156 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1157}
1158
1159static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1160{
1161 iwmmxt_load_reg(cpu_V1, rn);
1162 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1163}
1164
1165#define IWMMXT_OP(name) \
1166static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1167{ \
1168 iwmmxt_load_reg(cpu_V1, rn); \
1169 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1170}
1171
1172#define IWMMXT_OP_ENV(name) \
1173static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1174{ \
1175 iwmmxt_load_reg(cpu_V1, rn); \
1176 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1177}
1178
1179#define IWMMXT_OP_ENV_SIZE(name) \
1180IWMMXT_OP_ENV(name##b) \
1181IWMMXT_OP_ENV(name##w) \
1182IWMMXT_OP_ENV(name##l)
1183
1184#define IWMMXT_OP_ENV1(name) \
1185static inline void gen_op_iwmmxt_##name##_M0(void) \
1186{ \
1187 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1188}
1189
1190IWMMXT_OP(maddsq)
1191IWMMXT_OP(madduq)
1192IWMMXT_OP(sadb)
1193IWMMXT_OP(sadw)
1194IWMMXT_OP(mulslw)
1195IWMMXT_OP(mulshw)
1196IWMMXT_OP(mululw)
1197IWMMXT_OP(muluhw)
1198IWMMXT_OP(macsw)
1199IWMMXT_OP(macuw)
1200
1201IWMMXT_OP_ENV_SIZE(unpackl)
1202IWMMXT_OP_ENV_SIZE(unpackh)
1203
1204IWMMXT_OP_ENV1(unpacklub)
1205IWMMXT_OP_ENV1(unpackluw)
1206IWMMXT_OP_ENV1(unpacklul)
1207IWMMXT_OP_ENV1(unpackhub)
1208IWMMXT_OP_ENV1(unpackhuw)
1209IWMMXT_OP_ENV1(unpackhul)
1210IWMMXT_OP_ENV1(unpacklsb)
1211IWMMXT_OP_ENV1(unpacklsw)
1212IWMMXT_OP_ENV1(unpacklsl)
1213IWMMXT_OP_ENV1(unpackhsb)
1214IWMMXT_OP_ENV1(unpackhsw)
1215IWMMXT_OP_ENV1(unpackhsl)
1216
1217IWMMXT_OP_ENV_SIZE(cmpeq)
1218IWMMXT_OP_ENV_SIZE(cmpgtu)
1219IWMMXT_OP_ENV_SIZE(cmpgts)
1220
1221IWMMXT_OP_ENV_SIZE(mins)
1222IWMMXT_OP_ENV_SIZE(minu)
1223IWMMXT_OP_ENV_SIZE(maxs)
1224IWMMXT_OP_ENV_SIZE(maxu)
1225
1226IWMMXT_OP_ENV_SIZE(subn)
1227IWMMXT_OP_ENV_SIZE(addn)
1228IWMMXT_OP_ENV_SIZE(subu)
1229IWMMXT_OP_ENV_SIZE(addu)
1230IWMMXT_OP_ENV_SIZE(subs)
1231IWMMXT_OP_ENV_SIZE(adds)
1232
1233IWMMXT_OP_ENV(avgb0)
1234IWMMXT_OP_ENV(avgb1)
1235IWMMXT_OP_ENV(avgw0)
1236IWMMXT_OP_ENV(avgw1)
1237
1238IWMMXT_OP(msadb)
1239
1240IWMMXT_OP_ENV(packuw)
1241IWMMXT_OP_ENV(packul)
1242IWMMXT_OP_ENV(packuq)
1243IWMMXT_OP_ENV(packsw)
1244IWMMXT_OP_ENV(packsl)
1245IWMMXT_OP_ENV(packsq)
1246
e677137d
PB
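/* Mark the iwMMXt data registers as updated (MUP bit in wCon).  */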
1247static void gen_op_iwmmxt_set_mup(void)
1248{
1249 TCGv tmp;
1250 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1251 tcg_gen_ori_i32(tmp, tmp, 2);
1252 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1253}
1254
1255static void gen_op_iwmmxt_set_cup(void)
1256{
1257 TCGv tmp;
1258 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1259 tcg_gen_ori_i32(tmp, tmp, 1);
1260 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1261}
1262
1263static void gen_op_iwmmxt_setpsr_nz(void)
1264{
1265 TCGv tmp = new_tmp();
1266 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1267 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1268}
1269
1270static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1271{
1272 iwmmxt_load_reg(cpu_V1, rn);
86831435 1273 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1274 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1275}
1276
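/* Compute the transfer address for an iwMMXt load/store into "dest",
   handling the pre- and post-indexed forms with optional base write-back.
   Returns nonzero for an unhandled addressing mode.  */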
da6b5335 1277static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1278{
1279 int rd;
1280 uint32_t offset;
da6b5335 1281 TCGv tmp;
18c9b560
AZ
1282
1283 rd = (insn >> 16) & 0xf;
da6b5335 1284 tmp = load_reg(s, rd);
18c9b560
AZ
1285
1286 offset = (insn & 0xff) << ((insn >> 7) & 2);
1287 if (insn & (1 << 24)) {
1288 /* Pre indexed */
1289 if (insn & (1 << 23))
da6b5335 1290 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1291 else
da6b5335
FN
1292 tcg_gen_addi_i32(tmp, tmp, -offset);
1293 tcg_gen_mov_i32(dest, tmp);
18c9b560 1294 if (insn & (1 << 21))
da6b5335
FN
1295 store_reg(s, rd, tmp);
1296 else
1297 dead_tmp(tmp);
18c9b560
AZ
1298 } else if (insn & (1 << 21)) {
1299 /* Post indexed */
da6b5335 1300 tcg_gen_mov_i32(dest, tmp);
18c9b560 1301 if (insn & (1 << 23))
da6b5335 1302 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1303 else
da6b5335
FN
1304 tcg_gen_addi_i32(tmp, tmp, -offset);
1305 store_reg(s, rd, tmp);
18c9b560
AZ
1306 } else if (!(insn & (1 << 23)))
1307 return 1;
1308 return 0;
1309}
1310
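/* Load the shift/count operand for an iwMMXt shift instruction into "dest":
   either a wCGRn control register or the low 32 bits of a wRn register,
   masked with "mask".  Returns nonzero for an invalid register.  */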
da6b5335 1311static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1312{
1313 int rd = (insn >> 0) & 0xf;
da6b5335 1314 TCGv tmp;
18c9b560 1315
da6b5335
FN
1316 if (insn & (1 << 8)) {
1317 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1318 return 1;
da6b5335
FN
1319 } else {
1320 tmp = iwmmxt_load_creg(rd);
1321 }
1322 } else {
1323 tmp = new_tmp();
1324 iwmmxt_load_reg(cpu_V0, rd);
1325 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1326 }
1327 tcg_gen_andi_i32(tmp, tmp, mask);
1328 tcg_gen_mov_i32(dest, tmp);
1329 dead_tmp(tmp);
18c9b560
AZ
1330 return 0;
1331}
1332
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
1335static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1336{
1337 int rd, wrd;
1338 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1339 TCGv addr;
1340 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1341
1342 if ((insn & 0x0e000e00) == 0x0c000000) {
1343 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1344 wrd = insn & 0xf;
1345 rdlo = (insn >> 12) & 0xf;
1346 rdhi = (insn >> 16) & 0xf;
1347 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1348 iwmmxt_load_reg(cpu_V0, wrd);
1349 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1350 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1351 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1352 } else { /* TMCRR */
da6b5335
FN
1353 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1354 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1355 gen_op_iwmmxt_set_mup();
1356 }
1357 return 0;
1358 }
1359
1360 wrd = (insn >> 12) & 0xf;
da6b5335
FN
1361 addr = new_tmp();
1362 if (gen_iwmmxt_address(s, insn, addr)) {
1363 dead_tmp(addr);
18c9b560 1364 return 1;
da6b5335 1365 }
18c9b560
AZ
1366 if (insn & ARM_CP_RW_BIT) {
1367 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
da6b5335
FN
1368 tmp = new_tmp();
1369 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1370 iwmmxt_store_creg(wrd, tmp);
18c9b560 1371 } else {
e677137d
PB
1372 i = 1;
1373 if (insn & (1 << 8)) {
1374 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1375 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1376 i = 0;
1377 } else { /* WLDRW wRd */
da6b5335 1378 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1379 }
1380 } else {
1381 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1382 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1383 } else { /* WLDRB */
da6b5335 1384 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1385 }
1386 }
1387 if (i) {
1388 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1389 dead_tmp(tmp);
1390 }
18c9b560
AZ
1391 gen_op_iwmmxt_movq_wRn_M0(wrd);
1392 }
1393 } else {
1394 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1395 tmp = iwmmxt_load_creg(wrd);
1396 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1397 } else {
1398 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1399 tmp = new_tmp();
1400 if (insn & (1 << 8)) {
1401 if (insn & (1 << 22)) { /* WSTRD */
1402 dead_tmp(tmp);
da6b5335 1403 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1404 } else { /* WSTRW wRd */
1405 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1406 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1407 }
1408 } else {
1409 if (insn & (1 << 22)) { /* WSTRH */
1410 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1411 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1412 } else { /* WSTRB */
1413 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1414 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1415 }
1416 }
18c9b560
AZ
1417 }
1418 }
d9968827 1419 dead_tmp(addr);
18c9b560
AZ
1420 return 0;
1421 }
1422
1423 if ((insn & 0x0f000000) != 0x0e000000)
1424 return 1;
1425
1426 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1427 case 0x000: /* WOR */
1428 wrd = (insn >> 12) & 0xf;
1429 rd0 = (insn >> 0) & 0xf;
1430 rd1 = (insn >> 16) & 0xf;
1431 gen_op_iwmmxt_movq_M0_wRn(rd0);
1432 gen_op_iwmmxt_orq_M0_wRn(rd1);
1433 gen_op_iwmmxt_setpsr_nz();
1434 gen_op_iwmmxt_movq_wRn_M0(wrd);
1435 gen_op_iwmmxt_set_mup();
1436 gen_op_iwmmxt_set_cup();
1437 break;
1438 case 0x011: /* TMCR */
1439 if (insn & 0xf)
1440 return 1;
1441 rd = (insn >> 12) & 0xf;
1442 wrd = (insn >> 16) & 0xf;
1443 switch (wrd) {
1444 case ARM_IWMMXT_wCID:
1445 case ARM_IWMMXT_wCASF:
1446 break;
1447 case ARM_IWMMXT_wCon:
1448 gen_op_iwmmxt_set_cup();
1449 /* Fall through. */
1450 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1451 tmp = iwmmxt_load_creg(wrd);
1452 tmp2 = load_reg(s, rd);
f669df27 1453 tcg_gen_andc_i32(tmp, tmp, tmp2);
da6b5335
FN
1454 dead_tmp(tmp2);
1455 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1456 break;
1457 case ARM_IWMMXT_wCGR0:
1458 case ARM_IWMMXT_wCGR1:
1459 case ARM_IWMMXT_wCGR2:
1460 case ARM_IWMMXT_wCGR3:
1461 gen_op_iwmmxt_set_cup();
da6b5335
FN
1462 tmp = load_reg(s, rd);
1463 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1464 break;
1465 default:
1466 return 1;
1467 }
1468 break;
1469 case 0x100: /* WXOR */
1470 wrd = (insn >> 12) & 0xf;
1471 rd0 = (insn >> 0) & 0xf;
1472 rd1 = (insn >> 16) & 0xf;
1473 gen_op_iwmmxt_movq_M0_wRn(rd0);
1474 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1475 gen_op_iwmmxt_setpsr_nz();
1476 gen_op_iwmmxt_movq_wRn_M0(wrd);
1477 gen_op_iwmmxt_set_mup();
1478 gen_op_iwmmxt_set_cup();
1479 break;
1480 case 0x111: /* TMRC */
1481 if (insn & 0xf)
1482 return 1;
1483 rd = (insn >> 12) & 0xf;
1484 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1485 tmp = iwmmxt_load_creg(wrd);
1486 store_reg(s, rd, tmp);
18c9b560
AZ
1487 break;
1488 case 0x300: /* WANDN */
1489 wrd = (insn >> 12) & 0xf;
1490 rd0 = (insn >> 0) & 0xf;
1491 rd1 = (insn >> 16) & 0xf;
1492 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1493 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1494 gen_op_iwmmxt_andq_M0_wRn(rd1);
1495 gen_op_iwmmxt_setpsr_nz();
1496 gen_op_iwmmxt_movq_wRn_M0(wrd);
1497 gen_op_iwmmxt_set_mup();
1498 gen_op_iwmmxt_set_cup();
1499 break;
1500 case 0x200: /* WAND */
1501 wrd = (insn >> 12) & 0xf;
1502 rd0 = (insn >> 0) & 0xf;
1503 rd1 = (insn >> 16) & 0xf;
1504 gen_op_iwmmxt_movq_M0_wRn(rd0);
1505 gen_op_iwmmxt_andq_M0_wRn(rd1);
1506 gen_op_iwmmxt_setpsr_nz();
1507 gen_op_iwmmxt_movq_wRn_M0(wrd);
1508 gen_op_iwmmxt_set_mup();
1509 gen_op_iwmmxt_set_cup();
1510 break;
1511 case 0x810: case 0xa10: /* WMADD */
1512 wrd = (insn >> 12) & 0xf;
1513 rd0 = (insn >> 0) & 0xf;
1514 rd1 = (insn >> 16) & 0xf;
1515 gen_op_iwmmxt_movq_M0_wRn(rd0);
1516 if (insn & (1 << 21))
1517 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1518 else
1519 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1520 gen_op_iwmmxt_movq_wRn_M0(wrd);
1521 gen_op_iwmmxt_set_mup();
1522 break;
1523 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1524 wrd = (insn >> 12) & 0xf;
1525 rd0 = (insn >> 16) & 0xf;
1526 rd1 = (insn >> 0) & 0xf;
1527 gen_op_iwmmxt_movq_M0_wRn(rd0);
1528 switch ((insn >> 22) & 3) {
1529 case 0:
1530 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1531 break;
1532 case 1:
1533 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1534 break;
1535 case 2:
1536 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1537 break;
1538 case 3:
1539 return 1;
1540 }
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1546 wrd = (insn >> 12) & 0xf;
1547 rd0 = (insn >> 16) & 0xf;
1548 rd1 = (insn >> 0) & 0xf;
1549 gen_op_iwmmxt_movq_M0_wRn(rd0);
1550 switch ((insn >> 22) & 3) {
1551 case 0:
1552 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1553 break;
1554 case 1:
1555 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1556 break;
1557 case 2:
1558 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1559 break;
1560 case 3:
1561 return 1;
1562 }
1563 gen_op_iwmmxt_movq_wRn_M0(wrd);
1564 gen_op_iwmmxt_set_mup();
1565 gen_op_iwmmxt_set_cup();
1566 break;
1567 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1568 wrd = (insn >> 12) & 0xf;
1569 rd0 = (insn >> 16) & 0xf;
1570 rd1 = (insn >> 0) & 0xf;
1571 gen_op_iwmmxt_movq_M0_wRn(rd0);
1572 if (insn & (1 << 22))
1573 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1574 else
1575 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1576 if (!(insn & (1 << 20)))
1577 gen_op_iwmmxt_addl_M0_wRn(wrd);
1578 gen_op_iwmmxt_movq_wRn_M0(wrd);
1579 gen_op_iwmmxt_set_mup();
1580 break;
1581 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1582 wrd = (insn >> 12) & 0xf;
1583 rd0 = (insn >> 16) & 0xf;
1584 rd1 = (insn >> 0) & 0xf;
1585 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1586 if (insn & (1 << 21)) {
1587 if (insn & (1 << 20))
1588 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1589 else
1590 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1591 } else {
1592 if (insn & (1 << 20))
1593 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1594 else
1595 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1596 }
18c9b560
AZ
1597 gen_op_iwmmxt_movq_wRn_M0(wrd);
1598 gen_op_iwmmxt_set_mup();
1599 break;
1600 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1601 wrd = (insn >> 12) & 0xf;
1602 rd0 = (insn >> 16) & 0xf;
1603 rd1 = (insn >> 0) & 0xf;
1604 gen_op_iwmmxt_movq_M0_wRn(rd0);
1605 if (insn & (1 << 21))
1606 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1607 else
1608 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1609 if (!(insn & (1 << 20))) {
e677137d
PB
1610 iwmmxt_load_reg(cpu_V1, wrd);
1611 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1612 }
1613 gen_op_iwmmxt_movq_wRn_M0(wrd);
1614 gen_op_iwmmxt_set_mup();
1615 break;
1616 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1617 wrd = (insn >> 12) & 0xf;
1618 rd0 = (insn >> 16) & 0xf;
1619 rd1 = (insn >> 0) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0);
1621 switch ((insn >> 22) & 3) {
1622 case 0:
1623 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1624 break;
1625 case 1:
1626 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1627 break;
1628 case 2:
1629 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1630 break;
1631 case 3:
1632 return 1;
1633 }
1634 gen_op_iwmmxt_movq_wRn_M0(wrd);
1635 gen_op_iwmmxt_set_mup();
1636 gen_op_iwmmxt_set_cup();
1637 break;
1638 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1639 wrd = (insn >> 12) & 0xf;
1640 rd0 = (insn >> 16) & 0xf;
1641 rd1 = (insn >> 0) & 0xf;
1642 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1643 if (insn & (1 << 22)) {
1644 if (insn & (1 << 20))
1645 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1646 else
1647 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1648 } else {
1649 if (insn & (1 << 20))
1650 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1651 else
1652 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1653 }
18c9b560
AZ
1654 gen_op_iwmmxt_movq_wRn_M0(wrd);
1655 gen_op_iwmmxt_set_mup();
1656 gen_op_iwmmxt_set_cup();
1657 break;
1658 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1659 wrd = (insn >> 12) & 0xf;
1660 rd0 = (insn >> 16) & 0xf;
1661 rd1 = (insn >> 0) & 0xf;
1662 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1663 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1664 tcg_gen_andi_i32(tmp, tmp, 7);
1665 iwmmxt_load_reg(cpu_V1, rd1);
1666 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1667 dead_tmp(tmp);
18c9b560
AZ
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 break;
1671 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1672 if (((insn >> 6) & 3) == 3)
1673 return 1;
18c9b560
AZ
1674 rd = (insn >> 12) & 0xf;
1675 wrd = (insn >> 16) & 0xf;
da6b5335 1676 tmp = load_reg(s, rd);
18c9b560
AZ
1677 gen_op_iwmmxt_movq_M0_wRn(wrd);
1678 switch ((insn >> 6) & 3) {
1679 case 0:
da6b5335
FN
1680 tmp2 = tcg_const_i32(0xff);
1681 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1682 break;
1683 case 1:
da6b5335
FN
1684 tmp2 = tcg_const_i32(0xffff);
1685 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1686 break;
1687 case 2:
da6b5335
FN
1688 tmp2 = tcg_const_i32(0xffffffff);
1689 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1690 break;
da6b5335
FN
1691 default:
1692 TCGV_UNUSED(tmp2);
1693 TCGV_UNUSED(tmp3);
18c9b560 1694 }
da6b5335
FN
1695 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1696 tcg_temp_free(tmp3);
1697 tcg_temp_free(tmp2);
1698 dead_tmp(tmp);
18c9b560
AZ
1699 gen_op_iwmmxt_movq_wRn_M0(wrd);
1700 gen_op_iwmmxt_set_mup();
1701 break;
1702 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1703 rd = (insn >> 12) & 0xf;
1704 wrd = (insn >> 16) & 0xf;
da6b5335 1705 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1706 return 1;
1707 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335 1708 tmp = new_tmp();
18c9b560
AZ
1709 switch ((insn >> 22) & 3) {
1710 case 0:
da6b5335
FN
1711 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1712 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1713 if (insn & 8) {
1714 tcg_gen_ext8s_i32(tmp, tmp);
1715 } else {
1716 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1717 }
1718 break;
1719 case 1:
da6b5335
FN
1720 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1721 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1722 if (insn & 8) {
1723 tcg_gen_ext16s_i32(tmp, tmp);
1724 } else {
1725 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1726 }
1727 break;
1728 case 2:
da6b5335
FN
1729 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1730 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1731 break;
18c9b560 1732 }
da6b5335 1733 store_reg(s, rd, tmp);
18c9b560
AZ
1734 break;
1735 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1736 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1737 return 1;
da6b5335 1738 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1739 switch ((insn >> 22) & 3) {
1740 case 0:
da6b5335 1741 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1742 break;
1743 case 1:
da6b5335 1744 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1745 break;
1746 case 2:
da6b5335 1747 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1748 break;
18c9b560 1749 }
da6b5335
FN
1750 tcg_gen_shli_i32(tmp, tmp, 28);
1751 gen_set_nzcv(tmp);
1752 dead_tmp(tmp);
18c9b560
AZ
1753 break;
1754 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1755 if (((insn >> 6) & 3) == 3)
1756 return 1;
18c9b560
AZ
1757 rd = (insn >> 12) & 0xf;
1758 wrd = (insn >> 16) & 0xf;
da6b5335 1759 tmp = load_reg(s, rd);
18c9b560
AZ
1760 switch ((insn >> 6) & 3) {
1761 case 0:
da6b5335 1762 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1763 break;
1764 case 1:
da6b5335 1765 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1766 break;
1767 case 2:
da6b5335 1768 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1769 break;
18c9b560 1770 }
da6b5335 1771 dead_tmp(tmp);
18c9b560
AZ
1772 gen_op_iwmmxt_movq_wRn_M0(wrd);
1773 gen_op_iwmmxt_set_mup();
1774 break;
1775 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1776 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1777 return 1;
da6b5335
FN
1778 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1779 tmp2 = new_tmp();
1780 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1781 switch ((insn >> 22) & 3) {
1782 case 0:
1783 for (i = 0; i < 7; i ++) {
da6b5335
FN
1784 tcg_gen_shli_i32(tmp2, tmp2, 4);
1785 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1786 }
1787 break;
1788 case 1:
1789 for (i = 0; i < 3; i ++) {
da6b5335
FN
1790 tcg_gen_shli_i32(tmp2, tmp2, 8);
1791 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1792 }
1793 break;
1794 case 2:
da6b5335
FN
1795 tcg_gen_shli_i32(tmp2, tmp2, 16);
1796 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1797 break;
18c9b560 1798 }
da6b5335
FN
1799 gen_set_nzcv(tmp);
1800 dead_tmp(tmp2);
1801 dead_tmp(tmp);
18c9b560
AZ
1802 break;
1803 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1804 wrd = (insn >> 12) & 0xf;
1805 rd0 = (insn >> 16) & 0xf;
1806 gen_op_iwmmxt_movq_M0_wRn(rd0);
1807 switch ((insn >> 22) & 3) {
1808 case 0:
e677137d 1809 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1810 break;
1811 case 1:
e677137d 1812 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1813 break;
1814 case 2:
e677137d 1815 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1816 break;
1817 case 3:
1818 return 1;
1819 }
1820 gen_op_iwmmxt_movq_wRn_M0(wrd);
1821 gen_op_iwmmxt_set_mup();
1822 break;
1823 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1824 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1825 return 1;
da6b5335
FN
1826 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1827 tmp2 = new_tmp();
1828 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1829 switch ((insn >> 22) & 3) {
1830 case 0:
1831 for (i = 0; i < 7; i ++) {
da6b5335
FN
1832 tcg_gen_shli_i32(tmp2, tmp2, 4);
1833 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1834 }
1835 break;
1836 case 1:
1837 for (i = 0; i < 3; i ++) {
da6b5335
FN
1838 tcg_gen_shli_i32(tmp2, tmp2, 8);
1839 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1840 }
1841 break;
1842 case 2:
da6b5335
FN
1843 tcg_gen_shli_i32(tmp2, tmp2, 16);
1844 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1845 break;
18c9b560 1846 }
da6b5335
FN
1847 gen_set_nzcv(tmp);
1848 dead_tmp(tmp2);
1849 dead_tmp(tmp);
18c9b560
AZ
1850 break;
1851 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1852 rd = (insn >> 12) & 0xf;
1853 rd0 = (insn >> 16) & 0xf;
da6b5335 1854 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1855 return 1;
1856 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 1857 tmp = new_tmp();
18c9b560
AZ
1858 switch ((insn >> 22) & 3) {
1859 case 0:
da6b5335 1860 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1861 break;
1862 case 1:
da6b5335 1863 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1864 break;
1865 case 2:
da6b5335 1866 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1867 break;
18c9b560 1868 }
da6b5335 1869 store_reg(s, rd, tmp);
18c9b560
AZ
1870 break;
1871 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1872 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1873 wrd = (insn >> 12) & 0xf;
1874 rd0 = (insn >> 16) & 0xf;
1875 rd1 = (insn >> 0) & 0xf;
1876 gen_op_iwmmxt_movq_M0_wRn(rd0);
1877 switch ((insn >> 22) & 3) {
1878 case 0:
1879 if (insn & (1 << 21))
1880 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1881 else
1882 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1883 break;
1884 case 1:
1885 if (insn & (1 << 21))
1886 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1887 else
1888 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1889 break;
1890 case 2:
1891 if (insn & (1 << 21))
1892 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1893 else
1894 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1895 break;
1896 case 3:
1897 return 1;
1898 }
1899 gen_op_iwmmxt_movq_wRn_M0(wrd);
1900 gen_op_iwmmxt_set_mup();
1901 gen_op_iwmmxt_set_cup();
1902 break;
1903 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1904 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1905 wrd = (insn >> 12) & 0xf;
1906 rd0 = (insn >> 16) & 0xf;
1907 gen_op_iwmmxt_movq_M0_wRn(rd0);
1908 switch ((insn >> 22) & 3) {
1909 case 0:
1910 if (insn & (1 << 21))
1911 gen_op_iwmmxt_unpacklsb_M0();
1912 else
1913 gen_op_iwmmxt_unpacklub_M0();
1914 break;
1915 case 1:
1916 if (insn & (1 << 21))
1917 gen_op_iwmmxt_unpacklsw_M0();
1918 else
1919 gen_op_iwmmxt_unpackluw_M0();
1920 break;
1921 case 2:
1922 if (insn & (1 << 21))
1923 gen_op_iwmmxt_unpacklsl_M0();
1924 else
1925 gen_op_iwmmxt_unpacklul_M0();
1926 break;
1927 case 3:
1928 return 1;
1929 }
1930 gen_op_iwmmxt_movq_wRn_M0(wrd);
1931 gen_op_iwmmxt_set_mup();
1932 gen_op_iwmmxt_set_cup();
1933 break;
1934 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1935 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1936 wrd = (insn >> 12) & 0xf;
1937 rd0 = (insn >> 16) & 0xf;
1938 gen_op_iwmmxt_movq_M0_wRn(rd0);
1939 switch ((insn >> 22) & 3) {
1940 case 0:
1941 if (insn & (1 << 21))
1942 gen_op_iwmmxt_unpackhsb_M0();
1943 else
1944 gen_op_iwmmxt_unpackhub_M0();
1945 break;
1946 case 1:
1947 if (insn & (1 << 21))
1948 gen_op_iwmmxt_unpackhsw_M0();
1949 else
1950 gen_op_iwmmxt_unpackhuw_M0();
1951 break;
1952 case 2:
1953 if (insn & (1 << 21))
1954 gen_op_iwmmxt_unpackhsl_M0();
1955 else
1956 gen_op_iwmmxt_unpackhul_M0();
1957 break;
1958 case 3:
1959 return 1;
1960 }
1961 gen_op_iwmmxt_movq_wRn_M0(wrd);
1962 gen_op_iwmmxt_set_mup();
1963 gen_op_iwmmxt_set_cup();
1964 break;
1965 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1966 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1967 if (((insn >> 22) & 3) == 0)
1968 return 1;
18c9b560
AZ
1969 wrd = (insn >> 12) & 0xf;
1970 rd0 = (insn >> 16) & 0xf;
1971 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1972 tmp = new_tmp();
1973 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1974 dead_tmp(tmp);
18c9b560 1975 return 1;
da6b5335 1976 }
18c9b560 1977 switch ((insn >> 22) & 3) {
18c9b560 1978 case 1:
da6b5335 1979 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1980 break;
1981 case 2:
da6b5335 1982 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1983 break;
1984 case 3:
da6b5335 1985 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
1986 break;
1987 }
da6b5335 1988 dead_tmp(tmp);
18c9b560
AZ
1989 gen_op_iwmmxt_movq_wRn_M0(wrd);
1990 gen_op_iwmmxt_set_mup();
1991 gen_op_iwmmxt_set_cup();
1992 break;
1993 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1994 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1995 if (((insn >> 22) & 3) == 0)
1996 return 1;
18c9b560
AZ
1997 wrd = (insn >> 12) & 0xf;
1998 rd0 = (insn >> 16) & 0xf;
1999 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2000 tmp = new_tmp();
2001 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2002 dead_tmp(tmp);
18c9b560 2003 return 1;
da6b5335 2004 }
18c9b560 2005 switch ((insn >> 22) & 3) {
18c9b560 2006 case 1:
da6b5335 2007 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2008 break;
2009 case 2:
da6b5335 2010 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2011 break;
2012 case 3:
da6b5335 2013 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2014 break;
2015 }
da6b5335 2016 dead_tmp(tmp);
18c9b560
AZ
2017 gen_op_iwmmxt_movq_wRn_M0(wrd);
2018 gen_op_iwmmxt_set_mup();
2019 gen_op_iwmmxt_set_cup();
2020 break;
2021 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2022 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2023 if (((insn >> 22) & 3) == 0)
2024 return 1;
18c9b560
AZ
2025 wrd = (insn >> 12) & 0xf;
2026 rd0 = (insn >> 16) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2028 tmp = new_tmp();
2029 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2030 dead_tmp(tmp);
18c9b560 2031 return 1;
da6b5335 2032 }
18c9b560 2033 switch ((insn >> 22) & 3) {
18c9b560 2034 case 1:
da6b5335 2035 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2036 break;
2037 case 2:
da6b5335 2038 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2039 break;
2040 case 3:
da6b5335 2041 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2042 break;
2043 }
da6b5335 2044 dead_tmp(tmp);
18c9b560
AZ
2045 gen_op_iwmmxt_movq_wRn_M0(wrd);
2046 gen_op_iwmmxt_set_mup();
2047 gen_op_iwmmxt_set_cup();
2048 break;
2049 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2050 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2051 if (((insn >> 22) & 3) == 0)
2052 return 1;
18c9b560
AZ
2053 wrd = (insn >> 12) & 0xf;
2054 rd0 = (insn >> 16) & 0xf;
2055 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2056 tmp = new_tmp();
18c9b560 2057 switch ((insn >> 22) & 3) {
18c9b560 2058 case 1:
da6b5335
FN
2059 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2060 dead_tmp(tmp);
18c9b560 2061 return 1;
da6b5335
FN
2062 }
2063 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2064 break;
2065 case 2:
da6b5335
FN
2066 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2067 dead_tmp(tmp);
18c9b560 2068 return 1;
da6b5335
FN
2069 }
2070 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2071 break;
2072 case 3:
da6b5335
FN
2073 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2074 dead_tmp(tmp);
18c9b560 2075 return 1;
da6b5335
FN
2076 }
2077 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2078 break;
2079 }
da6b5335 2080 dead_tmp(tmp);
18c9b560
AZ
2081 gen_op_iwmmxt_movq_wRn_M0(wrd);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2084 break;
2085 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2086 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 rd1 = (insn >> 0) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 switch ((insn >> 22) & 3) {
2092 case 0:
2093 if (insn & (1 << 21))
2094 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2095 else
2096 gen_op_iwmmxt_minub_M0_wRn(rd1);
2097 break;
2098 case 1:
2099 if (insn & (1 << 21))
2100 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2101 else
2102 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2103 break;
2104 case 2:
2105 if (insn & (1 << 21))
2106 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2107 else
2108 gen_op_iwmmxt_minul_M0_wRn(rd1);
2109 break;
2110 case 3:
2111 return 1;
2112 }
2113 gen_op_iwmmxt_movq_wRn_M0(wrd);
2114 gen_op_iwmmxt_set_mup();
2115 break;
2116 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2117 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2118 wrd = (insn >> 12) & 0xf;
2119 rd0 = (insn >> 16) & 0xf;
2120 rd1 = (insn >> 0) & 0xf;
2121 gen_op_iwmmxt_movq_M0_wRn(rd0);
2122 switch ((insn >> 22) & 3) {
2123 case 0:
2124 if (insn & (1 << 21))
2125 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2126 else
2127 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2128 break;
2129 case 1:
2130 if (insn & (1 << 21))
2131 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2132 else
2133 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2134 break;
2135 case 2:
2136 if (insn & (1 << 21))
2137 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2138 else
2139 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2140 break;
2141 case 3:
2142 return 1;
2143 }
2144 gen_op_iwmmxt_movq_wRn_M0(wrd);
2145 gen_op_iwmmxt_set_mup();
2146 break;
2147 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2148 case 0x402: case 0x502: case 0x602: case 0x702:
2149 wrd = (insn >> 12) & 0xf;
2150 rd0 = (insn >> 16) & 0xf;
2151 rd1 = (insn >> 0) & 0xf;
2152 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2153 tmp = tcg_const_i32((insn >> 20) & 3);
2154 iwmmxt_load_reg(cpu_V1, rd1);
2155 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2156 tcg_temp_free(tmp);
18c9b560
AZ
2157 gen_op_iwmmxt_movq_wRn_M0(wrd);
2158 gen_op_iwmmxt_set_mup();
2159 break;
2160 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2161 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2162 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2163 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2164 wrd = (insn >> 12) & 0xf;
2165 rd0 = (insn >> 16) & 0xf;
2166 rd1 = (insn >> 0) & 0xf;
2167 gen_op_iwmmxt_movq_M0_wRn(rd0);
2168 switch ((insn >> 20) & 0xf) {
2169 case 0x0:
2170 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2171 break;
2172 case 0x1:
2173 gen_op_iwmmxt_subub_M0_wRn(rd1);
2174 break;
2175 case 0x3:
2176 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2177 break;
2178 case 0x4:
2179 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2180 break;
2181 case 0x5:
2182 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2183 break;
2184 case 0x7:
2185 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2186 break;
2187 case 0x8:
2188 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2189 break;
2190 case 0x9:
2191 gen_op_iwmmxt_subul_M0_wRn(rd1);
2192 break;
2193 case 0xb:
2194 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2195 break;
2196 default:
2197 return 1;
2198 }
2199 gen_op_iwmmxt_movq_wRn_M0(wrd);
2200 gen_op_iwmmxt_set_mup();
2201 gen_op_iwmmxt_set_cup();
2202 break;
2203 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2204 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2205 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2206 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2207 wrd = (insn >> 12) & 0xf;
2208 rd0 = (insn >> 16) & 0xf;
2209 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2210 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2211 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2212 tcg_temp_free(tmp);
18c9b560
AZ
2213 gen_op_iwmmxt_movq_wRn_M0(wrd);
2214 gen_op_iwmmxt_set_mup();
2215 gen_op_iwmmxt_set_cup();
2216 break;
2217 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2218 case 0x418: case 0x518: case 0x618: case 0x718:
2219 case 0x818: case 0x918: case 0xa18: case 0xb18:
2220 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2221 wrd = (insn >> 12) & 0xf;
2222 rd0 = (insn >> 16) & 0xf;
2223 rd1 = (insn >> 0) & 0xf;
2224 gen_op_iwmmxt_movq_M0_wRn(rd0);
2225 switch ((insn >> 20) & 0xf) {
2226 case 0x0:
2227 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2228 break;
2229 case 0x1:
2230 gen_op_iwmmxt_addub_M0_wRn(rd1);
2231 break;
2232 case 0x3:
2233 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2234 break;
2235 case 0x4:
2236 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2237 break;
2238 case 0x5:
2239 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2240 break;
2241 case 0x7:
2242 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2243 break;
2244 case 0x8:
2245 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2246 break;
2247 case 0x9:
2248 gen_op_iwmmxt_addul_M0_wRn(rd1);
2249 break;
2250 case 0xb:
2251 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2252 break;
2253 default:
2254 return 1;
2255 }
2256 gen_op_iwmmxt_movq_wRn_M0(wrd);
2257 gen_op_iwmmxt_set_mup();
2258 gen_op_iwmmxt_set_cup();
2259 break;
2260 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2261 case 0x408: case 0x508: case 0x608: case 0x708:
2262 case 0x808: case 0x908: case 0xa08: case 0xb08:
2263 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2264 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2265 return 1;
18c9b560
AZ
2266 wrd = (insn >> 12) & 0xf;
2267 rd0 = (insn >> 16) & 0xf;
2268 rd1 = (insn >> 0) & 0xf;
2269 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2270 switch ((insn >> 22) & 3) {
18c9b560
AZ
2271 case 1:
2272 if (insn & (1 << 21))
2273 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2274 else
2275 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2276 break;
2277 case 2:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_packul_M0_wRn(rd1);
2282 break;
2283 case 3:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2288 break;
2289 }
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x201: case 0x203: case 0x205: case 0x207:
2295 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2296 case 0x211: case 0x213: case 0x215: case 0x217:
2297 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2298 wrd = (insn >> 5) & 0xf;
2299 rd0 = (insn >> 12) & 0xf;
2300 rd1 = (insn >> 0) & 0xf;
2301 if (rd0 == 0xf || rd1 == 0xf)
2302 return 1;
2303 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2304 tmp = load_reg(s, rd0);
2305 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2306 switch ((insn >> 16) & 0xf) {
2307 case 0x0: /* TMIA */
da6b5335 2308 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2309 break;
2310 case 0x8: /* TMIAPH */
da6b5335 2311 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2312 break;
2313 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2314 if (insn & (1 << 16))
da6b5335 2315 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2316 if (insn & (1 << 17))
da6b5335
FN
2317 tcg_gen_shri_i32(tmp2, tmp2, 16);
2318 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2319 break;
2320 default:
da6b5335
FN
2321 dead_tmp(tmp2);
2322 dead_tmp(tmp);
18c9b560
AZ
2323 return 1;
2324 }
da6b5335
FN
2325 dead_tmp(tmp2);
2326 dead_tmp(tmp);
18c9b560
AZ
2327 gen_op_iwmmxt_movq_wRn_M0(wrd);
2328 gen_op_iwmmxt_set_mup();
2329 break;
2330 default:
2331 return 1;
2332 }
2333
2334 return 0;
2335}
2336
 2337/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
 2338 (i.e. an undefined instruction). */
2339static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2340{
2341 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2342 TCGv tmp, tmp2;
18c9b560
AZ
2343
2344 if ((insn & 0x0ff00f10) == 0x0e200010) {
2345 /* Multiply with Internal Accumulate Format */
2346 rd0 = (insn >> 12) & 0xf;
2347 rd1 = insn & 0xf;
2348 acc = (insn >> 5) & 7;
2349
2350 if (acc != 0)
2351 return 1;
2352
3a554c0f
FN
2353 tmp = load_reg(s, rd0);
2354 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2355 switch ((insn >> 16) & 0xf) {
2356 case 0x0: /* MIA */
3a554c0f 2357 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2358 break;
2359 case 0x8: /* MIAPH */
3a554c0f 2360 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2361 break;
2362 case 0xc: /* MIABB */
2363 case 0xd: /* MIABT */
2364 case 0xe: /* MIATB */
2365 case 0xf: /* MIATT */
18c9b560 2366 if (insn & (1 << 16))
3a554c0f 2367 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2368 if (insn & (1 << 17))
3a554c0f
FN
2369 tcg_gen_shri_i32(tmp2, tmp2, 16);
2370 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2371 break;
2372 default:
2373 return 1;
2374 }
3a554c0f
FN
2375 dead_tmp(tmp2);
2376 dead_tmp(tmp);
18c9b560
AZ
2377
2378 gen_op_iwmmxt_movq_wRn_M0(acc);
2379 return 0;
2380 }
2381
2382 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2383 /* Internal Accumulator Access Format */
2384 rdhi = (insn >> 16) & 0xf;
2385 rdlo = (insn >> 12) & 0xf;
2386 acc = insn & 7;
2387
2388 if (acc != 0)
2389 return 1;
2390
2391 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2392 iwmmxt_load_reg(cpu_V0, acc);
2393 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2394 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2395 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2396 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2397 } else { /* MAR */
3a554c0f
FN
2398 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2399 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2400 }
2401 return 0;
2402 }
2403
2404 return 1;
2405}
2406
c1713132
AZ
 2407/* Disassemble a system coprocessor instruction. Return nonzero if the
 2408 instruction is not defined. */
2409static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2410{
b75263d6 2411 TCGv tmp, tmp2;
c1713132
AZ
2412 uint32_t rd = (insn >> 12) & 0xf;
2413 uint32_t cp = (insn >> 8) & 0xf;
2414 if (IS_USER(s)) {
2415 return 1;
2416 }
2417
18c9b560 2418 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2419 if (!env->cp[cp].cp_read)
2420 return 1;
8984bd2e
PB
2421 gen_set_pc_im(s->pc);
2422 tmp = new_tmp();
b75263d6
JR
2423 tmp2 = tcg_const_i32(insn);
2424 gen_helper_get_cp(tmp, cpu_env, tmp2);
2425 tcg_temp_free(tmp2);
8984bd2e 2426 store_reg(s, rd, tmp);
c1713132
AZ
2427 } else {
2428 if (!env->cp[cp].cp_write)
2429 return 1;
8984bd2e
PB
2430 gen_set_pc_im(s->pc);
2431 tmp = load_reg(s, rd);
b75263d6
JR
2432 tmp2 = tcg_const_i32(insn);
2433 gen_helper_set_cp(cpu_env, tmp2, tmp);
2434 tcg_temp_free(tmp2);
a60de947 2435 dead_tmp(tmp);
c1713132
AZ
2436 }
2437 return 0;
2438}
2439
9ee6e8bb
PB
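/* Return nonzero if a user mode cp15 access is permitted: only the c13
   thread ID (TLS) registers and the ISB/DSB/DMB barrier encodings are
   allowed from user mode. */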
2440static int cp15_user_ok(uint32_t insn)
2441{
2442 int cpn = (insn >> 16) & 0xf;
2443 int cpm = insn & 0xf;
2444 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2445
2446 if (cpn == 13 && cpm == 0) {
2447 /* TLS register. */
2448 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2449 return 1;
2450 }
2451 if (cpn == 7) {
2452 /* ISB, DSB, DMB. */
2453 if ((cpm == 5 && op == 4)
2454 || (cpm == 10 && (op == 4 || op == 5)))
2455 return 1;
2456 }
2457 return 0;
2458}
2459
3f26c122
RV
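/* Handle MRC/MCR accesses to the cp15 c13 thread ID (TLS) registers.
   Returns nonzero if the access was handled here. */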
2460static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2461{
2462 TCGv tmp;
2463 int cpn = (insn >> 16) & 0xf;
2464 int cpm = insn & 0xf;
2465 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2466
2467 if (!arm_feature(env, ARM_FEATURE_V6K))
2468 return 0;
2469
2470 if (!(cpn == 13 && cpm == 0))
2471 return 0;
2472
2473 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2474 switch (op) {
2475 case 2:
c5883be2 2476 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2477 break;
2478 case 3:
c5883be2 2479 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2480 break;
2481 case 4:
c5883be2 2482 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2483 break;
2484 default:
3f26c122
RV
2485 return 0;
2486 }
2487 store_reg(s, rd, tmp);
2488
2489 } else {
2490 tmp = load_reg(s, rd);
2491 switch (op) {
2492 case 2:
c5883be2 2493 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2494 break;
2495 case 3:
c5883be2 2496 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2497 break;
2498 case 4:
c5883be2 2499 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2500 break;
2501 default:
c5883be2 2502 dead_tmp(tmp);
3f26c122
RV
2503 return 0;
2504 }
3f26c122
RV
2505 }
2506 return 1;
2507}
2508
b5ff1b31
FB
 2509/* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
 2510 instruction is not defined. */
a90b7318 2511static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2512{
2513 uint32_t rd;
b75263d6 2514 TCGv tmp, tmp2;
b5ff1b31 2515
9ee6e8bb
PB
2516 /* M profile cores use memory mapped registers instead of cp15. */
2517 if (arm_feature(env, ARM_FEATURE_M))
2518 return 1;
2519
2520 if ((insn & (1 << 25)) == 0) {
2521 if (insn & (1 << 20)) {
2522 /* mrrc */
2523 return 1;
2524 }
2525 /* mcrr. Used for block cache operations, so implement as no-op. */
2526 return 0;
2527 }
2528 if ((insn & (1 << 4)) == 0) {
2529 /* cdp */
2530 return 1;
2531 }
2532 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2533 return 1;
2534 }
9332f9da
FB
2535 if ((insn & 0x0fff0fff) == 0x0e070f90
2536 || (insn & 0x0fff0fff) == 0x0e070f58) {
2537 /* Wait for interrupt. */
8984bd2e 2538 gen_set_pc_im(s->pc);
9ee6e8bb 2539 s->is_jmp = DISAS_WFI;
9332f9da
FB
2540 return 0;
2541 }
b5ff1b31 2542 rd = (insn >> 12) & 0xf;
3f26c122
RV
2543
2544 if (cp15_tls_load_store(env, s, insn, rd))
2545 return 0;
2546
b75263d6 2547 tmp2 = tcg_const_i32(insn);
18c9b560 2548 if (insn & ARM_CP_RW_BIT) {
8984bd2e 2549 tmp = new_tmp();
b75263d6 2550 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2551 /* If the destination register is r15 then sets condition codes. */
2552 if (rd != 15)
8984bd2e
PB
2553 store_reg(s, rd, tmp);
2554 else
2555 dead_tmp(tmp);
b5ff1b31 2556 } else {
8984bd2e 2557 tmp = load_reg(s, rd);
b75263d6 2558 gen_helper_set_cp15(cpu_env, tmp2, tmp);
8984bd2e 2559 dead_tmp(tmp);
a90b7318
AZ
2560 /* Normally we would always end the TB here, but Linux
2561 * arch/arm/mach-pxa/sleep.S expects two instructions following
2562 * an MMU enable to execute from cache. Imitate this behaviour. */
2563 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2564 (insn & 0x0fff0fff) != 0x0e010f10)
2565 gen_lookup_tb(s);
b5ff1b31 2566 }
b75263d6 2567 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2568 return 0;
2569}
2570
9ee6e8bb
PB
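/* Extract VFP register numbers from an instruction: an S register is the
   4-bit field with the single extra bit as its LSB; on VFP3 a D register
   uses the extra bit as bit 4, otherwise that bit must be zero. */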
2571#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2572#define VFP_SREG(insn, bigbit, smallbit) \
2573 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2574#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2575 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2576 reg = (((insn) >> (bigbit)) & 0x0f) \
2577 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2578 } else { \
2579 if (insn & (1 << (smallbit))) \
2580 return 1; \
2581 reg = ((insn) >> (bigbit)) & 0x0f; \
2582 }} while (0)
2583
2584#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2585#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2586#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2587#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2588#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2589#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2590
4373f3ce
PB
2591/* Move between integer and VFP cores. */
2592static TCGv gen_vfp_mrs(void)
2593{
2594 TCGv tmp = new_tmp();
2595 tcg_gen_mov_i32(tmp, cpu_F0s);
2596 return tmp;
2597}
2598
2599static void gen_vfp_msr(TCGv tmp)
2600{
2601 tcg_gen_mov_i32(cpu_F0s, tmp);
2602 dead_tmp(tmp);
2603}
2604
9ee6e8bb
PB
2605static inline int
2606vfp_enabled(CPUState * env)
2607{
2608 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2609}
2610
ad69471c
PB
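/* Duplicate a byte or halfword value across all lanes of a 32-bit value. */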
2611static void gen_neon_dup_u8(TCGv var, int shift)
2612{
2613 TCGv tmp = new_tmp();
2614 if (shift)
2615 tcg_gen_shri_i32(var, var, shift);
86831435 2616 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2617 tcg_gen_shli_i32(tmp, var, 8);
2618 tcg_gen_or_i32(var, var, tmp);
2619 tcg_gen_shli_i32(tmp, var, 16);
2620 tcg_gen_or_i32(var, var, tmp);
2621 dead_tmp(tmp);
2622}
2623
2624static void gen_neon_dup_low16(TCGv var)
2625{
2626 TCGv tmp = new_tmp();
86831435 2627 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 dead_tmp(tmp);
2631}
2632
2633static void gen_neon_dup_high16(TCGv var)
2634{
2635 TCGv tmp = new_tmp();
2636 tcg_gen_andi_i32(var, var, 0xffff0000);
2637 tcg_gen_shri_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 dead_tmp(tmp);
2640}
2641
b7bcbe95
FB
 2642/* Disassemble a VFP instruction. Returns nonzero if an error occurred
 2643 (i.e. an undefined instruction). */
2644static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2645{
2646 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2647 int dp, veclen;
312eea9f 2648 TCGv addr;
4373f3ce 2649 TCGv tmp;
ad69471c 2650 TCGv tmp2;
b7bcbe95 2651
40f137e1
PB
2652 if (!arm_feature(env, ARM_FEATURE_VFP))
2653 return 1;
2654
9ee6e8bb
PB
2655 if (!vfp_enabled(env)) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2657 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2658 return 1;
2659 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2660 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2661 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2662 return 1;
2663 }
b7bcbe95
FB
2664 dp = ((insn & 0xf00) == 0xb00);
2665 switch ((insn >> 24) & 0xf) {
2666 case 0xe:
2667 if (insn & (1 << 4)) {
2668 /* single register transfer */
b7bcbe95
FB
2669 rd = (insn >> 12) & 0xf;
2670 if (dp) {
9ee6e8bb
PB
2671 int size;
2672 int pass;
2673
2674 VFP_DREG_N(rn, insn);
2675 if (insn & 0xf)
b7bcbe95 2676 return 1;
9ee6e8bb
PB
2677 if (insn & 0x00c00060
2678 && !arm_feature(env, ARM_FEATURE_NEON))
2679 return 1;
2680
2681 pass = (insn >> 21) & 1;
2682 if (insn & (1 << 22)) {
2683 size = 0;
2684 offset = ((insn >> 5) & 3) * 8;
2685 } else if (insn & (1 << 5)) {
2686 size = 1;
2687 offset = (insn & (1 << 6)) ? 16 : 0;
2688 } else {
2689 size = 2;
2690 offset = 0;
2691 }
18c9b560 2692 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2693 /* vfp->arm */
ad69471c 2694 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2695 switch (size) {
2696 case 0:
9ee6e8bb 2697 if (offset)
ad69471c 2698 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2699 if (insn & (1 << 23))
ad69471c 2700 gen_uxtb(tmp);
9ee6e8bb 2701 else
ad69471c 2702 gen_sxtb(tmp);
9ee6e8bb
PB
2703 break;
2704 case 1:
9ee6e8bb
PB
2705 if (insn & (1 << 23)) {
2706 if (offset) {
ad69471c 2707 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2708 } else {
ad69471c 2709 gen_uxth(tmp);
9ee6e8bb
PB
2710 }
2711 } else {
2712 if (offset) {
ad69471c 2713 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2714 } else {
ad69471c 2715 gen_sxth(tmp);
9ee6e8bb
PB
2716 }
2717 }
2718 break;
2719 case 2:
9ee6e8bb
PB
2720 break;
2721 }
ad69471c 2722 store_reg(s, rd, tmp);
b7bcbe95
FB
2723 } else {
2724 /* arm->vfp */
ad69471c 2725 tmp = load_reg(s, rd);
9ee6e8bb
PB
2726 if (insn & (1 << 23)) {
2727 /* VDUP */
2728 if (size == 0) {
ad69471c 2729 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2730 } else if (size == 1) {
ad69471c 2731 gen_neon_dup_low16(tmp);
9ee6e8bb 2732 }
cbbccffc
PB
2733 for (n = 0; n <= pass * 2; n++) {
2734 tmp2 = new_tmp();
2735 tcg_gen_mov_i32(tmp2, tmp);
2736 neon_store_reg(rn, n, tmp2);
2737 }
2738 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2739 } else {
2740 /* VMOV */
2741 switch (size) {
2742 case 0:
ad69471c
PB
2743 tmp2 = neon_load_reg(rn, pass);
2744 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2745 dead_tmp(tmp2);
9ee6e8bb
PB
2746 break;
2747 case 1:
ad69471c
PB
2748 tmp2 = neon_load_reg(rn, pass);
2749 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2750 dead_tmp(tmp2);
9ee6e8bb
PB
2751 break;
2752 case 2:
9ee6e8bb
PB
2753 break;
2754 }
ad69471c 2755 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2756 }
b7bcbe95 2757 }
9ee6e8bb
PB
2758 } else { /* !dp */
2759 if ((insn & 0x6f) != 0x00)
2760 return 1;
2761 rn = VFP_SREG_N(insn);
18c9b560 2762 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2763 /* vfp->arm */
2764 if (insn & (1 << 21)) {
2765 /* system register */
40f137e1 2766 rn >>= 1;
9ee6e8bb 2767
b7bcbe95 2768 switch (rn) {
40f137e1 2769 case ARM_VFP_FPSID:
4373f3ce 2770 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2771 VFP3 restricts all id registers to privileged
2772 accesses. */
2773 if (IS_USER(s)
2774 && arm_feature(env, ARM_FEATURE_VFP3))
2775 return 1;
4373f3ce 2776 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2777 break;
40f137e1 2778 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2779 if (IS_USER(s))
2780 return 1;
4373f3ce 2781 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2782 break;
40f137e1
PB
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2785 /* Not present in VFP3. */
2786 if (IS_USER(s)
2787 || arm_feature(env, ARM_FEATURE_VFP3))
2788 return 1;
4373f3ce 2789 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2790 break;
40f137e1 2791 case ARM_VFP_FPSCR:
601d70b9 2792 if (rd == 15) {
4373f3ce
PB
2793 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2794 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2795 } else {
2796 tmp = new_tmp();
2797 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2798 }
b7bcbe95 2799 break;
9ee6e8bb
PB
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 if (IS_USER(s)
2803 || !arm_feature(env, ARM_FEATURE_VFP3))
2804 return 1;
4373f3ce 2805 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2806 break;
b7bcbe95
FB
2807 default:
2808 return 1;
2809 }
2810 } else {
2811 gen_mov_F0_vreg(0, rn);
4373f3ce 2812 tmp = gen_vfp_mrs();
b7bcbe95
FB
2813 }
2814 if (rd == 15) {
b5ff1b31 2815 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2816 gen_set_nzcv(tmp);
2817 dead_tmp(tmp);
2818 } else {
2819 store_reg(s, rd, tmp);
2820 }
b7bcbe95
FB
2821 } else {
2822 /* arm->vfp */
4373f3ce 2823 tmp = load_reg(s, rd);
b7bcbe95 2824 if (insn & (1 << 21)) {
40f137e1 2825 rn >>= 1;
b7bcbe95
FB
2826 /* system register */
2827 switch (rn) {
40f137e1 2828 case ARM_VFP_FPSID:
9ee6e8bb
PB
2829 case ARM_VFP_MVFR0:
2830 case ARM_VFP_MVFR1:
b7bcbe95
FB
2831 /* Writes are ignored. */
2832 break;
40f137e1 2833 case ARM_VFP_FPSCR:
4373f3ce
PB
2834 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2835 dead_tmp(tmp);
b5ff1b31 2836 gen_lookup_tb(s);
b7bcbe95 2837 break;
40f137e1 2838 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2839 if (IS_USER(s))
2840 return 1;
71b3c3de
JR
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only */
2843 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2844 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2845 gen_lookup_tb(s);
2846 break;
2847 case ARM_VFP_FPINST:
2848 case ARM_VFP_FPINST2:
4373f3ce 2849 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2850 break;
b7bcbe95
FB
2851 default:
2852 return 1;
2853 }
2854 } else {
4373f3ce 2855 gen_vfp_msr(tmp);
b7bcbe95
FB
2856 gen_mov_vreg_F0(0, rn);
2857 }
2858 }
2859 }
2860 } else {
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2864 if (dp) {
2865 if (op == 15) {
2866 /* rn is opcode */
2867 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2868 } else {
2869 /* rn is register number */
9ee6e8bb 2870 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2871 }
2872
04595bf6 2873 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2874 /* Integer or single precision destination. */
9ee6e8bb 2875 rd = VFP_SREG_D(insn);
b7bcbe95 2876 } else {
9ee6e8bb 2877 VFP_DREG_D(rd, insn);
b7bcbe95 2878 }
04595bf6
PM
2879 if (op == 15 &&
2880 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2881 /* VCVT from int is always from S reg regardless of dp bit.
2882 * VCVT with immediate frac_bits has same format as SREG_M
2883 */
2884 rm = VFP_SREG_M(insn);
b7bcbe95 2885 } else {
9ee6e8bb 2886 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2887 }
2888 } else {
9ee6e8bb 2889 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2890 if (op == 15 && rn == 15) {
2891 /* Double precision destination. */
9ee6e8bb
PB
2892 VFP_DREG_D(rd, insn);
2893 } else {
2894 rd = VFP_SREG_D(insn);
2895 }
04595bf6
PM
2896 /* NB that we implicitly rely on the encoding for the frac_bits
2897 * in VCVT of fixed to float being the same as that of an SREG_M
2898 */
9ee6e8bb 2899 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2900 }
2901
2902 veclen = env->vfp.vec_len;
2903 if (op == 15 && rn > 3)
2904 veclen = 0;
2905
2906 /* Shut up compiler warnings. */
2907 delta_m = 0;
2908 delta_d = 0;
2909 bank_mask = 0;
3b46e624 2910
b7bcbe95
FB
2911 if (veclen > 0) {
2912 if (dp)
2913 bank_mask = 0xc;
2914 else
2915 bank_mask = 0x18;
2916
2917 /* Figure out what type of vector operation this is. */
2918 if ((rd & bank_mask) == 0) {
2919 /* scalar */
2920 veclen = 0;
2921 } else {
2922 if (dp)
2923 delta_d = (env->vfp.vec_stride >> 1) + 1;
2924 else
2925 delta_d = env->vfp.vec_stride + 1;
2926
2927 if ((rm & bank_mask) == 0) {
2928 /* mixed scalar/vector */
2929 delta_m = 0;
2930 } else {
2931 /* vector */
2932 delta_m = delta_d;
2933 }
2934 }
2935 }
2936
2937 /* Load the initial operands. */
2938 if (op == 15) {
2939 switch (rn) {
2940 case 16:
2941 case 17:
2942 /* Integer source */
2943 gen_mov_F0_vreg(0, rm);
2944 break;
2945 case 8:
2946 case 9:
2947 /* Compare */
2948 gen_mov_F0_vreg(dp, rd);
2949 gen_mov_F1_vreg(dp, rm);
2950 break;
2951 case 10:
2952 case 11:
2953 /* Compare with zero */
2954 gen_mov_F0_vreg(dp, rd);
2955 gen_vfp_F1_ld0(dp);
2956 break;
9ee6e8bb
PB
2957 case 20:
2958 case 21:
2959 case 22:
2960 case 23:
644ad806
PB
2961 case 28:
2962 case 29:
2963 case 30:
2964 case 31:
9ee6e8bb
PB
2965 /* Source and destination the same. */
2966 gen_mov_F0_vreg(dp, rd);
2967 break;
b7bcbe95
FB
2968 default:
2969 /* One source operand. */
2970 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2971 break;
b7bcbe95
FB
2972 }
2973 } else {
2974 /* Two source operands. */
2975 gen_mov_F0_vreg(dp, rn);
2976 gen_mov_F1_vreg(dp, rm);
2977 }
2978
2979 for (;;) {
2980 /* Perform the calculation. */
2981 switch (op) {
2982 case 0: /* mac: fd + (fn * fm) */
2983 gen_vfp_mul(dp);
2984 gen_mov_F1_vreg(dp, rd);
2985 gen_vfp_add(dp);
2986 break;
2987 case 1: /* nmac: fd - (fn * fm) */
2988 gen_vfp_mul(dp);
2989 gen_vfp_neg(dp);
2990 gen_mov_F1_vreg(dp, rd);
2991 gen_vfp_add(dp);
2992 break;
2993 case 2: /* msc: -fd + (fn * fm) */
2994 gen_vfp_mul(dp);
2995 gen_mov_F1_vreg(dp, rd);
2996 gen_vfp_sub(dp);
2997 break;
2998 case 3: /* nmsc: -fd - (fn * fm) */
2999 gen_vfp_mul(dp);
b7bcbe95 3000 gen_vfp_neg(dp);
c9fb531a
PB
3001 gen_mov_F1_vreg(dp, rd);
3002 gen_vfp_sub(dp);
b7bcbe95
FB
3003 break;
3004 case 4: /* mul: fn * fm */
3005 gen_vfp_mul(dp);
3006 break;
3007 case 5: /* nmul: -(fn * fm) */
3008 gen_vfp_mul(dp);
3009 gen_vfp_neg(dp);
3010 break;
3011 case 6: /* add: fn + fm */
3012 gen_vfp_add(dp);
3013 break;
3014 case 7: /* sub: fn - fm */
3015 gen_vfp_sub(dp);
3016 break;
3017 case 8: /* div: fn / fm */
3018 gen_vfp_div(dp);
3019 break;
9ee6e8bb
PB
3020 case 14: /* fconst */
3021 if (!arm_feature(env, ARM_FEATURE_VFP3))
3022 return 1;
3023
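                    /* Expand the VFPv3 8-bit immediate (sign in bit 19,
                       exponent/fraction in bits 18:16 and 3:0) into the
                       top bits of a single or double constant. */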
3024 n = (insn << 12) & 0x80000000;
3025 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3026 if (dp) {
3027 if (i & 0x40)
3028 i |= 0x3f80;
3029 else
3030 i |= 0x4000;
3031 n |= i << 16;
4373f3ce 3032 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3033 } else {
3034 if (i & 0x40)
3035 i |= 0x780;
3036 else
3037 i |= 0x800;
3038 n |= i << 19;
5b340b51 3039 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3040 }
9ee6e8bb 3041 break;
b7bcbe95
FB
3042 case 15: /* extension space */
3043 switch (rn) {
3044 case 0: /* cpy */
3045 /* no-op */
3046 break;
3047 case 1: /* abs */
3048 gen_vfp_abs(dp);
3049 break;
3050 case 2: /* neg */
3051 gen_vfp_neg(dp);
3052 break;
3053 case 3: /* sqrt */
3054 gen_vfp_sqrt(dp);
3055 break;
60011498
PB
3056 case 4: /* vcvtb.f32.f16 */
3057 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3058 return 1;
3059 tmp = gen_vfp_mrs();
3060 tcg_gen_ext16u_i32(tmp, tmp);
3061 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3062 dead_tmp(tmp);
3063 break;
3064 case 5: /* vcvtt.f32.f16 */
3065 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3066 return 1;
3067 tmp = gen_vfp_mrs();
3068 tcg_gen_shri_i32(tmp, tmp, 16);
3069 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3070 dead_tmp(tmp);
3071 break;
3072 case 6: /* vcvtb.f16.f32 */
3073 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3074 return 1;
3075 tmp = new_tmp();
3076 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3077 gen_mov_F0_vreg(0, rd);
3078 tmp2 = gen_vfp_mrs();
3079 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3080 tcg_gen_or_i32(tmp, tmp, tmp2);
3081 dead_tmp(tmp2);
3082 gen_vfp_msr(tmp);
3083 break;
3084 case 7: /* vcvtt.f16.f32 */
3085 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3086 return 1;
3087 tmp = new_tmp();
3088 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3089 tcg_gen_shli_i32(tmp, tmp, 16);
3090 gen_mov_F0_vreg(0, rd);
3091 tmp2 = gen_vfp_mrs();
3092 tcg_gen_ext16u_i32(tmp2, tmp2);
3093 tcg_gen_or_i32(tmp, tmp, tmp2);
3094 dead_tmp(tmp2);
3095 gen_vfp_msr(tmp);
3096 break;
b7bcbe95
FB
3097 case 8: /* cmp */
3098 gen_vfp_cmp(dp);
3099 break;
3100 case 9: /* cmpe */
3101 gen_vfp_cmpe(dp);
3102 break;
3103 case 10: /* cmpz */
3104 gen_vfp_cmp(dp);
3105 break;
3106 case 11: /* cmpez */
3107 gen_vfp_F1_ld0(dp);
3108 gen_vfp_cmpe(dp);
3109 break;
3110 case 15: /* single<->double conversion */
3111 if (dp)
4373f3ce 3112 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3113 else
4373f3ce 3114 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3115 break;
3116 case 16: /* fuito */
3117 gen_vfp_uito(dp);
3118 break;
3119 case 17: /* fsito */
3120 gen_vfp_sito(dp);
3121 break;
9ee6e8bb
PB
3122 case 20: /* fshto */
3123 if (!arm_feature(env, ARM_FEATURE_VFP3))
3124 return 1;
644ad806 3125 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3126 break;
3127 case 21: /* fslto */
3128 if (!arm_feature(env, ARM_FEATURE_VFP3))
3129 return 1;
644ad806 3130 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3131 break;
3132 case 22: /* fuhto */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
644ad806 3135 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3136 break;
3137 case 23: /* fulto */
3138 if (!arm_feature(env, ARM_FEATURE_VFP3))
3139 return 1;
644ad806 3140 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3141 break;
b7bcbe95
FB
3142 case 24: /* ftoui */
3143 gen_vfp_toui(dp);
3144 break;
3145 case 25: /* ftouiz */
3146 gen_vfp_touiz(dp);
3147 break;
3148 case 26: /* ftosi */
3149 gen_vfp_tosi(dp);
3150 break;
3151 case 27: /* ftosiz */
3152 gen_vfp_tosiz(dp);
3153 break;
9ee6e8bb
PB
3154 case 28: /* ftosh */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
644ad806 3157 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3158 break;
3159 case 29: /* ftosl */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
644ad806 3162 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3163 break;
3164 case 30: /* ftouh */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
644ad806 3167 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3168 break;
3169 case 31: /* ftoul */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
644ad806 3172 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3173 break;
b7bcbe95
FB
3174 default: /* undefined */
3175 printf ("rn:%d\n", rn);
3176 return 1;
3177 }
3178 break;
3179 default: /* undefined */
3180 printf ("op:%d\n", op);
3181 return 1;
3182 }
3183
3184 /* Write back the result. */
3185 if (op == 15 && (rn >= 8 && rn <= 11))
3186 ; /* Comparison, do nothing. */
04595bf6
PM
3187 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3188 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3189 gen_mov_vreg_F0(0, rd);
3190 else if (op == 15 && rn == 15)
3191 /* conversion */
3192 gen_mov_vreg_F0(!dp, rd);
3193 else
3194 gen_mov_vreg_F0(dp, rd);
3195
3196 /* break out of the loop if we have finished */
3197 if (veclen == 0)
3198 break;
3199
3200 if (op == 15 && delta_m == 0) {
3201 /* single source one-many */
3202 while (veclen--) {
3203 rd = ((rd + delta_d) & (bank_mask - 1))
3204 | (rd & bank_mask);
3205 gen_mov_vreg_F0(dp, rd);
3206 }
3207 break;
3208 }
 3209            /* Set up the next operands. */
3210 veclen--;
3211 rd = ((rd + delta_d) & (bank_mask - 1))
3212 | (rd & bank_mask);
3213
3214 if (op == 15) {
3215 /* One source operand. */
3216 rm = ((rm + delta_m) & (bank_mask - 1))
3217 | (rm & bank_mask);
3218 gen_mov_F0_vreg(dp, rm);
3219 } else {
3220 /* Two source operands. */
3221 rn = ((rn + delta_d) & (bank_mask - 1))
3222 | (rn & bank_mask);
3223 gen_mov_F0_vreg(dp, rn);
3224 if (delta_m) {
3225 rm = ((rm + delta_m) & (bank_mask - 1))
3226 | (rm & bank_mask);
3227 gen_mov_F1_vreg(dp, rm);
3228 }
3229 }
3230 }
3231 }
3232 break;
3233 case 0xc:
3234 case 0xd:
9ee6e8bb 3235 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3236 /* two-register transfer */
3237 rn = (insn >> 16) & 0xf;
3238 rd = (insn >> 12) & 0xf;
3239 if (dp) {
9ee6e8bb
PB
3240 VFP_DREG_M(rm, insn);
3241 } else {
3242 rm = VFP_SREG_M(insn);
3243 }
b7bcbe95 3244
18c9b560 3245 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3246 /* vfp->arm */
3247 if (dp) {
4373f3ce
PB
3248 gen_mov_F0_vreg(0, rm * 2);
3249 tmp = gen_vfp_mrs();
3250 store_reg(s, rd, tmp);
3251 gen_mov_F0_vreg(0, rm * 2 + 1);
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rn, tmp);
b7bcbe95
FB
3254 } else {
3255 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3256 tmp = gen_vfp_mrs();
3257 store_reg(s, rn, tmp);
b7bcbe95 3258 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3259 tmp = gen_vfp_mrs();
3260 store_reg(s, rd, tmp);
b7bcbe95
FB
3261 }
3262 } else {
3263 /* arm->vfp */
3264 if (dp) {
4373f3ce
PB
3265 tmp = load_reg(s, rd);
3266 gen_vfp_msr(tmp);
3267 gen_mov_vreg_F0(0, rm * 2);
3268 tmp = load_reg(s, rn);
3269 gen_vfp_msr(tmp);
3270 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3271 } else {
4373f3ce
PB
3272 tmp = load_reg(s, rn);
3273 gen_vfp_msr(tmp);
b7bcbe95 3274 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3275 tmp = load_reg(s, rd);
3276 gen_vfp_msr(tmp);
b7bcbe95
FB
3277 gen_mov_vreg_F0(0, rm + 1);
3278 }
3279 }
3280 } else {
3281 /* Load/store */
3282 rn = (insn >> 16) & 0xf;
3283 if (dp)
9ee6e8bb 3284 VFP_DREG_D(rd, insn);
b7bcbe95 3285 else
9ee6e8bb
PB
3286 rd = VFP_SREG_D(insn);
3287 if (s->thumb && rn == 15) {
312eea9f
FN
3288 addr = new_tmp();
3289 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3290 } else {
312eea9f 3291 addr = load_reg(s, rn);
9ee6e8bb 3292 }
b7bcbe95
FB
3293 if ((insn & 0x01200000) == 0x01000000) {
3294 /* Single load/store */
3295 offset = (insn & 0xff) << 2;
3296 if ((insn & (1 << 23)) == 0)
3297 offset = -offset;
312eea9f 3298 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3299 if (insn & (1 << 20)) {
312eea9f 3300 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3301 gen_mov_vreg_F0(dp, rd);
3302 } else {
3303 gen_mov_F0_vreg(dp, rd);
312eea9f 3304 gen_vfp_st(s, dp, addr);
b7bcbe95 3305 }
312eea9f 3306 dead_tmp(addr);
b7bcbe95
FB
3307 } else {
3308 /* load/store multiple */
3309 if (dp)
3310 n = (insn >> 1) & 0x7f;
3311 else
3312 n = insn & 0xff;
3313
3314 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3315 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3316
3317 if (dp)
3318 offset = 8;
3319 else
3320 offset = 4;
3321 for (i = 0; i < n; i++) {
18c9b560 3322 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3323 /* load */
312eea9f 3324 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3325 gen_mov_vreg_F0(dp, rd + i);
3326 } else {
3327 /* store */
3328 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3329 gen_vfp_st(s, dp, addr);
b7bcbe95 3330 }
312eea9f 3331 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3332 }
3333 if (insn & (1 << 21)) {
3334 /* writeback */
3335 if (insn & (1 << 24))
3336 offset = -offset * n;
3337 else if (dp && (insn & 1))
3338 offset = 4;
3339 else
3340 offset = 0;
3341
3342 if (offset != 0)
312eea9f
FN
3343 tcg_gen_addi_i32(addr, addr, offset);
3344 store_reg(s, rn, addr);
3345 } else {
3346 dead_tmp(addr);
b7bcbe95
FB
3347 }
3348 }
3349 }
3350 break;
3351 default:
3352 /* Should never happen. */
3353 return 1;
3354 }
3355 return 0;
3356}
3357
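/* Branch to 'dest': chain directly to the next TB when it lies in the
   same guest page as this one, otherwise set the PC and exit to the
   main loop. */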
6e256c93 3358static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3359{
6e256c93
FB
3360 TranslationBlock *tb;
3361
3362 tb = s->tb;
3363 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3364 tcg_gen_goto_tb(n);
8984bd2e 3365 gen_set_pc_im(dest);
57fec1fe 3366 tcg_gen_exit_tb((long)tb + n);
6e256c93 3367 } else {
8984bd2e 3368 gen_set_pc_im(dest);
57fec1fe 3369 tcg_gen_exit_tb(0);
6e256c93 3370 }
c53be334
FB
3371}
3372
8aaca4c0
FB
3373static inline void gen_jmp (DisasContext *s, uint32_t dest)
3374{
551bd27f 3375 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3376 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3377 if (s->thumb)
d9ba4830
PB
3378 dest |= 1;
3379 gen_bx_im(s, dest);
8aaca4c0 3380 } else {
6e256c93 3381 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3382 s->is_jmp = DISAS_TB_JUMP;
3383 }
3384}
3385
d9ba4830 3386static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3387{
ee097184 3388 if (x)
d9ba4830 3389 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3390 else
d9ba4830 3391 gen_sxth(t0);
ee097184 3392 if (y)
d9ba4830 3393 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3394 else
d9ba4830
PB
3395 gen_sxth(t1);
3396 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3397}
3398
3399/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3400static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3401 uint32_t mask;
3402
3403 mask = 0;
3404 if (flags & (1 << 0))
3405 mask |= 0xff;
3406 if (flags & (1 << 1))
3407 mask |= 0xff00;
3408 if (flags & (1 << 2))
3409 mask |= 0xff0000;
3410 if (flags & (1 << 3))
3411 mask |= 0xff000000;
9ee6e8bb 3412
2ae23e75 3413 /* Mask out undefined bits. */
9ee6e8bb
PB
3414 mask &= ~CPSR_RESERVED;
3415 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3416 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3417 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3418 mask &= ~CPSR_IT;
9ee6e8bb 3419 /* Mask out execution state bits. */
2ae23e75 3420 if (!spsr)
e160c51c 3421 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3422 /* Mask out privileged bits. */
3423 if (IS_USER(s))
9ee6e8bb 3424 mask &= CPSR_USER;
b5ff1b31
FB
3425 return mask;
3426}
3427
2fbac54b
FN
3428/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3429static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3430{
d9ba4830 3431 TCGv tmp;
b5ff1b31
FB
3432 if (spsr) {
3433 /* ??? This is also undefined in system mode. */
3434 if (IS_USER(s))
3435 return 1;
d9ba4830
PB
3436
3437 tmp = load_cpu_field(spsr);
3438 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3439 tcg_gen_andi_i32(t0, t0, mask);
3440 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3441 store_cpu_field(tmp, spsr);
b5ff1b31 3442 } else {
2fbac54b 3443 gen_set_cpsr(t0, mask);
b5ff1b31 3444 }
2fbac54b 3445 dead_tmp(t0);
b5ff1b31
FB
3446 gen_lookup_tb(s);
3447 return 0;
3448}
3449
2fbac54b
FN
3450/* Returns nonzero if access to the PSR is not permitted. */
3451static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3452{
3453 TCGv tmp;
3454 tmp = new_tmp();
3455 tcg_gen_movi_i32(tmp, val);
3456 return gen_set_psr(s, mask, spsr, tmp);
3457}
3458
e9bb4aa9
JR
3459/* Generate an old-style exception return. Marks pc as dead. */
3460static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3461{
d9ba4830 3462 TCGv tmp;
e9bb4aa9 3463 store_reg(s, 15, pc);
d9ba4830
PB
3464 tmp = load_cpu_field(spsr);
3465 gen_set_cpsr(tmp, 0xffffffff);
3466 dead_tmp(tmp);
b5ff1b31
FB
3467 s->is_jmp = DISAS_UPDATE;
3468}
3469
b0109805
PB
3470/* Generate a v6 exception return. Marks both values as dead. */
3471static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3472{
b0109805
PB
3473 gen_set_cpsr(cpsr, 0xffffffff);
3474 dead_tmp(cpsr);
3475 store_reg(s, 15, pc);
9ee6e8bb
PB
3476 s->is_jmp = DISAS_UPDATE;
3477}
3b46e624 3478
9ee6e8bb
PB
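/* Write the current Thumb-2 IT (condexec) state back into the CPU
   state image. */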
3479static inline void
3480gen_set_condexec (DisasContext *s)
3481{
3482 if (s->condexec_mask) {
8f01245e
PB
3483 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3484 TCGv tmp = new_tmp();
3485 tcg_gen_movi_i32(tmp, val);
d9ba4830 3486 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3487 }
3488}
3b46e624 3489
9ee6e8bb
PB
3490static void gen_nop_hint(DisasContext *s, int val)
3491{
3492 switch (val) {
3493 case 3: /* wfi */
8984bd2e 3494 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3495 s->is_jmp = DISAS_WFI;
3496 break;
3497 case 2: /* wfe */
3498 case 4: /* sev */
3499 /* TODO: Implement SEV and WFE. May help SMP performance. */
3500 default: /* nop */
3501 break;
3502 }
3503}
99c475ab 3504
ad69471c 3505#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3506
dd8fbd78 3507static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3508{
3509 switch (size) {
dd8fbd78
FN
3510 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3511 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3512 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3513 default: return 1;
3514 }
3515 return 0;
3516}
3517
dd8fbd78 3518static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3519{
3520 switch (size) {
dd8fbd78
FN
3521 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3522 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3523 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3524 default: return;
3525 }
3526}
3527
3528/* 32-bit pairwise ops end up the same as the elementwise versions. */
3529#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3530#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3531#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3532#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3533
3534/* FIXME: This is wrong. They set the wrong overflow bit. */
3535#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3536#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3537#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3538#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3539
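/* Expand to the signed/unsigned 8/16/32-bit variant of a Neon helper,
   selected by the element size and the 'u' (unsigned) flag. */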
3540#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3541 switch ((size << 1) | u) { \
3542 case 0: \
dd8fbd78 3543 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3544 break; \
3545 case 1: \
dd8fbd78 3546 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3547 break; \
3548 case 2: \
dd8fbd78 3549 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3550 break; \
3551 case 3: \
dd8fbd78 3552 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3553 break; \
3554 case 4: \
dd8fbd78 3555 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3556 break; \
3557 case 5: \
dd8fbd78 3558 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3559 break; \
3560 default: return 1; \
3561 }} while (0)
9ee6e8bb
PB
3562
3563#define GEN_NEON_INTEGER_OP(name) do { \
3564 switch ((size << 1) | u) { \
ad69471c 3565 case 0: \
dd8fbd78 3566 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3567 break; \
3568 case 1: \
dd8fbd78 3569 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3570 break; \
3571 case 2: \
dd8fbd78 3572 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3573 break; \
3574 case 3: \
dd8fbd78 3575 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3576 break; \
3577 case 4: \
dd8fbd78 3578 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3579 break; \
3580 case 5: \
dd8fbd78 3581 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3582 break; \
9ee6e8bb
PB
3583 default: return 1; \
3584 }} while (0)
3585
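/* Spill and reload 32-bit temporaries via the vfp.scratch array in
   CPUARMState. */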
dd8fbd78 3586static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3587{
dd8fbd78
FN
3588 TCGv tmp = new_tmp();
3589 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3590 return tmp;
9ee6e8bb
PB
3591}
3592
dd8fbd78 3593static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3594{
dd8fbd78
FN
3595 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3596 dead_tmp(var);
9ee6e8bb
PB
3597}
3598
dd8fbd78 3599static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3600{
dd8fbd78 3601 TCGv tmp;
9ee6e8bb 3602 if (size == 1) {
dd8fbd78 3603 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3604 } else {
dd8fbd78
FN
3605 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3606 if (reg & 1) {
3607 gen_neon_dup_low16(tmp);
3608 } else {
3609 gen_neon_dup_high16(tmp);
3610 }
9ee6e8bb 3611 }
dd8fbd78 3612 return tmp;
9ee6e8bb
PB
3613}
3614
19457615
FN
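/* Interleave/deinterleave the byte and halfword lanes of two 32-bit
   values; building blocks for the Neon VZIP/VUZP operations. */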
3615static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3616{
3617 TCGv rd, rm, tmp;
3618
3619 rd = new_tmp();
3620 rm = new_tmp();
3621 tmp = new_tmp();
3622
3623 tcg_gen_andi_i32(rd, t0, 0xff);
3624 tcg_gen_shri_i32(tmp, t0, 8);
3625 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3626 tcg_gen_or_i32(rd, rd, tmp);
3627 tcg_gen_shli_i32(tmp, t1, 16);
3628 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3629 tcg_gen_or_i32(rd, rd, tmp);
3630 tcg_gen_shli_i32(tmp, t1, 8);
3631 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3632 tcg_gen_or_i32(rd, rd, tmp);
3633
3634 tcg_gen_shri_i32(rm, t0, 8);
3635 tcg_gen_andi_i32(rm, rm, 0xff);
3636 tcg_gen_shri_i32(tmp, t0, 16);
3637 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3638 tcg_gen_or_i32(rm, rm, tmp);
3639 tcg_gen_shli_i32(tmp, t1, 8);
3640 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3641 tcg_gen_or_i32(rm, rm, tmp);
3642 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3643 tcg_gen_or_i32(t1, rm, tmp);
3644 tcg_gen_mov_i32(t0, rd);
3645
3646 dead_tmp(tmp);
3647 dead_tmp(rm);
3648 dead_tmp(rd);
3649}
3650
3651static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3652{
3653 TCGv rd, rm, tmp;
3654
3655 rd = new_tmp();
3656 rm = new_tmp();
3657 tmp = new_tmp();
3658
3659 tcg_gen_andi_i32(rd, t0, 0xff);
3660 tcg_gen_shli_i32(tmp, t1, 8);
3661 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3662 tcg_gen_or_i32(rd, rd, tmp);
3663 tcg_gen_shli_i32(tmp, t0, 16);
3664 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3665 tcg_gen_or_i32(rd, rd, tmp);
3666 tcg_gen_shli_i32(tmp, t1, 24);
3667 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3668 tcg_gen_or_i32(rd, rd, tmp);
3669
3670 tcg_gen_andi_i32(rm, t1, 0xff000000);
3671 tcg_gen_shri_i32(tmp, t0, 8);
3672 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3673 tcg_gen_or_i32(rm, rm, tmp);
3674 tcg_gen_shri_i32(tmp, t1, 8);
3675 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3676 tcg_gen_or_i32(rm, rm, tmp);
3677 tcg_gen_shri_i32(tmp, t0, 16);
3678 tcg_gen_andi_i32(tmp, tmp, 0xff);
3679 tcg_gen_or_i32(t1, rm, tmp);
3680 tcg_gen_mov_i32(t0, rd);
3681
3682 dead_tmp(tmp);
3683 dead_tmp(rm);
3684 dead_tmp(rd);
3685}
3686
3687static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3688{
3689 TCGv tmp, tmp2;
3690
3691 tmp = new_tmp();
3692 tmp2 = new_tmp();
3693
3694 tcg_gen_andi_i32(tmp, t0, 0xffff);
3695 tcg_gen_shli_i32(tmp2, t1, 16);
3696 tcg_gen_or_i32(tmp, tmp, tmp2);
3697 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3698 tcg_gen_shri_i32(tmp2, t0, 16);
3699 tcg_gen_or_i32(t1, t1, tmp2);
3700 tcg_gen_mov_i32(t0, tmp);
3701
3702 dead_tmp(tmp2);
3703 dead_tmp(tmp);
3704}
3705
9ee6e8bb
PB
3706static void gen_neon_unzip(int reg, int q, int tmp, int size)
3707{
3708 int n;
dd8fbd78 3709 TCGv t0, t1;
9ee6e8bb
PB
3710
3711 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3712 t0 = neon_load_reg(reg, n);
3713 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3714 switch (size) {
dd8fbd78
FN
3715 case 0: gen_neon_unzip_u8(t0, t1); break;
3716 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3717 case 2: /* no-op */; break;
3718 default: abort();
3719 }
dd8fbd78
FN
3720 neon_store_scratch(tmp + n, t0);
3721 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3722 }
3723}
3724
19457615
FN
3725static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3726{
3727 TCGv rd, tmp;
3728
3729 rd = new_tmp();
3730 tmp = new_tmp();
3731
3732 tcg_gen_shli_i32(rd, t0, 8);
3733 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3734 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3735 tcg_gen_or_i32(rd, rd, tmp);
3736
3737 tcg_gen_shri_i32(t1, t1, 8);
3738 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3739 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3740 tcg_gen_or_i32(t1, t1, tmp);
3741 tcg_gen_mov_i32(t0, rd);
3742
3743 dead_tmp(tmp);
3744 dead_tmp(rd);
3745}
3746
3747static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3748{
3749 TCGv rd, tmp;
3750
3751 rd = new_tmp();
3752 tmp = new_tmp();
3753
3754 tcg_gen_shli_i32(rd, t0, 16);
3755 tcg_gen_andi_i32(tmp, t1, 0xffff);
3756 tcg_gen_or_i32(rd, rd, tmp);
3757 tcg_gen_shri_i32(t1, t1, 16);
3758 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3759 tcg_gen_or_i32(t1, t1, tmp);
3760 tcg_gen_mov_i32(t0, rd);
3761
3762 dead_tmp(tmp);
3763 dead_tmp(rd);
3764}
3765
3766
9ee6e8bb
PB
3767static struct {
3768 int nregs;
3769 int interleave;
3770 int spacing;
3771} neon_ls_element_type[11] = {
3772 {4, 4, 1},
3773 {4, 4, 2},
3774 {4, 1, 1},
3775 {4, 2, 1},
3776 {3, 3, 1},
3777 {3, 3, 2},
3778 {3, 1, 1},
3779 {1, 1, 1},
3780 {2, 2, 1},
3781 {2, 2, 2},
3782 {2, 1, 1}
3783};
3784
3785/* Translate a NEON load/store element instruction. Return nonzero if the
3786 instruction is invalid. */
3787static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3788{
3789 int rd, rn, rm;
3790 int op;
3791 int nregs;
3792 int interleave;
84496233 3793 int spacing;
9ee6e8bb
PB
3794 int stride;
3795 int size;
3796 int reg;
3797 int pass;
3798 int load;
3799 int shift;
9ee6e8bb 3800 int n;
1b2b1e54 3801 TCGv addr;
b0109805 3802 TCGv tmp;
8f8e3aa4 3803 TCGv tmp2;
84496233 3804 TCGv_i64 tmp64;
9ee6e8bb
PB
3805
3806 if (!vfp_enabled(env))
3807 return 1;
3808 VFP_DREG_D(rd, insn);
3809 rn = (insn >> 16) & 0xf;
3810 rm = insn & 0xf;
3811 load = (insn & (1 << 21)) != 0;
1b2b1e54 3812 addr = new_tmp();
9ee6e8bb
PB
3813 if ((insn & (1 << 23)) == 0) {
3814 /* Load store all elements. */
3815 op = (insn >> 8) & 0xf;
3816 size = (insn >> 6) & 3;
84496233 3817 if (op > 10)
9ee6e8bb
PB
3818 return 1;
3819 nregs = neon_ls_element_type[op].nregs;
3820 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3821 spacing = neon_ls_element_type[op].spacing;
3822 if (size == 3 && (interleave | spacing) != 1)
3823 return 1;
dcc65026 3824 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3825 stride = (1 << size) * interleave;
3826 for (reg = 0; reg < nregs; reg++) {
3827 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3828 load_reg_var(s, addr, rn);
3829 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3830 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3831 load_reg_var(s, addr, rn);
3832 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3833 }
84496233
JR
3834 if (size == 3) {
3835 if (load) {
3836 tmp64 = gen_ld64(addr, IS_USER(s));
3837 neon_store_reg64(tmp64, rd);
3838 tcg_temp_free_i64(tmp64);
3839 } else {
3840 tmp64 = tcg_temp_new_i64();
3841 neon_load_reg64(tmp64, rd);
3842 gen_st64(tmp64, addr, IS_USER(s));
3843 }
3844 tcg_gen_addi_i32(addr, addr, stride);
3845 } else {
3846 for (pass = 0; pass < 2; pass++) {
3847 if (size == 2) {
3848 if (load) {
3849 tmp = gen_ld32(addr, IS_USER(s));
3850 neon_store_reg(rd, pass, tmp);
3851 } else {
3852 tmp = neon_load_reg(rd, pass);
3853 gen_st32(tmp, addr, IS_USER(s));
3854 }
1b2b1e54 3855 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3856 } else if (size == 1) {
3857 if (load) {
3858 tmp = gen_ld16u(addr, IS_USER(s));
3859 tcg_gen_addi_i32(addr, addr, stride);
3860 tmp2 = gen_ld16u(addr, IS_USER(s));
3861 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3862 tcg_gen_shli_i32(tmp2, tmp2, 16);
3863 tcg_gen_or_i32(tmp, tmp, tmp2);
84496233
JR
3864 dead_tmp(tmp2);
3865 neon_store_reg(rd, pass, tmp);
3866 } else {
3867 tmp = neon_load_reg(rd, pass);
3868 tmp2 = new_tmp();
3869 tcg_gen_shri_i32(tmp2, tmp, 16);
3870 gen_st16(tmp, addr, IS_USER(s));
3871 tcg_gen_addi_i32(addr, addr, stride);
3872 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3873 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3874 }
84496233
JR
3875 } else /* size == 0 */ {
3876 if (load) {
3877 TCGV_UNUSED(tmp2);
3878 for (n = 0; n < 4; n++) {
3879 tmp = gen_ld8u(addr, IS_USER(s));
3880 tcg_gen_addi_i32(addr, addr, stride);
3881 if (n == 0) {
3882 tmp2 = tmp;
3883 } else {
41ba8341
PB
3884 tcg_gen_shli_i32(tmp, tmp, n * 8);
3885 tcg_gen_or_i32(tmp2, tmp2, tmp);
84496233
JR
3886 dead_tmp(tmp);
3887 }
9ee6e8bb 3888 }
84496233
JR
3889 neon_store_reg(rd, pass, tmp2);
3890 } else {
3891 tmp2 = neon_load_reg(rd, pass);
3892 for (n = 0; n < 4; n++) {
3893 tmp = new_tmp();
3894 if (n == 0) {
3895 tcg_gen_mov_i32(tmp, tmp2);
3896 } else {
3897 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3898 }
3899 gen_st8(tmp, addr, IS_USER(s));
3900 tcg_gen_addi_i32(addr, addr, stride);
3901 }
3902 dead_tmp(tmp2);
9ee6e8bb
PB
3903 }
3904 }
3905 }
3906 }
84496233 3907 rd += spacing;
9ee6e8bb
PB
3908 }
3909 stride = nregs * 8;
3910 } else {
3911 size = (insn >> 10) & 3;
3912 if (size == 3) {
3913 /* Load single element to all lanes. */
3914 if (!load)
3915 return 1;
3916 size = (insn >> 6) & 3;
3917 nregs = ((insn >> 8) & 3) + 1;
3918 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3919 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3920 for (reg = 0; reg < nregs; reg++) {
3921 switch (size) {
3922 case 0:
1b2b1e54 3923 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3924 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3925 break;
3926 case 1:
1b2b1e54 3927 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3928 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3929 break;
3930 case 2:
1b2b1e54 3931 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3932 break;
3933 case 3:
3934 return 1;
a50f5b91
PB
3935 default: /* Avoid compiler warnings. */
3936 abort();
99c475ab 3937 }
1b2b1e54 3938 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3939 tmp2 = new_tmp();
3940 tcg_gen_mov_i32(tmp2, tmp);
3941 neon_store_reg(rd, 0, tmp2);
3018f259 3942 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3943 rd += stride;
3944 }
3945 stride = (1 << size) * nregs;
3946 } else {
3947 /* Single element. */
3948 pass = (insn >> 7) & 1;
3949 switch (size) {
3950 case 0:
3951 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3952 stride = 1;
3953 break;
3954 case 1:
3955 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3956 stride = (insn & (1 << 5)) ? 2 : 1;
3957 break;
3958 case 2:
3959 shift = 0;
9ee6e8bb
PB
3960 stride = (insn & (1 << 6)) ? 2 : 1;
3961 break;
3962 default:
3963 abort();
3964 }
3965 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3966 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3967 for (reg = 0; reg < nregs; reg++) {
3968 if (load) {
9ee6e8bb
PB
3969 switch (size) {
3970 case 0:
1b2b1e54 3971 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3972 break;
3973 case 1:
1b2b1e54 3974 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3975 break;
3976 case 2:
1b2b1e54 3977 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3978 break;
a50f5b91
PB
3979 default: /* Avoid compiler warnings. */
3980 abort();
9ee6e8bb
PB
3981 }
3982 if (size != 2) {
8f8e3aa4
PB
3983 tmp2 = neon_load_reg(rd, pass);
3984 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3985 dead_tmp(tmp2);
9ee6e8bb 3986 }
8f8e3aa4 3987 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3988 } else { /* Store */
8f8e3aa4
PB
3989 tmp = neon_load_reg(rd, pass);
3990 if (shift)
3991 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3992 switch (size) {
3993 case 0:
1b2b1e54 3994 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3995 break;
3996 case 1:
1b2b1e54 3997 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3998 break;
3999 case 2:
1b2b1e54 4000 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4001 break;
99c475ab 4002 }
99c475ab 4003 }
9ee6e8bb 4004 rd += stride;
1b2b1e54 4005 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4006 }
9ee6e8bb 4007 stride = nregs * (1 << size);
99c475ab 4008 }
9ee6e8bb 4009 }
1b2b1e54 4010 dead_tmp(addr);
9ee6e8bb 4011 if (rm != 15) {
b26eefb6
PB
4012 TCGv base;
4013
4014 base = load_reg(s, rn);
9ee6e8bb 4015 if (rm == 13) {
b26eefb6 4016 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4017 } else {
b26eefb6
PB
4018 TCGv index;
4019 index = load_reg(s, rm);
4020 tcg_gen_add_i32(base, base, index);
4021 dead_tmp(index);
9ee6e8bb 4022 }
b26eefb6 4023 store_reg(s, rn, base);
9ee6e8bb
PB
4024 }
4025 return 0;
4026}
3b46e624 4027
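/* Added note (not part of the original file): the tail of the function above
   implements the post-indexed writeback forms: rm == 15 means no writeback,
   rm == 13 adds the transfer size (stride) to rn, and any other rm adds the
   value of that index register to rn. */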
8f8e3aa4
PB
4028/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4029static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4030{
4031 tcg_gen_and_i32(t, t, c);
f669df27 4032 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4033 tcg_gen_or_i32(dest, t, f);
4034}
4035
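/* Added note (not part of the original file): a minimal scalar sketch of the
   select above, on plain uint32_t values; the name is illustrative only. */
static inline uint32_t bsl32_sketch(uint32_t c, uint32_t t, uint32_t f)
{
    /* each result bit comes from t where the control bit is 1, else from f */
    return (t & c) | (f & ~c);
    /* e.g. bsl32_sketch(0x0000ffff, 0x11111111, 0x22222222) == 0x22221111 */
}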
a7812ae4 4036static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4037{
4038 switch (size) {
4039 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4040 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4041 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4042 default: abort();
4043 }
4044}
4045
a7812ae4 4046static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4047{
4048 switch (size) {
4049 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4050 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4051 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4052 default: abort();
4053 }
4054}
4055
a7812ae4 4056static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4057{
4058 switch (size) {
4059 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4060 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4061 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4062 default: abort();
4063 }
4064}
4065
4066static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4067 int q, int u)
4068{
4069 if (q) {
4070 if (u) {
4071 switch (size) {
4072 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4073 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4074 default: abort();
4075 }
4076 } else {
4077 switch (size) {
4078 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4079 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4080 default: abort();
4081 }
4082 }
4083 } else {
4084 if (u) {
4085 switch (size) {
4086 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4087 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4088 default: abort();
4089 }
4090 } else {
4091 switch (size) {
4092 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4093 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4094 default: abort();
4095 }
4096 }
4097 }
4098}
4099
a7812ae4 4100static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4101{
4102 if (u) {
4103 switch (size) {
4104 case 0: gen_helper_neon_widen_u8(dest, src); break;
4105 case 1: gen_helper_neon_widen_u16(dest, src); break;
4106 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4107 default: abort();
4108 }
4109 } else {
4110 switch (size) {
4111 case 0: gen_helper_neon_widen_s8(dest, src); break;
4112 case 1: gen_helper_neon_widen_s16(dest, src); break;
4113 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4114 default: abort();
4115 }
4116 }
4117 dead_tmp(src);
4118}
4119
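/* Added note (not part of the original file): a scalar sketch of the
   size == 0 unsigned case above, assuming the neon_widen_u8 helper
   zero-extends each byte of the 32-bit source into a 16-bit lane of the
   64-bit result.  The name is illustrative only. */
static inline uint64_t widen_u8_sketch(uint32_t src)
{
    uint64_t dest = 0;
    int n;
    for (n = 0; n < 4; n++) {
        /* byte n of src becomes 16-bit lane n of dest */
        dest |= (uint64_t)((src >> (n * 8)) & 0xff) << (n * 16);
    }
    return dest;   /* e.g. 0x44332211 -> 0x0044003300220011 */
}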
4120static inline void gen_neon_addl(int size)
4121{
4122 switch (size) {
4123 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4124 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4125 case 2: tcg_gen_add_i64(CPU_V001); break;
4126 default: abort();
4127 }
4128}
4129
4130static inline void gen_neon_subl(int size)
4131{
4132 switch (size) {
4133 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4134 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4135 case 2: tcg_gen_sub_i64(CPU_V001); break;
4136 default: abort();
4137 }
4138}
4139
a7812ae4 4140static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4141{
4142 switch (size) {
4143 case 0: gen_helper_neon_negl_u16(var, var); break;
4144 case 1: gen_helper_neon_negl_u32(var, var); break;
4145 case 2: gen_helper_neon_negl_u64(var, var); break;
4146 default: abort();
4147 }
4148}
4149
a7812ae4 4150static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4151{
4152 switch (size) {
4153 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4154 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4155 default: abort();
4156 }
4157}
4158
a7812ae4 4159static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4160{
a7812ae4 4161 TCGv_i64 tmp;
ad69471c
PB
4162
4163 switch ((size << 1) | u) {
4164 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4165 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4166 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4167 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4168 case 4:
4169 tmp = gen_muls_i64_i32(a, b);
4170 tcg_gen_mov_i64(dest, tmp);
4171 break;
4172 case 5:
4173 tmp = gen_mulu_i64_i32(a, b);
4174 tcg_gen_mov_i64(dest, tmp);
4175 break;
4176 default: abort();
4177 }
ad69471c
PB
4178}
4179
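/* Added note (not part of the original file): the (size << 1) | u switch in
   gen_neon_mull() selects element width and signedness of the widening
   multiply: 0/1 are signed/unsigned bytes, 2/3 signed/unsigned halfwords,
   and 4/5 are full 32-bit multiplies done with gen_muls_i64_i32 /
   gen_mulu_i64_i32.  size == 3 is not a valid input and hits abort(). */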
9ee6e8bb
PB
4180/* Translate a NEON data processing instruction. Return nonzero if the
4181 instruction is invalid.
ad69471c
PB
4182 We process data in a mixture of 32-bit and 64-bit chunks.
4183 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4184
9ee6e8bb
PB
4185static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4186{
4187 int op;
4188 int q;
4189 int rd, rn, rm;
4190 int size;
4191 int shift;
4192 int pass;
4193 int count;
4194 int pairwise;
4195 int u;
4196 int n;
ca9a32e4 4197 uint32_t imm, mask;
b75263d6 4198 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4199 TCGv_i64 tmp64;
9ee6e8bb
PB
4200
4201 if (!vfp_enabled(env))
4202 return 1;
4203 q = (insn & (1 << 6)) != 0;
4204 u = (insn >> 24) & 1;
4205 VFP_DREG_D(rd, insn);
4206 VFP_DREG_N(rn, insn);
4207 VFP_DREG_M(rm, insn);
4208 size = (insn >> 20) & 3;
4209 if ((insn & (1 << 23)) == 0) {
4210 /* Three register same length. */
4211 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4212 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4213 || op == 10 || op == 11 || op == 16)) {
4214 /* 64-bit element instructions. */
9ee6e8bb 4215 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4216 neon_load_reg64(cpu_V0, rn + pass);
4217 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4218 switch (op) {
4219 case 1: /* VQADD */
4220 if (u) {
ad69471c 4221 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4222 } else {
ad69471c 4223 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4224 }
9ee6e8bb
PB
4225 break;
4226 case 5: /* VQSUB */
4227 if (u) {
ad69471c
PB
4228 gen_helper_neon_sub_saturate_u64(CPU_V001);
4229 } else {
4230 gen_helper_neon_sub_saturate_s64(CPU_V001);
4231 }
4232 break;
4233 case 8: /* VSHL */
4234 if (u) {
4235 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4236 } else {
4237 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4238 }
4239 break;
4240 case 9: /* VQSHL */
4241 if (u) {
4243                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4243                                                 cpu_V1, cpu_V0);
4244                    } else {
4245                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4246                                                 cpu_V1, cpu_V0);
4247 }
4248 break;
4249 case 10: /* VRSHL */
4250 if (u) {
4251 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4252 } else {
ad69471c
PB
4253 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4254 }
4255 break;
4256 case 11: /* VQRSHL */
4257 if (u) {
4258 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4259 cpu_V1, cpu_V0);
4260 } else {
4261 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4262 cpu_V1, cpu_V0);
1e8d4eec 4263 }
9ee6e8bb
PB
4264 break;
4265 case 16:
4266 if (u) {
ad69471c 4267 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4268 } else {
ad69471c 4269 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4270 }
4271 break;
4272 default:
4273 abort();
2c0262af 4274 }
ad69471c 4275 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4276 }
9ee6e8bb 4277 return 0;
2c0262af 4278 }
9ee6e8bb
PB
4279 switch (op) {
4280 case 8: /* VSHL */
4281 case 9: /* VQSHL */
4282 case 10: /* VRSHL */
ad69471c 4283 case 11: /* VQRSHL */
9ee6e8bb 4284 {
ad69471c
PB
4285 int rtmp;
4286 /* Shift instruction operands are reversed. */
4287 rtmp = rn;
9ee6e8bb 4288 rn = rm;
ad69471c 4289 rm = rtmp;
9ee6e8bb
PB
4290 pairwise = 0;
4291 }
2c0262af 4292 break;
9ee6e8bb
PB
4293 case 20: /* VPMAX */
4294 case 21: /* VPMIN */
4295 case 23: /* VPADD */
4296 pairwise = 1;
2c0262af 4297 break;
9ee6e8bb
PB
4298 case 26: /* VPADD (float) */
4299 pairwise = (u && size < 2);
2c0262af 4300 break;
9ee6e8bb
PB
4301 case 30: /* VPMIN/VPMAX (float) */
4302 pairwise = u;
2c0262af 4303 break;
9ee6e8bb
PB
4304 default:
4305 pairwise = 0;
2c0262af 4306 break;
9ee6e8bb 4307 }
dd8fbd78 4308
9ee6e8bb
PB
4309 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4310
4311 if (pairwise) {
4312 /* Pairwise. */
4313 if (q)
4314 n = (pass & 1) * 2;
2c0262af 4315 else
9ee6e8bb
PB
4316 n = 0;
4317 if (pass < q + 1) {
dd8fbd78
FN
4318 tmp = neon_load_reg(rn, n);
4319 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4320 } else {
dd8fbd78
FN
4321 tmp = neon_load_reg(rm, n);
4322 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4323 }
4324 } else {
4325 /* Elementwise. */
dd8fbd78
FN
4326 tmp = neon_load_reg(rn, pass);
4327 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4328 }
4329 switch (op) {
4330 case 0: /* VHADD */
4331 GEN_NEON_INTEGER_OP(hadd);
4332 break;
4333 case 1: /* VQADD */
ad69471c 4334 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4335 break;
9ee6e8bb
PB
4336 case 2: /* VRHADD */
4337 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4338 break;
9ee6e8bb
PB
4339 case 3: /* Logic ops. */
4340 switch ((u << 2) | size) {
4341 case 0: /* VAND */
dd8fbd78 4342 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4343 break;
4344 case 1: /* BIC */
f669df27 4345 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4346 break;
4347 case 2: /* VORR */
dd8fbd78 4348 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4349 break;
4350 case 3: /* VORN */
f669df27 4351 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4352 break;
4353 case 4: /* VEOR */
dd8fbd78 4354 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4355 break;
4356 case 5: /* VBSL */
dd8fbd78
FN
4357 tmp3 = neon_load_reg(rd, pass);
4358 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4359 dead_tmp(tmp3);
9ee6e8bb
PB
4360 break;
4361 case 6: /* VBIT */
dd8fbd78
FN
4362 tmp3 = neon_load_reg(rd, pass);
4363 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4364 dead_tmp(tmp3);
9ee6e8bb
PB
4365 break;
4366 case 7: /* VBIF */
dd8fbd78
FN
4367 tmp3 = neon_load_reg(rd, pass);
4368 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4369 dead_tmp(tmp3);
9ee6e8bb 4370 break;
2c0262af
FB
4371 }
4372 break;
9ee6e8bb
PB
4373 case 4: /* VHSUB */
4374 GEN_NEON_INTEGER_OP(hsub);
4375 break;
4376 case 5: /* VQSUB */
ad69471c 4377 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4378 break;
9ee6e8bb
PB
4379 case 6: /* VCGT */
4380 GEN_NEON_INTEGER_OP(cgt);
4381 break;
4382 case 7: /* VCGE */
4383 GEN_NEON_INTEGER_OP(cge);
4384 break;
4385 case 8: /* VSHL */
ad69471c 4386 GEN_NEON_INTEGER_OP(shl);
2c0262af 4387 break;
9ee6e8bb 4388 case 9: /* VQSHL */
ad69471c 4389 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4390 break;
9ee6e8bb 4391 case 10: /* VRSHL */
ad69471c 4392 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4393 break;
9ee6e8bb 4394 case 11: /* VQRSHL */
ad69471c 4395 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4396 break;
4397 case 12: /* VMAX */
4398 GEN_NEON_INTEGER_OP(max);
4399 break;
4400 case 13: /* VMIN */
4401 GEN_NEON_INTEGER_OP(min);
4402 break;
4403 case 14: /* VABD */
4404 GEN_NEON_INTEGER_OP(abd);
4405 break;
4406 case 15: /* VABA */
4407 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4408 dead_tmp(tmp2);
4409 tmp2 = neon_load_reg(rd, pass);
4410 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4411 break;
4412 case 16:
4413 if (!u) { /* VADD */
dd8fbd78 4414 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4415 return 1;
4416 } else { /* VSUB */
4417 switch (size) {
dd8fbd78
FN
4418 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4419 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4420 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4421 default: return 1;
4422 }
4423 }
4424 break;
4425 case 17:
4426 if (!u) { /* VTST */
4427 switch (size) {
dd8fbd78
FN
4428 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4429 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4430 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4431 default: return 1;
4432 }
4433 } else { /* VCEQ */
4434 switch (size) {
dd8fbd78
FN
4435 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4436 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4437 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4438 default: return 1;
4439 }
4440 }
4441 break;
4442 case 18: /* Multiply. */
4443 switch (size) {
dd8fbd78
FN
4444 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4445 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4446 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4447 default: return 1;
4448 }
dd8fbd78
FN
4449 dead_tmp(tmp2);
4450 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4451 if (u) { /* VMLS */
dd8fbd78 4452 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4453 } else { /* VMLA */
dd8fbd78 4454 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4455 }
4456 break;
4457 case 19: /* VMUL */
4458 if (u) { /* polynomial */
dd8fbd78 4459 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4460 } else { /* Integer */
4461 switch (size) {
dd8fbd78
FN
4462 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4463 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4464 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4465 default: return 1;
4466 }
4467 }
4468 break;
4469 case 20: /* VPMAX */
4470 GEN_NEON_INTEGER_OP(pmax);
4471 break;
4472 case 21: /* VPMIN */
4473 GEN_NEON_INTEGER_OP(pmin);
4474 break;
4475            case 22: /* Multiply high. */
4476 if (!u) { /* VQDMULH */
4477 switch (size) {
dd8fbd78
FN
4478 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4479 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4480 default: return 1;
4481 }
4482                } else { /* VQRDMULH */
4483 switch (size) {
dd8fbd78
FN
4484 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4485 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4486 default: return 1;
4487 }
4488 }
4489 break;
4490 case 23: /* VPADD */
4491 if (u)
4492 return 1;
4493 switch (size) {
dd8fbd78
FN
4494 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4495 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4496 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4497 default: return 1;
4498 }
4499 break;
4500            case 26: /* Floating point arithmetic. */
4501 switch ((u << 2) | size) {
4502 case 0: /* VADD */
dd8fbd78 4503 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4504 break;
4505 case 2: /* VSUB */
dd8fbd78 4506 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4507 break;
4508 case 4: /* VPADD */
dd8fbd78 4509 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4510 break;
4511 case 6: /* VABD */
dd8fbd78 4512 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4513 break;
4514 default:
4515 return 1;
4516 }
4517 break;
4518 case 27: /* Float multiply. */
dd8fbd78 4519 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4520 if (!u) {
dd8fbd78
FN
4521 dead_tmp(tmp2);
4522 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4523 if (size == 0) {
dd8fbd78 4524 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4525 } else {
dd8fbd78 4526 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4527 }
4528 }
4529 break;
4530 case 28: /* Float compare. */
4531 if (!u) {
dd8fbd78 4532 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4533 } else {
9ee6e8bb 4534 if (size == 0)
dd8fbd78 4535 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4536 else
dd8fbd78 4537 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4538 }
2c0262af 4539 break;
9ee6e8bb
PB
4540 case 29: /* Float compare absolute. */
4541 if (!u)
4542 return 1;
4543 if (size == 0)
dd8fbd78 4544 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4545 else
dd8fbd78 4546 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4547 break;
9ee6e8bb
PB
4548 case 30: /* Float min/max. */
4549 if (size == 0)
dd8fbd78 4550 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4551 else
dd8fbd78 4552 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4553 break;
4554 case 31:
4555 if (size == 0)
dd8fbd78 4556 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4557 else
dd8fbd78 4558 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4559 break;
9ee6e8bb
PB
4560 default:
4561 abort();
2c0262af 4562 }
dd8fbd78
FN
4563 dead_tmp(tmp2);
4564
9ee6e8bb
PB
4565 /* Save the result. For elementwise operations we can put it
4566 straight into the destination register. For pairwise operations
4567 we have to be careful to avoid clobbering the source operands. */
4568 if (pairwise && rd == rm) {
dd8fbd78 4569 neon_store_scratch(pass, tmp);
9ee6e8bb 4570 } else {
dd8fbd78 4571 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4572 }
4573
4574 } /* for pass */
4575 if (pairwise && rd == rm) {
4576 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4577 tmp = neon_load_scratch(pass);
4578 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4579 }
4580 }
ad69471c 4581 /* End of 3 register same size operations. */
9ee6e8bb
PB
4582 } else if (insn & (1 << 4)) {
4583 if ((insn & 0x00380080) != 0) {
4584 /* Two registers and shift. */
4585 op = (insn >> 8) & 0xf;
4586 if (insn & (1 << 7)) {
4587 /* 64-bit shift. */
4588 size = 3;
4589 } else {
4590 size = 2;
4591 while ((insn & (1 << (size + 19))) == 0)
4592 size--;
4593 }
4594 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4595                /* To avoid excessive duplication of ops we implement shift
4596 by immediate using the variable shift operations. */
4597 if (op < 8) {
4598 /* Shift by immediate:
4599 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4600 /* Right shifts are encoded as N - shift, where N is the
4601 element size in bits. */
4602 if (op <= 4)
4603 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4604 if (size == 3) {
4605 count = q + 1;
4606 } else {
4607 count = q ? 4: 2;
4608 }
4609 switch (size) {
4610 case 0:
4611 imm = (uint8_t) shift;
4612 imm |= imm << 8;
4613 imm |= imm << 16;
4614 break;
4615 case 1:
4616 imm = (uint16_t) shift;
4617 imm |= imm << 16;
4618 break;
4619 case 2:
4620 case 3:
4621 imm = shift;
4622 break;
4623 default:
4624 abort();
4625 }
4626
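                /* Added note (not part of the original file): for the right
                 * shifts (op <= 4) the subtraction above leaves a negative
                 * shift count, which the variable-shift helpers used below
                 * interpret as a right shift; imm is simply that count
                 * replicated into every element.  E.g. size == 0 with an
                 * encoded shift field of 5 gives shift = 5 - 8 = -3 and
                 * imm = 0xfdfdfdfd. */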
4627 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4628 if (size == 3) {
4629 neon_load_reg64(cpu_V0, rm + pass);
4630 tcg_gen_movi_i64(cpu_V1, imm);
4631 switch (op) {
4632 case 0: /* VSHR */
4633 case 1: /* VSRA */
4634 if (u)
4635 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4636 else
ad69471c 4637 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4638 break;
ad69471c
PB
4639 case 2: /* VRSHR */
4640 case 3: /* VRSRA */
4641 if (u)
4642 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4643 else
ad69471c 4644 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4645 break;
ad69471c
PB
4646 case 4: /* VSRI */
4647 if (!u)
4648 return 1;
4649 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4650 break;
4651 case 5: /* VSHL, VSLI */
4652 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4653 break;
4654 case 6: /* VQSHL */
4655 if (u)
4656 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4657 else
ad69471c
PB
4658 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4659 break;
4660 case 7: /* VQSHLU */
4661 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4662 break;
9ee6e8bb 4663 }
ad69471c
PB
4664 if (op == 1 || op == 3) {
4665 /* Accumulate. */
4666                            neon_load_reg64(cpu_V1, rd + pass);
4667 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4668 } else if (op == 4 || (op == 5 && u)) {
4669 /* Insert */
4670 cpu_abort(env, "VS[LR]I.64 not implemented");
4671 }
4672 neon_store_reg64(cpu_V0, rd + pass);
4673 } else { /* size < 3 */
4674                        /* Operands in tmp and tmp2. */
dd8fbd78
FN
4675 tmp = neon_load_reg(rm, pass);
4676 tmp2 = new_tmp();
4677 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4678 switch (op) {
4679 case 0: /* VSHR */
4680 case 1: /* VSRA */
4681 GEN_NEON_INTEGER_OP(shl);
4682 break;
4683 case 2: /* VRSHR */
4684 case 3: /* VRSRA */
4685 GEN_NEON_INTEGER_OP(rshl);
4686 break;
4687 case 4: /* VSRI */
4688 if (!u)
4689 return 1;
4690 GEN_NEON_INTEGER_OP(shl);
4691 break;
4692 case 5: /* VSHL, VSLI */
4693 switch (size) {
dd8fbd78
FN
4694 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4695 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4696 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4697 default: return 1;
4698 }
4699 break;
4700 case 6: /* VQSHL */
4701 GEN_NEON_INTEGER_OP_ENV(qshl);
4702 break;
4703 case 7: /* VQSHLU */
4704 switch (size) {
dd8fbd78
FN
4705 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4706 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4707 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4708 default: return 1;
4709 }
4710 break;
4711 }
dd8fbd78 4712 dead_tmp(tmp2);
ad69471c
PB
4713
4714 if (op == 1 || op == 3) {
4715 /* Accumulate. */
dd8fbd78
FN
4716 tmp2 = neon_load_reg(rd, pass);
4717 gen_neon_add(size, tmp2, tmp);
4718 dead_tmp(tmp2);
ad69471c
PB
4719 } else if (op == 4 || (op == 5 && u)) {
4720 /* Insert */
4721 switch (size) {
4722 case 0:
4723 if (op == 4)
ca9a32e4 4724 mask = 0xff >> -shift;
ad69471c 4725 else
ca9a32e4
JR
4726 mask = (uint8_t)(0xff << shift);
4727 mask |= mask << 8;
4728 mask |= mask << 16;
ad69471c
PB
4729 break;
4730 case 1:
4731 if (op == 4)
ca9a32e4 4732 mask = 0xffff >> -shift;
ad69471c 4733 else
ca9a32e4
JR
4734 mask = (uint16_t)(0xffff << shift);
4735 mask |= mask << 16;
ad69471c
PB
4736 break;
4737 case 2:
ca9a32e4
JR
4738 if (shift < -31 || shift > 31) {
4739 mask = 0;
4740 } else {
4741 if (op == 4)
4742 mask = 0xffffffffu >> -shift;
4743 else
4744 mask = 0xffffffffu << shift;
4745 }
ad69471c
PB
4746 break;
4747 default:
4748 abort();
4749 }
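                            /* Added note (not part of the original file):
                             * mask marks the destination bits that come from
                             * the shifted value; the old destination keeps
                             * the ~mask bits.  E.g. a byte-sized VSRI with
                             * shift = -3 gives mask = 0x1f1f1f1f, so the top
                             * three bits of every byte are preserved. */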
dd8fbd78 4750 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4751 tcg_gen_andi_i32(tmp, tmp, mask);
4752 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78
FN
4753 tcg_gen_or_i32(tmp, tmp, tmp2);
4754 dead_tmp(tmp2);
ad69471c 4755 }
dd8fbd78 4756 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4757 }
4758 } /* for pass */
4759 } else if (op < 10) {
ad69471c 4760 /* Shift by immediate and narrow:
9ee6e8bb
PB
4761 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4762 shift = shift - (1 << (size + 3));
4763 size++;
9ee6e8bb
PB
4764 switch (size) {
4765 case 1:
ad69471c 4766 imm = (uint16_t)shift;
9ee6e8bb 4767 imm |= imm << 16;
ad69471c 4768 tmp2 = tcg_const_i32(imm);
a7812ae4 4769 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4770 break;
4771 case 2:
ad69471c
PB
4772 imm = (uint32_t)shift;
4773 tmp2 = tcg_const_i32(imm);
a7812ae4 4774 TCGV_UNUSED_I64(tmp64);
4cc633c3 4775 break;
9ee6e8bb 4776 case 3:
a7812ae4
PB
4777 tmp64 = tcg_const_i64(shift);
4778 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4779 break;
4780 default:
4781 abort();
4782 }
4783
ad69471c
PB
4784 for (pass = 0; pass < 2; pass++) {
4785 if (size == 3) {
4786 neon_load_reg64(cpu_V0, rm + pass);
4787 if (q) {
4788 if (u)
a7812ae4 4789 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4790 else
a7812ae4 4791 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4792 } else {
4793 if (u)
a7812ae4 4794 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4795 else
a7812ae4 4796 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4797 }
2c0262af 4798 } else {
ad69471c
PB
4799 tmp = neon_load_reg(rm + pass, 0);
4800 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4801 tmp3 = neon_load_reg(rm + pass, 1);
4802 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4803 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4804 dead_tmp(tmp);
36aa55dc 4805 dead_tmp(tmp3);
9ee6e8bb 4806 }
ad69471c
PB
4807 tmp = new_tmp();
4808 if (op == 8 && !u) {
4809 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4810 } else {
ad69471c
PB
4811 if (op == 8)
4812 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4813 else
ad69471c
PB
4814 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4815 }
2301db49 4816 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4817 } /* for pass */
b75263d6
JR
4818 if (size == 3) {
4819 tcg_temp_free_i64(tmp64);
2301db49
JR
4820 } else {
4821 dead_tmp(tmp2);
b75263d6 4822 }
9ee6e8bb
PB
4823 } else if (op == 10) {
4824 /* VSHLL */
ad69471c 4825 if (q || size == 3)
9ee6e8bb 4826 return 1;
ad69471c
PB
4827 tmp = neon_load_reg(rm, 0);
4828 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4829 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4830 if (pass == 1)
4831 tmp = tmp2;
4832
4833 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4834
9ee6e8bb
PB
4835 if (shift != 0) {
4836 /* The shift is less than the width of the source
ad69471c
PB
4837 type, so we can just shift the whole register. */
4838 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4839 if (size < 2 || !u) {
4840 uint64_t imm64;
4841 if (size == 0) {
4842 imm = (0xffu >> (8 - shift));
4843 imm |= imm << 16;
4844 } else {
4845 imm = 0xffff >> (16 - shift);
9ee6e8bb 4846 }
ad69471c
PB
4847 imm64 = imm | (((uint64_t)imm) << 32);
4848 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4849 }
4850 }
ad69471c 4851 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 4852 }
f73534a5 4853 } else if (op >= 14) {
9ee6e8bb 4854 /* VCVT fixed-point. */
f73534a5
PM
4855 /* We have already masked out the must-be-1 top bit of imm6,
4856 * hence this 32-shift where the ARM ARM has 64-imm6.
4857 */
4858 shift = 32 - shift;
9ee6e8bb 4859 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4860 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 4861 if (!(op & 1)) {
9ee6e8bb 4862 if (u)
4373f3ce 4863 gen_vfp_ulto(0, shift);
9ee6e8bb 4864 else
4373f3ce 4865 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4866 } else {
4867 if (u)
4373f3ce 4868 gen_vfp_toul(0, shift);
9ee6e8bb 4869 else
4373f3ce 4870 gen_vfp_tosl(0, shift);
2c0262af 4871 }
4373f3ce 4872 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4873 }
4874 } else {
9ee6e8bb
PB
4875 return 1;
4876 }
4877 } else { /* (insn & 0x00380080) == 0 */
4878 int invert;
4879
4880 op = (insn >> 8) & 0xf;
4881 /* One register and immediate. */
4882 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4883 invert = (insn & (1 << 5)) != 0;
4884 switch (op) {
4885 case 0: case 1:
4886 /* no-op */
4887 break;
4888 case 2: case 3:
4889 imm <<= 8;
4890 break;
4891 case 4: case 5:
4892 imm <<= 16;
4893 break;
4894 case 6: case 7:
4895 imm <<= 24;
4896 break;
4897 case 8: case 9:
4898 imm |= imm << 16;
4899 break;
4900 case 10: case 11:
4901 imm = (imm << 8) | (imm << 24);
4902 break;
4903 case 12:
8e31209e 4904 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
4905 break;
4906 case 13:
4907 imm = (imm << 16) | 0xffff;
4908 break;
4909 case 14:
4910 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4911 if (invert)
4912 imm = ~imm;
4913 break;
4914 case 15:
4915 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4916 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4917 break;
4918 }
4919 if (invert)
4920 imm = ~imm;
4921
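            /* Added note (not part of the original file): at this point imm
             * holds the expanded 32-bit immediate for every case except
             * op == 14 with invert set, where the two inversions (inside the
             * case and just above) cancel out; that form is re-expanded
             * bit-by-bit into 0x00/0xff bytes in the loop below. */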
9ee6e8bb
PB
4922 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4923 if (op & 1 && op < 12) {
ad69471c 4924 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4925 if (invert) {
4926 /* The immediate value has already been inverted, so
4927 BIC becomes AND. */
ad69471c 4928 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4929 } else {
ad69471c 4930 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4931 }
9ee6e8bb 4932 } else {
ad69471c
PB
4933 /* VMOV, VMVN. */
4934 tmp = new_tmp();
9ee6e8bb 4935 if (op == 14 && invert) {
ad69471c
PB
4936 uint32_t val;
4937 val = 0;
9ee6e8bb
PB
4938 for (n = 0; n < 4; n++) {
4939 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4940 val |= 0xff << (n * 8);
9ee6e8bb 4941 }
ad69471c
PB
4942 tcg_gen_movi_i32(tmp, val);
4943 } else {
4944 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4945 }
9ee6e8bb 4946 }
ad69471c 4947 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4948 }
4949 }
e4b3861d 4950 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4951 if (size != 3) {
4952 op = (insn >> 8) & 0xf;
4953 if ((insn & (1 << 6)) == 0) {
4954 /* Three registers of different lengths. */
4955 int src1_wide;
4956 int src2_wide;
4957 int prewiden;
4958 /* prewiden, src1_wide, src2_wide */
4959 static const int neon_3reg_wide[16][3] = {
4960 {1, 0, 0}, /* VADDL */
4961 {1, 1, 0}, /* VADDW */
4962 {1, 0, 0}, /* VSUBL */
4963 {1, 1, 0}, /* VSUBW */
4964 {0, 1, 1}, /* VADDHN */
4965 {0, 0, 0}, /* VABAL */
4966 {0, 1, 1}, /* VSUBHN */
4967 {0, 0, 0}, /* VABDL */
4968 {0, 0, 0}, /* VMLAL */
4969 {0, 0, 0}, /* VQDMLAL */
4970 {0, 0, 0}, /* VMLSL */
4971 {0, 0, 0}, /* VQDMLSL */
4972 {0, 0, 0}, /* Integer VMULL */
4973 {0, 0, 0}, /* VQDMULL */
4974 {0, 0, 0} /* Polynomial VMULL */
4975 };
4976
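                    /* Added note (not part of the original file): in the
                     * table above, src1_wide/src2_wide mean that operand is
                     * already a 64-bit (wide) value and is loaded with
                     * neon_load_reg64, while prewiden means a 32-bit operand
                     * is widened before the operation.  E.g. VADDW is
                     * {1, 1, 0}: a wide first operand plus a prewidened
                     * narrow second operand. */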
4977 prewiden = neon_3reg_wide[op][0];
4978 src1_wide = neon_3reg_wide[op][1];
4979 src2_wide = neon_3reg_wide[op][2];
4980
ad69471c
PB
4981 if (size == 0 && (op == 9 || op == 11 || op == 13))
4982 return 1;
4983
9ee6e8bb
PB
4984 /* Avoid overlapping operands. Wide source operands are
4985 always aligned so will never overlap with wide
4986 destinations in problematic ways. */
8f8e3aa4 4987 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4988 tmp = neon_load_reg(rm, 1);
4989 neon_store_scratch(2, tmp);
8f8e3aa4 4990 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4991 tmp = neon_load_reg(rn, 1);
4992 neon_store_scratch(2, tmp);
9ee6e8bb 4993 }
a50f5b91 4994 TCGV_UNUSED(tmp3);
9ee6e8bb 4995 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4996 if (src1_wide) {
4997 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4998 TCGV_UNUSED(tmp);
9ee6e8bb 4999 } else {
ad69471c 5000 if (pass == 1 && rd == rn) {
dd8fbd78 5001 tmp = neon_load_scratch(2);
9ee6e8bb 5002 } else {
ad69471c
PB
5003 tmp = neon_load_reg(rn, pass);
5004 }
5005 if (prewiden) {
5006 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5007 }
5008 }
ad69471c
PB
5009 if (src2_wide) {
5010 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5011 TCGV_UNUSED(tmp2);
9ee6e8bb 5012 } else {
ad69471c 5013 if (pass == 1 && rd == rm) {
dd8fbd78 5014 tmp2 = neon_load_scratch(2);
9ee6e8bb 5015 } else {
ad69471c
PB
5016 tmp2 = neon_load_reg(rm, pass);
5017 }
5018 if (prewiden) {
5019 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5020 }
9ee6e8bb
PB
5021 }
5022 switch (op) {
5023 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5024 gen_neon_addl(size);
9ee6e8bb 5025 break;
79b0e534 5026 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5027 gen_neon_subl(size);
9ee6e8bb
PB
5028 break;
5029 case 5: case 7: /* VABAL, VABDL */
5030 switch ((size << 1) | u) {
ad69471c
PB
5031 case 0:
5032 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5033 break;
5034 case 1:
5035 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5036 break;
5037 case 2:
5038 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5039 break;
5040 case 3:
5041 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5042 break;
5043 case 4:
5044 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5045 break;
5046 case 5:
5047 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5048 break;
9ee6e8bb
PB
5049 default: abort();
5050 }
ad69471c
PB
5051 dead_tmp(tmp2);
5052 dead_tmp(tmp);
9ee6e8bb
PB
5053 break;
5054 case 8: case 9: case 10: case 11: case 12: case 13:
5055 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5056 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
5057 dead_tmp(tmp2);
5058 dead_tmp(tmp);
9ee6e8bb
PB
5059 break;
5060 case 14: /* Polynomial VMULL */
5061 cpu_abort(env, "Polynomial VMULL not implemented");
5062
5063 default: /* 15 is RESERVED. */
5064 return 1;
5065 }
5066 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5067 /* Accumulate. */
5068 if (op == 10 || op == 11) {
ad69471c 5069 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
5070 }
5071
9ee6e8bb 5072 if (op != 13) {
ad69471c 5073 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
5074 }
5075
5076 switch (op) {
5077 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 5078 gen_neon_addl(size);
9ee6e8bb
PB
5079 break;
5080 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5081 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5082 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5083 break;
9ee6e8bb
PB
5084 /* Fall through. */
5085 case 13: /* VQDMULL */
ad69471c 5086 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5087 break;
5088 default:
5089 abort();
5090 }
ad69471c 5091 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5092 } else if (op == 4 || op == 6) {
5093 /* Narrowing operation. */
ad69471c 5094 tmp = new_tmp();
79b0e534 5095 if (!u) {
9ee6e8bb 5096 switch (size) {
ad69471c
PB
5097 case 0:
5098 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5099 break;
5100 case 1:
5101 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5102 break;
5103 case 2:
5104 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5105 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5106 break;
9ee6e8bb
PB
5107 default: abort();
5108 }
5109 } else {
5110 switch (size) {
ad69471c
PB
5111 case 0:
5112 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5113 break;
5114 case 1:
5115 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5116 break;
5117 case 2:
5118 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5119 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5120 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5121 break;
9ee6e8bb
PB
5122 default: abort();
5123 }
5124 }
ad69471c
PB
5125 if (pass == 0) {
5126 tmp3 = tmp;
5127 } else {
5128 neon_store_reg(rd, 0, tmp3);
5129 neon_store_reg(rd, 1, tmp);
5130 }
9ee6e8bb
PB
5131 } else {
5132 /* Write back the result. */
ad69471c 5133 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5134 }
5135 }
5136 } else {
5137 /* Two registers and a scalar. */
5138 switch (op) {
5139 case 0: /* Integer VMLA scalar */
5140 case 1: /* Float VMLA scalar */
5141 case 4: /* Integer VMLS scalar */
5142 case 5: /* Floating point VMLS scalar */
5143 case 8: /* Integer VMUL scalar */
5144 case 9: /* Floating point VMUL scalar */
5145 case 12: /* VQDMULH scalar */
5146 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5147 tmp = neon_get_scalar(size, rm);
5148 neon_store_scratch(0, tmp);
9ee6e8bb 5149 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5150 tmp = neon_load_scratch(0);
5151 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5152 if (op == 12) {
5153 if (size == 1) {
dd8fbd78 5154 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5155 } else {
dd8fbd78 5156 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5157 }
5158 } else if (op == 13) {
5159 if (size == 1) {
dd8fbd78 5160 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5161 } else {
dd8fbd78 5162 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5163 }
5164 } else if (op & 1) {
dd8fbd78 5165 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5166 } else {
5167 switch (size) {
dd8fbd78
FN
5168 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5169 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5170 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5171 default: return 1;
5172 }
5173 }
dd8fbd78 5174 dead_tmp(tmp2);
9ee6e8bb
PB
5175 if (op < 8) {
5176 /* Accumulate. */
dd8fbd78 5177 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5178 switch (op) {
5179 case 0:
dd8fbd78 5180 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5181 break;
5182 case 1:
dd8fbd78 5183 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5184 break;
5185 case 4:
dd8fbd78 5186 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5187 break;
5188 case 5:
dd8fbd78 5189 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5190 break;
5191 default:
5192 abort();
5193 }
dd8fbd78 5194 dead_tmp(tmp2);
9ee6e8bb 5195 }
dd8fbd78 5196 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5197 }
5198 break;
5199            case 2: /* VMLAL scalar */
5200 case 3: /* VQDMLAL scalar */
5201 case 6: /* VMLSL scalar */
5202 case 7: /* VQDMLSL scalar */
5203 case 10: /* VMULL scalar */
5204 case 11: /* VQDMULL scalar */
ad69471c
PB
5205 if (size == 0 && (op == 3 || op == 7 || op == 11))
5206 return 1;
5207
dd8fbd78
FN
5208 tmp2 = neon_get_scalar(size, rm);
5209 tmp3 = neon_load_reg(rn, 1);
ad69471c 5210
9ee6e8bb 5211 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5212 if (pass == 0) {
5213 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5214 } else {
dd8fbd78 5215 tmp = tmp3;
9ee6e8bb 5216 }
ad69471c 5217 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5218 dead_tmp(tmp);
9ee6e8bb 5219 if (op == 6 || op == 7) {
ad69471c
PB
5220 gen_neon_negl(cpu_V0, size);
5221 }
5222 if (op != 11) {
5223 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5224 }
9ee6e8bb
PB
5225 switch (op) {
5226 case 2: case 6:
ad69471c 5227 gen_neon_addl(size);
9ee6e8bb
PB
5228 break;
5229 case 3: case 7:
ad69471c
PB
5230 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5231 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5232 break;
5233 case 10:
5234 /* no-op */
5235 break;
5236 case 11:
ad69471c 5237 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5238 break;
5239 default:
5240 abort();
5241 }
ad69471c 5242 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5243 }
dd8fbd78
FN
5244
5245 dead_tmp(tmp2);
5246
9ee6e8bb
PB
5247 break;
5248 default: /* 14 and 15 are RESERVED */
5249 return 1;
5250 }
5251 }
5252 } else { /* size == 3 */
5253 if (!u) {
5254 /* Extract. */
9ee6e8bb 5255 imm = (insn >> 8) & 0xf;
ad69471c
PB
5256
5257 if (imm > 7 && !q)
5258 return 1;
5259
5260 if (imm == 0) {
5261 neon_load_reg64(cpu_V0, rn);
5262 if (q) {
5263 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5264 }
ad69471c
PB
5265 } else if (imm == 8) {
5266 neon_load_reg64(cpu_V0, rn + 1);
5267 if (q) {
5268 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5269 }
ad69471c 5270 } else if (q) {
a7812ae4 5271 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5272 if (imm < 8) {
5273 neon_load_reg64(cpu_V0, rn);
a7812ae4 5274 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5275 } else {
5276 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5277 neon_load_reg64(tmp64, rm);
ad69471c
PB
5278 }
5279 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5280 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5281 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5282 if (imm < 8) {
5283 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5284 } else {
ad69471c
PB
5285 neon_load_reg64(cpu_V1, rm + 1);
5286 imm -= 8;
9ee6e8bb 5287 }
ad69471c 5288 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5289 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5290 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5291 tcg_temp_free_i64(tmp64);
ad69471c 5292 } else {
a7812ae4 5293 /* BUGFIX */
ad69471c 5294 neon_load_reg64(cpu_V0, rn);
a7812ae4 5295 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5296 neon_load_reg64(cpu_V1, rm);
a7812ae4 5297 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5298 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5299 }
5300 neon_store_reg64(cpu_V0, rd);
5301 if (q) {
5302 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5303 }
5304 } else if ((insn & (1 << 11)) == 0) {
5305 /* Two register misc. */
5306 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5307 size = (insn >> 18) & 3;
5308 switch (op) {
5309 case 0: /* VREV64 */
5310 if (size == 3)
5311 return 1;
5312 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5313 tmp = neon_load_reg(rm, pass * 2);
5314 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5315 switch (size) {
dd8fbd78
FN
5316 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5317 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5318 case 2: /* no-op */ break;
5319 default: abort();
5320 }
dd8fbd78 5321 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5322 if (size == 2) {
dd8fbd78 5323 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5324 } else {
9ee6e8bb 5325 switch (size) {
dd8fbd78
FN
5326 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5327 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5328 default: abort();
5329 }
dd8fbd78 5330 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5331 }
5332 }
5333 break;
5334 case 4: case 5: /* VPADDL */
5335 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5336 if (size == 3)
5337 return 1;
ad69471c
PB
5338 for (pass = 0; pass < q + 1; pass++) {
5339 tmp = neon_load_reg(rm, pass * 2);
5340 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5341 tmp = neon_load_reg(rm, pass * 2 + 1);
5342 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5343 switch (size) {
5344 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5345 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5346 case 2: tcg_gen_add_i64(CPU_V001); break;
5347 default: abort();
5348 }
9ee6e8bb
PB
5349 if (op >= 12) {
5350 /* Accumulate. */
ad69471c
PB
5351 neon_load_reg64(cpu_V1, rd + pass);
5352 gen_neon_addl(size);
9ee6e8bb 5353 }
ad69471c 5354 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5355 }
5356 break;
5357 case 33: /* VTRN */
5358 if (size == 2) {
5359 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5360 tmp = neon_load_reg(rm, n);
5361 tmp2 = neon_load_reg(rd, n + 1);
5362 neon_store_reg(rm, n, tmp2);
5363 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5364 }
5365 } else {
5366 goto elementwise;
5367 }
5368 break;
5369 case 34: /* VUZP */
5370 /* Reg Before After
5371 Rd A3 A2 A1 A0 B2 B0 A2 A0
5372 Rm B3 B2 B1 B0 B3 B1 A3 A1
5373 */
5374 if (size == 3)
5375 return 1;
5376 gen_neon_unzip(rd, q, 0, size);
5377 gen_neon_unzip(rm, q, 4, size);
5378 if (q) {
5379 static int unzip_order_q[8] =
5380 {0, 2, 4, 6, 1, 3, 5, 7};
5381 for (n = 0; n < 8; n++) {
5382 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5383 tmp = neon_load_scratch(unzip_order_q[n]);
5384 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5385 }
5386 } else {
5387 static int unzip_order[4] =
5388 {0, 4, 1, 5};
5389 for (n = 0; n < 4; n++) {
5390 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5391 tmp = neon_load_scratch(unzip_order[n]);
5392 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5393 }
5394 }
5395 break;
5396 case 35: /* VZIP */
5397 /* Reg Before After
5398 Rd A3 A2 A1 A0 B1 A1 B0 A0
5399 Rm B3 B2 B1 B0 B3 A3 B2 A2
5400 */
5401 if (size == 3)
5402 return 1;
5403 count = (q ? 4 : 2);
5404 for (n = 0; n < count; n++) {
dd8fbd78
FN
5405 tmp = neon_load_reg(rd, n);
5406                    tmp2 = neon_load_reg(rm, n);
9ee6e8bb 5407 switch (size) {
dd8fbd78
FN
5408 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5409 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5410 case 2: /* no-op */; break;
5411 default: abort();
5412 }
dd8fbd78
FN
5413 neon_store_scratch(n * 2, tmp);
5414 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5415 }
5416 for (n = 0; n < count * 2; n++) {
5417 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5418 tmp = neon_load_scratch(n);
5419 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5420 }
5421 break;
5422 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5423 if (size == 3)
5424 return 1;
a50f5b91 5425 TCGV_UNUSED(tmp2);
9ee6e8bb 5426 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5427 neon_load_reg64(cpu_V0, rm + pass);
5428 tmp = new_tmp();
9ee6e8bb 5429 if (op == 36 && q == 0) {
ad69471c 5430 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5431 } else if (q) {
ad69471c 5432 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5433 } else {
ad69471c
PB
5434 gen_neon_narrow_sats(size, tmp, cpu_V0);
5435 }
5436 if (pass == 0) {
5437 tmp2 = tmp;
5438 } else {
5439 neon_store_reg(rd, 0, tmp2);
5440 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5441 }
9ee6e8bb
PB
5442 }
5443 break;
5444 case 38: /* VSHLL */
ad69471c 5445 if (q || size == 3)
9ee6e8bb 5446 return 1;
ad69471c
PB
5447 tmp = neon_load_reg(rm, 0);
5448 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5449 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5450 if (pass == 1)
5451 tmp = tmp2;
5452 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5453 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5454 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5455 }
5456 break;
60011498
PB
5457 case 44: /* VCVT.F16.F32 */
5458 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5459 return 1;
5460 tmp = new_tmp();
5461 tmp2 = new_tmp();
5462 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5463 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5464 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5465 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5466 tcg_gen_shli_i32(tmp2, tmp2, 16);
5467 tcg_gen_or_i32(tmp2, tmp2, tmp);
5468 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5469 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5470 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5471 neon_store_reg(rd, 0, tmp2);
5472 tmp2 = new_tmp();
5473 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5474 tcg_gen_shli_i32(tmp2, tmp2, 16);
5475 tcg_gen_or_i32(tmp2, tmp2, tmp);
5476 neon_store_reg(rd, 1, tmp2);
5477 dead_tmp(tmp);
5478 break;
5479 case 46: /* VCVT.F32.F16 */
5480 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5481 return 1;
5482 tmp3 = new_tmp();
5483 tmp = neon_load_reg(rm, 0);
5484 tmp2 = neon_load_reg(rm, 1);
5485 tcg_gen_ext16u_i32(tmp3, tmp);
5486 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5487 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5488 tcg_gen_shri_i32(tmp3, tmp, 16);
5489 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5490 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5491 dead_tmp(tmp);
5492 tcg_gen_ext16u_i32(tmp3, tmp2);
5493 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5494 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5495 tcg_gen_shri_i32(tmp3, tmp2, 16);
5496 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5497 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5498 dead_tmp(tmp2);
5499 dead_tmp(tmp3);
5500 break;
9ee6e8bb
PB
5501 default:
5502 elementwise:
5503 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5504 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5505 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5506 neon_reg_offset(rm, pass));
dd8fbd78 5507 TCGV_UNUSED(tmp);
9ee6e8bb 5508 } else {
dd8fbd78 5509 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5510 }
5511 switch (op) {
5512 case 1: /* VREV32 */
5513 switch (size) {
dd8fbd78
FN
5514 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5515 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5516 default: return 1;
5517 }
5518 break;
5519 case 2: /* VREV16 */
5520 if (size != 0)
5521 return 1;
dd8fbd78 5522 gen_rev16(tmp);
9ee6e8bb 5523 break;
9ee6e8bb
PB
5524 case 8: /* CLS */
5525 switch (size) {
dd8fbd78
FN
5526 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5527 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5528 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5529 default: return 1;
5530 }
5531 break;
5532 case 9: /* CLZ */
5533 switch (size) {
dd8fbd78
FN
5534 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5535 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5536 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5537 default: return 1;
5538 }
5539 break;
5540 case 10: /* CNT */
5541 if (size != 0)
5542 return 1;
dd8fbd78 5543 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5544 break;
5545 case 11: /* VNOT */
5546 if (size != 0)
5547 return 1;
dd8fbd78 5548 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5549 break;
5550 case 14: /* VQABS */
5551 switch (size) {
dd8fbd78
FN
5552 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5553 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5554 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5555 default: return 1;
5556 }
5557 break;
5558 case 15: /* VQNEG */
5559 switch (size) {
dd8fbd78
FN
5560 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5561 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5562 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5563 default: return 1;
5564 }
5565 break;
5566 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5567 tmp2 = tcg_const_i32(0);
9ee6e8bb 5568 switch(size) {
dd8fbd78
FN
5569 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5570 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5571 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5572 default: return 1;
5573 }
dd8fbd78 5574 tcg_temp_free(tmp2);
9ee6e8bb 5575 if (op == 19)
dd8fbd78 5576 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5577 break;
5578 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5579 tmp2 = tcg_const_i32(0);
9ee6e8bb 5580 switch(size) {
dd8fbd78
FN
5581 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5582 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5583 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5584 default: return 1;
5585 }
dd8fbd78 5586 tcg_temp_free(tmp2);
9ee6e8bb 5587 if (op == 20)
dd8fbd78 5588 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5589 break;
5590 case 18: /* VCEQ #0 */
dd8fbd78 5591 tmp2 = tcg_const_i32(0);
9ee6e8bb 5592 switch(size) {
dd8fbd78
FN
5593 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5594 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5595 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5596 default: return 1;
5597 }
dd8fbd78 5598 tcg_temp_free(tmp2);
9ee6e8bb
PB
5599 break;
5600 case 22: /* VABS */
5601 switch(size) {
dd8fbd78
FN
5602 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5603 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5604 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5605 default: return 1;
5606 }
5607 break;
5608 case 23: /* VNEG */
ad69471c
PB
5609 if (size == 3)
5610 return 1;
dd8fbd78
FN
5611 tmp2 = tcg_const_i32(0);
5612 gen_neon_rsb(size, tmp, tmp2);
5613 tcg_temp_free(tmp2);
9ee6e8bb
PB
5614 break;
5615 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5616 tmp2 = tcg_const_i32(0);
5617 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5618 tcg_temp_free(tmp2);
9ee6e8bb 5619 if (op == 27)
dd8fbd78 5620 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5621 break;
5622 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5623 tmp2 = tcg_const_i32(0);
5624 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5625 tcg_temp_free(tmp2);
9ee6e8bb 5626 if (op == 28)
dd8fbd78 5627 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5628 break;
5629 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5630 tmp2 = tcg_const_i32(0);
5631 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5632 tcg_temp_free(tmp2);
9ee6e8bb
PB
5633 break;
5634 case 30: /* Float VABS */
4373f3ce 5635 gen_vfp_abs(0);
9ee6e8bb
PB
5636 break;
5637 case 31: /* Float VNEG */
4373f3ce 5638 gen_vfp_neg(0);
9ee6e8bb
PB
5639 break;
5640 case 32: /* VSWP */
dd8fbd78
FN
5641 tmp2 = neon_load_reg(rd, pass);
5642 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5643 break;
5644 case 33: /* VTRN */
dd8fbd78 5645 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5646 switch (size) {
dd8fbd78
FN
5647 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5648 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5649 case 2: abort();
5650 default: return 1;
5651 }
dd8fbd78 5652 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5653 break;
5654 case 56: /* Integer VRECPE */
dd8fbd78 5655 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5656 break;
5657 case 57: /* Integer VRSQRTE */
dd8fbd78 5658 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5659 break;
5660 case 58: /* Float VRECPE */
4373f3ce 5661 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5662 break;
5663 case 59: /* Float VRSQRTE */
4373f3ce 5664 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5665 break;
5666 case 60: /* VCVT.F32.S32 */
d3587ef8 5667 gen_vfp_sito(0);
9ee6e8bb
PB
5668 break;
5669 case 61: /* VCVT.F32.U32 */
d3587ef8 5670 gen_vfp_uito(0);
9ee6e8bb
PB
5671 break;
5672 case 62: /* VCVT.S32.F32 */
d3587ef8 5673 gen_vfp_tosiz(0);
9ee6e8bb
PB
5674 break;
5675 case 63: /* VCVT.U32.F32 */
d3587ef8 5676 gen_vfp_touiz(0);
9ee6e8bb
PB
5677 break;
5678 default:
5679 /* Reserved: 21, 29, 39-56 */
5680 return 1;
5681 }
5682 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5683 tcg_gen_st_f32(cpu_F0s, cpu_env,
5684 neon_reg_offset(rd, pass));
9ee6e8bb 5685 } else {
dd8fbd78 5686 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5687 }
5688 }
5689 break;
5690 }
5691 } else if ((insn & (1 << 10)) == 0) {
5692 /* VTBL, VTBX. */
3018f259 5693 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5694 if (insn & (1 << 6)) {
8f8e3aa4 5695 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5696 } else {
8f8e3aa4
PB
5697 tmp = new_tmp();
5698 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5699 }
8f8e3aa4 5700 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5701 tmp4 = tcg_const_i32(rn);
5702 tmp5 = tcg_const_i32(n);
5703 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
3018f259 5704 dead_tmp(tmp);
9ee6e8bb 5705 if (insn & (1 << 6)) {
8f8e3aa4 5706 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5707 } else {
8f8e3aa4
PB
5708 tmp = new_tmp();
5709 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5710 }
8f8e3aa4 5711 tmp3 = neon_load_reg(rm, 1);
b75263d6 5712 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5713 tcg_temp_free_i32(tmp5);
5714 tcg_temp_free_i32(tmp4);
8f8e3aa4 5715 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5716 neon_store_reg(rd, 1, tmp3);
5717 dead_tmp(tmp);
9ee6e8bb
PB
5718 } else if ((insn & 0x380) == 0) {
5719 /* VDUP */
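                /* The scalar index/size come from insn bits [19:16]: bit 19 picks
                   which 32-bit word of the source register is read below, bit 16
                   requests a byte duplicate (byte chosen by bits [18:17]), bit 17 a
                   halfword duplicate (high/low chosen by bit 18), otherwise the
                   whole word is duplicated. */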
5720 if (insn & (1 << 19)) {
dd8fbd78 5721 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5722 } else {
dd8fbd78 5723 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5724 }
5725 if (insn & (1 << 16)) {
dd8fbd78 5726 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5727 } else if (insn & (1 << 17)) {
5728 if ((insn >> 18) & 1)
dd8fbd78 5729 gen_neon_dup_high16(tmp);
9ee6e8bb 5730 else
dd8fbd78 5731 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5732 }
5733 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5734 tmp2 = new_tmp();
5735 tcg_gen_mov_i32(tmp2, tmp);
5736 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5737 }
dd8fbd78 5738 dead_tmp(tmp);
9ee6e8bb
PB
5739 } else {
5740 return 1;
5741 }
5742 }
5743 }
5744 return 0;
5745}
5746
fe1479c3
PB
5747static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5748{
5749 int crn = (insn >> 16) & 0xf;
5750 int crm = insn & 0xf;
5751 int op1 = (insn >> 21) & 7;
5752 int op2 = (insn >> 5) & 7;
5753 int rt = (insn >> 12) & 0xf;
5754 TCGv tmp;
5755
5756 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5757 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5758 /* TEECR */
5759 if (IS_USER(s))
5760 return 1;
5761 tmp = load_cpu_field(teecr);
5762 store_reg(s, rt, tmp);
5763 return 0;
5764 }
5765 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5766 /* TEEHBR */
5767 if (IS_USER(s) && (env->teecr & 1))
5768 return 1;
5769 tmp = load_cpu_field(teehbr);
5770 store_reg(s, rt, tmp);
5771 return 0;
5772 }
5773 }
5774 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5775 op1, crn, crm, op2);
5776 return 1;
5777}
5778
5779static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5780{
5781 int crn = (insn >> 16) & 0xf;
5782 int crm = insn & 0xf;
5783 int op1 = (insn >> 21) & 7;
5784 int op2 = (insn >> 5) & 7;
5785 int rt = (insn >> 12) & 0xf;
5786 TCGv tmp;
5787
5788 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5789 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5790 /* TEECR */
5791 if (IS_USER(s))
5792 return 1;
5793 tmp = load_reg(s, rt);
5794 gen_helper_set_teecr(cpu_env, tmp);
5795 dead_tmp(tmp);
5796 return 0;
5797 }
5798 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5799 /* TEEHBR */
5800 if (IS_USER(s) && (env->teecr & 1))
5801 return 1;
5802 tmp = load_reg(s, rt);
5803 store_cpu_field(tmp, teehbr);
5804 return 0;
5805 }
5806 }
5807 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5808 op1, crn, crm, op2);
5809 return 1;
5810}
5811
9ee6e8bb
PB
5812static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5813{
5814 int cpnum;
5815
5816 cpnum = (insn >> 8) & 0xf;
5817 if (arm_feature(env, ARM_FEATURE_XSCALE)
5818 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5819 return 1;
5820
5821 switch (cpnum) {
5822 case 0:
5823 case 1:
5824 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5825 return disas_iwmmxt_insn(env, s, insn);
5826 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5827 return disas_dsp_insn(env, s, insn);
5828 }
5829 return 1;
5830 case 10:
5831 case 11:
5832 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5833 case 14:
5834 /* Coprocessors 7-15 are architecturally reserved by ARM.
5835 Unfortunately Intel decided to ignore this. */
5836 if (arm_feature(env, ARM_FEATURE_XSCALE))
5837 goto board;
5838 if (insn & (1 << 20))
5839 return disas_cp14_read(env, s, insn);
5840 else
5841 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5842 case 15:
5843 return disas_cp15_insn (env, s, insn);
5844 default:
fe1479c3 5845 board:
9ee6e8bb
PB
5846 /* Unknown coprocessor. See if the board has hooked it. */
5847 return disas_cp_insn (env, s, insn);
5848 }
5849}
5850
5e3f878a
PB
5851
5852/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5853static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5854{
5855 TCGv tmp;
5856 tmp = new_tmp();
5857 tcg_gen_trunc_i64_i32(tmp, val);
5858 store_reg(s, rlow, tmp);
5859 tmp = new_tmp();
5860 tcg_gen_shri_i64(val, val, 32);
5861 tcg_gen_trunc_i64_i32(tmp, val);
5862 store_reg(s, rhigh, tmp);
5863}
5864
5865/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5866static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5867{
a7812ae4 5868 TCGv_i64 tmp;
5e3f878a
PB
5869 TCGv tmp2;
5870
36aa55dc 5871 /* Load value and extend to 64 bits. */
a7812ae4 5872 tmp = tcg_temp_new_i64();
5e3f878a
PB
5873 tmp2 = load_reg(s, rlow);
5874 tcg_gen_extu_i32_i64(tmp, tmp2);
5875 dead_tmp(tmp2);
5876 tcg_gen_add_i64(val, val, tmp);
b75263d6 5877 tcg_temp_free_i64(tmp);
5e3f878a
PB
5878}
5879
5880/* load and add a 64-bit value from a register pair. */
a7812ae4 5881static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5882{
a7812ae4 5883 TCGv_i64 tmp;
36aa55dc
PB
5884 TCGv tmpl;
5885 TCGv tmph;
5e3f878a
PB
5886
5887 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5888 tmpl = load_reg(s, rlow);
5889 tmph = load_reg(s, rhigh);
a7812ae4 5890 tmp = tcg_temp_new_i64();
36aa55dc
PB
5891 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5892 dead_tmp(tmpl);
5893 dead_tmp(tmph);
5e3f878a 5894 tcg_gen_add_i64(val, val, tmp);
b75263d6 5895 tcg_temp_free_i64(tmp);
5e3f878a
PB
5896}
5897
5898/* Set N and Z flags from a 64-bit value. */
a7812ae4 5899static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5900{
5901 TCGv tmp = new_tmp();
5902 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5903 gen_logic_CC(tmp);
5904 dead_tmp(tmp);
5e3f878a
PB
5905}
5906
426f5abc
PB
5907/* Load/Store exclusive instructions are implemented by remembering
5908 the value/address loaded, and seeing if these are the same
 5909 when the store is performed. This should be sufficient to implement
5910 the architecturally mandated semantics, and avoids having to monitor
5911 regular stores.
5912
5913 In system emulation mode only one CPU will be running at once, so
5914 this sequence is effectively atomic. In user emulation mode we
5915 throw an exception and handle the atomic operation elsewhere. */
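/* As an illustrative guest-side example (not taken from this file), the kind of
   sequence this mechanism has to model is the classic atomic-increment retry
   loop:

       retry:
           ldrex   r1, [r0]        @ load [r0] and mark it exclusive
           add     r1, r1, #1
           strex   r2, r1, [r0]    @ store only if still exclusive; r2 = 0 on success
           cmp     r2, #0
           bne     retry

   gen_load_exclusive() records the address and value seen by the LDREX, and
   gen_store_exclusive() re-checks them before committing the store. */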
5916static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5917 TCGv addr, int size)
5918{
5919 TCGv tmp;
5920
5921 switch (size) {
5922 case 0:
5923 tmp = gen_ld8u(addr, IS_USER(s));
5924 break;
5925 case 1:
5926 tmp = gen_ld16u(addr, IS_USER(s));
5927 break;
5928 case 2:
5929 case 3:
5930 tmp = gen_ld32(addr, IS_USER(s));
5931 break;
5932 default:
5933 abort();
5934 }
5935 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5936 store_reg(s, rt, tmp);
5937 if (size == 3) {
2c9adbda
PM
5938 TCGv tmp2 = new_tmp();
5939 tcg_gen_addi_i32(tmp2, addr, 4);
5940 tmp = gen_ld32(tmp2, IS_USER(s));
5941 dead_tmp(tmp2);
426f5abc
PB
5942 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5943 store_reg(s, rt2, tmp);
5944 }
5945 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5946}
5947
5948static void gen_clrex(DisasContext *s)
5949{
5950 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5951}
5952
5953#ifdef CONFIG_USER_ONLY
5954static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5955 TCGv addr, int size)
5956{
5957 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5958 tcg_gen_movi_i32(cpu_exclusive_info,
5959 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5960 gen_set_condexec(s);
5961 gen_set_pc_im(s->pc - 4);
5962 gen_exception(EXCP_STREX);
5963 s->is_jmp = DISAS_JUMP;
5964}
5965#else
5966static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5967 TCGv addr, int size)
5968{
5969 TCGv tmp;
5970 int done_label;
5971 int fail_label;
5972
5973 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5974 [addr] = {Rt};
5975 {Rd} = 0;
5976 } else {
5977 {Rd} = 1;
5978 } */
5979 fail_label = gen_new_label();
5980 done_label = gen_new_label();
5981 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
5982 switch (size) {
5983 case 0:
5984 tmp = gen_ld8u(addr, IS_USER(s));
5985 break;
5986 case 1:
5987 tmp = gen_ld16u(addr, IS_USER(s));
5988 break;
5989 case 2:
5990 case 3:
5991 tmp = gen_ld32(addr, IS_USER(s));
5992 break;
5993 default:
5994 abort();
5995 }
5996 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
5997 dead_tmp(tmp);
5998 if (size == 3) {
5999 TCGv tmp2 = new_tmp();
6000 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6001 tmp = gen_ld32(tmp2, IS_USER(s));
426f5abc
PB
6002 dead_tmp(tmp2);
6003 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6004 dead_tmp(tmp);
6005 }
6006 tmp = load_reg(s, rt);
6007 switch (size) {
6008 case 0:
6009 gen_st8(tmp, addr, IS_USER(s));
6010 break;
6011 case 1:
6012 gen_st16(tmp, addr, IS_USER(s));
6013 break;
6014 case 2:
6015 case 3:
6016 gen_st32(tmp, addr, IS_USER(s));
6017 break;
6018 default:
6019 abort();
6020 }
6021 if (size == 3) {
6022 tcg_gen_addi_i32(addr, addr, 4);
6023 tmp = load_reg(s, rt2);
6024 gen_st32(tmp, addr, IS_USER(s));
6025 }
6026 tcg_gen_movi_i32(cpu_R[rd], 0);
6027 tcg_gen_br(done_label);
6028 gen_set_label(fail_label);
6029 tcg_gen_movi_i32(cpu_R[rd], 1);
6030 gen_set_label(done_label);
6031 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6032}
6033#endif
6034
9ee6e8bb
PB
6035static void disas_arm_insn(CPUState * env, DisasContext *s)
6036{
6037 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6038 TCGv tmp;
3670669c 6039 TCGv tmp2;
6ddbc6e4 6040 TCGv tmp3;
b0109805 6041 TCGv addr;
a7812ae4 6042 TCGv_i64 tmp64;
9ee6e8bb
PB
6043
6044 insn = ldl_code(s->pc);
6045 s->pc += 4;
6046
6047 /* M variants do not implement ARM mode. */
6048 if (IS_M(env))
6049 goto illegal_op;
6050 cond = insn >> 28;
6051 if (cond == 0xf){
6052 /* Unconditional instructions. */
6053 if (((insn >> 25) & 7) == 1) {
6054 /* NEON Data processing. */
6055 if (!arm_feature(env, ARM_FEATURE_NEON))
6056 goto illegal_op;
6057
6058 if (disas_neon_data_insn(env, s, insn))
6059 goto illegal_op;
6060 return;
6061 }
6062 if ((insn & 0x0f100000) == 0x04000000) {
6063 /* NEON load/store. */
6064 if (!arm_feature(env, ARM_FEATURE_NEON))
6065 goto illegal_op;
6066
6067 if (disas_neon_ls_insn(env, s, insn))
6068 goto illegal_op;
6069 return;
6070 }
6071 if ((insn & 0x0d70f000) == 0x0550f000)
6072 return; /* PLD */
6073 else if ((insn & 0x0ffffdff) == 0x01010000) {
6074 ARCH(6);
6075 /* setend */
6076 if (insn & (1 << 9)) {
6077 /* BE8 mode not implemented. */
6078 goto illegal_op;
6079 }
6080 return;
6081 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6082 switch ((insn >> 4) & 0xf) {
6083 case 1: /* clrex */
6084 ARCH(6K);
426f5abc 6085 gen_clrex(s);
9ee6e8bb
PB
6086 return;
6087 case 4: /* dsb */
6088 case 5: /* dmb */
6089 case 6: /* isb */
6090 ARCH(7);
6091 /* We don't emulate caches so these are a no-op. */
6092 return;
6093 default:
6094 goto illegal_op;
6095 }
6096 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6097 /* srs */
c67b6b71 6098 int32_t offset;
9ee6e8bb
PB
6099 if (IS_USER(s))
6100 goto illegal_op;
6101 ARCH(6);
6102 op1 = (insn & 0x1f);
6103 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 6104 addr = load_reg(s, 13);
9ee6e8bb 6105 } else {
b0109805 6106 addr = new_tmp();
b75263d6
JR
6107 tmp = tcg_const_i32(op1);
6108 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6109 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6110 }
6111 i = (insn >> 23) & 3;
6112 switch (i) {
6113 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6114 case 1: offset = 0; break; /* IA */
6115 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6116 case 3: offset = 4; break; /* IB */
6117 default: abort();
6118 }
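            /* The offsets above are the usual LDM/STM start-address adjustments
               for a two-word transfer (LR and SPSR): IA starts at the base, IB one
               word above it, DA one word below, DB two words below. The writeback
               switch further down compensates for the extra 4 already added to
               addr between the two stores. */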
6119 if (offset)
b0109805
PB
6120 tcg_gen_addi_i32(addr, addr, offset);
6121 tmp = load_reg(s, 14);
6122 gen_st32(tmp, addr, 0);
c67b6b71 6123 tmp = load_cpu_field(spsr);
b0109805
PB
6124 tcg_gen_addi_i32(addr, addr, 4);
6125 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6126 if (insn & (1 << 21)) {
6127 /* Base writeback. */
6128 switch (i) {
6129 case 0: offset = -8; break;
c67b6b71
FN
6130 case 1: offset = 4; break;
6131 case 2: offset = -4; break;
9ee6e8bb
PB
6132 case 3: offset = 0; break;
6133 default: abort();
6134 }
6135 if (offset)
c67b6b71 6136 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 6137 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 6138 store_reg(s, 13, addr);
9ee6e8bb 6139 } else {
b75263d6
JR
6140 tmp = tcg_const_i32(op1);
6141 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6142 tcg_temp_free_i32(tmp);
c67b6b71 6143 dead_tmp(addr);
9ee6e8bb 6144 }
b0109805
PB
6145 } else {
6146 dead_tmp(addr);
9ee6e8bb 6147 }
a990f58f 6148 return;
ea825eee 6149 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6150 /* rfe */
c67b6b71 6151 int32_t offset;
9ee6e8bb
PB
6152 if (IS_USER(s))
6153 goto illegal_op;
6154 ARCH(6);
6155 rn = (insn >> 16) & 0xf;
b0109805 6156 addr = load_reg(s, rn);
9ee6e8bb
PB
6157 i = (insn >> 23) & 3;
6158 switch (i) {
b0109805 6159 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6160 case 1: offset = 0; break; /* IA */
6161 case 2: offset = -8; break; /* DB */
b0109805 6162 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6163 default: abort();
6164 }
6165 if (offset)
b0109805
PB
6166 tcg_gen_addi_i32(addr, addr, offset);
6167 /* Load PC into tmp and CPSR into tmp2. */
6168 tmp = gen_ld32(addr, 0);
6169 tcg_gen_addi_i32(addr, addr, 4);
6170 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6171 if (insn & (1 << 21)) {
6172 /* Base writeback. */
6173 switch (i) {
b0109805 6174 case 0: offset = -8; break;
c67b6b71
FN
6175 case 1: offset = 4; break;
6176 case 2: offset = -4; break;
b0109805 6177 case 3: offset = 0; break;
9ee6e8bb
PB
6178 default: abort();
6179 }
6180 if (offset)
b0109805
PB
6181 tcg_gen_addi_i32(addr, addr, offset);
6182 store_reg(s, rn, addr);
6183 } else {
6184 dead_tmp(addr);
9ee6e8bb 6185 }
b0109805 6186 gen_rfe(s, tmp, tmp2);
c67b6b71 6187 return;
9ee6e8bb
PB
6188 } else if ((insn & 0x0e000000) == 0x0a000000) {
6189 /* branch link and change to thumb (blx <offset>) */
6190 int32_t offset;
6191
6192 val = (uint32_t)s->pc;
d9ba4830
PB
6193 tmp = new_tmp();
6194 tcg_gen_movi_i32(tmp, val);
6195 store_reg(s, 14, tmp);
9ee6e8bb
PB
6196 /* Sign-extend the 24-bit offset */
6197 offset = (((int32_t)insn) << 8) >> 8;
6198 /* offset * 4 + bit24 * 2 + (thumb bit) */
6199 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6200 /* pipeline offset */
6201 val += 4;
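            /* Worked example (illustrative): for a BLX <offset> at address A with
               imm24 == 1 and the H bit (insn bit 24) clear, val becomes
               (A + 4) + 4 + (1 << 2) = A + 12, with bit 0 set so that gen_bx_im
               switches the CPU to Thumb state. */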
d9ba4830 6202 gen_bx_im(s, val);
9ee6e8bb
PB
6203 return;
6204 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6205 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6206 /* iWMMXt register transfer. */
6207 if (env->cp15.c15_cpar & (1 << 1))
6208 if (!disas_iwmmxt_insn(env, s, insn))
6209 return;
6210 }
6211 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6212 /* Coprocessor double register transfer. */
6213 } else if ((insn & 0x0f000010) == 0x0e000010) {
6214 /* Additional coprocessor register transfer. */
7997d92f 6215 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6216 uint32_t mask;
6217 uint32_t val;
6218 /* cps (privileged) */
6219 if (IS_USER(s))
6220 return;
6221 mask = val = 0;
6222 if (insn & (1 << 19)) {
6223 if (insn & (1 << 8))
6224 mask |= CPSR_A;
6225 if (insn & (1 << 7))
6226 mask |= CPSR_I;
6227 if (insn & (1 << 6))
6228 mask |= CPSR_F;
6229 if (insn & (1 << 18))
6230 val |= mask;
6231 }
7997d92f 6232 if (insn & (1 << 17)) {
9ee6e8bb
PB
6233 mask |= CPSR_M;
6234 val |= (insn & 0x1f);
6235 }
6236 if (mask) {
2fbac54b 6237 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6238 }
6239 return;
6240 }
6241 goto illegal_op;
6242 }
6243 if (cond != 0xe) {
6244 /* if not always execute, we generate a conditional jump to
6245 next instruction */
6246 s->condlabel = gen_new_label();
d9ba4830 6247 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6248 s->condjmp = 1;
6249 }
6250 if ((insn & 0x0f900000) == 0x03000000) {
6251 if ((insn & (1 << 21)) == 0) {
6252 ARCH(6T2);
6253 rd = (insn >> 12) & 0xf;
6254 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
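            /* The 16-bit immediate is encoded as imm4:imm12, i.e. bits [19:16]
               supply the top nibble and bits [11:0] the rest; MOVW zero-extends it,
               MOVT replaces only the top halfword of Rd. */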
6255 if ((insn & (1 << 22)) == 0) {
6256 /* MOVW */
5e3f878a
PB
6257 tmp = new_tmp();
6258 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6259 } else {
6260 /* MOVT */
5e3f878a 6261 tmp = load_reg(s, rd);
86831435 6262 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6263 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6264 }
5e3f878a 6265 store_reg(s, rd, tmp);
9ee6e8bb
PB
6266 } else {
6267 if (((insn >> 12) & 0xf) != 0xf)
6268 goto illegal_op;
6269 if (((insn >> 16) & 0xf) == 0) {
6270 gen_nop_hint(s, insn & 0xff);
6271 } else {
6272 /* CPSR = immediate */
6273 val = insn & 0xff;
6274 shift = ((insn >> 8) & 0xf) * 2;
6275 if (shift)
6276 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6277 i = ((insn & (1 << 22)) != 0);
2fbac54b 6278 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6279 goto illegal_op;
6280 }
6281 }
6282 } else if ((insn & 0x0f900000) == 0x01000000
6283 && (insn & 0x00000090) != 0x00000090) {
6284 /* miscellaneous instructions */
6285 op1 = (insn >> 21) & 3;
6286 sh = (insn >> 4) & 0xf;
6287 rm = insn & 0xf;
6288 switch (sh) {
6289 case 0x0: /* move program status register */
6290 if (op1 & 1) {
6291 /* PSR = reg */
2fbac54b 6292 tmp = load_reg(s, rm);
9ee6e8bb 6293 i = ((op1 & 2) != 0);
2fbac54b 6294 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6295 goto illegal_op;
6296 } else {
6297 /* reg = PSR */
6298 rd = (insn >> 12) & 0xf;
6299 if (op1 & 2) {
6300 if (IS_USER(s))
6301 goto illegal_op;
d9ba4830 6302 tmp = load_cpu_field(spsr);
9ee6e8bb 6303 } else {
d9ba4830
PB
6304 tmp = new_tmp();
6305 gen_helper_cpsr_read(tmp);
9ee6e8bb 6306 }
d9ba4830 6307 store_reg(s, rd, tmp);
9ee6e8bb
PB
6308 }
6309 break;
6310 case 0x1:
6311 if (op1 == 1) {
6312 /* branch/exchange thumb (bx). */
d9ba4830
PB
6313 tmp = load_reg(s, rm);
6314 gen_bx(s, tmp);
9ee6e8bb
PB
6315 } else if (op1 == 3) {
6316 /* clz */
6317 rd = (insn >> 12) & 0xf;
1497c961
PB
6318 tmp = load_reg(s, rm);
6319 gen_helper_clz(tmp, tmp);
6320 store_reg(s, rd, tmp);
9ee6e8bb
PB
6321 } else {
6322 goto illegal_op;
6323 }
6324 break;
6325 case 0x2:
6326 if (op1 == 1) {
6327 ARCH(5J); /* bxj */
6328 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6329 tmp = load_reg(s, rm);
6330 gen_bx(s, tmp);
9ee6e8bb
PB
6331 } else {
6332 goto illegal_op;
6333 }
6334 break;
6335 case 0x3:
6336 if (op1 != 1)
6337 goto illegal_op;
6338
6339 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6340 tmp = load_reg(s, rm);
6341 tmp2 = new_tmp();
6342 tcg_gen_movi_i32(tmp2, s->pc);
6343 store_reg(s, 14, tmp2);
6344 gen_bx(s, tmp);
9ee6e8bb
PB
6345 break;
6346 case 0x5: /* saturating add/subtract */
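            /* op1 (insn bits [22:21]) selects QADD (0), QSUB (1), QDADD (2) or
               QDSUB (3): bit 1 requests saturating doubling of the second operand,
               bit 0 selects subtraction instead of addition. */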
6347 rd = (insn >> 12) & 0xf;
6348 rn = (insn >> 16) & 0xf;
b40d0353 6349 tmp = load_reg(s, rm);
5e3f878a 6350 tmp2 = load_reg(s, rn);
9ee6e8bb 6351 if (op1 & 2)
5e3f878a 6352 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6353 if (op1 & 1)
5e3f878a 6354 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6355 else
5e3f878a
PB
6356 gen_helper_add_saturate(tmp, tmp, tmp2);
6357 dead_tmp(tmp2);
6358 store_reg(s, rd, tmp);
9ee6e8bb 6359 break;
49e14940
AL
6360 case 7:
6361 /* SMC instruction (op1 == 3)
6362 and undefined instructions (op1 == 0 || op1 == 2)
6363 will trap */
6364 if (op1 != 1) {
6365 goto illegal_op;
6366 }
6367 /* bkpt */
9ee6e8bb 6368 gen_set_condexec(s);
5e3f878a 6369 gen_set_pc_im(s->pc - 4);
d9ba4830 6370 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6371 s->is_jmp = DISAS_JUMP;
6372 break;
6373 case 0x8: /* signed multiply */
6374 case 0xa:
6375 case 0xc:
6376 case 0xe:
6377 rs = (insn >> 8) & 0xf;
6378 rn = (insn >> 12) & 0xf;
6379 rd = (insn >> 16) & 0xf;
6380 if (op1 == 1) {
6381 /* (32 * 16) >> 16 */
5e3f878a
PB
6382 tmp = load_reg(s, rm);
6383 tmp2 = load_reg(s, rs);
9ee6e8bb 6384 if (sh & 4)
5e3f878a 6385 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6386 else
5e3f878a 6387 gen_sxth(tmp2);
a7812ae4
PB
6388 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6389 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6390 tmp = new_tmp();
a7812ae4 6391 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6392 tcg_temp_free_i64(tmp64);
9ee6e8bb 6393 if ((sh & 2) == 0) {
5e3f878a
PB
6394 tmp2 = load_reg(s, rn);
6395 gen_helper_add_setq(tmp, tmp, tmp2);
6396 dead_tmp(tmp2);
9ee6e8bb 6397 }
5e3f878a 6398 store_reg(s, rd, tmp);
9ee6e8bb
PB
6399 } else {
6400 /* 16 * 16 */
5e3f878a
PB
6401 tmp = load_reg(s, rm);
6402 tmp2 = load_reg(s, rs);
6403 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6404 dead_tmp(tmp2);
9ee6e8bb 6405 if (op1 == 2) {
a7812ae4
PB
6406 tmp64 = tcg_temp_new_i64();
6407 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6408 dead_tmp(tmp);
a7812ae4
PB
6409 gen_addq(s, tmp64, rn, rd);
6410 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6411 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6412 } else {
6413 if (op1 == 0) {
5e3f878a
PB
6414 tmp2 = load_reg(s, rn);
6415 gen_helper_add_setq(tmp, tmp, tmp2);
6416 dead_tmp(tmp2);
9ee6e8bb 6417 }
5e3f878a 6418 store_reg(s, rd, tmp);
9ee6e8bb
PB
6419 }
6420 }
6421 break;
6422 default:
6423 goto illegal_op;
6424 }
6425 } else if (((insn & 0x0e000000) == 0 &&
6426 (insn & 0x00000090) != 0x90) ||
6427 ((insn & 0x0e000000) == (1 << 25))) {
6428 int set_cc, logic_cc, shiftop;
6429
6430 op1 = (insn >> 21) & 0xf;
6431 set_cc = (insn >> 20) & 1;
6432 logic_cc = table_logic_cc[op1] & set_cc;
6433
6434 /* data processing instruction */
6435 if (insn & (1 << 25)) {
6436 /* immediate operand */
6437 val = insn & 0xff;
6438 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6439 if (shift) {
9ee6e8bb 6440 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6441 }
6442 tmp2 = new_tmp();
6443 tcg_gen_movi_i32(tmp2, val);
6444 if (logic_cc && shift) {
6445 gen_set_CF_bit31(tmp2);
6446 }
9ee6e8bb
PB
6447 } else {
6448 /* register */
6449 rm = (insn) & 0xf;
e9bb4aa9 6450 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6451 shiftop = (insn >> 5) & 3;
6452 if (!(insn & (1 << 4))) {
6453 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6454 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6455 } else {
6456 rs = (insn >> 8) & 0xf;
8984bd2e 6457 tmp = load_reg(s, rs);
e9bb4aa9 6458 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6459 }
6460 }
6461 if (op1 != 0x0f && op1 != 0x0d) {
6462 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6463 tmp = load_reg(s, rn);
6464 } else {
6465 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6466 }
6467 rd = (insn >> 12) & 0xf;
6468 switch(op1) {
6469 case 0x00:
e9bb4aa9
JR
6470 tcg_gen_and_i32(tmp, tmp, tmp2);
6471 if (logic_cc) {
6472 gen_logic_CC(tmp);
6473 }
21aeb343 6474 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6475 break;
6476 case 0x01:
e9bb4aa9
JR
6477 tcg_gen_xor_i32(tmp, tmp, tmp2);
6478 if (logic_cc) {
6479 gen_logic_CC(tmp);
6480 }
21aeb343 6481 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6482 break;
6483 case 0x02:
6484 if (set_cc && rd == 15) {
6485 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6486 if (IS_USER(s)) {
9ee6e8bb 6487 goto illegal_op;
e9bb4aa9
JR
6488 }
6489 gen_helper_sub_cc(tmp, tmp, tmp2);
6490 gen_exception_return(s, tmp);
9ee6e8bb 6491 } else {
e9bb4aa9
JR
6492 if (set_cc) {
6493 gen_helper_sub_cc(tmp, tmp, tmp2);
6494 } else {
6495 tcg_gen_sub_i32(tmp, tmp, tmp2);
6496 }
21aeb343 6497 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6498 }
6499 break;
6500 case 0x03:
e9bb4aa9
JR
6501 if (set_cc) {
6502 gen_helper_sub_cc(tmp, tmp2, tmp);
6503 } else {
6504 tcg_gen_sub_i32(tmp, tmp2, tmp);
6505 }
21aeb343 6506 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6507 break;
6508 case 0x04:
e9bb4aa9
JR
6509 if (set_cc) {
6510 gen_helper_add_cc(tmp, tmp, tmp2);
6511 } else {
6512 tcg_gen_add_i32(tmp, tmp, tmp2);
6513 }
21aeb343 6514 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6515 break;
6516 case 0x05:
e9bb4aa9
JR
6517 if (set_cc) {
6518 gen_helper_adc_cc(tmp, tmp, tmp2);
6519 } else {
6520 gen_add_carry(tmp, tmp, tmp2);
6521 }
21aeb343 6522 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6523 break;
6524 case 0x06:
e9bb4aa9
JR
6525 if (set_cc) {
6526 gen_helper_sbc_cc(tmp, tmp, tmp2);
6527 } else {
6528 gen_sub_carry(tmp, tmp, tmp2);
6529 }
21aeb343 6530 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6531 break;
6532 case 0x07:
e9bb4aa9
JR
6533 if (set_cc) {
6534 gen_helper_sbc_cc(tmp, tmp2, tmp);
6535 } else {
6536 gen_sub_carry(tmp, tmp2, tmp);
6537 }
21aeb343 6538 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6539 break;
6540 case 0x08:
6541 if (set_cc) {
e9bb4aa9
JR
6542 tcg_gen_and_i32(tmp, tmp, tmp2);
6543 gen_logic_CC(tmp);
9ee6e8bb 6544 }
e9bb4aa9 6545 dead_tmp(tmp);
9ee6e8bb
PB
6546 break;
6547 case 0x09:
6548 if (set_cc) {
e9bb4aa9
JR
6549 tcg_gen_xor_i32(tmp, tmp, tmp2);
6550 gen_logic_CC(tmp);
9ee6e8bb 6551 }
e9bb4aa9 6552 dead_tmp(tmp);
9ee6e8bb
PB
6553 break;
6554 case 0x0a:
6555 if (set_cc) {
e9bb4aa9 6556 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6557 }
e9bb4aa9 6558 dead_tmp(tmp);
9ee6e8bb
PB
6559 break;
6560 case 0x0b:
6561 if (set_cc) {
e9bb4aa9 6562 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6563 }
e9bb4aa9 6564 dead_tmp(tmp);
9ee6e8bb
PB
6565 break;
6566 case 0x0c:
e9bb4aa9
JR
6567 tcg_gen_or_i32(tmp, tmp, tmp2);
6568 if (logic_cc) {
6569 gen_logic_CC(tmp);
6570 }
21aeb343 6571 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6572 break;
6573 case 0x0d:
6574 if (logic_cc && rd == 15) {
6575 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6576 if (IS_USER(s)) {
9ee6e8bb 6577 goto illegal_op;
e9bb4aa9
JR
6578 }
6579 gen_exception_return(s, tmp2);
9ee6e8bb 6580 } else {
e9bb4aa9
JR
6581 if (logic_cc) {
6582 gen_logic_CC(tmp2);
6583 }
21aeb343 6584 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6585 }
6586 break;
6587 case 0x0e:
f669df27 6588 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6589 if (logic_cc) {
6590 gen_logic_CC(tmp);
6591 }
21aeb343 6592 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6593 break;
6594 default:
6595 case 0x0f:
e9bb4aa9
JR
6596 tcg_gen_not_i32(tmp2, tmp2);
6597 if (logic_cc) {
6598 gen_logic_CC(tmp2);
6599 }
21aeb343 6600 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6601 break;
6602 }
e9bb4aa9
JR
6603 if (op1 != 0x0f && op1 != 0x0d) {
6604 dead_tmp(tmp2);
6605 }
9ee6e8bb
PB
6606 } else {
6607 /* other instructions */
6608 op1 = (insn >> 24) & 0xf;
6609 switch(op1) {
6610 case 0x0:
6611 case 0x1:
6612 /* multiplies, extra load/stores */
6613 sh = (insn >> 5) & 3;
6614 if (sh == 0) {
6615 if (op1 == 0x0) {
6616 rd = (insn >> 16) & 0xf;
6617 rn = (insn >> 12) & 0xf;
6618 rs = (insn >> 8) & 0xf;
6619 rm = (insn) & 0xf;
6620 op1 = (insn >> 20) & 0xf;
6621 switch (op1) {
6622 case 0: case 1: case 2: case 3: case 6:
6623 /* 32 bit mul */
5e3f878a
PB
6624 tmp = load_reg(s, rs);
6625 tmp2 = load_reg(s, rm);
6626 tcg_gen_mul_i32(tmp, tmp, tmp2);
6627 dead_tmp(tmp2);
9ee6e8bb
PB
6628 if (insn & (1 << 22)) {
6629 /* Subtract (mls) */
6630 ARCH(6T2);
5e3f878a
PB
6631 tmp2 = load_reg(s, rn);
6632 tcg_gen_sub_i32(tmp, tmp2, tmp);
6633 dead_tmp(tmp2);
9ee6e8bb
PB
6634 } else if (insn & (1 << 21)) {
6635 /* Add */
5e3f878a
PB
6636 tmp2 = load_reg(s, rn);
6637 tcg_gen_add_i32(tmp, tmp, tmp2);
6638 dead_tmp(tmp2);
9ee6e8bb
PB
6639 }
6640 if (insn & (1 << 20))
5e3f878a
PB
6641 gen_logic_CC(tmp);
6642 store_reg(s, rd, tmp);
9ee6e8bb
PB
6643 break;
6644 default:
6645 /* 64 bit mul */
5e3f878a
PB
6646 tmp = load_reg(s, rs);
6647 tmp2 = load_reg(s, rm);
9ee6e8bb 6648 if (insn & (1 << 22))
a7812ae4 6649 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6650 else
a7812ae4 6651 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6652 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6653 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6654 if (!(insn & (1 << 23))) { /* double accumulate */
6655 ARCH(6);
a7812ae4
PB
6656 gen_addq_lo(s, tmp64, rn);
6657 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6658 }
6659 if (insn & (1 << 20))
a7812ae4
PB
6660 gen_logicq_cc(tmp64);
6661 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6662 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6663 break;
6664 }
6665 } else {
6666 rn = (insn >> 16) & 0xf;
6667 rd = (insn >> 12) & 0xf;
6668 if (insn & (1 << 23)) {
6669 /* load/store exclusive */
86753403
PB
6670 op1 = (insn >> 21) & 0x3;
6671 if (op1)
a47f43d2 6672 ARCH(6K);
86753403
PB
6673 else
6674 ARCH(6);
3174f8e9 6675 addr = tcg_temp_local_new_i32();
98a46317 6676 load_reg_var(s, addr, rn);
9ee6e8bb 6677 if (insn & (1 << 20)) {
86753403
PB
6678 switch (op1) {
6679 case 0: /* ldrex */
426f5abc 6680 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6681 break;
6682 case 1: /* ldrexd */
426f5abc 6683 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6684 break;
6685 case 2: /* ldrexb */
426f5abc 6686 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6687 break;
6688 case 3: /* ldrexh */
426f5abc 6689 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6690 break;
6691 default:
6692 abort();
6693 }
9ee6e8bb
PB
6694 } else {
6695 rm = insn & 0xf;
86753403
PB
6696 switch (op1) {
6697 case 0: /* strex */
426f5abc 6698 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6699 break;
6700 case 1: /* strexd */
502e64fe 6701 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6702 break;
6703 case 2: /* strexb */
426f5abc 6704 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6705 break;
6706 case 3: /* strexh */
426f5abc 6707 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6708 break;
6709 default:
6710 abort();
6711 }
9ee6e8bb 6712 }
3174f8e9 6713 tcg_temp_free(addr);
9ee6e8bb
PB
6714 } else {
6715 /* SWP instruction */
6716 rm = (insn) & 0xf;
6717
8984bd2e
PB
 6718 /* ??? This is not really atomic. However, we know
6719 we never have multiple CPUs running in parallel,
6720 so it is good enough. */
6721 addr = load_reg(s, rn);
6722 tmp = load_reg(s, rm);
9ee6e8bb 6723 if (insn & (1 << 22)) {
8984bd2e
PB
6724 tmp2 = gen_ld8u(addr, IS_USER(s));
6725 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6726 } else {
8984bd2e
PB
6727 tmp2 = gen_ld32(addr, IS_USER(s));
6728 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6729 }
8984bd2e
PB
6730 dead_tmp(addr);
6731 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6732 }
6733 }
6734 } else {
6735 int address_offset;
6736 int load;
6737 /* Misc load/store */
6738 rn = (insn >> 16) & 0xf;
6739 rd = (insn >> 12) & 0xf;
b0109805 6740 addr = load_reg(s, rn);
9ee6e8bb 6741 if (insn & (1 << 24))
b0109805 6742 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6743 address_offset = 0;
6744 if (insn & (1 << 20)) {
6745 /* load */
6746 switch(sh) {
6747 case 1:
b0109805 6748 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6749 break;
6750 case 2:
b0109805 6751 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6752 break;
6753 default:
6754 case 3:
b0109805 6755 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6756 break;
6757 }
6758 load = 1;
6759 } else if (sh & 2) {
6760 /* doubleword */
6761 if (sh & 1) {
6762 /* store */
b0109805
PB
6763 tmp = load_reg(s, rd);
6764 gen_st32(tmp, addr, IS_USER(s));
6765 tcg_gen_addi_i32(addr, addr, 4);
6766 tmp = load_reg(s, rd + 1);
6767 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6768 load = 0;
6769 } else {
6770 /* load */
b0109805
PB
6771 tmp = gen_ld32(addr, IS_USER(s));
6772 store_reg(s, rd, tmp);
6773 tcg_gen_addi_i32(addr, addr, 4);
6774 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6775 rd++;
6776 load = 1;
6777 }
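                    /* addr has already been advanced to the second word of the
                       doubleword pair, so bias any post-indexed writeback back by 4
                       to make it relative to the original address. */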
6778 address_offset = -4;
6779 } else {
6780 /* store */
b0109805
PB
6781 tmp = load_reg(s, rd);
6782 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6783 load = 0;
6784 }
6785 /* Perform base writeback before the loaded value to
6786 ensure correct behavior with overlapping index registers.
 6787 ldrd with base writeback is undefined if the
6788 destination and index registers overlap. */
6789 if (!(insn & (1 << 24))) {
b0109805
PB
6790 gen_add_datah_offset(s, insn, address_offset, addr);
6791 store_reg(s, rn, addr);
9ee6e8bb
PB
6792 } else if (insn & (1 << 21)) {
6793 if (address_offset)
b0109805
PB
6794 tcg_gen_addi_i32(addr, addr, address_offset);
6795 store_reg(s, rn, addr);
6796 } else {
6797 dead_tmp(addr);
9ee6e8bb
PB
6798 }
6799 if (load) {
6800 /* Complete the load. */
b0109805 6801 store_reg(s, rd, tmp);
9ee6e8bb
PB
6802 }
6803 }
6804 break;
6805 case 0x4:
6806 case 0x5:
6807 goto do_ldst;
6808 case 0x6:
6809 case 0x7:
6810 if (insn & (1 << 4)) {
6811 ARCH(6);
 6812 /* ARMv6 media instructions. */
6813 rm = insn & 0xf;
6814 rn = (insn >> 16) & 0xf;
2c0262af 6815 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6816 rs = (insn >> 8) & 0xf;
6817 switch ((insn >> 23) & 3) {
6818 case 0: /* Parallel add/subtract. */
6819 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6820 tmp = load_reg(s, rn);
6821 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6822 sh = (insn >> 5) & 7;
6823 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6824 goto illegal_op;
6ddbc6e4
PB
6825 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6826 dead_tmp(tmp2);
6827 store_reg(s, rd, tmp);
9ee6e8bb
PB
6828 break;
6829 case 1:
6830 if ((insn & 0x00700020) == 0) {
6c95676b 6831 /* Halfword pack. */
3670669c
PB
6832 tmp = load_reg(s, rn);
6833 tmp2 = load_reg(s, rm);
9ee6e8bb 6834 shift = (insn >> 7) & 0x1f;
3670669c
PB
6835 if (insn & (1 << 6)) {
6836 /* pkhtb */
22478e79
AZ
6837 if (shift == 0)
6838 shift = 31;
6839 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6840 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6841 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6842 } else {
6843 /* pkhbt */
22478e79
AZ
6844 if (shift)
6845 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6846 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6847 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6848 }
6849 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6850 dead_tmp(tmp2);
3670669c 6851 store_reg(s, rd, tmp);
9ee6e8bb
PB
6852 } else if ((insn & 0x00200020) == 0x00200000) {
6853 /* [us]sat */
6ddbc6e4 6854 tmp = load_reg(s, rm);
9ee6e8bb
PB
6855 shift = (insn >> 7) & 0x1f;
6856 if (insn & (1 << 6)) {
6857 if (shift == 0)
6858 shift = 31;
6ddbc6e4 6859 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6860 } else {
6ddbc6e4 6861 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6862 }
6863 sh = (insn >> 16) & 0x1f;
6864 if (sh != 0) {
b75263d6 6865 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6866 if (insn & (1 << 22))
b75263d6 6867 gen_helper_usat(tmp, tmp, tmp2);
9ee6e8bb 6868 else
b75263d6
JR
6869 gen_helper_ssat(tmp, tmp, tmp2);
6870 tcg_temp_free_i32(tmp2);
9ee6e8bb 6871 }
6ddbc6e4 6872 store_reg(s, rd, tmp);
9ee6e8bb
PB
6873 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6874 /* [us]sat16 */
6ddbc6e4 6875 tmp = load_reg(s, rm);
9ee6e8bb
PB
6876 sh = (insn >> 16) & 0x1f;
6877 if (sh != 0) {
b75263d6 6878 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6879 if (insn & (1 << 22))
b75263d6 6880 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 6881 else
b75263d6
JR
6882 gen_helper_ssat16(tmp, tmp, tmp2);
6883 tcg_temp_free_i32(tmp2);
9ee6e8bb 6884 }
6ddbc6e4 6885 store_reg(s, rd, tmp);
9ee6e8bb
PB
6886 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6887 /* Select bytes. */
6ddbc6e4
PB
6888 tmp = load_reg(s, rn);
6889 tmp2 = load_reg(s, rm);
6890 tmp3 = new_tmp();
6891 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6892 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6893 dead_tmp(tmp3);
6894 dead_tmp(tmp2);
6895 store_reg(s, rd, tmp);
9ee6e8bb 6896 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6897 tmp = load_reg(s, rm);
9ee6e8bb
PB
6898 shift = (insn >> 10) & 3;
 6899 /* ??? In many cases it's not necessary to do a
6900 rotate, a shift is sufficient. */
6901 if (shift != 0)
f669df27 6902 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6903 op1 = (insn >> 20) & 7;
6904 switch (op1) {
5e3f878a
PB
6905 case 0: gen_sxtb16(tmp); break;
6906 case 2: gen_sxtb(tmp); break;
6907 case 3: gen_sxth(tmp); break;
6908 case 4: gen_uxtb16(tmp); break;
6909 case 6: gen_uxtb(tmp); break;
6910 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6911 default: goto illegal_op;
6912 }
6913 if (rn != 15) {
5e3f878a 6914 tmp2 = load_reg(s, rn);
9ee6e8bb 6915 if ((op1 & 3) == 0) {
5e3f878a 6916 gen_add16(tmp, tmp2);
9ee6e8bb 6917 } else {
5e3f878a
PB
6918 tcg_gen_add_i32(tmp, tmp, tmp2);
6919 dead_tmp(tmp2);
9ee6e8bb
PB
6920 }
6921 }
6c95676b 6922 store_reg(s, rd, tmp);
9ee6e8bb
PB
6923 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6924 /* rev */
b0109805 6925 tmp = load_reg(s, rm);
9ee6e8bb
PB
6926 if (insn & (1 << 22)) {
6927 if (insn & (1 << 7)) {
b0109805 6928 gen_revsh(tmp);
9ee6e8bb
PB
6929 } else {
6930 ARCH(6T2);
b0109805 6931 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6932 }
6933 } else {
6934 if (insn & (1 << 7))
b0109805 6935 gen_rev16(tmp);
9ee6e8bb 6936 else
66896cb8 6937 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6938 }
b0109805 6939 store_reg(s, rd, tmp);
9ee6e8bb
PB
6940 } else {
6941 goto illegal_op;
6942 }
6943 break;
6944 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6945 tmp = load_reg(s, rm);
6946 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6947 if (insn & (1 << 20)) {
6948 /* Signed multiply most significant [accumulate]. */
a7812ae4 6949 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6950 if (insn & (1 << 5))
a7812ae4
PB
6951 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6952 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6953 tmp = new_tmp();
a7812ae4 6954 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6955 tcg_temp_free_i64(tmp64);
955a7dd5
AZ
6956 if (rd != 15) {
6957 tmp2 = load_reg(s, rd);
9ee6e8bb 6958 if (insn & (1 << 6)) {
5e3f878a 6959 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6960 } else {
5e3f878a 6961 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6962 }
5e3f878a 6963 dead_tmp(tmp2);
9ee6e8bb 6964 }
955a7dd5 6965 store_reg(s, rn, tmp);
9ee6e8bb
PB
6966 } else {
6967 if (insn & (1 << 5))
5e3f878a
PB
6968 gen_swap_half(tmp2);
6969 gen_smul_dual(tmp, tmp2);
6970 /* This addition cannot overflow. */
6971 if (insn & (1 << 6)) {
6972 tcg_gen_sub_i32(tmp, tmp, tmp2);
6973 } else {
6974 tcg_gen_add_i32(tmp, tmp, tmp2);
6975 }
6976 dead_tmp(tmp2);
9ee6e8bb 6977 if (insn & (1 << 22)) {
5e3f878a 6978 /* smlald, smlsld */
a7812ae4
PB
6979 tmp64 = tcg_temp_new_i64();
6980 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6981 dead_tmp(tmp);
a7812ae4
PB
6982 gen_addq(s, tmp64, rd, rn);
6983 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 6984 tcg_temp_free_i64(tmp64);
9ee6e8bb 6985 } else {
5e3f878a 6986 /* smuad, smusd, smlad, smlsd */
22478e79 6987 if (rd != 15)
9ee6e8bb 6988 {
22478e79 6989 tmp2 = load_reg(s, rd);
5e3f878a
PB
6990 gen_helper_add_setq(tmp, tmp, tmp2);
6991 dead_tmp(tmp2);
9ee6e8bb 6992 }
22478e79 6993 store_reg(s, rn, tmp);
9ee6e8bb
PB
6994 }
6995 }
6996 break;
6997 case 3:
6998 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6999 switch (op1) {
7000 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7001 ARCH(6);
7002 tmp = load_reg(s, rm);
7003 tmp2 = load_reg(s, rs);
7004 gen_helper_usad8(tmp, tmp, tmp2);
7005 dead_tmp(tmp2);
ded9d295
AZ
7006 if (rd != 15) {
7007 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
7008 tcg_gen_add_i32(tmp, tmp, tmp2);
7009 dead_tmp(tmp2);
9ee6e8bb 7010 }
ded9d295 7011 store_reg(s, rn, tmp);
9ee6e8bb
PB
7012 break;
7013 case 0x20: case 0x24: case 0x28: case 0x2c:
7014 /* Bitfield insert/clear. */
7015 ARCH(6T2);
7016 shift = (insn >> 7) & 0x1f;
7017 i = (insn >> 16) & 0x1f;
7018 i = i + 1 - shift;
7019 if (rm == 15) {
5e3f878a
PB
7020 tmp = new_tmp();
7021 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7022 } else {
5e3f878a 7023 tmp = load_reg(s, rm);
9ee6e8bb
PB
7024 }
7025 if (i != 32) {
5e3f878a 7026 tmp2 = load_reg(s, rd);
8f8e3aa4 7027 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 7028 dead_tmp(tmp2);
9ee6e8bb 7029 }
5e3f878a 7030 store_reg(s, rd, tmp);
9ee6e8bb
PB
7031 break;
7032 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7033 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7034 ARCH(6T2);
5e3f878a 7035 tmp = load_reg(s, rm);
9ee6e8bb
PB
7036 shift = (insn >> 7) & 0x1f;
7037 i = ((insn >> 16) & 0x1f) + 1;
7038 if (shift + i > 32)
7039 goto illegal_op;
7040 if (i < 32) {
7041 if (op1 & 0x20) {
5e3f878a 7042 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7043 } else {
5e3f878a 7044 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7045 }
7046 }
5e3f878a 7047 store_reg(s, rd, tmp);
9ee6e8bb
PB
7048 break;
7049 default:
7050 goto illegal_op;
7051 }
7052 break;
7053 }
7054 break;
7055 }
7056 do_ldst:
7057 /* Check for undefined extension instructions
 7058 * per the ARM Bible, i.e.:
7059 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7060 */
7061 sh = (0xf << 20) | (0xf << 4);
7062 if (op1 == 0x7 && ((insn & sh) == sh))
7063 {
7064 goto illegal_op;
7065 }
7066 /* load/store byte/word */
7067 rn = (insn >> 16) & 0xf;
7068 rd = (insn >> 12) & 0xf;
b0109805 7069 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7070 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7071 if (insn & (1 << 24))
b0109805 7072 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7073 if (insn & (1 << 20)) {
7074 /* load */
9ee6e8bb 7075 if (insn & (1 << 22)) {
b0109805 7076 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7077 } else {
b0109805 7078 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7079 }
9ee6e8bb
PB
7080 } else {
7081 /* store */
b0109805 7082 tmp = load_reg(s, rd);
9ee6e8bb 7083 if (insn & (1 << 22))
b0109805 7084 gen_st8(tmp, tmp2, i);
9ee6e8bb 7085 else
b0109805 7086 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7087 }
7088 if (!(insn & (1 << 24))) {
b0109805
PB
7089 gen_add_data_offset(s, insn, tmp2);
7090 store_reg(s, rn, tmp2);
7091 } else if (insn & (1 << 21)) {
7092 store_reg(s, rn, tmp2);
7093 } else {
7094 dead_tmp(tmp2);
9ee6e8bb
PB
7095 }
7096 if (insn & (1 << 20)) {
7097 /* Complete the load. */
7098 if (rd == 15)
b0109805 7099 gen_bx(s, tmp);
9ee6e8bb 7100 else
b0109805 7101 store_reg(s, rd, tmp);
9ee6e8bb
PB
7102 }
7103 break;
7104 case 0x08:
7105 case 0x09:
7106 {
7107 int j, n, user, loaded_base;
b0109805 7108 TCGv loaded_var;
9ee6e8bb
PB
7109 /* load/store multiple words */
7110 /* XXX: store correct base if write back */
7111 user = 0;
7112 if (insn & (1 << 22)) {
7113 if (IS_USER(s))
7114 goto illegal_op; /* only usable in supervisor mode */
7115
7116 if ((insn & (1 << 15)) == 0)
7117 user = 1;
7118 }
7119 rn = (insn >> 16) & 0xf;
b0109805 7120 addr = load_reg(s, rn);
9ee6e8bb
PB
7121
7122 /* compute total size */
7123 loaded_base = 0;
a50f5b91 7124 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7125 n = 0;
7126 for(i=0;i<16;i++) {
7127 if (insn & (1 << i))
7128 n++;
7129 }
7130 /* XXX: test invalid n == 0 case ? */
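            /* Compute the lowest address used by the transfer: increment modes
               only need the +4 pre-adjustment for IB, while decrement modes start
               n*4 (DB) or (n-1)*4 (DA) bytes below the base; the loop below then
               always walks upwards in steps of 4. */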
7131 if (insn & (1 << 23)) {
7132 if (insn & (1 << 24)) {
7133 /* pre increment */
b0109805 7134 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7135 } else {
7136 /* post increment */
7137 }
7138 } else {
7139 if (insn & (1 << 24)) {
7140 /* pre decrement */
b0109805 7141 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7142 } else {
7143 /* post decrement */
7144 if (n != 1)
b0109805 7145 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7146 }
7147 }
7148 j = 0;
7149 for(i=0;i<16;i++) {
7150 if (insn & (1 << i)) {
7151 if (insn & (1 << 20)) {
7152 /* load */
b0109805 7153 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7154 if (i == 15) {
b0109805 7155 gen_bx(s, tmp);
9ee6e8bb 7156 } else if (user) {
b75263d6
JR
7157 tmp2 = tcg_const_i32(i);
7158 gen_helper_set_user_reg(tmp2, tmp);
7159 tcg_temp_free_i32(tmp2);
b0109805 7160 dead_tmp(tmp);
9ee6e8bb 7161 } else if (i == rn) {
b0109805 7162 loaded_var = tmp;
9ee6e8bb
PB
7163 loaded_base = 1;
7164 } else {
b0109805 7165 store_reg(s, i, tmp);
9ee6e8bb
PB
7166 }
7167 } else {
7168 /* store */
7169 if (i == 15) {
7170 /* special case: r15 = PC + 8 */
7171 val = (long)s->pc + 4;
b0109805
PB
7172 tmp = new_tmp();
7173 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7174 } else if (user) {
b0109805 7175 tmp = new_tmp();
b75263d6
JR
7176 tmp2 = tcg_const_i32(i);
7177 gen_helper_get_user_reg(tmp, tmp2);
7178 tcg_temp_free_i32(tmp2);
9ee6e8bb 7179 } else {
b0109805 7180 tmp = load_reg(s, i);
9ee6e8bb 7181 }
b0109805 7182 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7183 }
7184 j++;
7185 /* no need to add after the last transfer */
7186 if (j != n)
b0109805 7187 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7188 }
7189 }
7190 if (insn & (1 << 21)) {
7191 /* write back */
7192 if (insn & (1 << 23)) {
7193 if (insn & (1 << 24)) {
7194 /* pre increment */
7195 } else {
7196 /* post increment */
b0109805 7197 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7198 }
7199 } else {
7200 if (insn & (1 << 24)) {
7201 /* pre decrement */
7202 if (n != 1)
b0109805 7203 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7204 } else {
7205 /* post decrement */
b0109805 7206 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7207 }
7208 }
b0109805
PB
7209 store_reg(s, rn, addr);
7210 } else {
7211 dead_tmp(addr);
9ee6e8bb
PB
7212 }
7213 if (loaded_base) {
b0109805 7214 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7215 }
7216 if ((insn & (1 << 22)) && !user) {
7217 /* Restore CPSR from SPSR. */
d9ba4830
PB
7218 tmp = load_cpu_field(spsr);
7219 gen_set_cpsr(tmp, 0xffffffff);
7220 dead_tmp(tmp);
9ee6e8bb
PB
7221 s->is_jmp = DISAS_UPDATE;
7222 }
7223 }
7224 break;
7225 case 0xa:
7226 case 0xb:
7227 {
7228 int32_t offset;
7229
7230 /* branch (and link) */
7231 val = (int32_t)s->pc;
7232 if (insn & (1 << 24)) {
5e3f878a
PB
7233 tmp = new_tmp();
7234 tcg_gen_movi_i32(tmp, val);
7235 store_reg(s, 14, tmp);
9ee6e8bb
PB
7236 }
7237 offset = (((int32_t)insn << 8) >> 8);
7238 val += (offset << 2) + 4;
7239 gen_jmp(s, val);
7240 }
7241 break;
7242 case 0xc:
7243 case 0xd:
7244 case 0xe:
7245 /* Coprocessor. */
7246 if (disas_coproc_insn(env, s, insn))
7247 goto illegal_op;
7248 break;
7249 case 0xf:
7250 /* swi */
5e3f878a 7251 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7252 s->is_jmp = DISAS_SWI;
7253 break;
7254 default:
7255 illegal_op:
7256 gen_set_condexec(s);
5e3f878a 7257 gen_set_pc_im(s->pc - 4);
d9ba4830 7258 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7259 s->is_jmp = DISAS_JUMP;
7260 break;
7261 }
7262 }
7263}
7264
7265/* Return true if this is a Thumb-2 logical op. */
7266static int
7267thumb2_logic_op(int op)
7268{
7269 return (op < 8);
7270}
7271
7272/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7273 then set condition code flags based on the result of the operation.
7274 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7275 to the high bit of T1.
7276 Returns zero if the opcode is valid. */
7277
7278static int
396e467c 7279gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7280{
7281 int logic_cc;
7282
7283 logic_cc = 0;
7284 switch (op) {
7285 case 0: /* and */
396e467c 7286 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7287 logic_cc = conds;
7288 break;
7289 case 1: /* bic */
f669df27 7290 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7291 logic_cc = conds;
7292 break;
7293 case 2: /* orr */
396e467c 7294 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7295 logic_cc = conds;
7296 break;
7297 case 3: /* orn */
396e467c
FN
7298 tcg_gen_not_i32(t1, t1);
7299 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7300 logic_cc = conds;
7301 break;
7302 case 4: /* eor */
396e467c 7303 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7304 logic_cc = conds;
7305 break;
7306 case 8: /* add */
7307 if (conds)
396e467c 7308 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7309 else
396e467c 7310 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7311 break;
7312 case 10: /* adc */
7313 if (conds)
396e467c 7314 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7315 else
396e467c 7316 gen_adc(t0, t1);
9ee6e8bb
PB
7317 break;
7318 case 11: /* sbc */
7319 if (conds)
396e467c 7320 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7321 else
396e467c 7322 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7323 break;
7324 case 13: /* sub */
7325 if (conds)
396e467c 7326 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7327 else
396e467c 7328 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7329 break;
7330 case 14: /* rsb */
7331 if (conds)
396e467c 7332 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7333 else
396e467c 7334 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7335 break;
7336 default: /* 5, 6, 7, 9, 12, 15. */
7337 return 1;
7338 }
7339 if (logic_cc) {
396e467c 7340 gen_logic_CC(t0);
9ee6e8bb 7341 if (shifter_out)
396e467c 7342 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7343 }
7344 return 0;
7345}
7346
7347/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7348 is not legal. */
7349static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7350{
b0109805 7351 uint32_t insn, imm, shift, offset;
9ee6e8bb 7352 uint32_t rd, rn, rm, rs;
b26eefb6 7353 TCGv tmp;
6ddbc6e4
PB
7354 TCGv tmp2;
7355 TCGv tmp3;
b0109805 7356 TCGv addr;
a7812ae4 7357 TCGv_i64 tmp64;
9ee6e8bb
PB
7358 int op;
7359 int shiftop;
7360 int conds;
7361 int logic_cc;
7362
7363 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7364 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7365 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7366 16-bit instructions to get correct prefetch abort behavior. */
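        /* Background (architectural, not specific to this file): a Thumb BL/BLX is
           encoded as two halfwords. The first (0b11110 plus the high offset bits)
           adds the sign-extended upper offset to the PC and parks the result in LR;
           the second halfword, decoded below, adds the remaining offset bits and
           performs the branch, leaving the return address (next PC | 1) in LR. */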
7367 insn = insn_hw1;
7368 if ((insn & (1 << 12)) == 0) {
7369 /* Second half of blx. */
7370 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7371 tmp = load_reg(s, 14);
7372 tcg_gen_addi_i32(tmp, tmp, offset);
7373 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7374
d9ba4830 7375 tmp2 = new_tmp();
b0109805 7376 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7377 store_reg(s, 14, tmp2);
7378 gen_bx(s, tmp);
9ee6e8bb
PB
7379 return 0;
7380 }
7381 if (insn & (1 << 11)) {
7382 /* Second half of bl. */
7383 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7384 tmp = load_reg(s, 14);
6a0d8a1d 7385 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7386
d9ba4830 7387 tmp2 = new_tmp();
b0109805 7388 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7389 store_reg(s, 14, tmp2);
7390 gen_bx(s, tmp);
9ee6e8bb
PB
7391 return 0;
7392 }
7393 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7394 /* Instruction spans a page boundary. Implement it as two
 7395 16-bit instructions in case the second half causes a
7396 prefetch abort. */
7397 offset = ((int32_t)insn << 21) >> 9;
396e467c 7398 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7399 return 0;
7400 }
7401 /* Fall through to 32-bit decode. */
7402 }
7403
7404 insn = lduw_code(s->pc);
7405 s->pc += 2;
7406 insn |= (uint32_t)insn_hw1 << 16;
7407
7408 if ((insn & 0xf800e800) != 0xf000e800) {
7409 ARCH(6T2);
7410 }
7411
7412 rn = (insn >> 16) & 0xf;
7413 rs = (insn >> 12) & 0xf;
7414 rd = (insn >> 8) & 0xf;
7415 rm = insn & 0xf;
7416 switch ((insn >> 25) & 0xf) {
7417 case 0: case 1: case 2: case 3:
7418 /* 16-bit instructions. Should never happen. */
7419 abort();
7420 case 4:
7421 if (insn & (1 << 22)) {
7422 /* Other load/store, table branch. */
7423 if (insn & 0x01200000) {
7424 /* Load/store doubleword. */
7425 if (rn == 15) {
b0109805
PB
7426 addr = new_tmp();
7427 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7428 } else {
b0109805 7429 addr = load_reg(s, rn);
9ee6e8bb
PB
7430 }
7431 offset = (insn & 0xff) * 4;
7432 if ((insn & (1 << 23)) == 0)
7433 offset = -offset;
7434 if (insn & (1 << 24)) {
b0109805 7435 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7436 offset = 0;
7437 }
7438 if (insn & (1 << 20)) {
7439 /* ldrd */
b0109805
PB
7440 tmp = gen_ld32(addr, IS_USER(s));
7441 store_reg(s, rs, tmp);
7442 tcg_gen_addi_i32(addr, addr, 4);
7443 tmp = gen_ld32(addr, IS_USER(s));
7444 store_reg(s, rd, tmp);
9ee6e8bb
PB
7445 } else {
7446 /* strd */
b0109805
PB
7447 tmp = load_reg(s, rs);
7448 gen_st32(tmp, addr, IS_USER(s));
7449 tcg_gen_addi_i32(addr, addr, 4);
7450 tmp = load_reg(s, rd);
7451 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7452 }
7453 if (insn & (1 << 21)) {
7454 /* Base writeback. */
7455 if (rn == 15)
7456 goto illegal_op;
b0109805
PB
7457 tcg_gen_addi_i32(addr, addr, offset - 4);
7458 store_reg(s, rn, addr);
7459 } else {
7460 dead_tmp(addr);
9ee6e8bb
PB
7461 }
7462 } else if ((insn & (1 << 23)) == 0) {
7463 /* Load/store exclusive word. */
3174f8e9 7464 addr = tcg_temp_local_new();
98a46317 7465 load_reg_var(s, addr, rn);
426f5abc 7466 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7467 if (insn & (1 << 20)) {
426f5abc 7468 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7469 } else {
426f5abc 7470 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7471 }
3174f8e9 7472 tcg_temp_free(addr);
9ee6e8bb
PB
7473 } else if ((insn & (1 << 6)) == 0) {
7474 /* Table Branch. */
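                /* TBB/TBH branch forward by twice the unsigned byte/halfword fetched
                   from a table at Rn indexed by Rm (Rm doubled for TBH), i.e.
                   PC = PC + 2 * table[Rm]; the shift-by-one and the add of s->pc
                   below implement exactly that. */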
7475 if (rn == 15) {
b0109805
PB
7476 addr = new_tmp();
7477 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7478 } else {
b0109805 7479 addr = load_reg(s, rn);
9ee6e8bb 7480 }
b26eefb6 7481 tmp = load_reg(s, rm);
b0109805 7482 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7483 if (insn & (1 << 4)) {
7484 /* tbh */
b0109805 7485 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7486 dead_tmp(tmp);
b0109805 7487 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7488 } else { /* tbb */
b26eefb6 7489 dead_tmp(tmp);
b0109805 7490 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7491 }
b0109805
PB
7492 dead_tmp(addr);
7493 tcg_gen_shli_i32(tmp, tmp, 1);
7494 tcg_gen_addi_i32(tmp, tmp, s->pc);
7495 store_reg(s, 15, tmp);
9ee6e8bb
PB
7496 } else {
7497 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7498 ARCH(7);
9ee6e8bb 7499 op = (insn >> 4) & 0x3;
426f5abc
PB
7500 if (op == 2) {
7501 goto illegal_op;
7502 }
3174f8e9 7503 addr = tcg_temp_local_new();
98a46317 7504 load_reg_var(s, addr, rn);
9ee6e8bb 7505 if (insn & (1 << 20)) {
426f5abc 7506 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7507 } else {
426f5abc 7508 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7509 }
3174f8e9 7510 tcg_temp_free(addr);
9ee6e8bb
PB
7511 }
7512 } else {
7513 /* Load/store multiple, RFE, SRS. */
7514 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7515 /* Not available in user mode. */
b0109805 7516 if (IS_USER(s))
9ee6e8bb
PB
7517 goto illegal_op;
7518 if (insn & (1 << 20)) {
7519 /* rfe */
b0109805
PB
7520 addr = load_reg(s, rn);
7521 if ((insn & (1 << 24)) == 0)
7522 tcg_gen_addi_i32(addr, addr, -8);
7523 /* Load PC into tmp and CPSR into tmp2. */
7524 tmp = gen_ld32(addr, 0);
7525 tcg_gen_addi_i32(addr, addr, 4);
7526 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7527 if (insn & (1 << 21)) {
7528 /* Base writeback. */
b0109805
PB
7529 if (insn & (1 << 24)) {
7530 tcg_gen_addi_i32(addr, addr, 4);
7531 } else {
7532 tcg_gen_addi_i32(addr, addr, -4);
7533 }
7534 store_reg(s, rn, addr);
7535 } else {
7536 dead_tmp(addr);
9ee6e8bb 7537 }
b0109805 7538 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7539 } else {
7540 /* srs */
7541 op = (insn & 0x1f);
7542 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7543 addr = load_reg(s, 13);
9ee6e8bb 7544 } else {
b0109805 7545 addr = new_tmp();
b75263d6
JR
7546 tmp = tcg_const_i32(op);
7547 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7548 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7549 }
7550 if ((insn & (1 << 24)) == 0) {
b0109805 7551 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7552 }
b0109805
PB
7553 tmp = load_reg(s, 14);
7554 gen_st32(tmp, addr, 0);
7555 tcg_gen_addi_i32(addr, addr, 4);
7556 tmp = new_tmp();
7557 gen_helper_cpsr_read(tmp);
7558 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7559 if (insn & (1 << 21)) {
7560 if ((insn & (1 << 24)) == 0) {
b0109805 7561 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7562 } else {
b0109805 7563 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7564 }
7565 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7566 store_reg(s, 13, addr);
9ee6e8bb 7567 } else {
b75263d6
JR
7568 tmp = tcg_const_i32(op);
7569 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7570 tcg_temp_free_i32(tmp);
9ee6e8bb 7571 }
b0109805
PB
7572 } else {
7573 dead_tmp(addr);
9ee6e8bb
PB
7574 }
7575 }
7576 } else {
7577 int i;
7578 /* Load/store multiple. */
b0109805 7579 addr = load_reg(s, rn);
9ee6e8bb
PB
7580 offset = 0;
7581 for (i = 0; i < 16; i++) {
7582 if (insn & (1 << i))
7583 offset += 4;
7584 }
7585 if (insn & (1 << 24)) {
b0109805 7586 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7587 }
7588
7589 for (i = 0; i < 16; i++) {
7590 if ((insn & (1 << i)) == 0)
7591 continue;
7592 if (insn & (1 << 20)) {
7593 /* Load. */
b0109805 7594 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7595 if (i == 15) {
b0109805 7596 gen_bx(s, tmp);
9ee6e8bb 7597 } else {
b0109805 7598 store_reg(s, i, tmp);
9ee6e8bb
PB
7599 }
7600 } else {
7601 /* Store. */
b0109805
PB
7602 tmp = load_reg(s, i);
7603 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7604 }
b0109805 7605 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7606 }
7607 if (insn & (1 << 21)) {
7608 /* Base register writeback. */
7609 if (insn & (1 << 24)) {
b0109805 7610 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7611 }
7612 /* Fault if writeback register is in register list. */
7613 if (insn & (1 << rn))
7614 goto illegal_op;
b0109805
PB
7615 store_reg(s, rn, addr);
7616 } else {
7617 dead_tmp(addr);
9ee6e8bb
PB
7618 }
7619 }
7620 }
7621 break;
2af9ab77
JB
7622 case 5:
7623
9ee6e8bb 7624 op = (insn >> 21) & 0xf;
2af9ab77
JB
7625 if (op == 6) {
7626 /* Halfword pack. */
7627 tmp = load_reg(s, rn);
7628 tmp2 = load_reg(s, rm);
7629 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7630 if (insn & (1 << 5)) {
7631 /* pkhtb */
7632 if (shift == 0)
7633 shift = 31;
7634 tcg_gen_sari_i32(tmp2, tmp2, shift);
7635 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7636 tcg_gen_ext16u_i32(tmp2, tmp2);
7637 } else {
7638 /* pkhbt */
7639 if (shift)
7640 tcg_gen_shli_i32(tmp2, tmp2, shift);
7641 tcg_gen_ext16u_i32(tmp, tmp);
7642 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7643 }
7644 tcg_gen_or_i32(tmp, tmp, tmp2);
7645 dead_tmp(tmp2);
3174f8e9
FN
7646 store_reg(s, rd, tmp);
7647 } else {
2af9ab77
JB
7648 /* Data processing register constant shift. */
7649 if (rn == 15) {
7650 tmp = new_tmp();
7651 tcg_gen_movi_i32(tmp, 0);
7652 } else {
7653 tmp = load_reg(s, rn);
7654 }
7655 tmp2 = load_reg(s, rm);
7656
7657 shiftop = (insn >> 4) & 3;
7658 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7659 conds = (insn & (1 << 20)) != 0;
7660 logic_cc = (conds && thumb2_logic_op(op));
7661 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7662 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7663 goto illegal_op;
7664 dead_tmp(tmp2);
7665 if (rd != 15) {
7666 store_reg(s, rd, tmp);
7667 } else {
7668 dead_tmp(tmp);
7669 }
3174f8e9 7670 }
9ee6e8bb
PB
7671 break;
7672 case 13: /* Misc data processing. */
7673 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7674 if (op < 4 && (insn & 0xf000) != 0xf000)
7675 goto illegal_op;
7676 switch (op) {
7677 case 0: /* Register controlled shift. */
8984bd2e
PB
7678 tmp = load_reg(s, rn);
7679 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7680 if ((insn & 0x70) != 0)
7681 goto illegal_op;
7682 op = (insn >> 21) & 3;
8984bd2e
PB
7683 logic_cc = (insn & (1 << 20)) != 0;
7684 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7685 if (logic_cc)
7686 gen_logic_CC(tmp);
21aeb343 7687 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7688 break;
7689 case 1: /* Sign/zero extend. */
5e3f878a 7690 tmp = load_reg(s, rm);
9ee6e8bb
PB
7691 shift = (insn >> 4) & 3;
7692 /* ??? In many cases it's not necessary to do a
7693 rotate, a shift is sufficient. */
7694 if (shift != 0)
f669df27 7695 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7696 op = (insn >> 20) & 7;
7697 switch (op) {
5e3f878a
PB
7698 case 0: gen_sxth(tmp); break;
7699 case 1: gen_uxth(tmp); break;
7700 case 2: gen_sxtb16(tmp); break;
7701 case 3: gen_uxtb16(tmp); break;
7702 case 4: gen_sxtb(tmp); break;
7703 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7704 default: goto illegal_op;
7705 }
7706 if (rn != 15) {
5e3f878a 7707 tmp2 = load_reg(s, rn);
9ee6e8bb 7708 if ((op >> 1) == 1) {
5e3f878a 7709 gen_add16(tmp, tmp2);
9ee6e8bb 7710 } else {
5e3f878a
PB
7711 tcg_gen_add_i32(tmp, tmp, tmp2);
7712 dead_tmp(tmp2);
9ee6e8bb
PB
7713 }
7714 }
5e3f878a 7715 store_reg(s, rd, tmp);
9ee6e8bb
PB
7716 break;
7717 case 2: /* SIMD add/subtract. */
7718 op = (insn >> 20) & 7;
7719 shift = (insn >> 4) & 7;
7720 if ((op & 3) == 3 || (shift & 3) == 3)
7721 goto illegal_op;
6ddbc6e4
PB
7722 tmp = load_reg(s, rn);
7723 tmp2 = load_reg(s, rm);
7724 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7725 dead_tmp(tmp2);
7726 store_reg(s, rd, tmp);
9ee6e8bb
PB
7727 break;
7728 case 3: /* Other data processing. */
7729 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7730 if (op < 4) {
7731 /* Saturating add/subtract. */
d9ba4830
PB
7732 tmp = load_reg(s, rn);
7733 tmp2 = load_reg(s, rm);
9ee6e8bb 7734 if (op & 1)
4809c612
JB
7735 gen_helper_double_saturate(tmp, tmp);
7736 if (op & 2)
d9ba4830 7737 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7738 else
d9ba4830
PB
7739 gen_helper_add_saturate(tmp, tmp, tmp2);
7740 dead_tmp(tmp2);
9ee6e8bb 7741 } else {
d9ba4830 7742 tmp = load_reg(s, rn);
9ee6e8bb
PB
7743 switch (op) {
7744 case 0x0a: /* rbit */
d9ba4830 7745 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7746 break;
7747 case 0x08: /* rev */
66896cb8 7748 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7749 break;
7750 case 0x09: /* rev16 */
d9ba4830 7751 gen_rev16(tmp);
9ee6e8bb
PB
7752 break;
7753 case 0x0b: /* revsh */
d9ba4830 7754 gen_revsh(tmp);
9ee6e8bb
PB
7755 break;
7756 case 0x10: /* sel */
d9ba4830 7757 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7758 tmp3 = new_tmp();
7759 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7760 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7761 dead_tmp(tmp3);
d9ba4830 7762 dead_tmp(tmp2);
9ee6e8bb
PB
7763 break;
7764 case 0x18: /* clz */
d9ba4830 7765 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7766 break;
7767 default:
7768 goto illegal_op;
7769 }
7770 }
d9ba4830 7771 store_reg(s, rd, tmp);
9ee6e8bb
PB
7772 break;
7773 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7774 op = (insn >> 4) & 0xf;
d9ba4830
PB
7775 tmp = load_reg(s, rn);
7776 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7777 switch ((insn >> 20) & 7) {
7778 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7779 tcg_gen_mul_i32(tmp, tmp, tmp2);
7780 dead_tmp(tmp2);
9ee6e8bb 7781 if (rs != 15) {
d9ba4830 7782 tmp2 = load_reg(s, rs);
9ee6e8bb 7783 if (op)
d9ba4830 7784 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7785 else
d9ba4830
PB
7786 tcg_gen_add_i32(tmp, tmp, tmp2);
7787 dead_tmp(tmp2);
9ee6e8bb 7788 }
9ee6e8bb
PB
7789 break;
7790 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7791 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7792 dead_tmp(tmp2);
9ee6e8bb 7793 if (rs != 15) {
d9ba4830
PB
7794 tmp2 = load_reg(s, rs);
7795 gen_helper_add_setq(tmp, tmp, tmp2);
7796 dead_tmp(tmp2);
9ee6e8bb 7797 }
9ee6e8bb
PB
7798 break;
7799 case 2: /* Dual multiply add. */
7800 case 4: /* Dual multiply subtract. */
7801 if (op)
d9ba4830
PB
7802 gen_swap_half(tmp2);
7803 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7804 /* This addition cannot overflow. */
7805 if (insn & (1 << 22)) {
d9ba4830 7806 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7807 } else {
d9ba4830 7808 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7809 }
d9ba4830 7810 dead_tmp(tmp2);
9ee6e8bb
PB
7811 if (rs != 15)
7812 {
d9ba4830
PB
7813 tmp2 = load_reg(s, rs);
7814 gen_helper_add_setq(tmp, tmp, tmp2);
7815 dead_tmp(tmp2);
9ee6e8bb 7816 }
9ee6e8bb
PB
7817 break;
7818 case 3: /* 32 * 16 -> 32msb */
7819 if (op)
d9ba4830 7820 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7821 else
d9ba4830 7822 gen_sxth(tmp2);
a7812ae4
PB
7823 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7824 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7825 tmp = new_tmp();
a7812ae4 7826 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7827 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7828 if (rs != 15)
7829 {
d9ba4830
PB
7830 tmp2 = load_reg(s, rs);
7831 gen_helper_add_setq(tmp, tmp, tmp2);
7832 dead_tmp(tmp2);
9ee6e8bb 7833 }
9ee6e8bb
PB
7834 break;
7835 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7836 gen_imull(tmp, tmp2);
7837 if (insn & (1 << 5)) {
7838 gen_roundqd(tmp, tmp2);
7839 dead_tmp(tmp2);
7840 } else {
7841 dead_tmp(tmp);
7842 tmp = tmp2;
7843 }
9ee6e8bb 7844 if (rs != 15) {
d9ba4830 7845 tmp2 = load_reg(s, rs);
9ee6e8bb 7846 if (insn & (1 << 21)) {
d9ba4830 7847 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7848 } else {
d9ba4830 7849 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7850 }
d9ba4830 7851 dead_tmp(tmp2);
2c0262af 7852 }
9ee6e8bb
PB
7853 break;
7854 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7855 gen_helper_usad8(tmp, tmp, tmp2);
7856 dead_tmp(tmp2);
9ee6e8bb 7857 if (rs != 15) {
d9ba4830
PB
7858 tmp2 = load_reg(s, rs);
7859 tcg_gen_add_i32(tmp, tmp, tmp2);
7860 dead_tmp(tmp2);
5fd46862 7861 }
9ee6e8bb 7862 break;
2c0262af 7863 }
d9ba4830 7864 store_reg(s, rd, tmp);
2c0262af 7865 break;
9ee6e8bb
PB
7866 case 6: case 7: /* 64-bit multiply, Divide. */
7867 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7868 tmp = load_reg(s, rn);
7869 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7870 if ((op & 0x50) == 0x10) {
7871 /* sdiv, udiv */
7872 if (!arm_feature(env, ARM_FEATURE_DIV))
7873 goto illegal_op;
7874 if (op & 0x20)
5e3f878a 7875 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7876 else
5e3f878a
PB
7877 gen_helper_sdiv(tmp, tmp, tmp2);
7878 dead_tmp(tmp2);
7879 store_reg(s, rd, tmp);
9ee6e8bb
PB
7880 } else if ((op & 0xe) == 0xc) {
7881 /* Dual multiply accumulate long. */
7882 if (op & 1)
5e3f878a
PB
7883 gen_swap_half(tmp2);
7884 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7885 if (op & 0x10) {
5e3f878a 7886 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7887 } else {
5e3f878a 7888 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7889 }
5e3f878a 7890 dead_tmp(tmp2);
a7812ae4
PB
7891 /* BUGFIX */
7892 tmp64 = tcg_temp_new_i64();
7893 tcg_gen_ext_i32_i64(tmp64, tmp);
7894 dead_tmp(tmp);
7895 gen_addq(s, tmp64, rs, rd);
7896 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7897 tcg_temp_free_i64(tmp64);
2c0262af 7898 } else {
9ee6e8bb
PB
7899 if (op & 0x20) {
7900 /* Unsigned 64-bit multiply */
a7812ae4 7901 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7902 } else {
9ee6e8bb
PB
7903 if (op & 8) {
7904 /* smlalxy */
5e3f878a
PB
7905 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7906 dead_tmp(tmp2);
a7812ae4
PB
7907 tmp64 = tcg_temp_new_i64();
7908 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7909 dead_tmp(tmp);
9ee6e8bb
PB
7910 } else {
7911 /* Signed 64-bit multiply */
a7812ae4 7912 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7913 }
b5ff1b31 7914 }
9ee6e8bb
PB
7915 if (op & 4) {
7916 /* umaal */
a7812ae4
PB
7917 gen_addq_lo(s, tmp64, rs);
7918 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7919 } else if (op & 0x40) {
7920 /* 64-bit accumulate. */
a7812ae4 7921 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7922 }
a7812ae4 7923 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7924 tcg_temp_free_i64(tmp64);
5fd46862 7925 }
2c0262af 7926 break;
9ee6e8bb
PB
7927 }
7928 break;
7929 case 6: case 7: case 14: case 15:
7930 /* Coprocessor. */
7931 if (((insn >> 24) & 3) == 3) {
7932 /* Translate into the equivalent ARM encoding. */
7933 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7934 if (disas_neon_data_insn(env, s, insn))
7935 goto illegal_op;
7936 } else {
7937 if (insn & (1 << 28))
7938 goto illegal_op;
7939 if (disas_coproc_insn (env, s, insn))
7940 goto illegal_op;
7941 }
7942 break;
7943 case 8: case 9: case 10: case 11:
7944 if (insn & (1 << 15)) {
7945 /* Branches, misc control. */
7946 if (insn & 0x5000) {
7947 /* Unconditional branch. */
7948 /* signextend(hw1[10:0]) -> offset[:12]. */
7949 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7950 /* hw2[10:0] -> offset[11:1]. */
7951 offset |= (insn & 0x7ff) << 1;
7952 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7953 offset[24:22] already have the same value because of the
7954 sign extension above. */
7955 offset ^= ((~insn) & (1 << 13)) << 10;
7956 offset ^= ((~insn) & (1 << 11)) << 11;
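/* i.e. I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S) in ARM ARM terms,
   recovering offset bits [23:22] from hw2 bits 13 and 11. */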
7957
9ee6e8bb
PB
7958 if (insn & (1 << 14)) {
7959 /* Branch and link. */
3174f8e9 7960 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7961 }
3b46e624 7962
b0109805 7963 offset += s->pc;
9ee6e8bb
PB
7964 if (insn & (1 << 12)) {
7965 /* b/bl */
b0109805 7966 gen_jmp(s, offset);
9ee6e8bb
PB
7967 } else {
7968 /* blx */
b0109805
PB
7969 offset &= ~(uint32_t)2;
7970 gen_bx_im(s, offset);
2c0262af 7971 }
9ee6e8bb
PB
7972 } else if (((insn >> 23) & 7) == 7) {
7973 /* Misc control */
7974 if (insn & (1 << 13))
7975 goto illegal_op;
7976
7977 if (insn & (1 << 26)) {
7978 /* Secure monitor call (v6Z) */
7979 goto illegal_op; /* not implemented. */
2c0262af 7980 } else {
9ee6e8bb
PB
7981 op = (insn >> 20) & 7;
7982 switch (op) {
7983 case 0: /* msr cpsr. */
7984 if (IS_M(env)) {
8984bd2e
PB
7985 tmp = load_reg(s, rn);
7986 addr = tcg_const_i32(insn & 0xff);
7987 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6
JR
7988 tcg_temp_free_i32(addr);
7989 dead_tmp(tmp);
9ee6e8bb
PB
7990 gen_lookup_tb(s);
7991 break;
7992 }
7993 /* fall through */
7994 case 1: /* msr spsr. */
7995 if (IS_M(env))
7996 goto illegal_op;
2fbac54b
FN
7997 tmp = load_reg(s, rn);
7998 if (gen_set_psr(s,
9ee6e8bb 7999 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8000 op == 1, tmp))
9ee6e8bb
PB
8001 goto illegal_op;
8002 break;
8003 case 2: /* cps, nop-hint. */
8004 if (((insn >> 8) & 7) == 0) {
8005 gen_nop_hint(s, insn & 0xff);
8006 }
8007 /* Implemented as NOP in user mode. */
8008 if (IS_USER(s))
8009 break;
8010 offset = 0;
8011 imm = 0;
8012 if (insn & (1 << 10)) {
8013 if (insn & (1 << 7))
8014 offset |= CPSR_A;
8015 if (insn & (1 << 6))
8016 offset |= CPSR_I;
8017 if (insn & (1 << 5))
8018 offset |= CPSR_F;
8019 if (insn & (1 << 9))
8020 imm = CPSR_A | CPSR_I | CPSR_F;
8021 }
8022 if (insn & (1 << 8)) {
8023 offset |= 0x1f;
8024 imm |= (insn & 0x1f);
8025 }
8026 if (offset) {
2fbac54b 8027 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8028 }
8029 break;
8030 case 3: /* Special control operations. */
426f5abc 8031 ARCH(7);
9ee6e8bb
PB
8032 op = (insn >> 4) & 0xf;
8033 switch (op) {
8034 case 2: /* clrex */
426f5abc 8035 gen_clrex(s);
9ee6e8bb
PB
8036 break;
8037 case 4: /* dsb */
8038 case 5: /* dmb */
8039 case 6: /* isb */
8040 /* These execute as NOPs. */
9ee6e8bb
PB
8041 break;
8042 default:
8043 goto illegal_op;
8044 }
8045 break;
8046 case 4: /* bxj */
8047 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8048 tmp = load_reg(s, rn);
8049 gen_bx(s, tmp);
9ee6e8bb
PB
8050 break;
8051 case 5: /* Exception return. */
b8b45b68
RV
8052 if (IS_USER(s)) {
8053 goto illegal_op;
8054 }
8055 if (rn != 14 || rd != 15) {
8056 goto illegal_op;
8057 }
8058 tmp = load_reg(s, rn);
8059 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8060 gen_exception_return(s, tmp);
8061 break;
9ee6e8bb 8062 case 6: /* mrs cpsr. */
8984bd2e 8063 tmp = new_tmp();
9ee6e8bb 8064 if (IS_M(env)) {
8984bd2e
PB
8065 addr = tcg_const_i32(insn & 0xff);
8066 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8067 tcg_temp_free_i32(addr);
9ee6e8bb 8068 } else {
8984bd2e 8069 gen_helper_cpsr_read(tmp);
9ee6e8bb 8070 }
8984bd2e 8071 store_reg(s, rd, tmp);
9ee6e8bb
PB
8072 break;
8073 case 7: /* mrs spsr. */
8074 /* Not accessible in user mode. */
8075 if (IS_USER(s) || IS_M(env))
8076 goto illegal_op;
d9ba4830
PB
8077 tmp = load_cpu_field(spsr);
8078 store_reg(s, rd, tmp);
9ee6e8bb 8079 break;
2c0262af
FB
8080 }
8081 }
9ee6e8bb
PB
8082 } else {
8083 /* Conditional branch. */
8084 op = (insn >> 22) & 0xf;
8085 /* Generate a conditional jump to next instruction. */
8086 s->condlabel = gen_new_label();
d9ba4830 8087 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8088 s->condjmp = 1;
8089
8090 /* offset[11:1] = insn[10:0] */
8091 offset = (insn & 0x7ff) << 1;
8092 /* offset[17:12] = insn[21:16]. */
8093 offset |= (insn & 0x003f0000) >> 4;
8094 /* offset[31:20] = insn[26]. */
8095 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8096 /* offset[18] = insn[13]. */
8097 offset |= (insn & (1 << 13)) << 5;
8098 /* offset[19] = insn[11]. */
8099 offset |= (insn & (1 << 11)) << 8;
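/* The result is a signed 21-bit offset in bits [20:1], i.e. the +/-1MB
   range of the 32-bit conditional branch. */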
8100
8101 /* jump to the offset */
b0109805 8102 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8103 }
8104 } else {
8105 /* Data processing immediate. */
8106 if (insn & (1 << 25)) {
8107 if (insn & (1 << 24)) {
8108 if (insn & (1 << 20))
8109 goto illegal_op;
8110 /* Bitfield/Saturate. */
8111 op = (insn >> 21) & 7;
8112 imm = insn & 0x1f;
8113 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
8114 if (rn == 15) {
8115 tmp = new_tmp();
8116 tcg_gen_movi_i32(tmp, 0);
8117 } else {
8118 tmp = load_reg(s, rn);
8119 }
9ee6e8bb
PB
8120 switch (op) {
8121 case 2: /* Signed bitfield extract. */
8122 imm++;
8123 if (shift + imm > 32)
8124 goto illegal_op;
8125 if (imm < 32)
6ddbc6e4 8126 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8127 break;
8128 case 6: /* Unsigned bitfield extract. */
8129 imm++;
8130 if (shift + imm > 32)
8131 goto illegal_op;
8132 if (imm < 32)
6ddbc6e4 8133 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8134 break;
8135 case 3: /* Bitfield insert/clear. */
8136 if (imm < shift)
8137 goto illegal_op;
8138 imm = imm + 1 - shift;
8139 if (imm != 32) {
6ddbc6e4 8140 tmp2 = load_reg(s, rd);
8f8e3aa4 8141 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 8142 dead_tmp(tmp2);
9ee6e8bb
PB
8143 }
8144 break;
8145 case 7:
8146 goto illegal_op;
8147 default: /* Saturate. */
9ee6e8bb
PB
8148 if (shift) {
8149 if (op & 1)
6ddbc6e4 8150 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8151 else
6ddbc6e4 8152 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8153 }
6ddbc6e4 8154 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8155 if (op & 4) {
8156 /* Unsigned. */
9ee6e8bb 8157 if ((op & 1) && shift == 0)
6ddbc6e4 8158 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8159 else
6ddbc6e4 8160 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8161 } else {
9ee6e8bb 8162 /* Signed. */
9ee6e8bb 8163 if ((op & 1) && shift == 0)
6ddbc6e4 8164 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8165 else
6ddbc6e4 8166 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8167 }
b75263d6 8168 tcg_temp_free_i32(tmp2);
9ee6e8bb 8169 break;
2c0262af 8170 }
6ddbc6e4 8171 store_reg(s, rd, tmp);
9ee6e8bb
PB
8172 } else {
8173 imm = ((insn & 0x04000000) >> 15)
8174 | ((insn & 0x7000) >> 4) | (insn & 0xff);
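/* imm = i:imm3:imm8 (insn bits 26, 14:12, 7:0); the 16-bit movw/movt
   forms below merge in imm4 from bits [19:16]. */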
8175 if (insn & (1 << 22)) {
8176 /* 16-bit immediate. */
8177 imm |= (insn >> 4) & 0xf000;
8178 if (insn & (1 << 23)) {
8179 /* movt */
5e3f878a 8180 tmp = load_reg(s, rd);
86831435 8181 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8182 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8183 } else {
9ee6e8bb 8184 /* movw */
5e3f878a
PB
8185 tmp = new_tmp();
8186 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8187 }
8188 } else {
9ee6e8bb
PB
8189 /* Add/sub 12-bit immediate. */
8190 if (rn == 15) {
b0109805 8191 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8192 if (insn & (1 << 23))
b0109805 8193 offset -= imm;
9ee6e8bb 8194 else
b0109805 8195 offset += imm;
5e3f878a
PB
8196 tmp = new_tmp();
8197 tcg_gen_movi_i32(tmp, offset);
2c0262af 8198 } else {
5e3f878a 8199 tmp = load_reg(s, rn);
9ee6e8bb 8200 if (insn & (1 << 23))
5e3f878a 8201 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8202 else
5e3f878a 8203 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8204 }
9ee6e8bb 8205 }
5e3f878a 8206 store_reg(s, rd, tmp);
191abaa2 8207 }
9ee6e8bb
PB
8208 } else {
8209 int shifter_out = 0;
8210 /* modified 12-bit immediate. */
8211 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8212 imm = (insn & 0xff);
8213 switch (shift) {
8214 case 0: /* XY */
8215 /* Nothing to do. */
8216 break;
8217 case 1: /* 00XY00XY */
8218 imm |= imm << 16;
8219 break;
8220 case 2: /* XY00XY00 */
8221 imm |= imm << 16;
8222 imm <<= 8;
8223 break;
8224 case 3: /* XYXYXYXY */
8225 imm |= imm << 16;
8226 imm |= imm << 8;
8227 break;
8228 default: /* Rotated constant. */
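/* An 8-bit value with bit 7 forced to 1, rotated right by the 5-bit
   amount i:imm3:imm[7].  The rotation is at least 8 here, so it can be
   done as a left shift by (32 - shift) without wrap-around. */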
8229 shift = (shift << 1) | (imm >> 7);
8230 imm |= 0x80;
8231 imm = imm << (32 - shift);
8232 shifter_out = 1;
8233 break;
b5ff1b31 8234 }
3174f8e9
FN
8235 tmp2 = new_tmp();
8236 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8237 rn = (insn >> 16) & 0xf;
3174f8e9
FN
8238 if (rn == 15) {
8239 tmp = new_tmp();
8240 tcg_gen_movi_i32(tmp, 0);
8241 } else {
8242 tmp = load_reg(s, rn);
8243 }
9ee6e8bb
PB
8244 op = (insn >> 21) & 0xf;
8245 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8246 shifter_out, tmp, tmp2))
9ee6e8bb 8247 goto illegal_op;
3174f8e9 8248 dead_tmp(tmp2);
9ee6e8bb
PB
8249 rd = (insn >> 8) & 0xf;
8250 if (rd != 15) {
3174f8e9
FN
8251 store_reg(s, rd, tmp);
8252 } else {
8253 dead_tmp(tmp);
2c0262af 8254 }
2c0262af 8255 }
9ee6e8bb
PB
8256 }
8257 break;
8258 case 12: /* Load/store single data item. */
8259 {
8260 int postinc = 0;
8261 int writeback = 0;
b0109805 8262 int user;
9ee6e8bb
PB
8263 if ((insn & 0x01100000) == 0x01000000) {
8264 if (disas_neon_ls_insn(env, s, insn))
c1713132 8265 goto illegal_op;
9ee6e8bb
PB
8266 break;
8267 }
b0109805 8268 user = IS_USER(s);
9ee6e8bb 8269 if (rn == 15) {
b0109805 8270 addr = new_tmp();
9ee6e8bb
PB
8271 /* PC relative. */
8272 /* s->pc has already been incremented by 4. */
8273 imm = s->pc & 0xfffffffc;
8274 if (insn & (1 << 23))
8275 imm += insn & 0xfff;
8276 else
8277 imm -= insn & 0xfff;
b0109805 8278 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8279 } else {
b0109805 8280 addr = load_reg(s, rn);
9ee6e8bb
PB
8281 if (insn & (1 << 23)) {
8282 /* Positive offset. */
8283 imm = insn & 0xfff;
b0109805 8284 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8285 } else {
8286 op = (insn >> 8) & 7;
8287 imm = insn & 0xff;
8288 switch (op) {
8289 case 0: case 8: /* Shifted Register. */
8290 shift = (insn >> 4) & 0xf;
8291 if (shift > 3)
18c9b560 8292 goto illegal_op;
b26eefb6 8293 tmp = load_reg(s, rm);
9ee6e8bb 8294 if (shift)
b26eefb6 8295 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8296 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8297 dead_tmp(tmp);
9ee6e8bb
PB
8298 break;
8299 case 4: /* Negative offset. */
b0109805 8300 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8301 break;
8302 case 6: /* User privilege. */
b0109805
PB
8303 tcg_gen_addi_i32(addr, addr, imm);
8304 user = 1;
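/* The unprivileged (ldrt/strt style) forms: the access is performed with
   user permissions even when executed from a privileged mode. */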
9ee6e8bb
PB
8305 break;
8306 case 1: /* Post-decrement. */
8307 imm = -imm;
8308 /* Fall through. */
8309 case 3: /* Post-increment. */
9ee6e8bb
PB
8310 postinc = 1;
8311 writeback = 1;
8312 break;
8313 case 5: /* Pre-decrement. */
8314 imm = -imm;
8315 /* Fall through. */
8316 case 7: /* Pre-increment. */
b0109805 8317 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8318 writeback = 1;
8319 break;
8320 default:
b7bcbe95 8321 goto illegal_op;
9ee6e8bb
PB
8322 }
8323 }
8324 }
8325 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
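/* op packs the access size (insn bits [22:21]) with the sign-extension
   flag (insn bit 24), matching the load/store cases below. */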
8326 if (insn & (1 << 20)) {
8327 /* Load. */
8328 if (rs == 15 && op != 2) {
8329 if (op & 2)
b5ff1b31 8330 goto illegal_op;
9ee6e8bb
PB
8331 /* Memory hint. Implemented as NOP. */
8332 } else {
8333 switch (op) {
b0109805
PB
8334 case 0: tmp = gen_ld8u(addr, user); break;
8335 case 4: tmp = gen_ld8s(addr, user); break;
8336 case 1: tmp = gen_ld16u(addr, user); break;
8337 case 5: tmp = gen_ld16s(addr, user); break;
8338 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8339 default: goto illegal_op;
8340 }
8341 if (rs == 15) {
b0109805 8342 gen_bx(s, tmp);
9ee6e8bb 8343 } else {
b0109805 8344 store_reg(s, rs, tmp);
9ee6e8bb
PB
8345 }
8346 }
8347 } else {
8348 /* Store. */
8349 if (rs == 15)
b7bcbe95 8350 goto illegal_op;
b0109805 8351 tmp = load_reg(s, rs);
9ee6e8bb 8352 switch (op) {
b0109805
PB
8353 case 0: gen_st8(tmp, addr, user); break;
8354 case 1: gen_st16(tmp, addr, user); break;
8355 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8356 default: goto illegal_op;
b7bcbe95 8357 }
2c0262af 8358 }
9ee6e8bb 8359 if (postinc)
b0109805
PB
8360 tcg_gen_addi_i32(addr, addr, imm);
8361 if (writeback) {
8362 store_reg(s, rn, addr);
8363 } else {
8364 dead_tmp(addr);
8365 }
9ee6e8bb
PB
8366 }
8367 break;
8368 default:
8369 goto illegal_op;
2c0262af 8370 }
9ee6e8bb
PB
8371 return 0;
8372illegal_op:
8373 return 1;
2c0262af
FB
8374}
8375
9ee6e8bb 8376static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8377{
8378 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8379 int32_t offset;
8380 int i;
b26eefb6 8381 TCGv tmp;
d9ba4830 8382 TCGv tmp2;
b0109805 8383 TCGv addr;
99c475ab 8384
9ee6e8bb
PB
8385 if (s->condexec_mask) {
8386 cond = s->condexec_cond;
bedd2912
JB
8387 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8388 s->condlabel = gen_new_label();
8389 gen_test_cc(cond ^ 1, s->condlabel);
8390 s->condjmp = 1;
8391 }
9ee6e8bb
PB
8392 }
8393
b5ff1b31 8394 insn = lduw_code(s->pc);
99c475ab 8395 s->pc += 2;
b5ff1b31 8396
99c475ab
FB
8397 switch (insn >> 12) {
8398 case 0: case 1:
396e467c 8399
99c475ab
FB
8400 rd = insn & 7;
8401 op = (insn >> 11) & 3;
8402 if (op == 3) {
8403 /* add/subtract */
8404 rn = (insn >> 3) & 7;
396e467c 8405 tmp = load_reg(s, rn);
99c475ab
FB
8406 if (insn & (1 << 10)) {
8407 /* immediate */
396e467c
FN
8408 tmp2 = new_tmp();
8409 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8410 } else {
8411 /* reg */
8412 rm = (insn >> 6) & 7;
396e467c 8413 tmp2 = load_reg(s, rm);
99c475ab 8414 }
9ee6e8bb
PB
8415 if (insn & (1 << 9)) {
8416 if (s->condexec_mask)
396e467c 8417 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8418 else
396e467c 8419 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8420 } else {
8421 if (s->condexec_mask)
396e467c 8422 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8423 else
396e467c 8424 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8425 }
396e467c
FN
8426 dead_tmp(tmp2);
8427 store_reg(s, rd, tmp);
99c475ab
FB
8428 } else {
8429 /* shift immediate */
8430 rm = (insn >> 3) & 7;
8431 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8432 tmp = load_reg(s, rm);
8433 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8434 if (!s->condexec_mask)
8435 gen_logic_CC(tmp);
8436 store_reg(s, rd, tmp);
99c475ab
FB
8437 }
8438 break;
8439 case 2: case 3:
8440 /* arithmetic large immediate */
8441 op = (insn >> 11) & 3;
8442 rd = (insn >> 8) & 0x7;
396e467c
FN
8443 if (op == 0) { /* mov */
8444 tmp = new_tmp();
8445 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8446 if (!s->condexec_mask)
396e467c
FN
8447 gen_logic_CC(tmp);
8448 store_reg(s, rd, tmp);
8449 } else {
8450 tmp = load_reg(s, rd);
8451 tmp2 = new_tmp();
8452 tcg_gen_movi_i32(tmp2, insn & 0xff);
8453 switch (op) {
8454 case 1: /* cmp */
8455 gen_helper_sub_cc(tmp, tmp, tmp2);
8456 dead_tmp(tmp);
8457 dead_tmp(tmp2);
8458 break;
8459 case 2: /* add */
8460 if (s->condexec_mask)
8461 tcg_gen_add_i32(tmp, tmp, tmp2);
8462 else
8463 gen_helper_add_cc(tmp, tmp, tmp2);
8464 dead_tmp(tmp2);
8465 store_reg(s, rd, tmp);
8466 break;
8467 case 3: /* sub */
8468 if (s->condexec_mask)
8469 tcg_gen_sub_i32(tmp, tmp, tmp2);
8470 else
8471 gen_helper_sub_cc(tmp, tmp, tmp2);
8472 dead_tmp(tmp2);
8473 store_reg(s, rd, tmp);
8474 break;
8475 }
99c475ab 8476 }
99c475ab
FB
8477 break;
8478 case 4:
8479 if (insn & (1 << 11)) {
8480 rd = (insn >> 8) & 7;
5899f386
FB
8481 /* load pc-relative. Bit 1 of PC is ignored. */
8482 val = s->pc + 2 + ((insn & 0xff) * 4);
8483 val &= ~(uint32_t)2;
b0109805
PB
8484 addr = new_tmp();
8485 tcg_gen_movi_i32(addr, val);
8486 tmp = gen_ld32(addr, IS_USER(s));
8487 dead_tmp(addr);
8488 store_reg(s, rd, tmp);
99c475ab
FB
8489 break;
8490 }
8491 if (insn & (1 << 10)) {
8492 /* data processing extended or blx */
8493 rd = (insn & 7) | ((insn >> 4) & 8);
8494 rm = (insn >> 3) & 0xf;
8495 op = (insn >> 8) & 3;
8496 switch (op) {
8497 case 0: /* add */
396e467c
FN
8498 tmp = load_reg(s, rd);
8499 tmp2 = load_reg(s, rm);
8500 tcg_gen_add_i32(tmp, tmp, tmp2);
8501 dead_tmp(tmp2);
8502 store_reg(s, rd, tmp);
99c475ab
FB
8503 break;
8504 case 1: /* cmp */
396e467c
FN
8505 tmp = load_reg(s, rd);
8506 tmp2 = load_reg(s, rm);
8507 gen_helper_sub_cc(tmp, tmp, tmp2);
8508 dead_tmp(tmp2);
8509 dead_tmp(tmp);
99c475ab
FB
8510 break;
8511 case 2: /* mov/cpy */
396e467c
FN
8512 tmp = load_reg(s, rm);
8513 store_reg(s, rd, tmp);
99c475ab
FB
8514 break;
8515 case 3:/* branch [and link] exchange thumb register */
b0109805 8516 tmp = load_reg(s, rm);
99c475ab
FB
8517 if (insn & (1 << 7)) {
8518 val = (uint32_t)s->pc | 1;
b0109805
PB
8519 tmp2 = new_tmp();
8520 tcg_gen_movi_i32(tmp2, val);
8521 store_reg(s, 14, tmp2);
99c475ab 8522 }
d9ba4830 8523 gen_bx(s, tmp);
99c475ab
FB
8524 break;
8525 }
8526 break;
8527 }
8528
8529 /* data processing register */
8530 rd = insn & 7;
8531 rm = (insn >> 3) & 7;
8532 op = (insn >> 6) & 0xf;
8533 if (op == 2 || op == 3 || op == 4 || op == 7) {
8534 /* the shift/rotate ops want the operands backwards */
8535 val = rm;
8536 rm = rd;
8537 rd = val;
8538 val = 1;
8539 } else {
8540 val = 0;
8541 }
8542
396e467c
FN
8543 if (op == 9) { /* neg */
8544 tmp = new_tmp();
8545 tcg_gen_movi_i32(tmp, 0);
8546 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8547 tmp = load_reg(s, rd);
8548 } else {
8549 TCGV_UNUSED(tmp);
8550 }
99c475ab 8551
396e467c 8552 tmp2 = load_reg(s, rm);
5899f386 8553 switch (op) {
99c475ab 8554 case 0x0: /* and */
396e467c 8555 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8556 if (!s->condexec_mask)
396e467c 8557 gen_logic_CC(tmp);
99c475ab
FB
8558 break;
8559 case 0x1: /* eor */
396e467c 8560 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8561 if (!s->condexec_mask)
396e467c 8562 gen_logic_CC(tmp);
99c475ab
FB
8563 break;
8564 case 0x2: /* lsl */
9ee6e8bb 8565 if (s->condexec_mask) {
396e467c 8566 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8567 } else {
396e467c
FN
8568 gen_helper_shl_cc(tmp2, tmp2, tmp);
8569 gen_logic_CC(tmp2);
9ee6e8bb 8570 }
99c475ab
FB
8571 break;
8572 case 0x3: /* lsr */
9ee6e8bb 8573 if (s->condexec_mask) {
396e467c 8574 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8575 } else {
396e467c
FN
8576 gen_helper_shr_cc(tmp2, tmp2, tmp);
8577 gen_logic_CC(tmp2);
9ee6e8bb 8578 }
99c475ab
FB
8579 break;
8580 case 0x4: /* asr */
9ee6e8bb 8581 if (s->condexec_mask) {
396e467c 8582 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8583 } else {
396e467c
FN
8584 gen_helper_sar_cc(tmp2, tmp2, tmp);
8585 gen_logic_CC(tmp2);
9ee6e8bb 8586 }
99c475ab
FB
8587 break;
8588 case 0x5: /* adc */
9ee6e8bb 8589 if (s->condexec_mask)
396e467c 8590 gen_adc(tmp, tmp2);
9ee6e8bb 8591 else
396e467c 8592 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8593 break;
8594 case 0x6: /* sbc */
9ee6e8bb 8595 if (s->condexec_mask)
396e467c 8596 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8597 else
396e467c 8598 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8599 break;
8600 case 0x7: /* ror */
9ee6e8bb 8601 if (s->condexec_mask) {
f669df27
AJ
8602 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8603 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8604 } else {
396e467c
FN
8605 gen_helper_ror_cc(tmp2, tmp2, tmp);
8606 gen_logic_CC(tmp2);
9ee6e8bb 8607 }
99c475ab
FB
8608 break;
8609 case 0x8: /* tst */
396e467c
FN
8610 tcg_gen_and_i32(tmp, tmp, tmp2);
8611 gen_logic_CC(tmp);
99c475ab 8612 rd = 16;
5899f386 8613 break;
99c475ab 8614 case 0x9: /* neg */
9ee6e8bb 8615 if (s->condexec_mask)
396e467c 8616 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8617 else
396e467c 8618 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8619 break;
8620 case 0xa: /* cmp */
396e467c 8621 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8622 rd = 16;
8623 break;
8624 case 0xb: /* cmn */
396e467c 8625 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8626 rd = 16;
8627 break;
8628 case 0xc: /* orr */
396e467c 8629 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8630 if (!s->condexec_mask)
396e467c 8631 gen_logic_CC(tmp);
99c475ab
FB
8632 break;
8633 case 0xd: /* mul */
7b2919a0 8634 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8635 if (!s->condexec_mask)
396e467c 8636 gen_logic_CC(tmp);
99c475ab
FB
8637 break;
8638 case 0xe: /* bic */
f669df27 8639 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8640 if (!s->condexec_mask)
396e467c 8641 gen_logic_CC(tmp);
99c475ab
FB
8642 break;
8643 case 0xf: /* mvn */
396e467c 8644 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8645 if (!s->condexec_mask)
396e467c 8646 gen_logic_CC(tmp2);
99c475ab 8647 val = 1;
5899f386 8648 rm = rd;
99c475ab
FB
8649 break;
8650 }
8651 if (rd != 16) {
396e467c
FN
8652 if (val) {
8653 store_reg(s, rm, tmp2);
8654 if (op != 0xf)
8655 dead_tmp(tmp);
8656 } else {
8657 store_reg(s, rd, tmp);
8658 dead_tmp(tmp2);
8659 }
8660 } else {
8661 dead_tmp(tmp);
8662 dead_tmp(tmp2);
99c475ab
FB
8663 }
8664 break;
8665
8666 case 5:
8667 /* load/store register offset. */
8668 rd = insn & 7;
8669 rn = (insn >> 3) & 7;
8670 rm = (insn >> 6) & 7;
8671 op = (insn >> 9) & 7;
b0109805 8672 addr = load_reg(s, rn);
b26eefb6 8673 tmp = load_reg(s, rm);
b0109805 8674 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8675 dead_tmp(tmp);
99c475ab
FB
8676
8677 if (op < 3) /* store */
b0109805 8678 tmp = load_reg(s, rd);
99c475ab
FB
8679
8680 switch (op) {
8681 case 0: /* str */
b0109805 8682 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8683 break;
8684 case 1: /* strh */
b0109805 8685 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8686 break;
8687 case 2: /* strb */
b0109805 8688 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8689 break;
8690 case 3: /* ldrsb */
b0109805 8691 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8692 break;
8693 case 4: /* ldr */
b0109805 8694 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8695 break;
8696 case 5: /* ldrh */
b0109805 8697 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8698 break;
8699 case 6: /* ldrb */
b0109805 8700 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8701 break;
8702 case 7: /* ldrsh */
b0109805 8703 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8704 break;
8705 }
8706 if (op >= 3) /* load */
b0109805
PB
8707 store_reg(s, rd, tmp);
8708 dead_tmp(addr);
99c475ab
FB
8709 break;
8710
8711 case 6:
8712 /* load/store word immediate offset */
8713 rd = insn & 7;
8714 rn = (insn >> 3) & 7;
b0109805 8715 addr = load_reg(s, rn);
99c475ab 8716 val = (insn >> 4) & 0x7c;
b0109805 8717 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8718
8719 if (insn & (1 << 11)) {
8720 /* load */
b0109805
PB
8721 tmp = gen_ld32(addr, IS_USER(s));
8722 store_reg(s, rd, tmp);
99c475ab
FB
8723 } else {
8724 /* store */
b0109805
PB
8725 tmp = load_reg(s, rd);
8726 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8727 }
b0109805 8728 dead_tmp(addr);
99c475ab
FB
8729 break;
8730
8731 case 7:
8732 /* load/store byte immediate offset */
8733 rd = insn & 7;
8734 rn = (insn >> 3) & 7;
b0109805 8735 addr = load_reg(s, rn);
99c475ab 8736 val = (insn >> 6) & 0x1f;
b0109805 8737 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8738
8739 if (insn & (1 << 11)) {
8740 /* load */
b0109805
PB
8741 tmp = gen_ld8u(addr, IS_USER(s));
8742 store_reg(s, rd, tmp);
99c475ab
FB
8743 } else {
8744 /* store */
b0109805
PB
8745 tmp = load_reg(s, rd);
8746 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8747 }
b0109805 8748 dead_tmp(addr);
99c475ab
FB
8749 break;
8750
8751 case 8:
8752 /* load/store halfword immediate offset */
8753 rd = insn & 7;
8754 rn = (insn >> 3) & 7;
b0109805 8755 addr = load_reg(s, rn);
99c475ab 8756 val = (insn >> 5) & 0x3e;
b0109805 8757 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8758
8759 if (insn & (1 << 11)) {
8760 /* load */
b0109805
PB
8761 tmp = gen_ld16u(addr, IS_USER(s));
8762 store_reg(s, rd, tmp);
99c475ab
FB
8763 } else {
8764 /* store */
b0109805
PB
8765 tmp = load_reg(s, rd);
8766 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8767 }
b0109805 8768 dead_tmp(addr);
99c475ab
FB
8769 break;
8770
8771 case 9:
8772 /* load/store from stack */
8773 rd = (insn >> 8) & 7;
b0109805 8774 addr = load_reg(s, 13);
99c475ab 8775 val = (insn & 0xff) * 4;
b0109805 8776 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8777
8778 if (insn & (1 << 11)) {
8779 /* load */
b0109805
PB
8780 tmp = gen_ld32(addr, IS_USER(s));
8781 store_reg(s, rd, tmp);
99c475ab
FB
8782 } else {
8783 /* store */
b0109805
PB
8784 tmp = load_reg(s, rd);
8785 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8786 }
b0109805 8787 dead_tmp(addr);
99c475ab
FB
8788 break;
8789
8790 case 10:
8791 /* add to high reg */
8792 rd = (insn >> 8) & 7;
5899f386
FB
8793 if (insn & (1 << 11)) {
8794 /* SP */
5e3f878a 8795 tmp = load_reg(s, 13);
5899f386
FB
8796 } else {
8797 /* PC. bit 1 is ignored. */
5e3f878a
PB
8798 tmp = new_tmp();
8799 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8800 }
99c475ab 8801 val = (insn & 0xff) * 4;
5e3f878a
PB
8802 tcg_gen_addi_i32(tmp, tmp, val);
8803 store_reg(s, rd, tmp);
99c475ab
FB
8804 break;
8805
8806 case 11:
8807 /* misc */
8808 op = (insn >> 8) & 0xf;
8809 switch (op) {
8810 case 0:
8811 /* adjust stack pointer */
b26eefb6 8812 tmp = load_reg(s, 13);
99c475ab
FB
8813 val = (insn & 0x7f) * 4;
8814 if (insn & (1 << 7))
6a0d8a1d 8815 val = -(int32_t)val;
b26eefb6
PB
8816 tcg_gen_addi_i32(tmp, tmp, val);
8817 store_reg(s, 13, tmp);
99c475ab
FB
8818 break;
8819
9ee6e8bb
PB
8820 case 2: /* sign/zero extend. */
8821 ARCH(6);
8822 rd = insn & 7;
8823 rm = (insn >> 3) & 7;
b0109805 8824 tmp = load_reg(s, rm);
9ee6e8bb 8825 switch ((insn >> 6) & 3) {
b0109805
PB
8826 case 0: gen_sxth(tmp); break;
8827 case 1: gen_sxtb(tmp); break;
8828 case 2: gen_uxth(tmp); break;
8829 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8830 }
b0109805 8831 store_reg(s, rd, tmp);
9ee6e8bb 8832 break;
99c475ab
FB
8833 case 4: case 5: case 0xc: case 0xd:
8834 /* push/pop */
b0109805 8835 addr = load_reg(s, 13);
5899f386
FB
8836 if (insn & (1 << 8))
8837 offset = 4;
99c475ab 8838 else
5899f386
FB
8839 offset = 0;
8840 for (i = 0; i < 8; i++) {
8841 if (insn & (1 << i))
8842 offset += 4;
8843 }
8844 if ((insn & (1 << 11)) == 0) {
b0109805 8845 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8846 }
99c475ab
FB
8847 for (i = 0; i < 8; i++) {
8848 if (insn & (1 << i)) {
8849 if (insn & (1 << 11)) {
8850 /* pop */
b0109805
PB
8851 tmp = gen_ld32(addr, IS_USER(s));
8852 store_reg(s, i, tmp);
99c475ab
FB
8853 } else {
8854 /* push */
b0109805
PB
8855 tmp = load_reg(s, i);
8856 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8857 }
5899f386 8858 /* advance to the next address. */
b0109805 8859 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8860 }
8861 }
a50f5b91 8862 TCGV_UNUSED(tmp);
99c475ab
FB
8863 if (insn & (1 << 8)) {
8864 if (insn & (1 << 11)) {
8865 /* pop pc */
b0109805 8866 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8867 /* don't set the pc until the rest of the instruction
8868 has completed */
8869 } else {
8870 /* push lr */
b0109805
PB
8871 tmp = load_reg(s, 14);
8872 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8873 }
b0109805 8874 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8875 }
5899f386 8876 if ((insn & (1 << 11)) == 0) {
b0109805 8877 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8878 }
99c475ab 8879 /* write back the new stack pointer */
b0109805 8880 store_reg(s, 13, addr);
99c475ab
FB
8881 /* set the new PC value */
8882 if ((insn & 0x0900) == 0x0900)
b0109805 8883 gen_bx(s, tmp);
99c475ab
FB
8884 break;
8885
9ee6e8bb
PB
8886 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8887 rm = insn & 7;
d9ba4830 8888 tmp = load_reg(s, rm);
9ee6e8bb
PB
8889 s->condlabel = gen_new_label();
8890 s->condjmp = 1;
8891 if (insn & (1 << 11))
cb63669a 8892 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8893 else
cb63669a 8894 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8895 dead_tmp(tmp);
9ee6e8bb
PB
8896 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
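/* offset = i:imm5:'0', a forward-only branch of 0-126 bytes. */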
8897 val = (uint32_t)s->pc + 2;
8898 val += offset;
8899 gen_jmp(s, val);
8900 break;
8901
8902 case 15: /* IT, nop-hint. */
8903 if ((insn & 0xf) == 0) {
8904 gen_nop_hint(s, (insn >> 4) & 0xf);
8905 break;
8906 }
8907 /* If Then. */
8908 s->condexec_cond = (insn >> 4) & 0xe;
8909 s->condexec_mask = insn & 0x1f;
8910 /* No actual code generated for this insn, just setup state. */
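/* The low bit of the condition for each following insn comes from the top
   of condexec_mask as the IT block advances (see the update in
   gen_intermediate_code_internal). */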
8911 break;
8912
06c949e6 8913 case 0xe: /* bkpt */
9ee6e8bb 8914 gen_set_condexec(s);
5e3f878a 8915 gen_set_pc_im(s->pc - 2);
d9ba4830 8916 gen_exception(EXCP_BKPT);
06c949e6
PB
8917 s->is_jmp = DISAS_JUMP;
8918 break;
8919
9ee6e8bb
PB
8920 case 0xa: /* rev */
8921 ARCH(6);
8922 rn = (insn >> 3) & 0x7;
8923 rd = insn & 0x7;
b0109805 8924 tmp = load_reg(s, rn);
9ee6e8bb 8925 switch ((insn >> 6) & 3) {
66896cb8 8926 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8927 case 1: gen_rev16(tmp); break;
8928 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8929 default: goto illegal_op;
8930 }
b0109805 8931 store_reg(s, rd, tmp);
9ee6e8bb
PB
8932 break;
8933
8934 case 6: /* cps */
8935 ARCH(6);
8936 if (IS_USER(s))
8937 break;
8938 if (IS_M(env)) {
8984bd2e 8939 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8940 /* PRIMASK */
8984bd2e
PB
8941 if (insn & 1) {
8942 addr = tcg_const_i32(16);
8943 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8944 tcg_temp_free_i32(addr);
8984bd2e 8945 }
9ee6e8bb 8946 /* FAULTMASK */
8984bd2e
PB
8947 if (insn & 2) {
8948 addr = tcg_const_i32(17);
8949 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8950 tcg_temp_free_i32(addr);
8984bd2e 8951 }
b75263d6 8952 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8953 gen_lookup_tb(s);
8954 } else {
8955 if (insn & (1 << 4))
8956 shift = CPSR_A | CPSR_I | CPSR_F;
8957 else
8958 shift = 0;
fa26df03 8959 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
8960 }
8961 break;
8962
99c475ab
FB
8963 default:
8964 goto undef;
8965 }
8966 break;
8967
8968 case 12:
8969 /* load/store multiple */
8970 rn = (insn >> 8) & 0x7;
b0109805 8971 addr = load_reg(s, rn);
99c475ab
FB
8972 for (i = 0; i < 8; i++) {
8973 if (insn & (1 << i)) {
99c475ab
FB
8974 if (insn & (1 << 11)) {
8975 /* load */
b0109805
PB
8976 tmp = gen_ld32(addr, IS_USER(s));
8977 store_reg(s, i, tmp);
99c475ab
FB
8978 } else {
8979 /* store */
b0109805
PB
8980 tmp = load_reg(s, i);
8981 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8982 }
5899f386 8983 /* advance to the next address */
b0109805 8984 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8985 }
8986 }
5899f386 8987 /* Base register writeback. */
b0109805
PB
8988 if ((insn & (1 << rn)) == 0) {
8989 store_reg(s, rn, addr);
8990 } else {
8991 dead_tmp(addr);
8992 }
99c475ab
FB
8993 break;
8994
8995 case 13:
8996 /* conditional branch or swi */
8997 cond = (insn >> 8) & 0xf;
8998 if (cond == 0xe)
8999 goto undef;
9000
9001 if (cond == 0xf) {
9002 /* swi */
9ee6e8bb 9003 gen_set_condexec(s);
422ebf69 9004 gen_set_pc_im(s->pc);
9ee6e8bb 9005 s->is_jmp = DISAS_SWI;
99c475ab
FB
9006 break;
9007 }
9008 /* generate a conditional jump to next instruction */
e50e6a20 9009 s->condlabel = gen_new_label();
d9ba4830 9010 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9011 s->condjmp = 1;
99c475ab
FB
9012
9013 /* jump to the offset */
5899f386 9014 val = (uint32_t)s->pc + 2;
99c475ab 9015 offset = ((int32_t)insn << 24) >> 24;
5899f386 9016 val += offset << 1;
8aaca4c0 9017 gen_jmp(s, val);
99c475ab
FB
9018 break;
9019
9020 case 14:
358bf29e 9021 if (insn & (1 << 11)) {
9ee6e8bb
PB
9022 if (disas_thumb2_insn(env, s, insn))
9023 goto undef32;
358bf29e
PB
9024 break;
9025 }
9ee6e8bb 9026 /* unconditional branch */
99c475ab
FB
9027 val = (uint32_t)s->pc;
9028 offset = ((int32_t)insn << 21) >> 21;
9029 val += (offset << 1) + 2;
8aaca4c0 9030 gen_jmp(s, val);
99c475ab
FB
9031 break;
9032
9033 case 15:
9ee6e8bb 9034 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9035 goto undef32;
9ee6e8bb 9036 break;
99c475ab
FB
9037 }
9038 return;
9ee6e8bb
PB
9039undef32:
9040 gen_set_condexec(s);
5e3f878a 9041 gen_set_pc_im(s->pc - 4);
d9ba4830 9042 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
9043 s->is_jmp = DISAS_JUMP;
9044 return;
9045illegal_op:
99c475ab 9046undef:
9ee6e8bb 9047 gen_set_condexec(s);
5e3f878a 9048 gen_set_pc_im(s->pc - 2);
d9ba4830 9049 gen_exception(EXCP_UDEF);
99c475ab
FB
9050 s->is_jmp = DISAS_JUMP;
9051}
9052
2c0262af
FB
9053/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9054 basic block 'tb'. If search_pc is TRUE, also generate PC
9055 information for each intermediate instruction. */
2cfc5f17
TS
9056static inline void gen_intermediate_code_internal(CPUState *env,
9057 TranslationBlock *tb,
9058 int search_pc)
2c0262af
FB
9059{
9060 DisasContext dc1, *dc = &dc1;
a1d1bb31 9061 CPUBreakpoint *bp;
2c0262af
FB
9062 uint16_t *gen_opc_end;
9063 int j, lj;
0fa85d43 9064 target_ulong pc_start;
b5ff1b31 9065 uint32_t next_page_start;
2e70f6ef
PB
9066 int num_insns;
9067 int max_insns;
3b46e624 9068
2c0262af 9069 /* generate intermediate code */
b26eefb6 9070 num_temps = 0;
b26eefb6 9071
0fa85d43 9072 pc_start = tb->pc;
3b46e624 9073
2c0262af
FB
9074 dc->tb = tb;
9075
2c0262af 9076 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9077
9078 dc->is_jmp = DISAS_NEXT;
9079 dc->pc = pc_start;
8aaca4c0 9080 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9081 dc->condjmp = 0;
5899f386 9082 dc->thumb = env->thumb;
9ee6e8bb
PB
9083 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
9084 dc->condexec_cond = env->condexec_bits >> 4;
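/* condexec_bits keeps the base condition in its top four bits and the IT
   mask in the low four; the mask is shifted up by one here so the per-insn
   advance in the main loop can take the next condition bit from bit 4. */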
b5ff1b31 9085#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
9086 if (IS_M(env)) {
9087 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
9088 } else {
9089 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
9090 }
b5ff1b31 9091#endif
a7812ae4
PB
9092 cpu_F0s = tcg_temp_new_i32();
9093 cpu_F1s = tcg_temp_new_i32();
9094 cpu_F0d = tcg_temp_new_i64();
9095 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9096 cpu_V0 = cpu_F0d;
9097 cpu_V1 = cpu_F1d;
e677137d 9098 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9099 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9100 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9101 lj = -1;
2e70f6ef
PB
9102 num_insns = 0;
9103 max_insns = tb->cflags & CF_COUNT_MASK;
9104 if (max_insns == 0)
9105 max_insns = CF_COUNT_MASK;
9106
9107 gen_icount_start();
9ee6e8bb
PB
9108 /* Reset the conditional execution bits immediately. This avoids
9109 complications trying to do it at the end of the block. */
9110 if (env->condexec_bits)
8f01245e
PB
9111 {
9112 TCGv tmp = new_tmp();
9113 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9114 store_cpu_field(tmp, condexec_bits);
8f01245e 9115 }
2c0262af 9116 do {
fbb4a2e3
PB
9117#ifdef CONFIG_USER_ONLY
9118 /* Intercept jump to the magic kernel page. */
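/* (On ARM Linux this is the kernel vector / kuser-helper page mapped at
   0xffff0000 for user processes.) */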
9119 if (dc->pc >= 0xffff0000) {
9120 /* We always get here via a jump, so we know we are not in a
9121 conditional execution block. */
9122 gen_exception(EXCP_KERNEL_TRAP);
9123 dc->is_jmp = DISAS_UPDATE;
9124 break;
9125 }
9126#else
9ee6e8bb
PB
9127 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9128 /* We always get here via a jump, so we know we are not in a
9129 conditional execution block. */
d9ba4830 9130 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9131 dc->is_jmp = DISAS_UPDATE;
9132 break;
9ee6e8bb
PB
9133 }
9134#endif
9135
72cf2d4f
BS
9136 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9137 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9138 if (bp->pc == dc->pc) {
9ee6e8bb 9139 gen_set_condexec(dc);
5e3f878a 9140 gen_set_pc_im(dc->pc);
d9ba4830 9141 gen_exception(EXCP_DEBUG);
1fddef4b 9142 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9143 /* Advance PC so that clearing the breakpoint will
9144 invalidate this TB. */
9145 dc->pc += 2;
9146 goto done_generating;
1fddef4b
FB
9147 break;
9148 }
9149 }
9150 }
2c0262af
FB
9151 if (search_pc) {
9152 j = gen_opc_ptr - gen_opc_buf;
9153 if (lj < j) {
9154 lj++;
9155 while (lj < j)
9156 gen_opc_instr_start[lj++] = 0;
9157 }
0fa85d43 9158 gen_opc_pc[lj] = dc->pc;
2c0262af 9159 gen_opc_instr_start[lj] = 1;
2e70f6ef 9160 gen_opc_icount[lj] = num_insns;
2c0262af 9161 }
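/* gen_opc_pc / gen_opc_instr_start / gen_opc_icount map each generated op
   index back to a guest PC so that gen_pc_load (below) can restore the PC
   after an exception. */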
e50e6a20 9162
2e70f6ef
PB
9163 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9164 gen_io_start();
9165
9ee6e8bb
PB
9166 if (env->thumb) {
9167 disas_thumb_insn(env, dc);
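/* Advance the IT block state: shift the next condition bit out of the
   mask; when the mask empties, the block is finished. */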
9168 if (dc->condexec_mask) {
9169 dc->condexec_cond = (dc->condexec_cond & 0xe)
9170 | ((dc->condexec_mask >> 4) & 1);
9171 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9172 if (dc->condexec_mask == 0) {
9173 dc->condexec_cond = 0;
9174 }
9175 }
9176 } else {
9177 disas_arm_insn(env, dc);
9178 }
b26eefb6
PB
9179 if (num_temps) {
9180 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9181 num_temps = 0;
9182 }
e50e6a20
FB
9183
9184 if (dc->condjmp && !dc->is_jmp) {
9185 gen_set_label(dc->condlabel);
9186 dc->condjmp = 0;
9187 }
aaf2d97d 9188 /* Translation stops when a conditional branch is encountered.
e50e6a20 9189 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9190 * Also stop translation when a page boundary is reached. This
bf20dc07 9191 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9192 num_insns ++;
1fddef4b
FB
9193 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9194 !env->singlestep_enabled &&
1b530a6d 9195 !singlestep &&
2e70f6ef
PB
9196 dc->pc < next_page_start &&
9197 num_insns < max_insns);
9198
9199 if (tb->cflags & CF_LAST_IO) {
9200 if (dc->condjmp) {
9201 /* FIXME: This can theoretically happen with self-modifying
9202 code. */
9203 cpu_abort(env, "IO on conditional branch instruction");
9204 }
9205 gen_io_end();
9206 }
9ee6e8bb 9207
b5ff1b31 9208 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9209 instruction was a conditional branch or trap, and the PC has
9210 already been written. */
551bd27f 9211 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9212 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9213 if (dc->condjmp) {
9ee6e8bb
PB
9214 gen_set_condexec(dc);
9215 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9216 gen_exception(EXCP_SWI);
9ee6e8bb 9217 } else {
d9ba4830 9218 gen_exception(EXCP_DEBUG);
9ee6e8bb 9219 }
e50e6a20
FB
9220 gen_set_label(dc->condlabel);
9221 }
9222 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9223 gen_set_pc_im(dc->pc);
e50e6a20 9224 dc->condjmp = 0;
8aaca4c0 9225 }
9ee6e8bb
PB
9226 gen_set_condexec(dc);
9227 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9228 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9229 } else {
9230 /* FIXME: Single stepping a WFI insn will not halt
9231 the CPU. */
d9ba4830 9232 gen_exception(EXCP_DEBUG);
9ee6e8bb 9233 }
8aaca4c0 9234 } else {
9ee6e8bb
PB
9235 /* While branches must always occur at the end of an IT block,
9236 there are a few other things that can cause us to terminate
9237 the TB in the middle of an IT block:
9238 - Exception generating instructions (bkpt, swi, undefined).
9239 - Page boundaries.
9240 - Hardware watchpoints.
9241 Hardware breakpoints have already been handled and skip this code.
9242 */
9243 gen_set_condexec(dc);
8aaca4c0 9244 switch(dc->is_jmp) {
8aaca4c0 9245 case DISAS_NEXT:
6e256c93 9246 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9247 break;
9248 default:
9249 case DISAS_JUMP:
9250 case DISAS_UPDATE:
9251 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9252 tcg_gen_exit_tb(0);
8aaca4c0
FB
9253 break;
9254 case DISAS_TB_JUMP:
9255 /* nothing more to generate */
9256 break;
9ee6e8bb 9257 case DISAS_WFI:
d9ba4830 9258 gen_helper_wfi();
9ee6e8bb
PB
9259 break;
9260 case DISAS_SWI:
d9ba4830 9261 gen_exception(EXCP_SWI);
9ee6e8bb 9262 break;
8aaca4c0 9263 }
e50e6a20
FB
9264 if (dc->condjmp) {
9265 gen_set_label(dc->condlabel);
9ee6e8bb 9266 gen_set_condexec(dc);
6e256c93 9267 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9268 dc->condjmp = 0;
9269 }
2c0262af 9270 }
2e70f6ef 9271
9ee6e8bb 9272done_generating:
2e70f6ef 9273 gen_icount_end(tb, num_insns);
2c0262af
FB
9274 *gen_opc_ptr = INDEX_op_end;
9275
9276#ifdef DEBUG_DISAS
8fec2b8c 9277 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9278 qemu_log("----------------\n");
9279 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9280 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
9281 qemu_log("\n");
2c0262af
FB
9282 }
9283#endif
b5ff1b31
FB
9284 if (search_pc) {
9285 j = gen_opc_ptr - gen_opc_buf;
9286 lj++;
9287 while (lj <= j)
9288 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9289 } else {
2c0262af 9290 tb->size = dc->pc - pc_start;
2e70f6ef 9291 tb->icount = num_insns;
b5ff1b31 9292 }
2c0262af
FB
9293}
9294
2cfc5f17 9295void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9296{
2cfc5f17 9297 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9298}
9299
2cfc5f17 9300void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9301{
2cfc5f17 9302 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9303}
9304
b5ff1b31
FB
9305static const char *cpu_mode_names[16] = {
9306 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9307 "???", "???", "???", "und", "???", "???", "???", "sys"
9308};
9ee6e8bb 9309
9a78eead 9310void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9311 int flags)
2c0262af
FB
9312{
9313 int i;
06e80fc9 9314#if 0
bc380d17 9315 union {
b7bcbe95
FB
9316 uint32_t i;
9317 float s;
9318 } s0, s1;
9319 CPU_DoubleU d;
a94a6abf
PB
9320 /* ??? This assumes float64 and double have the same layout.
9321 Oh well, it's only debug dumps. */
9322 union {
9323 float64 f64;
9324 double d;
9325 } d0;
06e80fc9 9326#endif
b5ff1b31 9327 uint32_t psr;
2c0262af
FB
9328
9329 for(i=0;i<16;i++) {
7fe48483 9330 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9331 if ((i % 4) == 3)
7fe48483 9332 cpu_fprintf(f, "\n");
2c0262af 9333 else
7fe48483 9334 cpu_fprintf(f, " ");
2c0262af 9335 }
b5ff1b31 9336 psr = cpsr_read(env);
687fa640
TS
9337 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9338 psr,
b5ff1b31
FB
9339 psr & (1 << 31) ? 'N' : '-',
9340 psr & (1 << 30) ? 'Z' : '-',
9341 psr & (1 << 29) ? 'C' : '-',
9342 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9343 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9344 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9345
5e3f878a 9346#if 0
b7bcbe95 9347 for (i = 0; i < 16; i++) {
8e96005d
FB
9348 d.d = env->vfp.regs[i];
9349 s0.i = d.l.lower;
9350 s1.i = d.l.upper;
a94a6abf
PB
9351 d0.f64 = d.d;
9352 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9353 i * 2, (int)s0.i, s0.s,
a94a6abf 9354 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9355 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9356 d0.d);
b7bcbe95 9357 }
40f137e1 9358 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9359#endif
2c0262af 9360}
a6b025d3 9361
d2856f1a
AJ
9362void gen_pc_load(CPUState *env, TranslationBlock *tb,
9363 unsigned long searched_pc, int pc_pos, void *puc)
9364{
9365 env->regs[15] = gen_opc_pc[pc_pos];
9366}