/*
 * ARM translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

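/* Dual signed 16x16->32 multiply: on return, a holds the product of the
   low halfwords and b the product of the high halfwords of the inputs.  */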
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
    tcg_temp_free_i64(tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

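/* Copy bit 'shift' of var (the last bit shifted out by an immediate shift)
   into the carry flag.  */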
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
};

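/* Shift by a register-specified amount; with flags != 0 the *_cc helpers
   also update the carry flag.  The shift temporary is freed.  */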
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
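/* ARM parallel add/subtract: op1 selects the variant (signed, saturating,
   signed halving, unsigned, unsigned saturating, unsigned halving) and op2
   the operation, as decoded by PAS_OP above.  */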
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

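/* Generate a branch to 'label' if condition code 'cc' is satisfied by the
   current NZCV flag values.  */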
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

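/* Nonzero for the data processing (logical) opcodes that set flags from the
   result only (N and Z, via gen_logic_CC).  */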
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

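/* Basic load/store helpers: loads allocate and return a new temporary,
   stores free the value temporary after use.  */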
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

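/* Add the load/store offset field of insn (12-bit immediate or shifted
   register form) to var, respecting the up/down bit.  */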
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

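/* As above, for the halfword/doubleword load/store offset encoding;
   'extra' is an additional displacement folded into the address.  */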
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env); \
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

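/* Return the byte offset of VFP register 'reg' within CPUARMState; for
   single-precision registers, select the correct half of the underlying
   double-precision register.  */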
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
            + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
            + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

18c9b560
AZ
1277{
1278 int rd;
1279 uint32_t offset;
da6b5335 1280 TCGv tmp;
18c9b560
AZ
1281
1282 rd = (insn >> 16) & 0xf;
da6b5335 1283 tmp = load_reg(s, rd);
18c9b560
AZ
1284
1285 offset = (insn & 0xff) << ((insn >> 7) & 2);
1286 if (insn & (1 << 24)) {
1287 /* Pre indexed */
1288 if (insn & (1 << 23))
da6b5335 1289 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1290 else
da6b5335
FN
1291 tcg_gen_addi_i32(tmp, tmp, -offset);
1292 tcg_gen_mov_i32(dest, tmp);
18c9b560 1293 if (insn & (1 << 21))
da6b5335
FN
1294 store_reg(s, rd, tmp);
1295 else
1296 dead_tmp(tmp);
18c9b560
AZ
1297 } else if (insn & (1 << 21)) {
1298 /* Post indexed */
da6b5335 1299 tcg_gen_mov_i32(dest, tmp);
18c9b560 1300 if (insn & (1 << 23))
da6b5335 1301 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1302 else
da6b5335
FN
1303 tcg_gen_addi_i32(tmp, tmp, -offset);
1304 store_reg(s, rd, tmp);
18c9b560
AZ
1305 } else if (!(insn & (1 << 23)))
1306 return 1;
1307 return 0;
1308}
1309
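/* Fetch the iwMMXt shift amount, either from a wCGR control register or
   from core register rd, masked with 'mask'; returns nonzero for an
   invalid encoding.  */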
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {  /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {  /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {  /* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {  /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {  /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {  /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {  /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {  /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {  /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {  /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {  /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
2017 gen_op_iwmmxt_set_cup();
2018 break;
2019 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2020 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2021 if (((insn >> 22) & 3) == 0)
2022 return 1;
18c9b560
AZ
2023 wrd = (insn >> 12) & 0xf;
2024 rd0 = (insn >> 16) & 0xf;
2025 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2026 tmp = new_tmp();
2027 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2028 dead_tmp(tmp);
18c9b560 2029 return 1;
da6b5335 2030 }
18c9b560 2031 switch ((insn >> 22) & 3) {
18c9b560 2032 case 1:
da6b5335 2033 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2034 break;
2035 case 2:
da6b5335 2036 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2037 break;
2038 case 3:
da6b5335 2039 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2040 break;
2041 }
da6b5335 2042 dead_tmp(tmp);
18c9b560
AZ
2043 gen_op_iwmmxt_movq_wRn_M0(wrd);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2046 break;
2047 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2048 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2049 if (((insn >> 22) & 3) == 0)
2050 return 1;
18c9b560
AZ
2051 wrd = (insn >> 12) & 0xf;
2052 rd0 = (insn >> 16) & 0xf;
2053 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2054 tmp = new_tmp();
18c9b560 2055 switch ((insn >> 22) & 3) {
18c9b560 2056 case 1:
da6b5335
FN
2057 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2058 dead_tmp(tmp);
18c9b560 2059 return 1;
da6b5335
FN
2060 }
2061 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2062 break;
2063 case 2:
da6b5335
FN
2064 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2065 dead_tmp(tmp);
18c9b560 2066 return 1;
da6b5335
FN
2067 }
2068 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2069 break;
2070 case 3:
da6b5335
FN
2071 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2072 dead_tmp(tmp);
18c9b560 2073 return 1;
da6b5335
FN
2074 }
2075 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2076 break;
2077 }
da6b5335 2078 dead_tmp(tmp);
18c9b560
AZ
2079 gen_op_iwmmxt_movq_wRn_M0(wrd);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2082 break;
2083 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2084 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2085 wrd = (insn >> 12) & 0xf;
2086 rd0 = (insn >> 16) & 0xf;
2087 rd1 = (insn >> 0) & 0xf;
2088 gen_op_iwmmxt_movq_M0_wRn(rd0);
2089 switch ((insn >> 22) & 3) {
2090 case 0:
2091 if (insn & (1 << 21))
2092 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2093 else
2094 gen_op_iwmmxt_minub_M0_wRn(rd1);
2095 break;
2096 case 1:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2099 else
2100 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2101 break;
2102 case 2:
2103 if (insn & (1 << 21))
2104 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2105 else
2106 gen_op_iwmmxt_minul_M0_wRn(rd1);
2107 break;
2108 case 3:
2109 return 1;
2110 }
2111 gen_op_iwmmxt_movq_wRn_M0(wrd);
2112 gen_op_iwmmxt_set_mup();
2113 break;
2114 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2115 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2116 wrd = (insn >> 12) & 0xf;
2117 rd0 = (insn >> 16) & 0xf;
2118 rd1 = (insn >> 0) & 0xf;
2119 gen_op_iwmmxt_movq_M0_wRn(rd0);
2120 switch ((insn >> 22) & 3) {
2121 case 0:
2122 if (insn & (1 << 21))
2123 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2124 else
2125 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2126 break;
2127 case 1:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2132 break;
2133 case 2:
2134 if (insn & (1 << 21))
2135 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2136 else
2137 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2138 break;
2139 case 3:
2140 return 1;
2141 }
2142 gen_op_iwmmxt_movq_wRn_M0(wrd);
2143 gen_op_iwmmxt_set_mup();
2144 break;
2145 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2146 case 0x402: case 0x502: case 0x602: case 0x702:
2147 wrd = (insn >> 12) & 0xf;
2148 rd0 = (insn >> 16) & 0xf;
2149 rd1 = (insn >> 0) & 0xf;
2150 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2151 tmp = tcg_const_i32((insn >> 20) & 3);
2152 iwmmxt_load_reg(cpu_V1, rd1);
2153 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2154 tcg_temp_free(tmp);
18c9b560
AZ
2155 gen_op_iwmmxt_movq_wRn_M0(wrd);
2156 gen_op_iwmmxt_set_mup();
2157 break;
2158 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2159 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2160 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2161 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2162 wrd = (insn >> 12) & 0xf;
2163 rd0 = (insn >> 16) & 0xf;
2164 rd1 = (insn >> 0) & 0xf;
2165 gen_op_iwmmxt_movq_M0_wRn(rd0);
2166 switch ((insn >> 20) & 0xf) {
2167 case 0x0:
2168 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2169 break;
2170 case 0x1:
2171 gen_op_iwmmxt_subub_M0_wRn(rd1);
2172 break;
2173 case 0x3:
2174 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2175 break;
2176 case 0x4:
2177 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2178 break;
2179 case 0x5:
2180 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2181 break;
2182 case 0x7:
2183 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2184 break;
2185 case 0x8:
2186 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2187 break;
2188 case 0x9:
2189 gen_op_iwmmxt_subul_M0_wRn(rd1);
2190 break;
2191 case 0xb:
2192 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2193 break;
2194 default:
2195 return 1;
2196 }
2197 gen_op_iwmmxt_movq_wRn_M0(wrd);
2198 gen_op_iwmmxt_set_mup();
2199 gen_op_iwmmxt_set_cup();
2200 break;
2201 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2202 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2203 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2204 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2205 wrd = (insn >> 12) & 0xf;
2206 rd0 = (insn >> 16) & 0xf;
2207 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2208 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2209 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2210 tcg_temp_free(tmp);
18c9b560
AZ
2211 gen_op_iwmmxt_movq_wRn_M0(wrd);
2212 gen_op_iwmmxt_set_mup();
2213 gen_op_iwmmxt_set_cup();
2214 break;
2215 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2216 case 0x418: case 0x518: case 0x618: case 0x718:
2217 case 0x818: case 0x918: case 0xa18: case 0xb18:
2218 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2219 wrd = (insn >> 12) & 0xf;
2220 rd0 = (insn >> 16) & 0xf;
2221 rd1 = (insn >> 0) & 0xf;
2222 gen_op_iwmmxt_movq_M0_wRn(rd0);
2223 switch ((insn >> 20) & 0xf) {
2224 case 0x0:
2225 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2226 break;
2227 case 0x1:
2228 gen_op_iwmmxt_addub_M0_wRn(rd1);
2229 break;
2230 case 0x3:
2231 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2232 break;
2233 case 0x4:
2234 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2235 break;
2236 case 0x5:
2237 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2238 break;
2239 case 0x7:
2240 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2241 break;
2242 case 0x8:
2243 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2244 break;
2245 case 0x9:
2246 gen_op_iwmmxt_addul_M0_wRn(rd1);
2247 break;
2248 case 0xb:
2249 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2250 break;
2251 default:
2252 return 1;
2253 }
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2257 break;
2258 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2259 case 0x408: case 0x508: case 0x608: case 0x708:
2260 case 0x808: case 0x908: case 0xa08: case 0xb08:
2261 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2262 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2263 return 1;
18c9b560
AZ
2264 wrd = (insn >> 12) & 0xf;
2265 rd0 = (insn >> 16) & 0xf;
2266 rd1 = (insn >> 0) & 0xf;
2267 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2268 switch ((insn >> 22) & 3) {
18c9b560
AZ
2269 case 1:
2270 if (insn & (1 << 21))
2271 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2272 else
2273 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2274 break;
2275 case 2:
2276 if (insn & (1 << 21))
2277 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2278 else
2279 gen_op_iwmmxt_packul_M0_wRn(rd1);
2280 break;
2281 case 3:
2282 if (insn & (1 << 21))
2283 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2284 else
2285 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2286 break;
2287 }
2288 gen_op_iwmmxt_movq_wRn_M0(wrd);
2289 gen_op_iwmmxt_set_mup();
2290 gen_op_iwmmxt_set_cup();
2291 break;
2292 case 0x201: case 0x203: case 0x205: case 0x207:
2293 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2294 case 0x211: case 0x213: case 0x215: case 0x217:
2295 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2296 wrd = (insn >> 5) & 0xf;
2297 rd0 = (insn >> 12) & 0xf;
2298 rd1 = (insn >> 0) & 0xf;
2299 if (rd0 == 0xf || rd1 == 0xf)
2300 return 1;
2301 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2302 tmp = load_reg(s, rd0);
2303 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2304 switch ((insn >> 16) & 0xf) {
2305 case 0x0: /* TMIA */
da6b5335 2306 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2307 break;
2308 case 0x8: /* TMIAPH */
da6b5335 2309 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2310 break;
2311 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2312 if (insn & (1 << 16))
da6b5335 2313 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2314 if (insn & (1 << 17))
da6b5335
FN
2315 tcg_gen_shri_i32(tmp2, tmp2, 16);
2316 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2317 break;
2318 default:
da6b5335
FN
2319 dead_tmp(tmp2);
2320 dead_tmp(tmp);
18c9b560
AZ
2321 return 1;
2322 }
da6b5335
FN
2323 dead_tmp(tmp2);
2324 dead_tmp(tmp);
18c9b560
AZ
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 break;
2328 default:
2329 return 1;
2330 }
2331
2332 return 0;
2333}
2334
2335/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2336 (i.e. an undefined instruction). */
2337static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2338{
2339 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2340 TCGv tmp, tmp2;
18c9b560
AZ
2341
2342 if ((insn & 0x0ff00f10) == 0x0e200010) {
2343 /* Multiply with Internal Accumulate Format */
2344 rd0 = (insn >> 12) & 0xf;
2345 rd1 = insn & 0xf;
2346 acc = (insn >> 5) & 7;
2347
2348 if (acc != 0)
2349 return 1;
2350
3a554c0f
FN
2351 tmp = load_reg(s, rd0);
2352 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2353 switch ((insn >> 16) & 0xf) {
2354 case 0x0: /* MIA */
3a554c0f 2355 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2356 break;
2357 case 0x8: /* MIAPH */
3a554c0f 2358 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2359 break;
2360 case 0xc: /* MIABB */
2361 case 0xd: /* MIABT */
2362 case 0xe: /* MIATB */
2363 case 0xf: /* MIATT */
18c9b560 2364 if (insn & (1 << 16))
3a554c0f 2365 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2366 if (insn & (1 << 17))
3a554c0f
FN
2367 tcg_gen_shri_i32(tmp2, tmp2, 16);
2368 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2369 break;
2370 default:
2371 return 1;
2372 }
3a554c0f
FN
2373 dead_tmp(tmp2);
2374 dead_tmp(tmp);
18c9b560
AZ
2375
2376 gen_op_iwmmxt_movq_wRn_M0(acc);
2377 return 0;
2378 }
2379
2380 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2381 /* Internal Accumulator Access Format */
2382 rdhi = (insn >> 16) & 0xf;
2383 rdlo = (insn >> 12) & 0xf;
2384 acc = insn & 7;
2385
2386 if (acc != 0)
2387 return 1;
2388
2389 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2390 iwmmxt_load_reg(cpu_V0, acc);
2391 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2392 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2393 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
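/* The XScale accumulator is only 40 bits wide, so keep just bits [39:32] in rdhi. */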
2394 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2395 } else { /* MAR */
3a554c0f
FN
2396 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2397 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2398 }
2399 return 0;
2400 }
2401
2402 return 1;
2403}
2404
c1713132
AZ
2405/* Disassemble system coprocessor instruction. Return nonzero if
2406 the instruction is not defined. */
2407static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2408{
b75263d6 2409 TCGv tmp, tmp2;
c1713132
AZ
2410 uint32_t rd = (insn >> 12) & 0xf;
2411 uint32_t cp = (insn >> 8) & 0xf;
2412 if (IS_USER(s)) {
2413 return 1;
2414 }
2415
18c9b560 2416 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2417 if (!env->cp[cp].cp_read)
2418 return 1;
8984bd2e
PB
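/* Synchronize the PC before calling out to the coprocessor hook, which
   may need it (e.g. to raise an exception). */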
2419 gen_set_pc_im(s->pc);
2420 tmp = new_tmp();
b75263d6
JR
2421 tmp2 = tcg_const_i32(insn);
2422 gen_helper_get_cp(tmp, cpu_env, tmp2);
2423 tcg_temp_free(tmp2);
8984bd2e 2424 store_reg(s, rd, tmp);
c1713132
AZ
2425 } else {
2426 if (!env->cp[cp].cp_write)
2427 return 1;
8984bd2e
PB
2428 gen_set_pc_im(s->pc);
2429 tmp = load_reg(s, rd);
b75263d6
JR
2430 tmp2 = tcg_const_i32(insn);
2431 gen_helper_set_cp(cpu_env, tmp2, tmp);
2432 tcg_temp_free(tmp2);
a60de947 2433 dead_tmp(tmp);
c1713132
AZ
2434 }
2435 return 0;
2436}
2437
9ee6e8bb
PB
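/* Return nonzero if this cp15 access is permitted in user mode:
   only the TLS registers and the ISB/DSB/DMB barrier encodings are. */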
2438static int cp15_user_ok(uint32_t insn)
2439{
2440 int cpn = (insn >> 16) & 0xf;
2441 int cpm = insn & 0xf;
2442 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2443
2444 if (cpn == 13 && cpm == 0) {
2445 /* TLS register. */
2446 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2447 return 1;
2448 }
2449 if (cpn == 7) {
2450 /* ISB, DSB, DMB. */
2451 if ((cpm == 5 && op == 4)
2452 || (cpm == 10 && (op == 4 || op == 5)))
2453 return 1;
2454 }
2455 return 0;
2456}
2457
3f26c122
RV
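/* Handle accesses to the cp15 c13 thread-ID (TLS) registers. Returns
   nonzero if the access was handled here, zero to fall through to the
   generic cp15 handling. */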
2458static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2459{
2460 TCGv tmp;
2461 int cpn = (insn >> 16) & 0xf;
2462 int cpm = insn & 0xf;
2463 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2464
2465 if (!arm_feature(env, ARM_FEATURE_V6K))
2466 return 0;
2467
2468 if (!(cpn == 13 && cpm == 0))
2469 return 0;
2470
2471 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2472 switch (op) {
2473 case 2:
c5883be2 2474 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2475 break;
2476 case 3:
c5883be2 2477 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2478 break;
2479 case 4:
c5883be2 2480 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2481 break;
2482 default:
3f26c122
RV
2483 return 0;
2484 }
2485 store_reg(s, rd, tmp);
2486
2487 } else {
2488 tmp = load_reg(s, rd);
2489 switch (op) {
2490 case 2:
c5883be2 2491 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2492 break;
2493 case 3:
c5883be2 2494 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2495 break;
2496 case 4:
c5883be2 2497 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2498 break;
2499 default:
c5883be2 2500 dead_tmp(tmp);
3f26c122
RV
2501 return 0;
2502 }
3f26c122
RV
2503 }
2504 return 1;
2505}
2506
b5ff1b31
FB
2507/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2508 the instruction is not defined. */
a90b7318 2509static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2510{
2511 uint32_t rd;
b75263d6 2512 TCGv tmp, tmp2;
b5ff1b31 2513
9ee6e8bb
PB
2514 /* M profile cores use memory mapped registers instead of cp15. */
2515 if (arm_feature(env, ARM_FEATURE_M))
2516 return 1;
2517
2518 if ((insn & (1 << 25)) == 0) {
2519 if (insn & (1 << 20)) {
2520 /* mrrc */
2521 return 1;
2522 }
2523 /* mcrr. Used for block cache operations, so implement as no-op. */
2524 return 0;
2525 }
2526 if ((insn & (1 << 4)) == 0) {
2527 /* cdp */
2528 return 1;
2529 }
2530 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2531 return 1;
2532 }
9332f9da
FB
2533 if ((insn & 0x0fff0fff) == 0x0e070f90
2534 || (insn & 0x0fff0fff) == 0x0e070f58) {
2535 /* Wait for interrupt. */
8984bd2e 2536 gen_set_pc_im(s->pc);
9ee6e8bb 2537 s->is_jmp = DISAS_WFI;
9332f9da
FB
2538 return 0;
2539 }
b5ff1b31 2540 rd = (insn >> 12) & 0xf;
3f26c122
RV
2541
2542 if (cp15_tls_load_store(env, s, insn, rd))
2543 return 0;
2544
b75263d6 2545 tmp2 = tcg_const_i32(insn);
18c9b560 2546 if (insn & ARM_CP_RW_BIT) {
8984bd2e 2547 tmp = new_tmp();
b75263d6 2548 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2549 /* If the destination register is r15 then sets condition codes. */
2550 if (rd != 15)
8984bd2e
PB
2551 store_reg(s, rd, tmp);
2552 else
2553 dead_tmp(tmp);
b5ff1b31 2554 } else {
8984bd2e 2555 tmp = load_reg(s, rd);
b75263d6 2556 gen_helper_set_cp15(cpu_env, tmp2, tmp);
8984bd2e 2557 dead_tmp(tmp);
a90b7318
AZ
2558 /* Normally we would always end the TB here, but Linux
2559 * arch/arm/mach-pxa/sleep.S expects two instructions following
2560 * an MMU enable to execute from cache. Imitate this behaviour. */
2561 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2562 (insn & 0x0fff0fff) != 0x0e010f10)
2563 gen_lookup_tb(s);
b5ff1b31 2564 }
b75263d6 2565 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2566 return 0;
2567}
2568
9ee6e8bb
PB
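/* VFP register numbers are encoded as a 4-bit field plus one extra bit.
   VFP_SREG rebuilds a single-precision register number (the extra bit is
   the LSB); VFP_DREG rebuilds a double-precision number, where the extra
   bit selects D16-D31 and is only valid with VFP3. */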
2569#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2570#define VFP_SREG(insn, bigbit, smallbit) \
2571 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2572#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2573 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2574 reg = (((insn) >> (bigbit)) & 0x0f) \
2575 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2576 } else { \
2577 if (insn & (1 << (smallbit))) \
2578 return 1; \
2579 reg = ((insn) >> (bigbit)) & 0x0f; \
2580 }} while (0)
2581
2582#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2583#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2584#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2585#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2586#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2587#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2588
4373f3ce
PB
2589/* Move between integer and VFP cores. */
2590static TCGv gen_vfp_mrs(void)
2591{
2592 TCGv tmp = new_tmp();
2593 tcg_gen_mov_i32(tmp, cpu_F0s);
2594 return tmp;
2595}
2596
2597static void gen_vfp_msr(TCGv tmp)
2598{
2599 tcg_gen_mov_i32(cpu_F0s, tmp);
2600 dead_tmp(tmp);
2601}
2602
9ee6e8bb
PB
2603static inline int
2604vfp_enabled(CPUState * env)
2605{
2606 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2607}
2608
ad69471c
PB
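/* Duplicate the byte at bit offset 'shift' of var across all four byte lanes. */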
2609static void gen_neon_dup_u8(TCGv var, int shift)
2610{
2611 TCGv tmp = new_tmp();
2612 if (shift)
2613 tcg_gen_shri_i32(var, var, shift);
86831435 2614 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2615 tcg_gen_shli_i32(tmp, var, 8);
2616 tcg_gen_or_i32(var, var, tmp);
2617 tcg_gen_shli_i32(tmp, var, 16);
2618 tcg_gen_or_i32(var, var, tmp);
2619 dead_tmp(tmp);
2620}
2621
2622static void gen_neon_dup_low16(TCGv var)
2623{
2624 TCGv tmp = new_tmp();
86831435 2625 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2626 tcg_gen_shli_i32(tmp, var, 16);
2627 tcg_gen_or_i32(var, var, tmp);
2628 dead_tmp(tmp);
2629}
2630
2631static void gen_neon_dup_high16(TCGv var)
2632{
2633 TCGv tmp = new_tmp();
2634 tcg_gen_andi_i32(var, var, 0xffff0000);
2635 tcg_gen_shri_i32(tmp, var, 16);
2636 tcg_gen_or_i32(var, var, tmp);
2637 dead_tmp(tmp);
2638}
2639
b7bcbe95
FB
2640/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2641 (i.e. an undefined instruction). */
2642static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2643{
2644 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2645 int dp, veclen;
312eea9f 2646 TCGv addr;
4373f3ce 2647 TCGv tmp;
ad69471c 2648 TCGv tmp2;
b7bcbe95 2649
40f137e1
PB
2650 if (!arm_feature(env, ARM_FEATURE_VFP))
2651 return 1;
2652
9ee6e8bb
PB
2653 if (!vfp_enabled(env)) {
2654 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2655 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2656 return 1;
2657 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2658 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2659 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2660 return 1;
2661 }
b7bcbe95
FB
2662 dp = ((insn & 0xf00) == 0xb00);
2663 switch ((insn >> 24) & 0xf) {
2664 case 0xe:
2665 if (insn & (1 << 4)) {
2666 /* single register transfer */
b7bcbe95
FB
2667 rd = (insn >> 12) & 0xf;
2668 if (dp) {
9ee6e8bb
PB
2669 int size;
2670 int pass;
2671
2672 VFP_DREG_N(rn, insn);
2673 if (insn & 0xf)
b7bcbe95 2674 return 1;
9ee6e8bb
PB
2675 if (insn & 0x00c00060
2676 && !arm_feature(env, ARM_FEATURE_NEON))
2677 return 1;
2678
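/* Decode which lane of the doubleword register is transferred: size is
   0/1/2 for byte/halfword/word, and offset is the lane's bit position
   within the 32-bit half selected by 'pass'. */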
2679 pass = (insn >> 21) & 1;
2680 if (insn & (1 << 22)) {
2681 size = 0;
2682 offset = ((insn >> 5) & 3) * 8;
2683 } else if (insn & (1 << 5)) {
2684 size = 1;
2685 offset = (insn & (1 << 6)) ? 16 : 0;
2686 } else {
2687 size = 2;
2688 offset = 0;
2689 }
18c9b560 2690 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2691 /* vfp->arm */
ad69471c 2692 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2693 switch (size) {
2694 case 0:
9ee6e8bb 2695 if (offset)
ad69471c 2696 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2697 if (insn & (1 << 23))
ad69471c 2698 gen_uxtb(tmp);
9ee6e8bb 2699 else
ad69471c 2700 gen_sxtb(tmp);
9ee6e8bb
PB
2701 break;
2702 case 1:
9ee6e8bb
PB
2703 if (insn & (1 << 23)) {
2704 if (offset) {
ad69471c 2705 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2706 } else {
ad69471c 2707 gen_uxth(tmp);
9ee6e8bb
PB
2708 }
2709 } else {
2710 if (offset) {
ad69471c 2711 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2712 } else {
ad69471c 2713 gen_sxth(tmp);
9ee6e8bb
PB
2714 }
2715 }
2716 break;
2717 case 2:
9ee6e8bb
PB
2718 break;
2719 }
ad69471c 2720 store_reg(s, rd, tmp);
b7bcbe95
FB
2721 } else {
2722 /* arm->vfp */
ad69471c 2723 tmp = load_reg(s, rd);
9ee6e8bb
PB
2724 if (insn & (1 << 23)) {
2725 /* VDUP */
2726 if (size == 0) {
ad69471c 2727 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2728 } else if (size == 1) {
ad69471c 2729 gen_neon_dup_low16(tmp);
9ee6e8bb 2730 }
cbbccffc
PB
2731 for (n = 0; n <= pass * 2; n++) {
2732 tmp2 = new_tmp();
2733 tcg_gen_mov_i32(tmp2, tmp);
2734 neon_store_reg(rn, n, tmp2);
2735 }
2736 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2737 } else {
2738 /* VMOV */
2739 switch (size) {
2740 case 0:
ad69471c
PB
2741 tmp2 = neon_load_reg(rn, pass);
2742 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2743 dead_tmp(tmp2);
9ee6e8bb
PB
2744 break;
2745 case 1:
ad69471c
PB
2746 tmp2 = neon_load_reg(rn, pass);
2747 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2748 dead_tmp(tmp2);
9ee6e8bb
PB
2749 break;
2750 case 2:
9ee6e8bb
PB
2751 break;
2752 }
ad69471c 2753 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2754 }
b7bcbe95 2755 }
9ee6e8bb
PB
2756 } else { /* !dp */
2757 if ((insn & 0x6f) != 0x00)
2758 return 1;
2759 rn = VFP_SREG_N(insn);
18c9b560 2760 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2761 /* vfp->arm */
2762 if (insn & (1 << 21)) {
2763 /* system register */
40f137e1 2764 rn >>= 1;
9ee6e8bb 2765
b7bcbe95 2766 switch (rn) {
40f137e1 2767 case ARM_VFP_FPSID:
4373f3ce 2768 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2769 VFP3 restricts all id registers to privileged
2770 accesses. */
2771 if (IS_USER(s)
2772 && arm_feature(env, ARM_FEATURE_VFP3))
2773 return 1;
4373f3ce 2774 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2775 break;
40f137e1 2776 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2777 if (IS_USER(s))
2778 return 1;
4373f3ce 2779 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2780 break;
40f137e1
PB
2781 case ARM_VFP_FPINST:
2782 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2783 /* Not present in VFP3. */
2784 if (IS_USER(s)
2785 || arm_feature(env, ARM_FEATURE_VFP3))
2786 return 1;
4373f3ce 2787 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2788 break;
40f137e1 2789 case ARM_VFP_FPSCR:
601d70b9 2790 if (rd == 15) {
4373f3ce
PB
2791 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2792 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2793 } else {
2794 tmp = new_tmp();
2795 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2796 }
b7bcbe95 2797 break;
9ee6e8bb
PB
2798 case ARM_VFP_MVFR0:
2799 case ARM_VFP_MVFR1:
2800 if (IS_USER(s)
2801 || !arm_feature(env, ARM_FEATURE_VFP3))
2802 return 1;
4373f3ce 2803 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2804 break;
b7bcbe95
FB
2805 default:
2806 return 1;
2807 }
2808 } else {
2809 gen_mov_F0_vreg(0, rn);
4373f3ce 2810 tmp = gen_vfp_mrs();
b7bcbe95
FB
2811 }
2812 if (rd == 15) {
b5ff1b31 2813 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2814 gen_set_nzcv(tmp);
2815 dead_tmp(tmp);
2816 } else {
2817 store_reg(s, rd, tmp);
2818 }
b7bcbe95
FB
2819 } else {
2820 /* arm->vfp */
4373f3ce 2821 tmp = load_reg(s, rd);
b7bcbe95 2822 if (insn & (1 << 21)) {
40f137e1 2823 rn >>= 1;
b7bcbe95
FB
2824 /* system register */
2825 switch (rn) {
40f137e1 2826 case ARM_VFP_FPSID:
9ee6e8bb
PB
2827 case ARM_VFP_MVFR0:
2828 case ARM_VFP_MVFR1:
b7bcbe95
FB
2829 /* Writes are ignored. */
2830 break;
40f137e1 2831 case ARM_VFP_FPSCR:
4373f3ce
PB
2832 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2833 dead_tmp(tmp);
b5ff1b31 2834 gen_lookup_tb(s);
b7bcbe95 2835 break;
40f137e1 2836 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2837 if (IS_USER(s))
2838 return 1;
71b3c3de
JR
2839 /* TODO: VFP subarchitecture support.
2840 * For now, keep the EN bit only */
2841 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2842 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2843 gen_lookup_tb(s);
2844 break;
2845 case ARM_VFP_FPINST:
2846 case ARM_VFP_FPINST2:
4373f3ce 2847 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2848 break;
b7bcbe95
FB
2849 default:
2850 return 1;
2851 }
2852 } else {
4373f3ce 2853 gen_vfp_msr(tmp);
b7bcbe95
FB
2854 gen_mov_vreg_F0(0, rn);
2855 }
2856 }
2857 }
2858 } else {
2859 /* data processing */
2860 /* The opcode is in bits 23, 21, 20 and 6. */
2861 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2862 if (dp) {
2863 if (op == 15) {
2864 /* rn is opcode */
2865 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2866 } else {
2867 /* rn is register number */
9ee6e8bb 2868 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2869 }
2870
2871 if (op == 15 && (rn == 15 || rn > 17)) {
2872 /* Integer or single precision destination. */
9ee6e8bb 2873 rd = VFP_SREG_D(insn);
b7bcbe95 2874 } else {
9ee6e8bb 2875 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2876 }
2877
2878 if (op == 15 && (rn == 16 || rn == 17)) {
2879 /* Integer source. */
2880 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2881 } else {
9ee6e8bb 2882 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2883 }
2884 } else {
9ee6e8bb 2885 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2886 if (op == 15 && rn == 15) {
2887 /* Double precision destination. */
9ee6e8bb
PB
2888 VFP_DREG_D(rd, insn);
2889 } else {
2890 rd = VFP_SREG_D(insn);
2891 }
2892 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2893 }
2894
2895 veclen = env->vfp.vec_len;
2896 if (op == 15 && rn > 3)
2897 veclen = 0;
2898
2899 /* Shut up compiler warnings. */
2900 delta_m = 0;
2901 delta_d = 0;
2902 bank_mask = 0;
3b46e624 2903
b7bcbe95
FB
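/* VFP short vectors: the register file is split into banks of four
   doubles or eight singles. A destination in bank 0 makes the operation
   scalar; otherwise each iteration steps through the bank by vec_stride,
   wrapping at the bank boundary, and delta_m distinguishes vector from
   mixed scalar/vector sources. */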
2904 if (veclen > 0) {
2905 if (dp)
2906 bank_mask = 0xc;
2907 else
2908 bank_mask = 0x18;
2909
2910 /* Figure out what type of vector operation this is. */
2911 if ((rd & bank_mask) == 0) {
2912 /* scalar */
2913 veclen = 0;
2914 } else {
2915 if (dp)
2916 delta_d = (env->vfp.vec_stride >> 1) + 1;
2917 else
2918 delta_d = env->vfp.vec_stride + 1;
2919
2920 if ((rm & bank_mask) == 0) {
2921 /* mixed scalar/vector */
2922 delta_m = 0;
2923 } else {
2924 /* vector */
2925 delta_m = delta_d;
2926 }
2927 }
2928 }
2929
2930 /* Load the initial operands. */
2931 if (op == 15) {
2932 switch (rn) {
2933 case 16:
2934 case 17:
2935 /* Integer source */
2936 gen_mov_F0_vreg(0, rm);
2937 break;
2938 case 8:
2939 case 9:
2940 /* Compare */
2941 gen_mov_F0_vreg(dp, rd);
2942 gen_mov_F1_vreg(dp, rm);
2943 break;
2944 case 10:
2945 case 11:
2946 /* Compare with zero */
2947 gen_mov_F0_vreg(dp, rd);
2948 gen_vfp_F1_ld0(dp);
2949 break;
9ee6e8bb
PB
2950 case 20:
2951 case 21:
2952 case 22:
2953 case 23:
644ad806
PB
2954 case 28:
2955 case 29:
2956 case 30:
2957 case 31:
9ee6e8bb
PB
2958 /* Source and destination the same. */
2959 gen_mov_F0_vreg(dp, rd);
2960 break;
b7bcbe95
FB
2961 default:
2962 /* One source operand. */
2963 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2964 break;
b7bcbe95
FB
2965 }
2966 } else {
2967 /* Two source operands. */
2968 gen_mov_F0_vreg(dp, rn);
2969 gen_mov_F1_vreg(dp, rm);
2970 }
2971
2972 for (;;) {
2973 /* Perform the calculation. */
2974 switch (op) {
2975 case 0: /* mac: fd + (fn * fm) */
2976 gen_vfp_mul(dp);
2977 gen_mov_F1_vreg(dp, rd);
2978 gen_vfp_add(dp);
2979 break;
2980 case 1: /* nmac: fd - (fn * fm) */
2981 gen_vfp_mul(dp);
2982 gen_vfp_neg(dp);
2983 gen_mov_F1_vreg(dp, rd);
2984 gen_vfp_add(dp);
2985 break;
2986 case 2: /* msc: -fd + (fn * fm) */
2987 gen_vfp_mul(dp);
2988 gen_mov_F1_vreg(dp, rd);
2989 gen_vfp_sub(dp);
2990 break;
2991 case 3: /* nmsc: -fd - (fn * fm) */
2992 gen_vfp_mul(dp);
b7bcbe95 2993 gen_vfp_neg(dp);
c9fb531a
PB
2994 gen_mov_F1_vreg(dp, rd);
2995 gen_vfp_sub(dp);
b7bcbe95
FB
2996 break;
2997 case 4: /* mul: fn * fm */
2998 gen_vfp_mul(dp);
2999 break;
3000 case 5: /* nmul: -(fn * fm) */
3001 gen_vfp_mul(dp);
3002 gen_vfp_neg(dp);
3003 break;
3004 case 6: /* add: fn + fm */
3005 gen_vfp_add(dp);
3006 break;
3007 case 7: /* sub: fn - fm */
3008 gen_vfp_sub(dp);
3009 break;
3010 case 8: /* div: fn / fm */
3011 gen_vfp_div(dp);
3012 break;
9ee6e8bb
PB
3013 case 14: /* fconst */
3014 if (!arm_feature(env, ARM_FEATURE_VFP3))
3015 return 1;
3016
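/* Expand the VFP3 8-bit immediate abcdefgh: a gives the sign, the
   exponent is NOT(b) followed by b replicated, and cdefgh forms the
   top of the fraction (the remaining bits are zero). */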
3017 n = (insn << 12) & 0x80000000;
3018 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3019 if (dp) {
3020 if (i & 0x40)
3021 i |= 0x3f80;
3022 else
3023 i |= 0x4000;
3024 n |= i << 16;
4373f3ce 3025 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3026 } else {
3027 if (i & 0x40)
3028 i |= 0x780;
3029 else
3030 i |= 0x800;
3031 n |= i << 19;
5b340b51 3032 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3033 }
9ee6e8bb 3034 break;
b7bcbe95
FB
3035 case 15: /* extension space */
3036 switch (rn) {
3037 case 0: /* cpy */
3038 /* no-op */
3039 break;
3040 case 1: /* abs */
3041 gen_vfp_abs(dp);
3042 break;
3043 case 2: /* neg */
3044 gen_vfp_neg(dp);
3045 break;
3046 case 3: /* sqrt */
3047 gen_vfp_sqrt(dp);
3048 break;
60011498
PB
3049 case 4: /* vcvtb.f32.f16 */
3050 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3051 return 1;
3052 tmp = gen_vfp_mrs();
3053 tcg_gen_ext16u_i32(tmp, tmp);
3054 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3055 dead_tmp(tmp);
3056 break;
3057 case 5: /* vcvtt.f32.f16 */
3058 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3059 return 1;
3060 tmp = gen_vfp_mrs();
3061 tcg_gen_shri_i32(tmp, tmp, 16);
3062 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3063 dead_tmp(tmp);
3064 break;
3065 case 6: /* vcvtb.f16.f32 */
3066 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3067 return 1;
3068 tmp = new_tmp();
3069 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3070 gen_mov_F0_vreg(0, rd);
3071 tmp2 = gen_vfp_mrs();
3072 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3073 tcg_gen_or_i32(tmp, tmp, tmp2);
3074 dead_tmp(tmp2);
3075 gen_vfp_msr(tmp);
3076 break;
3077 case 7: /* vcvtt.f16.f32 */
3078 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3079 return 1;
3080 tmp = new_tmp();
3081 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3082 tcg_gen_shli_i32(tmp, tmp, 16);
3083 gen_mov_F0_vreg(0, rd);
3084 tmp2 = gen_vfp_mrs();
3085 tcg_gen_ext16u_i32(tmp2, tmp2);
3086 tcg_gen_or_i32(tmp, tmp, tmp2);
3087 dead_tmp(tmp2);
3088 gen_vfp_msr(tmp);
3089 break;
b7bcbe95
FB
3090 case 8: /* cmp */
3091 gen_vfp_cmp(dp);
3092 break;
3093 case 9: /* cmpe */
3094 gen_vfp_cmpe(dp);
3095 break;
3096 case 10: /* cmpz */
3097 gen_vfp_cmp(dp);
3098 break;
3099 case 11: /* cmpez */
3100 gen_vfp_F1_ld0(dp);
3101 gen_vfp_cmpe(dp);
3102 break;
3103 case 15: /* single<->double conversion */
3104 if (dp)
4373f3ce 3105 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3106 else
4373f3ce 3107 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3108 break;
3109 case 16: /* fuito */
3110 gen_vfp_uito(dp);
3111 break;
3112 case 17: /* fsito */
3113 gen_vfp_sito(dp);
3114 break;
9ee6e8bb
PB
3115 case 20: /* fshto */
3116 if (!arm_feature(env, ARM_FEATURE_VFP3))
3117 return 1;
644ad806 3118 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3119 break;
3120 case 21: /* fslto */
3121 if (!arm_feature(env, ARM_FEATURE_VFP3))
3122 return 1;
644ad806 3123 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3124 break;
3125 case 22: /* fuhto */
3126 if (!arm_feature(env, ARM_FEATURE_VFP3))
3127 return 1;
644ad806 3128 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3129 break;
3130 case 23: /* fulto */
3131 if (!arm_feature(env, ARM_FEATURE_VFP3))
3132 return 1;
644ad806 3133 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3134 break;
b7bcbe95
FB
3135 case 24: /* ftoui */
3136 gen_vfp_toui(dp);
3137 break;
3138 case 25: /* ftouiz */
3139 gen_vfp_touiz(dp);
3140 break;
3141 case 26: /* ftosi */
3142 gen_vfp_tosi(dp);
3143 break;
3144 case 27: /* ftosiz */
3145 gen_vfp_tosiz(dp);
3146 break;
9ee6e8bb
PB
3147 case 28: /* ftosh */
3148 if (!arm_feature(env, ARM_FEATURE_VFP3))
3149 return 1;
644ad806 3150 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3151 break;
3152 case 29: /* ftosl */
3153 if (!arm_feature(env, ARM_FEATURE_VFP3))
3154 return 1;
644ad806 3155 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3156 break;
3157 case 30: /* ftouh */
3158 if (!arm_feature(env, ARM_FEATURE_VFP3))
3159 return 1;
644ad806 3160 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3161 break;
3162 case 31: /* ftoul */
3163 if (!arm_feature(env, ARM_FEATURE_VFP3))
3164 return 1;
644ad806 3165 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3166 break;
b7bcbe95
FB
3167 default: /* undefined */
3168 printf ("rn:%d\n", rn);
3169 return 1;
3170 }
3171 break;
3172 default: /* undefined */
3173 printf ("op:%d\n", op);
3174 return 1;
3175 }
3176
3177 /* Write back the result. */
3178 if (op == 15 && (rn >= 8 && rn <= 11))
3179 ; /* Comparison, do nothing. */
3180 else if (op == 15 && rn > 17)
3181 /* Integer result. */
3182 gen_mov_vreg_F0(0, rd);
3183 else if (op == 15 && rn == 15)
3184 /* conversion */
3185 gen_mov_vreg_F0(!dp, rd);
3186 else
3187 gen_mov_vreg_F0(dp, rd);
3188
3189 /* break out of the loop if we have finished */
3190 if (veclen == 0)
3191 break;
3192
3193 if (op == 15 && delta_m == 0) {
3194 /* single source one-many */
3195 while (veclen--) {
3196 rd = ((rd + delta_d) & (bank_mask - 1))
3197 | (rd & bank_mask);
3198 gen_mov_vreg_F0(dp, rd);
3199 }
3200 break;
3201 }
3202 /* Set up the next operands. */
3203 veclen--;
3204 rd = ((rd + delta_d) & (bank_mask - 1))
3205 | (rd & bank_mask);
3206
3207 if (op == 15) {
3208 /* One source operand. */
3209 rm = ((rm + delta_m) & (bank_mask - 1))
3210 | (rm & bank_mask);
3211 gen_mov_F0_vreg(dp, rm);
3212 } else {
3213 /* Two source operands. */
3214 rn = ((rn + delta_d) & (bank_mask - 1))
3215 | (rn & bank_mask);
3216 gen_mov_F0_vreg(dp, rn);
3217 if (delta_m) {
3218 rm = ((rm + delta_m) & (bank_mask - 1))
3219 | (rm & bank_mask);
3220 gen_mov_F1_vreg(dp, rm);
3221 }
3222 }
3223 }
3224 }
3225 break;
3226 case 0xc:
3227 case 0xd:
9ee6e8bb 3228 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3229 /* two-register transfer */
3230 rn = (insn >> 16) & 0xf;
3231 rd = (insn >> 12) & 0xf;
3232 if (dp) {
9ee6e8bb
PB
3233 VFP_DREG_M(rm, insn);
3234 } else {
3235 rm = VFP_SREG_M(insn);
3236 }
b7bcbe95 3237
18c9b560 3238 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3239 /* vfp->arm */
3240 if (dp) {
4373f3ce
PB
3241 gen_mov_F0_vreg(0, rm * 2);
3242 tmp = gen_vfp_mrs();
3243 store_reg(s, rd, tmp);
3244 gen_mov_F0_vreg(0, rm * 2 + 1);
3245 tmp = gen_vfp_mrs();
3246 store_reg(s, rn, tmp);
b7bcbe95
FB
3247 } else {
3248 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3249 tmp = gen_vfp_mrs();
3250 store_reg(s, rn, tmp);
b7bcbe95 3251 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rd, tmp);
b7bcbe95
FB
3254 }
3255 } else {
3256 /* arm->vfp */
3257 if (dp) {
4373f3ce
PB
3258 tmp = load_reg(s, rd);
3259 gen_vfp_msr(tmp);
3260 gen_mov_vreg_F0(0, rm * 2);
3261 tmp = load_reg(s, rn);
3262 gen_vfp_msr(tmp);
3263 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3264 } else {
4373f3ce
PB
3265 tmp = load_reg(s, rn);
3266 gen_vfp_msr(tmp);
b7bcbe95 3267 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3268 tmp = load_reg(s, rd);
3269 gen_vfp_msr(tmp);
b7bcbe95
FB
3270 gen_mov_vreg_F0(0, rm + 1);
3271 }
3272 }
3273 } else {
3274 /* Load/store */
3275 rn = (insn >> 16) & 0xf;
3276 if (dp)
9ee6e8bb 3277 VFP_DREG_D(rd, insn);
b7bcbe95 3278 else
9ee6e8bb
PB
3279 rd = VFP_SREG_D(insn);
3280 if (s->thumb && rn == 15) {
312eea9f
FN
3281 addr = new_tmp();
3282 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3283 } else {
312eea9f 3284 addr = load_reg(s, rn);
9ee6e8bb 3285 }
b7bcbe95
FB
3286 if ((insn & 0x01200000) == 0x01000000) {
3287 /* Single load/store */
3288 offset = (insn & 0xff) << 2;
3289 if ((insn & (1 << 23)) == 0)
3290 offset = -offset;
312eea9f 3291 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3292 if (insn & (1 << 20)) {
312eea9f 3293 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3294 gen_mov_vreg_F0(dp, rd);
3295 } else {
3296 gen_mov_F0_vreg(dp, rd);
312eea9f 3297 gen_vfp_st(s, dp, addr);
b7bcbe95 3298 }
312eea9f 3299 dead_tmp(addr);
b7bcbe95
FB
3300 } else {
3301 /* load/store multiple */
3302 if (dp)
3303 n = (insn >> 1) & 0x7f;
3304 else
3305 n = insn & 0xff;
3306
3307 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3308 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3309
3310 if (dp)
3311 offset = 8;
3312 else
3313 offset = 4;
3314 for (i = 0; i < n; i++) {
18c9b560 3315 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3316 /* load */
312eea9f 3317 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3318 gen_mov_vreg_F0(dp, rd + i);
3319 } else {
3320 /* store */
3321 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3322 gen_vfp_st(s, dp, addr);
b7bcbe95 3323 }
312eea9f 3324 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3325 }
3326 if (insn & (1 << 21)) {
3327 /* writeback */
3328 if (insn & (1 << 24))
3329 offset = -offset * n;
3330 else if (dp && (insn & 1))
3331 offset = 4;
3332 else
3333 offset = 0;
3334
3335 if (offset != 0)
312eea9f
FN
3336 tcg_gen_addi_i32(addr, addr, offset);
3337 store_reg(s, rn, addr);
3338 } else {
3339 dead_tmp(addr);
b7bcbe95
FB
3340 }
3341 }
3342 }
3343 break;
3344 default:
3345 /* Should never happen. */
3346 return 1;
3347 }
3348 return 0;
3349}
3350
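/* Branch to 'dest': chain directly to the next TB when it lies in the
   same guest page as this one, otherwise update the PC and take a
   plain TB exit. */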
6e256c93 3351static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3352{
6e256c93
FB
3353 TranslationBlock *tb;
3354
3355 tb = s->tb;
3356 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3357 tcg_gen_goto_tb(n);
8984bd2e 3358 gen_set_pc_im(dest);
57fec1fe 3359 tcg_gen_exit_tb((long)tb + n);
6e256c93 3360 } else {
8984bd2e 3361 gen_set_pc_im(dest);
57fec1fe 3362 tcg_gen_exit_tb(0);
6e256c93 3363 }
c53be334
FB
3364}
3365
8aaca4c0
FB
3366static inline void gen_jmp (DisasContext *s, uint32_t dest)
3367{
551bd27f 3368 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3369 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3370 if (s->thumb)
d9ba4830
PB
3371 dest |= 1;
3372 gen_bx_im(s, dest);
8aaca4c0 3373 } else {
6e256c93 3374 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3375 s->is_jmp = DISAS_TB_JUMP;
3376 }
3377}
3378
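/* Signed 16x16->32 multiply used by the SMUL<x><y> family: x and y
   select the top or bottom halfword of t0 and t1 respectively. */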
d9ba4830 3379static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3380{
ee097184 3381 if (x)
d9ba4830 3382 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3383 else
d9ba4830 3384 gen_sxth(t0);
ee097184 3385 if (y)
d9ba4830 3386 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3387 else
d9ba4830
PB
3388 gen_sxth(t1);
3389 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3390}
3391
3392/* Return the mask of PSR bits set by an MSR instruction. */
9ee6e8bb 3393static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3394 uint32_t mask;
3395
3396 mask = 0;
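/* The low four bits of 'flags' select the c/x/s/f bytes of the PSR,
   matching the MSR field mask encoding. */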
3397 if (flags & (1 << 0))
3398 mask |= 0xff;
3399 if (flags & (1 << 1))
3400 mask |= 0xff00;
3401 if (flags & (1 << 2))
3402 mask |= 0xff0000;
3403 if (flags & (1 << 3))
3404 mask |= 0xff000000;
9ee6e8bb 3405
2ae23e75 3406 /* Mask out undefined bits. */
9ee6e8bb
PB
3407 mask &= ~CPSR_RESERVED;
3408 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3409 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3410 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3411 mask &= ~CPSR_IT;
9ee6e8bb 3412 /* Mask out execution state bits. */
2ae23e75 3413 if (!spsr)
e160c51c 3414 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3415 /* Mask out privileged bits. */
3416 if (IS_USER(s))
9ee6e8bb 3417 mask &= CPSR_USER;
b5ff1b31
FB
3418 return mask;
3419}
3420
2fbac54b
FN
3421/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3422static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3423{
d9ba4830 3424 TCGv tmp;
b5ff1b31
FB
3425 if (spsr) {
3426 /* ??? This is also undefined in system mode. */
3427 if (IS_USER(s))
3428 return 1;
d9ba4830
PB
3429
3430 tmp = load_cpu_field(spsr);
3431 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3432 tcg_gen_andi_i32(t0, t0, mask);
3433 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3434 store_cpu_field(tmp, spsr);
b5ff1b31 3435 } else {
2fbac54b 3436 gen_set_cpsr(t0, mask);
b5ff1b31 3437 }
2fbac54b 3438 dead_tmp(t0);
b5ff1b31
FB
3439 gen_lookup_tb(s);
3440 return 0;
3441}
3442
2fbac54b
FN
3443/* Returns nonzero if access to the PSR is not permitted. */
3444static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3445{
3446 TCGv tmp;
3447 tmp = new_tmp();
3448 tcg_gen_movi_i32(tmp, val);
3449 return gen_set_psr(s, mask, spsr, tmp);
3450}
3451
e9bb4aa9
JR
3452/* Generate an old-style exception return. Marks pc as dead. */
3453static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3454{
d9ba4830 3455 TCGv tmp;
e9bb4aa9 3456 store_reg(s, 15, pc);
d9ba4830
PB
3457 tmp = load_cpu_field(spsr);
3458 gen_set_cpsr(tmp, 0xffffffff);
3459 dead_tmp(tmp);
b5ff1b31
FB
3460 s->is_jmp = DISAS_UPDATE;
3461}
3462
b0109805
PB
3463/* Generate a v6 exception return. Marks both values as dead. */
3464static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3465{
b0109805
PB
3466 gen_set_cpsr(cpsr, 0xffffffff);
3467 dead_tmp(cpsr);
3468 store_reg(s, 15, pc);
9ee6e8bb
PB
3469 s->is_jmp = DISAS_UPDATE;
3470}
3b46e624 3471
9ee6e8bb
PB
3472static inline void
3473gen_set_condexec (DisasContext *s)
3474{
3475 if (s->condexec_mask) {
8f01245e
PB
3476 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3477 TCGv tmp = new_tmp();
3478 tcg_gen_movi_i32(tmp, val);
d9ba4830 3479 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3480 }
3481}
3b46e624 3482
9ee6e8bb
PB
3483static void gen_nop_hint(DisasContext *s, int val)
3484{
3485 switch (val) {
3486 case 3: /* wfi */
8984bd2e 3487 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3488 s->is_jmp = DISAS_WFI;
3489 break;
3490 case 2: /* wfe */
3491 case 4: /* sev */
3492 /* TODO: Implement SEV and WFE. May help SMP performance. */
3493 default: /* nop */
3494 break;
3495 }
3496}
99c475ab 3497
ad69471c 3498#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3499
dd8fbd78 3500static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3501{
3502 switch (size) {
dd8fbd78
FN
3503 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3504 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3505 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3506 default: return 1;
3507 }
3508 return 0;
3509}
3510
dd8fbd78 3511static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3512{
3513 switch (size) {
dd8fbd78
FN
3514 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3515 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3516 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3517 default: return;
3518 }
3519}
3520
3521/* 32-bit pairwise ops end up the same as the elementwise versions. */
3522#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3523#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3524#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3525#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3526
3527/* FIXME: This is wrong. They set the wrong overflow bit. */
3528#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3529#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3530#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3531#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3532
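/* Invoke the Neon helper matching the element size (0/1/2 for 8/16/32-bit
   elements) and signedness. The _ENV variant passes cpu_env for helpers
   that need CPU state, such as the saturating operations. */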
3533#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3534 switch ((size << 1) | u) { \
3535 case 0: \
dd8fbd78 3536 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3537 break; \
3538 case 1: \
dd8fbd78 3539 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3540 break; \
3541 case 2: \
dd8fbd78 3542 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3543 break; \
3544 case 3: \
dd8fbd78 3545 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3546 break; \
3547 case 4: \
dd8fbd78 3548 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3549 break; \
3550 case 5: \
dd8fbd78 3551 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3552 break; \
3553 default: return 1; \
3554 }} while (0)
9ee6e8bb
PB
3555
3556#define GEN_NEON_INTEGER_OP(name) do { \
3557 switch ((size << 1) | u) { \
ad69471c 3558 case 0: \
dd8fbd78 3559 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3560 break; \
3561 case 1: \
dd8fbd78 3562 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3563 break; \
3564 case 2: \
dd8fbd78 3565 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3566 break; \
3567 case 3: \
dd8fbd78 3568 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3569 break; \
3570 case 4: \
dd8fbd78 3571 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3572 break; \
3573 case 5: \
dd8fbd78 3574 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3575 break; \
9ee6e8bb
PB
3576 default: return 1; \
3577 }} while (0)
3578
dd8fbd78 3579static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3580{
dd8fbd78
FN
3581 TCGv tmp = new_tmp();
3582 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3583 return tmp;
9ee6e8bb
PB
3584}
3585
dd8fbd78 3586static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3587{
dd8fbd78
FN
3588 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3589 dead_tmp(var);
9ee6e8bb
PB
3590}
3591
dd8fbd78 3592static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3593{
dd8fbd78 3594 TCGv tmp;
9ee6e8bb 3595 if (size == 1) {
dd8fbd78 3596 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3597 } else {
dd8fbd78
FN
3598 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3599 if (reg & 1) {
3600 gen_neon_dup_low16(tmp);
3601 } else {
3602 gen_neon_dup_high16(tmp);
3603 }
9ee6e8bb 3604 }
dd8fbd78 3605 return tmp;
9ee6e8bb
PB
3606}
3607
19457615
FN
3608static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3609{
3610 TCGv rd, rm, tmp;
3611
3612 rd = new_tmp();
3613 rm = new_tmp();
3614 tmp = new_tmp();
3615
3616 tcg_gen_andi_i32(rd, t0, 0xff);
3617 tcg_gen_shri_i32(tmp, t0, 8);
3618 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3619 tcg_gen_or_i32(rd, rd, tmp);
3620 tcg_gen_shli_i32(tmp, t1, 16);
3621 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3622 tcg_gen_or_i32(rd, rd, tmp);
3623 tcg_gen_shli_i32(tmp, t1, 8);
3624 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3625 tcg_gen_or_i32(rd, rd, tmp);
3626
3627 tcg_gen_shri_i32(rm, t0, 8);
3628 tcg_gen_andi_i32(rm, rm, 0xff);
3629 tcg_gen_shri_i32(tmp, t0, 16);
3630 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3631 tcg_gen_or_i32(rm, rm, tmp);
3632 tcg_gen_shli_i32(tmp, t1, 8);
3633 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3634 tcg_gen_or_i32(rm, rm, tmp);
3635 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3636 tcg_gen_or_i32(t1, rm, tmp);
3637 tcg_gen_mov_i32(t0, rd);
3638
3639 dead_tmp(tmp);
3640 dead_tmp(rm);
3641 dead_tmp(rd);
3642}
3643
3644static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3645{
3646 TCGv rd, rm, tmp;
3647
3648 rd = new_tmp();
3649 rm = new_tmp();
3650 tmp = new_tmp();
3651
3652 tcg_gen_andi_i32(rd, t0, 0xff);
3653 tcg_gen_shli_i32(tmp, t1, 8);
3654 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3655 tcg_gen_or_i32(rd, rd, tmp);
3656 tcg_gen_shli_i32(tmp, t0, 16);
3657 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3658 tcg_gen_or_i32(rd, rd, tmp);
3659 tcg_gen_shli_i32(tmp, t1, 24);
3660 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3661 tcg_gen_or_i32(rd, rd, tmp);
3662
3663 tcg_gen_andi_i32(rm, t1, 0xff000000);
3664 tcg_gen_shri_i32(tmp, t0, 8);
3665 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3666 tcg_gen_or_i32(rm, rm, tmp);
3667 tcg_gen_shri_i32(tmp, t1, 8);
3668 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3669 tcg_gen_or_i32(rm, rm, tmp);
3670 tcg_gen_shri_i32(tmp, t0, 16);
3671 tcg_gen_andi_i32(tmp, tmp, 0xff);
3672 tcg_gen_or_i32(t1, rm, tmp);
3673 tcg_gen_mov_i32(t0, rd);
3674
3675 dead_tmp(tmp);
3676 dead_tmp(rm);
3677 dead_tmp(rd);
3678}
3679
3680static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3681{
3682 TCGv tmp, tmp2;
3683
3684 tmp = new_tmp();
3685 tmp2 = new_tmp();
3686
3687 tcg_gen_andi_i32(tmp, t0, 0xffff);
3688 tcg_gen_shli_i32(tmp2, t1, 16);
3689 tcg_gen_or_i32(tmp, tmp, tmp2);
3690 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3691 tcg_gen_shri_i32(tmp2, t0, 16);
3692 tcg_gen_or_i32(t1, t1, tmp2);
3693 tcg_gen_mov_i32(t0, tmp);
3694
3695 dead_tmp(tmp2);
3696 dead_tmp(tmp);
3697}
3698
9ee6e8bb
PB
3699static void gen_neon_unzip(int reg, int q, int tmp, int size)
3700{
3701 int n;
dd8fbd78 3702 TCGv t0, t1;
9ee6e8bb
PB
3703
3704 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3705 t0 = neon_load_reg(reg, n);
3706 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3707 switch (size) {
dd8fbd78
FN
3708 case 0: gen_neon_unzip_u8(t0, t1); break;
3709 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3710 case 2: /* no-op */; break;
3711 default: abort();
3712 }
dd8fbd78
FN
3713 neon_store_scratch(tmp + n, t0);
3714 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3715 }
3716}
3717
19457615
FN
3718static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3719{
3720 TCGv rd, tmp;
3721
3722 rd = new_tmp();
3723 tmp = new_tmp();
3724
3725 tcg_gen_shli_i32(rd, t0, 8);
3726 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3727 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3728 tcg_gen_or_i32(rd, rd, tmp);
3729
3730 tcg_gen_shri_i32(t1, t1, 8);
3731 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3732 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3733 tcg_gen_or_i32(t1, t1, tmp);
3734 tcg_gen_mov_i32(t0, rd);
3735
3736 dead_tmp(tmp);
3737 dead_tmp(rd);
3738}
3739
3740static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3741{
3742 TCGv rd, tmp;
3743
3744 rd = new_tmp();
3745 tmp = new_tmp();
3746
3747 tcg_gen_shli_i32(rd, t0, 16);
3748 tcg_gen_andi_i32(tmp, t1, 0xffff);
3749 tcg_gen_or_i32(rd, rd, tmp);
3750 tcg_gen_shri_i32(t1, t1, 16);
3751 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3752 tcg_gen_or_i32(t1, t1, tmp);
3753 tcg_gen_mov_i32(t0, rd);
3754
3755 dead_tmp(tmp);
3756 dead_tmp(rd);
3757}
3758
3759
9ee6e8bb
PB
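/* Register count, element interleave factor and register spacing for
   each of the Neon VLD/VST 'multiple structures' op encodings. */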
3760static struct {
3761 int nregs;
3762 int interleave;
3763 int spacing;
3764} neon_ls_element_type[11] = {
3765 {4, 4, 1},
3766 {4, 4, 2},
3767 {4, 1, 1},
3768 {4, 2, 1},
3769 {3, 3, 1},
3770 {3, 3, 2},
3771 {3, 1, 1},
3772 {1, 1, 1},
3773 {2, 2, 1},
3774 {2, 2, 2},
3775 {2, 1, 1}
3776};
3777
3778/* Translate a NEON load/store element instruction. Return nonzero if the
3779 instruction is invalid. */
3780static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3781{
3782 int rd, rn, rm;
3783 int op;
3784 int nregs;
3785 int interleave;
84496233 3786 int spacing;
9ee6e8bb
PB
3787 int stride;
3788 int size;
3789 int reg;
3790 int pass;
3791 int load;
3792 int shift;
9ee6e8bb 3793 int n;
1b2b1e54 3794 TCGv addr;
b0109805 3795 TCGv tmp;
8f8e3aa4 3796 TCGv tmp2;
84496233 3797 TCGv_i64 tmp64;
9ee6e8bb
PB
3798
3799 if (!vfp_enabled(env))
3800 return 1;
3801 VFP_DREG_D(rd, insn);
3802 rn = (insn >> 16) & 0xf;
3803 rm = insn & 0xf;
3804 load = (insn & (1 << 21)) != 0;
1b2b1e54 3805 addr = new_tmp();
9ee6e8bb
PB
3806 if ((insn & (1 << 23)) == 0) {
3807 /* Load store all elements. */
3808 op = (insn >> 8) & 0xf;
3809 size = (insn >> 6) & 3;
84496233 3810 if (op > 10)
9ee6e8bb
PB
3811 return 1;
3812 nregs = neon_ls_element_type[op].nregs;
3813 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3814 spacing = neon_ls_element_type[op].spacing;
3815 if (size == 3 && (interleave | spacing) != 1)
3816 return 1;
dcc65026 3817 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3818 stride = (1 << size) * interleave;
3819 for (reg = 0; reg < nregs; reg++) {
3820 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3821 load_reg_var(s, addr, rn);
3822 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3823 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3824 load_reg_var(s, addr, rn);
3825 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3826 }
84496233
JR
3827 if (size == 3) {
3828 if (load) {
3829 tmp64 = gen_ld64(addr, IS_USER(s));
3830 neon_store_reg64(tmp64, rd);
3831 tcg_temp_free_i64(tmp64);
3832 } else {
3833 tmp64 = tcg_temp_new_i64();
3834 neon_load_reg64(tmp64, rd);
3835 gen_st64(tmp64, addr, IS_USER(s));
3836 }
3837 tcg_gen_addi_i32(addr, addr, stride);
3838 } else {
3839 for (pass = 0; pass < 2; pass++) {
3840 if (size == 2) {
3841 if (load) {
3842 tmp = gen_ld32(addr, IS_USER(s));
3843 neon_store_reg(rd, pass, tmp);
3844 } else {
3845 tmp = neon_load_reg(rd, pass);
3846 gen_st32(tmp, addr, IS_USER(s));
3847 }
1b2b1e54 3848 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3849 } else if (size == 1) {
3850 if (load) {
3851 tmp = gen_ld16u(addr, IS_USER(s));
3852 tcg_gen_addi_i32(addr, addr, stride);
3853 tmp2 = gen_ld16u(addr, IS_USER(s));
3854 tcg_gen_addi_i32(addr, addr, stride);
3855 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3856 dead_tmp(tmp2);
3857 neon_store_reg(rd, pass, tmp);
3858 } else {
3859 tmp = neon_load_reg(rd, pass);
3860 tmp2 = new_tmp();
3861 tcg_gen_shri_i32(tmp2, tmp, 16);
3862 gen_st16(tmp, addr, IS_USER(s));
3863 tcg_gen_addi_i32(addr, addr, stride);
3864 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3865 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3866 }
84496233
JR
3867 } else /* size == 0 */ {
3868 if (load) {
3869 TCGV_UNUSED(tmp2);
3870 for (n = 0; n < 4; n++) {
3871 tmp = gen_ld8u(addr, IS_USER(s));
3872 tcg_gen_addi_i32(addr, addr, stride);
3873 if (n == 0) {
3874 tmp2 = tmp;
3875 } else {
3876 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3877 dead_tmp(tmp);
3878 }
9ee6e8bb 3879 }
84496233
JR
3880 neon_store_reg(rd, pass, tmp2);
3881 } else {
3882 tmp2 = neon_load_reg(rd, pass);
3883 for (n = 0; n < 4; n++) {
3884 tmp = new_tmp();
3885 if (n == 0) {
3886 tcg_gen_mov_i32(tmp, tmp2);
3887 } else {
3888 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3889 }
3890 gen_st8(tmp, addr, IS_USER(s));
3891 tcg_gen_addi_i32(addr, addr, stride);
3892 }
3893 dead_tmp(tmp2);
9ee6e8bb
PB
3894 }
3895 }
3896 }
3897 }
84496233 3898 rd += spacing;
9ee6e8bb
PB
3899 }
3900 stride = nregs * 8;
3901 } else {
3902 size = (insn >> 10) & 3;
3903 if (size == 3) {
3904 /* Load single element to all lanes. */
3905 if (!load)
3906 return 1;
3907 size = (insn >> 6) & 3;
3908 nregs = ((insn >> 8) & 3) + 1;
3909 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3910 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3911 for (reg = 0; reg < nregs; reg++) {
3912 switch (size) {
3913 case 0:
1b2b1e54 3914 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3915 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3916 break;
3917 case 1:
1b2b1e54 3918 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3919 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3920 break;
3921 case 2:
1b2b1e54 3922 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3923 break;
3924 case 3:
3925 return 1;
a50f5b91
PB
3926 default: /* Avoid compiler warnings. */
3927 abort();
99c475ab 3928 }
1b2b1e54 3929 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3930 tmp2 = new_tmp();
3931 tcg_gen_mov_i32(tmp2, tmp);
3932 neon_store_reg(rd, 0, tmp2);
3018f259 3933 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3934 rd += stride;
3935 }
3936 stride = (1 << size) * nregs;
3937 } else {
3938 /* Single element. */
3939 pass = (insn >> 7) & 1;
3940 switch (size) {
3941 case 0:
3942 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3943 stride = 1;
3944 break;
3945 case 1:
3946 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3947 stride = (insn & (1 << 5)) ? 2 : 1;
3948 break;
3949 case 2:
3950 shift = 0;
9ee6e8bb
PB
3951 stride = (insn & (1 << 6)) ? 2 : 1;
3952 break;
3953 default:
3954 abort();
3955 }
3956 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3957 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3958 for (reg = 0; reg < nregs; reg++) {
3959 if (load) {
9ee6e8bb
PB
3960 switch (size) {
3961 case 0:
1b2b1e54 3962 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3963 break;
3964 case 1:
1b2b1e54 3965 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3966 break;
3967 case 2:
1b2b1e54 3968 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3969 break;
a50f5b91
PB
3970 default: /* Avoid compiler warnings. */
3971 abort();
9ee6e8bb
PB
3972 }
3973 if (size != 2) {
8f8e3aa4
PB
3974 tmp2 = neon_load_reg(rd, pass);
3975 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3976 dead_tmp(tmp2);
9ee6e8bb 3977 }
8f8e3aa4 3978 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3979 } else { /* Store */
8f8e3aa4
PB
3980 tmp = neon_load_reg(rd, pass);
3981 if (shift)
3982 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3983 switch (size) {
3984 case 0:
1b2b1e54 3985 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3986 break;
3987 case 1:
1b2b1e54 3988 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3989 break;
3990 case 2:
1b2b1e54 3991 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3992 break;
99c475ab 3993 }
99c475ab 3994 }
9ee6e8bb 3995 rd += stride;
1b2b1e54 3996 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 3997 }
9ee6e8bb 3998 stride = nregs * (1 << size);
99c475ab 3999 }
9ee6e8bb 4000 }
1b2b1e54 4001 dead_tmp(addr);
9ee6e8bb 4002 if (rm != 15) {
b26eefb6
PB
4003 TCGv base;
4004
4005 base = load_reg(s, rn);
9ee6e8bb 4006 if (rm == 13) {
b26eefb6 4007 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4008 } else {
b26eefb6
PB
4009 TCGv index;
4010 index = load_reg(s, rm);
4011 tcg_gen_add_i32(base, base, index);
4012 dead_tmp(index);
9ee6e8bb 4013 }
b26eefb6 4014 store_reg(s, rn, base);
9ee6e8bb
PB
4015 }
4016 return 0;
4017}
3b46e624 4018
8f8e3aa4
PB
4019/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4020static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4021{
4022 tcg_gen_and_i32(t, t, c);
f669df27 4023 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4024 tcg_gen_or_i32(dest, t, f);
4025}
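/* Example: with c = 0xff00ff00, t = 0x12345678 and f = 0x9abcdef0 the
   select above produces dest = 0x12bc56f0 - each bit of t is taken where
   the corresponding bit of c is set, and the bit of f where it is clear.  */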
4026
a7812ae4 4027static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4028{
4029 switch (size) {
4030 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4031 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4032 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4033 default: abort();
4034 }
4035}
4036
a7812ae4 4037static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4038{
4039 switch (size) {
4040 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4041 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4042 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4043 default: abort();
4044 }
4045}
4046
a7812ae4 4047static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4048{
4049 switch (size) {
4050 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4051 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4052 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4053 default: abort();
4054 }
4055}
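/* The three narrowing helpers above all take a 64-bit vector and halve the
   element size: gen_neon_narrow() simply truncates each element, while the
   _sats and _satu variants narrow with signed and unsigned saturation
   respectively, which is why they also need the cpu_env argument.  */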
4056
4057static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4058 int q, int u)
4059{
4060 if (q) {
4061 if (u) {
4062 switch (size) {
4063 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4064 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4065 default: abort();
4066 }
4067 } else {
4068 switch (size) {
4069 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4070 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4071 default: abort();
4072 }
4073 }
4074 } else {
4075 if (u) {
4076 switch (size) {
4077            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4078            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4079 default: abort();
4080 }
4081 } else {
4082 switch (size) {
4083 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4084 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4085 default: abort();
4086 }
4087 }
4088 }
4089}
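/* In the helper above q selects the rounding forms of the shift and u the
   unsigned forms; only the 16-bit and 32-bit cases are needed because the
   shift-and-narrow caller operates on the wide source elements and handles
   the 64-bit case separately.  */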
4090
a7812ae4 4091static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4092{
4093 if (u) {
4094 switch (size) {
4095 case 0: gen_helper_neon_widen_u8(dest, src); break;
4096 case 1: gen_helper_neon_widen_u16(dest, src); break;
4097 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4098 default: abort();
4099 }
4100 } else {
4101 switch (size) {
4102 case 0: gen_helper_neon_widen_s8(dest, src); break;
4103 case 1: gen_helper_neon_widen_s16(dest, src); break;
4104 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4105 default: abort();
4106 }
4107 }
4108 dead_tmp(src);
4109}
4110
4111static inline void gen_neon_addl(int size)
4112{
4113 switch (size) {
4114 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4115 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4116 case 2: tcg_gen_add_i64(CPU_V001); break;
4117 default: abort();
4118 }
4119}
4120
4121static inline void gen_neon_subl(int size)
4122{
4123 switch (size) {
4124 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4125 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4126 case 2: tcg_gen_sub_i64(CPU_V001); break;
4127 default: abort();
4128 }
4129}
4130
a7812ae4 4131static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4132{
4133 switch (size) {
4134 case 0: gen_helper_neon_negl_u16(var, var); break;
4135 case 1: gen_helper_neon_negl_u32(var, var); break;
4136 case 2: gen_helper_neon_negl_u64(var, var); break;
4137 default: abort();
4138 }
4139}
4140
a7812ae4 4141static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4142{
4143 switch (size) {
4144 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4145 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4146 default: abort();
4147 }
4148}
4149
a7812ae4 4150static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4151{
a7812ae4 4152 TCGv_i64 tmp;
ad69471c
PB
4153
4154 switch ((size << 1) | u) {
4155 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4156 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4157 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4158 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4159 case 4:
4160 tmp = gen_muls_i64_i32(a, b);
4161 tcg_gen_mov_i64(dest, tmp);
4162 break;
4163 case 5:
4164 tmp = gen_mulu_i64_i32(a, b);
4165 tcg_gen_mov_i64(dest, tmp);
4166 break;
4167 default: abort();
4168 }
ad69471c
PB
4169}
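/* The (size << 1) | u switch above picks the widening multiply variant:
   for example size == 1, u == 0 is case 2, a signed 16x16->32 multiply of
   each element, while size == 2 (cases 4 and 5) falls back to the generic
   32x32->64 signed/unsigned multiply helpers.  */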
4170
9ee6e8bb
PB
4171/* Translate a NEON data processing instruction. Return nonzero if the
4172 instruction is invalid.
ad69471c
PB
4173 We process data in a mixture of 32-bit and 64-bit chunks.
4174 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4175
9ee6e8bb
PB
4176static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4177{
4178 int op;
4179 int q;
4180 int rd, rn, rm;
4181 int size;
4182 int shift;
4183 int pass;
4184 int count;
4185 int pairwise;
4186 int u;
4187 int n;
ca9a32e4 4188 uint32_t imm, mask;
b75263d6 4189 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4190 TCGv_i64 tmp64;
9ee6e8bb
PB
4191
4192 if (!vfp_enabled(env))
4193 return 1;
4194 q = (insn & (1 << 6)) != 0;
4195 u = (insn >> 24) & 1;
4196 VFP_DREG_D(rd, insn);
4197 VFP_DREG_N(rn, insn);
4198 VFP_DREG_M(rm, insn);
4199 size = (insn >> 20) & 3;
4200 if ((insn & (1 << 23)) == 0) {
4201 /* Three register same length. */
4202 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4203 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4204 || op == 10 || op == 11 || op == 16)) {
4205 /* 64-bit element instructions. */
9ee6e8bb 4206 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4207 neon_load_reg64(cpu_V0, rn + pass);
4208 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4209 switch (op) {
4210 case 1: /* VQADD */
4211 if (u) {
ad69471c 4212 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4213 } else {
ad69471c 4214 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4215 }
9ee6e8bb
PB
4216 break;
4217 case 5: /* VQSUB */
4218 if (u) {
ad69471c
PB
4219 gen_helper_neon_sub_saturate_u64(CPU_V001);
4220 } else {
4221 gen_helper_neon_sub_saturate_s64(CPU_V001);
4222 }
4223 break;
4224 case 8: /* VSHL */
4225 if (u) {
4226 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4227 } else {
4228 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4229 }
4230 break;
4231 case 9: /* VQSHL */
4232 if (u) {
4233 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4234                                                     cpu_V1, cpu_V0);
4235 } else {
4236                            gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4237 cpu_V1, cpu_V0);
4238 }
4239 break;
4240 case 10: /* VRSHL */
4241 if (u) {
4242 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4243 } else {
ad69471c
PB
4244 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4245 }
4246 break;
4247 case 11: /* VQRSHL */
4248 if (u) {
4249 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4250 cpu_V1, cpu_V0);
4251 } else {
4252 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4253 cpu_V1, cpu_V0);
1e8d4eec 4254 }
9ee6e8bb
PB
4255 break;
4256 case 16:
4257 if (u) {
ad69471c 4258 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4259 } else {
ad69471c 4260 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4261 }
4262 break;
4263 default:
4264 abort();
2c0262af 4265 }
ad69471c 4266 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4267 }
9ee6e8bb 4268 return 0;
2c0262af 4269 }
9ee6e8bb
PB
4270 switch (op) {
4271 case 8: /* VSHL */
4272 case 9: /* VQSHL */
4273 case 10: /* VRSHL */
ad69471c 4274 case 11: /* VQRSHL */
9ee6e8bb 4275 {
ad69471c
PB
4276 int rtmp;
4277 /* Shift instruction operands are reversed. */
4278 rtmp = rn;
9ee6e8bb 4279 rn = rm;
ad69471c 4280 rm = rtmp;
9ee6e8bb
PB
4281 pairwise = 0;
4282 }
2c0262af 4283 break;
9ee6e8bb
PB
4284 case 20: /* VPMAX */
4285 case 21: /* VPMIN */
4286 case 23: /* VPADD */
4287 pairwise = 1;
2c0262af 4288 break;
9ee6e8bb
PB
4289 case 26: /* VPADD (float) */
4290 pairwise = (u && size < 2);
2c0262af 4291 break;
9ee6e8bb
PB
4292 case 30: /* VPMIN/VPMAX (float) */
4293 pairwise = u;
2c0262af 4294 break;
9ee6e8bb
PB
4295 default:
4296 pairwise = 0;
2c0262af 4297 break;
9ee6e8bb 4298 }
dd8fbd78 4299
9ee6e8bb
PB
4300 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4301
4302 if (pairwise) {
4303 /* Pairwise. */
4304 if (q)
4305 n = (pass & 1) * 2;
2c0262af 4306 else
9ee6e8bb
PB
4307 n = 0;
4308 if (pass < q + 1) {
dd8fbd78
FN
4309 tmp = neon_load_reg(rn, n);
4310 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4311 } else {
dd8fbd78
FN
4312 tmp = neon_load_reg(rm, n);
4313 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4314 }
4315 } else {
4316 /* Elementwise. */
dd8fbd78
FN
4317 tmp = neon_load_reg(rn, pass);
4318 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4319 }
4320 switch (op) {
4321 case 0: /* VHADD */
4322 GEN_NEON_INTEGER_OP(hadd);
4323 break;
4324 case 1: /* VQADD */
ad69471c 4325 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4326 break;
9ee6e8bb
PB
4327 case 2: /* VRHADD */
4328 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4329 break;
9ee6e8bb
PB
4330 case 3: /* Logic ops. */
4331 switch ((u << 2) | size) {
4332 case 0: /* VAND */
dd8fbd78 4333 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4334 break;
4335 case 1: /* BIC */
f669df27 4336 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4337 break;
4338 case 2: /* VORR */
dd8fbd78 4339 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4340 break;
4341 case 3: /* VORN */
f669df27 4342 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4343 break;
4344 case 4: /* VEOR */
dd8fbd78 4345 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4346 break;
4347 case 5: /* VBSL */
dd8fbd78
FN
4348 tmp3 = neon_load_reg(rd, pass);
4349 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4350 dead_tmp(tmp3);
9ee6e8bb
PB
4351 break;
4352 case 6: /* VBIT */
dd8fbd78
FN
4353 tmp3 = neon_load_reg(rd, pass);
4354 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4355 dead_tmp(tmp3);
9ee6e8bb
PB
4356 break;
4357 case 7: /* VBIF */
dd8fbd78
FN
4358 tmp3 = neon_load_reg(rd, pass);
4359 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4360 dead_tmp(tmp3);
9ee6e8bb 4361 break;
2c0262af
FB
4362 }
4363 break;
9ee6e8bb
PB
4364 case 4: /* VHSUB */
4365 GEN_NEON_INTEGER_OP(hsub);
4366 break;
4367 case 5: /* VQSUB */
ad69471c 4368 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4369 break;
9ee6e8bb
PB
4370 case 6: /* VCGT */
4371 GEN_NEON_INTEGER_OP(cgt);
4372 break;
4373 case 7: /* VCGE */
4374 GEN_NEON_INTEGER_OP(cge);
4375 break;
4376 case 8: /* VSHL */
ad69471c 4377 GEN_NEON_INTEGER_OP(shl);
2c0262af 4378 break;
9ee6e8bb 4379 case 9: /* VQSHL */
ad69471c 4380 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4381 break;
9ee6e8bb 4382 case 10: /* VRSHL */
ad69471c 4383 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4384 break;
9ee6e8bb 4385 case 11: /* VQRSHL */
ad69471c 4386 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4387 break;
4388 case 12: /* VMAX */
4389 GEN_NEON_INTEGER_OP(max);
4390 break;
4391 case 13: /* VMIN */
4392 GEN_NEON_INTEGER_OP(min);
4393 break;
4394 case 14: /* VABD */
4395 GEN_NEON_INTEGER_OP(abd);
4396 break;
4397 case 15: /* VABA */
4398 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4399 dead_tmp(tmp2);
4400 tmp2 = neon_load_reg(rd, pass);
4401 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4402 break;
4403 case 16:
4404 if (!u) { /* VADD */
dd8fbd78 4405 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4406 return 1;
4407 } else { /* VSUB */
4408 switch (size) {
dd8fbd78
FN
4409 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4410 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4411 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4412 default: return 1;
4413 }
4414 }
4415 break;
4416 case 17:
4417 if (!u) { /* VTST */
4418 switch (size) {
dd8fbd78
FN
4419 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4420 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4421 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4422 default: return 1;
4423 }
4424 } else { /* VCEQ */
4425 switch (size) {
dd8fbd78
FN
4426 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4427 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4428 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4429 default: return 1;
4430 }
4431 }
4432 break;
4433 case 18: /* Multiply. */
4434 switch (size) {
dd8fbd78
FN
4435 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4436 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4437 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4438 default: return 1;
4439 }
dd8fbd78
FN
4440 dead_tmp(tmp2);
4441 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4442 if (u) { /* VMLS */
dd8fbd78 4443 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4444 } else { /* VMLA */
dd8fbd78 4445 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4446 }
4447 break;
4448 case 19: /* VMUL */
4449 if (u) { /* polynomial */
dd8fbd78 4450 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4451 } else { /* Integer */
4452 switch (size) {
dd8fbd78
FN
4453 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4454 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4455 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4456 default: return 1;
4457 }
4458 }
4459 break;
4460 case 20: /* VPMAX */
4461 GEN_NEON_INTEGER_OP(pmax);
4462 break;
4463 case 21: /* VPMIN */
4464 GEN_NEON_INTEGER_OP(pmin);
4465 break;
4466            case 22: /* Multiply high. */
4467 if (!u) { /* VQDMULH */
4468 switch (size) {
dd8fbd78
FN
4469 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4470 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4471 default: return 1;
4472 }
4473            } else { /* VQRDMULH */
4474 switch (size) {
dd8fbd78
FN
4475 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4476 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4477 default: return 1;
4478 }
4479 }
4480 break;
4481 case 23: /* VPADD */
4482 if (u)
4483 return 1;
4484 switch (size) {
dd8fbd78
FN
4485 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4486 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4487 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4488 default: return 1;
4489 }
4490 break;
4491            case 26: /* Floating point arithmetic. */
4492 switch ((u << 2) | size) {
4493 case 0: /* VADD */
dd8fbd78 4494 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4495 break;
4496 case 2: /* VSUB */
dd8fbd78 4497 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4498 break;
4499 case 4: /* VPADD */
dd8fbd78 4500 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4501 break;
4502 case 6: /* VABD */
dd8fbd78 4503 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4504 break;
4505 default:
4506 return 1;
4507 }
4508 break;
4509 case 27: /* Float multiply. */
dd8fbd78 4510 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4511 if (!u) {
dd8fbd78
FN
4512 dead_tmp(tmp2);
4513 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4514 if (size == 0) {
dd8fbd78 4515 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4516 } else {
dd8fbd78 4517 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4518 }
4519 }
4520 break;
4521 case 28: /* Float compare. */
4522 if (!u) {
dd8fbd78 4523 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4524 } else {
9ee6e8bb 4525 if (size == 0)
dd8fbd78 4526 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4527 else
dd8fbd78 4528 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4529 }
2c0262af 4530 break;
9ee6e8bb
PB
4531 case 29: /* Float compare absolute. */
4532 if (!u)
4533 return 1;
4534 if (size == 0)
dd8fbd78 4535 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4536 else
dd8fbd78 4537 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4538 break;
9ee6e8bb
PB
4539 case 30: /* Float min/max. */
4540 if (size == 0)
dd8fbd78 4541 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4542 else
dd8fbd78 4543 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4544 break;
4545 case 31:
4546 if (size == 0)
dd8fbd78 4547 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4548 else
dd8fbd78 4549 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4550 break;
9ee6e8bb
PB
4551 default:
4552 abort();
2c0262af 4553 }
dd8fbd78
FN
4554 dead_tmp(tmp2);
4555
9ee6e8bb
PB
4556 /* Save the result. For elementwise operations we can put it
4557 straight into the destination register. For pairwise operations
4558 we have to be careful to avoid clobbering the source operands. */
4559 if (pairwise && rd == rm) {
dd8fbd78 4560 neon_store_scratch(pass, tmp);
9ee6e8bb 4561 } else {
dd8fbd78 4562 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4563 }
4564
4565 } /* for pass */
4566 if (pairwise && rd == rm) {
4567 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4568 tmp = neon_load_scratch(pass);
4569 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4570 }
4571 }
ad69471c 4572 /* End of 3 register same size operations. */
9ee6e8bb
PB
4573 } else if (insn & (1 << 4)) {
4574 if ((insn & 0x00380080) != 0) {
4575 /* Two registers and shift. */
4576 op = (insn >> 8) & 0xf;
4577 if (insn & (1 << 7)) {
4578 /* 64-bit shift. */
4579 size = 3;
4580 } else {
4581 size = 2;
4582 while ((insn & (1 << (size + 19))) == 0)
4583 size--;
4584 }
4585 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4586                /* To avoid excessive duplication of ops we implement shift
4587 by immediate using the variable shift operations. */
4588 if (op < 8) {
4589 /* Shift by immediate:
4590 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4591 /* Right shifts are encoded as N - shift, where N is the
4592 element size in bits. */
4593 if (op <= 4)
4594 shift = shift - (1 << (size + 3));
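                /* For example, with 8-bit elements (size == 0) an encoded
                   value of 7 becomes shift = 7 - 8 = -1, and the negative
                   count makes the variable shift helpers below perform a
                   right shift by one.  */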
9ee6e8bb
PB
4595 if (size == 3) {
4596 count = q + 1;
4597 } else {
4598 count = q ? 4: 2;
4599 }
4600 switch (size) {
4601 case 0:
4602 imm = (uint8_t) shift;
4603 imm |= imm << 8;
4604 imm |= imm << 16;
4605 break;
4606 case 1:
4607 imm = (uint16_t) shift;
4608 imm |= imm << 16;
4609 break;
4610 case 2:
4611 case 3:
4612 imm = shift;
4613 break;
4614 default:
4615 abort();
4616 }
4617
4618 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4619 if (size == 3) {
4620 neon_load_reg64(cpu_V0, rm + pass);
4621 tcg_gen_movi_i64(cpu_V1, imm);
4622 switch (op) {
4623 case 0: /* VSHR */
4624 case 1: /* VSRA */
4625 if (u)
4626 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4627 else
ad69471c 4628 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4629 break;
ad69471c
PB
4630 case 2: /* VRSHR */
4631 case 3: /* VRSRA */
4632 if (u)
4633 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4634 else
ad69471c 4635 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4636 break;
ad69471c
PB
4637 case 4: /* VSRI */
4638 if (!u)
4639 return 1;
4640 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4641 break;
4642 case 5: /* VSHL, VSLI */
4643 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4644 break;
4645 case 6: /* VQSHL */
4646 if (u)
4647 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4648 else
ad69471c
PB
4649 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4650 break;
4651 case 7: /* VQSHLU */
4652 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4653 break;
9ee6e8bb 4654 }
ad69471c
PB
4655 if (op == 1 || op == 3) {
4656 /* Accumulate. */
4657 neon_load_reg64(cpu_V0, rd + pass);
4658 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4659 } else if (op == 4 || (op == 5 && u)) {
4660 /* Insert */
4661 cpu_abort(env, "VS[LR]I.64 not implemented");
4662 }
4663 neon_store_reg64(cpu_V0, rd + pass);
4664 } else { /* size < 3 */
4665 /* Operands in T0 and T1. */
dd8fbd78
FN
4666 tmp = neon_load_reg(rm, pass);
4667 tmp2 = new_tmp();
4668 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4669 switch (op) {
4670 case 0: /* VSHR */
4671 case 1: /* VSRA */
4672 GEN_NEON_INTEGER_OP(shl);
4673 break;
4674 case 2: /* VRSHR */
4675 case 3: /* VRSRA */
4676 GEN_NEON_INTEGER_OP(rshl);
4677 break;
4678 case 4: /* VSRI */
4679 if (!u)
4680 return 1;
4681 GEN_NEON_INTEGER_OP(shl);
4682 break;
4683 case 5: /* VSHL, VSLI */
4684 switch (size) {
dd8fbd78
FN
4685 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4686 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4687 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4688 default: return 1;
4689 }
4690 break;
4691 case 6: /* VQSHL */
4692 GEN_NEON_INTEGER_OP_ENV(qshl);
4693 break;
4694 case 7: /* VQSHLU */
4695 switch (size) {
dd8fbd78
FN
4696 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4697 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4698 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4699 default: return 1;
4700 }
4701 break;
4702 }
dd8fbd78 4703 dead_tmp(tmp2);
ad69471c
PB
4704
4705 if (op == 1 || op == 3) {
4706 /* Accumulate. */
dd8fbd78
FN
4707 tmp2 = neon_load_reg(rd, pass);
4708 gen_neon_add(size, tmp2, tmp);
4709 dead_tmp(tmp2);
ad69471c
PB
4710 } else if (op == 4 || (op == 5 && u)) {
4711 /* Insert */
4712 switch (size) {
4713 case 0:
4714 if (op == 4)
ca9a32e4 4715 mask = 0xff >> -shift;
ad69471c 4716 else
ca9a32e4
JR
4717 mask = (uint8_t)(0xff << shift);
4718 mask |= mask << 8;
4719 mask |= mask << 16;
ad69471c
PB
4720 break;
4721 case 1:
4722 if (op == 4)
ca9a32e4 4723 mask = 0xffff >> -shift;
ad69471c 4724 else
ca9a32e4
JR
4725 mask = (uint16_t)(0xffff << shift);
4726 mask |= mask << 16;
ad69471c
PB
4727 break;
4728 case 2:
ca9a32e4
JR
4729 if (shift < -31 || shift > 31) {
4730 mask = 0;
4731 } else {
4732 if (op == 4)
4733 mask = 0xffffffffu >> -shift;
4734 else
4735 mask = 0xffffffffu << shift;
4736 }
ad69471c
PB
4737 break;
4738 default:
4739 abort();
4740 }
dd8fbd78 4741 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4742 tcg_gen_andi_i32(tmp, tmp, mask);
4743 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78
FN
4744 tcg_gen_or_i32(tmp, tmp, tmp2);
4745 dead_tmp(tmp2);
ad69471c 4746 }
dd8fbd78 4747 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4748 }
4749 } /* for pass */
4750 } else if (op < 10) {
ad69471c 4751 /* Shift by immediate and narrow:
9ee6e8bb
PB
4752 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4753 shift = shift - (1 << (size + 3));
4754 size++;
9ee6e8bb
PB
4755 switch (size) {
4756 case 1:
ad69471c 4757 imm = (uint16_t)shift;
9ee6e8bb 4758 imm |= imm << 16;
ad69471c 4759 tmp2 = tcg_const_i32(imm);
a7812ae4 4760 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4761 break;
4762 case 2:
ad69471c
PB
4763 imm = (uint32_t)shift;
4764 tmp2 = tcg_const_i32(imm);
a7812ae4 4765 TCGV_UNUSED_I64(tmp64);
4cc633c3 4766 break;
9ee6e8bb 4767 case 3:
a7812ae4
PB
4768 tmp64 = tcg_const_i64(shift);
4769 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4770 break;
4771 default:
4772 abort();
4773 }
4774
ad69471c
PB
4775 for (pass = 0; pass < 2; pass++) {
4776 if (size == 3) {
4777 neon_load_reg64(cpu_V0, rm + pass);
4778 if (q) {
4779 if (u)
a7812ae4 4780 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4781 else
a7812ae4 4782 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4783 } else {
4784 if (u)
a7812ae4 4785 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4786 else
a7812ae4 4787 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4788 }
2c0262af 4789 } else {
ad69471c
PB
4790 tmp = neon_load_reg(rm + pass, 0);
4791 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4792 tmp3 = neon_load_reg(rm + pass, 1);
4793 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4794 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4795 dead_tmp(tmp);
36aa55dc 4796 dead_tmp(tmp3);
9ee6e8bb 4797 }
ad69471c
PB
4798 tmp = new_tmp();
4799 if (op == 8 && !u) {
4800 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4801 } else {
ad69471c
PB
4802 if (op == 8)
4803 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4804 else
ad69471c
PB
4805 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4806 }
2301db49 4807 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4808 } /* for pass */
b75263d6
JR
4809 if (size == 3) {
4810 tcg_temp_free_i64(tmp64);
2301db49
JR
4811 } else {
4812 dead_tmp(tmp2);
b75263d6 4813 }
9ee6e8bb
PB
4814 } else if (op == 10) {
4815 /* VSHLL */
ad69471c 4816 if (q || size == 3)
9ee6e8bb 4817 return 1;
ad69471c
PB
4818 tmp = neon_load_reg(rm, 0);
4819 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4820 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4821 if (pass == 1)
4822 tmp = tmp2;
4823
4824 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4825
9ee6e8bb
PB
4826 if (shift != 0) {
4827 /* The shift is less than the width of the source
ad69471c
PB
4828 type, so we can just shift the whole register. */
4829 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4830 if (size < 2 || !u) {
4831 uint64_t imm64;
4832 if (size == 0) {
4833 imm = (0xffu >> (8 - shift));
4834 imm |= imm << 16;
4835 } else {
4836 imm = 0xffff >> (16 - shift);
9ee6e8bb 4837 }
ad69471c
PB
4838 imm64 = imm | (((uint64_t)imm) << 32);
4839 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4840 }
4841 }
ad69471c 4842 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4843 }
4844 } else if (op == 15 || op == 16) {
4845 /* VCVT fixed-point. */
4846 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4847 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4848 if (op & 1) {
4849 if (u)
4373f3ce 4850 gen_vfp_ulto(0, shift);
9ee6e8bb 4851 else
4373f3ce 4852 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4853 } else {
4854 if (u)
4373f3ce 4855 gen_vfp_toul(0, shift);
9ee6e8bb 4856 else
4373f3ce 4857 gen_vfp_tosl(0, shift);
2c0262af 4858 }
4373f3ce 4859 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4860 }
4861 } else {
9ee6e8bb
PB
4862 return 1;
4863 }
4864 } else { /* (insn & 0x00380080) == 0 */
4865 int invert;
4866
4867 op = (insn >> 8) & 0xf;
4868 /* One register and immediate. */
4869 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4870 invert = (insn & (1 << 5)) != 0;
4871 switch (op) {
4872 case 0: case 1:
4873 /* no-op */
4874 break;
4875 case 2: case 3:
4876 imm <<= 8;
4877 break;
4878 case 4: case 5:
4879 imm <<= 16;
4880 break;
4881 case 6: case 7:
4882 imm <<= 24;
4883 break;
4884 case 8: case 9:
4885 imm |= imm << 16;
4886 break;
4887 case 10: case 11:
4888 imm = (imm << 8) | (imm << 24);
4889 break;
4890 case 12:
4891                imm = (imm << 8) | 0xff;
4892 break;
4893 case 13:
4894 imm = (imm << 16) | 0xffff;
4895 break;
4896 case 14:
4897 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4898 if (invert)
4899 imm = ~imm;
4900 break;
4901 case 15:
4902 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4903 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4904 break;
4905 }
4906 if (invert)
4907 imm = ~imm;
4908
9ee6e8bb
PB
4909 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4910 if (op & 1 && op < 12) {
ad69471c 4911 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4912 if (invert) {
4913 /* The immediate value has already been inverted, so
4914 BIC becomes AND. */
ad69471c 4915 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4916 } else {
ad69471c 4917 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4918 }
9ee6e8bb 4919 } else {
ad69471c
PB
4920 /* VMOV, VMVN. */
4921 tmp = new_tmp();
9ee6e8bb 4922 if (op == 14 && invert) {
ad69471c
PB
4923 uint32_t val;
4924 val = 0;
9ee6e8bb
PB
4925 for (n = 0; n < 4; n++) {
4926 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4927 val |= 0xff << (n * 8);
9ee6e8bb 4928 }
ad69471c
PB
4929 tcg_gen_movi_i32(tmp, val);
4930 } else {
4931 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4932 }
9ee6e8bb 4933 }
ad69471c 4934 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4935 }
4936 }
e4b3861d 4937 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4938 if (size != 3) {
4939 op = (insn >> 8) & 0xf;
4940 if ((insn & (1 << 6)) == 0) {
4941 /* Three registers of different lengths. */
4942 int src1_wide;
4943 int src2_wide;
4944 int prewiden;
4945 /* prewiden, src1_wide, src2_wide */
4946 static const int neon_3reg_wide[16][3] = {
4947 {1, 0, 0}, /* VADDL */
4948 {1, 1, 0}, /* VADDW */
4949 {1, 0, 0}, /* VSUBL */
4950 {1, 1, 0}, /* VSUBW */
4951 {0, 1, 1}, /* VADDHN */
4952 {0, 0, 0}, /* VABAL */
4953 {0, 1, 1}, /* VSUBHN */
4954 {0, 0, 0}, /* VABDL */
4955 {0, 0, 0}, /* VMLAL */
4956 {0, 0, 0}, /* VQDMLAL */
4957 {0, 0, 0}, /* VMLSL */
4958 {0, 0, 0}, /* VQDMLSL */
4959 {0, 0, 0}, /* Integer VMULL */
4960 {0, 0, 0}, /* VQDMULL */
4961 {0, 0, 0} /* Polynomial VMULL */
4962 };
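                /* Example reading of the table: entry 1 (VADDW) has
                   src1_wide set because its first source operand is already
                   double-width, while the second operand is widened with
                   gen_neon_widen() before the add (prewiden).  */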
4963
4964 prewiden = neon_3reg_wide[op][0];
4965 src1_wide = neon_3reg_wide[op][1];
4966 src2_wide = neon_3reg_wide[op][2];
4967
ad69471c
PB
4968 if (size == 0 && (op == 9 || op == 11 || op == 13))
4969 return 1;
4970
9ee6e8bb
PB
4971 /* Avoid overlapping operands. Wide source operands are
4972 always aligned so will never overlap with wide
4973 destinations in problematic ways. */
8f8e3aa4 4974 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4975 tmp = neon_load_reg(rm, 1);
4976 neon_store_scratch(2, tmp);
8f8e3aa4 4977 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4978 tmp = neon_load_reg(rn, 1);
4979 neon_store_scratch(2, tmp);
9ee6e8bb 4980 }
a50f5b91 4981 TCGV_UNUSED(tmp3);
9ee6e8bb 4982 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4983 if (src1_wide) {
4984 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4985 TCGV_UNUSED(tmp);
9ee6e8bb 4986 } else {
ad69471c 4987 if (pass == 1 && rd == rn) {
dd8fbd78 4988 tmp = neon_load_scratch(2);
9ee6e8bb 4989 } else {
ad69471c
PB
4990 tmp = neon_load_reg(rn, pass);
4991 }
4992 if (prewiden) {
4993 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4994 }
4995 }
ad69471c
PB
4996 if (src2_wide) {
4997 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4998 TCGV_UNUSED(tmp2);
9ee6e8bb 4999 } else {
ad69471c 5000 if (pass == 1 && rd == rm) {
dd8fbd78 5001 tmp2 = neon_load_scratch(2);
9ee6e8bb 5002 } else {
ad69471c
PB
5003 tmp2 = neon_load_reg(rm, pass);
5004 }
5005 if (prewiden) {
5006 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5007 }
9ee6e8bb
PB
5008 }
5009 switch (op) {
5010 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5011 gen_neon_addl(size);
9ee6e8bb 5012 break;
79b0e534 5013 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5014 gen_neon_subl(size);
9ee6e8bb
PB
5015 break;
5016 case 5: case 7: /* VABAL, VABDL */
5017 switch ((size << 1) | u) {
ad69471c
PB
5018 case 0:
5019 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5020 break;
5021 case 1:
5022 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5023 break;
5024 case 2:
5025 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5026 break;
5027 case 3:
5028 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5029 break;
5030 case 4:
5031 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5032 break;
5033 case 5:
5034 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5035 break;
9ee6e8bb
PB
5036 default: abort();
5037 }
ad69471c
PB
5038 dead_tmp(tmp2);
5039 dead_tmp(tmp);
9ee6e8bb
PB
5040 break;
5041 case 8: case 9: case 10: case 11: case 12: case 13:
5042 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5043 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
5044 dead_tmp(tmp2);
5045 dead_tmp(tmp);
9ee6e8bb
PB
5046 break;
5047 case 14: /* Polynomial VMULL */
5048 cpu_abort(env, "Polynomial VMULL not implemented");
5049
5050 default: /* 15 is RESERVED. */
5051 return 1;
5052 }
5053 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5054 /* Accumulate. */
5055 if (op == 10 || op == 11) {
ad69471c 5056 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
5057 }
5058
9ee6e8bb 5059 if (op != 13) {
ad69471c 5060 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
5061 }
5062
5063 switch (op) {
5064 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 5065 gen_neon_addl(size);
9ee6e8bb
PB
5066 break;
5067 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5068 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5069 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5070 break;
9ee6e8bb
PB
5071 /* Fall through. */
5072 case 13: /* VQDMULL */
ad69471c 5073 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5074 break;
5075 default:
5076 abort();
5077 }
ad69471c 5078 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5079 } else if (op == 4 || op == 6) {
5080 /* Narrowing operation. */
ad69471c 5081 tmp = new_tmp();
79b0e534 5082 if (!u) {
9ee6e8bb 5083 switch (size) {
ad69471c
PB
5084 case 0:
5085 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5086 break;
5087 case 1:
5088 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5089 break;
5090 case 2:
5091 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5092 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5093 break;
9ee6e8bb
PB
5094 default: abort();
5095 }
5096 } else {
5097 switch (size) {
ad69471c
PB
5098 case 0:
5099 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5100 break;
5101 case 1:
5102 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5103 break;
5104 case 2:
5105 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5106 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5107 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5108 break;
9ee6e8bb
PB
5109 default: abort();
5110 }
5111 }
ad69471c
PB
5112 if (pass == 0) {
5113 tmp3 = tmp;
5114 } else {
5115 neon_store_reg(rd, 0, tmp3);
5116 neon_store_reg(rd, 1, tmp);
5117 }
9ee6e8bb
PB
5118 } else {
5119 /* Write back the result. */
ad69471c 5120 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5121 }
5122 }
5123 } else {
5124 /* Two registers and a scalar. */
5125 switch (op) {
5126 case 0: /* Integer VMLA scalar */
5127 case 1: /* Float VMLA scalar */
5128 case 4: /* Integer VMLS scalar */
5129 case 5: /* Floating point VMLS scalar */
5130 case 8: /* Integer VMUL scalar */
5131 case 9: /* Floating point VMUL scalar */
5132 case 12: /* VQDMULH scalar */
5133 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5134 tmp = neon_get_scalar(size, rm);
5135 neon_store_scratch(0, tmp);
9ee6e8bb 5136 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5137 tmp = neon_load_scratch(0);
5138 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5139 if (op == 12) {
5140 if (size == 1) {
dd8fbd78 5141 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5142 } else {
dd8fbd78 5143 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5144 }
5145 } else if (op == 13) {
5146 if (size == 1) {
dd8fbd78 5147 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5148 } else {
dd8fbd78 5149 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5150 }
5151 } else if (op & 1) {
dd8fbd78 5152 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5153 } else {
5154 switch (size) {
dd8fbd78
FN
5155 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5156 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5157 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5158 default: return 1;
5159 }
5160 }
dd8fbd78 5161 dead_tmp(tmp2);
9ee6e8bb
PB
5162 if (op < 8) {
5163 /* Accumulate. */
dd8fbd78 5164 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5165 switch (op) {
5166 case 0:
dd8fbd78 5167 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5168 break;
5169 case 1:
dd8fbd78 5170 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5171 break;
5172 case 4:
dd8fbd78 5173 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5174 break;
5175 case 5:
dd8fbd78 5176 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5177 break;
5178 default:
5179 abort();
5180 }
dd8fbd78 5181 dead_tmp(tmp2);
9ee6e8bb 5182 }
dd8fbd78 5183 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5184 }
5185 break;
5186        case 2: /* VMLAL scalar */
5187 case 3: /* VQDMLAL scalar */
5188 case 6: /* VMLSL scalar */
5189 case 7: /* VQDMLSL scalar */
5190 case 10: /* VMULL scalar */
5191 case 11: /* VQDMULL scalar */
ad69471c
PB
5192 if (size == 0 && (op == 3 || op == 7 || op == 11))
5193 return 1;
5194
dd8fbd78
FN
5195 tmp2 = neon_get_scalar(size, rm);
5196 tmp3 = neon_load_reg(rn, 1);
ad69471c 5197
9ee6e8bb 5198 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5199 if (pass == 0) {
5200 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5201 } else {
dd8fbd78 5202 tmp = tmp3;
9ee6e8bb 5203 }
ad69471c 5204 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5205 dead_tmp(tmp);
9ee6e8bb 5206 if (op == 6 || op == 7) {
ad69471c
PB
5207 gen_neon_negl(cpu_V0, size);
5208 }
5209 if (op != 11) {
5210 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5211 }
9ee6e8bb
PB
5212 switch (op) {
5213 case 2: case 6:
ad69471c 5214 gen_neon_addl(size);
9ee6e8bb
PB
5215 break;
5216 case 3: case 7:
ad69471c
PB
5217 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5218 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5219 break;
5220 case 10:
5221 /* no-op */
5222 break;
5223 case 11:
ad69471c 5224 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5225 break;
5226 default:
5227 abort();
5228 }
ad69471c 5229 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5230 }
dd8fbd78
FN
5231
5232 dead_tmp(tmp2);
5233
9ee6e8bb
PB
5234 break;
5235 default: /* 14 and 15 are RESERVED */
5236 return 1;
5237 }
5238 }
5239 } else { /* size == 3 */
5240 if (!u) {
5241 /* Extract. */
9ee6e8bb 5242 imm = (insn >> 8) & 0xf;
ad69471c
PB
5243 count = q + 1;
5244
5245 if (imm > 7 && !q)
5246 return 1;
5247
5248 if (imm == 0) {
5249 neon_load_reg64(cpu_V0, rn);
5250 if (q) {
5251 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5252 }
ad69471c
PB
5253 } else if (imm == 8) {
5254 neon_load_reg64(cpu_V0, rn + 1);
5255 if (q) {
5256 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5257 }
ad69471c 5258 } else if (q) {
a7812ae4 5259 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5260 if (imm < 8) {
5261 neon_load_reg64(cpu_V0, rn);
a7812ae4 5262 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5263 } else {
5264 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5265 neon_load_reg64(tmp64, rm);
ad69471c
PB
5266 }
5267 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5268 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5269 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5270 if (imm < 8) {
5271 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5272 } else {
ad69471c
PB
5273 neon_load_reg64(cpu_V1, rm + 1);
5274 imm -= 8;
9ee6e8bb 5275 }
ad69471c 5276 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5277 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5278 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5279 tcg_temp_free_i64(tmp64);
ad69471c 5280 } else {
a7812ae4 5281 /* BUGFIX */
ad69471c 5282 neon_load_reg64(cpu_V0, rn);
a7812ae4 5283 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5284 neon_load_reg64(cpu_V1, rm);
a7812ae4 5285 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5286 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5287 }
5288 neon_store_reg64(cpu_V0, rd);
5289 if (q) {
5290 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5291 }
5292 } else if ((insn & (1 << 11)) == 0) {
5293 /* Two register misc. */
5294 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5295 size = (insn >> 18) & 3;
5296 switch (op) {
5297 case 0: /* VREV64 */
5298 if (size == 3)
5299 return 1;
5300 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5301 tmp = neon_load_reg(rm, pass * 2);
5302 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5303 switch (size) {
dd8fbd78
FN
5304 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5305 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5306 case 2: /* no-op */ break;
5307 default: abort();
5308 }
dd8fbd78 5309 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5310 if (size == 2) {
dd8fbd78 5311 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5312 } else {
9ee6e8bb 5313 switch (size) {
dd8fbd78
FN
5314 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5315 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5316 default: abort();
5317 }
dd8fbd78 5318 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5319 }
5320 }
5321 break;
5322 case 4: case 5: /* VPADDL */
5323 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5324 if (size == 3)
5325 return 1;
ad69471c
PB
5326 for (pass = 0; pass < q + 1; pass++) {
5327 tmp = neon_load_reg(rm, pass * 2);
5328 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5329 tmp = neon_load_reg(rm, pass * 2 + 1);
5330 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5331 switch (size) {
5332 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5333 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5334 case 2: tcg_gen_add_i64(CPU_V001); break;
5335 default: abort();
5336 }
9ee6e8bb
PB
5337 if (op >= 12) {
5338 /* Accumulate. */
ad69471c
PB
5339 neon_load_reg64(cpu_V1, rd + pass);
5340 gen_neon_addl(size);
9ee6e8bb 5341 }
ad69471c 5342 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5343 }
5344 break;
5345 case 33: /* VTRN */
5346 if (size == 2) {
5347 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5348 tmp = neon_load_reg(rm, n);
5349 tmp2 = neon_load_reg(rd, n + 1);
5350 neon_store_reg(rm, n, tmp2);
5351 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5352 }
5353 } else {
5354 goto elementwise;
5355 }
5356 break;
5357 case 34: /* VUZP */
5358 /* Reg Before After
5359 Rd A3 A2 A1 A0 B2 B0 A2 A0
5360 Rm B3 B2 B1 B0 B3 B1 A3 A1
5361 */
5362 if (size == 3)
5363 return 1;
5364 gen_neon_unzip(rd, q, 0, size);
5365 gen_neon_unzip(rm, q, 4, size);
5366 if (q) {
5367 static int unzip_order_q[8] =
5368 {0, 2, 4, 6, 1, 3, 5, 7};
5369 for (n = 0; n < 8; n++) {
5370 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5371 tmp = neon_load_scratch(unzip_order_q[n]);
5372 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5373 }
5374 } else {
5375 static int unzip_order[4] =
5376 {0, 4, 1, 5};
5377 for (n = 0; n < 4; n++) {
5378 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5379 tmp = neon_load_scratch(unzip_order[n]);
5380 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5381 }
5382 }
5383 break;
5384 case 35: /* VZIP */
5385 /* Reg Before After
5386 Rd A3 A2 A1 A0 B1 A1 B0 A0
5387 Rm B3 B2 B1 B0 B3 A3 B2 A2
5388 */
5389 if (size == 3)
5390 return 1;
5391 count = (q ? 4 : 2);
5392 for (n = 0; n < count; n++) {
dd8fbd78
FN
5393 tmp = neon_load_reg(rd, n);
5394                    tmp2 = neon_load_reg(rm, n);
9ee6e8bb 5395 switch (size) {
dd8fbd78
FN
5396 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5397 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5398 case 2: /* no-op */; break;
5399 default: abort();
5400 }
dd8fbd78
FN
5401 neon_store_scratch(n * 2, tmp);
5402 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5403 }
5404 for (n = 0; n < count * 2; n++) {
5405 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5406 tmp = neon_load_scratch(n);
5407 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5408 }
5409 break;
5410 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5411 if (size == 3)
5412 return 1;
a50f5b91 5413 TCGV_UNUSED(tmp2);
9ee6e8bb 5414 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5415 neon_load_reg64(cpu_V0, rm + pass);
5416 tmp = new_tmp();
9ee6e8bb 5417 if (op == 36 && q == 0) {
ad69471c 5418 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5419 } else if (q) {
ad69471c 5420 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5421 } else {
ad69471c
PB
5422 gen_neon_narrow_sats(size, tmp, cpu_V0);
5423 }
5424 if (pass == 0) {
5425 tmp2 = tmp;
5426 } else {
5427 neon_store_reg(rd, 0, tmp2);
5428 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5429 }
9ee6e8bb
PB
5430 }
5431 break;
5432 case 38: /* VSHLL */
ad69471c 5433 if (q || size == 3)
9ee6e8bb 5434 return 1;
ad69471c
PB
5435 tmp = neon_load_reg(rm, 0);
5436 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5437 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5438 if (pass == 1)
5439 tmp = tmp2;
5440 gen_neon_widen(cpu_V0, tmp, size, 1);
5441 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5442 }
5443 break;
60011498
PB
5444 case 44: /* VCVT.F16.F32 */
5445 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5446 return 1;
5447 tmp = new_tmp();
5448 tmp2 = new_tmp();
5449 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5450 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5451 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5452 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5453 tcg_gen_shli_i32(tmp2, tmp2, 16);
5454 tcg_gen_or_i32(tmp2, tmp2, tmp);
5455 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5456 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5457 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5458 neon_store_reg(rd, 0, tmp2);
5459 tmp2 = new_tmp();
5460 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5461 tcg_gen_shli_i32(tmp2, tmp2, 16);
5462 tcg_gen_or_i32(tmp2, tmp2, tmp);
5463 neon_store_reg(rd, 1, tmp2);
5464 dead_tmp(tmp);
5465 break;
5466 case 46: /* VCVT.F32.F16 */
5467 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5468 return 1;
5469 tmp3 = new_tmp();
5470 tmp = neon_load_reg(rm, 0);
5471 tmp2 = neon_load_reg(rm, 1);
5472 tcg_gen_ext16u_i32(tmp3, tmp);
5473 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5474 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5475 tcg_gen_shri_i32(tmp3, tmp, 16);
5476 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5477 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5478 dead_tmp(tmp);
5479 tcg_gen_ext16u_i32(tmp3, tmp2);
5480 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5481 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5482 tcg_gen_shri_i32(tmp3, tmp2, 16);
5483 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5484 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5485 dead_tmp(tmp2);
5486 dead_tmp(tmp3);
5487 break;
9ee6e8bb
PB
5488 default:
5489 elementwise:
5490 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5491 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5492 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5493 neon_reg_offset(rm, pass));
dd8fbd78 5494 TCGV_UNUSED(tmp);
9ee6e8bb 5495 } else {
dd8fbd78 5496 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5497 }
5498 switch (op) {
5499 case 1: /* VREV32 */
5500 switch (size) {
dd8fbd78
FN
5501 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5502 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5503 default: return 1;
5504 }
5505 break;
5506 case 2: /* VREV16 */
5507 if (size != 0)
5508 return 1;
dd8fbd78 5509 gen_rev16(tmp);
9ee6e8bb 5510 break;
9ee6e8bb
PB
5511 case 8: /* CLS */
5512 switch (size) {
dd8fbd78
FN
5513 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5514 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5515 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5516 default: return 1;
5517 }
5518 break;
5519 case 9: /* CLZ */
5520 switch (size) {
dd8fbd78
FN
5521 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5522 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5523 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5524 default: return 1;
5525 }
5526 break;
5527 case 10: /* CNT */
5528 if (size != 0)
5529 return 1;
dd8fbd78 5530 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5531 break;
5532 case 11: /* VNOT */
5533 if (size != 0)
5534 return 1;
dd8fbd78 5535 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5536 break;
5537 case 14: /* VQABS */
5538 switch (size) {
dd8fbd78
FN
5539 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5540 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5541 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5542 default: return 1;
5543 }
5544 break;
5545 case 15: /* VQNEG */
5546 switch (size) {
dd8fbd78
FN
5547 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5548 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5549 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5550 default: return 1;
5551 }
5552 break;
5553 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5554 tmp2 = tcg_const_i32(0);
9ee6e8bb 5555 switch(size) {
dd8fbd78
FN
5556 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5557 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5558 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5559 default: return 1;
5560 }
dd8fbd78 5561 tcg_temp_free(tmp2);
9ee6e8bb 5562 if (op == 19)
dd8fbd78 5563 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5564 break;
5565 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5566 tmp2 = tcg_const_i32(0);
9ee6e8bb 5567 switch(size) {
dd8fbd78
FN
5568 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5569 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5570 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5571 default: return 1;
5572 }
dd8fbd78 5573 tcg_temp_free(tmp2);
9ee6e8bb 5574 if (op == 20)
dd8fbd78 5575 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5576 break;
5577 case 18: /* VCEQ #0 */
dd8fbd78 5578 tmp2 = tcg_const_i32(0);
9ee6e8bb 5579 switch(size) {
dd8fbd78
FN
5580 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5581 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5582 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5583 default: return 1;
5584 }
dd8fbd78 5585 tcg_temp_free(tmp2);
9ee6e8bb
PB
5586 break;
5587 case 22: /* VABS */
5588 switch(size) {
dd8fbd78
FN
5589 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5590 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5591 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5592 default: return 1;
5593 }
5594 break;
5595 case 23: /* VNEG */
ad69471c
PB
5596 if (size == 3)
5597 return 1;
dd8fbd78
FN
5598 tmp2 = tcg_const_i32(0);
5599 gen_neon_rsb(size, tmp, tmp2);
5600 tcg_temp_free(tmp2);
9ee6e8bb
PB
5601 break;
5602 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5603 tmp2 = tcg_const_i32(0);
5604 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5605 tcg_temp_free(tmp2);
9ee6e8bb 5606 if (op == 27)
dd8fbd78 5607 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5608 break;
5609 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5610 tmp2 = tcg_const_i32(0);
5611 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5612 tcg_temp_free(tmp2);
9ee6e8bb 5613 if (op == 28)
dd8fbd78 5614 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5615 break;
5616 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5617 tmp2 = tcg_const_i32(0);
5618 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5619 tcg_temp_free(tmp2);
9ee6e8bb
PB
5620 break;
5621 case 30: /* Float VABS */
4373f3ce 5622 gen_vfp_abs(0);
9ee6e8bb
PB
5623 break;
5624 case 31: /* Float VNEG */
4373f3ce 5625 gen_vfp_neg(0);
9ee6e8bb
PB
5626 break;
5627 case 32: /* VSWP */
dd8fbd78
FN
5628 tmp2 = neon_load_reg(rd, pass);
5629 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5630 break;
5631 case 33: /* VTRN */
dd8fbd78 5632 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5633 switch (size) {
dd8fbd78
FN
5634 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5635 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5636 case 2: abort();
5637 default: return 1;
5638 }
dd8fbd78 5639 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5640 break;
5641 case 56: /* Integer VRECPE */
dd8fbd78 5642 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5643 break;
5644 case 57: /* Integer VRSQRTE */
dd8fbd78 5645 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5646 break;
5647 case 58: /* Float VRECPE */
4373f3ce 5648 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5649 break;
5650 case 59: /* Float VRSQRTE */
4373f3ce 5651 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5652 break;
5653 case 60: /* VCVT.F32.S32 */
4373f3ce 5654 gen_vfp_tosiz(0);
9ee6e8bb
PB
5655 break;
5656 case 61: /* VCVT.F32.U32 */
4373f3ce 5657 gen_vfp_touiz(0);
9ee6e8bb
PB
5658 break;
5659 case 62: /* VCVT.S32.F32 */
4373f3ce 5660 gen_vfp_sito(0);
9ee6e8bb
PB
5661 break;
5662 case 63: /* VCVT.U32.F32 */
4373f3ce 5663 gen_vfp_uito(0);
9ee6e8bb
PB
5664 break;
5665 default:
5666 /* Reserved: 21, 29, 39-56 */
5667 return 1;
5668 }
5669 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5670 tcg_gen_st_f32(cpu_F0s, cpu_env,
5671 neon_reg_offset(rd, pass));
9ee6e8bb 5672 } else {
dd8fbd78 5673 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5674 }
5675 }
5676 break;
5677 }
5678 } else if ((insn & (1 << 10)) == 0) {
5679 /* VTBL, VTBX. */
5680            n = ((insn >> 5) & 0x18) + 8;
5681            if (insn & (1 << 6)) {
5682                tmp = neon_load_reg(rd, 0);
5683            } else {
5684                tmp = new_tmp();
5685                tcg_gen_movi_i32(tmp, 0);
5686            }
5687            tmp2 = neon_load_reg(rm, 0);
5688            tmp4 = tcg_const_i32(rn);
5689            tmp5 = tcg_const_i32(n);
5690            gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5691            dead_tmp(tmp);
5692            if (insn & (1 << 6)) {
5693                tmp = neon_load_reg(rd, 1);
5694            } else {
5695                tmp = new_tmp();
5696                tcg_gen_movi_i32(tmp, 0);
5697            }
5698            tmp3 = neon_load_reg(rm, 1);
5699            gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5700            tcg_temp_free_i32(tmp5);
5701            tcg_temp_free_i32(tmp4);
5702            neon_store_reg(rd, 0, tmp2);
5703            neon_store_reg(rd, 1, tmp3);
5704            dead_tmp(tmp);
5705 } else if ((insn & 0x380) == 0) {
5706 /* VDUP */
5707 if (insn & (1 << 19)) {
5708                tmp = neon_load_reg(rm, 1);
5709            } else {
5710                tmp = neon_load_reg(rm, 0);
5711            }
5712            if (insn & (1 << 16)) {
5713                gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5714            } else if (insn & (1 << 17)) {
5715                if ((insn >> 18) & 1)
5716                    gen_neon_dup_high16(tmp);
5717                else
5718                    gen_neon_dup_low16(tmp);
5719            }
5720            for (pass = 0; pass < (q ? 4 : 2); pass++) {
5721                tmp2 = new_tmp();
5722                tcg_gen_mov_i32(tmp2, tmp);
5723                neon_store_reg(rd, pass, tmp2);
5724            }
5725            dead_tmp(tmp);
5726 } else {
5727 return 1;
5728 }
5729 }
5730 }
5731 return 0;
5732}
5733
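/* Illustrative sketch of the table-lookup semantics that the VTBL/VTBX
 * helper calls above implement: each destination byte selects one of `n`
 * table bytes taken from consecutive registers starting at rn; an
 * out-of-range index yields 0 for VTBL and leaves the old destination
 * byte unchanged for VTBX.  Plain C, assuming <stdint.h>; the function
 * name is hypothetical and nothing here is used by the translator.
 */
static uint8_t neon_tbl_byte_sketch(const uint8_t *table, int table_len,
                                    uint8_t index, uint8_t old_byte,
                                    int is_vtbx)
{
    if (index < table_len) {
        return table[index];            /* in range: take the table byte */
    }
    return is_vtbx ? old_byte : 0;      /* VTBX keeps, VTBL zeroes */
}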
5734static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5735{
5736 int crn = (insn >> 16) & 0xf;
5737 int crm = insn & 0xf;
5738 int op1 = (insn >> 21) & 7;
5739 int op2 = (insn >> 5) & 7;
5740 int rt = (insn >> 12) & 0xf;
5741 TCGv tmp;
5742
5743 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5744 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5745 /* TEECR */
5746 if (IS_USER(s))
5747 return 1;
5748 tmp = load_cpu_field(teecr);
5749 store_reg(s, rt, tmp);
5750 return 0;
5751 }
5752 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5753 /* TEEHBR */
5754 if (IS_USER(s) && (env->teecr & 1))
5755 return 1;
5756 tmp = load_cpu_field(teehbr);
5757 store_reg(s, rt, tmp);
5758 return 0;
5759 }
5760 }
5761 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5762 op1, crn, crm, op2);
5763 return 1;
5764}
5765
5766static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5767{
5768 int crn = (insn >> 16) & 0xf;
5769 int crm = insn & 0xf;
5770 int op1 = (insn >> 21) & 7;
5771 int op2 = (insn >> 5) & 7;
5772 int rt = (insn >> 12) & 0xf;
5773 TCGv tmp;
5774
5775 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5776 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5777 /* TEECR */
5778 if (IS_USER(s))
5779 return 1;
5780 tmp = load_reg(s, rt);
5781 gen_helper_set_teecr(cpu_env, tmp);
5782 dead_tmp(tmp);
5783 return 0;
5784 }
5785 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5786 /* TEEHBR */
5787 if (IS_USER(s) && (env->teecr & 1))
5788 return 1;
5789 tmp = load_reg(s, rt);
5790 store_cpu_field(tmp, teehbr);
5791 return 0;
5792 }
5793 }
5794 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5795 op1, crn, crm, op2);
5796 return 1;
5797}
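/* Illustrative sketch of the MRC/MCR field extraction performed at the top
 * of disas_cp14_read/disas_cp14_write above, repacked into a struct so the
 * bit positions are easy to see.  The struct and function are hypothetical
 * helpers for exposition only; assumes <stdint.h>.
 */
struct cp14_fields_sketch {
    int crn;    /* coprocessor register, bits [19:16] */
    int crm;    /* additional register,  bits [3:0]   */
    int op1;    /* opcode 1,             bits [23:21] */
    int op2;    /* opcode 2,             bits [7:5]   */
    int rt;     /* ARM core register,    bits [15:12] */
};

static struct cp14_fields_sketch cp14_decode_sketch(uint32_t insn)
{
    struct cp14_fields_sketch f;
    f.crn = (insn >> 16) & 0xf;
    f.crm = insn & 0xf;
    f.op1 = (insn >> 21) & 7;
    f.op2 = (insn >> 5) & 7;
    f.rt  = (insn >> 12) & 0xf;
    return f;
}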
5798
5799static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5800{
5801 int cpnum;
5802
5803 cpnum = (insn >> 8) & 0xf;
5804 if (arm_feature(env, ARM_FEATURE_XSCALE)
5805 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5806 return 1;
5807
5808 switch (cpnum) {
5809 case 0:
5810 case 1:
5811 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5812 return disas_iwmmxt_insn(env, s, insn);
5813 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5814 return disas_dsp_insn(env, s, insn);
5815 }
5816 return 1;
5817 case 10:
5818 case 11:
5819 return disas_vfp_insn (env, s, insn);
5820 case 14:
5821 /* Coprocessors 7-15 are architecturally reserved by ARM.
5822 Unfortunately Intel decided to ignore this. */
5823 if (arm_feature(env, ARM_FEATURE_XSCALE))
5824 goto board;
5825 if (insn & (1 << 20))
5826 return disas_cp14_read(env, s, insn);
5827 else
5828 return disas_cp14_write(env, s, insn);
5829    case 15:
5830        return disas_cp15_insn (env, s, insn);
5831    default:
5832    board:
5833 /* Unknown coprocessor. See if the board has hooked it. */
5834 return disas_cp_insn (env, s, insn);
5835 }
5836}
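/* Illustrative sketch of the XScale coprocessor-access test used at the top
 * of disas_coproc_insn above: (c15_cpar ^ 0x3fff) & (1 << cpnum) is nonzero
 * exactly when bit cpnum of the CP Access Register is clear, i.e. when the
 * guest is not allowed to use that coprocessor.  Hypothetical helper name;
 * assumes <stdint.h>.
 */
static int xscale_cp_enabled_sketch(uint32_t c15_cpar, int cpnum)
{
    return ((c15_cpar ^ 0x3fff) & (1u << cpnum)) == 0;
}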
5837
5838
5839/* Store a 64-bit value to a register pair. Clobbers val. */
5840static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5841{
5842 TCGv tmp;
5843 tmp = new_tmp();
5844 tcg_gen_trunc_i64_i32(tmp, val);
5845 store_reg(s, rlow, tmp);
5846 tmp = new_tmp();
5847 tcg_gen_shri_i64(val, val, 32);
5848 tcg_gen_trunc_i64_i32(tmp, val);
5849 store_reg(s, rhigh, tmp);
5850}
5851
5852/* load a 32-bit value from a register and perform a 64-bit accumulate. */
5853static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5854{
5855    TCGv_i64 tmp;
5856    TCGv tmp2;
5857
5858    /* Load value and extend to 64 bits.  */
5859    tmp = tcg_temp_new_i64();
5860 tmp2 = load_reg(s, rlow);
5861 tcg_gen_extu_i32_i64(tmp, tmp2);
5862 dead_tmp(tmp2);
5863 tcg_gen_add_i64(val, val, tmp);
5864    tcg_temp_free_i64(tmp);
5865}
5866
5867/* load and add a 64-bit value from a register pair. */
5868static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5869{
5870    TCGv_i64 tmp;
5871 TCGv tmpl;
5872 TCGv tmph;
5873
5874 /* Load 64-bit value rd:rn. */
5875 tmpl = load_reg(s, rlow);
5876 tmph = load_reg(s, rhigh);
5877    tmp = tcg_temp_new_i64();
5878 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5879 dead_tmp(tmpl);
5880 dead_tmp(tmph);
5881    tcg_gen_add_i64(val, val, tmp);
5882    tcg_temp_free_i64(tmp);
5883}
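/* Plain-C picture of what gen_storeq_reg/gen_addq above express in TCG ops:
 * a 64-bit value is carried in two 32-bit core registers, low word in rlow
 * and high word in rhigh.  Sketch only, assuming <stdint.h>; names are
 * hypothetical.
 */
static void split_u64_sketch(uint64_t val, uint32_t *rlow, uint32_t *rhigh)
{
    *rlow  = (uint32_t)val;             /* what gen_storeq_reg writes first */
    *rhigh = (uint32_t)(val >> 32);     /* then the high half */
}

static uint64_t join_u64_sketch(uint32_t rlow, uint32_t rhigh)
{
    return ((uint64_t)rhigh << 32) | rlow;   /* what gen_addq concatenates */
}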
5884
5885/* Set N and Z flags from a 64-bit value. */
5886static void gen_logicq_cc(TCGv_i64 val)
5887{
5888 TCGv tmp = new_tmp();
5889 gen_helper_logicq_cc(tmp, val);
5890 gen_logic_CC(tmp);
5891 dead_tmp(tmp);
5892}
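/* Sketch of the value gen_logicq_cc feeds to gen_logic_CC, on the assumption
 * that the helper follows the usual ARM long-multiply flag rule: N comes from
 * bit 63 of the 64-bit result and Z is set only when the whole result is
 * zero.  Hypothetical name; assumes <stdint.h>.
 */
static uint32_t logicq_cc_value_sketch(uint64_t result)
{
    /* Bit 31 of the returned value tracks bit 63 of the result, and the
       value is nonzero iff the 64-bit result is nonzero. */
    return (uint32_t)(result >> 32) | (result != 0);
}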
5893
5894/* Load/Store exclusive instructions are implemented by remembering
5895 the value/address loaded, and seeing if these are the same
5896   when the store is performed.  This should be sufficient to implement
5897 the architecturally mandated semantics, and avoids having to monitor
5898 regular stores.
5899
5900 In system emulation mode only one CPU will be running at once, so
5901 this sequence is effectively atomic. In user emulation mode we
5902 throw an exception and handle the atomic operation elsewhere. */
5903static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5904 TCGv addr, int size)
5905{
5906 TCGv tmp;
5907
5908 switch (size) {
5909 case 0:
5910 tmp = gen_ld8u(addr, IS_USER(s));
5911 break;
5912 case 1:
5913 tmp = gen_ld16u(addr, IS_USER(s));
5914 break;
5915 case 2:
5916 case 3:
5917 tmp = gen_ld32(addr, IS_USER(s));
5918 break;
5919 default:
5920 abort();
5921 }
5922 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5923 store_reg(s, rt, tmp);
5924 if (size == 3) {
5925 tcg_gen_addi_i32(addr, addr, 4);
5926 tmp = gen_ld32(addr, IS_USER(s));
5927 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5928 store_reg(s, rt2, tmp);
5929 }
5930 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5931}
5932
5933static void gen_clrex(DisasContext *s)
5934{
5935 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5936}
5937
5938#ifdef CONFIG_USER_ONLY
5939static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5940 TCGv addr, int size)
5941{
5942 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5943 tcg_gen_movi_i32(cpu_exclusive_info,
5944 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5945 gen_set_condexec(s);
5946 gen_set_pc_im(s->pc - 4);
5947 gen_exception(EXCP_STREX);
5948 s->is_jmp = DISAS_JUMP;
5949}
5950#else
5951static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5952 TCGv addr, int size)
5953{
5954 TCGv tmp;
5955 int done_label;
5956 int fail_label;
5957
5958 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5959 [addr] = {Rt};
5960 {Rd} = 0;
5961 } else {
5962 {Rd} = 1;
5963 } */
5964 fail_label = gen_new_label();
5965 done_label = gen_new_label();
5966 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
5967 switch (size) {
5968 case 0:
5969 tmp = gen_ld8u(addr, IS_USER(s));
5970 break;
5971 case 1:
5972 tmp = gen_ld16u(addr, IS_USER(s));
5973 break;
5974 case 2:
5975 case 3:
5976 tmp = gen_ld32(addr, IS_USER(s));
5977 break;
5978 default:
5979 abort();
5980 }
5981 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
5982 dead_tmp(tmp);
5983 if (size == 3) {
5984 TCGv tmp2 = new_tmp();
5985 tcg_gen_addi_i32(tmp2, addr, 4);
5986        tmp = gen_ld32(tmp2, IS_USER(s));
5987 dead_tmp(tmp2);
5988 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
5989 dead_tmp(tmp);
5990 }
5991 tmp = load_reg(s, rt);
5992 switch (size) {
5993 case 0:
5994 gen_st8(tmp, addr, IS_USER(s));
5995 break;
5996 case 1:
5997 gen_st16(tmp, addr, IS_USER(s));
5998 break;
5999 case 2:
6000 case 3:
6001 gen_st32(tmp, addr, IS_USER(s));
6002 break;
6003 default:
6004 abort();
6005 }
6006 if (size == 3) {
6007 tcg_gen_addi_i32(addr, addr, 4);
6008 tmp = load_reg(s, rt2);
6009 gen_st32(tmp, addr, IS_USER(s));
6010 }
6011 tcg_gen_movi_i32(cpu_R[rd], 0);
6012 tcg_gen_br(done_label);
6013 gen_set_label(fail_label);
6014 tcg_gen_movi_i32(cpu_R[rd], 1);
6015 gen_set_label(done_label);
6016 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6017}
6018#endif
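/* Plain-C model of the store-exclusive check that gen_store_exclusive emits
 * above, under the same simplifications spelled out before
 * gen_load_exclusive: a single recorded address/value pair and no snooping
 * of ordinary stores.  `mem` is a word array indexed by byte address / 4;
 * all names are hypothetical and this is exposition only.  Assumes
 * <stdint.h>.
 */
struct excl_monitor_sketch {
    uint32_t addr;      /* (uint32_t)-1 when no LDREX is outstanding */
    uint32_t val;       /* value observed by the last LDREX */
};

static int strex_sketch(struct excl_monitor_sketch *m, uint32_t *mem,
                        uint32_t addr, uint32_t newval)
{
    int failed = 1;     /* {Rd} = 1 on failure, as in the comment above */
    if (m->addr == addr && mem[addr / 4] == m->val) {
        mem[addr / 4] = newval;
        failed = 0;     /* {Rd} = 0: the store went through */
    }
    m->addr = (uint32_t)-1;   /* monitor is cleared on either outcome */
    return failed;
}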
6019
6020static void disas_arm_insn(CPUState * env, DisasContext *s)
6021{
6022 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6023 TCGv tmp;
3670669c 6024 TCGv tmp2;
6ddbc6e4 6025 TCGv tmp3;
b0109805 6026 TCGv addr;
a7812ae4 6027 TCGv_i64 tmp64;
9ee6e8bb
PB
6028
6029 insn = ldl_code(s->pc);
6030 s->pc += 4;
6031
6032 /* M variants do not implement ARM mode. */
6033 if (IS_M(env))
6034 goto illegal_op;
6035 cond = insn >> 28;
6036 if (cond == 0xf){
6037 /* Unconditional instructions. */
6038 if (((insn >> 25) & 7) == 1) {
6039 /* NEON Data processing. */
6040 if (!arm_feature(env, ARM_FEATURE_NEON))
6041 goto illegal_op;
6042
6043 if (disas_neon_data_insn(env, s, insn))
6044 goto illegal_op;
6045 return;
6046 }
6047 if ((insn & 0x0f100000) == 0x04000000) {
6048 /* NEON load/store. */
6049 if (!arm_feature(env, ARM_FEATURE_NEON))
6050 goto illegal_op;
6051
6052 if (disas_neon_ls_insn(env, s, insn))
6053 goto illegal_op;
6054 return;
6055 }
6056 if ((insn & 0x0d70f000) == 0x0550f000)
6057 return; /* PLD */
6058 else if ((insn & 0x0ffffdff) == 0x01010000) {
6059 ARCH(6);
6060 /* setend */
6061 if (insn & (1 << 9)) {
6062 /* BE8 mode not implemented. */
6063 goto illegal_op;
6064 }
6065 return;
6066 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6067 switch ((insn >> 4) & 0xf) {
6068 case 1: /* clrex */
6069 ARCH(6K);
426f5abc 6070 gen_clrex(s);
9ee6e8bb
PB
6071 return;
6072 case 4: /* dsb */
6073 case 5: /* dmb */
6074 case 6: /* isb */
6075 ARCH(7);
6076 /* We don't emulate caches so these are a no-op. */
6077 return;
6078 default:
6079 goto illegal_op;
6080 }
6081 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6082 /* srs */
c67b6b71 6083 int32_t offset;
9ee6e8bb
PB
6084 if (IS_USER(s))
6085 goto illegal_op;
6086 ARCH(6);
6087 op1 = (insn & 0x1f);
6088 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 6089 addr = load_reg(s, 13);
9ee6e8bb 6090 } else {
b0109805 6091 addr = new_tmp();
b75263d6
JR
6092 tmp = tcg_const_i32(op1);
6093 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6094 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6095 }
6096 i = (insn >> 23) & 3;
6097 switch (i) {
6098 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6099 case 1: offset = 0; break; /* IA */
6100 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6101 case 3: offset = 4; break; /* IB */
6102 default: abort();
6103 }
6104 if (offset)
b0109805
PB
6105 tcg_gen_addi_i32(addr, addr, offset);
6106 tmp = load_reg(s, 14);
6107 gen_st32(tmp, addr, 0);
c67b6b71 6108 tmp = load_cpu_field(spsr);
b0109805
PB
6109 tcg_gen_addi_i32(addr, addr, 4);
6110 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6111 if (insn & (1 << 21)) {
6112 /* Base writeback. */
6113 switch (i) {
6114 case 0: offset = -8; break;
c67b6b71
FN
6115 case 1: offset = 4; break;
6116 case 2: offset = -4; break;
9ee6e8bb
PB
6117 case 3: offset = 0; break;
6118 default: abort();
6119 }
6120 if (offset)
c67b6b71 6121 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 6122 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 6123 store_reg(s, 13, addr);
9ee6e8bb 6124 } else {
b75263d6
JR
6125 tmp = tcg_const_i32(op1);
6126 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6127 tcg_temp_free_i32(tmp);
c67b6b71 6128 dead_tmp(addr);
9ee6e8bb 6129 }
b0109805
PB
6130 } else {
6131 dead_tmp(addr);
9ee6e8bb
PB
6132 }
6133 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
6134 /* rfe */
c67b6b71 6135 int32_t offset;
9ee6e8bb
PB
6136 if (IS_USER(s))
6137 goto illegal_op;
6138 ARCH(6);
6139 rn = (insn >> 16) & 0xf;
b0109805 6140 addr = load_reg(s, rn);
9ee6e8bb
PB
6141 i = (insn >> 23) & 3;
6142 switch (i) {
b0109805 6143 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6144 case 1: offset = 0; break; /* IA */
6145 case 2: offset = -8; break; /* DB */
b0109805 6146 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6147 default: abort();
6148 }
6149 if (offset)
b0109805
PB
6150 tcg_gen_addi_i32(addr, addr, offset);
6151 /* Load PC into tmp and CPSR into tmp2. */
6152 tmp = gen_ld32(addr, 0);
6153 tcg_gen_addi_i32(addr, addr, 4);
6154 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6155 if (insn & (1 << 21)) {
6156 /* Base writeback. */
6157 switch (i) {
b0109805 6158 case 0: offset = -8; break;
c67b6b71
FN
6159 case 1: offset = 4; break;
6160 case 2: offset = -4; break;
b0109805 6161 case 3: offset = 0; break;
9ee6e8bb
PB
6162 default: abort();
6163 }
6164 if (offset)
b0109805
PB
6165 tcg_gen_addi_i32(addr, addr, offset);
6166 store_reg(s, rn, addr);
6167 } else {
6168 dead_tmp(addr);
9ee6e8bb 6169 }
b0109805 6170 gen_rfe(s, tmp, tmp2);
c67b6b71 6171 return;
9ee6e8bb
PB
6172 } else if ((insn & 0x0e000000) == 0x0a000000) {
6173 /* branch link and change to thumb (blx <offset>) */
6174 int32_t offset;
6175
6176 val = (uint32_t)s->pc;
d9ba4830
PB
6177 tmp = new_tmp();
6178 tcg_gen_movi_i32(tmp, val);
6179 store_reg(s, 14, tmp);
9ee6e8bb
PB
6180 /* Sign-extend the 24-bit offset */
6181 offset = (((int32_t)insn) << 8) >> 8;
6182 /* offset * 4 + bit24 * 2 + (thumb bit) */
6183 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6184 /* pipeline offset */
6185 val += 4;
d9ba4830 6186 gen_bx_im(s, val);
9ee6e8bb
PB
6187 return;
6188 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6189 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6190 /* iWMMXt register transfer. */
6191 if (env->cp15.c15_cpar & (1 << 1))
6192 if (!disas_iwmmxt_insn(env, s, insn))
6193 return;
6194 }
6195 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6196 /* Coprocessor double register transfer. */
6197 } else if ((insn & 0x0f000010) == 0x0e000010) {
6198 /* Additional coprocessor register transfer. */
7997d92f 6199 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6200 uint32_t mask;
6201 uint32_t val;
6202 /* cps (privileged) */
6203 if (IS_USER(s))
6204 return;
6205 mask = val = 0;
6206 if (insn & (1 << 19)) {
6207 if (insn & (1 << 8))
6208 mask |= CPSR_A;
6209 if (insn & (1 << 7))
6210 mask |= CPSR_I;
6211 if (insn & (1 << 6))
6212 mask |= CPSR_F;
6213 if (insn & (1 << 18))
6214 val |= mask;
6215 }
7997d92f 6216 if (insn & (1 << 17)) {
9ee6e8bb
PB
6217 mask |= CPSR_M;
6218 val |= (insn & 0x1f);
6219 }
6220 if (mask) {
2fbac54b 6221 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6222 }
6223 return;
6224 }
6225 goto illegal_op;
6226 }
6227 if (cond != 0xe) {
6228 /* if not always execute, we generate a conditional jump to
6229 next instruction */
6230 s->condlabel = gen_new_label();
d9ba4830 6231 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6232 s->condjmp = 1;
6233 }
6234 if ((insn & 0x0f900000) == 0x03000000) {
6235 if ((insn & (1 << 21)) == 0) {
6236 ARCH(6T2);
6237 rd = (insn >> 12) & 0xf;
6238 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6239 if ((insn & (1 << 22)) == 0) {
6240 /* MOVW */
5e3f878a
PB
6241 tmp = new_tmp();
6242 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6243 } else {
6244 /* MOVT */
5e3f878a 6245 tmp = load_reg(s, rd);
86831435 6246 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6247 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6248 }
5e3f878a 6249 store_reg(s, rd, tmp);
9ee6e8bb
PB
6250 } else {
6251 if (((insn >> 12) & 0xf) != 0xf)
6252 goto illegal_op;
6253 if (((insn >> 16) & 0xf) == 0) {
6254 gen_nop_hint(s, insn & 0xff);
6255 } else {
6256 /* CPSR = immediate */
6257 val = insn & 0xff;
6258 shift = ((insn >> 8) & 0xf) * 2;
6259 if (shift)
6260 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6261 i = ((insn & (1 << 22)) != 0);
2fbac54b 6262 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6263 goto illegal_op;
6264 }
6265 }
6266 } else if ((insn & 0x0f900000) == 0x01000000
6267 && (insn & 0x00000090) != 0x00000090) {
6268 /* miscellaneous instructions */
6269 op1 = (insn >> 21) & 3;
6270 sh = (insn >> 4) & 0xf;
6271 rm = insn & 0xf;
6272 switch (sh) {
6273 case 0x0: /* move program status register */
6274 if (op1 & 1) {
6275 /* PSR = reg */
2fbac54b 6276 tmp = load_reg(s, rm);
9ee6e8bb 6277 i = ((op1 & 2) != 0);
2fbac54b 6278 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6279 goto illegal_op;
6280 } else {
6281 /* reg = PSR */
6282 rd = (insn >> 12) & 0xf;
6283 if (op1 & 2) {
6284 if (IS_USER(s))
6285 goto illegal_op;
d9ba4830 6286 tmp = load_cpu_field(spsr);
9ee6e8bb 6287 } else {
d9ba4830
PB
6288 tmp = new_tmp();
6289 gen_helper_cpsr_read(tmp);
9ee6e8bb 6290 }
d9ba4830 6291 store_reg(s, rd, tmp);
9ee6e8bb
PB
6292 }
6293 break;
6294 case 0x1:
6295 if (op1 == 1) {
6296 /* branch/exchange thumb (bx). */
d9ba4830
PB
6297 tmp = load_reg(s, rm);
6298 gen_bx(s, tmp);
9ee6e8bb
PB
6299 } else if (op1 == 3) {
6300 /* clz */
6301 rd = (insn >> 12) & 0xf;
1497c961
PB
6302 tmp = load_reg(s, rm);
6303 gen_helper_clz(tmp, tmp);
6304 store_reg(s, rd, tmp);
9ee6e8bb
PB
6305 } else {
6306 goto illegal_op;
6307 }
6308 break;
6309 case 0x2:
6310 if (op1 == 1) {
6311 ARCH(5J); /* bxj */
6312 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6313 tmp = load_reg(s, rm);
6314 gen_bx(s, tmp);
9ee6e8bb
PB
6315 } else {
6316 goto illegal_op;
6317 }
6318 break;
6319 case 0x3:
6320 if (op1 != 1)
6321 goto illegal_op;
6322
6323 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6324 tmp = load_reg(s, rm);
6325 tmp2 = new_tmp();
6326 tcg_gen_movi_i32(tmp2, s->pc);
6327 store_reg(s, 14, tmp2);
6328 gen_bx(s, tmp);
9ee6e8bb
PB
6329 break;
6330 case 0x5: /* saturating add/subtract */
6331 rd = (insn >> 12) & 0xf;
6332 rn = (insn >> 16) & 0xf;
b40d0353 6333 tmp = load_reg(s, rm);
5e3f878a 6334 tmp2 = load_reg(s, rn);
9ee6e8bb 6335 if (op1 & 2)
5e3f878a 6336 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6337 if (op1 & 1)
5e3f878a 6338 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6339 else
5e3f878a
PB
6340 gen_helper_add_saturate(tmp, tmp, tmp2);
6341 dead_tmp(tmp2);
6342 store_reg(s, rd, tmp);
9ee6e8bb
PB
6343 break;
6344 case 7: /* bkpt */
6345 gen_set_condexec(s);
5e3f878a 6346 gen_set_pc_im(s->pc - 4);
d9ba4830 6347 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6348 s->is_jmp = DISAS_JUMP;
6349 break;
6350 case 0x8: /* signed multiply */
6351 case 0xa:
6352 case 0xc:
6353 case 0xe:
6354 rs = (insn >> 8) & 0xf;
6355 rn = (insn >> 12) & 0xf;
6356 rd = (insn >> 16) & 0xf;
6357 if (op1 == 1) {
6358 /* (32 * 16) >> 16 */
5e3f878a
PB
6359 tmp = load_reg(s, rm);
6360 tmp2 = load_reg(s, rs);
9ee6e8bb 6361 if (sh & 4)
5e3f878a 6362 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6363 else
5e3f878a 6364 gen_sxth(tmp2);
a7812ae4
PB
6365 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6366 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6367 tmp = new_tmp();
a7812ae4 6368 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6369 tcg_temp_free_i64(tmp64);
9ee6e8bb 6370 if ((sh & 2) == 0) {
5e3f878a
PB
6371 tmp2 = load_reg(s, rn);
6372 gen_helper_add_setq(tmp, tmp, tmp2);
6373 dead_tmp(tmp2);
9ee6e8bb 6374 }
5e3f878a 6375 store_reg(s, rd, tmp);
9ee6e8bb
PB
6376 } else {
6377 /* 16 * 16 */
5e3f878a
PB
6378 tmp = load_reg(s, rm);
6379 tmp2 = load_reg(s, rs);
6380 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6381 dead_tmp(tmp2);
9ee6e8bb 6382 if (op1 == 2) {
a7812ae4
PB
6383 tmp64 = tcg_temp_new_i64();
6384 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6385 dead_tmp(tmp);
a7812ae4
PB
6386 gen_addq(s, tmp64, rn, rd);
6387 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6388 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6389 } else {
6390 if (op1 == 0) {
5e3f878a
PB
6391 tmp2 = load_reg(s, rn);
6392 gen_helper_add_setq(tmp, tmp, tmp2);
6393 dead_tmp(tmp2);
9ee6e8bb 6394 }
5e3f878a 6395 store_reg(s, rd, tmp);
9ee6e8bb
PB
6396 }
6397 }
6398 break;
6399 default:
6400 goto illegal_op;
6401 }
6402 } else if (((insn & 0x0e000000) == 0 &&
6403 (insn & 0x00000090) != 0x90) ||
6404 ((insn & 0x0e000000) == (1 << 25))) {
6405 int set_cc, logic_cc, shiftop;
6406
6407 op1 = (insn >> 21) & 0xf;
6408 set_cc = (insn >> 20) & 1;
6409 logic_cc = table_logic_cc[op1] & set_cc;
6410
6411 /* data processing instruction */
6412 if (insn & (1 << 25)) {
6413 /* immediate operand */
6414 val = insn & 0xff;
6415 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6416 if (shift) {
9ee6e8bb 6417 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6418 }
6419 tmp2 = new_tmp();
6420 tcg_gen_movi_i32(tmp2, val);
6421 if (logic_cc && shift) {
6422 gen_set_CF_bit31(tmp2);
6423 }
9ee6e8bb
PB
6424 } else {
6425 /* register */
6426 rm = (insn) & 0xf;
e9bb4aa9 6427 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6428 shiftop = (insn >> 5) & 3;
6429 if (!(insn & (1 << 4))) {
6430 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6431 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6432 } else {
6433 rs = (insn >> 8) & 0xf;
8984bd2e 6434 tmp = load_reg(s, rs);
e9bb4aa9 6435 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6436 }
6437 }
6438 if (op1 != 0x0f && op1 != 0x0d) {
6439 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6440 tmp = load_reg(s, rn);
6441 } else {
6442 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6443 }
6444 rd = (insn >> 12) & 0xf;
6445 switch(op1) {
6446 case 0x00:
e9bb4aa9
JR
6447 tcg_gen_and_i32(tmp, tmp, tmp2);
6448 if (logic_cc) {
6449 gen_logic_CC(tmp);
6450 }
21aeb343 6451 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6452 break;
6453 case 0x01:
e9bb4aa9
JR
6454 tcg_gen_xor_i32(tmp, tmp, tmp2);
6455 if (logic_cc) {
6456 gen_logic_CC(tmp);
6457 }
21aeb343 6458 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6459 break;
6460 case 0x02:
6461 if (set_cc && rd == 15) {
6462 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6463 if (IS_USER(s)) {
9ee6e8bb 6464 goto illegal_op;
e9bb4aa9
JR
6465 }
6466 gen_helper_sub_cc(tmp, tmp, tmp2);
6467 gen_exception_return(s, tmp);
9ee6e8bb 6468 } else {
e9bb4aa9
JR
6469 if (set_cc) {
6470 gen_helper_sub_cc(tmp, tmp, tmp2);
6471 } else {
6472 tcg_gen_sub_i32(tmp, tmp, tmp2);
6473 }
21aeb343 6474 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6475 }
6476 break;
6477 case 0x03:
e9bb4aa9
JR
6478 if (set_cc) {
6479 gen_helper_sub_cc(tmp, tmp2, tmp);
6480 } else {
6481 tcg_gen_sub_i32(tmp, tmp2, tmp);
6482 }
21aeb343 6483 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6484 break;
6485 case 0x04:
e9bb4aa9
JR
6486 if (set_cc) {
6487 gen_helper_add_cc(tmp, tmp, tmp2);
6488 } else {
6489 tcg_gen_add_i32(tmp, tmp, tmp2);
6490 }
21aeb343 6491 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6492 break;
6493 case 0x05:
e9bb4aa9
JR
6494 if (set_cc) {
6495 gen_helper_adc_cc(tmp, tmp, tmp2);
6496 } else {
6497 gen_add_carry(tmp, tmp, tmp2);
6498 }
21aeb343 6499 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6500 break;
6501 case 0x06:
e9bb4aa9
JR
6502 if (set_cc) {
6503 gen_helper_sbc_cc(tmp, tmp, tmp2);
6504 } else {
6505 gen_sub_carry(tmp, tmp, tmp2);
6506 }
21aeb343 6507 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6508 break;
6509 case 0x07:
e9bb4aa9
JR
6510 if (set_cc) {
6511 gen_helper_sbc_cc(tmp, tmp2, tmp);
6512 } else {
6513 gen_sub_carry(tmp, tmp2, tmp);
6514 }
21aeb343 6515 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6516 break;
6517 case 0x08:
6518 if (set_cc) {
e9bb4aa9
JR
6519 tcg_gen_and_i32(tmp, tmp, tmp2);
6520 gen_logic_CC(tmp);
9ee6e8bb 6521 }
e9bb4aa9 6522 dead_tmp(tmp);
9ee6e8bb
PB
6523 break;
6524 case 0x09:
6525 if (set_cc) {
e9bb4aa9
JR
6526 tcg_gen_xor_i32(tmp, tmp, tmp2);
6527 gen_logic_CC(tmp);
9ee6e8bb 6528 }
e9bb4aa9 6529 dead_tmp(tmp);
9ee6e8bb
PB
6530 break;
6531 case 0x0a:
6532 if (set_cc) {
e9bb4aa9 6533 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6534 }
e9bb4aa9 6535 dead_tmp(tmp);
9ee6e8bb
PB
6536 break;
6537 case 0x0b:
6538 if (set_cc) {
e9bb4aa9 6539 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6540 }
e9bb4aa9 6541 dead_tmp(tmp);
9ee6e8bb
PB
6542 break;
6543 case 0x0c:
e9bb4aa9
JR
6544 tcg_gen_or_i32(tmp, tmp, tmp2);
6545 if (logic_cc) {
6546 gen_logic_CC(tmp);
6547 }
21aeb343 6548 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6549 break;
6550 case 0x0d:
6551 if (logic_cc && rd == 15) {
6552 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6553 if (IS_USER(s)) {
9ee6e8bb 6554 goto illegal_op;
e9bb4aa9
JR
6555 }
6556 gen_exception_return(s, tmp2);
9ee6e8bb 6557 } else {
e9bb4aa9
JR
6558 if (logic_cc) {
6559 gen_logic_CC(tmp2);
6560 }
21aeb343 6561 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6562 }
6563 break;
6564 case 0x0e:
f669df27 6565 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6566 if (logic_cc) {
6567 gen_logic_CC(tmp);
6568 }
21aeb343 6569 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6570 break;
6571 default:
6572 case 0x0f:
e9bb4aa9
JR
6573 tcg_gen_not_i32(tmp2, tmp2);
6574 if (logic_cc) {
6575 gen_logic_CC(tmp2);
6576 }
21aeb343 6577 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6578 break;
6579 }
e9bb4aa9
JR
6580 if (op1 != 0x0f && op1 != 0x0d) {
6581 dead_tmp(tmp2);
6582 }
9ee6e8bb
PB
6583 } else {
6584 /* other instructions */
6585 op1 = (insn >> 24) & 0xf;
6586 switch(op1) {
6587 case 0x0:
6588 case 0x1:
6589 /* multiplies, extra load/stores */
6590 sh = (insn >> 5) & 3;
6591 if (sh == 0) {
6592 if (op1 == 0x0) {
6593 rd = (insn >> 16) & 0xf;
6594 rn = (insn >> 12) & 0xf;
6595 rs = (insn >> 8) & 0xf;
6596 rm = (insn) & 0xf;
6597 op1 = (insn >> 20) & 0xf;
6598 switch (op1) {
6599 case 0: case 1: case 2: case 3: case 6:
6600 /* 32 bit mul */
5e3f878a
PB
6601 tmp = load_reg(s, rs);
6602 tmp2 = load_reg(s, rm);
6603 tcg_gen_mul_i32(tmp, tmp, tmp2);
6604 dead_tmp(tmp2);
9ee6e8bb
PB
6605 if (insn & (1 << 22)) {
6606 /* Subtract (mls) */
6607 ARCH(6T2);
5e3f878a
PB
6608 tmp2 = load_reg(s, rn);
6609 tcg_gen_sub_i32(tmp, tmp2, tmp);
6610 dead_tmp(tmp2);
9ee6e8bb
PB
6611 } else if (insn & (1 << 21)) {
6612 /* Add */
5e3f878a
PB
6613 tmp2 = load_reg(s, rn);
6614 tcg_gen_add_i32(tmp, tmp, tmp2);
6615 dead_tmp(tmp2);
9ee6e8bb
PB
6616 }
6617 if (insn & (1 << 20))
5e3f878a
PB
6618 gen_logic_CC(tmp);
6619 store_reg(s, rd, tmp);
9ee6e8bb
PB
6620 break;
6621 default:
6622 /* 64 bit mul */
5e3f878a
PB
6623 tmp = load_reg(s, rs);
6624 tmp2 = load_reg(s, rm);
9ee6e8bb 6625 if (insn & (1 << 22))
a7812ae4 6626 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6627 else
a7812ae4 6628 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6629 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6630 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6631 if (!(insn & (1 << 23))) { /* double accumulate */
6632 ARCH(6);
a7812ae4
PB
6633 gen_addq_lo(s, tmp64, rn);
6634 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6635 }
6636 if (insn & (1 << 20))
a7812ae4
PB
6637 gen_logicq_cc(tmp64);
6638 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6639 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6640 break;
6641 }
6642 } else {
6643 rn = (insn >> 16) & 0xf;
6644 rd = (insn >> 12) & 0xf;
6645 if (insn & (1 << 23)) {
6646 /* load/store exclusive */
86753403
PB
6647 op1 = (insn >> 21) & 0x3;
6648 if (op1)
a47f43d2 6649 ARCH(6K);
86753403
PB
6650 else
6651 ARCH(6);
3174f8e9 6652 addr = tcg_temp_local_new_i32();
98a46317 6653 load_reg_var(s, addr, rn);
9ee6e8bb 6654 if (insn & (1 << 20)) {
86753403
PB
6655 switch (op1) {
6656 case 0: /* ldrex */
426f5abc 6657 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6658 break;
6659 case 1: /* ldrexd */
426f5abc 6660 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6661 break;
6662 case 2: /* ldrexb */
426f5abc 6663 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6664 break;
6665 case 3: /* ldrexh */
426f5abc 6666 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6667 break;
6668 default:
6669 abort();
6670 }
9ee6e8bb
PB
6671 } else {
6672 rm = insn & 0xf;
86753403
PB
6673 switch (op1) {
6674 case 0: /* strex */
426f5abc 6675 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6676 break;
6677 case 1: /* strexd */
502e64fe 6678 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6679 break;
6680 case 2: /* strexb */
426f5abc 6681 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6682 break;
6683 case 3: /* strexh */
426f5abc 6684 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6685 break;
6686 default:
6687 abort();
6688 }
9ee6e8bb 6689 }
3174f8e9 6690 tcg_temp_free(addr);
9ee6e8bb
PB
6691 } else {
6692 /* SWP instruction */
6693 rm = (insn) & 0xf;
6694
8984bd2e
PB
6695 /* ??? This is not really atomic. However we know
6696 we never have multiple CPUs running in parallel,
6697 so it is good enough. */
6698 addr = load_reg(s, rn);
6699 tmp = load_reg(s, rm);
9ee6e8bb 6700 if (insn & (1 << 22)) {
8984bd2e
PB
6701 tmp2 = gen_ld8u(addr, IS_USER(s));
6702 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6703 } else {
8984bd2e
PB
6704 tmp2 = gen_ld32(addr, IS_USER(s));
6705 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6706 }
8984bd2e
PB
6707 dead_tmp(addr);
6708 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6709 }
6710 }
6711 } else {
6712 int address_offset;
6713 int load;
6714 /* Misc load/store */
6715 rn = (insn >> 16) & 0xf;
6716 rd = (insn >> 12) & 0xf;
b0109805 6717 addr = load_reg(s, rn);
9ee6e8bb 6718 if (insn & (1 << 24))
b0109805 6719 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6720 address_offset = 0;
6721 if (insn & (1 << 20)) {
6722 /* load */
6723 switch(sh) {
6724 case 1:
b0109805 6725 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6726 break;
6727 case 2:
b0109805 6728 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6729 break;
6730 default:
6731 case 3:
b0109805 6732 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6733 break;
6734 }
6735 load = 1;
6736 } else if (sh & 2) {
6737 /* doubleword */
6738 if (sh & 1) {
6739 /* store */
b0109805
PB
6740 tmp = load_reg(s, rd);
6741 gen_st32(tmp, addr, IS_USER(s));
6742 tcg_gen_addi_i32(addr, addr, 4);
6743 tmp = load_reg(s, rd + 1);
6744 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6745 load = 0;
6746 } else {
6747 /* load */
b0109805
PB
6748 tmp = gen_ld32(addr, IS_USER(s));
6749 store_reg(s, rd, tmp);
6750 tcg_gen_addi_i32(addr, addr, 4);
6751 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6752 rd++;
6753 load = 1;
6754 }
6755 address_offset = -4;
6756 } else {
6757 /* store */
b0109805
PB
6758 tmp = load_reg(s, rd);
6759 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6760 load = 0;
6761 }
6762 /* Perform base writeback before the loaded value to
6763 ensure correct behavior with overlapping index registers.
6764               ldrd with base writeback is undefined if the
6765 destination and index registers overlap. */
6766 if (!(insn & (1 << 24))) {
b0109805
PB
6767 gen_add_datah_offset(s, insn, address_offset, addr);
6768 store_reg(s, rn, addr);
9ee6e8bb
PB
6769 } else if (insn & (1 << 21)) {
6770 if (address_offset)
b0109805
PB
6771 tcg_gen_addi_i32(addr, addr, address_offset);
6772 store_reg(s, rn, addr);
6773 } else {
6774 dead_tmp(addr);
9ee6e8bb
PB
6775 }
6776 if (load) {
6777 /* Complete the load. */
b0109805 6778 store_reg(s, rd, tmp);
9ee6e8bb
PB
6779 }
6780 }
6781 break;
6782 case 0x4:
6783 case 0x5:
6784 goto do_ldst;
6785 case 0x6:
6786 case 0x7:
6787 if (insn & (1 << 4)) {
6788 ARCH(6);
6789 /* Armv6 Media instructions. */
6790 rm = insn & 0xf;
6791 rn = (insn >> 16) & 0xf;
2c0262af 6792 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6793 rs = (insn >> 8) & 0xf;
6794 switch ((insn >> 23) & 3) {
6795 case 0: /* Parallel add/subtract. */
6796 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6797 tmp = load_reg(s, rn);
6798 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6799 sh = (insn >> 5) & 7;
6800 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6801 goto illegal_op;
6ddbc6e4
PB
6802 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6803 dead_tmp(tmp2);
6804 store_reg(s, rd, tmp);
9ee6e8bb
PB
6805 break;
6806 case 1:
6807 if ((insn & 0x00700020) == 0) {
6c95676b 6808 /* Halfword pack. */
3670669c
PB
6809 tmp = load_reg(s, rn);
6810 tmp2 = load_reg(s, rm);
9ee6e8bb 6811 shift = (insn >> 7) & 0x1f;
3670669c
PB
6812 if (insn & (1 << 6)) {
6813 /* pkhtb */
22478e79
AZ
6814 if (shift == 0)
6815 shift = 31;
6816 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6817 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6818 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6819 } else {
6820 /* pkhbt */
22478e79
AZ
6821 if (shift)
6822 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6823 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6824 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6825 }
6826 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6827 dead_tmp(tmp2);
3670669c 6828 store_reg(s, rd, tmp);
9ee6e8bb
PB
6829 } else if ((insn & 0x00200020) == 0x00200000) {
6830 /* [us]sat */
6ddbc6e4 6831 tmp = load_reg(s, rm);
9ee6e8bb
PB
6832 shift = (insn >> 7) & 0x1f;
6833 if (insn & (1 << 6)) {
6834 if (shift == 0)
6835 shift = 31;
6ddbc6e4 6836 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6837 } else {
6ddbc6e4 6838 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6839 }
6840 sh = (insn >> 16) & 0x1f;
6841 if (sh != 0) {
b75263d6 6842 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6843 if (insn & (1 << 22))
b75263d6 6844 gen_helper_usat(tmp, tmp, tmp2);
9ee6e8bb 6845 else
b75263d6
JR
6846 gen_helper_ssat(tmp, tmp, tmp2);
6847 tcg_temp_free_i32(tmp2);
9ee6e8bb 6848 }
6ddbc6e4 6849 store_reg(s, rd, tmp);
9ee6e8bb
PB
6850 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6851 /* [us]sat16 */
6ddbc6e4 6852 tmp = load_reg(s, rm);
9ee6e8bb
PB
6853 sh = (insn >> 16) & 0x1f;
6854 if (sh != 0) {
b75263d6 6855 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6856 if (insn & (1 << 22))
b75263d6 6857 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 6858 else
b75263d6
JR
6859 gen_helper_ssat16(tmp, tmp, tmp2);
6860 tcg_temp_free_i32(tmp2);
9ee6e8bb 6861 }
6ddbc6e4 6862 store_reg(s, rd, tmp);
9ee6e8bb
PB
6863 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6864 /* Select bytes. */
6ddbc6e4
PB
6865 tmp = load_reg(s, rn);
6866 tmp2 = load_reg(s, rm);
6867 tmp3 = new_tmp();
6868 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6869 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6870 dead_tmp(tmp3);
6871 dead_tmp(tmp2);
6872 store_reg(s, rd, tmp);
9ee6e8bb 6873 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6874 tmp = load_reg(s, rm);
9ee6e8bb
PB
6875 shift = (insn >> 10) & 3;
6876                        /* ??? In many cases it's not necessary to do a
6877 rotate, a shift is sufficient. */
6878 if (shift != 0)
f669df27 6879 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6880 op1 = (insn >> 20) & 7;
6881 switch (op1) {
5e3f878a
PB
6882 case 0: gen_sxtb16(tmp); break;
6883 case 2: gen_sxtb(tmp); break;
6884 case 3: gen_sxth(tmp); break;
6885 case 4: gen_uxtb16(tmp); break;
6886 case 6: gen_uxtb(tmp); break;
6887 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6888 default: goto illegal_op;
6889 }
6890 if (rn != 15) {
5e3f878a 6891 tmp2 = load_reg(s, rn);
9ee6e8bb 6892 if ((op1 & 3) == 0) {
5e3f878a 6893 gen_add16(tmp, tmp2);
9ee6e8bb 6894 } else {
5e3f878a
PB
6895 tcg_gen_add_i32(tmp, tmp, tmp2);
6896 dead_tmp(tmp2);
9ee6e8bb
PB
6897 }
6898 }
6c95676b 6899 store_reg(s, rd, tmp);
9ee6e8bb
PB
6900 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6901 /* rev */
b0109805 6902 tmp = load_reg(s, rm);
9ee6e8bb
PB
6903 if (insn & (1 << 22)) {
6904 if (insn & (1 << 7)) {
b0109805 6905 gen_revsh(tmp);
9ee6e8bb
PB
6906 } else {
6907 ARCH(6T2);
b0109805 6908 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6909 }
6910 } else {
6911 if (insn & (1 << 7))
b0109805 6912 gen_rev16(tmp);
9ee6e8bb 6913 else
66896cb8 6914 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6915 }
b0109805 6916 store_reg(s, rd, tmp);
9ee6e8bb
PB
6917 } else {
6918 goto illegal_op;
6919 }
6920 break;
6921 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6922 tmp = load_reg(s, rm);
6923 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6924 if (insn & (1 << 20)) {
6925 /* Signed multiply most significant [accumulate]. */
a7812ae4 6926 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6927 if (insn & (1 << 5))
a7812ae4
PB
6928 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6929 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6930 tmp = new_tmp();
a7812ae4 6931 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6932 tcg_temp_free_i64(tmp64);
955a7dd5
AZ
6933 if (rd != 15) {
6934 tmp2 = load_reg(s, rd);
9ee6e8bb 6935 if (insn & (1 << 6)) {
5e3f878a 6936 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6937 } else {
5e3f878a 6938 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6939 }
5e3f878a 6940 dead_tmp(tmp2);
9ee6e8bb 6941 }
955a7dd5 6942 store_reg(s, rn, tmp);
9ee6e8bb
PB
6943 } else {
6944 if (insn & (1 << 5))
5e3f878a
PB
6945 gen_swap_half(tmp2);
6946 gen_smul_dual(tmp, tmp2);
6947 /* This addition cannot overflow. */
6948 if (insn & (1 << 6)) {
6949 tcg_gen_sub_i32(tmp, tmp, tmp2);
6950 } else {
6951 tcg_gen_add_i32(tmp, tmp, tmp2);
6952 }
6953 dead_tmp(tmp2);
9ee6e8bb 6954 if (insn & (1 << 22)) {
5e3f878a 6955 /* smlald, smlsld */
a7812ae4
PB
6956 tmp64 = tcg_temp_new_i64();
6957 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6958 dead_tmp(tmp);
a7812ae4
PB
6959 gen_addq(s, tmp64, rd, rn);
6960 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 6961 tcg_temp_free_i64(tmp64);
9ee6e8bb 6962 } else {
5e3f878a 6963 /* smuad, smusd, smlad, smlsd */
22478e79 6964 if (rd != 15)
9ee6e8bb 6965 {
22478e79 6966 tmp2 = load_reg(s, rd);
5e3f878a
PB
6967 gen_helper_add_setq(tmp, tmp, tmp2);
6968 dead_tmp(tmp2);
9ee6e8bb 6969 }
22478e79 6970 store_reg(s, rn, tmp);
9ee6e8bb
PB
6971 }
6972 }
6973 break;
6974 case 3:
6975 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6976 switch (op1) {
6977 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6978 ARCH(6);
6979 tmp = load_reg(s, rm);
6980 tmp2 = load_reg(s, rs);
6981 gen_helper_usad8(tmp, tmp, tmp2);
6982 dead_tmp(tmp2);
ded9d295
AZ
6983 if (rd != 15) {
6984 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6985 tcg_gen_add_i32(tmp, tmp, tmp2);
6986 dead_tmp(tmp2);
9ee6e8bb 6987 }
ded9d295 6988 store_reg(s, rn, tmp);
9ee6e8bb
PB
6989 break;
6990 case 0x20: case 0x24: case 0x28: case 0x2c:
6991 /* Bitfield insert/clear. */
6992 ARCH(6T2);
6993 shift = (insn >> 7) & 0x1f;
6994 i = (insn >> 16) & 0x1f;
6995 i = i + 1 - shift;
6996 if (rm == 15) {
5e3f878a
PB
6997 tmp = new_tmp();
6998 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6999 } else {
5e3f878a 7000 tmp = load_reg(s, rm);
9ee6e8bb
PB
7001 }
7002 if (i != 32) {
5e3f878a 7003 tmp2 = load_reg(s, rd);
8f8e3aa4 7004 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 7005 dead_tmp(tmp2);
9ee6e8bb 7006 }
5e3f878a 7007 store_reg(s, rd, tmp);
9ee6e8bb
PB
7008 break;
7009 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7010 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7011 ARCH(6T2);
5e3f878a 7012 tmp = load_reg(s, rm);
9ee6e8bb
PB
7013 shift = (insn >> 7) & 0x1f;
7014 i = ((insn >> 16) & 0x1f) + 1;
7015 if (shift + i > 32)
7016 goto illegal_op;
7017 if (i < 32) {
7018 if (op1 & 0x20) {
5e3f878a 7019 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7020 } else {
5e3f878a 7021 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7022 }
7023 }
5e3f878a 7024 store_reg(s, rd, tmp);
9ee6e8bb
PB
7025 break;
7026 default:
7027 goto illegal_op;
7028 }
7029 break;
7030 }
7031 break;
7032 }
7033 do_ldst:
7034 /* Check for undefined extension instructions
7035      * per the ARM Bible, i.e.:
7036 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7037 */
7038 sh = (0xf << 20) | (0xf << 4);
7039 if (op1 == 0x7 && ((insn & sh) == sh))
7040 {
7041 goto illegal_op;
7042 }
7043 /* load/store byte/word */
7044 rn = (insn >> 16) & 0xf;
7045 rd = (insn >> 12) & 0xf;
b0109805 7046 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7047 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7048 if (insn & (1 << 24))
b0109805 7049 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7050 if (insn & (1 << 20)) {
7051 /* load */
9ee6e8bb 7052 if (insn & (1 << 22)) {
b0109805 7053 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7054 } else {
b0109805 7055 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7056 }
9ee6e8bb
PB
7057 } else {
7058 /* store */
b0109805 7059 tmp = load_reg(s, rd);
9ee6e8bb 7060 if (insn & (1 << 22))
b0109805 7061 gen_st8(tmp, tmp2, i);
9ee6e8bb 7062 else
b0109805 7063 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7064 }
7065 if (!(insn & (1 << 24))) {
b0109805
PB
7066 gen_add_data_offset(s, insn, tmp2);
7067 store_reg(s, rn, tmp2);
7068 } else if (insn & (1 << 21)) {
7069 store_reg(s, rn, tmp2);
7070 } else {
7071 dead_tmp(tmp2);
9ee6e8bb
PB
7072 }
7073 if (insn & (1 << 20)) {
7074 /* Complete the load. */
7075 if (rd == 15)
b0109805 7076 gen_bx(s, tmp);
9ee6e8bb 7077 else
b0109805 7078 store_reg(s, rd, tmp);
9ee6e8bb
PB
7079 }
7080 break;
7081 case 0x08:
7082 case 0x09:
7083 {
7084 int j, n, user, loaded_base;
b0109805 7085 TCGv loaded_var;
9ee6e8bb
PB
7086 /* load/store multiple words */
7087 /* XXX: store correct base if write back */
7088 user = 0;
7089 if (insn & (1 << 22)) {
7090 if (IS_USER(s))
7091 goto illegal_op; /* only usable in supervisor mode */
7092
7093 if ((insn & (1 << 15)) == 0)
7094 user = 1;
7095 }
7096 rn = (insn >> 16) & 0xf;
b0109805 7097 addr = load_reg(s, rn);
9ee6e8bb
PB
7098
7099 /* compute total size */
7100 loaded_base = 0;
a50f5b91 7101 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7102 n = 0;
7103 for(i=0;i<16;i++) {
7104 if (insn & (1 << i))
7105 n++;
7106 }
7107 /* XXX: test invalid n == 0 case ? */
7108 if (insn & (1 << 23)) {
7109 if (insn & (1 << 24)) {
7110 /* pre increment */
b0109805 7111 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7112 } else {
7113 /* post increment */
7114 }
7115 } else {
7116 if (insn & (1 << 24)) {
7117 /* pre decrement */
b0109805 7118 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7119 } else {
7120 /* post decrement */
7121 if (n != 1)
b0109805 7122 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7123 }
7124 }
7125 j = 0;
7126 for(i=0;i<16;i++) {
7127 if (insn & (1 << i)) {
7128 if (insn & (1 << 20)) {
7129 /* load */
b0109805 7130 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7131 if (i == 15) {
b0109805 7132 gen_bx(s, tmp);
9ee6e8bb 7133 } else if (user) {
b75263d6
JR
7134 tmp2 = tcg_const_i32(i);
7135 gen_helper_set_user_reg(tmp2, tmp);
7136 tcg_temp_free_i32(tmp2);
b0109805 7137 dead_tmp(tmp);
9ee6e8bb 7138 } else if (i == rn) {
b0109805 7139 loaded_var = tmp;
9ee6e8bb
PB
7140 loaded_base = 1;
7141 } else {
b0109805 7142 store_reg(s, i, tmp);
9ee6e8bb
PB
7143 }
7144 } else {
7145 /* store */
7146 if (i == 15) {
7147 /* special case: r15 = PC + 8 */
7148 val = (long)s->pc + 4;
b0109805
PB
7149 tmp = new_tmp();
7150 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7151 } else if (user) {
b0109805 7152 tmp = new_tmp();
b75263d6
JR
7153 tmp2 = tcg_const_i32(i);
7154 gen_helper_get_user_reg(tmp, tmp2);
7155 tcg_temp_free_i32(tmp2);
9ee6e8bb 7156 } else {
b0109805 7157 tmp = load_reg(s, i);
9ee6e8bb 7158 }
b0109805 7159 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7160 }
7161 j++;
7162 /* no need to add after the last transfer */
7163 if (j != n)
b0109805 7164 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7165 }
7166 }
7167 if (insn & (1 << 21)) {
7168 /* write back */
7169 if (insn & (1 << 23)) {
7170 if (insn & (1 << 24)) {
7171 /* pre increment */
7172 } else {
7173 /* post increment */
b0109805 7174 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7175 }
7176 } else {
7177 if (insn & (1 << 24)) {
7178 /* pre decrement */
7179 if (n != 1)
b0109805 7180 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7181 } else {
7182 /* post decrement */
b0109805 7183 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7184 }
7185 }
b0109805
PB
7186 store_reg(s, rn, addr);
7187 } else {
7188 dead_tmp(addr);
9ee6e8bb
PB
7189 }
7190 if (loaded_base) {
b0109805 7191 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7192 }
7193 if ((insn & (1 << 22)) && !user) {
7194 /* Restore CPSR from SPSR. */
d9ba4830
PB
7195 tmp = load_cpu_field(spsr);
7196 gen_set_cpsr(tmp, 0xffffffff);
7197 dead_tmp(tmp);
9ee6e8bb
PB
7198 s->is_jmp = DISAS_UPDATE;
7199 }
7200 }
7201 break;
7202 case 0xa:
7203 case 0xb:
7204 {
7205 int32_t offset;
7206
7207 /* branch (and link) */
7208 val = (int32_t)s->pc;
7209 if (insn & (1 << 24)) {
5e3f878a
PB
7210 tmp = new_tmp();
7211 tcg_gen_movi_i32(tmp, val);
7212 store_reg(s, 14, tmp);
9ee6e8bb
PB
7213 }
7214 offset = (((int32_t)insn << 8) >> 8);
7215 val += (offset << 2) + 4;
7216 gen_jmp(s, val);
7217 }
7218 break;
7219 case 0xc:
7220 case 0xd:
7221 case 0xe:
7222 /* Coprocessor. */
7223 if (disas_coproc_insn(env, s, insn))
7224 goto illegal_op;
7225 break;
7226 case 0xf:
7227 /* swi */
5e3f878a 7228 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7229 s->is_jmp = DISAS_SWI;
7230 break;
7231 default:
7232 illegal_op:
7233 gen_set_condexec(s);
5e3f878a 7234 gen_set_pc_im(s->pc - 4);
d9ba4830 7235 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7236 s->is_jmp = DISAS_JUMP;
7237 break;
7238 }
7239 }
7240}
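/* Sketch of the data-processing immediate expansion used twice in
 * disas_arm_insn above (the MSR-immediate path and the immediate-operand
 * decode): an 8-bit value rotated right by twice the 4-bit rotate field.
 * Hypothetical helper; assumes <stdint.h>.
 */
static uint32_t arm_expand_imm_sketch(uint32_t insn)
{
    uint32_t val   = insn & 0xff;              /* 8-bit immediate */
    uint32_t shift = ((insn >> 8) & 0xf) * 2;  /* rotate amount: 0..30 */
    if (shift) {
        val = (val >> shift) | (val << (32 - shift));
    }
    return val;
}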
7241
7242/* Return true if this is a Thumb-2 logical op. */
7243static int
7244thumb2_logic_op(int op)
7245{
7246 return (op < 8);
7247}
7248
7249/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7250 then set condition code flags based on the result of the operation.
7251 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7252 to the high bit of T1.
7253 Returns zero if the opcode is valid. */
7254
7255static int
396e467c 7256gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7257{
7258 int logic_cc;
7259
7260 logic_cc = 0;
7261 switch (op) {
7262 case 0: /* and */
396e467c 7263 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7264 logic_cc = conds;
7265 break;
7266 case 1: /* bic */
f669df27 7267 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7268 logic_cc = conds;
7269 break;
7270 case 2: /* orr */
396e467c 7271 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7272 logic_cc = conds;
7273 break;
7274 case 3: /* orn */
396e467c
FN
7275 tcg_gen_not_i32(t1, t1);
7276 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7277 logic_cc = conds;
7278 break;
7279 case 4: /* eor */
396e467c 7280 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7281 logic_cc = conds;
7282 break;
7283 case 8: /* add */
7284 if (conds)
396e467c 7285 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7286 else
396e467c 7287 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7288 break;
7289 case 10: /* adc */
7290 if (conds)
396e467c 7291 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7292 else
396e467c 7293 gen_adc(t0, t1);
9ee6e8bb
PB
7294 break;
7295 case 11: /* sbc */
7296 if (conds)
396e467c 7297 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7298 else
396e467c 7299 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7300 break;
7301 case 13: /* sub */
7302 if (conds)
396e467c 7303 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7304 else
396e467c 7305 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7306 break;
7307 case 14: /* rsb */
7308 if (conds)
396e467c 7309 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7310 else
396e467c 7311 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7312 break;
7313 default: /* 5, 6, 7, 9, 12, 15. */
7314 return 1;
7315 }
7316 if (logic_cc) {
396e467c 7317 gen_logic_CC(t0);
9ee6e8bb 7318 if (shifter_out)
396e467c 7319 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7320 }
7321 return 0;
7322}
7323
7324/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7325 is not legal. */
7326static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7327{
b0109805 7328 uint32_t insn, imm, shift, offset;
9ee6e8bb 7329 uint32_t rd, rn, rm, rs;
b26eefb6 7330 TCGv tmp;
6ddbc6e4
PB
7331 TCGv tmp2;
7332 TCGv tmp3;
b0109805 7333 TCGv addr;
a7812ae4 7334 TCGv_i64 tmp64;
9ee6e8bb
PB
7335 int op;
7336 int shiftop;
7337 int conds;
7338 int logic_cc;
7339
7340 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7341 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7342 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7343 16-bit instructions to get correct prefetch abort behavior. */
7344 insn = insn_hw1;
7345 if ((insn & (1 << 12)) == 0) {
7346 /* Second half of blx. */
7347 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7348 tmp = load_reg(s, 14);
7349 tcg_gen_addi_i32(tmp, tmp, offset);
7350 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7351
d9ba4830 7352 tmp2 = new_tmp();
b0109805 7353 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7354 store_reg(s, 14, tmp2);
7355 gen_bx(s, tmp);
9ee6e8bb
PB
7356 return 0;
7357 }
7358 if (insn & (1 << 11)) {
7359 /* Second half of bl. */
7360 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7361 tmp = load_reg(s, 14);
6a0d8a1d 7362 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7363
d9ba4830 7364 tmp2 = new_tmp();
b0109805 7365 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7366 store_reg(s, 14, tmp2);
7367 gen_bx(s, tmp);
9ee6e8bb
PB
7368 return 0;
7369 }
7370 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7371 /* Instruction spans a page boundary. Implement it as two
7372               16-bit instructions in case the second half causes a
7373 prefetch abort. */
7374 offset = ((int32_t)insn << 21) >> 9;
396e467c 7375 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7376 return 0;
7377 }
7378 /* Fall through to 32-bit decode. */
7379 }
7380
7381 insn = lduw_code(s->pc);
7382 s->pc += 2;
7383 insn |= (uint32_t)insn_hw1 << 16;
7384
7385 if ((insn & 0xf800e800) != 0xf000e800) {
7386 ARCH(6T2);
7387 }
7388
7389 rn = (insn >> 16) & 0xf;
7390 rs = (insn >> 12) & 0xf;
7391 rd = (insn >> 8) & 0xf;
7392 rm = insn & 0xf;
7393 switch ((insn >> 25) & 0xf) {
7394 case 0: case 1: case 2: case 3:
7395 /* 16-bit instructions. Should never happen. */
7396 abort();
7397 case 4:
7398 if (insn & (1 << 22)) {
7399 /* Other load/store, table branch. */
7400 if (insn & 0x01200000) {
7401 /* Load/store doubleword. */
7402 if (rn == 15) {
b0109805
PB
7403 addr = new_tmp();
7404 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7405 } else {
b0109805 7406 addr = load_reg(s, rn);
9ee6e8bb
PB
7407 }
7408 offset = (insn & 0xff) * 4;
7409 if ((insn & (1 << 23)) == 0)
7410 offset = -offset;
7411 if (insn & (1 << 24)) {
b0109805 7412 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7413 offset = 0;
7414 }
7415 if (insn & (1 << 20)) {
7416 /* ldrd */
b0109805
PB
7417 tmp = gen_ld32(addr, IS_USER(s));
7418 store_reg(s, rs, tmp);
7419 tcg_gen_addi_i32(addr, addr, 4);
7420 tmp = gen_ld32(addr, IS_USER(s));
7421 store_reg(s, rd, tmp);
9ee6e8bb
PB
7422 } else {
7423 /* strd */
b0109805
PB
7424 tmp = load_reg(s, rs);
7425 gen_st32(tmp, addr, IS_USER(s));
7426 tcg_gen_addi_i32(addr, addr, 4);
7427 tmp = load_reg(s, rd);
7428 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7429 }
7430 if (insn & (1 << 21)) {
7431 /* Base writeback. */
7432 if (rn == 15)
7433 goto illegal_op;
b0109805
PB
7434 tcg_gen_addi_i32(addr, addr, offset - 4);
7435 store_reg(s, rn, addr);
7436 } else {
7437 dead_tmp(addr);
9ee6e8bb
PB
7438 }
7439 } else if ((insn & (1 << 23)) == 0) {
7440 /* Load/store exclusive word. */
3174f8e9 7441 addr = tcg_temp_local_new();
98a46317 7442 load_reg_var(s, addr, rn);
426f5abc 7443 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7444 if (insn & (1 << 20)) {
426f5abc 7445 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7446 } else {
426f5abc 7447 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7448 }
3174f8e9 7449 tcg_temp_free(addr);
9ee6e8bb
PB
7450 } else if ((insn & (1 << 6)) == 0) {
7451 /* Table Branch. */
7452 if (rn == 15) {
b0109805
PB
7453 addr = new_tmp();
7454 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7455 } else {
b0109805 7456 addr = load_reg(s, rn);
9ee6e8bb 7457 }
b26eefb6 7458 tmp = load_reg(s, rm);
b0109805 7459 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7460 if (insn & (1 << 4)) {
7461 /* tbh */
b0109805 7462 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7463 dead_tmp(tmp);
b0109805 7464 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7465 } else { /* tbb */
b26eefb6 7466 dead_tmp(tmp);
b0109805 7467 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7468 }
b0109805
PB
7469 dead_tmp(addr);
7470 tcg_gen_shli_i32(tmp, tmp, 1);
7471 tcg_gen_addi_i32(tmp, tmp, s->pc);
7472 store_reg(s, 15, tmp);
9ee6e8bb
PB
7473 } else {
7474 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7475 ARCH(7);
9ee6e8bb 7476 op = (insn >> 4) & 0x3;
426f5abc
PB
7477 if (op == 2) {
7478 goto illegal_op;
7479 }
3174f8e9 7480 addr = tcg_temp_local_new();
98a46317 7481 load_reg_var(s, addr, rn);
9ee6e8bb 7482 if (insn & (1 << 20)) {
426f5abc 7483 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7484 } else {
426f5abc 7485 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7486 }
3174f8e9 7487 tcg_temp_free(addr);
9ee6e8bb
PB
7488 }
7489 } else {
7490 /* Load/store multiple, RFE, SRS. */
7491 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7492 /* Not available in user mode. */
b0109805 7493 if (IS_USER(s))
9ee6e8bb
PB
7494 goto illegal_op;
7495 if (insn & (1 << 20)) {
7496 /* rfe */
b0109805
PB
7497 addr = load_reg(s, rn);
7498 if ((insn & (1 << 24)) == 0)
7499 tcg_gen_addi_i32(addr, addr, -8);
7500 /* Load PC into tmp and CPSR into tmp2. */
7501 tmp = gen_ld32(addr, 0);
7502 tcg_gen_addi_i32(addr, addr, 4);
7503 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7504 if (insn & (1 << 21)) {
7505 /* Base writeback. */
b0109805
PB
7506 if (insn & (1 << 24)) {
7507 tcg_gen_addi_i32(addr, addr, 4);
7508 } else {
7509 tcg_gen_addi_i32(addr, addr, -4);
7510 }
7511 store_reg(s, rn, addr);
7512 } else {
7513 dead_tmp(addr);
9ee6e8bb 7514 }
b0109805 7515 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7516 } else {
7517 /* srs */
7518 op = (insn & 0x1f);
7519 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7520 addr = load_reg(s, 13);
9ee6e8bb 7521 } else {
b0109805 7522 addr = new_tmp();
b75263d6
JR
7523 tmp = tcg_const_i32(op);
7524 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7525 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7526 }
7527 if ((insn & (1 << 24)) == 0) {
b0109805 7528 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7529 }
b0109805
PB
7530 tmp = load_reg(s, 14);
7531 gen_st32(tmp, addr, 0);
7532 tcg_gen_addi_i32(addr, addr, 4);
7533 tmp = new_tmp();
7534 gen_helper_cpsr_read(tmp);
7535 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7536 if (insn & (1 << 21)) {
7537 if ((insn & (1 << 24)) == 0) {
b0109805 7538 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7539 } else {
b0109805 7540 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7541 }
7542 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7543 store_reg(s, 13, addr);
9ee6e8bb 7544 } else {
b75263d6
JR
7545 tmp = tcg_const_i32(op);
7546 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7547 tcg_temp_free_i32(tmp);
9ee6e8bb 7548 }
b0109805
PB
7549 } else {
7550 dead_tmp(addr);
9ee6e8bb
PB
7551 }
7552 }
7553 } else {
7554 int i;
7555 /* Load/store multiple. */
b0109805 7556 addr = load_reg(s, rn);
9ee6e8bb
PB
7557 offset = 0;
7558 for (i = 0; i < 16; i++) {
7559 if (insn & (1 << i))
7560 offset += 4;
7561 }
7562 if (insn & (1 << 24)) {
b0109805 7563 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7564 }
7565
7566 for (i = 0; i < 16; i++) {
7567 if ((insn & (1 << i)) == 0)
7568 continue;
7569 if (insn & (1 << 20)) {
7570 /* Load. */
b0109805 7571 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7572 if (i == 15) {
b0109805 7573 gen_bx(s, tmp);
9ee6e8bb 7574 } else {
b0109805 7575 store_reg(s, i, tmp);
9ee6e8bb
PB
7576 }
7577 } else {
7578 /* Store. */
b0109805
PB
7579 tmp = load_reg(s, i);
7580 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7581 }
b0109805 7582 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7583 }
7584 if (insn & (1 << 21)) {
7585 /* Base register writeback. */
7586 if (insn & (1 << 24)) {
b0109805 7587 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7588 }
7589 /* Fault if writeback register is in register list. */
7590 if (insn & (1 << rn))
7591 goto illegal_op;
b0109805
PB
7592 store_reg(s, rn, addr);
7593 } else {
7594 dead_tmp(addr);
9ee6e8bb
PB
7595 }
7596 }
7597 }
7598 break;
7599 case 5: /* Data processing register constant shift. */
3174f8e9
FN
7600 if (rn == 15) {
7601 tmp = new_tmp();
7602 tcg_gen_movi_i32(tmp, 0);
7603 } else {
7604 tmp = load_reg(s, rn);
7605 }
7606 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7607 op = (insn >> 21) & 0xf;
7608 shiftop = (insn >> 4) & 3;
7609 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7610 conds = (insn & (1 << 20)) != 0;
7611 logic_cc = (conds && thumb2_logic_op(op));
3174f8e9
FN
7612 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7613 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9ee6e8bb 7614 goto illegal_op;
3174f8e9
FN
7615 dead_tmp(tmp2);
7616 if (rd != 15) {
7617 store_reg(s, rd, tmp);
7618 } else {
7619 dead_tmp(tmp);
7620 }
9ee6e8bb
PB
7621 break;
7622 case 13: /* Misc data processing. */
7623 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7624 if (op < 4 && (insn & 0xf000) != 0xf000)
7625 goto illegal_op;
7626 switch (op) {
7627 case 0: /* Register controlled shift. */
8984bd2e
PB
7628 tmp = load_reg(s, rn);
7629 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7630 if ((insn & 0x70) != 0)
7631 goto illegal_op;
7632 op = (insn >> 21) & 3;
8984bd2e
PB
7633 logic_cc = (insn & (1 << 20)) != 0;
7634 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7635 if (logic_cc)
7636 gen_logic_CC(tmp);
21aeb343 7637 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7638 break;
7639 case 1: /* Sign/zero extend. */
5e3f878a 7640 tmp = load_reg(s, rm);
9ee6e8bb
PB
7641 shift = (insn >> 4) & 3;
7642 /* ??? In many cases it's not necessary to do a
7643 rotate, a shift is sufficient. */
7644 if (shift != 0)
f669df27 7645 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7646 op = (insn >> 20) & 7;
7647 switch (op) {
5e3f878a
PB
7648 case 0: gen_sxth(tmp); break;
7649 case 1: gen_uxth(tmp); break;
7650 case 2: gen_sxtb16(tmp); break;
7651 case 3: gen_uxtb16(tmp); break;
7652 case 4: gen_sxtb(tmp); break;
7653 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7654 default: goto illegal_op;
7655 }
7656 if (rn != 15) {
5e3f878a 7657 tmp2 = load_reg(s, rn);
9ee6e8bb 7658 if ((op >> 1) == 1) {
5e3f878a 7659 gen_add16(tmp, tmp2);
9ee6e8bb 7660 } else {
5e3f878a
PB
7661 tcg_gen_add_i32(tmp, tmp, tmp2);
7662 dead_tmp(tmp2);
9ee6e8bb
PB
7663 }
7664 }
5e3f878a 7665 store_reg(s, rd, tmp);
9ee6e8bb
PB
7666 break;
7667 case 2: /* SIMD add/subtract. */
7668 op = (insn >> 20) & 7;
7669 shift = (insn >> 4) & 7;
7670 if ((op & 3) == 3 || (shift & 3) == 3)
7671 goto illegal_op;
6ddbc6e4
PB
7672 tmp = load_reg(s, rn);
7673 tmp2 = load_reg(s, rm);
7674 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7675 dead_tmp(tmp2);
7676 store_reg(s, rd, tmp);
9ee6e8bb
PB
7677 break;
7678 case 3: /* Other data processing. */
7679 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7680 if (op < 4) {
7681 /* Saturating add/subtract. */
d9ba4830
PB
7682 tmp = load_reg(s, rn);
7683 tmp2 = load_reg(s, rm);
9ee6e8bb 7684 if (op & 2)
d9ba4830 7685 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7686 if (op & 1)
d9ba4830 7687 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7688 else
d9ba4830
PB
7689 gen_helper_add_saturate(tmp, tmp, tmp2);
7690 dead_tmp(tmp2);
9ee6e8bb 7691 } else {
d9ba4830 7692 tmp = load_reg(s, rn);
9ee6e8bb
PB
7693 switch (op) {
7694 case 0x0a: /* rbit */
d9ba4830 7695 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7696 break;
7697 case 0x08: /* rev */
66896cb8 7698 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7699 break;
7700 case 0x09: /* rev16 */
d9ba4830 7701 gen_rev16(tmp);
9ee6e8bb
PB
7702 break;
7703 case 0x0b: /* revsh */
d9ba4830 7704 gen_revsh(tmp);
9ee6e8bb
PB
7705 break;
7706 case 0x10: /* sel */
d9ba4830 7707 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7708 tmp3 = new_tmp();
7709 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7710 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7711 dead_tmp(tmp3);
d9ba4830 7712 dead_tmp(tmp2);
9ee6e8bb
PB
7713 break;
7714 case 0x18: /* clz */
d9ba4830 7715 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7716 break;
7717 default:
7718 goto illegal_op;
7719 }
7720 }
d9ba4830 7721 store_reg(s, rd, tmp);
9ee6e8bb
PB
7722 break;
7723 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7724 op = (insn >> 4) & 0xf;
d9ba4830
PB
7725 tmp = load_reg(s, rn);
7726 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7727 switch ((insn >> 20) & 7) {
7728 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7729 tcg_gen_mul_i32(tmp, tmp, tmp2);
7730 dead_tmp(tmp2);
9ee6e8bb 7731 if (rs != 15) {
d9ba4830 7732 tmp2 = load_reg(s, rs);
9ee6e8bb 7733 if (op)
d9ba4830 7734 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7735 else
d9ba4830
PB
7736 tcg_gen_add_i32(tmp, tmp, tmp2);
7737 dead_tmp(tmp2);
9ee6e8bb 7738 }
9ee6e8bb
PB
7739 break;
7740 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7741 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7742 dead_tmp(tmp2);
9ee6e8bb 7743 if (rs != 15) {
d9ba4830
PB
7744 tmp2 = load_reg(s, rs);
7745 gen_helper_add_setq(tmp, tmp, tmp2);
7746 dead_tmp(tmp2);
9ee6e8bb 7747 }
9ee6e8bb
PB
7748 break;
7749 case 2: /* Dual multiply add. */
7750 case 4: /* Dual multiply subtract. */
7751 if (op)
d9ba4830
PB
7752 gen_swap_half(tmp2);
7753 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7754 /* This addition cannot overflow. */
7755 if (insn & (1 << 22)) {
d9ba4830 7756 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7757 } else {
d9ba4830 7758 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7759 }
d9ba4830 7760 dead_tmp(tmp2);
9ee6e8bb
PB
7761 if (rs != 15)
7762 {
d9ba4830
PB
7763 tmp2 = load_reg(s, rs);
7764 gen_helper_add_setq(tmp, tmp, tmp2);
7765 dead_tmp(tmp2);
9ee6e8bb 7766 }
9ee6e8bb
PB
7767 break;
7768 case 3: /* 32 * 16 -> 32msb */
7769 if (op)
d9ba4830 7770 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7771 else
d9ba4830 7772 gen_sxth(tmp2);
a7812ae4
PB
7773 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7774 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7775 tmp = new_tmp();
a7812ae4 7776 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7777 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7778 if (rs != 15)
7779 {
d9ba4830
PB
7780 tmp2 = load_reg(s, rs);
7781 gen_helper_add_setq(tmp, tmp, tmp2);
7782 dead_tmp(tmp2);
9ee6e8bb 7783 }
9ee6e8bb
PB
7784 break;
7785 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7786 gen_imull(tmp, tmp2);
7787 if (insn & (1 << 5)) {
7788 gen_roundqd(tmp, tmp2);
7789 dead_tmp(tmp2);
7790 } else {
7791 dead_tmp(tmp);
7792 tmp = tmp2;
7793 }
9ee6e8bb 7794 if (rs != 15) {
d9ba4830 7795 tmp2 = load_reg(s, rs);
9ee6e8bb 7796 if (insn & (1 << 21)) {
d9ba4830 7797 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7798 } else {
d9ba4830 7799 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7800 }
d9ba4830 7801 dead_tmp(tmp2);
2c0262af 7802 }
9ee6e8bb
PB
7803 break;
7804 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7805 gen_helper_usad8(tmp, tmp, tmp2);
7806 dead_tmp(tmp2);
9ee6e8bb 7807 if (rs != 15) {
d9ba4830
PB
7808 tmp2 = load_reg(s, rs);
7809 tcg_gen_add_i32(tmp, tmp, tmp2);
7810 dead_tmp(tmp2);
5fd46862 7811 }
9ee6e8bb 7812 break;
2c0262af 7813 }
d9ba4830 7814 store_reg(s, rd, tmp);
2c0262af 7815 break;
9ee6e8bb
PB
7816 case 6: case 7: /* 64-bit multiply, Divide. */
7817 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7818 tmp = load_reg(s, rn);
7819 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7820 if ((op & 0x50) == 0x10) {
7821 /* sdiv, udiv */
7822 if (!arm_feature(env, ARM_FEATURE_DIV))
7823 goto illegal_op;
7824 if (op & 0x20)
5e3f878a 7825 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7826 else
5e3f878a
PB
7827 gen_helper_sdiv(tmp, tmp, tmp2);
7828 dead_tmp(tmp2);
7829 store_reg(s, rd, tmp);
9ee6e8bb
PB
7830 } else if ((op & 0xe) == 0xc) {
7831 /* Dual multiply accumulate long. */
7832 if (op & 1)
5e3f878a
PB
7833 gen_swap_half(tmp2);
7834 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7835 if (op & 0x10) {
5e3f878a 7836 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7837 } else {
5e3f878a 7838 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7839 }
5e3f878a 7840 dead_tmp(tmp2);
a7812ae4
PB
7841 /* BUGFIX */
7842 tmp64 = tcg_temp_new_i64();
7843 tcg_gen_ext_i32_i64(tmp64, tmp);
7844 dead_tmp(tmp);
7845 gen_addq(s, tmp64, rs, rd);
7846 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7847 tcg_temp_free_i64(tmp64);
2c0262af 7848 } else {
9ee6e8bb
PB
7849 if (op & 0x20) {
7850 /* Unsigned 64-bit multiply */
a7812ae4 7851 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7852 } else {
9ee6e8bb
PB
7853 if (op & 8) {
7854 /* smlalxy */
5e3f878a
PB
7855 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7856 dead_tmp(tmp2);
a7812ae4
PB
7857 tmp64 = tcg_temp_new_i64();
7858 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7859 dead_tmp(tmp);
9ee6e8bb
PB
7860 } else {
7861 /* Signed 64-bit multiply */
a7812ae4 7862 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7863 }
b5ff1b31 7864 }
9ee6e8bb
PB
7865 if (op & 4) {
7866 /* umaal */
a7812ae4
PB
7867 gen_addq_lo(s, tmp64, rs);
7868 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7869 } else if (op & 0x40) {
7870 /* 64-bit accumulate. */
a7812ae4 7871 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7872 }
a7812ae4 7873 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7874 tcg_temp_free_i64(tmp64);
5fd46862 7875 }
2c0262af 7876 break;
9ee6e8bb
PB
7877 }
7878 break;
7879 case 6: case 7: case 14: case 15:
7880 /* Coprocessor. */
7881 if (((insn >> 24) & 3) == 3) {
7882 /* Translate into the equivalent ARM encoding. */
7883 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7884 if (disas_neon_data_insn(env, s, insn))
7885 goto illegal_op;
7886 } else {
7887 if (insn & (1 << 28))
7888 goto illegal_op;
7889 if (disas_coproc_insn (env, s, insn))
7890 goto illegal_op;
7891 }
7892 break;
7893 case 8: case 9: case 10: case 11:
7894 if (insn & (1 << 15)) {
7895 /* Branches, misc control. */
7896 if (insn & 0x5000) {
7897 /* Unconditional branch. */
7898 /* signextend(hw1[10:0]) -> offset[:12]. */
7899 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7900 /* hw2[10:0] -> offset[11:1]. */
7901 offset |= (insn & 0x7ff) << 1;
7902 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7903 offset[24:22] already have the same value because of the
7904 sign extension above. */
7905 offset ^= ((~insn) & (1 << 13)) << 10;
7906 offset ^= ((~insn) & (1 << 11)) << 11;
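/* Background (ARM ARM, B/BL encoding T4): the branch offset is
   SignExtend(S:I1:I2:imm10:imm11:'0', 32), with I1 = NOT(J1 EOR S) and
   I2 = NOT(J2 EOR S), where S = insn[26], J1 = insn[13], J2 = insn[11].
   After the sign extension above, offset[23] and offset[22] both hold S,
   so XORing them with NOT(J1) and NOT(J2) produces I1 and I2, because
   S ^ NOT(J) == NOT(J ^ S). */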
7907
9ee6e8bb
PB
7908 if (insn & (1 << 14)) {
7909 /* Branch and link. */
3174f8e9 7910 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7911 }
3b46e624 7912
b0109805 7913 offset += s->pc;
9ee6e8bb
PB
7914 if (insn & (1 << 12)) {
7915 /* b/bl */
b0109805 7916 gen_jmp(s, offset);
9ee6e8bb
PB
7917 } else {
7918 /* blx */
b0109805
PB
7919 offset &= ~(uint32_t)2;
7920 gen_bx_im(s, offset);
2c0262af 7921 }
9ee6e8bb
PB
7922 } else if (((insn >> 23) & 7) == 7) {
7923 /* Misc control */
7924 if (insn & (1 << 13))
7925 goto illegal_op;
7926
7927 if (insn & (1 << 26)) {
7928 /* Secure monitor call (v6Z) */
7929 goto illegal_op; /* not implemented. */
2c0262af 7930 } else {
9ee6e8bb
PB
7931 op = (insn >> 20) & 7;
7932 switch (op) {
7933 case 0: /* msr cpsr. */
7934 if (IS_M(env)) {
8984bd2e
PB
7935 tmp = load_reg(s, rn);
7936 addr = tcg_const_i32(insn & 0xff);
7937 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6
JR
7938 tcg_temp_free_i32(addr);
7939 dead_tmp(tmp);
9ee6e8bb
PB
7940 gen_lookup_tb(s);
7941 break;
7942 }
7943 /* fall through */
7944 case 1: /* msr spsr. */
7945 if (IS_M(env))
7946 goto illegal_op;
2fbac54b
FN
7947 tmp = load_reg(s, rn);
7948 if (gen_set_psr(s,
9ee6e8bb 7949 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7950 op == 1, tmp))
9ee6e8bb
PB
7951 goto illegal_op;
7952 break;
7953 case 2: /* cps, nop-hint. */
7954 if (((insn >> 8) & 7) == 0) {
7955 gen_nop_hint(s, insn & 0xff);
7956 }
7957 /* Implemented as NOP in user mode. */
7958 if (IS_USER(s))
7959 break;
7960 offset = 0;
7961 imm = 0;
7962 if (insn & (1 << 10)) {
7963 if (insn & (1 << 7))
7964 offset |= CPSR_A;
7965 if (insn & (1 << 6))
7966 offset |= CPSR_I;
7967 if (insn & (1 << 5))
7968 offset |= CPSR_F;
7969 if (insn & (1 << 9))
7970 imm = CPSR_A | CPSR_I | CPSR_F;
7971 }
7972 if (insn & (1 << 8)) {
7973 offset |= 0x1f;
7974 imm |= (insn & 0x1f);
7975 }
7976 if (offset) {
2fbac54b 7977 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
7978 }
7979 break;
7980 case 3: /* Special control operations. */
426f5abc 7981 ARCH(7);
9ee6e8bb
PB
7982 op = (insn >> 4) & 0xf;
7983 switch (op) {
7984 case 2: /* clrex */
426f5abc 7985 gen_clrex(s);
9ee6e8bb
PB
7986 break;
7987 case 4: /* dsb */
7988 case 5: /* dmb */
7989 case 6: /* isb */
7990 /* These execute as NOPs. */
9ee6e8bb
PB
7991 break;
7992 default:
7993 goto illegal_op;
7994 }
7995 break;
7996 case 4: /* bxj */
7997 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7998 tmp = load_reg(s, rn);
7999 gen_bx(s, tmp);
9ee6e8bb
PB
8000 break;
8001 case 5: /* Exception return. */
b8b45b68
RV
8002 if (IS_USER(s)) {
8003 goto illegal_op;
8004 }
8005 if (rn != 14 || rd != 15) {
8006 goto illegal_op;
8007 }
8008 tmp = load_reg(s, rn);
8009 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8010 gen_exception_return(s, tmp);
8011 break;
9ee6e8bb 8012 case 6: /* mrs cpsr. */
8984bd2e 8013 tmp = new_tmp();
9ee6e8bb 8014 if (IS_M(env)) {
8984bd2e
PB
8015 addr = tcg_const_i32(insn & 0xff);
8016 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8017 tcg_temp_free_i32(addr);
9ee6e8bb 8018 } else {
8984bd2e 8019 gen_helper_cpsr_read(tmp);
9ee6e8bb 8020 }
8984bd2e 8021 store_reg(s, rd, tmp);
9ee6e8bb
PB
8022 break;
8023 case 7: /* mrs spsr. */
8024 /* Not accessible in user mode. */
8025 if (IS_USER(s) || IS_M(env))
8026 goto illegal_op;
d9ba4830
PB
8027 tmp = load_cpu_field(spsr);
8028 store_reg(s, rd, tmp);
9ee6e8bb 8029 break;
2c0262af
FB
8030 }
8031 }
9ee6e8bb
PB
8032 } else {
8033 /* Conditional branch. */
8034 op = (insn >> 22) & 0xf;
8035 /* Generate a conditional jump to next instruction. */
8036 s->condlabel = gen_new_label();
d9ba4830 8037 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8038 s->condjmp = 1;
8039
8040 /* offset[11:1] = insn[10:0] */
8041 offset = (insn & 0x7ff) << 1;
8042 /* offset[17:12] = insn[21:16]. */
8043 offset |= (insn & 0x003f0000) >> 4;
8044 /* offset[31:20] = insn[26]. */
8045 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8046 /* offset[18] = insn[13]. */
8047 offset |= (insn & (1 << 13)) << 5;
8048 /* offset[19] = insn[11]. */
8049 offset |= (insn & (1 << 11)) << 8;
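/* Equivalently (ARM ARM, B<c>.W encoding T3): the bits assembled above
   form SignExtend(S:J2:J1:imm6:imm11:'0', 32), with S = insn[26],
   J2 = insn[11], J1 = insn[13], imm6 = insn[21:16], imm11 = insn[10:0]. */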
8050
8051 /* jump to the offset */
b0109805 8052 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8053 }
8054 } else {
8055 /* Data processing immediate. */
8056 if (insn & (1 << 25)) {
8057 if (insn & (1 << 24)) {
8058 if (insn & (1 << 20))
8059 goto illegal_op;
8060 /* Bitfield/Saturate. */
8061 op = (insn >> 21) & 7;
8062 imm = insn & 0x1f;
8063 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
8064 if (rn == 15) {
8065 tmp = new_tmp();
8066 tcg_gen_movi_i32(tmp, 0);
8067 } else {
8068 tmp = load_reg(s, rn);
8069 }
9ee6e8bb
PB
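/* Here imm (insn[4:0]) is width-1 for sbfx/ubfx, the msb index for
   bfi/bfc, and the saturation bit position for ssat/usat; shift is the
   lsb / shift amount reassembled from imm3:imm2 above. */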
8070 switch (op) {
8071 case 2: /* Signed bitfield extract. */
8072 imm++;
8073 if (shift + imm > 32)
8074 goto illegal_op;
8075 if (imm < 32)
6ddbc6e4 8076 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8077 break;
8078 case 6: /* Unsigned bitfield extract. */
8079 imm++;
8080 if (shift + imm > 32)
8081 goto illegal_op;
8082 if (imm < 32)
6ddbc6e4 8083 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8084 break;
8085 case 3: /* Bitfield insert/clear. */
8086 if (imm < shift)
8087 goto illegal_op;
8088 imm = imm + 1 - shift;
8089 if (imm != 32) {
6ddbc6e4 8090 tmp2 = load_reg(s, rd);
8f8e3aa4 8091 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 8092 dead_tmp(tmp2);
9ee6e8bb
PB
8093 }
8094 break;
8095 case 7:
8096 goto illegal_op;
8097 default: /* Saturate. */
9ee6e8bb
PB
8098 if (shift) {
8099 if (op & 1)
6ddbc6e4 8100 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8101 else
6ddbc6e4 8102 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8103 }
6ddbc6e4 8104 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8105 if (op & 4) {
8106 /* Unsigned. */
9ee6e8bb 8107 if ((op & 1) && shift == 0)
6ddbc6e4 8108 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8109 else
6ddbc6e4 8110 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8111 } else {
9ee6e8bb 8112 /* Signed. */
9ee6e8bb 8113 if ((op & 1) && shift == 0)
6ddbc6e4 8114 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8115 else
6ddbc6e4 8116 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8117 }
b75263d6 8118 tcg_temp_free_i32(tmp2);
9ee6e8bb 8119 break;
2c0262af 8120 }
6ddbc6e4 8121 store_reg(s, rd, tmp);
9ee6e8bb
PB
8122 } else {
8123 imm = ((insn & 0x04000000) >> 15)
8124 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8125 if (insn & (1 << 22)) {
8126 /* 16-bit immediate. */
8127 imm |= (insn >> 4) & 0xf000;
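/* The value assembled here is imm4:i:imm3:imm8 (MOVW/MOVT encoding T3),
   with imm4 taken from insn[19:16]. */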
8128 if (insn & (1 << 23)) {
8129 /* movt */
5e3f878a 8130 tmp = load_reg(s, rd);
86831435 8131 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8132 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8133 } else {
9ee6e8bb 8134 /* movw */
5e3f878a
PB
8135 tmp = new_tmp();
8136 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8137 }
8138 } else {
9ee6e8bb
PB
8139 /* Add/sub 12-bit immediate. */
8140 if (rn == 15) {
b0109805 8141 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8142 if (insn & (1 << 23))
b0109805 8143 offset -= imm;
9ee6e8bb 8144 else
b0109805 8145 offset += imm;
5e3f878a
PB
8146 tmp = new_tmp();
8147 tcg_gen_movi_i32(tmp, offset);
2c0262af 8148 } else {
5e3f878a 8149 tmp = load_reg(s, rn);
9ee6e8bb 8150 if (insn & (1 << 23))
5e3f878a 8151 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8152 else
5e3f878a 8153 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8154 }
9ee6e8bb 8155 }
5e3f878a 8156 store_reg(s, rd, tmp);
191abaa2 8157 }
9ee6e8bb
PB
8158 } else {
8159 int shifter_out = 0;
8160 /* modified 12-bit immediate. */
8161 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8162 imm = (insn & 0xff);
8163 switch (shift) {
8164 case 0: /* XY */
8165 /* Nothing to do. */
8166 break;
8167 case 1: /* 00XY00XY */
8168 imm |= imm << 16;
8169 break;
8170 case 2: /* XY00XY00 */
8171 imm |= imm << 16;
8172 imm <<= 8;
8173 break;
8174 case 3: /* XYXYXYXY */
8175 imm |= imm << 16;
8176 imm |= imm << 8;
8177 break;
8178 default: /* Rotated constant. */
8179 shift = (shift << 1) | (imm >> 7);
8180 imm |= 0x80;
8181 imm = imm << (32 - shift);
8182 shifter_out = 1;
8183 break;
b5ff1b31 8184 }
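/* Illustrative example for the rotated-constant case: with
   i:imm3 = 0b0100 and imm8 = 0x55, shift becomes (4 << 1) | (0x55 >> 7) = 8,
   imm becomes 0x55 | 0x80 = 0xd5 and then 0xd5 << (32 - 8) = 0xd5000000,
   i.e. ROR(0x000000d5, 8). Only the left-shift half of the rotation is
   needed because the rotate amount is always >= 8 in this case, so the
   bits shifted out to the right are zero. */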
3174f8e9
FN
8185 tmp2 = new_tmp();
8186 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8187 rn = (insn >> 16) & 0xf;
3174f8e9
FN
8188 if (rn == 15) {
8189 tmp = new_tmp();
8190 tcg_gen_movi_i32(tmp, 0);
8191 } else {
8192 tmp = load_reg(s, rn);
8193 }
9ee6e8bb
PB
8194 op = (insn >> 21) & 0xf;
8195 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8196 shifter_out, tmp, tmp2))
9ee6e8bb 8197 goto illegal_op;
3174f8e9 8198 dead_tmp(tmp2);
9ee6e8bb
PB
8199 rd = (insn >> 8) & 0xf;
8200 if (rd != 15) {
3174f8e9
FN
8201 store_reg(s, rd, tmp);
8202 } else {
8203 dead_tmp(tmp);
2c0262af 8204 }
2c0262af 8205 }
9ee6e8bb
PB
8206 }
8207 break;
8208 case 12: /* Load/store single data item. */
8209 {
8210 int postinc = 0;
8211 int writeback = 0;
b0109805 8212 int user;
9ee6e8bb
PB
8213 if ((insn & 0x01100000) == 0x01000000) {
8214 if (disas_neon_ls_insn(env, s, insn))
c1713132 8215 goto illegal_op;
9ee6e8bb
PB
8216 break;
8217 }
b0109805 8218 user = IS_USER(s);
9ee6e8bb 8219 if (rn == 15) {
b0109805 8220 addr = new_tmp();
9ee6e8bb
PB
8221 /* PC relative. */
8222 /* s->pc has already been incremented by 4. */
8223 imm = s->pc & 0xfffffffc;
8224 if (insn & (1 << 23))
8225 imm += insn & 0xfff;
8226 else
8227 imm -= insn & 0xfff;
b0109805 8228 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8229 } else {
b0109805 8230 addr = load_reg(s, rn);
9ee6e8bb
PB
8231 if (insn & (1 << 23)) {
8232 /* Positive offset. */
8233 imm = insn & 0xfff;
b0109805 8234 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8235 } else {
8236 op = (insn >> 8) & 7;
8237 imm = insn & 0xff;
8238 switch (op) {
8239 case 0: case 8: /* Shifted Register. */
8240 shift = (insn >> 4) & 0xf;
8241 if (shift > 3)
18c9b560 8242 goto illegal_op;
b26eefb6 8243 tmp = load_reg(s, rm);
9ee6e8bb 8244 if (shift)
b26eefb6 8245 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8246 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8247 dead_tmp(tmp);
9ee6e8bb
PB
8248 break;
8249 case 4: /* Negative offset. */
b0109805 8250 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8251 break;
8252 case 6: /* User privilege. */
b0109805
PB
8253 tcg_gen_addi_i32(addr, addr, imm);
8254 user = 1;
9ee6e8bb
PB
8255 break;
8256 case 1: /* Post-decrement. */
8257 imm = -imm;
8258 /* Fall through. */
8259 case 3: /* Post-increment. */
9ee6e8bb
PB
8260 postinc = 1;
8261 writeback = 1;
8262 break;
8263 case 5: /* Pre-decrement. */
8264 imm = -imm;
8265 /* Fall through. */
8266 case 7: /* Pre-increment. */
b0109805 8267 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8268 writeback = 1;
8269 break;
8270 default:
b7bcbe95 8271 goto illegal_op;
9ee6e8bb
PB
8272 }
8273 }
8274 }
8275 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8276 if (insn & (1 << 20)) {
8277 /* Load. */
8278 if (rs == 15 && op != 2) {
8279 if (op & 2)
b5ff1b31 8280 goto illegal_op;
9ee6e8bb
PB
8281 /* Memory hint. Implemented as NOP. */
8282 } else {
8283 switch (op) {
b0109805
PB
8284 case 0: tmp = gen_ld8u(addr, user); break;
8285 case 4: tmp = gen_ld8s(addr, user); break;
8286 case 1: tmp = gen_ld16u(addr, user); break;
8287 case 5: tmp = gen_ld16s(addr, user); break;
8288 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8289 default: goto illegal_op;
8290 }
8291 if (rs == 15) {
b0109805 8292 gen_bx(s, tmp);
9ee6e8bb 8293 } else {
b0109805 8294 store_reg(s, rs, tmp);
9ee6e8bb
PB
8295 }
8296 }
8297 } else {
8298 /* Store. */
8299 if (rs == 15)
b7bcbe95 8300 goto illegal_op;
b0109805 8301 tmp = load_reg(s, rs);
9ee6e8bb 8302 switch (op) {
b0109805
PB
8303 case 0: gen_st8(tmp, addr, user); break;
8304 case 1: gen_st16(tmp, addr, user); break;
8305 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8306 default: goto illegal_op;
b7bcbe95 8307 }
2c0262af 8308 }
9ee6e8bb 8309 if (postinc)
b0109805
PB
8310 tcg_gen_addi_i32(addr, addr, imm);
8311 if (writeback) {
8312 store_reg(s, rn, addr);
8313 } else {
8314 dead_tmp(addr);
8315 }
9ee6e8bb
PB
8316 }
8317 break;
8318 default:
8319 goto illegal_op;
2c0262af 8320 }
9ee6e8bb
PB
8321 return 0;
8322illegal_op:
8323 return 1;
2c0262af
FB
8324}
8325
9ee6e8bb 8326static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8327{
8328 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8329 int32_t offset;
8330 int i;
b26eefb6 8331 TCGv tmp;
d9ba4830 8332 TCGv tmp2;
b0109805 8333 TCGv addr;
99c475ab 8334
9ee6e8bb
PB
8335 if (s->condexec_mask) {
8336 cond = s->condexec_cond;
8337 s->condlabel = gen_new_label();
d9ba4830 8338 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8339 s->condjmp = 1;
8340 }
8341
b5ff1b31 8342 insn = lduw_code(s->pc);
99c475ab 8343 s->pc += 2;
b5ff1b31 8344
99c475ab
FB
8345 switch (insn >> 12) {
8346 case 0: case 1:
396e467c 8347
99c475ab
FB
8348 rd = insn & 7;
8349 op = (insn >> 11) & 3;
8350 if (op == 3) {
8351 /* add/subtract */
8352 rn = (insn >> 3) & 7;
396e467c 8353 tmp = load_reg(s, rn);
99c475ab
FB
8354 if (insn & (1 << 10)) {
8355 /* immediate */
396e467c
FN
8356 tmp2 = new_tmp();
8357 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8358 } else {
8359 /* reg */
8360 rm = (insn >> 6) & 7;
396e467c 8361 tmp2 = load_reg(s, rm);
99c475ab 8362 }
9ee6e8bb
PB
8363 if (insn & (1 << 9)) {
8364 if (s->condexec_mask)
396e467c 8365 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8366 else
396e467c 8367 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8368 } else {
8369 if (s->condexec_mask)
396e467c 8370 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8371 else
396e467c 8372 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8373 }
396e467c
FN
8374 dead_tmp(tmp2);
8375 store_reg(s, rd, tmp);
99c475ab
FB
8376 } else {
8377 /* shift immediate */
8378 rm = (insn >> 3) & 7;
8379 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8380 tmp = load_reg(s, rm);
8381 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8382 if (!s->condexec_mask)
8383 gen_logic_CC(tmp);
8384 store_reg(s, rd, tmp);
99c475ab
FB
8385 }
8386 break;
8387 case 2: case 3:
8388 /* arithmetic large immediate */
8389 op = (insn >> 11) & 3;
8390 rd = (insn >> 8) & 0x7;
396e467c
FN
8391 if (op == 0) { /* mov */
8392 tmp = new_tmp();
8393 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8394 if (!s->condexec_mask)
396e467c
FN
8395 gen_logic_CC(tmp);
8396 store_reg(s, rd, tmp);
8397 } else {
8398 tmp = load_reg(s, rd);
8399 tmp2 = new_tmp();
8400 tcg_gen_movi_i32(tmp2, insn & 0xff);
8401 switch (op) {
8402 case 1: /* cmp */
8403 gen_helper_sub_cc(tmp, tmp, tmp2);
8404 dead_tmp(tmp);
8405 dead_tmp(tmp2);
8406 break;
8407 case 2: /* add */
8408 if (s->condexec_mask)
8409 tcg_gen_add_i32(tmp, tmp, tmp2);
8410 else
8411 gen_helper_add_cc(tmp, tmp, tmp2);
8412 dead_tmp(tmp2);
8413 store_reg(s, rd, tmp);
8414 break;
8415 case 3: /* sub */
8416 if (s->condexec_mask)
8417 tcg_gen_sub_i32(tmp, tmp, tmp2);
8418 else
8419 gen_helper_sub_cc(tmp, tmp, tmp2);
8420 dead_tmp(tmp2);
8421 store_reg(s, rd, tmp);
8422 break;
8423 }
99c475ab 8424 }
99c475ab
FB
8425 break;
8426 case 4:
8427 if (insn & (1 << 11)) {
8428 rd = (insn >> 8) & 7;
5899f386
FB
8429 /* load pc-relative. Bit 1 of PC is ignored. */
8430 val = s->pc + 2 + ((insn & 0xff) * 4);
8431 val &= ~(uint32_t)2;
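/* Illustrative example: for an insn at address 0x2002 with imm8 = 1,
   s->pc has already been advanced to 0x2004, so val = 0x2004 + 2 + 4 =
   0x200a, masked to 0x2008, which matches Align(PC, 4) + imm8*4 with PC
   reading as the insn address plus 4. */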
b0109805
PB
8432 addr = new_tmp();
8433 tcg_gen_movi_i32(addr, val);
8434 tmp = gen_ld32(addr, IS_USER(s));
8435 dead_tmp(addr);
8436 store_reg(s, rd, tmp);
99c475ab
FB
8437 break;
8438 }
8439 if (insn & (1 << 10)) {
8440 /* data processing extended or blx */
8441 rd = (insn & 7) | ((insn >> 4) & 8);
8442 rm = (insn >> 3) & 0xf;
8443 op = (insn >> 8) & 3;
8444 switch (op) {
8445 case 0: /* add */
396e467c
FN
8446 tmp = load_reg(s, rd);
8447 tmp2 = load_reg(s, rm);
8448 tcg_gen_add_i32(tmp, tmp, tmp2);
8449 dead_tmp(tmp2);
8450 store_reg(s, rd, tmp);
99c475ab
FB
8451 break;
8452 case 1: /* cmp */
396e467c
FN
8453 tmp = load_reg(s, rd);
8454 tmp2 = load_reg(s, rm);
8455 gen_helper_sub_cc(tmp, tmp, tmp2);
8456 dead_tmp(tmp2);
8457 dead_tmp(tmp);
99c475ab
FB
8458 break;
8459 case 2: /* mov/cpy */
396e467c
FN
8460 tmp = load_reg(s, rm);
8461 store_reg(s, rd, tmp);
99c475ab
FB
8462 break;
8463 case 3:/* branch [and link] exchange thumb register */
b0109805 8464 tmp = load_reg(s, rm);
99c475ab
FB
8465 if (insn & (1 << 7)) {
8466 val = (uint32_t)s->pc | 1;
b0109805
PB
8467 tmp2 = new_tmp();
8468 tcg_gen_movi_i32(tmp2, val);
8469 store_reg(s, 14, tmp2);
99c475ab 8470 }
d9ba4830 8471 gen_bx(s, tmp);
99c475ab
FB
8472 break;
8473 }
8474 break;
8475 }
8476
8477 /* data processing register */
8478 rd = insn & 7;
8479 rm = (insn >> 3) & 7;
8480 op = (insn >> 6) & 0xf;
8481 if (op == 2 || op == 3 || op == 4 || op == 7) {
8482 /* the shift/rotate ops want the operands backwards */
8483 val = rm;
8484 rm = rd;
8485 rd = val;
8486 val = 1;
8487 } else {
8488 val = 0;
8489 }
8490
396e467c
FN
8491 if (op == 9) { /* neg */
8492 tmp = new_tmp();
8493 tcg_gen_movi_i32(tmp, 0);
8494 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8495 tmp = load_reg(s, rd);
8496 } else {
8497 TCGV_UNUSED(tmp);
8498 }
99c475ab 8499
396e467c 8500 tmp2 = load_reg(s, rm);
5899f386 8501 switch (op) {
99c475ab 8502 case 0x0: /* and */
396e467c 8503 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8504 if (!s->condexec_mask)
396e467c 8505 gen_logic_CC(tmp);
99c475ab
FB
8506 break;
8507 case 0x1: /* eor */
396e467c 8508 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8509 if (!s->condexec_mask)
396e467c 8510 gen_logic_CC(tmp);
99c475ab
FB
8511 break;
8512 case 0x2: /* lsl */
9ee6e8bb 8513 if (s->condexec_mask) {
396e467c 8514 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8515 } else {
396e467c
FN
8516 gen_helper_shl_cc(tmp2, tmp2, tmp);
8517 gen_logic_CC(tmp2);
9ee6e8bb 8518 }
99c475ab
FB
8519 break;
8520 case 0x3: /* lsr */
9ee6e8bb 8521 if (s->condexec_mask) {
396e467c 8522 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8523 } else {
396e467c
FN
8524 gen_helper_shr_cc(tmp2, tmp2, tmp);
8525 gen_logic_CC(tmp2);
9ee6e8bb 8526 }
99c475ab
FB
8527 break;
8528 case 0x4: /* asr */
9ee6e8bb 8529 if (s->condexec_mask) {
396e467c 8530 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8531 } else {
396e467c
FN
8532 gen_helper_sar_cc(tmp2, tmp2, tmp);
8533 gen_logic_CC(tmp2);
9ee6e8bb 8534 }
99c475ab
FB
8535 break;
8536 case 0x5: /* adc */
9ee6e8bb 8537 if (s->condexec_mask)
396e467c 8538 gen_adc(tmp, tmp2);
9ee6e8bb 8539 else
396e467c 8540 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8541 break;
8542 case 0x6: /* sbc */
9ee6e8bb 8543 if (s->condexec_mask)
396e467c 8544 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8545 else
396e467c 8546 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8547 break;
8548 case 0x7: /* ror */
9ee6e8bb 8549 if (s->condexec_mask) {
f669df27
AJ
8550 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8551 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8552 } else {
396e467c
FN
8553 gen_helper_ror_cc(tmp2, tmp2, tmp);
8554 gen_logic_CC(tmp2);
9ee6e8bb 8555 }
99c475ab
FB
8556 break;
8557 case 0x8: /* tst */
396e467c
FN
8558 tcg_gen_and_i32(tmp, tmp, tmp2);
8559 gen_logic_CC(tmp);
99c475ab 8560 rd = 16;
5899f386 8561 break;
99c475ab 8562 case 0x9: /* neg */
9ee6e8bb 8563 if (s->condexec_mask)
396e467c 8564 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8565 else
396e467c 8566 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8567 break;
8568 case 0xa: /* cmp */
396e467c 8569 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8570 rd = 16;
8571 break;
8572 case 0xb: /* cmn */
396e467c 8573 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8574 rd = 16;
8575 break;
8576 case 0xc: /* orr */
396e467c 8577 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8578 if (!s->condexec_mask)
396e467c 8579 gen_logic_CC(tmp);
99c475ab
FB
8580 break;
8581 case 0xd: /* mul */
7b2919a0 8582 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8583 if (!s->condexec_mask)
396e467c 8584 gen_logic_CC(tmp);
99c475ab
FB
8585 break;
8586 case 0xe: /* bic */
f669df27 8587 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8588 if (!s->condexec_mask)
396e467c 8589 gen_logic_CC(tmp);
99c475ab
FB
8590 break;
8591 case 0xf: /* mvn */
396e467c 8592 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8593 if (!s->condexec_mask)
396e467c 8594 gen_logic_CC(tmp2);
99c475ab 8595 val = 1;
5899f386 8596 rm = rd;
99c475ab
FB
8597 break;
8598 }
8599 if (rd != 16) {
396e467c
FN
8600 if (val) {
8601 store_reg(s, rm, tmp2);
8602 if (op != 0xf)
8603 dead_tmp(tmp);
8604 } else {
8605 store_reg(s, rd, tmp);
8606 dead_tmp(tmp2);
8607 }
8608 } else {
8609 dead_tmp(tmp);
8610 dead_tmp(tmp2);
99c475ab
FB
8611 }
8612 break;
8613
8614 case 5:
8615 /* load/store register offset. */
8616 rd = insn & 7;
8617 rn = (insn >> 3) & 7;
8618 rm = (insn >> 6) & 7;
8619 op = (insn >> 9) & 7;
b0109805 8620 addr = load_reg(s, rn);
b26eefb6 8621 tmp = load_reg(s, rm);
b0109805 8622 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8623 dead_tmp(tmp);
99c475ab
FB
8624
8625 if (op < 3) /* store */
b0109805 8626 tmp = load_reg(s, rd);
99c475ab
FB
8627
8628 switch (op) {
8629 case 0: /* str */
b0109805 8630 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8631 break;
8632 case 1: /* strh */
b0109805 8633 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8634 break;
8635 case 2: /* strb */
b0109805 8636 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8637 break;
8638 case 3: /* ldrsb */
b0109805 8639 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8640 break;
8641 case 4: /* ldr */
b0109805 8642 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8643 break;
8644 case 5: /* ldrh */
b0109805 8645 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8646 break;
8647 case 6: /* ldrb */
b0109805 8648 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8649 break;
8650 case 7: /* ldrsh */
b0109805 8651 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8652 break;
8653 }
8654 if (op >= 3) /* load */
b0109805
PB
8655 store_reg(s, rd, tmp);
8656 dead_tmp(addr);
99c475ab
FB
8657 break;
8658
8659 case 6:
8660 /* load/store word immediate offset */
8661 rd = insn & 7;
8662 rn = (insn >> 3) & 7;
b0109805 8663 addr = load_reg(s, rn);
99c475ab 8664 val = (insn >> 4) & 0x7c;
b0109805 8665 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8666
8667 if (insn & (1 << 11)) {
8668 /* load */
b0109805
PB
8669 tmp = gen_ld32(addr, IS_USER(s));
8670 store_reg(s, rd, tmp);
99c475ab
FB
8671 } else {
8672 /* store */
b0109805
PB
8673 tmp = load_reg(s, rd);
8674 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8675 }
b0109805 8676 dead_tmp(addr);
99c475ab
FB
8677 break;
8678
8679 case 7:
8680 /* load/store byte immediate offset */
8681 rd = insn & 7;
8682 rn = (insn >> 3) & 7;
b0109805 8683 addr = load_reg(s, rn);
99c475ab 8684 val = (insn >> 6) & 0x1f;
b0109805 8685 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8686
8687 if (insn & (1 << 11)) {
8688 /* load */
b0109805
PB
8689 tmp = gen_ld8u(addr, IS_USER(s));
8690 store_reg(s, rd, tmp);
99c475ab
FB
8691 } else {
8692 /* store */
b0109805
PB
8693 tmp = load_reg(s, rd);
8694 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8695 }
b0109805 8696 dead_tmp(addr);
99c475ab
FB
8697 break;
8698
8699 case 8:
8700 /* load/store halfword immediate offset */
8701 rd = insn & 7;
8702 rn = (insn >> 3) & 7;
b0109805 8703 addr = load_reg(s, rn);
99c475ab 8704 val = (insn >> 5) & 0x3e;
b0109805 8705 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8706
8707 if (insn & (1 << 11)) {
8708 /* load */
b0109805
PB
8709 tmp = gen_ld16u(addr, IS_USER(s));
8710 store_reg(s, rd, tmp);
99c475ab
FB
8711 } else {
8712 /* store */
b0109805
PB
8713 tmp = load_reg(s, rd);
8714 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8715 }
b0109805 8716 dead_tmp(addr);
99c475ab
FB
8717 break;
8718
8719 case 9:
8720 /* load/store from stack */
8721 rd = (insn >> 8) & 7;
b0109805 8722 addr = load_reg(s, 13);
99c475ab 8723 val = (insn & 0xff) * 4;
b0109805 8724 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8725
8726 if (insn & (1 << 11)) {
8727 /* load */
b0109805
PB
8728 tmp = gen_ld32(addr, IS_USER(s));
8729 store_reg(s, rd, tmp);
99c475ab
FB
8730 } else {
8731 /* store */
b0109805
PB
8732 tmp = load_reg(s, rd);
8733 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8734 }
b0109805 8735 dead_tmp(addr);
99c475ab
FB
8736 break;
8737
8738 case 10:
8739 /* add to high reg */
8740 rd = (insn >> 8) & 7;
5899f386
FB
8741 if (insn & (1 << 11)) {
8742 /* SP */
5e3f878a 8743 tmp = load_reg(s, 13);
5899f386
FB
8744 } else {
8745 /* PC. bit 1 is ignored. */
5e3f878a
PB
8746 tmp = new_tmp();
8747 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8748 }
99c475ab 8749 val = (insn & 0xff) * 4;
5e3f878a
PB
8750 tcg_gen_addi_i32(tmp, tmp, val);
8751 store_reg(s, rd, tmp);
99c475ab
FB
8752 break;
8753
8754 case 11:
8755 /* misc */
8756 op = (insn >> 8) & 0xf;
8757 switch (op) {
8758 case 0:
8759 /* adjust stack pointer */
b26eefb6 8760 tmp = load_reg(s, 13);
99c475ab
FB
8761 val = (insn & 0x7f) * 4;
8762 if (insn & (1 << 7))
6a0d8a1d 8763 val = -(int32_t)val;
b26eefb6
PB
8764 tcg_gen_addi_i32(tmp, tmp, val);
8765 store_reg(s, 13, tmp);
99c475ab
FB
8766 break;
8767
9ee6e8bb
PB
8768 case 2: /* sign/zero extend. */
8769 ARCH(6);
8770 rd = insn & 7;
8771 rm = (insn >> 3) & 7;
b0109805 8772 tmp = load_reg(s, rm);
9ee6e8bb 8773 switch ((insn >> 6) & 3) {
b0109805
PB
8774 case 0: gen_sxth(tmp); break;
8775 case 1: gen_sxtb(tmp); break;
8776 case 2: gen_uxth(tmp); break;
8777 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8778 }
b0109805 8779 store_reg(s, rd, tmp);
9ee6e8bb 8780 break;
99c475ab
FB
8781 case 4: case 5: case 0xc: case 0xd:
8782 /* push/pop */
b0109805 8783 addr = load_reg(s, 13);
5899f386
FB
8784 if (insn & (1 << 8))
8785 offset = 4;
99c475ab 8786 else
5899f386
FB
8787 offset = 0;
8788 for (i = 0; i < 8; i++) {
8789 if (insn & (1 << i))
8790 offset += 4;
8791 }
8792 if ((insn & (1 << 11)) == 0) {
b0109805 8793 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8794 }
99c475ab
FB
8795 for (i = 0; i < 8; i++) {
8796 if (insn & (1 << i)) {
8797 if (insn & (1 << 11)) {
8798 /* pop */
b0109805
PB
8799 tmp = gen_ld32(addr, IS_USER(s));
8800 store_reg(s, i, tmp);
99c475ab
FB
8801 } else {
8802 /* push */
b0109805
PB
8803 tmp = load_reg(s, i);
8804 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8805 }
5899f386 8806 /* advance to the next address. */
b0109805 8807 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8808 }
8809 }
a50f5b91 8810 TCGV_UNUSED(tmp);
99c475ab
FB
8811 if (insn & (1 << 8)) {
8812 if (insn & (1 << 11)) {
8813 /* pop pc */
b0109805 8814 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8815 /* don't set the pc until the rest of the instruction
8816 has completed */
8817 } else {
8818 /* push lr */
b0109805
PB
8819 tmp = load_reg(s, 14);
8820 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8821 }
b0109805 8822 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8823 }
5899f386 8824 if ((insn & (1 << 11)) == 0) {
b0109805 8825 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8826 }
99c475ab 8827 /* write back the new stack pointer */
b0109805 8828 store_reg(s, 13, addr);
99c475ab
FB
8829 /* set the new PC value */
8830 if ((insn & 0x0900) == 0x0900)
b0109805 8831 gen_bx(s, tmp);
99c475ab
FB
8832 break;
8833
9ee6e8bb
PB
8834 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8835 rm = insn & 7;
d9ba4830 8836 tmp = load_reg(s, rm);
9ee6e8bb
PB
8837 s->condlabel = gen_new_label();
8838 s->condjmp = 1;
8839 if (insn & (1 << 11))
cb63669a 8840 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8841 else
cb63669a 8842 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8843 dead_tmp(tmp);
9ee6e8bb
PB
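/* CBZ/CBNZ branch offset: ZeroExtend(i:imm5:'0') with i = insn[9] and
   imm5 = insn[7:3]; the branch is always forward. */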
8844 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8845 val = (uint32_t)s->pc + 2;
8846 val += offset;
8847 gen_jmp(s, val);
8848 break;
8849
8850 case 15: /* IT, nop-hint. */
8851 if ((insn & 0xf) == 0) {
8852 gen_nop_hint(s, (insn >> 4) & 0xf);
8853 break;
8854 }
8855 /* If Then. */
8856 s->condexec_cond = (insn >> 4) & 0xe;
8857 s->condexec_mask = insn & 0x1f;
8858 /* No actual code generated for this insn, just setup state. */
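/* firstcond is insn[7:4] and the mask is insn[3:0] (IT encoding T1).
   Only the top three bits of firstcond are kept in condexec_cond; its
   low bit is carried as the top bit of the 5-bit condexec_mask
   (firstcond[0]:mask), from where the main translation loop recovers
   each instruction's condition as the block advances. */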
8859 break;
8860
06c949e6 8861 case 0xe: /* bkpt */
9ee6e8bb 8862 gen_set_condexec(s);
5e3f878a 8863 gen_set_pc_im(s->pc - 2);
d9ba4830 8864 gen_exception(EXCP_BKPT);
06c949e6
PB
8865 s->is_jmp = DISAS_JUMP;
8866 break;
8867
9ee6e8bb
PB
8868 case 0xa: /* rev */
8869 ARCH(6);
8870 rn = (insn >> 3) & 0x7;
8871 rd = insn & 0x7;
b0109805 8872 tmp = load_reg(s, rn);
9ee6e8bb 8873 switch ((insn >> 6) & 3) {
66896cb8 8874 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8875 case 1: gen_rev16(tmp); break;
8876 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8877 default: goto illegal_op;
8878 }
b0109805 8879 store_reg(s, rd, tmp);
9ee6e8bb
PB
8880 break;
8881
8882 case 6: /* cps */
8883 ARCH(6);
8884 if (IS_USER(s))
8885 break;
8886 if (IS_M(env)) {
8984bd2e 8887 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8888 /* PRIMASK */
8984bd2e
PB
8889 if (insn & 1) {
8890 addr = tcg_const_i32(16);
8891 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8892 tcg_temp_free_i32(addr);
8984bd2e 8893 }
9ee6e8bb 8894 /* FAULTMASK */
8984bd2e
PB
8895 if (insn & 2) {
8896 addr = tcg_const_i32(17);
8897 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8898 tcg_temp_free_i32(addr);
8984bd2e 8899 }
b75263d6 8900 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8901 gen_lookup_tb(s);
8902 } else {
8903 if (insn & (1 << 4))
8904 shift = CPSR_A | CPSR_I | CPSR_F;
8905 else
8906 shift = 0;
fa26df03 8907 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
8908 }
8909 break;
8910
99c475ab
FB
8911 default:
8912 goto undef;
8913 }
8914 break;
8915
8916 case 12:
8917 /* load/store multiple */
8918 rn = (insn >> 8) & 0x7;
b0109805 8919 addr = load_reg(s, rn);
99c475ab
FB
8920 for (i = 0; i < 8; i++) {
8921 if (insn & (1 << i)) {
99c475ab
FB
8922 if (insn & (1 << 11)) {
8923 /* load */
b0109805
PB
8924 tmp = gen_ld32(addr, IS_USER(s));
8925 store_reg(s, i, tmp);
99c475ab
FB
8926 } else {
8927 /* store */
b0109805
PB
8928 tmp = load_reg(s, i);
8929 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8930 }
5899f386 8931 /* advance to the next address */
b0109805 8932 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8933 }
8934 }
5899f386 8935 /* Base register writeback. */
b0109805
PB
8936 if ((insn & (1 << rn)) == 0) {
8937 store_reg(s, rn, addr);
8938 } else {
8939 dead_tmp(addr);
8940 }
99c475ab
FB
8941 break;
8942
8943 case 13:
8944 /* conditional branch or swi */
8945 cond = (insn >> 8) & 0xf;
8946 if (cond == 0xe)
8947 goto undef;
8948
8949 if (cond == 0xf) {
8950 /* swi */
9ee6e8bb 8951 gen_set_condexec(s);
422ebf69 8952 gen_set_pc_im(s->pc);
9ee6e8bb 8953 s->is_jmp = DISAS_SWI;
99c475ab
FB
8954 break;
8955 }
8956 /* generate a conditional jump to next instruction */
e50e6a20 8957 s->condlabel = gen_new_label();
d9ba4830 8958 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8959 s->condjmp = 1;
99c475ab
FB
8960
8961 /* jump to the offset */
5899f386 8962 val = (uint32_t)s->pc + 2;
99c475ab 8963 offset = ((int32_t)insn << 24) >> 24;
5899f386 8964 val += offset << 1;
8aaca4c0 8965 gen_jmp(s, val);
99c475ab
FB
8966 break;
8967
8968 case 14:
358bf29e 8969 if (insn & (1 << 11)) {
9ee6e8bb
PB
8970 if (disas_thumb2_insn(env, s, insn))
8971 goto undef32;
358bf29e
PB
8972 break;
8973 }
9ee6e8bb 8974 /* unconditional branch */
99c475ab
FB
8975 val = (uint32_t)s->pc;
8976 offset = ((int32_t)insn << 21) >> 21;
8977 val += (offset << 1) + 2;
8aaca4c0 8978 gen_jmp(s, val);
99c475ab
FB
8979 break;
8980
8981 case 15:
9ee6e8bb 8982 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8983 goto undef32;
9ee6e8bb 8984 break;
99c475ab
FB
8985 }
8986 return;
9ee6e8bb
PB
8987undef32:
8988 gen_set_condexec(s);
5e3f878a 8989 gen_set_pc_im(s->pc - 4);
d9ba4830 8990 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8991 s->is_jmp = DISAS_JUMP;
8992 return;
8993illegal_op:
99c475ab 8994undef:
9ee6e8bb 8995 gen_set_condexec(s);
5e3f878a 8996 gen_set_pc_im(s->pc - 2);
d9ba4830 8997 gen_exception(EXCP_UDEF);
99c475ab
FB
8998 s->is_jmp = DISAS_JUMP;
8999}
9000
2c0262af
FB
9001/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9002 basic block 'tb'. If search_pc is TRUE, also generate PC
9003 information for each intermediate instruction. */
2cfc5f17
TS
9004static inline void gen_intermediate_code_internal(CPUState *env,
9005 TranslationBlock *tb,
9006 int search_pc)
2c0262af
FB
9007{
9008 DisasContext dc1, *dc = &dc1;
a1d1bb31 9009 CPUBreakpoint *bp;
2c0262af
FB
9010 uint16_t *gen_opc_end;
9011 int j, lj;
0fa85d43 9012 target_ulong pc_start;
b5ff1b31 9013 uint32_t next_page_start;
2e70f6ef
PB
9014 int num_insns;
9015 int max_insns;
3b46e624 9016
2c0262af 9017 /* generate intermediate code */
b26eefb6 9018 num_temps = 0;
b26eefb6 9019
0fa85d43 9020 pc_start = tb->pc;
3b46e624 9021
2c0262af
FB
9022 dc->tb = tb;
9023
2c0262af 9024 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9025
9026 dc->is_jmp = DISAS_NEXT;
9027 dc->pc = pc_start;
8aaca4c0 9028 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9029 dc->condjmp = 0;
5899f386 9030 dc->thumb = env->thumb;
9ee6e8bb
PB
9031 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
9032 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 9033#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
9034 if (IS_M(env)) {
9035 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
9036 } else {
9037 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
9038 }
b5ff1b31 9039#endif
a7812ae4
PB
9040 cpu_F0s = tcg_temp_new_i32();
9041 cpu_F1s = tcg_temp_new_i32();
9042 cpu_F0d = tcg_temp_new_i64();
9043 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9044 cpu_V0 = cpu_F0d;
9045 cpu_V1 = cpu_F1d;
e677137d 9046 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9047 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9048 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9049 lj = -1;
2e70f6ef
PB
9050 num_insns = 0;
9051 max_insns = tb->cflags & CF_COUNT_MASK;
9052 if (max_insns == 0)
9053 max_insns = CF_COUNT_MASK;
9054
9055 gen_icount_start();
9ee6e8bb
PB
9056 /* Reset the conditional execution bits immediately. This avoids
9057 complications trying to do it at the end of the block. */
9058 if (env->condexec_bits)
8f01245e
PB
9059 {
9060 TCGv tmp = new_tmp();
9061 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9062 store_cpu_field(tmp, condexec_bits);
8f01245e 9063 }
2c0262af 9064 do {
fbb4a2e3
PB
9065#ifdef CONFIG_USER_ONLY
9066 /* Intercept jump to the magic kernel page. */
9067 if (dc->pc >= 0xffff0000) {
9068 /* We always get here via a jump, so we know we are not in a
9069 conditional execution block. */
9070 gen_exception(EXCP_KERNEL_TRAP);
9071 dc->is_jmp = DISAS_UPDATE;
9072 break;
9073 }
9074#else
9ee6e8bb
PB
9075 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9076 /* We always get here via a jump, so we know we are not in a
9077 conditional execution block. */
d9ba4830 9078 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9079 dc->is_jmp = DISAS_UPDATE;
9080 break;
9ee6e8bb
PB
9081 }
9082#endif
9083
72cf2d4f
BS
9084 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9085 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9086 if (bp->pc == dc->pc) {
9ee6e8bb 9087 gen_set_condexec(dc);
5e3f878a 9088 gen_set_pc_im(dc->pc);
d9ba4830 9089 gen_exception(EXCP_DEBUG);
1fddef4b 9090 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9091 /* Advance PC so that clearing the breakpoint will
9092 invalidate this TB. */
9093 dc->pc += 2;
9094 goto done_generating;
1fddef4b
FB
9095 break;
9096 }
9097 }
9098 }
2c0262af
FB
9099 if (search_pc) {
9100 j = gen_opc_ptr - gen_opc_buf;
9101 if (lj < j) {
9102 lj++;
9103 while (lj < j)
9104 gen_opc_instr_start[lj++] = 0;
9105 }
0fa85d43 9106 gen_opc_pc[lj] = dc->pc;
2c0262af 9107 gen_opc_instr_start[lj] = 1;
2e70f6ef 9108 gen_opc_icount[lj] = num_insns;
2c0262af 9109 }
e50e6a20 9110
2e70f6ef
PB
9111 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9112 gen_io_start();
9113
9ee6e8bb
PB
9114 if (env->thumb) {
9115 disas_thumb_insn(env, dc);
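/* Advance the IT block state after each Thumb insn: the top bit of the
   5-bit mask (initially firstcond[0]) supplies the low bit of the next
   instruction's condition, and the mask shifts left by one; a mask of
   zero means the IT block has ended. */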
9116 if (dc->condexec_mask) {
9117 dc->condexec_cond = (dc->condexec_cond & 0xe)
9118 | ((dc->condexec_mask >> 4) & 1);
9119 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9120 if (dc->condexec_mask == 0) {
9121 dc->condexec_cond = 0;
9122 }
9123 }
9124 } else {
9125 disas_arm_insn(env, dc);
9126 }
b26eefb6
PB
9127 if (num_temps) {
9128 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9129 num_temps = 0;
9130 }
e50e6a20
FB
9131
9132 if (dc->condjmp && !dc->is_jmp) {
9133 gen_set_label(dc->condlabel);
9134 dc->condjmp = 0;
9135 }
aaf2d97d 9136 /* Translation stops when a conditional branch is encountered.
e50e6a20 9137 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9138 * Also stop translation when a page boundary is reached. This
bf20dc07 9139 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9140 num_insns ++;
1fddef4b
FB
9141 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9142 !env->singlestep_enabled &&
1b530a6d 9143 !singlestep &&
2e70f6ef
PB
9144 dc->pc < next_page_start &&
9145 num_insns < max_insns);
9146
9147 if (tb->cflags & CF_LAST_IO) {
9148 if (dc->condjmp) {
9149 /* FIXME: This can theoretically happen with self-modifying
9150 code. */
9151 cpu_abort(env, "IO on conditional branch instruction");
9152 }
9153 gen_io_end();
9154 }
9ee6e8bb 9155
b5ff1b31 9156 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9157 instruction was a conditional branch or trap, and the PC has
9158 already been written. */
551bd27f 9159 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9160 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9161 if (dc->condjmp) {
9ee6e8bb
PB
9162 gen_set_condexec(dc);
9163 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9164 gen_exception(EXCP_SWI);
9ee6e8bb 9165 } else {
d9ba4830 9166 gen_exception(EXCP_DEBUG);
9ee6e8bb 9167 }
e50e6a20
FB
9168 gen_set_label(dc->condlabel);
9169 }
9170 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9171 gen_set_pc_im(dc->pc);
e50e6a20 9172 dc->condjmp = 0;
8aaca4c0 9173 }
9ee6e8bb
PB
9174 gen_set_condexec(dc);
9175 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9176 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9177 } else {
9178 /* FIXME: Single stepping a WFI insn will not halt
9179 the CPU. */
d9ba4830 9180 gen_exception(EXCP_DEBUG);
9ee6e8bb 9181 }
8aaca4c0 9182 } else {
9ee6e8bb
PB
9183 /* While branches must always occur at the end of an IT block,
9184 there are a few other things that can cause us to terminate
9185 the TB in the middle of an IT block:
9186 - Exception generating instructions (bkpt, swi, undefined).
9187 - Page boundaries.
9188 - Hardware watchpoints.
9189 Hardware breakpoints have already been handled and skip this code.
9190 */
9191 gen_set_condexec(dc);
8aaca4c0 9192 switch(dc->is_jmp) {
8aaca4c0 9193 case DISAS_NEXT:
6e256c93 9194 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9195 break;
9196 default:
9197 case DISAS_JUMP:
9198 case DISAS_UPDATE:
9199 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9200 tcg_gen_exit_tb(0);
8aaca4c0
FB
9201 break;
9202 case DISAS_TB_JUMP:
9203 /* nothing more to generate */
9204 break;
9ee6e8bb 9205 case DISAS_WFI:
d9ba4830 9206 gen_helper_wfi();
9ee6e8bb
PB
9207 break;
9208 case DISAS_SWI:
d9ba4830 9209 gen_exception(EXCP_SWI);
9ee6e8bb 9210 break;
8aaca4c0 9211 }
e50e6a20
FB
9212 if (dc->condjmp) {
9213 gen_set_label(dc->condlabel);
9ee6e8bb 9214 gen_set_condexec(dc);
6e256c93 9215 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9216 dc->condjmp = 0;
9217 }
2c0262af 9218 }
2e70f6ef 9219
9ee6e8bb 9220done_generating:
2e70f6ef 9221 gen_icount_end(tb, num_insns);
2c0262af
FB
9222 *gen_opc_ptr = INDEX_op_end;
9223
9224#ifdef DEBUG_DISAS
8fec2b8c 9225 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9226 qemu_log("----------------\n");
9227 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9228 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
9229 qemu_log("\n");
2c0262af
FB
9230 }
9231#endif
b5ff1b31
FB
9232 if (search_pc) {
9233 j = gen_opc_ptr - gen_opc_buf;
9234 lj++;
9235 while (lj <= j)
9236 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9237 } else {
2c0262af 9238 tb->size = dc->pc - pc_start;
2e70f6ef 9239 tb->icount = num_insns;
b5ff1b31 9240 }
2c0262af
FB
9241}
9242
2cfc5f17 9243void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9244{
2cfc5f17 9245 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9246}
9247
2cfc5f17 9248void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9249{
2cfc5f17 9250 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9251}
9252
b5ff1b31
FB
9253static const char *cpu_mode_names[16] = {
9254 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9255 "???", "???", "???", "und", "???", "???", "???", "sys"
9256};
9ee6e8bb 9257
5fafdf24 9258void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
9259 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
9260 int flags)
2c0262af
FB
9261{
9262 int i;
06e80fc9 9263#if 0
bc380d17 9264 union {
b7bcbe95
FB
9265 uint32_t i;
9266 float s;
9267 } s0, s1;
9268 CPU_DoubleU d;
a94a6abf
PB
9269 /* ??? This assumes float64 and double have the same layout.
9270 Oh well, it's only debug dumps. */
9271 union {
9272 float64 f64;
9273 double d;
9274 } d0;
06e80fc9 9275#endif
b5ff1b31 9276 uint32_t psr;
2c0262af
FB
9277
9278 for(i=0;i<16;i++) {
7fe48483 9279 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9280 if ((i % 4) == 3)
7fe48483 9281 cpu_fprintf(f, "\n");
2c0262af 9282 else
7fe48483 9283 cpu_fprintf(f, " ");
2c0262af 9284 }
b5ff1b31 9285 psr = cpsr_read(env);
687fa640
TS
9286 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9287 psr,
b5ff1b31
FB
9288 psr & (1 << 31) ? 'N' : '-',
9289 psr & (1 << 30) ? 'Z' : '-',
9290 psr & (1 << 29) ? 'C' : '-',
9291 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9292 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9293 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9294
5e3f878a 9295#if 0
b7bcbe95 9296 for (i = 0; i < 16; i++) {
8e96005d
FB
9297 d.d = env->vfp.regs[i];
9298 s0.i = d.l.lower;
9299 s1.i = d.l.upper;
a94a6abf
PB
9300 d0.f64 = d.d;
9301 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9302 i * 2, (int)s0.i, s0.s,
a94a6abf 9303 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9304 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9305 d0.d);
b7bcbe95 9306 }
40f137e1 9307 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9308#endif
2c0262af 9309}
a6b025d3 9310
d2856f1a
AJ
9311void gen_pc_load(CPUState *env, TranslationBlock *tb,
9312 unsigned long searched_pc, int pc_pos, void *puc)
9313{
9314 env->regs[15] = gen_opc_pc[pc_pos];
9315}