/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

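/* Write var into the CPSR fields selected by mask, via the cpsr_write
   helper.  var itself is not freed here.  */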
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

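/* Dual signed 16x16->32 multiply: a is set to the product of the low
   halfwords of a and b, and b to the product of the high halfwords.  */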
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
    tcg_temp_free_i64(tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
};

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

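/* Parallel (SIMD) add/subtract for the ARM encoding: op1 selects the variant
   (signed/unsigned GE-setting, saturating, or halving) and op2 the individual
   operation.  */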
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

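/* Generate a branch to 'label' if ARM condition code 'cc' holds, testing the
   cached NF/ZF/CF/VF flag fields.  */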
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

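/* 1 for the data processing operations that set N and Z directly from the
   result ("logical" ops), 0 for the arithmetic ops.  */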
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

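/* Memory access helpers: the gen_ld* functions return a newly allocated
   temporary holding the loaded value; the gen_st* functions free the value
   temporary after storing it.  */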
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

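/* Add the addressing-mode offset of an ARM single data transfer instruction
   (12-bit immediate or shifted register) to var.  */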
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

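/* Compute the effective address for an iwMMXt load/store into dest, handling
   the offset, pre-indexed and post-indexed forms (with optional base
   writeback).  Returns nonzero for an unhandled addressing mode.  */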
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

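/* Fetch the shift amount for an iwMMXt shift instruction into dest, either
   from a wCGRn control register or from the low 32 bits of wRn, masked with
   'mask'.  Returns nonzero if the control register number is invalid.  */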
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
2019 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2020 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2021 if (((insn >> 22) & 3) == 0)
2022 return 1;
18c9b560
AZ
2023 wrd = (insn >> 12) & 0xf;
2024 rd0 = (insn >> 16) & 0xf;
2025 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2026 tmp = new_tmp();
2027 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2028 dead_tmp(tmp);
18c9b560 2029 return 1;
da6b5335 2030 }
18c9b560 2031 switch ((insn >> 22) & 3) {
18c9b560 2032 case 1:
da6b5335 2033 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2034 break;
2035 case 2:
da6b5335 2036 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2037 break;
2038 case 3:
da6b5335 2039 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2040 break;
2041 }
da6b5335 2042 dead_tmp(tmp);
18c9b560
AZ
2043 gen_op_iwmmxt_movq_wRn_M0(wrd);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2046 break;
2047 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2048 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2049 if (((insn >> 22) & 3) == 0)
2050 return 1;
18c9b560
AZ
2051 wrd = (insn >> 12) & 0xf;
2052 rd0 = (insn >> 16) & 0xf;
2053 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2054 tmp = new_tmp();
18c9b560 2055 switch ((insn >> 22) & 3) {
18c9b560 2056 case 1:
da6b5335
FN
2057 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2058 dead_tmp(tmp);
18c9b560 2059 return 1;
da6b5335
FN
2060 }
2061 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2062 break;
2063 case 2:
da6b5335
FN
2064 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2065 dead_tmp(tmp);
18c9b560 2066 return 1;
da6b5335
FN
2067 }
2068 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2069 break;
2070 case 3:
da6b5335
FN
2071 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2072 dead_tmp(tmp);
18c9b560 2073 return 1;
da6b5335
FN
2074 }
2075 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2076 break;
2077 }
da6b5335 2078 dead_tmp(tmp);
18c9b560
AZ
2079 gen_op_iwmmxt_movq_wRn_M0(wrd);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2082 break;
2083 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2084 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2085 wrd = (insn >> 12) & 0xf;
2086 rd0 = (insn >> 16) & 0xf;
2087 rd1 = (insn >> 0) & 0xf;
2088 gen_op_iwmmxt_movq_M0_wRn(rd0);
2089 switch ((insn >> 22) & 3) {
2090 case 0:
2091 if (insn & (1 << 21))
2092 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2093 else
2094 gen_op_iwmmxt_minub_M0_wRn(rd1);
2095 break;
2096 case 1:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2099 else
2100 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2101 break;
2102 case 2:
2103 if (insn & (1 << 21))
2104 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2105 else
2106 gen_op_iwmmxt_minul_M0_wRn(rd1);
2107 break;
2108 case 3:
2109 return 1;
2110 }
2111 gen_op_iwmmxt_movq_wRn_M0(wrd);
2112 gen_op_iwmmxt_set_mup();
2113 break;
2114 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2115 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2116 wrd = (insn >> 12) & 0xf;
2117 rd0 = (insn >> 16) & 0xf;
2118 rd1 = (insn >> 0) & 0xf;
2119 gen_op_iwmmxt_movq_M0_wRn(rd0);
2120 switch ((insn >> 22) & 3) {
2121 case 0:
2122 if (insn & (1 << 21))
2123 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2124 else
2125 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2126 break;
2127 case 1:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2132 break;
2133 case 2:
2134 if (insn & (1 << 21))
2135 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2136 else
2137 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2138 break;
2139 case 3:
2140 return 1;
2141 }
2142 gen_op_iwmmxt_movq_wRn_M0(wrd);
2143 gen_op_iwmmxt_set_mup();
2144 break;
2145 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2146 case 0x402: case 0x502: case 0x602: case 0x702:
2147 wrd = (insn >> 12) & 0xf;
2148 rd0 = (insn >> 16) & 0xf;
2149 rd1 = (insn >> 0) & 0xf;
2150 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2151 tmp = tcg_const_i32((insn >> 20) & 3);
2152 iwmmxt_load_reg(cpu_V1, rd1);
2153 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2154 tcg_temp_free(tmp);
18c9b560
AZ
2155 gen_op_iwmmxt_movq_wRn_M0(wrd);
2156 gen_op_iwmmxt_set_mup();
2157 break;
2158 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2159 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2160 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2161 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2162 wrd = (insn >> 12) & 0xf;
2163 rd0 = (insn >> 16) & 0xf;
2164 rd1 = (insn >> 0) & 0xf;
2165 gen_op_iwmmxt_movq_M0_wRn(rd0);
2166 switch ((insn >> 20) & 0xf) {
2167 case 0x0:
2168 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2169 break;
2170 case 0x1:
2171 gen_op_iwmmxt_subub_M0_wRn(rd1);
2172 break;
2173 case 0x3:
2174 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2175 break;
2176 case 0x4:
2177 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2178 break;
2179 case 0x5:
2180 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2181 break;
2182 case 0x7:
2183 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2184 break;
2185 case 0x8:
2186 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2187 break;
2188 case 0x9:
2189 gen_op_iwmmxt_subul_M0_wRn(rd1);
2190 break;
2191 case 0xb:
2192 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2193 break;
2194 default:
2195 return 1;
2196 }
2197 gen_op_iwmmxt_movq_wRn_M0(wrd);
2198 gen_op_iwmmxt_set_mup();
2199 gen_op_iwmmxt_set_cup();
2200 break;
2201 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2202 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2203 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2204 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2205 wrd = (insn >> 12) & 0xf;
2206 rd0 = (insn >> 16) & 0xf;
2207 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2208 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2209 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2210 tcg_temp_free(tmp);
18c9b560
AZ
2211 gen_op_iwmmxt_movq_wRn_M0(wrd);
2212 gen_op_iwmmxt_set_mup();
2213 gen_op_iwmmxt_set_cup();
2214 break;
2215 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2216 case 0x418: case 0x518: case 0x618: case 0x718:
2217 case 0x818: case 0x918: case 0xa18: case 0xb18:
2218 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2219 wrd = (insn >> 12) & 0xf;
2220 rd0 = (insn >> 16) & 0xf;
2221 rd1 = (insn >> 0) & 0xf;
2222 gen_op_iwmmxt_movq_M0_wRn(rd0);
2223 switch ((insn >> 20) & 0xf) {
2224 case 0x0:
2225 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2226 break;
2227 case 0x1:
2228 gen_op_iwmmxt_addub_M0_wRn(rd1);
2229 break;
2230 case 0x3:
2231 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2232 break;
2233 case 0x4:
2234 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2235 break;
2236 case 0x5:
2237 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2238 break;
2239 case 0x7:
2240 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2241 break;
2242 case 0x8:
2243 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2244 break;
2245 case 0x9:
2246 gen_op_iwmmxt_addul_M0_wRn(rd1);
2247 break;
2248 case 0xb:
2249 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2250 break;
2251 default:
2252 return 1;
2253 }
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2257 break;
2258 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2259 case 0x408: case 0x508: case 0x608: case 0x708:
2260 case 0x808: case 0x908: case 0xa08: case 0xb08:
2261 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2262 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2263 return 1;
18c9b560
AZ
2264 wrd = (insn >> 12) & 0xf;
2265 rd0 = (insn >> 16) & 0xf;
2266 rd1 = (insn >> 0) & 0xf;
2267 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2268 switch ((insn >> 22) & 3) {
18c9b560
AZ
2269 case 1:
2270 if (insn & (1 << 21))
2271 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2272 else
2273 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2274 break;
2275 case 2:
2276 if (insn & (1 << 21))
2277 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2278 else
2279 gen_op_iwmmxt_packul_M0_wRn(rd1);
2280 break;
2281 case 3:
2282 if (insn & (1 << 21))
2283 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2284 else
2285 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2286 break;
2287 }
2288 gen_op_iwmmxt_movq_wRn_M0(wrd);
2289 gen_op_iwmmxt_set_mup();
2290 gen_op_iwmmxt_set_cup();
2291 break;
2292 case 0x201: case 0x203: case 0x205: case 0x207:
2293 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2294 case 0x211: case 0x213: case 0x215: case 0x217:
2295 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2296 wrd = (insn >> 5) & 0xf;
2297 rd0 = (insn >> 12) & 0xf;
2298 rd1 = (insn >> 0) & 0xf;
2299 if (rd0 == 0xf || rd1 == 0xf)
2300 return 1;
2301 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2302 tmp = load_reg(s, rd0);
2303 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2304 switch ((insn >> 16) & 0xf) {
2305 case 0x0: /* TMIA */
da6b5335 2306 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2307 break;
2308 case 0x8: /* TMIAPH */
da6b5335 2309 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2310 break;
2311 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2312 if (insn & (1 << 16))
da6b5335 2313 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2314 if (insn & (1 << 17))
da6b5335
FN
2315 tcg_gen_shri_i32(tmp2, tmp2, 16);
2316 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2317 break;
2318 default:
da6b5335
FN
2319 dead_tmp(tmp2);
2320 dead_tmp(tmp);
18c9b560
AZ
2321 return 1;
2322 }
da6b5335
FN
2323 dead_tmp(tmp2);
2324 dead_tmp(tmp);
18c9b560
AZ
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 break;
2328 default:
2329 return 1;
2330 }
2331
2332 return 0;
2333}
2334
2335/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2336 (i.e. an undefined instruction). */
2337static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2338{
2339 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2340 TCGv tmp, tmp2;
18c9b560
AZ
2341
2342 if ((insn & 0x0ff00f10) == 0x0e200010) {
2343 /* Multiply with Internal Accumulate Format */
2344 rd0 = (insn >> 12) & 0xf;
2345 rd1 = insn & 0xf;
2346 acc = (insn >> 5) & 7;
2347
2348 if (acc != 0)
2349 return 1;
2350
3a554c0f
FN
2351 tmp = load_reg(s, rd0);
2352 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2353 switch ((insn >> 16) & 0xf) {
2354 case 0x0: /* MIA */
3a554c0f 2355 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2356 break;
2357 case 0x8: /* MIAPH */
3a554c0f 2358 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2359 break;
2360 case 0xc: /* MIABB */
2361 case 0xd: /* MIABT */
2362 case 0xe: /* MIATB */
2363 case 0xf: /* MIATT */
18c9b560 2364 if (insn & (1 << 16))
3a554c0f 2365 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2366 if (insn & (1 << 17))
3a554c0f
FN
2367 tcg_gen_shri_i32(tmp2, tmp2, 16);
2368 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2369 break;
2370 default:
2371 return 1;
2372 }
3a554c0f
FN
2373 dead_tmp(tmp2);
2374 dead_tmp(tmp);
18c9b560
AZ
2375
2376 gen_op_iwmmxt_movq_wRn_M0(acc);
2377 return 0;
2378 }
2379
2380 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2381 /* Internal Accumulator Access Format */
2382 rdhi = (insn >> 16) & 0xf;
2383 rdlo = (insn >> 12) & 0xf;
2384 acc = insn & 7;
2385
2386 if (acc != 0)
2387 return 1;
2388
2389 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2390 iwmmxt_load_reg(cpu_V0, acc);
2391 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2392 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2393 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
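 /* The XScale internal accumulator is 40 bits wide, so only bits [39:32]
    survive in the high destination register (hence the
    (1 << (40 - 32)) - 1 mask below). */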
2394 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2395 } else { /* MAR */
3a554c0f
FN
2396 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2397 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2398 }
2399 return 0;
2400 }
2401
2402 return 1;
2403}
2404
c1713132
AZ
2405/* Disassemble a system coprocessor instruction. Return nonzero if the
2406 instruction is not defined. */
2407static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2408{
b75263d6 2409 TCGv tmp, tmp2;
c1713132
AZ
2410 uint32_t rd = (insn >> 12) & 0xf;
2411 uint32_t cp = (insn >> 8) & 0xf;
2412 if (IS_USER(s)) {
2413 return 1;
2414 }
2415
18c9b560 2416 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2417 if (!env->cp[cp].cp_read)
2418 return 1;
8984bd2e
PB
2419 gen_set_pc_im(s->pc);
2420 tmp = new_tmp();
b75263d6
JR
2421 tmp2 = tcg_const_i32(insn);
2422 gen_helper_get_cp(tmp, cpu_env, tmp2);
2423 tcg_temp_free(tmp2);
8984bd2e 2424 store_reg(s, rd, tmp);
c1713132
AZ
2425 } else {
2426 if (!env->cp[cp].cp_write)
2427 return 1;
8984bd2e
PB
2428 gen_set_pc_im(s->pc);
2429 tmp = load_reg(s, rd);
b75263d6
JR
2430 tmp2 = tcg_const_i32(insn);
2431 gen_helper_set_cp(cpu_env, tmp2, tmp);
2432 tcg_temp_free(tmp2);
a60de947 2433 dead_tmp(tmp);
c1713132
AZ
2434 }
2435 return 0;
2436}
2437
9ee6e8bb
PB
2438static int cp15_user_ok(uint32_t insn)
2439{
2440 int cpn = (insn >> 16) & 0xf;
2441 int cpm = insn & 0xf;
2442 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2443
2444 if (cpn == 13 && cpm == 0) {
2445 /* TLS register. */
2446 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2447 return 1;
2448 }
2449 if (cpn == 7) {
2450 /* ISB, DSB, DMB. */
2451 if ((cpm == 5 && op == 4)
2452 || (cpm == 10 && (op == 4 || op == 5)))
2453 return 1;
2454 }
2455 return 0;
2456}
2457
3f26c122
RV
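 /* Handle MRC/MCR accesses to the cp15 c13 thread ID (TLS) registers on
    V6K cores. Returns nonzero if the access was handled here, zero to
    fall through to the generic cp15 handling. */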
2458static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2459{
2460 TCGv tmp;
2461 int cpn = (insn >> 16) & 0xf;
2462 int cpm = insn & 0xf;
2463 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2464
2465 if (!arm_feature(env, ARM_FEATURE_V6K))
2466 return 0;
2467
2468 if (!(cpn == 13 && cpm == 0))
2469 return 0;
2470
2471 if (insn & ARM_CP_RW_BIT) {
2472 tmp = new_tmp();
2473 switch (op) {
2474 case 2:
2475 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, cp15.c13_tls1));
2476 break;
2477 case 3:
2478 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, cp15.c13_tls2));
2479 break;
2480 case 4:
2481 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, cp15.c13_tls3));
2482 break;
2483 default:
2484 dead_tmp(tmp);
2485 return 0;
2486 }
2487 store_reg(s, rd, tmp);
2488
2489 } else {
2490 tmp = load_reg(s, rd);
2491 switch (op) {
2492 case 2:
2493 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, cp15.c13_tls1));
2494 break;
2495 case 3:
2496 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, cp15.c13_tls2));
2497 break;
2498 case 4:
2499 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, cp15.c13_tls3));
2500 break;
2501 default:
2502 return 0;
2503 }
2504 dead_tmp(tmp);
2505 }
2506 return 1;
2507}
2508
b5ff1b31
FB
2509/* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2510 instruction is not defined. */
a90b7318 2511static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2512{
2513 uint32_t rd;
b75263d6 2514 TCGv tmp, tmp2;
b5ff1b31 2515
9ee6e8bb
PB
2516 /* M profile cores use memory mapped registers instead of cp15. */
2517 if (arm_feature(env, ARM_FEATURE_M))
2518 return 1;
2519
2520 if ((insn & (1 << 25)) == 0) {
2521 if (insn & (1 << 20)) {
2522 /* mrrc */
2523 return 1;
2524 }
2525 /* mcrr. Used for block cache operations, so implement as no-op. */
2526 return 0;
2527 }
2528 if ((insn & (1 << 4)) == 0) {
2529 /* cdp */
2530 return 1;
2531 }
2532 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2533 return 1;
2534 }
9332f9da
FB
2535 if ((insn & 0x0fff0fff) == 0x0e070f90
2536 || (insn & 0x0fff0fff) == 0x0e070f58) {
2537 /* Wait for interrupt. */
8984bd2e 2538 gen_set_pc_im(s->pc);
9ee6e8bb 2539 s->is_jmp = DISAS_WFI;
9332f9da
FB
2540 return 0;
2541 }
b5ff1b31 2542 rd = (insn >> 12) & 0xf;
3f26c122
RV
2543
2544 if (cp15_tls_load_store(env, s, insn, rd))
2545 return 0;
2546
b75263d6 2547 tmp2 = tcg_const_i32(insn);
18c9b560 2548 if (insn & ARM_CP_RW_BIT) {
8984bd2e 2549 tmp = new_tmp();
b75263d6 2550 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2551 /* If the destination register is r15 then the condition codes are set. */
2552 if (rd != 15)
8984bd2e
PB
2553 store_reg(s, rd, tmp);
2554 else
2555 dead_tmp(tmp);
b5ff1b31 2556 } else {
8984bd2e 2557 tmp = load_reg(s, rd);
b75263d6 2558 gen_helper_set_cp15(cpu_env, tmp2, tmp);
8984bd2e 2559 dead_tmp(tmp);
a90b7318
AZ
2560 /* Normally we would always end the TB here, but Linux
2561 * arch/arm/mach-pxa/sleep.S expects two instructions following
2562 * an MMU enable to execute from cache. Imitate this behaviour. */
2563 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2564 (insn & 0x0fff0fff) != 0x0e010f10)
2565 gen_lookup_tb(s);
b5ff1b31 2566 }
b75263d6 2567 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2568 return 0;
2569}
2570
9ee6e8bb
PB
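/* Helpers to extract VFP register numbers from an instruction.  Single
   precision registers use a 4+1 bit split field; for double precision the
   extra bit is only valid on VFP3 (32 D registers) and must be zero on
   earlier VFP versions, otherwise the instruction is undefined. */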
2571#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2572#define VFP_SREG(insn, bigbit, smallbit) \
2573 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2574#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2575 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2576 reg = (((insn) >> (bigbit)) & 0x0f) \
2577 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2578 } else { \
2579 if (insn & (1 << (smallbit))) \
2580 return 1; \
2581 reg = ((insn) >> (bigbit)) & 0x0f; \
2582 }} while (0)
2583
2584#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2585#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2586#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2587#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2588#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2589#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2590
4373f3ce
PB
2591/* Move between integer and VFP cores. */
2592static TCGv gen_vfp_mrs(void)
2593{
2594 TCGv tmp = new_tmp();
2595 tcg_gen_mov_i32(tmp, cpu_F0s);
2596 return tmp;
2597}
2598
2599static void gen_vfp_msr(TCGv tmp)
2600{
2601 tcg_gen_mov_i32(cpu_F0s, tmp);
2602 dead_tmp(tmp);
2603}
2604
9ee6e8bb
PB
2605static inline int
2606vfp_enabled(CPUState * env)
2607{
2608 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2609}
2610
ad69471c
PB
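/* Broadcast a byte or halfword of 'var' across the whole 32-bit value;
   used when duplicating a scalar into all lanes (e.g. for VDUP). */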
2611static void gen_neon_dup_u8(TCGv var, int shift)
2612{
2613 TCGv tmp = new_tmp();
2614 if (shift)
2615 tcg_gen_shri_i32(var, var, shift);
86831435 2616 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2617 tcg_gen_shli_i32(tmp, var, 8);
2618 tcg_gen_or_i32(var, var, tmp);
2619 tcg_gen_shli_i32(tmp, var, 16);
2620 tcg_gen_or_i32(var, var, tmp);
2621 dead_tmp(tmp);
2622}
2623
2624static void gen_neon_dup_low16(TCGv var)
2625{
2626 TCGv tmp = new_tmp();
86831435 2627 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 dead_tmp(tmp);
2631}
2632
2633static void gen_neon_dup_high16(TCGv var)
2634{
2635 TCGv tmp = new_tmp();
2636 tcg_gen_andi_i32(var, var, 0xffff0000);
2637 tcg_gen_shri_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 dead_tmp(tmp);
2640}
2641
b7bcbe95
FB
2642/* Disassemble a VFP instruction. Returns nonzero if an error occured
2643 (ie. an undefined instruction). */
2644static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2645{
2646 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2647 int dp, veclen;
312eea9f 2648 TCGv addr;
4373f3ce 2649 TCGv tmp;
ad69471c 2650 TCGv tmp2;
b7bcbe95 2651
40f137e1
PB
2652 if (!arm_feature(env, ARM_FEATURE_VFP))
2653 return 1;
2654
9ee6e8bb
PB
2655 if (!vfp_enabled(env)) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2657 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2658 return 1;
2659 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2660 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2661 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2662 return 1;
2663 }
b7bcbe95
FB
2664 dp = ((insn & 0xf00) == 0xb00);
2665 switch ((insn >> 24) & 0xf) {
2666 case 0xe:
2667 if (insn & (1 << 4)) {
2668 /* single register transfer */
b7bcbe95
FB
2669 rd = (insn >> 12) & 0xf;
2670 if (dp) {
9ee6e8bb
PB
2671 int size;
2672 int pass;
2673
2674 VFP_DREG_N(rn, insn);
2675 if (insn & 0xf)
b7bcbe95 2676 return 1;
9ee6e8bb
PB
2677 if (insn & 0x00c00060
2678 && !arm_feature(env, ARM_FEATURE_NEON))
2679 return 1;
2680
2681 pass = (insn >> 21) & 1;
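 /* Decode the element size and bit offset within the selected word:
    bit 22 selects a byte element, bit 5 a halfword, otherwise the
    whole 32-bit word is transferred. */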
2682 if (insn & (1 << 22)) {
2683 size = 0;
2684 offset = ((insn >> 5) & 3) * 8;
2685 } else if (insn & (1 << 5)) {
2686 size = 1;
2687 offset = (insn & (1 << 6)) ? 16 : 0;
2688 } else {
2689 size = 2;
2690 offset = 0;
2691 }
18c9b560 2692 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2693 /* vfp->arm */
ad69471c 2694 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2695 switch (size) {
2696 case 0:
9ee6e8bb 2697 if (offset)
ad69471c 2698 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2699 if (insn & (1 << 23))
ad69471c 2700 gen_uxtb(tmp);
9ee6e8bb 2701 else
ad69471c 2702 gen_sxtb(tmp);
9ee6e8bb
PB
2703 break;
2704 case 1:
9ee6e8bb
PB
2705 if (insn & (1 << 23)) {
2706 if (offset) {
ad69471c 2707 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2708 } else {
ad69471c 2709 gen_uxth(tmp);
9ee6e8bb
PB
2710 }
2711 } else {
2712 if (offset) {
ad69471c 2713 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2714 } else {
ad69471c 2715 gen_sxth(tmp);
9ee6e8bb
PB
2716 }
2717 }
2718 break;
2719 case 2:
9ee6e8bb
PB
2720 break;
2721 }
ad69471c 2722 store_reg(s, rd, tmp);
b7bcbe95
FB
2723 } else {
2724 /* arm->vfp */
ad69471c 2725 tmp = load_reg(s, rd);
9ee6e8bb
PB
2726 if (insn & (1 << 23)) {
2727 /* VDUP */
2728 if (size == 0) {
ad69471c 2729 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2730 } else if (size == 1) {
ad69471c 2731 gen_neon_dup_low16(tmp);
9ee6e8bb 2732 }
cbbccffc
PB
2733 for (n = 0; n <= pass * 2; n++) {
2734 tmp2 = new_tmp();
2735 tcg_gen_mov_i32(tmp2, tmp);
2736 neon_store_reg(rn, n, tmp2);
2737 }
2738 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2739 } else {
2740 /* VMOV */
2741 switch (size) {
2742 case 0:
ad69471c
PB
2743 tmp2 = neon_load_reg(rn, pass);
2744 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2745 dead_tmp(tmp2);
9ee6e8bb
PB
2746 break;
2747 case 1:
ad69471c
PB
2748 tmp2 = neon_load_reg(rn, pass);
2749 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2750 dead_tmp(tmp2);
9ee6e8bb
PB
2751 break;
2752 case 2:
9ee6e8bb
PB
2753 break;
2754 }
ad69471c 2755 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2756 }
b7bcbe95 2757 }
9ee6e8bb
PB
2758 } else { /* !dp */
2759 if ((insn & 0x6f) != 0x00)
2760 return 1;
2761 rn = VFP_SREG_N(insn);
18c9b560 2762 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2763 /* vfp->arm */
2764 if (insn & (1 << 21)) {
2765 /* system register */
40f137e1 2766 rn >>= 1;
9ee6e8bb 2767
b7bcbe95 2768 switch (rn) {
40f137e1 2769 case ARM_VFP_FPSID:
4373f3ce 2770 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2771 VFP3 restricts all id registers to privileged
2772 accesses. */
2773 if (IS_USER(s)
2774 && arm_feature(env, ARM_FEATURE_VFP3))
2775 return 1;
4373f3ce 2776 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2777 break;
40f137e1 2778 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2779 if (IS_USER(s))
2780 return 1;
4373f3ce 2781 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2782 break;
40f137e1
PB
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2785 /* Not present in VFP3. */
2786 if (IS_USER(s)
2787 || arm_feature(env, ARM_FEATURE_VFP3))
2788 return 1;
4373f3ce 2789 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2790 break;
40f137e1 2791 case ARM_VFP_FPSCR:
601d70b9 2792 if (rd == 15) {
4373f3ce
PB
2793 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2794 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2795 } else {
2796 tmp = new_tmp();
2797 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2798 }
b7bcbe95 2799 break;
9ee6e8bb
PB
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 if (IS_USER(s)
2803 || !arm_feature(env, ARM_FEATURE_VFP3))
2804 return 1;
4373f3ce 2805 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2806 break;
b7bcbe95
FB
2807 default:
2808 return 1;
2809 }
2810 } else {
2811 gen_mov_F0_vreg(0, rn);
4373f3ce 2812 tmp = gen_vfp_mrs();
b7bcbe95
FB
2813 }
2814 if (rd == 15) {
b5ff1b31 2815 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2816 gen_set_nzcv(tmp);
2817 dead_tmp(tmp);
2818 } else {
2819 store_reg(s, rd, tmp);
2820 }
b7bcbe95
FB
2821 } else {
2822 /* arm->vfp */
4373f3ce 2823 tmp = load_reg(s, rd);
b7bcbe95 2824 if (insn & (1 << 21)) {
40f137e1 2825 rn >>= 1;
b7bcbe95
FB
2826 /* system register */
2827 switch (rn) {
40f137e1 2828 case ARM_VFP_FPSID:
9ee6e8bb
PB
2829 case ARM_VFP_MVFR0:
2830 case ARM_VFP_MVFR1:
b7bcbe95
FB
2831 /* Writes are ignored. */
2832 break;
40f137e1 2833 case ARM_VFP_FPSCR:
4373f3ce
PB
2834 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2835 dead_tmp(tmp);
b5ff1b31 2836 gen_lookup_tb(s);
b7bcbe95 2837 break;
40f137e1 2838 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2839 if (IS_USER(s))
2840 return 1;
71b3c3de
JR
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only */
2843 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2844 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2845 gen_lookup_tb(s);
2846 break;
2847 case ARM_VFP_FPINST:
2848 case ARM_VFP_FPINST2:
4373f3ce 2849 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2850 break;
b7bcbe95
FB
2851 default:
2852 return 1;
2853 }
2854 } else {
4373f3ce 2855 gen_vfp_msr(tmp);
b7bcbe95
FB
2856 gen_mov_vreg_F0(0, rn);
2857 }
2858 }
2859 }
2860 } else {
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2864 if (dp) {
2865 if (op == 15) {
2866 /* rn is opcode */
2867 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2868 } else {
2869 /* rn is register number */
9ee6e8bb 2870 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2871 }
2872
2873 if (op == 15 && (rn == 15 || rn > 17)) {
2874 /* Integer or single precision destination. */
9ee6e8bb 2875 rd = VFP_SREG_D(insn);
b7bcbe95 2876 } else {
9ee6e8bb 2877 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2878 }
2879
2880 if (op == 15 && (rn == 16 || rn == 17)) {
2881 /* Integer source. */
2882 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2883 } else {
9ee6e8bb 2884 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2885 }
2886 } else {
9ee6e8bb 2887 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2888 if (op == 15 && rn == 15) {
2889 /* Double precision destination. */
9ee6e8bb
PB
2890 VFP_DREG_D(rd, insn);
2891 } else {
2892 rd = VFP_SREG_D(insn);
2893 }
2894 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2895 }
2896
2897 veclen = env->vfp.vec_len;
2898 if (op == 15 && rn > 3)
2899 veclen = 0;
2900
2901 /* Shut up compiler warnings. */
2902 delta_m = 0;
2903 delta_d = 0;
2904 bank_mask = 0;
3b46e624 2905
b7bcbe95
FB
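 /* VFP short-vector mode: when the FPSCR vector length is nonzero, a
    scalar opcode is iterated over a bank of registers; bank_mask,
    delta_d and delta_m describe how the register numbers advance on
    each iteration. */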
2906 if (veclen > 0) {
2907 if (dp)
2908 bank_mask = 0xc;
2909 else
2910 bank_mask = 0x18;
2911
2912 /* Figure out what type of vector operation this is. */
2913 if ((rd & bank_mask) == 0) {
2914 /* scalar */
2915 veclen = 0;
2916 } else {
2917 if (dp)
2918 delta_d = (env->vfp.vec_stride >> 1) + 1;
2919 else
2920 delta_d = env->vfp.vec_stride + 1;
2921
2922 if ((rm & bank_mask) == 0) {
2923 /* mixed scalar/vector */
2924 delta_m = 0;
2925 } else {
2926 /* vector */
2927 delta_m = delta_d;
2928 }
2929 }
2930 }
2931
2932 /* Load the initial operands. */
2933 if (op == 15) {
2934 switch (rn) {
2935 case 16:
2936 case 17:
2937 /* Integer source */
2938 gen_mov_F0_vreg(0, rm);
2939 break;
2940 case 8:
2941 case 9:
2942 /* Compare */
2943 gen_mov_F0_vreg(dp, rd);
2944 gen_mov_F1_vreg(dp, rm);
2945 break;
2946 case 10:
2947 case 11:
2948 /* Compare with zero */
2949 gen_mov_F0_vreg(dp, rd);
2950 gen_vfp_F1_ld0(dp);
2951 break;
9ee6e8bb
PB
2952 case 20:
2953 case 21:
2954 case 22:
2955 case 23:
644ad806
PB
2956 case 28:
2957 case 29:
2958 case 30:
2959 case 31:
9ee6e8bb
PB
2960 /* Source and destination the same. */
2961 gen_mov_F0_vreg(dp, rd);
2962 break;
b7bcbe95
FB
2963 default:
2964 /* One source operand. */
2965 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2966 break;
b7bcbe95
FB
2967 }
2968 } else {
2969 /* Two source operands. */
2970 gen_mov_F0_vreg(dp, rn);
2971 gen_mov_F1_vreg(dp, rm);
2972 }
2973
2974 for (;;) {
2975 /* Perform the calculation. */
2976 switch (op) {
2977 case 0: /* mac: fd + (fn * fm) */
2978 gen_vfp_mul(dp);
2979 gen_mov_F1_vreg(dp, rd);
2980 gen_vfp_add(dp);
2981 break;
2982 case 1: /* nmac: fd - (fn * fm) */
2983 gen_vfp_mul(dp);
2984 gen_vfp_neg(dp);
2985 gen_mov_F1_vreg(dp, rd);
2986 gen_vfp_add(dp);
2987 break;
2988 case 2: /* msc: -fd + (fn * fm) */
2989 gen_vfp_mul(dp);
2990 gen_mov_F1_vreg(dp, rd);
2991 gen_vfp_sub(dp);
2992 break;
2993 case 3: /* nmsc: -fd - (fn * fm) */
2994 gen_vfp_mul(dp);
b7bcbe95 2995 gen_vfp_neg(dp);
c9fb531a
PB
2996 gen_mov_F1_vreg(dp, rd);
2997 gen_vfp_sub(dp);
b7bcbe95
FB
2998 break;
2999 case 4: /* mul: fn * fm */
3000 gen_vfp_mul(dp);
3001 break;
3002 case 5: /* nmul: -(fn * fm) */
3003 gen_vfp_mul(dp);
3004 gen_vfp_neg(dp);
3005 break;
3006 case 6: /* add: fn + fm */
3007 gen_vfp_add(dp);
3008 break;
3009 case 7: /* sub: fn - fm */
3010 gen_vfp_sub(dp);
3011 break;
3012 case 8: /* div: fn / fm */
3013 gen_vfp_div(dp);
3014 break;
9ee6e8bb
PB
3015 case 14: /* fconst */
3016 if (!arm_feature(env, ARM_FEATURE_VFP3))
3017 return 1;
3018
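 /* Expand the 8-bit VFP3 immediate: insn bit 19 gives the sign, one bit
    selects the exponent range and the remaining bits form the top of
    the fraction; the constant occupies the high part of the destination. */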
3019 n = (insn << 12) & 0x80000000;
3020 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3021 if (dp) {
3022 if (i & 0x40)
3023 i |= 0x3f80;
3024 else
3025 i |= 0x4000;
3026 n |= i << 16;
4373f3ce 3027 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3028 } else {
3029 if (i & 0x40)
3030 i |= 0x780;
3031 else
3032 i |= 0x800;
3033 n |= i << 19;
5b340b51 3034 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3035 }
9ee6e8bb 3036 break;
b7bcbe95
FB
3037 case 15: /* extension space */
3038 switch (rn) {
3039 case 0: /* cpy */
3040 /* no-op */
3041 break;
3042 case 1: /* abs */
3043 gen_vfp_abs(dp);
3044 break;
3045 case 2: /* neg */
3046 gen_vfp_neg(dp);
3047 break;
3048 case 3: /* sqrt */
3049 gen_vfp_sqrt(dp);
3050 break;
60011498
PB
3051 case 4: /* vcvtb.f32.f16 */
3052 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3053 return 1;
3054 tmp = gen_vfp_mrs();
3055 tcg_gen_ext16u_i32(tmp, tmp);
3056 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3057 dead_tmp(tmp);
3058 break;
3059 case 5: /* vcvtt.f32.f16 */
3060 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3061 return 1;
3062 tmp = gen_vfp_mrs();
3063 tcg_gen_shri_i32(tmp, tmp, 16);
3064 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3065 dead_tmp(tmp);
3066 break;
3067 case 6: /* vcvtb.f16.f32 */
3068 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3069 return 1;
3070 tmp = new_tmp();
3071 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3072 gen_mov_F0_vreg(0, rd);
3073 tmp2 = gen_vfp_mrs();
3074 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3075 tcg_gen_or_i32(tmp, tmp, tmp2);
3076 dead_tmp(tmp2);
3077 gen_vfp_msr(tmp);
3078 break;
3079 case 7: /* vcvtt.f16.f32 */
3080 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3081 return 1;
3082 tmp = new_tmp();
3083 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3084 tcg_gen_shli_i32(tmp, tmp, 16);
3085 gen_mov_F0_vreg(0, rd);
3086 tmp2 = gen_vfp_mrs();
3087 tcg_gen_ext16u_i32(tmp2, tmp2);
3088 tcg_gen_or_i32(tmp, tmp, tmp2);
3089 dead_tmp(tmp2);
3090 gen_vfp_msr(tmp);
3091 break;
b7bcbe95
FB
3092 case 8: /* cmp */
3093 gen_vfp_cmp(dp);
3094 break;
3095 case 9: /* cmpe */
3096 gen_vfp_cmpe(dp);
3097 break;
3098 case 10: /* cmpz */
3099 gen_vfp_cmp(dp);
3100 break;
3101 case 11: /* cmpez */
3102 gen_vfp_F1_ld0(dp);
3103 gen_vfp_cmpe(dp);
3104 break;
3105 case 15: /* single<->double conversion */
3106 if (dp)
4373f3ce 3107 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3108 else
4373f3ce 3109 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3110 break;
3111 case 16: /* fuito */
3112 gen_vfp_uito(dp);
3113 break;
3114 case 17: /* fsito */
3115 gen_vfp_sito(dp);
3116 break;
9ee6e8bb
PB
3117 case 20: /* fshto */
3118 if (!arm_feature(env, ARM_FEATURE_VFP3))
3119 return 1;
644ad806 3120 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3121 break;
3122 case 21: /* fslto */
3123 if (!arm_feature(env, ARM_FEATURE_VFP3))
3124 return 1;
644ad806 3125 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3126 break;
3127 case 22: /* fuhto */
3128 if (!arm_feature(env, ARM_FEATURE_VFP3))
3129 return 1;
644ad806 3130 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3131 break;
3132 case 23: /* fulto */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
644ad806 3135 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3136 break;
b7bcbe95
FB
3137 case 24: /* ftoui */
3138 gen_vfp_toui(dp);
3139 break;
3140 case 25: /* ftouiz */
3141 gen_vfp_touiz(dp);
3142 break;
3143 case 26: /* ftosi */
3144 gen_vfp_tosi(dp);
3145 break;
3146 case 27: /* ftosiz */
3147 gen_vfp_tosiz(dp);
3148 break;
9ee6e8bb
PB
3149 case 28: /* ftosh */
3150 if (!arm_feature(env, ARM_FEATURE_VFP3))
3151 return 1;
644ad806 3152 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3153 break;
3154 case 29: /* ftosl */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
644ad806 3157 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3158 break;
3159 case 30: /* ftouh */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
644ad806 3162 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3163 break;
3164 case 31: /* ftoul */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
644ad806 3167 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3168 break;
b7bcbe95
FB
3169 default: /* undefined */
3170 printf ("rn:%d\n", rn);
3171 return 1;
3172 }
3173 break;
3174 default: /* undefined */
3175 printf ("op:%d\n", op);
3176 return 1;
3177 }
3178
3179 /* Write back the result. */
3180 if (op == 15 && (rn >= 8 && rn <= 11))
3181 ; /* Comparison, do nothing. */
3182 else if (op == 15 && rn > 17)
3183 /* Integer result. */
3184 gen_mov_vreg_F0(0, rd);
3185 else if (op == 15 && rn == 15)
3186 /* conversion */
3187 gen_mov_vreg_F0(!dp, rd);
3188 else
3189 gen_mov_vreg_F0(dp, rd);
3190
3191 /* break out of the loop if we have finished */
3192 if (veclen == 0)
3193 break;
3194
3195 if (op == 15 && delta_m == 0) {
3196 /* single source one-many */
3197 while (veclen--) {
3198 rd = ((rd + delta_d) & (bank_mask - 1))
3199 | (rd & bank_mask);
3200 gen_mov_vreg_F0(dp, rd);
3201 }
3202 break;
3203 }
3204 /* Set up the next operands. */
3205 veclen--;
3206 rd = ((rd + delta_d) & (bank_mask - 1))
3207 | (rd & bank_mask);
3208
3209 if (op == 15) {
3210 /* One source operand. */
3211 rm = ((rm + delta_m) & (bank_mask - 1))
3212 | (rm & bank_mask);
3213 gen_mov_F0_vreg(dp, rm);
3214 } else {
3215 /* Two source operands. */
3216 rn = ((rn + delta_d) & (bank_mask - 1))
3217 | (rn & bank_mask);
3218 gen_mov_F0_vreg(dp, rn);
3219 if (delta_m) {
3220 rm = ((rm + delta_m) & (bank_mask - 1))
3221 | (rm & bank_mask);
3222 gen_mov_F1_vreg(dp, rm);
3223 }
3224 }
3225 }
3226 }
3227 break;
3228 case 0xc:
3229 case 0xd:
9ee6e8bb 3230 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3231 /* two-register transfer */
3232 rn = (insn >> 16) & 0xf;
3233 rd = (insn >> 12) & 0xf;
3234 if (dp) {
9ee6e8bb
PB
3235 VFP_DREG_M(rm, insn);
3236 } else {
3237 rm = VFP_SREG_M(insn);
3238 }
b7bcbe95 3239
18c9b560 3240 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3241 /* vfp->arm */
3242 if (dp) {
4373f3ce
PB
3243 gen_mov_F0_vreg(0, rm * 2);
3244 tmp = gen_vfp_mrs();
3245 store_reg(s, rd, tmp);
3246 gen_mov_F0_vreg(0, rm * 2 + 1);
3247 tmp = gen_vfp_mrs();
3248 store_reg(s, rn, tmp);
b7bcbe95
FB
3249 } else {
3250 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3251 tmp = gen_vfp_mrs();
3252 store_reg(s, rn, tmp);
b7bcbe95 3253 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3254 tmp = gen_vfp_mrs();
3255 store_reg(s, rd, tmp);
b7bcbe95
FB
3256 }
3257 } else {
3258 /* arm->vfp */
3259 if (dp) {
4373f3ce
PB
3260 tmp = load_reg(s, rd);
3261 gen_vfp_msr(tmp);
3262 gen_mov_vreg_F0(0, rm * 2);
3263 tmp = load_reg(s, rn);
3264 gen_vfp_msr(tmp);
3265 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3266 } else {
4373f3ce
PB
3267 tmp = load_reg(s, rn);
3268 gen_vfp_msr(tmp);
b7bcbe95 3269 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3270 tmp = load_reg(s, rd);
3271 gen_vfp_msr(tmp);
b7bcbe95
FB
3272 gen_mov_vreg_F0(0, rm + 1);
3273 }
3274 }
3275 } else {
3276 /* Load/store */
3277 rn = (insn >> 16) & 0xf;
3278 if (dp)
9ee6e8bb 3279 VFP_DREG_D(rd, insn);
b7bcbe95 3280 else
9ee6e8bb
PB
3281 rd = VFP_SREG_D(insn);
3282 if (s->thumb && rn == 15) {
312eea9f
FN
3283 addr = new_tmp();
3284 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3285 } else {
312eea9f 3286 addr = load_reg(s, rn);
9ee6e8bb 3287 }
b7bcbe95
FB
3288 if ((insn & 0x01200000) == 0x01000000) {
3289 /* Single load/store */
3290 offset = (insn & 0xff) << 2;
3291 if ((insn & (1 << 23)) == 0)
3292 offset = -offset;
312eea9f 3293 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3294 if (insn & (1 << 20)) {
312eea9f 3295 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3296 gen_mov_vreg_F0(dp, rd);
3297 } else {
3298 gen_mov_F0_vreg(dp, rd);
312eea9f 3299 gen_vfp_st(s, dp, addr);
b7bcbe95 3300 }
312eea9f 3301 dead_tmp(addr);
b7bcbe95
FB
3302 } else {
3303 /* load/store multiple */
3304 if (dp)
3305 n = (insn >> 1) & 0x7f;
3306 else
3307 n = insn & 0xff;
3308
3309 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3310 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3311
3312 if (dp)
3313 offset = 8;
3314 else
3315 offset = 4;
3316 for (i = 0; i < n; i++) {
18c9b560 3317 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3318 /* load */
312eea9f 3319 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3320 gen_mov_vreg_F0(dp, rd + i);
3321 } else {
3322 /* store */
3323 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3324 gen_vfp_st(s, dp, addr);
b7bcbe95 3325 }
312eea9f 3326 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3327 }
3328 if (insn & (1 << 21)) {
3329 /* writeback */
3330 if (insn & (1 << 24))
3331 offset = -offset * n;
3332 else if (dp && (insn & 1))
3333 offset = 4;
3334 else
3335 offset = 0;
3336
3337 if (offset != 0)
312eea9f
FN
3338 tcg_gen_addi_i32(addr, addr, offset);
3339 store_reg(s, rn, addr);
3340 } else {
3341 dead_tmp(addr);
b7bcbe95
FB
3342 }
3343 }
3344 }
3345 break;
3346 default:
3347 /* Should never happen. */
3348 return 1;
3349 }
3350 return 0;
3351}
3352
6e256c93 3353static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3354{
6e256c93
FB
3355 TranslationBlock *tb;
3356
3357 tb = s->tb;
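 /* Direct block chaining is only safe when the destination lies in the
    same guest page as this TB; otherwise just set the PC and return to
    the main loop. */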
3358 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3359 tcg_gen_goto_tb(n);
8984bd2e 3360 gen_set_pc_im(dest);
57fec1fe 3361 tcg_gen_exit_tb((long)tb + n);
6e256c93 3362 } else {
8984bd2e 3363 gen_set_pc_im(dest);
57fec1fe 3364 tcg_gen_exit_tb(0);
6e256c93 3365 }
c53be334
FB
3366}
3367
8aaca4c0
FB
3368static inline void gen_jmp (DisasContext *s, uint32_t dest)
3369{
551bd27f 3370 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3371 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3372 if (s->thumb)
d9ba4830
PB
3373 dest |= 1;
3374 gen_bx_im(s, dest);
8aaca4c0 3375 } else {
6e256c93 3376 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3377 s->is_jmp = DISAS_TB_JUMP;
3378 }
3379}
3380
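/* 16x16->32 signed multiply, taking the top or bottom halfword of each
   operand as selected by x and y (used for the signed multiply (xy)
   instructions). */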
d9ba4830 3381static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3382{
ee097184 3383 if (x)
d9ba4830 3384 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3385 else
d9ba4830 3386 gen_sxth(t0);
ee097184 3387 if (y)
d9ba4830 3388 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3389 else
d9ba4830
PB
3390 gen_sxth(t1);
3391 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3392}
3393
3394/* Return the mask of PSR bits set by an MSR instruction. */
9ee6e8bb 3395static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3396 uint32_t mask;
3397
3398 mask = 0;
3399 if (flags & (1 << 0))
3400 mask |= 0xff;
3401 if (flags & (1 << 1))
3402 mask |= 0xff00;
3403 if (flags & (1 << 2))
3404 mask |= 0xff0000;
3405 if (flags & (1 << 3))
3406 mask |= 0xff000000;
9ee6e8bb 3407
2ae23e75 3408 /* Mask out undefined bits. */
9ee6e8bb
PB
3409 mask &= ~CPSR_RESERVED;
3410 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3411 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3412 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3413 mask &= ~CPSR_IT;
9ee6e8bb 3414 /* Mask out execution state bits. */
2ae23e75 3415 if (!spsr)
e160c51c 3416 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3417 /* Mask out privileged bits. */
3418 if (IS_USER(s))
9ee6e8bb 3419 mask &= CPSR_USER;
b5ff1b31
FB
3420 return mask;
3421}
3422
2fbac54b
FN
3423/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3424static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3425{
d9ba4830 3426 TCGv tmp;
b5ff1b31
FB
3427 if (spsr) {
3428 /* ??? This is also undefined in system mode. */
3429 if (IS_USER(s))
3430 return 1;
d9ba4830
PB
3431
3432 tmp = load_cpu_field(spsr);
3433 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3434 tcg_gen_andi_i32(t0, t0, mask);
3435 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3436 store_cpu_field(tmp, spsr);
b5ff1b31 3437 } else {
2fbac54b 3438 gen_set_cpsr(t0, mask);
b5ff1b31 3439 }
2fbac54b 3440 dead_tmp(t0);
b5ff1b31
FB
3441 gen_lookup_tb(s);
3442 return 0;
3443}
3444
2fbac54b
FN
3445/* Returns nonzero if access to the PSR is not permitted. */
3446static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3447{
3448 TCGv tmp;
3449 tmp = new_tmp();
3450 tcg_gen_movi_i32(tmp, val);
3451 return gen_set_psr(s, mask, spsr, tmp);
3452}
3453
e9bb4aa9
JR
3454/* Generate an old-style exception return. Marks pc as dead. */
3455static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3456{
d9ba4830 3457 TCGv tmp;
e9bb4aa9 3458 store_reg(s, 15, pc);
d9ba4830
PB
3459 tmp = load_cpu_field(spsr);
3460 gen_set_cpsr(tmp, 0xffffffff);
3461 dead_tmp(tmp);
b5ff1b31
FB
3462 s->is_jmp = DISAS_UPDATE;
3463}
3464
b0109805
PB
3465/* Generate a v6 exception return. Marks both values as dead. */
3466static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3467{
b0109805
PB
3468 gen_set_cpsr(cpsr, 0xffffffff);
3469 dead_tmp(cpsr);
3470 store_reg(s, 15, pc);
9ee6e8bb
PB
3471 s->is_jmp = DISAS_UPDATE;
3472}
3b46e624 3473
9ee6e8bb
PB
3474static inline void
3475gen_set_condexec (DisasContext *s)
3476{
3477 if (s->condexec_mask) {
8f01245e
PB
3478 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3479 TCGv tmp = new_tmp();
3480 tcg_gen_movi_i32(tmp, val);
d9ba4830 3481 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3482 }
3483}
3b46e624 3484
9ee6e8bb
PB
3485static void gen_nop_hint(DisasContext *s, int val)
3486{
3487 switch (val) {
3488 case 3: /* wfi */
8984bd2e 3489 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3490 s->is_jmp = DISAS_WFI;
3491 break;
3492 case 2: /* wfe */
3493 case 4: /* sev */
3494 /* TODO: Implement SEV and WFE. May help SMP performance. */
3495 default: /* nop */
3496 break;
3497 }
3498}
99c475ab 3499
ad69471c 3500#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3501
dd8fbd78 3502static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3503{
3504 switch (size) {
dd8fbd78
FN
3505 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3506 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3507 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3508 default: return 1;
3509 }
3510 return 0;
3511}
3512
dd8fbd78 3513static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3514{
3515 switch (size) {
dd8fbd78
FN
3516 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3517 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3518 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3519 default: return;
3520 }
3521}
3522
3523/* 32-bit pairwise ops end up the same as the elementwise versions. */
3524#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3525#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3526#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3527#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3528
3529/* FIXME: This is wrong. They set the wrong overflow bit. */
3530#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3531#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3532#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3533#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3534
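/* Call the signed or unsigned 8/16/32-bit variant of a NEON helper,
   chosen from the element size and the 'u' (unsigned) bit of the
   instruction. */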
3535#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3536 switch ((size << 1) | u) { \
3537 case 0: \
dd8fbd78 3538 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3539 break; \
3540 case 1: \
dd8fbd78 3541 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3542 break; \
3543 case 2: \
dd8fbd78 3544 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3545 break; \
3546 case 3: \
dd8fbd78 3547 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3548 break; \
3549 case 4: \
dd8fbd78 3550 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3551 break; \
3552 case 5: \
dd8fbd78 3553 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3554 break; \
3555 default: return 1; \
3556 }} while (0)
9ee6e8bb
PB
3557
3558#define GEN_NEON_INTEGER_OP(name) do { \
3559 switch ((size << 1) | u) { \
ad69471c 3560 case 0: \
dd8fbd78 3561 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3562 break; \
3563 case 1: \
dd8fbd78 3564 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3565 break; \
3566 case 2: \
dd8fbd78 3567 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3568 break; \
3569 case 3: \
dd8fbd78 3570 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3571 break; \
3572 case 4: \
dd8fbd78 3573 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3574 break; \
3575 case 5: \
dd8fbd78 3576 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3577 break; \
9ee6e8bb
PB
3578 default: return 1; \
3579 }} while (0)
3580
dd8fbd78 3581static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3582{
dd8fbd78
FN
3583 TCGv tmp = new_tmp();
3584 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3585 return tmp;
9ee6e8bb
PB
3586}
3587
dd8fbd78 3588static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3589{
dd8fbd78
FN
3590 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3591 dead_tmp(var);
9ee6e8bb
PB
3592}
3593
dd8fbd78 3594static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3595{
dd8fbd78 3596 TCGv tmp;
9ee6e8bb 3597 if (size == 1) {
dd8fbd78 3598 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3599 } else {
dd8fbd78
FN
3600 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3601 if (reg & 1) {
3602 gen_neon_dup_low16(tmp);
3603 } else {
3604 gen_neon_dup_high16(tmp);
3605 }
9ee6e8bb 3606 }
dd8fbd78 3607 return tmp;
9ee6e8bb
PB
3608}
3609
19457615
FN
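/* VUZP/VZIP style helpers operating on packed 8-bit or 16-bit elements
   held in two 32-bit values: 'unzip' separates even and odd elements,
   'zip' interleaves them, using only shifts, masks and ORs. */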
3610static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3611{
3612 TCGv rd, rm, tmp;
3613
3614 rd = new_tmp();
3615 rm = new_tmp();
3616 tmp = new_tmp();
3617
3618 tcg_gen_andi_i32(rd, t0, 0xff);
3619 tcg_gen_shri_i32(tmp, t0, 8);
3620 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3621 tcg_gen_or_i32(rd, rd, tmp);
3622 tcg_gen_shli_i32(tmp, t1, 16);
3623 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3624 tcg_gen_or_i32(rd, rd, tmp);
3625 tcg_gen_shli_i32(tmp, t1, 8);
3626 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3627 tcg_gen_or_i32(rd, rd, tmp);
3628
3629 tcg_gen_shri_i32(rm, t0, 8);
3630 tcg_gen_andi_i32(rm, rm, 0xff);
3631 tcg_gen_shri_i32(tmp, t0, 16);
3632 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3633 tcg_gen_or_i32(rm, rm, tmp);
3634 tcg_gen_shli_i32(tmp, t1, 8);
3635 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3636 tcg_gen_or_i32(rm, rm, tmp);
3637 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3638 tcg_gen_or_i32(t1, rm, tmp);
3639 tcg_gen_mov_i32(t0, rd);
3640
3641 dead_tmp(tmp);
3642 dead_tmp(rm);
3643 dead_tmp(rd);
3644}
3645
3646static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3647{
3648 TCGv rd, rm, tmp;
3649
3650 rd = new_tmp();
3651 rm = new_tmp();
3652 tmp = new_tmp();
3653
3654 tcg_gen_andi_i32(rd, t0, 0xff);
3655 tcg_gen_shli_i32(tmp, t1, 8);
3656 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3657 tcg_gen_or_i32(rd, rd, tmp);
3658 tcg_gen_shli_i32(tmp, t0, 16);
3659 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3660 tcg_gen_or_i32(rd, rd, tmp);
3661 tcg_gen_shli_i32(tmp, t1, 24);
3662 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3663 tcg_gen_or_i32(rd, rd, tmp);
3664
3665 tcg_gen_andi_i32(rm, t1, 0xff000000);
3666 tcg_gen_shri_i32(tmp, t0, 8);
3667 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3668 tcg_gen_or_i32(rm, rm, tmp);
3669 tcg_gen_shri_i32(tmp, t1, 8);
3670 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3671 tcg_gen_or_i32(rm, rm, tmp);
3672 tcg_gen_shri_i32(tmp, t0, 16);
3673 tcg_gen_andi_i32(tmp, tmp, 0xff);
3674 tcg_gen_or_i32(t1, rm, tmp);
3675 tcg_gen_mov_i32(t0, rd);
3676
3677 dead_tmp(tmp);
3678 dead_tmp(rm);
3679 dead_tmp(rd);
3680}
3681
3682static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3683{
3684 TCGv tmp, tmp2;
3685
3686 tmp = new_tmp();
3687 tmp2 = new_tmp();
3688
3689 tcg_gen_andi_i32(tmp, t0, 0xffff);
3690 tcg_gen_shli_i32(tmp2, t1, 16);
3691 tcg_gen_or_i32(tmp, tmp, tmp2);
3692 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3693 tcg_gen_shri_i32(tmp2, t0, 16);
3694 tcg_gen_or_i32(t1, t1, tmp2);
3695 tcg_gen_mov_i32(t0, tmp);
3696
3697 dead_tmp(tmp2);
3698 dead_tmp(tmp);
3699}
3700
9ee6e8bb
PB
3701static void gen_neon_unzip(int reg, int q, int tmp, int size)
3702{
3703 int n;
dd8fbd78 3704 TCGv t0, t1;
9ee6e8bb
PB
3705
3706 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3707 t0 = neon_load_reg(reg, n);
3708 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3709 switch (size) {
dd8fbd78
FN
3710 case 0: gen_neon_unzip_u8(t0, t1); break;
3711 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3712 case 2: /* no-op */; break;
3713 default: abort();
3714 }
dd8fbd78
FN
3715 neon_store_scratch(tmp + n, t0);
3716 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3717 }
3718}
3719
19457615
FN
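/* Element transpose (VTRN) for the 8-bit and 16-bit element cases, again
   implemented with shifts and masks on a pair of 32-bit values. */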
3720static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3721{
3722 TCGv rd, tmp;
3723
3724 rd = new_tmp();
3725 tmp = new_tmp();
3726
3727 tcg_gen_shli_i32(rd, t0, 8);
3728 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3729 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3730 tcg_gen_or_i32(rd, rd, tmp);
3731
3732 tcg_gen_shri_i32(t1, t1, 8);
3733 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3734 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3735 tcg_gen_or_i32(t1, t1, tmp);
3736 tcg_gen_mov_i32(t0, rd);
3737
3738 dead_tmp(tmp);
3739 dead_tmp(rd);
3740}
3741
3742static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3743{
3744 TCGv rd, tmp;
3745
3746 rd = new_tmp();
3747 tmp = new_tmp();
3748
3749 tcg_gen_shli_i32(rd, t0, 16);
3750 tcg_gen_andi_i32(tmp, t1, 0xffff);
3751 tcg_gen_or_i32(rd, rd, tmp);
3752 tcg_gen_shri_i32(t1, t1, 16);
3753 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3754 tcg_gen_or_i32(t1, t1, tmp);
3755 tcg_gen_mov_i32(t0, rd);
3756
3757 dead_tmp(tmp);
3758 dead_tmp(rd);
3759}
3760
3761
9ee6e8bb
PB
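/* Properties of the VLDn/VSTn "multiple structures" forms, indexed by the
   op field (insn bits [11:8]): number of registers, interleave factor and
   register spacing. */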
3762static struct {
3763 int nregs;
3764 int interleave;
3765 int spacing;
3766} neon_ls_element_type[11] = {
3767 {4, 4, 1},
3768 {4, 4, 2},
3769 {4, 1, 1},
3770 {4, 2, 1},
3771 {3, 3, 1},
3772 {3, 3, 2},
3773 {3, 1, 1},
3774 {1, 1, 1},
3775 {2, 2, 1},
3776 {2, 2, 2},
3777 {2, 1, 1}
3778};
3779
3780/* Translate a NEON load/store element instruction. Return nonzero if the
3781 instruction is invalid. */
3782static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3783{
3784 int rd, rn, rm;
3785 int op;
3786 int nregs;
3787 int interleave;
84496233 3788 int spacing;
9ee6e8bb
PB
3789 int stride;
3790 int size;
3791 int reg;
3792 int pass;
3793 int load;
3794 int shift;
9ee6e8bb 3795 int n;
1b2b1e54 3796 TCGv addr;
b0109805 3797 TCGv tmp;
8f8e3aa4 3798 TCGv tmp2;
84496233 3799 TCGv_i64 tmp64;
9ee6e8bb
PB
3800
3801 if (!vfp_enabled(env))
3802 return 1;
3803 VFP_DREG_D(rd, insn);
3804 rn = (insn >> 16) & 0xf;
3805 rm = insn & 0xf;
3806 load = (insn & (1 << 21)) != 0;
1b2b1e54 3807 addr = new_tmp();
9ee6e8bb
PB
3808 if ((insn & (1 << 23)) == 0) {
3809 /* Load store all elements. */
3810 op = (insn >> 8) & 0xf;
3811 size = (insn >> 6) & 3;
84496233 3812 if (op > 10)
9ee6e8bb
PB
3813 return 1;
3814 nregs = neon_ls_element_type[op].nregs;
3815 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3816 spacing = neon_ls_element_type[op].spacing;
3817 if (size == 3 && (interleave | spacing) != 1)
3818 return 1;
dcc65026 3819 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3820 stride = (1 << size) * interleave;
3821 for (reg = 0; reg < nregs; reg++) {
3822 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3823 load_reg_var(s, addr, rn);
3824 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3825 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3826 load_reg_var(s, addr, rn);
3827 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3828 }
84496233
JR
3829 if (size == 3) {
3830 if (load) {
3831 tmp64 = gen_ld64(addr, IS_USER(s));
3832 neon_store_reg64(tmp64, rd);
3833 tcg_temp_free_i64(tmp64);
3834 } else {
3835 tmp64 = tcg_temp_new_i64();
3836 neon_load_reg64(tmp64, rd);
3837 gen_st64(tmp64, addr, IS_USER(s));
3838 }
3839 tcg_gen_addi_i32(addr, addr, stride);
3840 } else {
3841 for (pass = 0; pass < 2; pass++) {
3842 if (size == 2) {
3843 if (load) {
3844 tmp = gen_ld32(addr, IS_USER(s));
3845 neon_store_reg(rd, pass, tmp);
3846 } else {
3847 tmp = neon_load_reg(rd, pass);
3848 gen_st32(tmp, addr, IS_USER(s));
3849 }
1b2b1e54 3850 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3851 } else if (size == 1) {
3852 if (load) {
3853 tmp = gen_ld16u(addr, IS_USER(s));
3854 tcg_gen_addi_i32(addr, addr, stride);
3855 tmp2 = gen_ld16u(addr, IS_USER(s));
3856 tcg_gen_addi_i32(addr, addr, stride);
3857 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3858 dead_tmp(tmp2);
3859 neon_store_reg(rd, pass, tmp);
3860 } else {
3861 tmp = neon_load_reg(rd, pass);
3862 tmp2 = new_tmp();
3863 tcg_gen_shri_i32(tmp2, tmp, 16);
3864 gen_st16(tmp, addr, IS_USER(s));
3865 tcg_gen_addi_i32(addr, addr, stride);
3866 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3867 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3868 }
84496233
JR
3869 } else /* size == 0 */ {
3870 if (load) {
3871 TCGV_UNUSED(tmp2);
3872 for (n = 0; n < 4; n++) {
3873 tmp = gen_ld8u(addr, IS_USER(s));
3874 tcg_gen_addi_i32(addr, addr, stride);
3875 if (n == 0) {
3876 tmp2 = tmp;
3877 } else {
3878 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3879 dead_tmp(tmp);
3880 }
9ee6e8bb 3881 }
84496233
JR
3882 neon_store_reg(rd, pass, tmp2);
3883 } else {
3884 tmp2 = neon_load_reg(rd, pass);
3885 for (n = 0; n < 4; n++) {
3886 tmp = new_tmp();
3887 if (n == 0) {
3888 tcg_gen_mov_i32(tmp, tmp2);
3889 } else {
3890 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3891 }
3892 gen_st8(tmp, addr, IS_USER(s));
3893 tcg_gen_addi_i32(addr, addr, stride);
3894 }
3895 dead_tmp(tmp2);
9ee6e8bb
PB
3896 }
3897 }
3898 }
3899 }
84496233 3900 rd += spacing;
9ee6e8bb
PB
3901 }
3902 stride = nregs * 8;
3903 } else {
3904 size = (insn >> 10) & 3;
3905 if (size == 3) {
3906 /* Load single element to all lanes. */
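/* The value loaded below is replicated across the whole destination
   register (both 32-bit halves) before being written back. */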
3907 if (!load)
3908 return 1;
3909 size = (insn >> 6) & 3;
3910 nregs = ((insn >> 8) & 3) + 1;
3911 stride = (insn & (1 << 5)) ? 2 : 1;
dcc65026 3912 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3913 for (reg = 0; reg < nregs; reg++) {
3914 switch (size) {
3915 case 0:
1b2b1e54 3916 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3917 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3918 break;
3919 case 1:
1b2b1e54 3920 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3921 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3922 break;
3923 case 2:
1b2b1e54 3924 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3925 break;
3926 case 3:
3927 return 1;
a50f5b91
PB
3928 default: /* Avoid compiler warnings. */
3929 abort();
99c475ab 3930 }
1b2b1e54 3931 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3932 tmp2 = new_tmp();
3933 tcg_gen_mov_i32(tmp2, tmp);
3934 neon_store_reg(rd, 0, tmp2);
3018f259 3935 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3936 rd += stride;
3937 }
3938 stride = (1 << size) * nregs;
3939 } else {
3940 /* Single element. */
3941 pass = (insn >> 7) & 1;
3942 switch (size) {
3943 case 0:
3944 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3945 stride = 1;
3946 break;
3947 case 1:
3948 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3949 stride = (insn & (1 << 5)) ? 2 : 1;
3950 break;
3951 case 2:
3952 shift = 0;
9ee6e8bb
PB
3953 stride = (insn & (1 << 6)) ? 2 : 1;
3954 break;
3955 default:
3956 abort();
3957 }
3958 nregs = ((insn >> 8) & 3) + 1;
dcc65026 3959 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3960 for (reg = 0; reg < nregs; reg++) {
3961 if (load) {
9ee6e8bb
PB
3962 switch (size) {
3963 case 0:
1b2b1e54 3964 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3965 break;
3966 case 1:
1b2b1e54 3967 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3968 break;
3969 case 2:
1b2b1e54 3970 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3971 break;
a50f5b91
PB
3972 default: /* Avoid compiler warnings. */
3973 abort();
9ee6e8bb
PB
3974 }
3975 if (size != 2) {
8f8e3aa4
PB
3976 tmp2 = neon_load_reg(rd, pass);
3977 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3978 dead_tmp(tmp2);
9ee6e8bb 3979 }
8f8e3aa4 3980 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3981 } else { /* Store */
8f8e3aa4
PB
3982 tmp = neon_load_reg(rd, pass);
3983 if (shift)
3984 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3985 switch (size) {
3986 case 0:
1b2b1e54 3987 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3988 break;
3989 case 1:
1b2b1e54 3990 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3991 break;
3992 case 2:
1b2b1e54 3993 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3994 break;
99c475ab 3995 }
99c475ab 3996 }
9ee6e8bb 3997 rd += stride;
1b2b1e54 3998 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 3999 }
9ee6e8bb 4000 stride = nregs * (1 << size);
99c475ab 4001 }
9ee6e8bb 4002 }
1b2b1e54 4003 dead_tmp(addr);
9ee6e8bb 4004 if (rm != 15) {
b26eefb6
PB
4005 TCGv base;
4006
4007 base = load_reg(s, rn);
9ee6e8bb 4008 if (rm == 13) {
b26eefb6 4009 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4010 } else {
b26eefb6
PB
4011 TCGv index;
4012 index = load_reg(s, rm);
4013 tcg_gen_add_i32(base, base, index);
4014 dead_tmp(index);
9ee6e8bb 4015 }
b26eefb6 4016 store_reg(s, rn, base);
9ee6e8bb
PB
4017 }
4018 return 0;
4019}
3b46e624 4020
8f8e3aa4
PB
4021/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4022static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4023{
4024 tcg_gen_and_i32(t, t, c);
f669df27 4025 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4026 tcg_gen_or_i32(dest, t, f);
4027}
4028
a7812ae4 4029static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4030{
4031 switch (size) {
4032 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4033 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4034 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4035 default: abort();
4036 }
4037}
4038
a7812ae4 4039static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4040{
4041 switch (size) {
4042 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4043 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4044 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4045 default: abort();
4046 }
4047}
4048
a7812ae4 4049static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4050{
4051 switch (size) {
4052 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4053 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4054 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4055 default: abort();
4056 }
4057}
4058
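/* Narrowing shift by a variable amount: 'q' selects the rounding
   variant of the shift helper and 'u' the unsigned variant. */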
4059static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4060 int q, int u)
4061{
4062 if (q) {
4063 if (u) {
4064 switch (size) {
4065 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4066 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4067 default: abort();
4068 }
4069 } else {
4070 switch (size) {
4071 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4072 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4073 default: abort();
4074 }
4075 }
4076 } else {
4077 if (u) {
4078 switch (size) {
4079 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4080 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4081 default: abort();
4082 }
4083 } else {
4084 switch (size) {
4085 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4086 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4087 default: abort();
4088 }
4089 }
4090 }
4091}
4092
a7812ae4 4093static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4094{
4095 if (u) {
4096 switch (size) {
4097 case 0: gen_helper_neon_widen_u8(dest, src); break;
4098 case 1: gen_helper_neon_widen_u16(dest, src); break;
4099 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4100 default: abort();
4101 }
4102 } else {
4103 switch (size) {
4104 case 0: gen_helper_neon_widen_s8(dest, src); break;
4105 case 1: gen_helper_neon_widen_s16(dest, src); break;
4106 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4107 default: abort();
4108 }
4109 }
4110 dead_tmp(src);
4111}
4112
4113static inline void gen_neon_addl(int size)
4114{
4115 switch (size) {
4116 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4117 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4118 case 2: tcg_gen_add_i64(CPU_V001); break;
4119 default: abort();
4120 }
4121}
4122
4123static inline void gen_neon_subl(int size)
4124{
4125 switch (size) {
4126 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4127 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4128 case 2: tcg_gen_sub_i64(CPU_V001); break;
4129 default: abort();
4130 }
4131}
4132
a7812ae4 4133static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4134{
4135 switch (size) {
4136 case 0: gen_helper_neon_negl_u16(var, var); break;
4137 case 1: gen_helper_neon_negl_u32(var, var); break;
4138 case 2: gen_helper_neon_negl_u64(var, var); break;
4139 default: abort();
4140 }
4141}
4142
a7812ae4 4143static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4144{
4145 switch (size) {
4146 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4147 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4148 default: abort();
4149 }
4150}
4151
a7812ae4 4152static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4153{
a7812ae4 4154 TCGv_i64 tmp;
ad69471c
PB
4155
4156 switch ((size << 1) | u) {
4157 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4158 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4159 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4160 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4161 case 4:
4162 tmp = gen_muls_i64_i32(a, b);
4163 tcg_gen_mov_i64(dest, tmp);
4164 break;
4165 case 5:
4166 tmp = gen_mulu_i64_i32(a, b);
4167 tcg_gen_mov_i64(dest, tmp);
4168 break;
4169 default: abort();
4170 }
ad69471c
PB
4171}
4172
9ee6e8bb
PB
4173/* Translate a NEON data processing instruction. Return nonzero if the
4174 instruction is invalid.
ad69471c
PB
4175 We process data in a mixture of 32-bit and 64-bit chunks.
4176 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4177
9ee6e8bb
PB
4178static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4179{
4180 int op;
4181 int q;
4182 int rd, rn, rm;
4183 int size;
4184 int shift;
4185 int pass;
4186 int count;
4187 int pairwise;
4188 int u;
4189 int n;
ca9a32e4 4190 uint32_t imm, mask;
b75263d6 4191 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4192 TCGv_i64 tmp64;
9ee6e8bb
PB
4193
4194 if (!vfp_enabled(env))
4195 return 1;
4196 q = (insn & (1 << 6)) != 0;
4197 u = (insn >> 24) & 1;
4198 VFP_DREG_D(rd, insn);
4199 VFP_DREG_N(rn, insn);
4200 VFP_DREG_M(rm, insn);
4201 size = (insn >> 20) & 3;
4202 if ((insn & (1 << 23)) == 0) {
4203 /* Three register same length. */
4204 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4205 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4206 || op == 10 || op == 11 || op == 16)) {
4207 /* 64-bit element instructions. */
9ee6e8bb 4208 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4209 neon_load_reg64(cpu_V0, rn + pass);
4210 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4211 switch (op) {
4212 case 1: /* VQADD */
4213 if (u) {
ad69471c 4214 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4215 } else {
ad69471c 4216 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4217 }
9ee6e8bb
PB
4218 break;
4219 case 5: /* VQSUB */
4220 if (u) {
ad69471c
PB
4221 gen_helper_neon_sub_saturate_u64(CPU_V001);
4222 } else {
4223 gen_helper_neon_sub_saturate_s64(CPU_V001);
4224 }
4225 break;
4226 case 8: /* VSHL */
4227 if (u) {
4228 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4229 } else {
4230 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4231 }
4232 break;
4233 case 9: /* VQSHL */
4234 if (u) {
4235 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4236 cpu_V1, cpu_V0);
4237 } else {
4238 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4239 cpu_V1, cpu_V0);
4240 }
4241 break;
4242 case 10: /* VRSHL */
4243 if (u) {
4244 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4245 } else {
ad69471c
PB
4246 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4247 }
4248 break;
4249 case 11: /* VQRSHL */
4250 if (u) {
4251 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4252 cpu_V1, cpu_V0);
4253 } else {
4254 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4255 cpu_V1, cpu_V0);
1e8d4eec 4256 }
9ee6e8bb
PB
4257 break;
4258 case 16:
4259 if (u) {
ad69471c 4260 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4261 } else {
ad69471c 4262 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4263 }
4264 break;
4265 default:
4266 abort();
2c0262af 4267 }
ad69471c 4268 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4269 }
9ee6e8bb 4270 return 0;
2c0262af 4271 }
9ee6e8bb
PB
4272 switch (op) {
4273 case 8: /* VSHL */
4274 case 9: /* VQSHL */
4275 case 10: /* VRSHL */
ad69471c 4276 case 11: /* VQRSHL */
9ee6e8bb 4277 {
ad69471c
PB
4278 int rtmp;
4279 /* Shift instruction operands are reversed. */
4280 rtmp = rn;
9ee6e8bb 4281 rn = rm;
ad69471c 4282 rm = rtmp;
9ee6e8bb
PB
4283 pairwise = 0;
4284 }
2c0262af 4285 break;
9ee6e8bb
PB
4286 case 20: /* VPMAX */
4287 case 21: /* VPMIN */
4288 case 23: /* VPADD */
4289 pairwise = 1;
2c0262af 4290 break;
9ee6e8bb
PB
4291 case 26: /* VPADD (float) */
4292 pairwise = (u && size < 2);
2c0262af 4293 break;
9ee6e8bb
PB
4294 case 30: /* VPMIN/VPMAX (float) */
4295 pairwise = u;
2c0262af 4296 break;
9ee6e8bb
PB
4297 default:
4298 pairwise = 0;
2c0262af 4299 break;
9ee6e8bb 4300 }
dd8fbd78 4301
9ee6e8bb
PB
4302 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4303
4304 if (pairwise) {
4305 /* Pairwise. */
4306 if (q)
4307 n = (pass & 1) * 2;
2c0262af 4308 else
9ee6e8bb
PB
4309 n = 0;
4310 if (pass < q + 1) {
dd8fbd78
FN
4311 tmp = neon_load_reg(rn, n);
4312 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4313 } else {
dd8fbd78
FN
4314 tmp = neon_load_reg(rm, n);
4315 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4316 }
4317 } else {
4318 /* Elementwise. */
dd8fbd78
FN
4319 tmp = neon_load_reg(rn, pass);
4320 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4321 }
4322 switch (op) {
4323 case 0: /* VHADD */
4324 GEN_NEON_INTEGER_OP(hadd);
4325 break;
4326 case 1: /* VQADD */
ad69471c 4327 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4328 break;
9ee6e8bb
PB
4329 case 2: /* VRHADD */
4330 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4331 break;
9ee6e8bb
PB
4332 case 3: /* Logic ops. */
4333 switch ((u << 2) | size) {
4334 case 0: /* VAND */
dd8fbd78 4335 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4336 break;
4337 case 1: /* BIC */
f669df27 4338 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4339 break;
4340 case 2: /* VORR */
dd8fbd78 4341 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4342 break;
4343 case 3: /* VORN */
f669df27 4344 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4345 break;
4346 case 4: /* VEOR */
dd8fbd78 4347 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4348 break;
4349 case 5: /* VBSL */
dd8fbd78
FN
4350 tmp3 = neon_load_reg(rd, pass);
4351 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4352 dead_tmp(tmp3);
9ee6e8bb
PB
4353 break;
4354 case 6: /* VBIT */
dd8fbd78
FN
4355 tmp3 = neon_load_reg(rd, pass);
4356 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4357 dead_tmp(tmp3);
9ee6e8bb
PB
4358 break;
4359 case 7: /* VBIF */
dd8fbd78
FN
4360 tmp3 = neon_load_reg(rd, pass);
4361 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4362 dead_tmp(tmp3);
9ee6e8bb 4363 break;
2c0262af
FB
4364 }
4365 break;
9ee6e8bb
PB
4366 case 4: /* VHSUB */
4367 GEN_NEON_INTEGER_OP(hsub);
4368 break;
4369 case 5: /* VQSUB */
ad69471c 4370 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4371 break;
9ee6e8bb
PB
4372 case 6: /* VCGT */
4373 GEN_NEON_INTEGER_OP(cgt);
4374 break;
4375 case 7: /* VCGE */
4376 GEN_NEON_INTEGER_OP(cge);
4377 break;
4378 case 8: /* VSHL */
ad69471c 4379 GEN_NEON_INTEGER_OP(shl);
2c0262af 4380 break;
9ee6e8bb 4381 case 9: /* VQSHL */
ad69471c 4382 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4383 break;
9ee6e8bb 4384 case 10: /* VRSHL */
ad69471c 4385 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4386 break;
9ee6e8bb 4387 case 11: /* VQRSHL */
ad69471c 4388 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4389 break;
4390 case 12: /* VMAX */
4391 GEN_NEON_INTEGER_OP(max);
4392 break;
4393 case 13: /* VMIN */
4394 GEN_NEON_INTEGER_OP(min);
4395 break;
4396 case 14: /* VABD */
4397 GEN_NEON_INTEGER_OP(abd);
4398 break;
4399 case 15: /* VABA */
4400 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4401 dead_tmp(tmp2);
4402 tmp2 = neon_load_reg(rd, pass);
4403 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4404 break;
4405 case 16:
4406 if (!u) { /* VADD */
dd8fbd78 4407 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4408 return 1;
4409 } else { /* VSUB */
4410 switch (size) {
dd8fbd78
FN
4411 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4412 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4413 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4414 default: return 1;
4415 }
4416 }
4417 break;
4418 case 17:
4419 if (!u) { /* VTST */
4420 switch (size) {
dd8fbd78
FN
4421 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4422 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4423 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4424 default: return 1;
4425 }
4426 } else { /* VCEQ */
4427 switch (size) {
dd8fbd78
FN
4428 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4429 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4430 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4431 default: return 1;
4432 }
4433 }
4434 break;
4435 case 18: /* Multiply. */
4436 switch (size) {
dd8fbd78
FN
4437 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4438 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4439 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4440 default: return 1;
4441 }
dd8fbd78
FN
4442 dead_tmp(tmp2);
4443 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4444 if (u) { /* VMLS */
dd8fbd78 4445 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4446 } else { /* VMLA */
dd8fbd78 4447 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4448 }
4449 break;
4450 case 19: /* VMUL */
4451 if (u) { /* polynomial */
dd8fbd78 4452 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4453 } else { /* Integer */
4454 switch (size) {
dd8fbd78
FN
4455 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4456 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4457 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4458 default: return 1;
4459 }
4460 }
4461 break;
4462 case 20: /* VPMAX */
4463 GEN_NEON_INTEGER_OP(pmax);
4464 break;
4465 case 21: /* VPMIN */
4466 GEN_NEON_INTEGER_OP(pmin);
4467 break;
4468 case 22: /* Multiply high. */
4469 if (!u) { /* VQDMULH */
4470 switch (size) {
dd8fbd78
FN
4471 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4472 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4473 default: return 1;
4474 }
4475 } else { /* VQRDMULH */
4476 switch (size) {
dd8fbd78
FN
4477 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4478 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4479 default: return 1;
4480 }
4481 }
4482 break;
4483 case 23: /* VPADD */
4484 if (u)
4485 return 1;
4486 switch (size) {
dd8fbd78
FN
4487 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4488 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4489 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4490 default: return 1;
4491 }
4492 break;
4493 case 26: /* Floating point arithmetic. */
4494 switch ((u << 2) | size) {
4495 case 0: /* VADD */
dd8fbd78 4496 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4497 break;
4498 case 2: /* VSUB */
dd8fbd78 4499 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4500 break;
4501 case 4: /* VPADD */
dd8fbd78 4502 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4503 break;
4504 case 6: /* VABD */
dd8fbd78 4505 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4506 break;
4507 default:
4508 return 1;
4509 }
4510 break;
4511 case 27: /* Float multiply. */
dd8fbd78 4512 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4513 if (!u) {
dd8fbd78
FN
4514 dead_tmp(tmp2);
4515 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4516 if (size == 0) {
dd8fbd78 4517 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4518 } else {
dd8fbd78 4519 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4520 }
4521 }
4522 break;
4523 case 28: /* Float compare. */
4524 if (!u) {
dd8fbd78 4525 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4526 } else {
9ee6e8bb 4527 if (size == 0)
dd8fbd78 4528 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4529 else
dd8fbd78 4530 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4531 }
2c0262af 4532 break;
9ee6e8bb
PB
4533 case 29: /* Float compare absolute. */
4534 if (!u)
4535 return 1;
4536 if (size == 0)
dd8fbd78 4537 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4538 else
dd8fbd78 4539 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4540 break;
9ee6e8bb
PB
4541 case 30: /* Float min/max. */
4542 if (size == 0)
dd8fbd78 4543 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4544 else
dd8fbd78 4545 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4546 break;
4547 case 31:
4548 if (size == 0)
dd8fbd78 4549 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4550 else
dd8fbd78 4551 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4552 break;
9ee6e8bb
PB
4553 default:
4554 abort();
2c0262af 4555 }
dd8fbd78
FN
4556 dead_tmp(tmp2);
4557
9ee6e8bb
PB
4558 /* Save the result. For elementwise operations we can put it
4559 straight into the destination register. For pairwise operations
4560 we have to be careful to avoid clobbering the source operands. */
4561 if (pairwise && rd == rm) {
dd8fbd78 4562 neon_store_scratch(pass, tmp);
9ee6e8bb 4563 } else {
dd8fbd78 4564 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4565 }
4566
4567 } /* for pass */
4568 if (pairwise && rd == rm) {
4569 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4570 tmp = neon_load_scratch(pass);
4571 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4572 }
4573 }
ad69471c 4574 /* End of 3 register same size operations. */
9ee6e8bb
PB
4575 } else if (insn & (1 << 4)) {
4576 if ((insn & 0x00380080) != 0) {
4577 /* Two registers and shift. */
4578 op = (insn >> 8) & 0xf;
4579 if (insn & (1 << 7)) {
4580 /* 64-bit shift. */
4581 size = 3;
4582 } else {
4583 size = 2;
4584 while ((insn & (1 << (size + 19))) == 0)
4585 size--;
4586 }
4587 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4588 /* To avoid excessive duplication of ops we implement shift
4589 by immediate using the variable shift operations. */
4590 if (op < 8) {
4591 /* Shift by immediate:
4592 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4593 /* Right shifts are encoded as N - shift, where N is the
4594 element size in bits. */
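/* The adjustment below therefore yields a negative count, which the
   variable shift helpers treat as a right shift; e.g. for 8-bit
   elements an encoded value of 7 becomes -1, a right shift by one. */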
4595 if (op <= 4)
4596 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4597 if (size == 3) {
4598 count = q + 1;
4599 } else {
4600 count = q ? 4: 2;
4601 }
4602 switch (size) {
4603 case 0:
4604 imm = (uint8_t) shift;
4605 imm |= imm << 8;
4606 imm |= imm << 16;
4607 break;
4608 case 1:
4609 imm = (uint16_t) shift;
4610 imm |= imm << 16;
4611 break;
4612 case 2:
4613 case 3:
4614 imm = shift;
4615 break;
4616 default:
4617 abort();
4618 }
4619
4620 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4621 if (size == 3) {
4622 neon_load_reg64(cpu_V0, rm + pass);
4623 tcg_gen_movi_i64(cpu_V1, imm);
4624 switch (op) {
4625 case 0: /* VSHR */
4626 case 1: /* VSRA */
4627 if (u)
4628 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4629 else
ad69471c 4630 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4631 break;
ad69471c
PB
4632 case 2: /* VRSHR */
4633 case 3: /* VRSRA */
4634 if (u)
4635 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4636 else
ad69471c 4637 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4638 break;
ad69471c
PB
4639 case 4: /* VSRI */
4640 if (!u)
4641 return 1;
4642 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4643 break;
4644 case 5: /* VSHL, VSLI */
4645 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4646 break;
4647 case 6: /* VQSHL */
4648 if (u)
4649 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4650 else
ad69471c
PB
4651 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4652 break;
4653 case 7: /* VQSHLU */
4654 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4655 break;
9ee6e8bb 4656 }
ad69471c
PB
4657 if (op == 1 || op == 3) {
4658 /* Accumulate. */
4659 neon_load_reg64(cpu_V0, rd + pass);
4660 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4661 } else if (op == 4 || (op == 5 && u)) {
4662 /* Insert */
4663 cpu_abort(env, "VS[LR]I.64 not implemented");
4664 }
4665 neon_store_reg64(cpu_V0, rd + pass);
4666 } else { /* size < 3 */
4667 /* Operands in T0 and T1. */
dd8fbd78
FN
4668 tmp = neon_load_reg(rm, pass);
4669 tmp2 = new_tmp();
4670 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4671 switch (op) {
4672 case 0: /* VSHR */
4673 case 1: /* VSRA */
4674 GEN_NEON_INTEGER_OP(shl);
4675 break;
4676 case 2: /* VRSHR */
4677 case 3: /* VRSRA */
4678 GEN_NEON_INTEGER_OP(rshl);
4679 break;
4680 case 4: /* VSRI */
4681 if (!u)
4682 return 1;
4683 GEN_NEON_INTEGER_OP(shl);
4684 break;
4685 case 5: /* VSHL, VSLI */
4686 switch (size) {
dd8fbd78
FN
4687 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4688 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4689 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4690 default: return 1;
4691 }
4692 break;
4693 case 6: /* VQSHL */
4694 GEN_NEON_INTEGER_OP_ENV(qshl);
4695 break;
4696 case 7: /* VQSHLU */
4697 switch (size) {
dd8fbd78
FN
4698 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4699 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4700 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4701 default: return 1;
4702 }
4703 break;
4704 }
dd8fbd78 4705 dead_tmp(tmp2);
ad69471c
PB
4706
4707 if (op == 1 || op == 3) {
4708 /* Accumulate. */
dd8fbd78
FN
4709 tmp2 = neon_load_reg(rd, pass);
4710 gen_neon_add(size, tmp2, tmp);
4711 dead_tmp(tmp2);
ad69471c
PB
4712 } else if (op == 4 || (op == 5 && u)) {
4713 /* Insert */
4714 switch (size) {
4715 case 0:
4716 if (op == 4)
ca9a32e4 4717 mask = 0xff >> -shift;
ad69471c 4718 else
ca9a32e4
JR
4719 mask = (uint8_t)(0xff << shift);
4720 mask |= mask << 8;
4721 mask |= mask << 16;
ad69471c
PB
4722 break;
4723 case 1:
4724 if (op == 4)
ca9a32e4 4725 mask = 0xffff >> -shift;
ad69471c 4726 else
ca9a32e4
JR
4727 mask = (uint16_t)(0xffff << shift);
4728 mask |= mask << 16;
ad69471c
PB
4729 break;
4730 case 2:
ca9a32e4
JR
4731 if (shift < -31 || shift > 31) {
4732 mask = 0;
4733 } else {
4734 if (op == 4)
4735 mask = 0xffffffffu >> -shift;
4736 else
4737 mask = 0xffffffffu << shift;
4738 }
ad69471c
PB
4739 break;
4740 default:
4741 abort();
4742 }
dd8fbd78 4743 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4744 tcg_gen_andi_i32(tmp, tmp, mask);
4745 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78
FN
4746 tcg_gen_or_i32(tmp, tmp, tmp2);
4747 dead_tmp(tmp2);
ad69471c 4748 }
dd8fbd78 4749 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4750 }
4751 } /* for pass */
4752 } else if (op < 10) {
ad69471c 4753 /* Shift by immediate and narrow:
9ee6e8bb
PB
4754 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4755 shift = shift - (1 << (size + 3));
4756 size++;
9ee6e8bb
PB
4757 switch (size) {
4758 case 1:
ad69471c 4759 imm = (uint16_t)shift;
9ee6e8bb 4760 imm |= imm << 16;
ad69471c 4761 tmp2 = tcg_const_i32(imm);
a7812ae4 4762 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4763 break;
4764 case 2:
ad69471c
PB
4765 imm = (uint32_t)shift;
4766 tmp2 = tcg_const_i32(imm);
a7812ae4 4767 TCGV_UNUSED_I64(tmp64);
4cc633c3 4768 break;
9ee6e8bb 4769 case 3:
a7812ae4
PB
4770 tmp64 = tcg_const_i64(shift);
4771 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4772 break;
4773 default:
4774 abort();
4775 }
4776
ad69471c
PB
4777 for (pass = 0; pass < 2; pass++) {
4778 if (size == 3) {
4779 neon_load_reg64(cpu_V0, rm + pass);
4780 if (q) {
4781 if (u)
a7812ae4 4782 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4783 else
a7812ae4 4784 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4785 } else {
4786 if (u)
a7812ae4 4787 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4788 else
a7812ae4 4789 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4790 }
2c0262af 4791 } else {
ad69471c
PB
4792 tmp = neon_load_reg(rm + pass, 0);
4793 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4794 tmp3 = neon_load_reg(rm + pass, 1);
4795 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4796 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4797 dead_tmp(tmp);
36aa55dc 4798 dead_tmp(tmp3);
9ee6e8bb 4799 }
ad69471c
PB
4800 tmp = new_tmp();
4801 if (op == 8 && !u) {
4802 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4803 } else {
ad69471c
PB
4804 if (op == 8)
4805 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4806 else
ad69471c
PB
4807 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4808 }
2301db49 4809 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4810 } /* for pass */
b75263d6
JR
4811 if (size == 3) {
4812 tcg_temp_free_i64(tmp64);
2301db49
JR
4813 } else {
4814 dead_tmp(tmp2);
b75263d6 4815 }
9ee6e8bb
PB
4816 } else if (op == 10) {
4817 /* VSHLL */
ad69471c 4818 if (q || size == 3)
9ee6e8bb 4819 return 1;
ad69471c
PB
4820 tmp = neon_load_reg(rm, 0);
4821 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4822 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4823 if (pass == 1)
4824 tmp = tmp2;
4825
4826 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4827
9ee6e8bb
PB
4828 if (shift != 0) {
4829 /* The shift is less than the width of the source
ad69471c
PB
4830 type, so we can just shift the whole register. */
4831 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4832 if (size < 2 || !u) {
4833 uint64_t imm64;
4834 if (size == 0) {
4835 imm = (0xffu >> (8 - shift));
4836 imm |= imm << 16;
4837 } else {
4838 imm = 0xffff >> (16 - shift);
9ee6e8bb 4839 }
ad69471c
PB
4840 imm64 = imm | (((uint64_t)imm) << 32);
4841 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4842 }
4843 }
ad69471c 4844 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4845 }
4846 } else if (op == 15 || op == 16) {
4847 /* VCVT fixed-point. */
4848 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4849 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4850 if (op & 1) {
4851 if (u)
4373f3ce 4852 gen_vfp_ulto(0, shift);
9ee6e8bb 4853 else
4373f3ce 4854 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4855 } else {
4856 if (u)
4373f3ce 4857 gen_vfp_toul(0, shift);
9ee6e8bb 4858 else
4373f3ce 4859 gen_vfp_tosl(0, shift);
2c0262af 4860 }
4373f3ce 4861 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4862 }
4863 } else {
9ee6e8bb
PB
4864 return 1;
4865 }
4866 } else { /* (insn & 0x00380080) == 0 */
4867 int invert;
4868
4869 op = (insn >> 8) & 0xf;
4870 /* One register and immediate. */
4871 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4872 invert = (insn & (1 << 5)) != 0;
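/* Expand the 8-bit immediate into a 32-bit per-lane constant
   according to the cmode field (op); 'invert' selects the
   VMVN/VBIC-style inverted forms. */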
4873 switch (op) {
4874 case 0: case 1:
4875 /* no-op */
4876 break;
4877 case 2: case 3:
4878 imm <<= 8;
4879 break;
4880 case 4: case 5:
4881 imm <<= 16;
4882 break;
4883 case 6: case 7:
4884 imm <<= 24;
4885 break;
4886 case 8: case 9:
4887 imm |= imm << 16;
4888 break;
4889 case 10: case 11:
4890 imm = (imm << 8) | (imm << 24);
4891 break;
4892 case 12:
4893 imm = (imm << 8) | 0xff;
4894 break;
4895 case 13:
4896 imm = (imm << 16) | 0xffff;
4897 break;
4898 case 14:
4899 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4900 if (invert)
4901 imm = ~imm;
4902 break;
4903 case 15:
4904 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4905 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4906 break;
4907 }
4908 if (invert)
4909 imm = ~imm;
4910
9ee6e8bb
PB
4911 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4912 if (op & 1 && op < 12) {
ad69471c 4913 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4914 if (invert) {
4915 /* The immediate value has already been inverted, so
4916 BIC becomes AND. */
ad69471c 4917 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4918 } else {
ad69471c 4919 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4920 }
9ee6e8bb 4921 } else {
ad69471c
PB
4922 /* VMOV, VMVN. */
4923 tmp = new_tmp();
9ee6e8bb 4924 if (op == 14 && invert) {
ad69471c
PB
4925 uint32_t val;
4926 val = 0;
9ee6e8bb
PB
4927 for (n = 0; n < 4; n++) {
4928 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4929 val |= 0xff << (n * 8);
9ee6e8bb 4930 }
ad69471c
PB
4931 tcg_gen_movi_i32(tmp, val);
4932 } else {
4933 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4934 }
9ee6e8bb 4935 }
ad69471c 4936 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4937 }
4938 }
e4b3861d 4939 } else { /* (insn & 0x00800010) == 0x00800000 */
9ee6e8bb
PB
4940 if (size != 3) {
4941 op = (insn >> 8) & 0xf;
4942 if ((insn & (1 << 6)) == 0) {
4943 /* Three registers of different lengths. */
4944 int src1_wide;
4945 int src2_wide;
4946 int prewiden;
4947 /* prewiden, src1_wide, src2_wide */
4948 static const int neon_3reg_wide[16][3] = {
4949 {1, 0, 0}, /* VADDL */
4950 {1, 1, 0}, /* VADDW */
4951 {1, 0, 0}, /* VSUBL */
4952 {1, 1, 0}, /* VSUBW */
4953 {0, 1, 1}, /* VADDHN */
4954 {0, 0, 0}, /* VABAL */
4955 {0, 1, 1}, /* VSUBHN */
4956 {0, 0, 0}, /* VABDL */
4957 {0, 0, 0}, /* VMLAL */
4958 {0, 0, 0}, /* VQDMLAL */
4959 {0, 0, 0}, /* VMLSL */
4960 {0, 0, 0}, /* VQDMLSL */
4961 {0, 0, 0}, /* Integer VMULL */
4962 {0, 0, 0}, /* VQDMULL */
4963 {0, 0, 0} /* Polynomial VMULL */
4964 };
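/* prewiden: the 32-bit source elements are widened to 64 bits before
   the operation; srcN_wide: that source already has 64-bit elements
   and is loaded directly with neon_load_reg64. */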
4965
4966 prewiden = neon_3reg_wide[op][0];
4967 src1_wide = neon_3reg_wide[op][1];
4968 src2_wide = neon_3reg_wide[op][2];
4969
ad69471c
PB
4970 if (size == 0 && (op == 9 || op == 11 || op == 13))
4971 return 1;
4972
9ee6e8bb
PB
4973 /* Avoid overlapping operands. Wide source operands are
4974 always aligned so will never overlap with wide
4975 destinations in problematic ways. */
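/* If the destination overlaps a narrow source, park that source's
   high half in a scratch slot so pass 0 cannot clobber it before
   pass 1 reads it. */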
8f8e3aa4 4976 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4977 tmp = neon_load_reg(rm, 1);
4978 neon_store_scratch(2, tmp);
8f8e3aa4 4979 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4980 tmp = neon_load_reg(rn, 1);
4981 neon_store_scratch(2, tmp);
9ee6e8bb 4982 }
a50f5b91 4983 TCGV_UNUSED(tmp3);
9ee6e8bb 4984 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4985 if (src1_wide) {
4986 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4987 TCGV_UNUSED(tmp);
9ee6e8bb 4988 } else {
ad69471c 4989 if (pass == 1 && rd == rn) {
dd8fbd78 4990 tmp = neon_load_scratch(2);
9ee6e8bb 4991 } else {
ad69471c
PB
4992 tmp = neon_load_reg(rn, pass);
4993 }
4994 if (prewiden) {
4995 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4996 }
4997 }
ad69471c
PB
4998 if (src2_wide) {
4999 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5000 TCGV_UNUSED(tmp2);
9ee6e8bb 5001 } else {
ad69471c 5002 if (pass == 1 && rd == rm) {
dd8fbd78 5003 tmp2 = neon_load_scratch(2);
9ee6e8bb 5004 } else {
ad69471c
PB
5005 tmp2 = neon_load_reg(rm, pass);
5006 }
5007 if (prewiden) {
5008 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5009 }
9ee6e8bb
PB
5010 }
5011 switch (op) {
5012 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5013 gen_neon_addl(size);
9ee6e8bb
PB
5014 break;
5015 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5016 gen_neon_subl(size);
9ee6e8bb
PB
5017 break;
5018 case 5: case 7: /* VABAL, VABDL */
5019 switch ((size << 1) | u) {
ad69471c
PB
5020 case 0:
5021 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5022 break;
5023 case 1:
5024 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5025 break;
5026 case 2:
5027 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5028 break;
5029 case 3:
5030 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5031 break;
5032 case 4:
5033 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5034 break;
5035 case 5:
5036 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5037 break;
9ee6e8bb
PB
5038 default: abort();
5039 }
ad69471c
PB
5040 dead_tmp(tmp2);
5041 dead_tmp(tmp);
9ee6e8bb
PB
5042 break;
5043 case 8: case 9: case 10: case 11: case 12: case 13:
5044 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5045 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
5046 dead_tmp(tmp2);
5047 dead_tmp(tmp);
9ee6e8bb
PB
5048 break;
5049 case 14: /* Polynomial VMULL */
5050 cpu_abort(env, "Polynomial VMULL not implemented");
5051
5052 default: /* 15 is RESERVED. */
5053 return 1;
5054 }
5055 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5056 /* Accumulate. */
5057 if (op == 10 || op == 11) {
ad69471c 5058 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
5059 }
5060
9ee6e8bb 5061 if (op != 13) {
ad69471c 5062 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
5063 }
5064
5065 switch (op) {
5066 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 5067 gen_neon_addl(size);
9ee6e8bb
PB
5068 break;
5069 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5070 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5071 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5072 break;
9ee6e8bb
PB
5073 /* Fall through. */
5074 case 13: /* VQDMULL */
ad69471c 5075 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5076 break;
5077 default:
5078 abort();
5079 }
ad69471c 5080 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5081 } else if (op == 4 || op == 6) {
5082 /* Narrowing operation. */
ad69471c 5083 tmp = new_tmp();
9ee6e8bb
PB
5084 if (u) {
5085 switch (size) {
ad69471c
PB
5086 case 0:
5087 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5088 break;
5089 case 1:
5090 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5091 break;
5092 case 2:
5093 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5094 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5095 break;
9ee6e8bb
PB
5096 default: abort();
5097 }
5098 } else {
5099 switch (size) {
ad69471c
PB
5100 case 0:
5101 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5102 break;
5103 case 1:
5104 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5105 break;
5106 case 2:
5107 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5108 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5109 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5110 break;
9ee6e8bb
PB
5111 default: abort();
5112 }
5113 }
ad69471c
PB
5114 if (pass == 0) {
5115 tmp3 = tmp;
5116 } else {
5117 neon_store_reg(rd, 0, tmp3);
5118 neon_store_reg(rd, 1, tmp);
5119 }
9ee6e8bb
PB
5120 } else {
5121 /* Write back the result. */
ad69471c 5122 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5123 }
5124 }
5125 } else {
5126 /* Two registers and a scalar. */
5127 switch (op) {
5128 case 0: /* Integer VMLA scalar */
5129 case 1: /* Float VMLA scalar */
5130 case 4: /* Integer VMLS scalar */
5131 case 5: /* Floating point VMLS scalar */
5132 case 8: /* Integer VMUL scalar */
5133 case 9: /* Floating point VMUL scalar */
5134 case 12: /* VQDMULH scalar */
5135 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5136 tmp = neon_get_scalar(size, rm);
5137 neon_store_scratch(0, tmp);
9ee6e8bb 5138 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5139 tmp = neon_load_scratch(0);
5140 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5141 if (op == 12) {
5142 if (size == 1) {
dd8fbd78 5143 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5144 } else {
dd8fbd78 5145 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5146 }
5147 } else if (op == 13) {
5148 if (size == 1) {
dd8fbd78 5149 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5150 } else {
dd8fbd78 5151 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5152 }
5153 } else if (op & 1) {
dd8fbd78 5154 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5155 } else {
5156 switch (size) {
dd8fbd78
FN
5157 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5158 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5159 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5160 default: return 1;
5161 }
5162 }
dd8fbd78 5163 dead_tmp(tmp2);
9ee6e8bb
PB
5164 if (op < 8) {
5165 /* Accumulate. */
dd8fbd78 5166 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5167 switch (op) {
5168 case 0:
dd8fbd78 5169 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5170 break;
5171 case 1:
dd8fbd78 5172 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5173 break;
5174 case 4:
dd8fbd78 5175 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5176 break;
5177 case 5:
dd8fbd78 5178 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5179 break;
5180 default:
5181 abort();
5182 }
dd8fbd78 5183 dead_tmp(tmp2);
9ee6e8bb 5184 }
dd8fbd78 5185 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5186 }
5187 break;
5188 case 2: /* VMLAL scalar */
5189 case 3: /* VQDMLAL scalar */
5190 case 6: /* VMLSL scalar */
5191 case 7: /* VQDMLSL scalar */
5192 case 10: /* VMULL scalar */
5193 case 11: /* VQDMULL scalar */
ad69471c
PB
5194 if (size == 0 && (op == 3 || op == 7 || op == 11))
5195 return 1;
5196
dd8fbd78
FN
5197 tmp2 = neon_get_scalar(size, rm);
5198 tmp3 = neon_load_reg(rn, 1);
ad69471c 5199
9ee6e8bb 5200 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5201 if (pass == 0) {
5202 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5203 } else {
dd8fbd78 5204 tmp = tmp3;
9ee6e8bb 5205 }
ad69471c 5206 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5207 dead_tmp(tmp);
9ee6e8bb 5208 if (op == 6 || op == 7) {
ad69471c
PB
5209 gen_neon_negl(cpu_V0, size);
5210 }
5211 if (op != 11) {
5212 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5213 }
9ee6e8bb
PB
5214 switch (op) {
5215 case 2: case 6:
ad69471c 5216 gen_neon_addl(size);
9ee6e8bb
PB
5217 break;
5218 case 3: case 7:
ad69471c
PB
5219 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5220 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5221 break;
5222 case 10:
5223 /* no-op */
5224 break;
5225 case 11:
ad69471c 5226 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5227 break;
5228 default:
5229 abort();
5230 }
ad69471c 5231 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5232 }
dd8fbd78
FN
5233
5234 dead_tmp(tmp2);
5235
9ee6e8bb
PB
5236 break;
5237 default: /* 14 and 15 are RESERVED */
5238 return 1;
5239 }
5240 }
5241 } else { /* size == 3 */
5242 if (!u) {
5243 /* Extract. */
9ee6e8bb 5244 imm = (insn >> 8) & 0xf;
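/* VEXT: imm is a byte offset; the result starts at byte imm of the
   Rn operand and continues upwards into the Rm operand. */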
ad69471c
PB
5245 count = q + 1;
5246
5247 if (imm > 7 && !q)
5248 return 1;
5249
5250 if (imm == 0) {
5251 neon_load_reg64(cpu_V0, rn);
5252 if (q) {
5253 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5254 }
ad69471c
PB
5255 } else if (imm == 8) {
5256 neon_load_reg64(cpu_V0, rn + 1);
5257 if (q) {
5258 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5259 }
ad69471c 5260 } else if (q) {
a7812ae4 5261 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5262 if (imm < 8) {
5263 neon_load_reg64(cpu_V0, rn);
a7812ae4 5264 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5265 } else {
5266 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5267 neon_load_reg64(tmp64, rm);
ad69471c
PB
5268 }
5269 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5270 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5271 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5272 if (imm < 8) {
5273 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5274 } else {
ad69471c
PB
5275 neon_load_reg64(cpu_V1, rm + 1);
5276 imm -= 8;
9ee6e8bb 5277 }
ad69471c 5278 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5279 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5280 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5281 tcg_temp_free_i64(tmp64);
ad69471c 5282 } else {
a7812ae4 5283 /* BUGFIX */
ad69471c 5284 neon_load_reg64(cpu_V0, rn);
a7812ae4 5285 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5286 neon_load_reg64(cpu_V1, rm);
a7812ae4 5287 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5288 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5289 }
5290 neon_store_reg64(cpu_V0, rd);
5291 if (q) {
5292 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5293 }
5294 } else if ((insn & (1 << 11)) == 0) {
5295 /* Two register misc. */
5296 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5297 size = (insn >> 18) & 3;
5298 switch (op) {
5299 case 0: /* VREV64 */
5300 if (size == 3)
5301 return 1;
5302 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5303 tmp = neon_load_reg(rm, pass * 2);
5304 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5305 switch (size) {
dd8fbd78
FN
5306 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5307 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5308 case 2: /* no-op */ break;
5309 default: abort();
5310 }
dd8fbd78 5311 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5312 if (size == 2) {
dd8fbd78 5313 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5314 } else {
9ee6e8bb 5315 switch (size) {
dd8fbd78
FN
5316 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5317 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5318 default: abort();
5319 }
dd8fbd78 5320 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5321 }
5322 }
5323 break;
5324 case 4: case 5: /* VPADDL */
5325 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5326 if (size == 3)
5327 return 1;
ad69471c
PB
5328 for (pass = 0; pass < q + 1; pass++) {
5329 tmp = neon_load_reg(rm, pass * 2);
5330 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5331 tmp = neon_load_reg(rm, pass * 2 + 1);
5332 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5333 switch (size) {
5334 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5335 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5336 case 2: tcg_gen_add_i64(CPU_V001); break;
5337 default: abort();
5338 }
9ee6e8bb
PB
5339 if (op >= 12) {
5340 /* Accumulate. */
ad69471c
PB
5341 neon_load_reg64(cpu_V1, rd + pass);
5342 gen_neon_addl(size);
9ee6e8bb 5343 }
ad69471c 5344 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5345 }
5346 break;
5347 case 33: /* VTRN */
5348 if (size == 2) {
5349 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5350 tmp = neon_load_reg(rm, n);
5351 tmp2 = neon_load_reg(rd, n + 1);
5352 neon_store_reg(rm, n, tmp2);
5353 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5354 }
5355 } else {
5356 goto elementwise;
5357 }
5358 break;
5359 case 34: /* VUZP */
5360 /* Reg Before After
5361 Rd A3 A2 A1 A0 B2 B0 A2 A0
5362 Rm B3 B2 B1 B0 B3 B1 A3 A1
5363 */
5364 if (size == 3)
5365 return 1;
5366 gen_neon_unzip(rd, q, 0, size);
5367 gen_neon_unzip(rm, q, 4, size);
5368 if (q) {
5369 static int unzip_order_q[8] =
5370 {0, 2, 4, 6, 1, 3, 5, 7};
5371 for (n = 0; n < 8; n++) {
5372 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5373 tmp = neon_load_scratch(unzip_order_q[n]);
5374 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5375 }
5376 } else {
5377 static int unzip_order[4] =
5378 {0, 4, 1, 5};
5379 for (n = 0; n < 4; n++) {
5380 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5381 tmp = neon_load_scratch(unzip_order[n]);
5382 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5383 }
5384 }
5385 break;
5386 case 35: /* VZIP */
5387 /* Reg Before After
5388 Rd A3 A2 A1 A0 B1 A1 B0 A0
5389 Rm B3 B2 B1 B0 B3 A3 B2 A2
5390 */
5391 if (size == 3)
5392 return 1;
5393 count = (q ? 4 : 2);
5394 for (n = 0; n < count; n++) {
dd8fbd78
FN
5395 tmp = neon_load_reg(rd, n);
5396 tmp2 = neon_load_reg(rd, n);
9ee6e8bb 5397 switch (size) {
dd8fbd78
FN
5398 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5399 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5400 case 2: /* no-op */; break;
5401 default: abort();
5402 }
dd8fbd78
FN
5403 neon_store_scratch(n * 2, tmp);
5404 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5405 }
5406 for (n = 0; n < count * 2; n++) {
5407 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5408 tmp = neon_load_scratch(n);
5409 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5410 }
5411 break;
5412 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5413 if (size == 3)
5414 return 1;
a50f5b91 5415 TCGV_UNUSED(tmp2);
9ee6e8bb 5416 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5417 neon_load_reg64(cpu_V0, rm + pass);
5418 tmp = new_tmp();
9ee6e8bb 5419 if (op == 36 && q == 0) {
ad69471c 5420 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5421 } else if (q) {
ad69471c 5422 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5423 } else {
ad69471c
PB
5424 gen_neon_narrow_sats(size, tmp, cpu_V0);
5425 }
5426 if (pass == 0) {
5427 tmp2 = tmp;
5428 } else {
5429 neon_store_reg(rd, 0, tmp2);
5430 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5431 }
9ee6e8bb
PB
5432 }
5433 break;
5434 case 38: /* VSHLL */
ad69471c 5435 if (q || size == 3)
9ee6e8bb 5436 return 1;
ad69471c
PB
5437 tmp = neon_load_reg(rm, 0);
5438 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5439 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5440 if (pass == 1)
5441 tmp = tmp2;
5442 gen_neon_widen(cpu_V0, tmp, size, 1);
5443 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5444 }
5445 break;
60011498
PB
5446 case 44: /* VCVT.F16.F32 */
5447 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5448 return 1;
5449 tmp = new_tmp();
5450 tmp2 = new_tmp();
5451 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5452 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5453 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5454 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5455 tcg_gen_shli_i32(tmp2, tmp2, 16);
5456 tcg_gen_or_i32(tmp2, tmp2, tmp);
5457 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5458 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5459 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5460 neon_store_reg(rd, 0, tmp2);
5461 tmp2 = new_tmp();
5462 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5463 tcg_gen_shli_i32(tmp2, tmp2, 16);
5464 tcg_gen_or_i32(tmp2, tmp2, tmp);
5465 neon_store_reg(rd, 1, tmp2);
5466 dead_tmp(tmp);
5467 break;
5468 case 46: /* VCVT.F32.F16 */
5469 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5470 return 1;
5471 tmp3 = new_tmp();
5472 tmp = neon_load_reg(rm, 0);
5473 tmp2 = neon_load_reg(rm, 1);
5474 tcg_gen_ext16u_i32(tmp3, tmp);
5475 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5476 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5477 tcg_gen_shri_i32(tmp3, tmp, 16);
5478 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5479 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5480 dead_tmp(tmp);
5481 tcg_gen_ext16u_i32(tmp3, tmp2);
5482 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5483 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5484 tcg_gen_shri_i32(tmp3, tmp2, 16);
5485 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5486 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5487 dead_tmp(tmp2);
5488 dead_tmp(tmp3);
5489 break;
9ee6e8bb
PB
5490 default:
5491 elementwise:
5492 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5493 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5494 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5495 neon_reg_offset(rm, pass));
dd8fbd78 5496 TCGV_UNUSED(tmp);
9ee6e8bb 5497 } else {
dd8fbd78 5498 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5499 }
5500 switch (op) {
5501 case 1: /* VREV32 */
5502 switch (size) {
dd8fbd78
FN
5503 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5504 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5505 default: return 1;
5506 }
5507 break;
5508 case 2: /* VREV16 */
5509 if (size != 0)
5510 return 1;
dd8fbd78 5511 gen_rev16(tmp);
9ee6e8bb 5512 break;
9ee6e8bb
PB
5513 case 8: /* CLS */
5514 switch (size) {
dd8fbd78
FN
5515 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5516 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5517 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5518 default: return 1;
5519 }
5520 break;
5521 case 9: /* CLZ */
5522 switch (size) {
dd8fbd78
FN
5523 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5524 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5525 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5526 default: return 1;
5527 }
5528 break;
5529 case 10: /* CNT */
5530 if (size != 0)
5531 return 1;
dd8fbd78 5532 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5533 break;
5534 case 11: /* VNOT */
5535 if (size != 0)
5536 return 1;
dd8fbd78 5537 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5538 break;
5539 case 14: /* VQABS */
5540 switch (size) {
dd8fbd78
FN
5541 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5542 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5543 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5544 default: return 1;
5545 }
5546 break;
5547 case 15: /* VQNEG */
5548 switch (size) {
dd8fbd78
FN
5549 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5550 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5551 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5552 default: return 1;
5553 }
5554 break;
5555 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5556 tmp2 = tcg_const_i32(0);
9ee6e8bb 5557 switch(size) {
dd8fbd78
FN
5558 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5559 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5560 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5561 default: return 1;
5562 }
dd8fbd78 5563 tcg_temp_free(tmp2);
9ee6e8bb 5564 if (op == 19)
dd8fbd78 5565 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5566 break;
5567 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5568 tmp2 = tcg_const_i32(0);
9ee6e8bb 5569 switch(size) {
dd8fbd78
FN
5570 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5571 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5572 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5573 default: return 1;
5574 }
dd8fbd78 5575 tcg_temp_free(tmp2);
9ee6e8bb 5576 if (op == 20)
dd8fbd78 5577 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5578 break;
5579 case 18: /* VCEQ #0 */
dd8fbd78 5580 tmp2 = tcg_const_i32(0);
9ee6e8bb 5581 switch(size) {
dd8fbd78
FN
5582 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5583 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5584 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5585 default: return 1;
5586 }
dd8fbd78 5587 tcg_temp_free(tmp2);
9ee6e8bb
PB
5588 break;
5589 case 22: /* VABS */
5590 switch(size) {
dd8fbd78
FN
5591 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5592 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5593 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5594 default: return 1;
5595 }
5596 break;
5597 case 23: /* VNEG */
ad69471c
PB
5598 if (size == 3)
5599 return 1;
dd8fbd78
FN
5600 tmp2 = tcg_const_i32(0);
5601 gen_neon_rsb(size, tmp, tmp2);
5602 tcg_temp_free(tmp2);
9ee6e8bb
PB
5603 break;
5604 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5605 tmp2 = tcg_const_i32(0);
5606 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5607 tcg_temp_free(tmp2);
9ee6e8bb 5608 if (op == 27)
dd8fbd78 5609 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5610 break;
5611 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5612 tmp2 = tcg_const_i32(0);
5613 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5614 tcg_temp_free(tmp2);
9ee6e8bb 5615 if (op == 28)
dd8fbd78 5616 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5617 break;
5618 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5619 tmp2 = tcg_const_i32(0);
5620 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5621 tcg_temp_free(tmp2);
9ee6e8bb
PB
5622 break;
5623 case 30: /* Float VABS */
4373f3ce 5624 gen_vfp_abs(0);
9ee6e8bb
PB
5625 break;
5626 case 31: /* Float VNEG */
4373f3ce 5627 gen_vfp_neg(0);
9ee6e8bb
PB
5628 break;
5629 case 32: /* VSWP */
dd8fbd78
FN
5630 tmp2 = neon_load_reg(rd, pass);
5631 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5632 break;
5633 case 33: /* VTRN */
dd8fbd78 5634 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5635 switch (size) {
dd8fbd78
FN
5636 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5637 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5638 case 2: abort();
5639 default: return 1;
5640 }
dd8fbd78 5641 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5642 break;
5643 case 56: /* Integer VRECPE */
dd8fbd78 5644 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5645 break;
5646 case 57: /* Integer VRSQRTE */
dd8fbd78 5647 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5648 break;
5649 case 58: /* Float VRECPE */
4373f3ce 5650 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5651 break;
5652 case 59: /* Float VRSQRTE */
4373f3ce 5653 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5654 break;
5655 case 60: /* VCVT.F32.S32 */
4373f3ce 5656 gen_vfp_tosiz(0);
9ee6e8bb
PB
5657 break;
5658 case 61: /* VCVT.F32.U32 */
4373f3ce 5659 gen_vfp_touiz(0);
9ee6e8bb
PB
5660 break;
5661 case 62: /* VCVT.S32.F32 */
4373f3ce 5662 gen_vfp_sito(0);
9ee6e8bb
PB
5663 break;
5664 case 63: /* VCVT.U32.F32 */
4373f3ce 5665 gen_vfp_uito(0);
9ee6e8bb
PB
5666 break;
5667 default:
5668 /* Reserved: 21, 29, 39-56 */
5669 return 1;
5670 }
5671 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5672 tcg_gen_st_f32(cpu_F0s, cpu_env,
5673 neon_reg_offset(rd, pass));
9ee6e8bb 5674 } else {
dd8fbd78 5675 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5676 }
5677 }
5678 break;
5679 }
5680 } else if ((insn & (1 << 10)) == 0) {
5681 /* VTBL, VTBX. */
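/* n is the table length in bytes (8 per list register); for VTBX the
   old destination is passed to the helper so out-of-range indices
   leave it unchanged, while for VTBL zero is passed instead. */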
3018f259 5682 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5683 if (insn & (1 << 6)) {
8f8e3aa4 5684 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5685 } else {
8f8e3aa4
PB
5686 tmp = new_tmp();
5687 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5688 }
8f8e3aa4 5689 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5690 tmp4 = tcg_const_i32(rn);
5691 tmp5 = tcg_const_i32(n);
5692 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
3018f259 5693 dead_tmp(tmp);
9ee6e8bb 5694 if (insn & (1 << 6)) {
8f8e3aa4 5695 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5696 } else {
8f8e3aa4
PB
5697 tmp = new_tmp();
5698 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5699 }
8f8e3aa4 5700 tmp3 = neon_load_reg(rm, 1);
b75263d6 5701 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5702 tcg_temp_free_i32(tmp5);
5703 tcg_temp_free_i32(tmp4);
8f8e3aa4 5704 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5705 neon_store_reg(rd, 1, tmp3);
5706 dead_tmp(tmp);
9ee6e8bb
PB
5707 } else if ((insn & 0x380) == 0) {
5708 /* VDUP */
5709 if (insn & (1 << 19)) {
dd8fbd78 5710 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5711 } else {
dd8fbd78 5712 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5713 }
5714 if (insn & (1 << 16)) {
dd8fbd78 5715 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5716 } else if (insn & (1 << 17)) {
5717 if ((insn >> 18) & 1)
dd8fbd78 5718 gen_neon_dup_high16(tmp);
9ee6e8bb 5719 else
dd8fbd78 5720 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5721 }
5722 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5723 tmp2 = new_tmp();
5724 tcg_gen_mov_i32(tmp2, tmp);
5725 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5726 }
dd8fbd78 5727 dead_tmp(tmp);
9ee6e8bb
PB
5728 } else {
5729 return 1;
5730 }
5731 }
5732 }
5733 return 0;
5734}
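
/* Decode sketch for the VDUP (scalar) case handled above: imm4 is
   insn[19:16].  A value of the form xxx1 selects a byte (index
   insn[19:17]), xx10 a halfword (index insn[19:18]) and x100 a word
   (index insn[19]); bit 19 therefore always picks the 32-bit half of
   the source D register, which is why it is tested first.  Sketch
   only; field naming follows the ARM ARM encoding diagram.  */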
5735
fe1479c3
PB
5736static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5737{
5738 int crn = (insn >> 16) & 0xf;
5739 int crm = insn & 0xf;
5740 int op1 = (insn >> 21) & 7;
5741 int op2 = (insn >> 5) & 7;
5742 int rt = (insn >> 12) & 0xf;
5743 TCGv tmp;
5744
5745 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5746 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5747 /* TEECR */
5748 if (IS_USER(s))
5749 return 1;
5750 tmp = load_cpu_field(teecr);
5751 store_reg(s, rt, tmp);
5752 return 0;
5753 }
5754 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5755 /* TEEHBR */
5756 if (IS_USER(s) && (env->teecr & 1))
5757 return 1;
5758 tmp = load_cpu_field(teehbr);
5759 store_reg(s, rt, tmp);
5760 return 0;
5761 }
5762 }
5763 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5764 op1, crn, crm, op2);
5765 return 1;
5766}
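
/* Illustrative sketch, not used by the translator: with the field
   extraction in disas_cp14_read() above, the two ThumbEE registers
   correspond to
       MRC p14, 6, <Rt>, c0, c0, 0     (TEECR)
       MRC p14, 6, <Rt>, c1, c0, 0     (TEEHBR)
   The helper below rebuilds such an instruction word from its fields
   using the same bit positions the decoder reads; the condition field
   and the fixed coprocessor-transfer bits follow the generic ARM MRC
   encoding and are assumptions of this sketch.  */
static inline uint32_t make_cp14_mrc(int cond, int op1, int crn, int rt,
                                     int op2, int crm)
{
    return ((uint32_t)cond << 28) | (0xe << 24) | (op1 << 21) | (1 << 20)
        | (crn << 16) | (rt << 12) | (14 << 8) | (op2 << 5) | (1 << 4) | crm;
}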
5767
5768static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5769{
5770 int crn = (insn >> 16) & 0xf;
5771 int crm = insn & 0xf;
5772 int op1 = (insn >> 21) & 7;
5773 int op2 = (insn >> 5) & 7;
5774 int rt = (insn >> 12) & 0xf;
5775 TCGv tmp;
5776
5777 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5778 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5779 /* TEECR */
5780 if (IS_USER(s))
5781 return 1;
5782 tmp = load_reg(s, rt);
5783 gen_helper_set_teecr(cpu_env, tmp);
5784 dead_tmp(tmp);
5785 return 0;
5786 }
5787 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5788 /* TEEHBR */
5789 if (IS_USER(s) && (env->teecr & 1))
5790 return 1;
5791 tmp = load_reg(s, rt);
5792 store_cpu_field(tmp, teehbr);
5793 return 0;
5794 }
5795 }
5796 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5797 op1, crn, crm, op2);
5798 return 1;
5799}
5800
9ee6e8bb
PB
5801static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5802{
5803 int cpnum;
5804
5805 cpnum = (insn >> 8) & 0xf;
5806 if (arm_feature(env, ARM_FEATURE_XSCALE)
5807 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5808 return 1;
5809
5810 switch (cpnum) {
5811 case 0:
5812 case 1:
5813 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5814 return disas_iwmmxt_insn(env, s, insn);
5815 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5816 return disas_dsp_insn(env, s, insn);
5817 }
5818 return 1;
5819 case 10:
5820 case 11:
5821 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5822 case 14:
5823 /* Coprocessors 7-15 are architecturally reserved by ARM.
5824 Unfortunately Intel decided to ignore this. */
5825 if (arm_feature(env, ARM_FEATURE_XSCALE))
5826 goto board;
5827 if (insn & (1 << 20))
5828 return disas_cp14_read(env, s, insn);
5829 else
5830 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5831 case 15:
5832 return disas_cp15_insn (env, s, insn);
5833 default:
fe1479c3 5834 board:
9ee6e8bb
PB
5835 /* Unknown coprocessor. See if the board has hooked it. */
5836 return disas_cp_insn (env, s, insn);
5837 }
5838}
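
/* Restatement of the XScale coprocessor access check above (sketch
   only, never called): c15_cpar holds one access-enable bit per
   coprocessor 0..13, so inverting the low 14 bits and testing the bit
   for this coprocessor is true exactly when access is disabled and
   the instruction must be treated as undefined.  */
static inline int xscale_cpar_denies(uint32_t cpar, int cpnum)
{
    return ((cpar ^ 0x3fff) & (1 << cpnum)) != 0;
}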
5839
5e3f878a
PB
5840
5841/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5842static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5843{
5844 TCGv tmp;
5845 tmp = new_tmp();
5846 tcg_gen_trunc_i64_i32(tmp, val);
5847 store_reg(s, rlow, tmp);
5848 tmp = new_tmp();
5849 tcg_gen_shri_i64(val, val, 32);
5850 tcg_gen_trunc_i64_i32(tmp, val);
5851 store_reg(s, rhigh, tmp);
5852}
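
/* Host-side analogue of the split performed by gen_storeq_reg() above
   (sketch only): the low word of the 64-bit value goes to rlow and the
   high word to rhigh, matching the truncate/shift/truncate sequence
   emitted into the TB.  */
static inline void storeq_reg_reference(uint32_t regs[16], int rlow,
                                        int rhigh, uint64_t val)
{
    regs[rlow] = (uint32_t)val;
    regs[rhigh] = (uint32_t)(val >> 32);
}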
5853
5854/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5855static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5856{
a7812ae4 5857 TCGv_i64 tmp;
5e3f878a
PB
5858 TCGv tmp2;
5859
36aa55dc 5860 /* Load value and extend to 64 bits. */
a7812ae4 5861 tmp = tcg_temp_new_i64();
5e3f878a
PB
5862 tmp2 = load_reg(s, rlow);
5863 tcg_gen_extu_i32_i64(tmp, tmp2);
5864 dead_tmp(tmp2);
5865 tcg_gen_add_i64(val, val, tmp);
b75263d6 5866 tcg_temp_free_i64(tmp);
5e3f878a
PB
5867}
5868
5869/* load and add a 64-bit value from a register pair. */
a7812ae4 5870static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5871{
a7812ae4 5872 TCGv_i64 tmp;
36aa55dc
PB
5873 TCGv tmpl;
5874 TCGv tmph;
5e3f878a
PB
5875
5876 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5877 tmpl = load_reg(s, rlow);
5878 tmph = load_reg(s, rhigh);
a7812ae4 5879 tmp = tcg_temp_new_i64();
36aa55dc
PB
5880 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5881 dead_tmp(tmpl);
5882 dead_tmp(tmph);
5e3f878a 5883 tcg_gen_add_i64(val, val, tmp);
b75263d6 5884 tcg_temp_free_i64(tmp);
5e3f878a
PB
5885}
5886
5887/* Set N and Z flags from a 64-bit value. */
a7812ae4 5888static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5889{
5890 TCGv tmp = new_tmp();
5891 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5892 gen_logic_CC(tmp);
5893 dead_tmp(tmp);
5e3f878a
PB
5894}
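
/* Reference semantics for the flag computation above (a sketch that
   assumes the usual definition of the helper, not a quotation of it):
   a 64-bit result sets N from bit 63 and Z from the whole value, so
   folding it as below gives a 32-bit value whose N and Z under
   gen_logic_CC() match.  */
static inline uint32_t logicq_cc_reference(uint64_t val)
{
    return (uint32_t)(val >> 32) | (val != 0);
}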
5895
426f5abc
PB
5896/* Load/Store exclusive instructions are implemented by remembering
5897 the value/address loaded, and seeing if these are the same
5898 when the store is performed. This should be sufficient to implement
5899 the architecturally mandated semantics, and avoids having to monitor
5900 regular stores.
5901
5902 In system emulation mode only one CPU will be running at once, so
5903 this sequence is effectively atomic. In user emulation mode we
5904 throw an exception and handle the atomic operation elsewhere. */
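
/* Illustrative guest code (not generated by QEMU): the canonical
   atomic increment built on exclusives, which this scheme must
   support:
       retry:  ldrex   r1, [r0]
               add     r1, r1, #1
               strex   r2, r1, [r0]     @ r2 = 0 on success
               cmp     r2, #0
               bne     retry
   All the translator has to guarantee is that the strex fails whenever
   the monitored address or value has changed, which the recorded
   address/value comparison below provides.  */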
5905static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5906 TCGv addr, int size)
5907{
5908 TCGv tmp;
5909
5910 switch (size) {
5911 case 0:
5912 tmp = gen_ld8u(addr, IS_USER(s));
5913 break;
5914 case 1:
5915 tmp = gen_ld16u(addr, IS_USER(s));
5916 break;
5917 case 2:
5918 case 3:
5919 tmp = gen_ld32(addr, IS_USER(s));
5920 break;
5921 default:
5922 abort();
5923 }
5924 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5925 store_reg(s, rt, tmp);
5926 if (size == 3) {
5927 tcg_gen_addi_i32(addr, addr, 4);
5928 tmp = gen_ld32(addr, IS_USER(s));
5929 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5930 store_reg(s, rt2, tmp);
5931 }
5932 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5933}
5934
5935static void gen_clrex(DisasContext *s)
5936{
5937 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5938}
5939
5940#ifdef CONFIG_USER_ONLY
5941static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5942 TCGv addr, int size)
5943{
5944 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5945 tcg_gen_movi_i32(cpu_exclusive_info,
5946 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5947 gen_set_condexec(s);
5948 gen_set_pc_im(s->pc - 4);
5949 gen_exception(EXCP_STREX);
5950 s->is_jmp = DISAS_JUMP;
5951}
5952#else
5953static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5954 TCGv addr, int size)
5955{
5956 TCGv tmp;
5957 int done_label;
5958 int fail_label;
5959
5960 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5961 [addr] = {Rt};
5962 {Rd} = 0;
5963 } else {
5964 {Rd} = 1;
5965 } */
5966 fail_label = gen_new_label();
5967 done_label = gen_new_label();
5968 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
5969 switch (size) {
5970 case 0:
5971 tmp = gen_ld8u(addr, IS_USER(s));
5972 break;
5973 case 1:
5974 tmp = gen_ld16u(addr, IS_USER(s));
5975 break;
5976 case 2:
5977 case 3:
5978 tmp = gen_ld32(addr, IS_USER(s));
5979 break;
5980 default:
5981 abort();
5982 }
5983 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
5984 dead_tmp(tmp);
5985 if (size == 3) {
5986 TCGv tmp2 = new_tmp();
5987 tcg_gen_addi_i32(tmp2, addr, 4);
5988 tmp = gen_ld32(tmp2, IS_USER(s));
5989 dead_tmp(tmp2);
5990 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
5991 dead_tmp(tmp);
5992 }
5993 tmp = load_reg(s, rt);
5994 switch (size) {
5995 case 0:
5996 gen_st8(tmp, addr, IS_USER(s));
5997 break;
5998 case 1:
5999 gen_st16(tmp, addr, IS_USER(s));
6000 break;
6001 case 2:
6002 case 3:
6003 gen_st32(tmp, addr, IS_USER(s));
6004 break;
6005 default:
6006 abort();
6007 }
6008 if (size == 3) {
6009 tcg_gen_addi_i32(addr, addr, 4);
6010 tmp = load_reg(s, rt2);
6011 gen_st32(tmp, addr, IS_USER(s));
6012 }
6013 tcg_gen_movi_i32(cpu_R[rd], 0);
6014 tcg_gen_br(done_label);
6015 gen_set_label(fail_label);
6016 tcg_gen_movi_i32(cpu_R[rd], 1);
6017 gen_set_label(done_label);
6018 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6019}
6020#endif
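
/* Sketch of how the word packed into cpu_exclusive_info by the
   user-only path above can be unpacked again by the EXCP_STREX
   handler (the handler lives outside this file; the field layout is
   taken from the tcg_gen_movi_i32() call above).  */
static inline void unpack_strex_info(uint32_t info, int *size, int *rd,
                                     int *rt, int *rt2)
{
    *size = info & 0xf;
    *rd = (info >> 4) & 0xf;
    *rt = (info >> 8) & 0xf;
    *rt2 = (info >> 12) & 0xf;
}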
6021
9ee6e8bb
PB
6022static void disas_arm_insn(CPUState * env, DisasContext *s)
6023{
6024 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6025 TCGv tmp;
3670669c 6026 TCGv tmp2;
6ddbc6e4 6027 TCGv tmp3;
b0109805 6028 TCGv addr;
a7812ae4 6029 TCGv_i64 tmp64;
9ee6e8bb
PB
6030
6031 insn = ldl_code(s->pc);
6032 s->pc += 4;
6033
6034 /* M variants do not implement ARM mode. */
6035 if (IS_M(env))
6036 goto illegal_op;
6037 cond = insn >> 28;
6038 if (cond == 0xf){
6039 /* Unconditional instructions. */
6040 if (((insn >> 25) & 7) == 1) {
6041 /* NEON Data processing. */
6042 if (!arm_feature(env, ARM_FEATURE_NEON))
6043 goto illegal_op;
6044
6045 if (disas_neon_data_insn(env, s, insn))
6046 goto illegal_op;
6047 return;
6048 }
6049 if ((insn & 0x0f100000) == 0x04000000) {
6050 /* NEON load/store. */
6051 if (!arm_feature(env, ARM_FEATURE_NEON))
6052 goto illegal_op;
6053
6054 if (disas_neon_ls_insn(env, s, insn))
6055 goto illegal_op;
6056 return;
6057 }
6058 if ((insn & 0x0d70f000) == 0x0550f000)
6059 return; /* PLD */
6060 else if ((insn & 0x0ffffdff) == 0x01010000) {
6061 ARCH(6);
6062 /* setend */
6063 if (insn & (1 << 9)) {
6064 /* BE8 mode not implemented. */
6065 goto illegal_op;
6066 }
6067 return;
6068 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6069 switch ((insn >> 4) & 0xf) {
6070 case 1: /* clrex */
6071 ARCH(6K);
426f5abc 6072 gen_clrex(s);
9ee6e8bb
PB
6073 return;
6074 case 4: /* dsb */
6075 case 5: /* dmb */
6076 case 6: /* isb */
6077 ARCH(7);
6078 /* We don't emulate caches so these are a no-op. */
6079 return;
6080 default:
6081 goto illegal_op;
6082 }
6083 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6084 /* srs */
c67b6b71 6085 int32_t offset;
9ee6e8bb
PB
6086 if (IS_USER(s))
6087 goto illegal_op;
6088 ARCH(6);
6089 op1 = (insn & 0x1f);
6090 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 6091 addr = load_reg(s, 13);
9ee6e8bb 6092 } else {
b0109805 6093 addr = new_tmp();
b75263d6
JR
6094 tmp = tcg_const_i32(op1);
6095 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6096 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6097 }
6098 i = (insn >> 23) & 3;
6099 switch (i) {
6100 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6101 case 1: offset = 0; break; /* IA */
6102 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6103 case 3: offset = 4; break; /* IB */
6104 default: abort();
6105 }
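/* Worked example (sketch): SRS stores two words, LR then SPSR.  With
   SRSDB the initial offset of -8 puts LR at SP-8 and, after the +4
   step below, SPSR at SP-4; with SRSIA the offset is 0, giving SP and
   SP+4.  The writeback offsets further down then recover the final
   SP from the address of the second store.  */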
6106 if (offset)
b0109805
PB
6107 tcg_gen_addi_i32(addr, addr, offset);
6108 tmp = load_reg(s, 14);
6109 gen_st32(tmp, addr, 0);
c67b6b71 6110 tmp = load_cpu_field(spsr);
b0109805
PB
6111 tcg_gen_addi_i32(addr, addr, 4);
6112 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6113 if (insn & (1 << 21)) {
6114 /* Base writeback. */
6115 switch (i) {
6116 case 0: offset = -8; break;
c67b6b71
FN
6117 case 1: offset = 4; break;
6118 case 2: offset = -4; break;
9ee6e8bb
PB
6119 case 3: offset = 0; break;
6120 default: abort();
6121 }
6122 if (offset)
c67b6b71 6123 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 6124 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 6125 store_reg(s, 13, addr);
9ee6e8bb 6126 } else {
b75263d6
JR
6127 tmp = tcg_const_i32(op1);
6128 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6129 tcg_temp_free_i32(tmp);
c67b6b71 6130 dead_tmp(addr);
9ee6e8bb 6131 }
b0109805
PB
6132 } else {
6133 dead_tmp(addr);
9ee6e8bb
PB
6134 }
6135 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
6136 /* rfe */
c67b6b71 6137 int32_t offset;
9ee6e8bb
PB
6138 if (IS_USER(s))
6139 goto illegal_op;
6140 ARCH(6);
6141 rn = (insn >> 16) & 0xf;
b0109805 6142 addr = load_reg(s, rn);
9ee6e8bb
PB
6143 i = (insn >> 23) & 3;
6144 switch (i) {
b0109805 6145 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6146 case 1: offset = 0; break; /* IA */
6147 case 2: offset = -8; break; /* DB */
b0109805 6148 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6149 default: abort();
6150 }
6151 if (offset)
b0109805
PB
6152 tcg_gen_addi_i32(addr, addr, offset);
6153 /* Load PC into tmp and CPSR into tmp2. */
6154 tmp = gen_ld32(addr, 0);
6155 tcg_gen_addi_i32(addr, addr, 4);
6156 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6157 if (insn & (1 << 21)) {
6158 /* Base writeback. */
6159 switch (i) {
b0109805 6160 case 0: offset = -8; break;
c67b6b71
FN
6161 case 1: offset = 4; break;
6162 case 2: offset = -4; break;
b0109805 6163 case 3: offset = 0; break;
9ee6e8bb
PB
6164 default: abort();
6165 }
6166 if (offset)
b0109805
PB
6167 tcg_gen_addi_i32(addr, addr, offset);
6168 store_reg(s, rn, addr);
6169 } else {
6170 dead_tmp(addr);
9ee6e8bb 6171 }
b0109805 6172 gen_rfe(s, tmp, tmp2);
c67b6b71 6173 return;
9ee6e8bb
PB
6174 } else if ((insn & 0x0e000000) == 0x0a000000) {
6175 /* branch link and change to thumb (blx <offset>) */
6176 int32_t offset;
6177
6178 val = (uint32_t)s->pc;
d9ba4830
PB
6179 tmp = new_tmp();
6180 tcg_gen_movi_i32(tmp, val);
6181 store_reg(s, 14, tmp);
9ee6e8bb
PB
6182 /* Sign-extend the 24-bit offset */
6183 offset = (((int32_t)insn) << 8) >> 8;
6184 /* offset * 4 + bit24 * 2 + (thumb bit) */
6185 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6186 /* pipeline offset */
6187 val += 4;
d9ba4830 6188 gen_bx_im(s, val);
9ee6e8bb
PB
6189 return;
6190 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6191 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6192 /* iWMMXt register transfer. */
6193 if (env->cp15.c15_cpar & (1 << 1))
6194 if (!disas_iwmmxt_insn(env, s, insn))
6195 return;
6196 }
6197 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6198 /* Coprocessor double register transfer. */
6199 } else if ((insn & 0x0f000010) == 0x0e000010) {
6200 /* Additional coprocessor register transfer. */
7997d92f 6201 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6202 uint32_t mask;
6203 uint32_t val;
6204 /* cps (privileged) */
6205 if (IS_USER(s))
6206 return;
6207 mask = val = 0;
6208 if (insn & (1 << 19)) {
6209 if (insn & (1 << 8))
6210 mask |= CPSR_A;
6211 if (insn & (1 << 7))
6212 mask |= CPSR_I;
6213 if (insn & (1 << 6))
6214 mask |= CPSR_F;
6215 if (insn & (1 << 18))
6216 val |= mask;
6217 }
7997d92f 6218 if (insn & (1 << 17)) {
9ee6e8bb
PB
6219 mask |= CPSR_M;
6220 val |= (insn & 0x1f);
6221 }
6222 if (mask) {
2fbac54b 6223 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6224 }
6225 return;
6226 }
6227 goto illegal_op;
6228 }
6229 if (cond != 0xe) {
6230 /* if the condition is not 'always', generate a conditional jump to
6231 the next instruction */
6232 s->condlabel = gen_new_label();
d9ba4830 6233 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6234 s->condjmp = 1;
6235 }
6236 if ((insn & 0x0f900000) == 0x03000000) {
6237 if ((insn & (1 << 21)) == 0) {
6238 ARCH(6T2);
6239 rd = (insn >> 12) & 0xf;
6240 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6241 if ((insn & (1 << 22)) == 0) {
6242 /* MOVW */
5e3f878a
PB
6243 tmp = new_tmp();
6244 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6245 } else {
6246 /* MOVT */
5e3f878a 6247 tmp = load_reg(s, rd);
86831435 6248 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6249 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6250 }
5e3f878a 6251 store_reg(s, rd, tmp);
9ee6e8bb
PB
6252 } else {
6253 if (((insn >> 12) & 0xf) != 0xf)
6254 goto illegal_op;
6255 if (((insn >> 16) & 0xf) == 0) {
6256 gen_nop_hint(s, insn & 0xff);
6257 } else {
6258 /* CPSR = immediate */
6259 val = insn & 0xff;
6260 shift = ((insn >> 8) & 0xf) * 2;
6261 if (shift)
6262 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6263 i = ((insn & (1 << 22)) != 0);
2fbac54b 6264 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6265 goto illegal_op;
6266 }
6267 }
6268 } else if ((insn & 0x0f900000) == 0x01000000
6269 && (insn & 0x00000090) != 0x00000090) {
6270 /* miscellaneous instructions */
6271 op1 = (insn >> 21) & 3;
6272 sh = (insn >> 4) & 0xf;
6273 rm = insn & 0xf;
6274 switch (sh) {
6275 case 0x0: /* move program status register */
6276 if (op1 & 1) {
6277 /* PSR = reg */
2fbac54b 6278 tmp = load_reg(s, rm);
9ee6e8bb 6279 i = ((op1 & 2) != 0);
2fbac54b 6280 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6281 goto illegal_op;
6282 } else {
6283 /* reg = PSR */
6284 rd = (insn >> 12) & 0xf;
6285 if (op1 & 2) {
6286 if (IS_USER(s))
6287 goto illegal_op;
d9ba4830 6288 tmp = load_cpu_field(spsr);
9ee6e8bb 6289 } else {
d9ba4830
PB
6290 tmp = new_tmp();
6291 gen_helper_cpsr_read(tmp);
9ee6e8bb 6292 }
d9ba4830 6293 store_reg(s, rd, tmp);
9ee6e8bb
PB
6294 }
6295 break;
6296 case 0x1:
6297 if (op1 == 1) {
6298 /* branch/exchange thumb (bx). */
d9ba4830
PB
6299 tmp = load_reg(s, rm);
6300 gen_bx(s, tmp);
9ee6e8bb
PB
6301 } else if (op1 == 3) {
6302 /* clz */
6303 rd = (insn >> 12) & 0xf;
1497c961
PB
6304 tmp = load_reg(s, rm);
6305 gen_helper_clz(tmp, tmp);
6306 store_reg(s, rd, tmp);
9ee6e8bb
PB
6307 } else {
6308 goto illegal_op;
6309 }
6310 break;
6311 case 0x2:
6312 if (op1 == 1) {
6313 ARCH(5J); /* bxj */
6314 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6315 tmp = load_reg(s, rm);
6316 gen_bx(s, tmp);
9ee6e8bb
PB
6317 } else {
6318 goto illegal_op;
6319 }
6320 break;
6321 case 0x3:
6322 if (op1 != 1)
6323 goto illegal_op;
6324
6325 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6326 tmp = load_reg(s, rm);
6327 tmp2 = new_tmp();
6328 tcg_gen_movi_i32(tmp2, s->pc);
6329 store_reg(s, 14, tmp2);
6330 gen_bx(s, tmp);
9ee6e8bb
PB
6331 break;
6332 case 0x5: /* saturating add/subtract */
6333 rd = (insn >> 12) & 0xf;
6334 rn = (insn >> 16) & 0xf;
b40d0353 6335 tmp = load_reg(s, rm);
5e3f878a 6336 tmp2 = load_reg(s, rn);
9ee6e8bb 6337 if (op1 & 2)
5e3f878a 6338 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6339 if (op1 & 1)
5e3f878a 6340 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6341 else
5e3f878a
PB
6342 gen_helper_add_saturate(tmp, tmp, tmp2);
6343 dead_tmp(tmp2);
6344 store_reg(s, rd, tmp);
9ee6e8bb
PB
6345 break;
6346 case 7: /* bkpt */
6347 gen_set_condexec(s);
5e3f878a 6348 gen_set_pc_im(s->pc - 4);
d9ba4830 6349 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6350 s->is_jmp = DISAS_JUMP;
6351 break;
6352 case 0x8: /* signed multiply */
6353 case 0xa:
6354 case 0xc:
6355 case 0xe:
6356 rs = (insn >> 8) & 0xf;
6357 rn = (insn >> 12) & 0xf;
6358 rd = (insn >> 16) & 0xf;
6359 if (op1 == 1) {
6360 /* (32 * 16) >> 16 */
5e3f878a
PB
6361 tmp = load_reg(s, rm);
6362 tmp2 = load_reg(s, rs);
9ee6e8bb 6363 if (sh & 4)
5e3f878a 6364 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6365 else
5e3f878a 6366 gen_sxth(tmp2);
a7812ae4
PB
6367 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6368 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6369 tmp = new_tmp();
a7812ae4 6370 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6371 tcg_temp_free_i64(tmp64);
9ee6e8bb 6372 if ((sh & 2) == 0) {
5e3f878a
PB
6373 tmp2 = load_reg(s, rn);
6374 gen_helper_add_setq(tmp, tmp, tmp2);
6375 dead_tmp(tmp2);
9ee6e8bb 6376 }
5e3f878a 6377 store_reg(s, rd, tmp);
9ee6e8bb
PB
6378 } else {
6379 /* 16 * 16 */
5e3f878a
PB
6380 tmp = load_reg(s, rm);
6381 tmp2 = load_reg(s, rs);
6382 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6383 dead_tmp(tmp2);
9ee6e8bb 6384 if (op1 == 2) {
a7812ae4
PB
6385 tmp64 = tcg_temp_new_i64();
6386 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6387 dead_tmp(tmp);
a7812ae4
PB
6388 gen_addq(s, tmp64, rn, rd);
6389 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6390 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6391 } else {
6392 if (op1 == 0) {
5e3f878a
PB
6393 tmp2 = load_reg(s, rn);
6394 gen_helper_add_setq(tmp, tmp, tmp2);
6395 dead_tmp(tmp2);
9ee6e8bb 6396 }
5e3f878a 6397 store_reg(s, rd, tmp);
9ee6e8bb
PB
6398 }
6399 }
6400 break;
6401 default:
6402 goto illegal_op;
6403 }
6404 } else if (((insn & 0x0e000000) == 0 &&
6405 (insn & 0x00000090) != 0x90) ||
6406 ((insn & 0x0e000000) == (1 << 25))) {
6407 int set_cc, logic_cc, shiftop;
6408
6409 op1 = (insn >> 21) & 0xf;
6410 set_cc = (insn >> 20) & 1;
6411 logic_cc = table_logic_cc[op1] & set_cc;
6412
6413 /* data processing instruction */
6414 if (insn & (1 << 25)) {
6415 /* immediate operand */
6416 val = insn & 0xff;
6417 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6418 if (shift) {
9ee6e8bb 6419 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6420 }
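/* Worked example (sketch) of the modified-immediate decode above:
   imm12 = 0x4ff rotates 0xff right by 2 * 4 = 8 bits, giving
   val = 0xff000000.  A zero rotation leaves the byte unchanged, and
   only a nonzero rotation exposes the shifter carry-out (bit 31)
   via gen_set_CF_bit31() below.  */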
6421 tmp2 = new_tmp();
6422 tcg_gen_movi_i32(tmp2, val);
6423 if (logic_cc && shift) {
6424 gen_set_CF_bit31(tmp2);
6425 }
9ee6e8bb
PB
6426 } else {
6427 /* register */
6428 rm = (insn) & 0xf;
e9bb4aa9 6429 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6430 shiftop = (insn >> 5) & 3;
6431 if (!(insn & (1 << 4))) {
6432 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6433 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6434 } else {
6435 rs = (insn >> 8) & 0xf;
8984bd2e 6436 tmp = load_reg(s, rs);
e9bb4aa9 6437 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6438 }
6439 }
6440 if (op1 != 0x0f && op1 != 0x0d) {
6441 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6442 tmp = load_reg(s, rn);
6443 } else {
6444 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6445 }
6446 rd = (insn >> 12) & 0xf;
6447 switch(op1) {
6448 case 0x00:
e9bb4aa9
JR
6449 tcg_gen_and_i32(tmp, tmp, tmp2);
6450 if (logic_cc) {
6451 gen_logic_CC(tmp);
6452 }
21aeb343 6453 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6454 break;
6455 case 0x01:
e9bb4aa9
JR
6456 tcg_gen_xor_i32(tmp, tmp, tmp2);
6457 if (logic_cc) {
6458 gen_logic_CC(tmp);
6459 }
21aeb343 6460 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6461 break;
6462 case 0x02:
6463 if (set_cc && rd == 15) {
6464 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6465 if (IS_USER(s)) {
9ee6e8bb 6466 goto illegal_op;
e9bb4aa9
JR
6467 }
6468 gen_helper_sub_cc(tmp, tmp, tmp2);
6469 gen_exception_return(s, tmp);
9ee6e8bb 6470 } else {
e9bb4aa9
JR
6471 if (set_cc) {
6472 gen_helper_sub_cc(tmp, tmp, tmp2);
6473 } else {
6474 tcg_gen_sub_i32(tmp, tmp, tmp2);
6475 }
21aeb343 6476 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6477 }
6478 break;
6479 case 0x03:
e9bb4aa9
JR
6480 if (set_cc) {
6481 gen_helper_sub_cc(tmp, tmp2, tmp);
6482 } else {
6483 tcg_gen_sub_i32(tmp, tmp2, tmp);
6484 }
21aeb343 6485 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6486 break;
6487 case 0x04:
e9bb4aa9
JR
6488 if (set_cc) {
6489 gen_helper_add_cc(tmp, tmp, tmp2);
6490 } else {
6491 tcg_gen_add_i32(tmp, tmp, tmp2);
6492 }
21aeb343 6493 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6494 break;
6495 case 0x05:
e9bb4aa9
JR
6496 if (set_cc) {
6497 gen_helper_adc_cc(tmp, tmp, tmp2);
6498 } else {
6499 gen_add_carry(tmp, tmp, tmp2);
6500 }
21aeb343 6501 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6502 break;
6503 case 0x06:
e9bb4aa9
JR
6504 if (set_cc) {
6505 gen_helper_sbc_cc(tmp, tmp, tmp2);
6506 } else {
6507 gen_sub_carry(tmp, tmp, tmp2);
6508 }
21aeb343 6509 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6510 break;
6511 case 0x07:
e9bb4aa9
JR
6512 if (set_cc) {
6513 gen_helper_sbc_cc(tmp, tmp2, tmp);
6514 } else {
6515 gen_sub_carry(tmp, tmp2, tmp);
6516 }
21aeb343 6517 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6518 break;
6519 case 0x08:
6520 if (set_cc) {
e9bb4aa9
JR
6521 tcg_gen_and_i32(tmp, tmp, tmp2);
6522 gen_logic_CC(tmp);
9ee6e8bb 6523 }
e9bb4aa9 6524 dead_tmp(tmp);
9ee6e8bb
PB
6525 break;
6526 case 0x09:
6527 if (set_cc) {
e9bb4aa9
JR
6528 tcg_gen_xor_i32(tmp, tmp, tmp2);
6529 gen_logic_CC(tmp);
9ee6e8bb 6530 }
e9bb4aa9 6531 dead_tmp(tmp);
9ee6e8bb
PB
6532 break;
6533 case 0x0a:
6534 if (set_cc) {
e9bb4aa9 6535 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6536 }
e9bb4aa9 6537 dead_tmp(tmp);
9ee6e8bb
PB
6538 break;
6539 case 0x0b:
6540 if (set_cc) {
e9bb4aa9 6541 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6542 }
e9bb4aa9 6543 dead_tmp(tmp);
9ee6e8bb
PB
6544 break;
6545 case 0x0c:
e9bb4aa9
JR
6546 tcg_gen_or_i32(tmp, tmp, tmp2);
6547 if (logic_cc) {
6548 gen_logic_CC(tmp);
6549 }
21aeb343 6550 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6551 break;
6552 case 0x0d:
6553 if (logic_cc && rd == 15) {
6554 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6555 if (IS_USER(s)) {
9ee6e8bb 6556 goto illegal_op;
e9bb4aa9
JR
6557 }
6558 gen_exception_return(s, tmp2);
9ee6e8bb 6559 } else {
e9bb4aa9
JR
6560 if (logic_cc) {
6561 gen_logic_CC(tmp2);
6562 }
21aeb343 6563 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6564 }
6565 break;
6566 case 0x0e:
f669df27 6567 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6568 if (logic_cc) {
6569 gen_logic_CC(tmp);
6570 }
21aeb343 6571 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6572 break;
6573 default:
6574 case 0x0f:
e9bb4aa9
JR
6575 tcg_gen_not_i32(tmp2, tmp2);
6576 if (logic_cc) {
6577 gen_logic_CC(tmp2);
6578 }
21aeb343 6579 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6580 break;
6581 }
e9bb4aa9
JR
6582 if (op1 != 0x0f && op1 != 0x0d) {
6583 dead_tmp(tmp2);
6584 }
9ee6e8bb
PB
6585 } else {
6586 /* other instructions */
6587 op1 = (insn >> 24) & 0xf;
6588 switch(op1) {
6589 case 0x0:
6590 case 0x1:
6591 /* multiplies, extra load/stores */
6592 sh = (insn >> 5) & 3;
6593 if (sh == 0) {
6594 if (op1 == 0x0) {
6595 rd = (insn >> 16) & 0xf;
6596 rn = (insn >> 12) & 0xf;
6597 rs = (insn >> 8) & 0xf;
6598 rm = (insn) & 0xf;
6599 op1 = (insn >> 20) & 0xf;
6600 switch (op1) {
6601 case 0: case 1: case 2: case 3: case 6:
6602 /* 32 bit mul */
5e3f878a
PB
6603 tmp = load_reg(s, rs);
6604 tmp2 = load_reg(s, rm);
6605 tcg_gen_mul_i32(tmp, tmp, tmp2);
6606 dead_tmp(tmp2);
9ee6e8bb
PB
6607 if (insn & (1 << 22)) {
6608 /* Subtract (mls) */
6609 ARCH(6T2);
5e3f878a
PB
6610 tmp2 = load_reg(s, rn);
6611 tcg_gen_sub_i32(tmp, tmp2, tmp);
6612 dead_tmp(tmp2);
9ee6e8bb
PB
6613 } else if (insn & (1 << 21)) {
6614 /* Add */
5e3f878a
PB
6615 tmp2 = load_reg(s, rn);
6616 tcg_gen_add_i32(tmp, tmp, tmp2);
6617 dead_tmp(tmp2);
9ee6e8bb
PB
6618 }
6619 if (insn & (1 << 20))
5e3f878a
PB
6620 gen_logic_CC(tmp);
6621 store_reg(s, rd, tmp);
9ee6e8bb
PB
6622 break;
6623 default:
6624 /* 64 bit mul */
5e3f878a
PB
6625 tmp = load_reg(s, rs);
6626 tmp2 = load_reg(s, rm);
9ee6e8bb 6627 if (insn & (1 << 22))
a7812ae4 6628 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6629 else
a7812ae4 6630 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6631 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6632 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6633 if (!(insn & (1 << 23))) { /* double accumulate */
6634 ARCH(6);
a7812ae4
PB
6635 gen_addq_lo(s, tmp64, rn);
6636 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6637 }
6638 if (insn & (1 << 20))
a7812ae4
PB
6639 gen_logicq_cc(tmp64);
6640 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6641 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6642 break;
6643 }
6644 } else {
6645 rn = (insn >> 16) & 0xf;
6646 rd = (insn >> 12) & 0xf;
6647 if (insn & (1 << 23)) {
6648 /* load/store exclusive */
86753403
PB
6649 op1 = (insn >> 21) & 0x3;
6650 if (op1)
a47f43d2 6651 ARCH(6K);
86753403
PB
6652 else
6653 ARCH(6);
3174f8e9 6654 addr = tcg_temp_local_new_i32();
98a46317 6655 load_reg_var(s, addr, rn);
9ee6e8bb 6656 if (insn & (1 << 20)) {
86753403
PB
6657 switch (op1) {
6658 case 0: /* ldrex */
426f5abc 6659 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6660 break;
6661 case 1: /* ldrexd */
426f5abc 6662 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6663 break;
6664 case 2: /* ldrexb */
426f5abc 6665 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6666 break;
6667 case 3: /* ldrexh */
426f5abc 6668 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6669 break;
6670 default:
6671 abort();
6672 }
9ee6e8bb
PB
6673 } else {
6674 rm = insn & 0xf;
86753403
PB
6675 switch (op1) {
6676 case 0: /* strex */
426f5abc 6677 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6678 break;
6679 case 1: /* strexd */
502e64fe 6680 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6681 break;
6682 case 2: /* strexb */
426f5abc 6683 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6684 break;
6685 case 3: /* strexh */
426f5abc 6686 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6687 break;
6688 default:
6689 abort();
6690 }
9ee6e8bb 6691 }
3174f8e9 6692 tcg_temp_free(addr);
9ee6e8bb
PB
6693 } else {
6694 /* SWP instruction */
6695 rm = (insn) & 0xf;
6696
8984bd2e
PB
6697 /* ??? This is not really atomic. However we know
6698 we never have multiple CPUs running in parallel,
6699 so it is good enough. */
6700 addr = load_reg(s, rn);
6701 tmp = load_reg(s, rm);
9ee6e8bb 6702 if (insn & (1 << 22)) {
8984bd2e
PB
6703 tmp2 = gen_ld8u(addr, IS_USER(s));
6704 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6705 } else {
8984bd2e
PB
6706 tmp2 = gen_ld32(addr, IS_USER(s));
6707 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6708 }
8984bd2e
PB
6709 dead_tmp(addr);
6710 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6711 }
6712 }
6713 } else {
6714 int address_offset;
6715 int load;
6716 /* Misc load/store */
6717 rn = (insn >> 16) & 0xf;
6718 rd = (insn >> 12) & 0xf;
b0109805 6719 addr = load_reg(s, rn);
9ee6e8bb 6720 if (insn & (1 << 24))
b0109805 6721 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6722 address_offset = 0;
6723 if (insn & (1 << 20)) {
6724 /* load */
6725 switch(sh) {
6726 case 1:
b0109805 6727 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6728 break;
6729 case 2:
b0109805 6730 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6731 break;
6732 default:
6733 case 3:
b0109805 6734 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6735 break;
6736 }
6737 load = 1;
6738 } else if (sh & 2) {
6739 /* doubleword */
6740 if (sh & 1) {
6741 /* store */
b0109805
PB
6742 tmp = load_reg(s, rd);
6743 gen_st32(tmp, addr, IS_USER(s));
6744 tcg_gen_addi_i32(addr, addr, 4);
6745 tmp = load_reg(s, rd + 1);
6746 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6747 load = 0;
6748 } else {
6749 /* load */
b0109805
PB
6750 tmp = gen_ld32(addr, IS_USER(s));
6751 store_reg(s, rd, tmp);
6752 tcg_gen_addi_i32(addr, addr, 4);
6753 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6754 rd++;
6755 load = 1;
6756 }
6757 address_offset = -4;
6758 } else {
6759 /* store */
b0109805
PB
6760 tmp = load_reg(s, rd);
6761 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6762 load = 0;
6763 }
6764 /* Perform base writeback before storing the loaded value to
6765 ensure correct behavior with overlapping index registers.
6766 ldrd with base writeback is undefined if the
6767 destination and index registers overlap. */
6768 if (!(insn & (1 << 24))) {
b0109805
PB
6769 gen_add_datah_offset(s, insn, address_offset, addr);
6770 store_reg(s, rn, addr);
9ee6e8bb
PB
6771 } else if (insn & (1 << 21)) {
6772 if (address_offset)
b0109805
PB
6773 tcg_gen_addi_i32(addr, addr, address_offset);
6774 store_reg(s, rn, addr);
6775 } else {
6776 dead_tmp(addr);
9ee6e8bb
PB
6777 }
6778 if (load) {
6779 /* Complete the load. */
b0109805 6780 store_reg(s, rd, tmp);
9ee6e8bb
PB
6781 }
6782 }
6783 break;
6784 case 0x4:
6785 case 0x5:
6786 goto do_ldst;
6787 case 0x6:
6788 case 0x7:
6789 if (insn & (1 << 4)) {
6790 ARCH(6);
6791 /* Armv6 Media instructions. */
6792 rm = insn & 0xf;
6793 rn = (insn >> 16) & 0xf;
2c0262af 6794 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6795 rs = (insn >> 8) & 0xf;
6796 switch ((insn >> 23) & 3) {
6797 case 0: /* Parallel add/subtract. */
6798 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6799 tmp = load_reg(s, rn);
6800 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6801 sh = (insn >> 5) & 7;
6802 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6803 goto illegal_op;
6ddbc6e4
PB
6804 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6805 dead_tmp(tmp2);
6806 store_reg(s, rd, tmp);
9ee6e8bb
PB
6807 break;
6808 case 1:
6809 if ((insn & 0x00700020) == 0) {
6c95676b 6810 /* Halfword pack. */
3670669c
PB
6811 tmp = load_reg(s, rn);
6812 tmp2 = load_reg(s, rm);
9ee6e8bb 6813 shift = (insn >> 7) & 0x1f;
3670669c
PB
6814 if (insn & (1 << 6)) {
6815 /* pkhtb */
22478e79
AZ
6816 if (shift == 0)
6817 shift = 31;
6818 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6819 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6820 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6821 } else {
6822 /* pkhbt */
22478e79
AZ
6823 if (shift)
6824 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6825 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6826 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6827 }
6828 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6829 dead_tmp(tmp2);
3670669c 6830 store_reg(s, rd, tmp);
9ee6e8bb
PB
6831 } else if ((insn & 0x00200020) == 0x00200000) {
6832 /* [us]sat */
6ddbc6e4 6833 tmp = load_reg(s, rm);
9ee6e8bb
PB
6834 shift = (insn >> 7) & 0x1f;
6835 if (insn & (1 << 6)) {
6836 if (shift == 0)
6837 shift = 31;
6ddbc6e4 6838 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6839 } else {
6ddbc6e4 6840 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6841 }
6842 sh = (insn >> 16) & 0x1f;
6843 if (sh != 0) {
b75263d6 6844 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6845 if (insn & (1 << 22))
b75263d6 6846 gen_helper_usat(tmp, tmp, tmp2);
9ee6e8bb 6847 else
b75263d6
JR
6848 gen_helper_ssat(tmp, tmp, tmp2);
6849 tcg_temp_free_i32(tmp2);
9ee6e8bb 6850 }
6ddbc6e4 6851 store_reg(s, rd, tmp);
9ee6e8bb
PB
6852 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6853 /* [us]sat16 */
6ddbc6e4 6854 tmp = load_reg(s, rm);
9ee6e8bb
PB
6855 sh = (insn >> 16) & 0x1f;
6856 if (sh != 0) {
b75263d6 6857 tmp2 = tcg_const_i32(sh);
9ee6e8bb 6858 if (insn & (1 << 22))
b75263d6 6859 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 6860 else
b75263d6
JR
6861 gen_helper_ssat16(tmp, tmp, tmp2);
6862 tcg_temp_free_i32(tmp2);
9ee6e8bb 6863 }
6ddbc6e4 6864 store_reg(s, rd, tmp);
9ee6e8bb
PB
6865 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6866 /* Select bytes. */
6ddbc6e4
PB
6867 tmp = load_reg(s, rn);
6868 tmp2 = load_reg(s, rm);
6869 tmp3 = new_tmp();
6870 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6871 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6872 dead_tmp(tmp3);
6873 dead_tmp(tmp2);
6874 store_reg(s, rd, tmp);
9ee6e8bb 6875 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6876 tmp = load_reg(s, rm);
9ee6e8bb
PB
6877 shift = (insn >> 10) & 3;
6878 /* ??? In many cases it's not necessary to do a
6879 rotate; a shift is sufficient. */
6880 if (shift != 0)
f669df27 6881 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6882 op1 = (insn >> 20) & 7;
6883 switch (op1) {
5e3f878a
PB
6884 case 0: gen_sxtb16(tmp); break;
6885 case 2: gen_sxtb(tmp); break;
6886 case 3: gen_sxth(tmp); break;
6887 case 4: gen_uxtb16(tmp); break;
6888 case 6: gen_uxtb(tmp); break;
6889 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6890 default: goto illegal_op;
6891 }
6892 if (rn != 15) {
5e3f878a 6893 tmp2 = load_reg(s, rn);
9ee6e8bb 6894 if ((op1 & 3) == 0) {
5e3f878a 6895 gen_add16(tmp, tmp2);
9ee6e8bb 6896 } else {
5e3f878a
PB
6897 tcg_gen_add_i32(tmp, tmp, tmp2);
6898 dead_tmp(tmp2);
9ee6e8bb
PB
6899 }
6900 }
6c95676b 6901 store_reg(s, rd, tmp);
9ee6e8bb
PB
6902 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6903 /* rev */
b0109805 6904 tmp = load_reg(s, rm);
9ee6e8bb
PB
6905 if (insn & (1 << 22)) {
6906 if (insn & (1 << 7)) {
b0109805 6907 gen_revsh(tmp);
9ee6e8bb
PB
6908 } else {
6909 ARCH(6T2);
b0109805 6910 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6911 }
6912 } else {
6913 if (insn & (1 << 7))
b0109805 6914 gen_rev16(tmp);
9ee6e8bb 6915 else
66896cb8 6916 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6917 }
b0109805 6918 store_reg(s, rd, tmp);
9ee6e8bb
PB
6919 } else {
6920 goto illegal_op;
6921 }
6922 break;
6923 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6924 tmp = load_reg(s, rm);
6925 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6926 if (insn & (1 << 20)) {
6927 /* Signed multiply most significant [accumulate]. */
a7812ae4 6928 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6929 if (insn & (1 << 5))
a7812ae4
PB
6930 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6931 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6932 tmp = new_tmp();
a7812ae4 6933 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6934 tcg_temp_free_i64(tmp64);
955a7dd5
AZ
6935 if (rd != 15) {
6936 tmp2 = load_reg(s, rd);
9ee6e8bb 6937 if (insn & (1 << 6)) {
5e3f878a 6938 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6939 } else {
5e3f878a 6940 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6941 }
5e3f878a 6942 dead_tmp(tmp2);
9ee6e8bb 6943 }
955a7dd5 6944 store_reg(s, rn, tmp);
9ee6e8bb
PB
6945 } else {
6946 if (insn & (1 << 5))
5e3f878a
PB
6947 gen_swap_half(tmp2);
6948 gen_smul_dual(tmp, tmp2);
6949 /* This addition cannot overflow. */
6950 if (insn & (1 << 6)) {
6951 tcg_gen_sub_i32(tmp, tmp, tmp2);
6952 } else {
6953 tcg_gen_add_i32(tmp, tmp, tmp2);
6954 }
6955 dead_tmp(tmp2);
9ee6e8bb 6956 if (insn & (1 << 22)) {
5e3f878a 6957 /* smlald, smlsld */
a7812ae4
PB
6958 tmp64 = tcg_temp_new_i64();
6959 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6960 dead_tmp(tmp);
a7812ae4
PB
6961 gen_addq(s, tmp64, rd, rn);
6962 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 6963 tcg_temp_free_i64(tmp64);
9ee6e8bb 6964 } else {
5e3f878a 6965 /* smuad, smusd, smlad, smlsd */
22478e79 6966 if (rd != 15)
9ee6e8bb 6967 {
22478e79 6968 tmp2 = load_reg(s, rd);
5e3f878a
PB
6969 gen_helper_add_setq(tmp, tmp, tmp2);
6970 dead_tmp(tmp2);
9ee6e8bb 6971 }
22478e79 6972 store_reg(s, rn, tmp);
9ee6e8bb
PB
6973 }
6974 }
6975 break;
6976 case 3:
6977 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6978 switch (op1) {
6979 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6980 ARCH(6);
6981 tmp = load_reg(s, rm);
6982 tmp2 = load_reg(s, rs);
6983 gen_helper_usad8(tmp, tmp, tmp2);
6984 dead_tmp(tmp2);
ded9d295
AZ
6985 if (rd != 15) {
6986 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6987 tcg_gen_add_i32(tmp, tmp, tmp2);
6988 dead_tmp(tmp2);
9ee6e8bb 6989 }
ded9d295 6990 store_reg(s, rn, tmp);
9ee6e8bb
PB
6991 break;
6992 case 0x20: case 0x24: case 0x28: case 0x2c:
6993 /* Bitfield insert/clear. */
6994 ARCH(6T2);
6995 shift = (insn >> 7) & 0x1f;
6996 i = (insn >> 16) & 0x1f;
6997 i = i + 1 - shift;
6998 if (rm == 15) {
5e3f878a
PB
6999 tmp = new_tmp();
7000 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7001 } else {
5e3f878a 7002 tmp = load_reg(s, rm);
9ee6e8bb
PB
7003 }
7004 if (i != 32) {
5e3f878a 7005 tmp2 = load_reg(s, rd);
8f8e3aa4 7006 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 7007 dead_tmp(tmp2);
9ee6e8bb 7008 }
5e3f878a 7009 store_reg(s, rd, tmp);
9ee6e8bb
PB
7010 break;
7011 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7012 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7013 ARCH(6T2);
5e3f878a 7014 tmp = load_reg(s, rm);
9ee6e8bb
PB
7015 shift = (insn >> 7) & 0x1f;
7016 i = ((insn >> 16) & 0x1f) + 1;
7017 if (shift + i > 32)
7018 goto illegal_op;
7019 if (i < 32) {
7020 if (op1 & 0x20) {
5e3f878a 7021 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7022 } else {
5e3f878a 7023 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7024 }
7025 }
5e3f878a 7026 store_reg(s, rd, tmp);
9ee6e8bb
PB
7027 break;
7028 default:
7029 goto illegal_op;
7030 }
7031 break;
7032 }
7033 break;
7034 }
7035 do_ldst:
7036 /* Check for undefined extension instructions
7037 * per the ARM Bible, i.e.:
7038 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7039 */
7040 sh = (0xf << 20) | (0xf << 4);
7041 if (op1 == 0x7 && ((insn & sh) == sh))
7042 {
7043 goto illegal_op;
7044 }
7045 /* load/store byte/word */
7046 rn = (insn >> 16) & 0xf;
7047 rd = (insn >> 12) & 0xf;
b0109805 7048 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7049 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7050 if (insn & (1 << 24))
b0109805 7051 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7052 if (insn & (1 << 20)) {
7053 /* load */
9ee6e8bb 7054 if (insn & (1 << 22)) {
b0109805 7055 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7056 } else {
b0109805 7057 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7058 }
9ee6e8bb
PB
7059 } else {
7060 /* store */
b0109805 7061 tmp = load_reg(s, rd);
9ee6e8bb 7062 if (insn & (1 << 22))
b0109805 7063 gen_st8(tmp, tmp2, i);
9ee6e8bb 7064 else
b0109805 7065 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7066 }
7067 if (!(insn & (1 << 24))) {
b0109805
PB
7068 gen_add_data_offset(s, insn, tmp2);
7069 store_reg(s, rn, tmp2);
7070 } else if (insn & (1 << 21)) {
7071 store_reg(s, rn, tmp2);
7072 } else {
7073 dead_tmp(tmp2);
9ee6e8bb
PB
7074 }
7075 if (insn & (1 << 20)) {
7076 /* Complete the load. */
7077 if (rd == 15)
b0109805 7078 gen_bx(s, tmp);
9ee6e8bb 7079 else
b0109805 7080 store_reg(s, rd, tmp);
9ee6e8bb
PB
7081 }
7082 break;
7083 case 0x08:
7084 case 0x09:
7085 {
7086 int j, n, user, loaded_base;
b0109805 7087 TCGv loaded_var;
9ee6e8bb
PB
7088 /* load/store multiple words */
7089 /* XXX: store correct base if write back */
7090 user = 0;
7091 if (insn & (1 << 22)) {
7092 if (IS_USER(s))
7093 goto illegal_op; /* only usable in supervisor mode */
7094
7095 if ((insn & (1 << 15)) == 0)
7096 user = 1;
7097 }
7098 rn = (insn >> 16) & 0xf;
b0109805 7099 addr = load_reg(s, rn);
9ee6e8bb
PB
7100
7101 /* compute total size */
7102 loaded_base = 0;
a50f5b91 7103 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7104 n = 0;
7105 for(i=0;i<16;i++) {
7106 if (insn & (1 << i))
7107 n++;
7108 }
7109 /* XXX: test invalid n == 0 case ? */
7110 if (insn & (1 << 23)) {
7111 if (insn & (1 << 24)) {
7112 /* pre increment */
b0109805 7113 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7114 } else {
7115 /* post increment */
7116 }
7117 } else {
7118 if (insn & (1 << 24)) {
7119 /* pre decrement */
b0109805 7120 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7121 } else {
7122 /* post decrement */
7123 if (n != 1)
b0109805 7124 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7125 }
7126 }
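/* Address setup sketch for the four block modes above, for n
   registers and base B: the first transfer goes to B (IA), B+4 (IB),
   B-4n (DB) or B-4(n-1) (DA); every later transfer then steps
   upwards by 4.  */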
7127 j = 0;
7128 for(i=0;i<16;i++) {
7129 if (insn & (1 << i)) {
7130 if (insn & (1 << 20)) {
7131 /* load */
b0109805 7132 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7133 if (i == 15) {
b0109805 7134 gen_bx(s, tmp);
9ee6e8bb 7135 } else if (user) {
b75263d6
JR
7136 tmp2 = tcg_const_i32(i);
7137 gen_helper_set_user_reg(tmp2, tmp);
7138 tcg_temp_free_i32(tmp2);
b0109805 7139 dead_tmp(tmp);
9ee6e8bb 7140 } else if (i == rn) {
b0109805 7141 loaded_var = tmp;
9ee6e8bb
PB
7142 loaded_base = 1;
7143 } else {
b0109805 7144 store_reg(s, i, tmp);
9ee6e8bb
PB
7145 }
7146 } else {
7147 /* store */
7148 if (i == 15) {
7149 /* special case: r15 = PC + 8 */
7150 val = (long)s->pc + 4;
b0109805
PB
7151 tmp = new_tmp();
7152 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7153 } else if (user) {
b0109805 7154 tmp = new_tmp();
b75263d6
JR
7155 tmp2 = tcg_const_i32(i);
7156 gen_helper_get_user_reg(tmp, tmp2);
7157 tcg_temp_free_i32(tmp2);
9ee6e8bb 7158 } else {
b0109805 7159 tmp = load_reg(s, i);
9ee6e8bb 7160 }
b0109805 7161 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7162 }
7163 j++;
7164 /* no need to add after the last transfer */
7165 if (j != n)
b0109805 7166 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7167 }
7168 }
7169 if (insn & (1 << 21)) {
7170 /* write back */
7171 if (insn & (1 << 23)) {
7172 if (insn & (1 << 24)) {
7173 /* pre increment */
7174 } else {
7175 /* post increment */
b0109805 7176 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7177 }
7178 } else {
7179 if (insn & (1 << 24)) {
7180 /* pre decrement */
7181 if (n != 1)
b0109805 7182 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7183 } else {
7184 /* post decrement */
b0109805 7185 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7186 }
7187 }
b0109805
PB
7188 store_reg(s, rn, addr);
7189 } else {
7190 dead_tmp(addr);
9ee6e8bb
PB
7191 }
7192 if (loaded_base) {
b0109805 7193 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7194 }
7195 if ((insn & (1 << 22)) && !user) {
7196 /* Restore CPSR from SPSR. */
d9ba4830
PB
7197 tmp = load_cpu_field(spsr);
7198 gen_set_cpsr(tmp, 0xffffffff);
7199 dead_tmp(tmp);
9ee6e8bb
PB
7200 s->is_jmp = DISAS_UPDATE;
7201 }
7202 }
7203 break;
7204 case 0xa:
7205 case 0xb:
7206 {
7207 int32_t offset;
7208
7209 /* branch (and link) */
7210 val = (int32_t)s->pc;
7211 if (insn & (1 << 24)) {
5e3f878a
PB
7212 tmp = new_tmp();
7213 tcg_gen_movi_i32(tmp, val);
7214 store_reg(s, 14, tmp);
9ee6e8bb
PB
7215 }
7216 offset = (((int32_t)insn << 8) >> 8);
7217 val += (offset << 2) + 4;
7218 gen_jmp(s, val);
7219 }
7220 break;
7221 case 0xc:
7222 case 0xd:
7223 case 0xe:
7224 /* Coprocessor. */
7225 if (disas_coproc_insn(env, s, insn))
7226 goto illegal_op;
7227 break;
7228 case 0xf:
7229 /* swi */
5e3f878a 7230 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7231 s->is_jmp = DISAS_SWI;
7232 break;
7233 default:
7234 illegal_op:
7235 gen_set_condexec(s);
5e3f878a 7236 gen_set_pc_im(s->pc - 4);
d9ba4830 7237 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7238 s->is_jmp = DISAS_JUMP;
7239 break;
7240 }
7241 }
7242}
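
/* Reference for the branch-offset arithmetic in the B/BL case above
   (sketch, not called by the translator): the 24-bit immediate is
   sign-extended, scaled by 4 and applied relative to the address of
   the branch plus 8 (the pipeline offset).  */
static inline uint32_t arm_branch_target(uint32_t insn_addr, uint32_t insn)
{
    int32_t offset = (((int32_t)insn) << 8) >> 8;   /* sign-extend insn[23:0] */
    return insn_addr + 8 + (offset << 2);
}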
7243
7244/* Return true if this is a Thumb-2 logical op. */
7245static int
7246thumb2_logic_op(int op)
7247{
7248 return (op < 8);
7249}
7250
7251/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7252 then set condition code flags based on the result of the operation.
7253 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7254 to the high bit of T1.
7255 Returns zero if the opcode is valid. */
7256
7257static int
396e467c 7258gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7259{
7260 int logic_cc;
7261
7262 logic_cc = 0;
7263 switch (op) {
7264 case 0: /* and */
396e467c 7265 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7266 logic_cc = conds;
7267 break;
7268 case 1: /* bic */
f669df27 7269 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7270 logic_cc = conds;
7271 break;
7272 case 2: /* orr */
396e467c 7273 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7274 logic_cc = conds;
7275 break;
7276 case 3: /* orn */
396e467c
FN
7277 tcg_gen_not_i32(t1, t1);
7278 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7279 logic_cc = conds;
7280 break;
7281 case 4: /* eor */
396e467c 7282 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7283 logic_cc = conds;
7284 break;
7285 case 8: /* add */
7286 if (conds)
396e467c 7287 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7288 else
396e467c 7289 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7290 break;
7291 case 10: /* adc */
7292 if (conds)
396e467c 7293 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7294 else
396e467c 7295 gen_adc(t0, t1);
9ee6e8bb
PB
7296 break;
7297 case 11: /* sbc */
7298 if (conds)
396e467c 7299 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7300 else
396e467c 7301 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7302 break;
7303 case 13: /* sub */
7304 if (conds)
396e467c 7305 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7306 else
396e467c 7307 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7308 break;
7309 case 14: /* rsb */
7310 if (conds)
396e467c 7311 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7312 else
396e467c 7313 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7314 break;
7315 default: /* 5, 6, 7, 9, 12, 15. */
7316 return 1;
7317 }
7318 if (logic_cc) {
396e467c 7319 gen_logic_CC(t0);
9ee6e8bb 7320 if (shifter_out)
396e467c 7321 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7322 }
7323 return 0;
7324}
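
/* Usage sketch (illustrative): the Thumb-2 decoders below invoke this
   as, e.g., gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2) once the
   immediate or register shift has been applied to tmp2, and branch to
   the UNDEF path when it returns nonzero.  */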
7325
7326/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7327 is not legal. */
7328static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7329{
b0109805 7330 uint32_t insn, imm, shift, offset;
9ee6e8bb 7331 uint32_t rd, rn, rm, rs;
b26eefb6 7332 TCGv tmp;
6ddbc6e4
PB
7333 TCGv tmp2;
7334 TCGv tmp3;
b0109805 7335 TCGv addr;
a7812ae4 7336 TCGv_i64 tmp64;
9ee6e8bb
PB
7337 int op;
7338 int shiftop;
7339 int conds;
7340 int logic_cc;
7341
7342 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7343 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7344 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7345 16-bit instructions to get correct prefetch abort behavior. */
7346 insn = insn_hw1;
7347 if ((insn & (1 << 12)) == 0) {
7348 /* Second half of blx. */
7349 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7350 tmp = load_reg(s, 14);
7351 tcg_gen_addi_i32(tmp, tmp, offset);
7352 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7353
d9ba4830 7354 tmp2 = new_tmp();
b0109805 7355 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7356 store_reg(s, 14, tmp2);
7357 gen_bx(s, tmp);
9ee6e8bb
PB
7358 return 0;
7359 }
7360 if (insn & (1 << 11)) {
7361 /* Second half of bl. */
7362 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7363 tmp = load_reg(s, 14);
6a0d8a1d 7364 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7365
d9ba4830 7366 tmp2 = new_tmp();
b0109805 7367 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7368 store_reg(s, 14, tmp2);
7369 gen_bx(s, tmp);
9ee6e8bb
PB
7370 return 0;
7371 }
7372 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7373 /* Instruction spans a page boundary. Implement it as two
7374 16-bit instructions in case the second half causes a
7375 prefetch abort. */
7376 offset = ((int32_t)insn << 21) >> 9;
396e467c 7377 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7378 return 0;
7379 }
7380 /* Fall through to 32-bit decode. */
7381 }
7382
7383 insn = lduw_code(s->pc);
7384 s->pc += 2;
7385 insn |= (uint32_t)insn_hw1 << 16;
7386
7387 if ((insn & 0xf800e800) != 0xf000e800) {
7388 ARCH(6T2);
7389 }
7390
7391 rn = (insn >> 16) & 0xf;
7392 rs = (insn >> 12) & 0xf;
7393 rd = (insn >> 8) & 0xf;
7394 rm = insn & 0xf;
7395 switch ((insn >> 25) & 0xf) {
7396 case 0: case 1: case 2: case 3:
7397 /* 16-bit instructions. Should never happen. */
7398 abort();
7399 case 4:
7400 if (insn & (1 << 22)) {
7401 /* Other load/store, table branch. */
7402 if (insn & 0x01200000) {
7403 /* Load/store doubleword. */
7404 if (rn == 15) {
b0109805
PB
7405 addr = new_tmp();
7406 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7407 } else {
b0109805 7408 addr = load_reg(s, rn);
9ee6e8bb
PB
7409 }
7410 offset = (insn & 0xff) * 4;
7411 if ((insn & (1 << 23)) == 0)
7412 offset = -offset;
7413 if (insn & (1 << 24)) {
b0109805 7414 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7415 offset = 0;
7416 }
7417 if (insn & (1 << 20)) {
7418 /* ldrd */
b0109805
PB
7419 tmp = gen_ld32(addr, IS_USER(s));
7420 store_reg(s, rs, tmp);
7421 tcg_gen_addi_i32(addr, addr, 4);
7422 tmp = gen_ld32(addr, IS_USER(s));
7423 store_reg(s, rd, tmp);
9ee6e8bb
PB
7424 } else {
7425 /* strd */
b0109805
PB
7426 tmp = load_reg(s, rs);
7427 gen_st32(tmp, addr, IS_USER(s));
7428 tcg_gen_addi_i32(addr, addr, 4);
7429 tmp = load_reg(s, rd);
7430 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7431 }
7432 if (insn & (1 << 21)) {
7433 /* Base writeback. */
7434 if (rn == 15)
7435 goto illegal_op;
b0109805
PB
7436 tcg_gen_addi_i32(addr, addr, offset - 4);
7437 store_reg(s, rn, addr);
7438 } else {
7439 dead_tmp(addr);
9ee6e8bb
PB
7440 }
7441 } else if ((insn & (1 << 23)) == 0) {
7442 /* Load/store exclusive word. */
3174f8e9 7443 addr = tcg_temp_local_new();
98a46317 7444 load_reg_var(s, addr, rn);
426f5abc 7445 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7446 if (insn & (1 << 20)) {
426f5abc 7447 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7448 } else {
426f5abc 7449 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7450 }
3174f8e9 7451 tcg_temp_free(addr);
9ee6e8bb
PB
7452 } else if ((insn & (1 << 6)) == 0) {
7453 /* Table Branch. */
7454 if (rn == 15) {
b0109805
PB
7455 addr = new_tmp();
7456 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7457 } else {
b0109805 7458 addr = load_reg(s, rn);
9ee6e8bb 7459 }
b26eefb6 7460 tmp = load_reg(s, rm);
b0109805 7461 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7462 if (insn & (1 << 4)) {
7463 /* tbh */
b0109805 7464 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7465 dead_tmp(tmp);
b0109805 7466 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7467 } else { /* tbb */
b26eefb6 7468 dead_tmp(tmp);
b0109805 7469 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7470 }
b0109805
PB
7471 dead_tmp(addr);
7472 tcg_gen_shli_i32(tmp, tmp, 1);
7473 tcg_gen_addi_i32(tmp, tmp, s->pc);
7474 store_reg(s, 15, tmp);
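/* Worked example (sketch): for TBB [Rn, Rm] the code above yields
   dest = pc + 2 * byte_table[Rm]; for TBH [Rn, Rm, LSL #1] the index
   is added in twice, so dest = pc + 2 * halfword_table[Rm].  Here
   "pc" is s->pc, the address of the table-branch instruction plus 4,
   matching the architectural PC value.  */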
9ee6e8bb
PB
7475 } else {
7476 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7477 ARCH(7);
9ee6e8bb 7478 op = (insn >> 4) & 0x3;
426f5abc
PB
7479 if (op == 2) {
7480 goto illegal_op;
7481 }
3174f8e9 7482 addr = tcg_temp_local_new();
98a46317 7483 load_reg_var(s, addr, rn);
9ee6e8bb 7484 if (insn & (1 << 20)) {
426f5abc 7485 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7486 } else {
426f5abc 7487 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7488 }
3174f8e9 7489 tcg_temp_free(addr);
9ee6e8bb
PB
7490 }
7491 } else {
7492 /* Load/store multiple, RFE, SRS. */
7493 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7494 /* Not available in user mode. */
b0109805 7495 if (IS_USER(s))
9ee6e8bb
PB
7496 goto illegal_op;
7497 if (insn & (1 << 20)) {
7498 /* rfe */
b0109805
PB
7499 addr = load_reg(s, rn);
7500 if ((insn & (1 << 24)) == 0)
7501 tcg_gen_addi_i32(addr, addr, -8);
7502 /* Load PC into tmp and CPSR into tmp2. */
7503 tmp = gen_ld32(addr, 0);
7504 tcg_gen_addi_i32(addr, addr, 4);
7505 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7506 if (insn & (1 << 21)) {
7507 /* Base writeback. */
b0109805
PB
7508 if (insn & (1 << 24)) {
7509 tcg_gen_addi_i32(addr, addr, 4);
7510 } else {
7511 tcg_gen_addi_i32(addr, addr, -4);
7512 }
7513 store_reg(s, rn, addr);
7514 } else {
7515 dead_tmp(addr);
9ee6e8bb 7516 }
b0109805 7517 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7518 } else {
7519 /* srs */
7520 op = (insn & 0x1f);
7521 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7522 addr = load_reg(s, 13);
9ee6e8bb 7523 } else {
b0109805 7524 addr = new_tmp();
b75263d6
JR
7525 tmp = tcg_const_i32(op);
7526 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7527 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7528 }
7529 if ((insn & (1 << 24)) == 0) {
b0109805 7530 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7531 }
b0109805
PB
7532 tmp = load_reg(s, 14);
7533 gen_st32(tmp, addr, 0);
7534 tcg_gen_addi_i32(addr, addr, 4);
7535 tmp = new_tmp();
7536 gen_helper_cpsr_read(tmp);
7537 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7538 if (insn & (1 << 21)) {
7539 if ((insn & (1 << 24)) == 0) {
b0109805 7540 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7541 } else {
b0109805 7542 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7543 }
7544 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7545 store_reg(s, 13, addr);
9ee6e8bb 7546 } else {
b75263d6
JR
7547 tmp = tcg_const_i32(op);
7548 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7549 tcg_temp_free_i32(tmp);
9ee6e8bb 7550 }
b0109805
PB
7551 } else {
7552 dead_tmp(addr);
9ee6e8bb
PB
7553 }
7554 }
7555 } else {
7556 int i;
7557 /* Load/store multiple. */
b0109805 7558 addr = load_reg(s, rn);
9ee6e8bb
PB
7559 offset = 0;
7560 for (i = 0; i < 16; i++) {
7561 if (insn & (1 << i))
7562 offset += 4;
7563 }
7564 if (insn & (1 << 24)) {
b0109805 7565 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7566 }
7567
7568 for (i = 0; i < 16; i++) {
7569 if ((insn & (1 << i)) == 0)
7570 continue;
7571 if (insn & (1 << 20)) {
7572 /* Load. */
b0109805 7573 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7574 if (i == 15) {
b0109805 7575 gen_bx(s, tmp);
9ee6e8bb 7576 } else {
b0109805 7577 store_reg(s, i, tmp);
9ee6e8bb
PB
7578 }
7579 } else {
7580 /* Store. */
b0109805
PB
7581 tmp = load_reg(s, i);
7582 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7583 }
b0109805 7584 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7585 }
7586 if (insn & (1 << 21)) {
7587 /* Base register writeback. */
7588 if (insn & (1 << 24)) {
b0109805 7589 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7590 }
7591 /* Fault if writeback register is in register list. */
7592 if (insn & (1 << rn))
7593 goto illegal_op;
b0109805
PB
7594 store_reg(s, rn, addr);
7595 } else {
7596 dead_tmp(addr);
9ee6e8bb
PB
7597 }
7598 }
7599 }
7600 break;
7601 case 5: /* Data processing register constant shift. */
3174f8e9
FN
7602 if (rn == 15) {
7603 tmp = new_tmp();
7604 tcg_gen_movi_i32(tmp, 0);
7605 } else {
7606 tmp = load_reg(s, rn);
7607 }
7608 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7609 op = (insn >> 21) & 0xf;
7610 shiftop = (insn >> 4) & 3;
7611 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7612 conds = (insn & (1 << 20)) != 0;
7613 logic_cc = (conds && thumb2_logic_op(op));
3174f8e9
FN
7614 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7615 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9ee6e8bb 7616 goto illegal_op;
3174f8e9
FN
7617 dead_tmp(tmp2);
7618 if (rd != 15) {
7619 store_reg(s, rd, tmp);
7620 } else {
7621 dead_tmp(tmp);
7622 }
9ee6e8bb
PB
7623 break;
7624 case 13: /* Misc data processing. */
7625 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7626 if (op < 4 && (insn & 0xf000) != 0xf000)
7627 goto illegal_op;
7628 switch (op) {
7629 case 0: /* Register controlled shift. */
8984bd2e
PB
7630 tmp = load_reg(s, rn);
7631 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7632 if ((insn & 0x70) != 0)
7633 goto illegal_op;
7634 op = (insn >> 21) & 3;
8984bd2e
PB
7635 logic_cc = (insn & (1 << 20)) != 0;
7636 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7637 if (logic_cc)
7638 gen_logic_CC(tmp);
21aeb343 7639 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7640 break;
7641 case 1: /* Sign/zero extend. */
5e3f878a 7642 tmp = load_reg(s, rm);
9ee6e8bb
PB
7643 shift = (insn >> 4) & 3;
7644 /* ??? In many cases it's not necessary to do a
7645 rotate, a shift is sufficient. */
7646 if (shift != 0)
f669df27 7647 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
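/* The rotation selects which byte or halfword of the source register is
 * extended: it implements the architectural ROR #8/16/24 operand of the
 * SXTB/UXTB/SXTH/UXTH/SXTB16/UXTB16 forms decoded below. */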
9ee6e8bb
PB
7648 op = (insn >> 20) & 7;
7649 switch (op) {
5e3f878a
PB
7650 case 0: gen_sxth(tmp); break;
7651 case 1: gen_uxth(tmp); break;
7652 case 2: gen_sxtb16(tmp); break;
7653 case 3: gen_uxtb16(tmp); break;
7654 case 4: gen_sxtb(tmp); break;
7655 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7656 default: goto illegal_op;
7657 }
7658 if (rn != 15) {
5e3f878a 7659 tmp2 = load_reg(s, rn);
9ee6e8bb 7660 if ((op >> 1) == 1) {
5e3f878a 7661 gen_add16(tmp, tmp2);
9ee6e8bb 7662 } else {
5e3f878a
PB
7663 tcg_gen_add_i32(tmp, tmp, tmp2);
7664 dead_tmp(tmp2);
9ee6e8bb
PB
7665 }
7666 }
5e3f878a 7667 store_reg(s, rd, tmp);
9ee6e8bb
PB
7668 break;
7669 case 2: /* SIMD add/subtract. */
7670 op = (insn >> 20) & 7;
7671 shift = (insn >> 4) & 7;
7672 if ((op & 3) == 3 || (shift & 3) == 3)
7673 goto illegal_op;
6ddbc6e4
PB
7674 tmp = load_reg(s, rn);
7675 tmp2 = load_reg(s, rm);
7676 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7677 dead_tmp(tmp2);
7678 store_reg(s, rd, tmp);
9ee6e8bb
PB
7679 break;
7680 case 3: /* Other data processing. */
7681 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7682 if (op < 4) {
7683 /* Saturating add/subtract. */
d9ba4830
PB
7684 tmp = load_reg(s, rn);
7685 tmp2 = load_reg(s, rm);
9ee6e8bb 7686 if (op & 2)
d9ba4830 7687 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7688 if (op & 1)
d9ba4830 7689 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7690 else
d9ba4830
PB
7691 gen_helper_add_saturate(tmp, tmp, tmp2);
7692 dead_tmp(tmp2);
9ee6e8bb 7693 } else {
d9ba4830 7694 tmp = load_reg(s, rn);
9ee6e8bb
PB
7695 switch (op) {
7696 case 0x0a: /* rbit */
d9ba4830 7697 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7698 break;
7699 case 0x08: /* rev */
66896cb8 7700 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7701 break;
7702 case 0x09: /* rev16 */
d9ba4830 7703 gen_rev16(tmp);
9ee6e8bb
PB
7704 break;
7705 case 0x0b: /* revsh */
d9ba4830 7706 gen_revsh(tmp);
9ee6e8bb
PB
7707 break;
7708 case 0x10: /* sel */
d9ba4830 7709 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7710 tmp3 = new_tmp();
7711 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7712 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7713 dead_tmp(tmp3);
d9ba4830 7714 dead_tmp(tmp2);
9ee6e8bb
PB
7715 break;
7716 case 0x18: /* clz */
d9ba4830 7717 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7718 break;
7719 default:
7720 goto illegal_op;
7721 }
7722 }
d9ba4830 7723 store_reg(s, rd, tmp);
9ee6e8bb
PB
7724 break;
7725 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7726 op = (insn >> 4) & 0xf;
d9ba4830
PB
7727 tmp = load_reg(s, rn);
7728 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7729 switch ((insn >> 20) & 7) {
7730 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7731 tcg_gen_mul_i32(tmp, tmp, tmp2);
7732 dead_tmp(tmp2);
9ee6e8bb 7733 if (rs != 15) {
d9ba4830 7734 tmp2 = load_reg(s, rs);
9ee6e8bb 7735 if (op)
d9ba4830 7736 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7737 else
d9ba4830
PB
7738 tcg_gen_add_i32(tmp, tmp, tmp2);
7739 dead_tmp(tmp2);
9ee6e8bb 7740 }
9ee6e8bb
PB
7741 break;
7742 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7743 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7744 dead_tmp(tmp2);
9ee6e8bb 7745 if (rs != 15) {
d9ba4830
PB
7746 tmp2 = load_reg(s, rs);
7747 gen_helper_add_setq(tmp, tmp, tmp2);
7748 dead_tmp(tmp2);
9ee6e8bb 7749 }
9ee6e8bb
PB
7750 break;
7751 case 2: /* Dual multiply add. */
7752 case 4: /* Dual multiply subtract. */
7753 if (op)
d9ba4830
PB
7754 gen_swap_half(tmp2);
7755 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7756 /* This addition cannot overflow. */
7757 if (insn & (1 << 22)) {
d9ba4830 7758 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7759 } else {
d9ba4830 7760 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7761 }
d9ba4830 7762 dead_tmp(tmp2);
9ee6e8bb
PB
7763 if (rs != 15)
7764 {
d9ba4830
PB
7765 tmp2 = load_reg(s, rs);
7766 gen_helper_add_setq(tmp, tmp, tmp2);
7767 dead_tmp(tmp2);
9ee6e8bb 7768 }
9ee6e8bb
PB
7769 break;
7770 case 3: /* 32 * 16 -> 32msb */
7771 if (op)
d9ba4830 7772 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7773 else
d9ba4830 7774 gen_sxth(tmp2);
a7812ae4
PB
7775 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7776 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7777 tmp = new_tmp();
a7812ae4 7778 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7779 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7780 if (rs != 15)
7781 {
d9ba4830
PB
7782 tmp2 = load_reg(s, rs);
7783 gen_helper_add_setq(tmp, tmp, tmp2);
7784 dead_tmp(tmp2);
9ee6e8bb 7785 }
9ee6e8bb
PB
7786 break;
7787 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7788 gen_imull(tmp, tmp2);
7789 if (insn & (1 << 5)) {
7790 gen_roundqd(tmp, tmp2);
7791 dead_tmp(tmp2);
7792 } else {
7793 dead_tmp(tmp);
7794 tmp = tmp2;
7795 }
9ee6e8bb 7796 if (rs != 15) {
d9ba4830 7797 tmp2 = load_reg(s, rs);
9ee6e8bb 7798 if (insn & (1 << 21)) {
d9ba4830 7799 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7800 } else {
d9ba4830 7801 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7802 }
d9ba4830 7803 dead_tmp(tmp2);
2c0262af 7804 }
9ee6e8bb
PB
7805 break;
7806 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7807 gen_helper_usad8(tmp, tmp, tmp2);
7808 dead_tmp(tmp2);
9ee6e8bb 7809 if (rs != 15) {
d9ba4830
PB
7810 tmp2 = load_reg(s, rs);
7811 tcg_gen_add_i32(tmp, tmp, tmp2);
7812 dead_tmp(tmp2);
5fd46862 7813 }
9ee6e8bb 7814 break;
2c0262af 7815 }
d9ba4830 7816 store_reg(s, rd, tmp);
2c0262af 7817 break;
9ee6e8bb
PB
7818 case 6: case 7: /* 64-bit multiply, Divide. */
7819 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7820 tmp = load_reg(s, rn);
7821 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7822 if ((op & 0x50) == 0x10) {
7823 /* sdiv, udiv */
7824 if (!arm_feature(env, ARM_FEATURE_DIV))
7825 goto illegal_op;
7826 if (op & 0x20)
5e3f878a 7827 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7828 else
5e3f878a
PB
7829 gen_helper_sdiv(tmp, tmp, tmp2);
7830 dead_tmp(tmp2);
7831 store_reg(s, rd, tmp);
9ee6e8bb
PB
7832 } else if ((op & 0xe) == 0xc) {
7833 /* Dual multiply accumulate long. */
7834 if (op & 1)
5e3f878a
PB
7835 gen_swap_half(tmp2);
7836 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7837 if (op & 0x10) {
5e3f878a 7838 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7839 } else {
5e3f878a 7840 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7841 }
5e3f878a 7842 dead_tmp(tmp2);
a7812ae4
PB
7843 /* BUGFIX */
7844 tmp64 = tcg_temp_new_i64();
7845 tcg_gen_ext_i32_i64(tmp64, tmp);
7846 dead_tmp(tmp);
7847 gen_addq(s, tmp64, rs, rd);
7848 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7849 tcg_temp_free_i64(tmp64);
2c0262af 7850 } else {
9ee6e8bb
PB
7851 if (op & 0x20) {
7852 /* Unsigned 64-bit multiply */
a7812ae4 7853 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7854 } else {
9ee6e8bb
PB
7855 if (op & 8) {
7856 /* smlalxy */
5e3f878a
PB
7857 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7858 dead_tmp(tmp2);
a7812ae4
PB
7859 tmp64 = tcg_temp_new_i64();
7860 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7861 dead_tmp(tmp);
9ee6e8bb
PB
7862 } else {
7863 /* Signed 64-bit multiply */
a7812ae4 7864 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7865 }
b5ff1b31 7866 }
9ee6e8bb
PB
7867 if (op & 4) {
7868 /* umaal */
a7812ae4
PB
7869 gen_addq_lo(s, tmp64, rs);
7870 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7871 } else if (op & 0x40) {
7872 /* 64-bit accumulate. */
a7812ae4 7873 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7874 }
a7812ae4 7875 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 7876 tcg_temp_free_i64(tmp64);
5fd46862 7877 }
2c0262af 7878 break;
9ee6e8bb
PB
7879 }
7880 break;
7881 case 6: case 7: case 14: case 15:
7882 /* Coprocessor. */
7883 if (((insn >> 24) & 3) == 3) {
7884 /* Translate into the equivalent ARM encoding. */
7885 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7886 if (disas_neon_data_insn(env, s, insn))
7887 goto illegal_op;
7888 } else {
7889 if (insn & (1 << 28))
7890 goto illegal_op;
7891 if (disas_coproc_insn (env, s, insn))
7892 goto illegal_op;
7893 }
7894 break;
7895 case 8: case 9: case 10: case 11:
7896 if (insn & (1 << 15)) {
7897 /* Branches, misc control. */
7898 if (insn & 0x5000) {
7899 /* Unconditional branch. */
7900 /* signextend(hw1[10:0]) -> offset[:12]. */
7901 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7902 /* hw2[10:0] -> offset[11:1]. */
7903 offset |= (insn & 0x7ff) << 1;
7904 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7905 offset[24:22] already have the same value because of the
7906 sign extension above. */
7907 offset ^= ((~insn) & (1 << 13)) << 10;
7908 offset ^= ((~insn) & (1 << 11)) << 11;
7909
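/* A minimal illustrative sketch, not part of translate.c: the same offset
 * reconstruction as above, written as a stand-alone helper taking the two
 * halfwords of the 32-bit Thumb B/BL/BLX encoding (hw1 = first halfword,
 * hw2 = second).  The helper name is hypothetical; assumes <stdint.h>. */
static int32_t thumb2_branch_offset(uint16_t hw1, uint16_t hw2)
{
    uint32_t insn = ((uint32_t)hw1 << 16) | hw2;
    int32_t offset;

    /* hw1[10:0] (S:imm10), sign-extended -> offset[31:12].  */
    offset = (((int32_t)insn << 5) >> 9) & ~(int32_t)0xfff;
    /* hw2[10:0] (imm11) -> offset[11:1].  */
    offset |= (insn & 0x7ff) << 1;
    /* I1 = NOT(J1 XOR S) -> offset[23], I2 = NOT(J2 XOR S) -> offset[22],
       with J1 = hw2[13] and J2 = hw2[11].  */
    offset ^= ((~insn) & (1 << 13)) << 10;
    offset ^= ((~insn) & (1 << 11)) << 11;
    return offset;
}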
9ee6e8bb
PB
7910 if (insn & (1 << 14)) {
7911 /* Branch and link. */
3174f8e9 7912 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7913 }
3b46e624 7914
b0109805 7915 offset += s->pc;
9ee6e8bb
PB
7916 if (insn & (1 << 12)) {
7917 /* b/bl */
b0109805 7918 gen_jmp(s, offset);
9ee6e8bb
PB
7919 } else {
7920 /* blx */
b0109805
PB
7921 offset &= ~(uint32_t)2;
7922 gen_bx_im(s, offset);
2c0262af 7923 }
9ee6e8bb
PB
7924 } else if (((insn >> 23) & 7) == 7) {
7925 /* Misc control */
7926 if (insn & (1 << 13))
7927 goto illegal_op;
7928
7929 if (insn & (1 << 26)) {
7930 /* Secure monitor call (v6Z) */
7931 goto illegal_op; /* not implemented. */
2c0262af 7932 } else {
9ee6e8bb
PB
7933 op = (insn >> 20) & 7;
7934 switch (op) {
7935 case 0: /* msr cpsr. */
7936 if (IS_M(env)) {
8984bd2e
PB
7937 tmp = load_reg(s, rn);
7938 addr = tcg_const_i32(insn & 0xff);
7939 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6
JR
7940 tcg_temp_free_i32(addr);
7941 dead_tmp(tmp);
9ee6e8bb
PB
7942 gen_lookup_tb(s);
7943 break;
7944 }
7945 /* fall through */
7946 case 1: /* msr spsr. */
7947 if (IS_M(env))
7948 goto illegal_op;
2fbac54b
FN
7949 tmp = load_reg(s, rn);
7950 if (gen_set_psr(s,
9ee6e8bb 7951 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7952 op == 1, tmp))
9ee6e8bb
PB
7953 goto illegal_op;
7954 break;
7955 case 2: /* cps, nop-hint. */
7956 if (((insn >> 8) & 7) == 0) {
7957 gen_nop_hint(s, insn & 0xff);
7958 }
7959 /* Implemented as NOP in user mode. */
7960 if (IS_USER(s))
7961 break;
7962 offset = 0;
7963 imm = 0;
7964 if (insn & (1 << 10)) {
7965 if (insn & (1 << 7))
7966 offset |= CPSR_A;
7967 if (insn & (1 << 6))
7968 offset |= CPSR_I;
7969 if (insn & (1 << 5))
7970 offset |= CPSR_F;
7971 if (insn & (1 << 9))
7972 imm = CPSR_A | CPSR_I | CPSR_F;
7973 }
7974 if (insn & (1 << 8)) {
7975 offset |= 0x1f;
7976 imm |= (insn & 0x1f);
7977 }
7978 if (offset) {
2fbac54b 7979 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
7980 }
7981 break;
7982 case 3: /* Special control operations. */
426f5abc 7983 ARCH(7);
9ee6e8bb
PB
7984 op = (insn >> 4) & 0xf;
7985 switch (op) {
7986 case 2: /* clrex */
426f5abc 7987 gen_clrex(s);
9ee6e8bb
PB
7988 break;
7989 case 4: /* dsb */
7990 case 5: /* dmb */
7991 case 6: /* isb */
7992 /* These execute as NOPs. */
9ee6e8bb
PB
7993 break;
7994 default:
7995 goto illegal_op;
7996 }
7997 break;
7998 case 4: /* bxj */
7999 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8000 tmp = load_reg(s, rn);
8001 gen_bx(s, tmp);
9ee6e8bb
PB
8002 break;
8003 case 5: /* Exception return. */
8004 /* Unpredictable in user mode. */
8005 goto illegal_op;
8006 case 6: /* mrs cpsr. */
8984bd2e 8007 tmp = new_tmp();
9ee6e8bb 8008 if (IS_M(env)) {
8984bd2e
PB
8009 addr = tcg_const_i32(insn & 0xff);
8010 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8011 tcg_temp_free_i32(addr);
9ee6e8bb 8012 } else {
8984bd2e 8013 gen_helper_cpsr_read(tmp);
9ee6e8bb 8014 }
8984bd2e 8015 store_reg(s, rd, tmp);
9ee6e8bb
PB
8016 break;
8017 case 7: /* mrs spsr. */
8018 /* Not accessible in user mode. */
8019 if (IS_USER(s) || IS_M(env))
8020 goto illegal_op;
d9ba4830
PB
8021 tmp = load_cpu_field(spsr);
8022 store_reg(s, rd, tmp);
9ee6e8bb 8023 break;
2c0262af
FB
8024 }
8025 }
9ee6e8bb
PB
8026 } else {
8027 /* Conditional branch. */
8028 op = (insn >> 22) & 0xf;
8029 /* Generate a conditional jump to next instruction. */
8030 s->condlabel = gen_new_label();
d9ba4830 8031 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8032 s->condjmp = 1;
8033
8034 /* offset[11:1] = insn[10:0] */
8035 offset = (insn & 0x7ff) << 1;
8036 /* offset[17:12] = insn[21:16]. */
8037 offset |= (insn & 0x003f0000) >> 4;
8038 /* offset[31:20] = insn[26]. */
8039 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8040 /* offset[18] = insn[13]. */
8041 offset |= (insn & (1 << 13)) << 5;
8042 /* offset[19] = insn[11]. */
8043 offset |= (insn & (1 << 11)) << 8;
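/* i.e. offset = SignExtend(S:J2:J1:imm6:imm11:'0'), the conditional
 * B<cond>.W (encoding T3) immediate, with S = insn[26], J1 = insn[13]
 * and J2 = insn[11]. */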
8044
8045 /* jump to the offset */
b0109805 8046 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8047 }
8048 } else {
8049 /* Data processing immediate. */
8050 if (insn & (1 << 25)) {
8051 if (insn & (1 << 24)) {
8052 if (insn & (1 << 20))
8053 goto illegal_op;
8054 /* Bitfield/Saturate. */
8055 op = (insn >> 21) & 7;
8056 imm = insn & 0x1f;
8057 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
8058 if (rn == 15) {
8059 tmp = new_tmp();
8060 tcg_gen_movi_i32(tmp, 0);
8061 } else {
8062 tmp = load_reg(s, rn);
8063 }
9ee6e8bb
PB
8064 switch (op) {
8065 case 2: /* Signed bitfield extract. */
8066 imm++;
8067 if (shift + imm > 32)
8068 goto illegal_op;
8069 if (imm < 32)
6ddbc6e4 8070 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8071 break;
8072 case 6: /* Unsigned bitfield extract. */
8073 imm++;
8074 if (shift + imm > 32)
8075 goto illegal_op;
8076 if (imm < 32)
6ddbc6e4 8077 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8078 break;
8079 case 3: /* Bitfield insert/clear. */
8080 if (imm < shift)
8081 goto illegal_op;
8082 imm = imm + 1 - shift;
8083 if (imm != 32) {
6ddbc6e4 8084 tmp2 = load_reg(s, rd);
8f8e3aa4 8085 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 8086 dead_tmp(tmp2);
9ee6e8bb
PB
8087 }
8088 break;
8089 case 7:
8090 goto illegal_op;
8091 default: /* Saturate. */
9ee6e8bb
PB
8092 if (shift) {
8093 if (op & 1)
6ddbc6e4 8094 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8095 else
6ddbc6e4 8096 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8097 }
6ddbc6e4 8098 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8099 if (op & 4) {
8100 /* Unsigned. */
9ee6e8bb 8101 if ((op & 1) && shift == 0)
6ddbc6e4 8102 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8103 else
6ddbc6e4 8104 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8105 } else {
9ee6e8bb 8106 /* Signed. */
9ee6e8bb 8107 if ((op & 1) && shift == 0)
6ddbc6e4 8108 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8109 else
6ddbc6e4 8110 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8111 }
b75263d6 8112 tcg_temp_free_i32(tmp2);
9ee6e8bb 8113 break;
2c0262af 8114 }
6ddbc6e4 8115 store_reg(s, rd, tmp);
9ee6e8bb
PB
8116 } else {
8117 imm = ((insn & 0x04000000) >> 15)
8118 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8119 if (insn & (1 << 22)) {
8120 /* 16-bit immediate. */
8121 imm |= (insn >> 4) & 0xf000;
8122 if (insn & (1 << 23)) {
8123 /* movt */
5e3f878a 8124 tmp = load_reg(s, rd);
86831435 8125 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8126 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8127 } else {
9ee6e8bb 8128 /* movw */
5e3f878a
PB
8129 tmp = new_tmp();
8130 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8131 }
8132 } else {
9ee6e8bb
PB
8133 /* Add/sub 12-bit immediate. */
8134 if (rn == 15) {
b0109805 8135 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8136 if (insn & (1 << 23))
b0109805 8137 offset -= imm;
9ee6e8bb 8138 else
b0109805 8139 offset += imm;
5e3f878a
PB
8140 tmp = new_tmp();
8141 tcg_gen_movi_i32(tmp, offset);
2c0262af 8142 } else {
5e3f878a 8143 tmp = load_reg(s, rn);
9ee6e8bb 8144 if (insn & (1 << 23))
5e3f878a 8145 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8146 else
5e3f878a 8147 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8148 }
9ee6e8bb 8149 }
5e3f878a 8150 store_reg(s, rd, tmp);
191abaa2 8151 }
9ee6e8bb
PB
8152 } else {
8153 int shifter_out = 0;
8154 /* modified 12-bit immediate. */
8155 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8156 imm = (insn & 0xff);
8157 switch (shift) {
8158 case 0: /* XY */
8159 /* Nothing to do. */
8160 break;
8161 case 1: /* 00XY00XY */
8162 imm |= imm << 16;
8163 break;
8164 case 2: /* XY00XY00 */
8165 imm |= imm << 16;
8166 imm <<= 8;
8167 break;
8168 case 3: /* XYXYXYXY */
8169 imm |= imm << 16;
8170 imm |= imm << 8;
8171 break;
8172 default: /* Rotated constant. */
8173 shift = (shift << 1) | (imm >> 7);
8174 imm |= 0x80;
8175 imm = imm << (32 - shift);
8176 shifter_out = 1;
8177 break;
b5ff1b31 8178 }
3174f8e9
FN
8179 tmp2 = new_tmp();
8180 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8181 rn = (insn >> 16) & 0xf;
3174f8e9
FN
8182 if (rn == 15) {
8183 tmp = new_tmp();
8184 tcg_gen_movi_i32(tmp, 0);
8185 } else {
8186 tmp = load_reg(s, rn);
8187 }
9ee6e8bb
PB
8188 op = (insn >> 21) & 0xf;
8189 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8190 shifter_out, tmp, tmp2))
9ee6e8bb 8191 goto illegal_op;
3174f8e9 8192 dead_tmp(tmp2);
9ee6e8bb
PB
8193 rd = (insn >> 8) & 0xf;
8194 if (rd != 15) {
3174f8e9
FN
8195 store_reg(s, rd, tmp);
8196 } else {
8197 dead_tmp(tmp);
2c0262af 8198 }
2c0262af 8199 }
9ee6e8bb
PB
8200 }
8201 break;
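/* A minimal illustrative sketch, not part of translate.c: the Thumb-2
 * "modified immediate" expansion performed by the switch above, written as
 * a stand-alone helper.  The name thumb2_expand_imm and the split into
 * i/imm3/imm8 parameters (insn[26], insn[14:12], insn[7:0]) are our own
 * labelling; carry_out mirrors 'shifter_out' above.  Assumes <stdint.h>. */
static uint32_t thumb2_expand_imm(uint32_t i, uint32_t imm3, uint32_t imm8,
                                  int *carry_out)
{
    uint32_t ctrl = (i << 3) | imm3;    /* same value as 'shift' above */
    uint32_t imm = imm8;

    *carry_out = 0;
    switch (ctrl) {
    case 0:                             /* 000000XY */
        break;
    case 1:                             /* 00XY00XY */
        imm |= imm << 16;
        break;
    case 2:                             /* XY00XY00 */
        imm |= imm << 16;
        imm <<= 8;
        break;
    case 3:                             /* XYXYXYXY */
        imm |= imm << 16;
        imm |= imm << 8;
        break;
    default:                            /* 0b1bcdefgh rotated right by i:imm3:a */
        ctrl = (ctrl << 1) | (imm >> 7);
        imm |= 0x80;
        imm = imm << (32 - ctrl);
        *carry_out = 1;
        break;
    }
    return imm;
}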
8202 case 12: /* Load/store single data item. */
8203 {
8204 int postinc = 0;
8205 int writeback = 0;
b0109805 8206 int user;
9ee6e8bb
PB
8207 if ((insn & 0x01100000) == 0x01000000) {
8208 if (disas_neon_ls_insn(env, s, insn))
c1713132 8209 goto illegal_op;
9ee6e8bb
PB
8210 break;
8211 }
b0109805 8212 user = IS_USER(s);
9ee6e8bb 8213 if (rn == 15) {
b0109805 8214 addr = new_tmp();
9ee6e8bb
PB
8215 /* PC relative. */
8216 /* s->pc has already been incremented by 4. */
8217 imm = s->pc & 0xfffffffc;
8218 if (insn & (1 << 23))
8219 imm += insn & 0xfff;
8220 else
8221 imm -= insn & 0xfff;
b0109805 8222 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8223 } else {
b0109805 8224 addr = load_reg(s, rn);
9ee6e8bb
PB
8225 if (insn & (1 << 23)) {
8226 /* Positive offset. */
8227 imm = insn & 0xfff;
b0109805 8228 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8229 } else {
8230 op = (insn >> 8) & 7;
8231 imm = insn & 0xff;
8232 switch (op) {
8233 case 0: case 8: /* Shifted Register. */
8234 shift = (insn >> 4) & 0xf;
8235 if (shift > 3)
18c9b560 8236 goto illegal_op;
b26eefb6 8237 tmp = load_reg(s, rm);
9ee6e8bb 8238 if (shift)
b26eefb6 8239 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8240 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8241 dead_tmp(tmp);
9ee6e8bb
PB
8242 break;
8243 case 4: /* Negative offset. */
b0109805 8244 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8245 break;
8246 case 6: /* User privilege. */
b0109805
PB
8247 tcg_gen_addi_i32(addr, addr, imm);
8248 user = 1;
9ee6e8bb
PB
8249 break;
8250 case 1: /* Post-decrement. */
8251 imm = -imm;
8252 /* Fall through. */
8253 case 3: /* Post-increment. */
9ee6e8bb
PB
8254 postinc = 1;
8255 writeback = 1;
8256 break;
8257 case 5: /* Pre-decrement. */
8258 imm = -imm;
8259 /* Fall through. */
8260 case 7: /* Pre-increment. */
b0109805 8261 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8262 writeback = 1;
8263 break;
8264 default:
b7bcbe95 8265 goto illegal_op;
9ee6e8bb
PB
8266 }
8267 }
8268 }
8269 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8270 if (insn & (1 << 20)) {
8271 /* Load. */
8272 if (rs == 15 && op != 2) {
8273 if (op & 2)
b5ff1b31 8274 goto illegal_op;
9ee6e8bb
PB
8275 /* Memory hint. Implemented as NOP. */
8276 } else {
8277 switch (op) {
b0109805
PB
8278 case 0: tmp = gen_ld8u(addr, user); break;
8279 case 4: tmp = gen_ld8s(addr, user); break;
8280 case 1: tmp = gen_ld16u(addr, user); break;
8281 case 5: tmp = gen_ld16s(addr, user); break;
8282 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8283 default: goto illegal_op;
8284 }
8285 if (rs == 15) {
b0109805 8286 gen_bx(s, tmp);
9ee6e8bb 8287 } else {
b0109805 8288 store_reg(s, rs, tmp);
9ee6e8bb
PB
8289 }
8290 }
8291 } else {
8292 /* Store. */
8293 if (rs == 15)
b7bcbe95 8294 goto illegal_op;
b0109805 8295 tmp = load_reg(s, rs);
9ee6e8bb 8296 switch (op) {
b0109805
PB
8297 case 0: gen_st8(tmp, addr, user); break;
8298 case 1: gen_st16(tmp, addr, user); break;
8299 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8300 default: goto illegal_op;
b7bcbe95 8301 }
2c0262af 8302 }
9ee6e8bb 8303 if (postinc)
b0109805
PB
8304 tcg_gen_addi_i32(addr, addr, imm);
8305 if (writeback) {
8306 store_reg(s, rn, addr);
8307 } else {
8308 dead_tmp(addr);
8309 }
9ee6e8bb
PB
8310 }
8311 break;
8312 default:
8313 goto illegal_op;
2c0262af 8314 }
9ee6e8bb
PB
8315 return 0;
8316illegal_op:
8317 return 1;
2c0262af
FB
8318}
8319
9ee6e8bb 8320static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8321{
8322 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8323 int32_t offset;
8324 int i;
b26eefb6 8325 TCGv tmp;
d9ba4830 8326 TCGv tmp2;
b0109805 8327 TCGv addr;
99c475ab 8328
9ee6e8bb
PB
8329 if (s->condexec_mask) {
8330 cond = s->condexec_cond;
8331 s->condlabel = gen_new_label();
d9ba4830 8332 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8333 s->condjmp = 1;
8334 }
8335
b5ff1b31 8336 insn = lduw_code(s->pc);
99c475ab 8337 s->pc += 2;
b5ff1b31 8338
99c475ab
FB
8339 switch (insn >> 12) {
8340 case 0: case 1:
396e467c 8341
99c475ab
FB
8342 rd = insn & 7;
8343 op = (insn >> 11) & 3;
8344 if (op == 3) {
8345 /* add/subtract */
8346 rn = (insn >> 3) & 7;
396e467c 8347 tmp = load_reg(s, rn);
99c475ab
FB
8348 if (insn & (1 << 10)) {
8349 /* immediate */
396e467c
FN
8350 tmp2 = new_tmp();
8351 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8352 } else {
8353 /* reg */
8354 rm = (insn >> 6) & 7;
396e467c 8355 tmp2 = load_reg(s, rm);
99c475ab 8356 }
9ee6e8bb
PB
8357 if (insn & (1 << 9)) {
8358 if (s->condexec_mask)
396e467c 8359 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8360 else
396e467c 8361 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8362 } else {
8363 if (s->condexec_mask)
396e467c 8364 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8365 else
396e467c 8366 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8367 }
396e467c
FN
8368 dead_tmp(tmp2);
8369 store_reg(s, rd, tmp);
99c475ab
FB
8370 } else {
8371 /* shift immediate */
8372 rm = (insn >> 3) & 7;
8373 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8374 tmp = load_reg(s, rm);
8375 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8376 if (!s->condexec_mask)
8377 gen_logic_CC(tmp);
8378 store_reg(s, rd, tmp);
99c475ab
FB
8379 }
8380 break;
8381 case 2: case 3:
8382 /* arithmetic large immediate */
8383 op = (insn >> 11) & 3;
8384 rd = (insn >> 8) & 0x7;
396e467c
FN
8385 if (op == 0) { /* mov */
8386 tmp = new_tmp();
8387 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8388 if (!s->condexec_mask)
396e467c
FN
8389 gen_logic_CC(tmp);
8390 store_reg(s, rd, tmp);
8391 } else {
8392 tmp = load_reg(s, rd);
8393 tmp2 = new_tmp();
8394 tcg_gen_movi_i32(tmp2, insn & 0xff);
8395 switch (op) {
8396 case 1: /* cmp */
8397 gen_helper_sub_cc(tmp, tmp, tmp2);
8398 dead_tmp(tmp);
8399 dead_tmp(tmp2);
8400 break;
8401 case 2: /* add */
8402 if (s->condexec_mask)
8403 tcg_gen_add_i32(tmp, tmp, tmp2);
8404 else
8405 gen_helper_add_cc(tmp, tmp, tmp2);
8406 dead_tmp(tmp2);
8407 store_reg(s, rd, tmp);
8408 break;
8409 case 3: /* sub */
8410 if (s->condexec_mask)
8411 tcg_gen_sub_i32(tmp, tmp, tmp2);
8412 else
8413 gen_helper_sub_cc(tmp, tmp, tmp2);
8414 dead_tmp(tmp2);
8415 store_reg(s, rd, tmp);
8416 break;
8417 }
99c475ab 8418 }
99c475ab
FB
8419 break;
8420 case 4:
8421 if (insn & (1 << 11)) {
8422 rd = (insn >> 8) & 7;
5899f386
FB
8423 /* load pc-relative. Bit 1 of PC is ignored. */
8424 val = s->pc + 2 + ((insn & 0xff) * 4);
8425 val &= ~(uint32_t)2;
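/* i.e. the literal address is Align(PC, 4) + imm8 * 4; s->pc has already
 * been advanced past this insn, so s->pc + 2 is the architectural PC. */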
b0109805
PB
8426 addr = new_tmp();
8427 tcg_gen_movi_i32(addr, val);
8428 tmp = gen_ld32(addr, IS_USER(s));
8429 dead_tmp(addr);
8430 store_reg(s, rd, tmp);
99c475ab
FB
8431 break;
8432 }
8433 if (insn & (1 << 10)) {
8434 /* data processing extended or blx */
8435 rd = (insn & 7) | ((insn >> 4) & 8);
8436 rm = (insn >> 3) & 0xf;
8437 op = (insn >> 8) & 3;
8438 switch (op) {
8439 case 0: /* add */
396e467c
FN
8440 tmp = load_reg(s, rd);
8441 tmp2 = load_reg(s, rm);
8442 tcg_gen_add_i32(tmp, tmp, tmp2);
8443 dead_tmp(tmp2);
8444 store_reg(s, rd, tmp);
99c475ab
FB
8445 break;
8446 case 1: /* cmp */
396e467c
FN
8447 tmp = load_reg(s, rd);
8448 tmp2 = load_reg(s, rm);
8449 gen_helper_sub_cc(tmp, tmp, tmp2);
8450 dead_tmp(tmp2);
8451 dead_tmp(tmp);
99c475ab
FB
8452 break;
8453 case 2: /* mov/cpy */
396e467c
FN
8454 tmp = load_reg(s, rm);
8455 store_reg(s, rd, tmp);
99c475ab
FB
8456 break;
8457 case 3:/* branch [and link] exchange thumb register */
b0109805 8458 tmp = load_reg(s, rm);
99c475ab
FB
8459 if (insn & (1 << 7)) {
8460 val = (uint32_t)s->pc | 1;
b0109805
PB
8461 tmp2 = new_tmp();
8462 tcg_gen_movi_i32(tmp2, val);
8463 store_reg(s, 14, tmp2);
99c475ab 8464 }
d9ba4830 8465 gen_bx(s, tmp);
99c475ab
FB
8466 break;
8467 }
8468 break;
8469 }
8470
8471 /* data processing register */
8472 rd = insn & 7;
8473 rm = (insn >> 3) & 7;
8474 op = (insn >> 6) & 0xf;
8475 if (op == 2 || op == 3 || op == 4 || op == 7) {
8476 /* the shift/rotate ops want the operands backwards */
8477 val = rm;
8478 rm = rd;
8479 rd = val;
8480 val = 1;
8481 } else {
8482 val = 0;
8483 }
8484
396e467c
FN
8485 if (op == 9) { /* neg */
8486 tmp = new_tmp();
8487 tcg_gen_movi_i32(tmp, 0);
8488 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8489 tmp = load_reg(s, rd);
8490 } else {
8491 TCGV_UNUSED(tmp);
8492 }
99c475ab 8493
396e467c 8494 tmp2 = load_reg(s, rm);
5899f386 8495 switch (op) {
99c475ab 8496 case 0x0: /* and */
396e467c 8497 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8498 if (!s->condexec_mask)
396e467c 8499 gen_logic_CC(tmp);
99c475ab
FB
8500 break;
8501 case 0x1: /* eor */
396e467c 8502 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8503 if (!s->condexec_mask)
396e467c 8504 gen_logic_CC(tmp);
99c475ab
FB
8505 break;
8506 case 0x2: /* lsl */
9ee6e8bb 8507 if (s->condexec_mask) {
396e467c 8508 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8509 } else {
396e467c
FN
8510 gen_helper_shl_cc(tmp2, tmp2, tmp);
8511 gen_logic_CC(tmp2);
9ee6e8bb 8512 }
99c475ab
FB
8513 break;
8514 case 0x3: /* lsr */
9ee6e8bb 8515 if (s->condexec_mask) {
396e467c 8516 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8517 } else {
396e467c
FN
8518 gen_helper_shr_cc(tmp2, tmp2, tmp);
8519 gen_logic_CC(tmp2);
9ee6e8bb 8520 }
99c475ab
FB
8521 break;
8522 case 0x4: /* asr */
9ee6e8bb 8523 if (s->condexec_mask) {
396e467c 8524 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8525 } else {
396e467c
FN
8526 gen_helper_sar_cc(tmp2, tmp2, tmp);
8527 gen_logic_CC(tmp2);
9ee6e8bb 8528 }
99c475ab
FB
8529 break;
8530 case 0x5: /* adc */
9ee6e8bb 8531 if (s->condexec_mask)
396e467c 8532 gen_adc(tmp, tmp2);
9ee6e8bb 8533 else
396e467c 8534 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8535 break;
8536 case 0x6: /* sbc */
9ee6e8bb 8537 if (s->condexec_mask)
396e467c 8538 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8539 else
396e467c 8540 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8541 break;
8542 case 0x7: /* ror */
9ee6e8bb 8543 if (s->condexec_mask) {
f669df27
AJ
8544 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8545 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8546 } else {
396e467c
FN
8547 gen_helper_ror_cc(tmp2, tmp2, tmp);
8548 gen_logic_CC(tmp2);
9ee6e8bb 8549 }
99c475ab
FB
8550 break;
8551 case 0x8: /* tst */
396e467c
FN
8552 tcg_gen_and_i32(tmp, tmp, tmp2);
8553 gen_logic_CC(tmp);
99c475ab 8554 rd = 16;
5899f386 8555 break;
99c475ab 8556 case 0x9: /* neg */
9ee6e8bb 8557 if (s->condexec_mask)
396e467c 8558 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8559 else
396e467c 8560 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8561 break;
8562 case 0xa: /* cmp */
396e467c 8563 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8564 rd = 16;
8565 break;
8566 case 0xb: /* cmn */
396e467c 8567 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8568 rd = 16;
8569 break;
8570 case 0xc: /* orr */
396e467c 8571 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8572 if (!s->condexec_mask)
396e467c 8573 gen_logic_CC(tmp);
99c475ab
FB
8574 break;
8575 case 0xd: /* mul */
7b2919a0 8576 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8577 if (!s->condexec_mask)
396e467c 8578 gen_logic_CC(tmp);
99c475ab
FB
8579 break;
8580 case 0xe: /* bic */
f669df27 8581 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8582 if (!s->condexec_mask)
396e467c 8583 gen_logic_CC(tmp);
99c475ab
FB
8584 break;
8585 case 0xf: /* mvn */
396e467c 8586 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8587 if (!s->condexec_mask)
396e467c 8588 gen_logic_CC(tmp2);
99c475ab 8589 val = 1;
5899f386 8590 rm = rd;
99c475ab
FB
8591 break;
8592 }
8593 if (rd != 16) {
396e467c
FN
8594 if (val) {
8595 store_reg(s, rm, tmp2);
8596 if (op != 0xf)
8597 dead_tmp(tmp);
8598 } else {
8599 store_reg(s, rd, tmp);
8600 dead_tmp(tmp2);
8601 }
8602 } else {
8603 dead_tmp(tmp);
8604 dead_tmp(tmp2);
99c475ab
FB
8605 }
8606 break;
8607
8608 case 5:
8609 /* load/store register offset. */
8610 rd = insn & 7;
8611 rn = (insn >> 3) & 7;
8612 rm = (insn >> 6) & 7;
8613 op = (insn >> 9) & 7;
b0109805 8614 addr = load_reg(s, rn);
b26eefb6 8615 tmp = load_reg(s, rm);
b0109805 8616 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8617 dead_tmp(tmp);
99c475ab
FB
8618
8619 if (op < 3) /* store */
b0109805 8620 tmp = load_reg(s, rd);
99c475ab
FB
8621
8622 switch (op) {
8623 case 0: /* str */
b0109805 8624 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8625 break;
8626 case 1: /* strh */
b0109805 8627 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8628 break;
8629 case 2: /* strb */
b0109805 8630 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8631 break;
8632 case 3: /* ldrsb */
b0109805 8633 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8634 break;
8635 case 4: /* ldr */
b0109805 8636 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8637 break;
8638 case 5: /* ldrh */
b0109805 8639 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8640 break;
8641 case 6: /* ldrb */
b0109805 8642 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8643 break;
8644 case 7: /* ldrsh */
b0109805 8645 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8646 break;
8647 }
8648 if (op >= 3) /* load */
b0109805
PB
8649 store_reg(s, rd, tmp);
8650 dead_tmp(addr);
99c475ab
FB
8651 break;
8652
8653 case 6:
8654 /* load/store word immediate offset */
8655 rd = insn & 7;
8656 rn = (insn >> 3) & 7;
b0109805 8657 addr = load_reg(s, rn);
99c475ab 8658 val = (insn >> 4) & 0x7c;
b0109805 8659 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8660
8661 if (insn & (1 << 11)) {
8662 /* load */
b0109805
PB
8663 tmp = gen_ld32(addr, IS_USER(s));
8664 store_reg(s, rd, tmp);
99c475ab
FB
8665 } else {
8666 /* store */
b0109805
PB
8667 tmp = load_reg(s, rd);
8668 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8669 }
b0109805 8670 dead_tmp(addr);
99c475ab
FB
8671 break;
8672
8673 case 7:
8674 /* load/store byte immediate offset */
8675 rd = insn & 7;
8676 rn = (insn >> 3) & 7;
b0109805 8677 addr = load_reg(s, rn);
99c475ab 8678 val = (insn >> 6) & 0x1f;
b0109805 8679 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8680
8681 if (insn & (1 << 11)) {
8682 /* load */
b0109805
PB
8683 tmp = gen_ld8u(addr, IS_USER(s));
8684 store_reg(s, rd, tmp);
99c475ab
FB
8685 } else {
8686 /* store */
b0109805
PB
8687 tmp = load_reg(s, rd);
8688 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8689 }
b0109805 8690 dead_tmp(addr);
99c475ab
FB
8691 break;
8692
8693 case 8:
8694 /* load/store halfword immediate offset */
8695 rd = insn & 7;
8696 rn = (insn >> 3) & 7;
b0109805 8697 addr = load_reg(s, rn);
99c475ab 8698 val = (insn >> 5) & 0x3e;
b0109805 8699 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8700
8701 if (insn & (1 << 11)) {
8702 /* load */
b0109805
PB
8703 tmp = gen_ld16u(addr, IS_USER(s));
8704 store_reg(s, rd, tmp);
99c475ab
FB
8705 } else {
8706 /* store */
b0109805
PB
8707 tmp = load_reg(s, rd);
8708 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8709 }
b0109805 8710 dead_tmp(addr);
99c475ab
FB
8711 break;
8712
8713 case 9:
8714 /* load/store from stack */
8715 rd = (insn >> 8) & 7;
b0109805 8716 addr = load_reg(s, 13);
99c475ab 8717 val = (insn & 0xff) * 4;
b0109805 8718 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8719
8720 if (insn & (1 << 11)) {
8721 /* load */
b0109805
PB
8722 tmp = gen_ld32(addr, IS_USER(s));
8723 store_reg(s, rd, tmp);
99c475ab
FB
8724 } else {
8725 /* store */
b0109805
PB
8726 tmp = load_reg(s, rd);
8727 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8728 }
b0109805 8729 dead_tmp(addr);
99c475ab
FB
8730 break;
8731
8732 case 10:
8733 /* add to high reg */
8734 rd = (insn >> 8) & 7;
5899f386
FB
8735 if (insn & (1 << 11)) {
8736 /* SP */
5e3f878a 8737 tmp = load_reg(s, 13);
5899f386
FB
8738 } else {
8739 /* PC. bit 1 is ignored. */
5e3f878a
PB
8740 tmp = new_tmp();
8741 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8742 }
99c475ab 8743 val = (insn & 0xff) * 4;
5e3f878a
PB
8744 tcg_gen_addi_i32(tmp, tmp, val);
8745 store_reg(s, rd, tmp);
99c475ab
FB
8746 break;
8747
8748 case 11:
8749 /* misc */
8750 op = (insn >> 8) & 0xf;
8751 switch (op) {
8752 case 0:
8753 /* adjust stack pointer */
b26eefb6 8754 tmp = load_reg(s, 13);
99c475ab
FB
8755 val = (insn & 0x7f) * 4;
8756 if (insn & (1 << 7))
6a0d8a1d 8757 val = -(int32_t)val;
b26eefb6
PB
8758 tcg_gen_addi_i32(tmp, tmp, val);
8759 store_reg(s, 13, tmp);
99c475ab
FB
8760 break;
8761
9ee6e8bb
PB
8762 case 2: /* sign/zero extend. */
8763 ARCH(6);
8764 rd = insn & 7;
8765 rm = (insn >> 3) & 7;
b0109805 8766 tmp = load_reg(s, rm);
9ee6e8bb 8767 switch ((insn >> 6) & 3) {
b0109805
PB
8768 case 0: gen_sxth(tmp); break;
8769 case 1: gen_sxtb(tmp); break;
8770 case 2: gen_uxth(tmp); break;
8771 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8772 }
b0109805 8773 store_reg(s, rd, tmp);
9ee6e8bb 8774 break;
99c475ab
FB
8775 case 4: case 5: case 0xc: case 0xd:
8776 /* push/pop */
b0109805 8777 addr = load_reg(s, 13);
5899f386
FB
8778 if (insn & (1 << 8))
8779 offset = 4;
99c475ab 8780 else
5899f386
FB
8781 offset = 0;
8782 for (i = 0; i < 8; i++) {
8783 if (insn & (1 << i))
8784 offset += 4;
8785 }
8786 if ((insn & (1 << 11)) == 0) {
b0109805 8787 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8788 }
99c475ab
FB
8789 for (i = 0; i < 8; i++) {
8790 if (insn & (1 << i)) {
8791 if (insn & (1 << 11)) {
8792 /* pop */
b0109805
PB
8793 tmp = gen_ld32(addr, IS_USER(s));
8794 store_reg(s, i, tmp);
99c475ab
FB
8795 } else {
8796 /* push */
b0109805
PB
8797 tmp = load_reg(s, i);
8798 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8799 }
5899f386 8800 /* advance to the next address. */
b0109805 8801 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8802 }
8803 }
a50f5b91 8804 TCGV_UNUSED(tmp);
99c475ab
FB
8805 if (insn & (1 << 8)) {
8806 if (insn & (1 << 11)) {
8807 /* pop pc */
b0109805 8808 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8809 /* don't set the pc until the rest of the instruction
8810 has completed */
8811 } else {
8812 /* push lr */
b0109805
PB
8813 tmp = load_reg(s, 14);
8814 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8815 }
b0109805 8816 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8817 }
5899f386 8818 if ((insn & (1 << 11)) == 0) {
b0109805 8819 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8820 }
99c475ab 8821 /* write back the new stack pointer */
b0109805 8822 store_reg(s, 13, addr);
99c475ab
FB
8823 /* set the new PC value */
8824 if ((insn & 0x0900) == 0x0900)
b0109805 8825 gen_bx(s, tmp);
99c475ab
FB
8826 break;
8827
9ee6e8bb
PB
8828 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8829 rm = insn & 7;
d9ba4830 8830 tmp = load_reg(s, rm);
9ee6e8bb
PB
8831 s->condlabel = gen_new_label();
8832 s->condjmp = 1;
8833 if (insn & (1 << 11))
cb63669a 8834 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8835 else
cb63669a 8836 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8837 dead_tmp(tmp);
9ee6e8bb
PB
8838 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
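/* i.e. offset = i:imm5:'0', with i = insn[9] and imm5 = insn[7:3]; the
 * target computed below is PC (this insn's address + 4) plus offset. */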
8839 val = (uint32_t)s->pc + 2;
8840 val += offset;
8841 gen_jmp(s, val);
8842 break;
8843
8844 case 15: /* IT, nop-hint. */
8845 if ((insn & 0xf) == 0) {
8846 gen_nop_hint(s, (insn >> 4) & 0xf);
8847 break;
8848 }
8849 /* If Then. */
8850 s->condexec_cond = (insn >> 4) & 0xe;
8851 s->condexec_mask = insn & 0x1f;
8852 /* No actual code generated for this insn, just setup state. */
8853 break;
8854
06c949e6 8855 case 0xe: /* bkpt */
9ee6e8bb 8856 gen_set_condexec(s);
5e3f878a 8857 gen_set_pc_im(s->pc - 2);
d9ba4830 8858 gen_exception(EXCP_BKPT);
06c949e6
PB
8859 s->is_jmp = DISAS_JUMP;
8860 break;
8861
9ee6e8bb
PB
8862 case 0xa: /* rev */
8863 ARCH(6);
8864 rn = (insn >> 3) & 0x7;
8865 rd = insn & 0x7;
b0109805 8866 tmp = load_reg(s, rn);
9ee6e8bb 8867 switch ((insn >> 6) & 3) {
66896cb8 8868 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8869 case 1: gen_rev16(tmp); break;
8870 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8871 default: goto illegal_op;
8872 }
b0109805 8873 store_reg(s, rd, tmp);
9ee6e8bb
PB
8874 break;
8875
8876 case 6: /* cps */
8877 ARCH(6);
8878 if (IS_USER(s))
8879 break;
8880 if (IS_M(env)) {
8984bd2e 8881 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8882 /* PRIMASK */
8984bd2e
PB
8883 if (insn & 1) {
8884 addr = tcg_const_i32(16);
8885 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8886 tcg_temp_free_i32(addr);
8984bd2e 8887 }
9ee6e8bb 8888 /* FAULTMASK */
8984bd2e
PB
8889 if (insn & 2) {
8890 addr = tcg_const_i32(17);
8891 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8892 tcg_temp_free_i32(addr);
8984bd2e 8893 }
b75263d6 8894 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8895 gen_lookup_tb(s);
8896 } else {
8897 if (insn & (1 << 4))
8898 shift = CPSR_A | CPSR_I | CPSR_F;
8899 else
8900 shift = 0;
fa26df03 8901 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
8902 }
8903 break;
8904
99c475ab
FB
8905 default:
8906 goto undef;
8907 }
8908 break;
8909
8910 case 12:
8911 /* load/store multiple */
8912 rn = (insn >> 8) & 0x7;
b0109805 8913 addr = load_reg(s, rn);
99c475ab
FB
8914 for (i = 0; i < 8; i++) {
8915 if (insn & (1 << i)) {
99c475ab
FB
8916 if (insn & (1 << 11)) {
8917 /* load */
b0109805
PB
8918 tmp = gen_ld32(addr, IS_USER(s));
8919 store_reg(s, i, tmp);
99c475ab
FB
8920 } else {
8921 /* store */
b0109805
PB
8922 tmp = load_reg(s, i);
8923 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8924 }
5899f386 8925 /* advance to the next address */
b0109805 8926 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8927 }
8928 }
5899f386 8929 /* Base register writeback. */
b0109805
PB
8930 if ((insn & (1 << rn)) == 0) {
8931 store_reg(s, rn, addr);
8932 } else {
8933 dead_tmp(addr);
8934 }
99c475ab
FB
8935 break;
8936
8937 case 13:
8938 /* conditional branch or swi */
8939 cond = (insn >> 8) & 0xf;
8940 if (cond == 0xe)
8941 goto undef;
8942
8943 if (cond == 0xf) {
8944 /* swi */
9ee6e8bb 8945 gen_set_condexec(s);
422ebf69 8946 gen_set_pc_im(s->pc);
9ee6e8bb 8947 s->is_jmp = DISAS_SWI;
99c475ab
FB
8948 break;
8949 }
8950 /* generate a conditional jump to next instruction */
e50e6a20 8951 s->condlabel = gen_new_label();
d9ba4830 8952 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8953 s->condjmp = 1;
99c475ab
FB
8954
8955 /* jump to the offset */
5899f386 8956 val = (uint32_t)s->pc + 2;
99c475ab 8957 offset = ((int32_t)insn << 24) >> 24;
5899f386 8958 val += offset << 1;
8aaca4c0 8959 gen_jmp(s, val);
99c475ab
FB
8960 break;
8961
8962 case 14:
358bf29e 8963 if (insn & (1 << 11)) {
9ee6e8bb
PB
8964 if (disas_thumb2_insn(env, s, insn))
8965 goto undef32;
358bf29e
PB
8966 break;
8967 }
9ee6e8bb 8968 /* unconditional branch */
99c475ab
FB
8969 val = (uint32_t)s->pc;
8970 offset = ((int32_t)insn << 21) >> 21;
8971 val += (offset << 1) + 2;
8aaca4c0 8972 gen_jmp(s, val);
99c475ab
FB
8973 break;
8974
8975 case 15:
9ee6e8bb 8976 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8977 goto undef32;
9ee6e8bb 8978 break;
99c475ab
FB
8979 }
8980 return;
9ee6e8bb
PB
8981undef32:
8982 gen_set_condexec(s);
5e3f878a 8983 gen_set_pc_im(s->pc - 4);
d9ba4830 8984 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8985 s->is_jmp = DISAS_JUMP;
8986 return;
8987illegal_op:
99c475ab 8988undef:
9ee6e8bb 8989 gen_set_condexec(s);
5e3f878a 8990 gen_set_pc_im(s->pc - 2);
d9ba4830 8991 gen_exception(EXCP_UDEF);
99c475ab
FB
8992 s->is_jmp = DISAS_JUMP;
8993}
8994
2c0262af
FB
8995/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8996 basic block 'tb'. If search_pc is TRUE, also generate PC
8997 information for each intermediate instruction. */
2cfc5f17
TS
8998static inline void gen_intermediate_code_internal(CPUState *env,
8999 TranslationBlock *tb,
9000 int search_pc)
2c0262af
FB
9001{
9002 DisasContext dc1, *dc = &dc1;
a1d1bb31 9003 CPUBreakpoint *bp;
2c0262af
FB
9004 uint16_t *gen_opc_end;
9005 int j, lj;
0fa85d43 9006 target_ulong pc_start;
b5ff1b31 9007 uint32_t next_page_start;
2e70f6ef
PB
9008 int num_insns;
9009 int max_insns;
3b46e624 9010
2c0262af 9011 /* generate intermediate code */
b26eefb6 9012 num_temps = 0;
b26eefb6 9013
0fa85d43 9014 pc_start = tb->pc;
3b46e624 9015
2c0262af
FB
9016 dc->tb = tb;
9017
2c0262af 9018 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9019
9020 dc->is_jmp = DISAS_NEXT;
9021 dc->pc = pc_start;
8aaca4c0 9022 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9023 dc->condjmp = 0;
5899f386 9024 dc->thumb = env->thumb;
9ee6e8bb
PB
9025 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
9026 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 9027#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
9028 if (IS_M(env)) {
9029 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
9030 } else {
9031 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
9032 }
b5ff1b31 9033#endif
a7812ae4
PB
9034 cpu_F0s = tcg_temp_new_i32();
9035 cpu_F1s = tcg_temp_new_i32();
9036 cpu_F0d = tcg_temp_new_i64();
9037 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9038 cpu_V0 = cpu_F0d;
9039 cpu_V1 = cpu_F1d;
e677137d 9040 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9041 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9042 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9043 lj = -1;
2e70f6ef
PB
9044 num_insns = 0;
9045 max_insns = tb->cflags & CF_COUNT_MASK;
9046 if (max_insns == 0)
9047 max_insns = CF_COUNT_MASK;
9048
9049 gen_icount_start();
9ee6e8bb
PB
9050 /* Reset the conditional execution bits immediately. This avoids
9051 complications trying to do it at the end of the block. */
9052 if (env->condexec_bits)
8f01245e
PB
9053 {
9054 TCGv tmp = new_tmp();
9055 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9056 store_cpu_field(tmp, condexec_bits);
8f01245e 9057 }
2c0262af 9058 do {
fbb4a2e3
PB
9059#ifdef CONFIG_USER_ONLY
9060 /* Intercept jump to the magic kernel page. */
9061 if (dc->pc >= 0xffff0000) {
9062 /* We always get here via a jump, so we know we are not in a
9063 conditional execution block. */
9064 gen_exception(EXCP_KERNEL_TRAP);
9065 dc->is_jmp = DISAS_UPDATE;
9066 break;
9067 }
9068#else
9ee6e8bb
PB
9069 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9070 /* We always get here via a jump, so we know we are not in a
9071 conditional execution block. */
d9ba4830 9072 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9073 dc->is_jmp = DISAS_UPDATE;
9074 break;
9ee6e8bb
PB
9075 }
9076#endif
9077
72cf2d4f
BS
9078 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9079 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9080 if (bp->pc == dc->pc) {
9ee6e8bb 9081 gen_set_condexec(dc);
5e3f878a 9082 gen_set_pc_im(dc->pc);
d9ba4830 9083 gen_exception(EXCP_DEBUG);
1fddef4b 9084 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9085 /* Advance PC so that clearing the breakpoint will
9086 invalidate this TB. */
9087 dc->pc += 2;
9088 goto done_generating;
1fddef4b
FB
9089 break;
9090 }
9091 }
9092 }
2c0262af
FB
9093 if (search_pc) {
9094 j = gen_opc_ptr - gen_opc_buf;
9095 if (lj < j) {
9096 lj++;
9097 while (lj < j)
9098 gen_opc_instr_start[lj++] = 0;
9099 }
0fa85d43 9100 gen_opc_pc[lj] = dc->pc;
2c0262af 9101 gen_opc_instr_start[lj] = 1;
2e70f6ef 9102 gen_opc_icount[lj] = num_insns;
2c0262af 9103 }
e50e6a20 9104
2e70f6ef
PB
9105 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9106 gen_io_start();
9107
9ee6e8bb
PB
9108 if (env->thumb) {
9109 disas_thumb_insn(env, dc);
9110 if (dc->condexec_mask) {
9111 dc->condexec_cond = (dc->condexec_cond & 0xe)
9112 | ((dc->condexec_mask >> 4) & 1);
9113 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9114 if (dc->condexec_mask == 0) {
9115 dc->condexec_cond = 0;
9116 }
9117 }
9118 } else {
9119 disas_arm_insn(env, dc);
9120 }
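/* A minimal stand-alone sketch (ours, not QEMU code) of the ITSTATE advance
 * performed above; cond and mask correspond to dc->condexec_cond and
 * dc->condexec_mask, and the hypothetical helper name is our own. */
static void it_advance(int *cond, int *mask)
{
    /* Next instruction's condition: the base condition bits with the low
     * bit supplied by the top bit of the mask. */
    *cond = (*cond & 0xe) | ((*mask >> 4) & 1);
    /* Consume one mask bit; an empty mask means the IT block has ended and
     * subsequent instructions execute unconditionally. */
    *mask = (*mask << 1) & 0x1f;
    if (*mask == 0) {
        *cond = 0;
    }
}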
b26eefb6
PB
9121 if (num_temps) {
9122 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9123 num_temps = 0;
9124 }
e50e6a20
FB
9125
9126 if (dc->condjmp && !dc->is_jmp) {
9127 gen_set_label(dc->condlabel);
9128 dc->condjmp = 0;
9129 }
aaf2d97d 9130 /* Translation stops when a conditional branch is encountered.
e50e6a20 9131 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9132 * Also stop translation when a page boundary is reached. This
bf20dc07 9133 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9134 num_insns++;
1fddef4b
FB
9135 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9136 !env->singlestep_enabled &&
1b530a6d 9137 !singlestep &&
2e70f6ef
PB
9138 dc->pc < next_page_start &&
9139 num_insns < max_insns);
9140
9141 if (tb->cflags & CF_LAST_IO) {
9142 if (dc->condjmp) {
9143 /* FIXME: This can theoretically happen with self-modifying
9144 code. */
9145 cpu_abort(env, "IO on conditional branch instruction");
9146 }
9147 gen_io_end();
9148 }
9ee6e8bb 9149
b5ff1b31 9150 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9151 instruction was a conditional branch or trap, and the PC has
9152 already been written. */
551bd27f 9153 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9154 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9155 if (dc->condjmp) {
9ee6e8bb
PB
9156 gen_set_condexec(dc);
9157 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9158 gen_exception(EXCP_SWI);
9ee6e8bb 9159 } else {
d9ba4830 9160 gen_exception(EXCP_DEBUG);
9ee6e8bb 9161 }
e50e6a20
FB
9162 gen_set_label(dc->condlabel);
9163 }
9164 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9165 gen_set_pc_im(dc->pc);
e50e6a20 9166 dc->condjmp = 0;
8aaca4c0 9167 }
9ee6e8bb
PB
9168 gen_set_condexec(dc);
9169 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9170 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9171 } else {
9172 /* FIXME: Single stepping a WFI insn will not halt
9173 the CPU. */
d9ba4830 9174 gen_exception(EXCP_DEBUG);
9ee6e8bb 9175 }
8aaca4c0 9176 } else {
9ee6e8bb
PB
9177 /* While branches must always occur at the end of an IT block,
9178 there are a few other things that can cause us to terminate
9179 the TB in the middle of an IT block:
9180 - Exception generating instructions (bkpt, swi, undefined).
9181 - Page boundaries.
9182 - Hardware watchpoints.
9183 Hardware breakpoints have already been handled and skip this code.
9184 */
9185 gen_set_condexec(dc);
8aaca4c0 9186 switch(dc->is_jmp) {
8aaca4c0 9187 case DISAS_NEXT:
6e256c93 9188 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9189 break;
9190 default:
9191 case DISAS_JUMP:
9192 case DISAS_UPDATE:
9193 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9194 tcg_gen_exit_tb(0);
8aaca4c0
FB
9195 break;
9196 case DISAS_TB_JUMP:
9197 /* nothing more to generate */
9198 break;
9ee6e8bb 9199 case DISAS_WFI:
d9ba4830 9200 gen_helper_wfi();
9ee6e8bb
PB
9201 break;
9202 case DISAS_SWI:
d9ba4830 9203 gen_exception(EXCP_SWI);
9ee6e8bb 9204 break;
8aaca4c0 9205 }
e50e6a20
FB
9206 if (dc->condjmp) {
9207 gen_set_label(dc->condlabel);
9ee6e8bb 9208 gen_set_condexec(dc);
6e256c93 9209 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9210 dc->condjmp = 0;
9211 }
2c0262af 9212 }
2e70f6ef 9213
9ee6e8bb 9214done_generating:
2e70f6ef 9215 gen_icount_end(tb, num_insns);
2c0262af
FB
9216 *gen_opc_ptr = INDEX_op_end;
9217
9218#ifdef DEBUG_DISAS
8fec2b8c 9219 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9220 qemu_log("----------------\n");
9221 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9222 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
9223 qemu_log("\n");
2c0262af
FB
9224 }
9225#endif
b5ff1b31
FB
9226 if (search_pc) {
9227 j = gen_opc_ptr - gen_opc_buf;
9228 lj++;
9229 while (lj <= j)
9230 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9231 } else {
2c0262af 9232 tb->size = dc->pc - pc_start;
2e70f6ef 9233 tb->icount = num_insns;
b5ff1b31 9234 }
2c0262af
FB
9235}
9236
2cfc5f17 9237void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9238{
2cfc5f17 9239 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9240}
9241
2cfc5f17 9242void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9243{
2cfc5f17 9244 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9245}
9246
b5ff1b31
FB
9247static const char *cpu_mode_names[16] = {
9248 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9249 "???", "???", "???", "und", "???", "???", "???", "sys"
9250};
9ee6e8bb 9251
5fafdf24 9252void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
9253 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
9254 int flags)
2c0262af
FB
9255{
9256 int i;
06e80fc9 9257#if 0
bc380d17 9258 union {
b7bcbe95
FB
9259 uint32_t i;
9260 float s;
9261 } s0, s1;
9262 CPU_DoubleU d;
a94a6abf
PB
9263 /* ??? This assumes float64 and double have the same layout.
9264 Oh well, it's only debug dumps. */
9265 union {
9266 float64 f64;
9267 double d;
9268 } d0;
06e80fc9 9269#endif
b5ff1b31 9270 uint32_t psr;
2c0262af
FB
9271
9272 for(i=0;i<16;i++) {
7fe48483 9273 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9274 if ((i % 4) == 3)
7fe48483 9275 cpu_fprintf(f, "\n");
2c0262af 9276 else
7fe48483 9277 cpu_fprintf(f, " ");
2c0262af 9278 }
b5ff1b31 9279 psr = cpsr_read(env);
687fa640
TS
9280 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9281 psr,
b5ff1b31
FB
9282 psr & (1 << 31) ? 'N' : '-',
9283 psr & (1 << 30) ? 'Z' : '-',
9284 psr & (1 << 29) ? 'C' : '-',
9285 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9286 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9287 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9288
5e3f878a 9289#if 0
b7bcbe95 9290 for (i = 0; i < 16; i++) {
8e96005d
FB
9291 d.d = env->vfp.regs[i];
9292 s0.i = d.l.lower;
9293 s1.i = d.l.upper;
a94a6abf
PB
9294 d0.f64 = d.d;
9295 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9296 i * 2, (int)s0.i, s0.s,
a94a6abf 9297 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9298 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9299 d0.d);
b7bcbe95 9300 }
40f137e1 9301 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9302#endif
2c0262af 9303}
a6b025d3 9304
d2856f1a
AJ
9305void gen_pc_load(CPUState *env, TranslationBlock *tb,
9306 unsigned long searched_pc, int pc_pos, void *puc)
9307{
9308 env->regs[15] = gen_opc_pc[pc_pos];
9309}