]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
target-arm: convert disas_neon_ls_insn not to use cpu_T
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
28#include "exec-all.h"
29#include "disas.h"
57fec1fe 30#include "tcg-op.h"
79383c9c 31#include "qemu-log.h"
1497c961 32
a7812ae4 33#include "helpers.h"
1497c961 34#define GEN_HELPER 1
b26eefb6 35#include "helpers.h"
2c0262af 36
9ee6e8bb
PB
37#define ENABLE_ARCH_5J 0
38#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 42
86753403 43#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 44
2c0262af
FB
45/* internal defines */
46typedef struct DisasContext {
0fa85d43 47 target_ulong pc;
2c0262af 48 int is_jmp;
e50e6a20
FB
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
9ee6e8bb
PB
53 /* Thumb-2 condtional execution bits. */
54 int condexec_mask;
55 int condexec_cond;
2c0262af 56 struct TranslationBlock *tb;
8aaca4c0 57 int singlestep_enabled;
5899f386 58 int thumb;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
2c0262af
FB
62} DisasContext;
63
b5ff1b31
FB
64#if defined(CONFIG_USER_ONLY)
65#define IS_USER(s) 1
66#else
67#define IS_USER(s) (s->user)
68#endif
69
9ee6e8bb
PB
70/* These instructions trap after executing, so defer them until after the
71 conditional executions state has been updated. */
72#define DISAS_WFI 4
73#define DISAS_SWI 5
2c0262af 74
a7812ae4 75static TCGv_ptr cpu_env;
ad69471c 76/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 77static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 78static TCGv_i32 cpu_R[16];
ad69471c 79
b26eefb6 80/* FIXME: These should be removed. */
8f8e3aa4 81static TCGv cpu_T[2];
a7812ae4
PB
82static TCGv cpu_F0s, cpu_F1s;
83static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 84
2e70f6ef
PB
85#define ICOUNT_TEMP cpu_T[0]
86#include "gen-icount.h"
87
155c3eac
FN
88static const char *regnames[] =
89 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
90 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
91
b26eefb6
PB
92/* initialize TCG globals. */
93void arm_translate_init(void)
94{
155c3eac
FN
95 int i;
96
a7812ae4
PB
97 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
98
99 cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
100 cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
b26eefb6 101
155c3eac
FN
102 for (i = 0; i < 16; i++) {
103 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
104 offsetof(CPUState, regs[i]),
105 regnames[i]);
106 }
107
a7812ae4
PB
108#define GEN_HELPER 2
109#include "helpers.h"
b26eefb6
PB
110}
111
b26eefb6 112static int num_temps;
b26eefb6
PB
113
114/* Allocate a temporary variable. */
a7812ae4 115static TCGv_i32 new_tmp(void)
b26eefb6 116{
12edd4f2
FN
117 num_temps++;
118 return tcg_temp_new_i32();
b26eefb6
PB
119}
120
121/* Release a temporary variable. */
122static void dead_tmp(TCGv tmp)
123{
12edd4f2 124 tcg_temp_free(tmp);
b26eefb6 125 num_temps--;
b26eefb6
PB
126}
127
d9ba4830
PB
128static inline TCGv load_cpu_offset(int offset)
129{
130 TCGv tmp = new_tmp();
131 tcg_gen_ld_i32(tmp, cpu_env, offset);
132 return tmp;
133}
134
135#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
136
137static inline void store_cpu_offset(TCGv var, int offset)
138{
139 tcg_gen_st_i32(var, cpu_env, offset);
140 dead_tmp(var);
141}
142
143#define store_cpu_field(var, name) \
144 store_cpu_offset(var, offsetof(CPUState, name))
145
b26eefb6
PB
146/* Set a variable to the value of a CPU register. */
147static void load_reg_var(DisasContext *s, TCGv var, int reg)
148{
149 if (reg == 15) {
150 uint32_t addr;
151 /* normaly, since we updated PC, we need only to add one insn */
152 if (s->thumb)
153 addr = (long)s->pc + 2;
154 else
155 addr = (long)s->pc + 4;
156 tcg_gen_movi_i32(var, addr);
157 } else {
155c3eac 158 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
159 }
160}
161
162/* Create a new temporary and set it to the value of a CPU register. */
163static inline TCGv load_reg(DisasContext *s, int reg)
164{
165 TCGv tmp = new_tmp();
166 load_reg_var(s, tmp, reg);
167 return tmp;
168}
169
170/* Set a CPU register. The source must be a temporary and will be
171 marked as dead. */
172static void store_reg(DisasContext *s, int reg, TCGv var)
173{
174 if (reg == 15) {
175 tcg_gen_andi_i32(var, var, ~1);
176 s->is_jmp = DISAS_JUMP;
177 }
155c3eac 178 tcg_gen_mov_i32(cpu_R[reg], var);
b26eefb6
PB
179 dead_tmp(var);
180}
181
182
183/* Basic operations. */
184#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
b26eefb6
PB
185#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
186#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
b26eefb6
PB
187
188#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
189#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6
PB
190
191#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 192#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 193#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
b26eefb6 194
b26eefb6
PB
195#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
196#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
b26eefb6
PB
197
198/* Value extensions. */
86831435
PB
199#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
200#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
201#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
202#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
203
1497c961
PB
204#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
205#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 206
b26eefb6 207
d9ba4830
PB
208#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
209/* Set NZCV flags from the high 4 bits of var. */
210#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
211
212static void gen_exception(int excp)
213{
214 TCGv tmp = new_tmp();
215 tcg_gen_movi_i32(tmp, excp);
216 gen_helper_exception(tmp);
217 dead_tmp(tmp);
218}
219
3670669c
PB
220static void gen_smul_dual(TCGv a, TCGv b)
221{
222 TCGv tmp1 = new_tmp();
223 TCGv tmp2 = new_tmp();
22478e79
AZ
224 tcg_gen_ext16s_i32(tmp1, a);
225 tcg_gen_ext16s_i32(tmp2, b);
3670669c
PB
226 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
227 dead_tmp(tmp2);
228 tcg_gen_sari_i32(a, a, 16);
229 tcg_gen_sari_i32(b, b, 16);
230 tcg_gen_mul_i32(b, b, a);
231 tcg_gen_mov_i32(a, tmp1);
232 dead_tmp(tmp1);
233}
234
235/* Byteswap each halfword. */
236static void gen_rev16(TCGv var)
237{
238 TCGv tmp = new_tmp();
239 tcg_gen_shri_i32(tmp, var, 8);
240 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
241 tcg_gen_shli_i32(var, var, 8);
242 tcg_gen_andi_i32(var, var, 0xff00ff00);
243 tcg_gen_or_i32(var, var, tmp);
244 dead_tmp(tmp);
245}
246
247/* Byteswap low halfword and sign extend. */
248static void gen_revsh(TCGv var)
249{
250 TCGv tmp = new_tmp();
251 tcg_gen_shri_i32(tmp, var, 8);
252 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
253 tcg_gen_shli_i32(var, var, 8);
254 tcg_gen_ext8s_i32(var, var);
255 tcg_gen_or_i32(var, var, tmp);
256 dead_tmp(tmp);
257}
258
259/* Unsigned bitfield extract. */
260static void gen_ubfx(TCGv var, int shift, uint32_t mask)
261{
262 if (shift)
263 tcg_gen_shri_i32(var, var, shift);
264 tcg_gen_andi_i32(var, var, mask);
265}
266
267/* Signed bitfield extract. */
268static void gen_sbfx(TCGv var, int shift, int width)
269{
270 uint32_t signbit;
271
272 if (shift)
273 tcg_gen_sari_i32(var, var, shift);
274 if (shift + width < 32) {
275 signbit = 1u << (width - 1);
276 tcg_gen_andi_i32(var, var, (1u << width) - 1);
277 tcg_gen_xori_i32(var, var, signbit);
278 tcg_gen_subi_i32(var, var, signbit);
279 }
280}
281
282/* Bitfield insertion. Insert val into base. Clobbers base and val. */
283static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
284{
3670669c 285 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
286 tcg_gen_shli_i32(val, val, shift);
287 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
288 tcg_gen_or_i32(dest, base, val);
289}
290
d9ba4830
PB
291/* Round the top 32 bits of a 64-bit value. */
292static void gen_roundqd(TCGv a, TCGv b)
3670669c 293{
d9ba4830
PB
294 tcg_gen_shri_i32(a, a, 31);
295 tcg_gen_add_i32(a, a, b);
3670669c
PB
296}
297
8f01245e
PB
298/* FIXME: Most targets have native widening multiplication.
299 It would be good to use that instead of a full wide multiply. */
5e3f878a 300/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 301static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 302{
a7812ae4
PB
303 TCGv_i64 tmp1 = tcg_temp_new_i64();
304 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
305
306 tcg_gen_extu_i32_i64(tmp1, a);
307 dead_tmp(a);
308 tcg_gen_extu_i32_i64(tmp2, b);
309 dead_tmp(b);
310 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
311 return tmp1;
312}
313
a7812ae4 314static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 315{
a7812ae4
PB
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
318
319 tcg_gen_ext_i32_i64(tmp1, a);
320 dead_tmp(a);
321 tcg_gen_ext_i32_i64(tmp2, b);
322 dead_tmp(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
324 return tmp1;
325}
326
8f01245e 327/* Unsigned 32x32->64 multiply. */
396e467c 328static void gen_mull(TCGv a, TCGv b)
8f01245e 329{
a7812ae4
PB
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e 332
396e467c
FN
333 tcg_gen_extu_i32_i64(tmp1, a);
334 tcg_gen_extu_i32_i64(tmp2, b);
8f01245e 335 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
396e467c 336 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 337 tcg_gen_shri_i64(tmp1, tmp1, 32);
396e467c 338 tcg_gen_trunc_i64_i32(b, tmp1);
8f01245e
PB
339}
340
341/* Signed 32x32->64 multiply. */
d9ba4830 342static void gen_imull(TCGv a, TCGv b)
8f01245e 343{
a7812ae4
PB
344 TCGv_i64 tmp1 = tcg_temp_new_i64();
345 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e 346
d9ba4830
PB
347 tcg_gen_ext_i32_i64(tmp1, a);
348 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 350 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 351 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
352 tcg_gen_trunc_i64_i32(b, tmp1);
353}
d9ba4830 354
8f01245e
PB
355/* Swap low and high halfwords. */
356static void gen_swap_half(TCGv var)
357{
358 TCGv tmp = new_tmp();
359 tcg_gen_shri_i32(tmp, var, 16);
360 tcg_gen_shli_i32(var, var, 16);
361 tcg_gen_or_i32(var, var, tmp);
3670669c 362 dead_tmp(tmp);
8f01245e
PB
363}
364
b26eefb6
PB
365/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
366 tmp = (t0 ^ t1) & 0x8000;
367 t0 &= ~0x8000;
368 t1 &= ~0x8000;
369 t0 = (t0 + t1) ^ tmp;
370 */
371
372static void gen_add16(TCGv t0, TCGv t1)
373{
374 TCGv tmp = new_tmp();
375 tcg_gen_xor_i32(tmp, t0, t1);
376 tcg_gen_andi_i32(tmp, tmp, 0x8000);
377 tcg_gen_andi_i32(t0, t0, ~0x8000);
378 tcg_gen_andi_i32(t1, t1, ~0x8000);
379 tcg_gen_add_i32(t0, t0, t1);
380 tcg_gen_xor_i32(t0, t0, tmp);
381 dead_tmp(tmp);
382 dead_tmp(t1);
383}
384
9a119ff6
PB
385#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
386
b26eefb6
PB
387/* Set CF to the top bit of var. */
388static void gen_set_CF_bit31(TCGv var)
389{
390 TCGv tmp = new_tmp();
391 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 392 gen_set_CF(tmp);
b26eefb6
PB
393 dead_tmp(tmp);
394}
395
396/* Set N and Z flags from var. */
397static inline void gen_logic_CC(TCGv var)
398{
6fbe23d5
PB
399 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
400 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
401}
402
403/* T0 += T1 + CF. */
396e467c 404static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 405{
d9ba4830 406 TCGv tmp;
396e467c 407 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 408 tmp = load_cpu_field(CF);
396e467c 409 tcg_gen_add_i32(t0, t0, tmp);
b26eefb6
PB
410 dead_tmp(tmp);
411}
412
e9bb4aa9
JR
413/* dest = T0 + T1 + CF. */
414static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
415{
416 TCGv tmp;
417 tcg_gen_add_i32(dest, t0, t1);
418 tmp = load_cpu_field(CF);
419 tcg_gen_add_i32(dest, dest, tmp);
420 dead_tmp(tmp);
421}
422
3670669c
PB
423/* dest = T0 - T1 + CF - 1. */
424static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
425{
d9ba4830 426 TCGv tmp;
3670669c 427 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 428 tmp = load_cpu_field(CF);
3670669c
PB
429 tcg_gen_add_i32(dest, dest, tmp);
430 tcg_gen_subi_i32(dest, dest, 1);
431 dead_tmp(tmp);
432}
433
b26eefb6
PB
434/* T0 &= ~T1. Clobbers T1. */
435/* FIXME: Implement bic natively. */
8f8e3aa4
PB
436static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
437{
438 TCGv tmp = new_tmp();
439 tcg_gen_not_i32(tmp, t1);
440 tcg_gen_and_i32(dest, t0, tmp);
441 dead_tmp(tmp);
442}
b26eefb6
PB
443static inline void gen_op_bicl_T0_T1(void)
444{
445 gen_op_notl_T1();
446 gen_op_andl_T0_T1();
447}
448
ad69471c
PB
449/* FIXME: Implement this natively. */
450#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
451
b26eefb6
PB
452/* FIXME: Implement this natively. */
453static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
454{
455 TCGv tmp;
456
457 if (i == 0)
458 return;
459
460 tmp = new_tmp();
461 tcg_gen_shri_i32(tmp, t1, i);
462 tcg_gen_shli_i32(t1, t1, 32 - i);
463 tcg_gen_or_i32(t0, t1, tmp);
464 dead_tmp(tmp);
465}
466
9a119ff6 467static void shifter_out_im(TCGv var, int shift)
b26eefb6 468{
9a119ff6
PB
469 TCGv tmp = new_tmp();
470 if (shift == 0) {
471 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 472 } else {
9a119ff6 473 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 474 if (shift != 31)
9a119ff6
PB
475 tcg_gen_andi_i32(tmp, tmp, 1);
476 }
477 gen_set_CF(tmp);
478 dead_tmp(tmp);
479}
b26eefb6 480
9a119ff6
PB
481/* Shift by immediate. Includes special handling for shift == 0. */
482static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
483{
484 switch (shiftop) {
485 case 0: /* LSL */
486 if (shift != 0) {
487 if (flags)
488 shifter_out_im(var, 32 - shift);
489 tcg_gen_shli_i32(var, var, shift);
490 }
491 break;
492 case 1: /* LSR */
493 if (shift == 0) {
494 if (flags) {
495 tcg_gen_shri_i32(var, var, 31);
496 gen_set_CF(var);
497 }
498 tcg_gen_movi_i32(var, 0);
499 } else {
500 if (flags)
501 shifter_out_im(var, shift - 1);
502 tcg_gen_shri_i32(var, var, shift);
503 }
504 break;
505 case 2: /* ASR */
506 if (shift == 0)
507 shift = 32;
508 if (flags)
509 shifter_out_im(var, shift - 1);
510 if (shift == 32)
511 shift = 31;
512 tcg_gen_sari_i32(var, var, shift);
513 break;
514 case 3: /* ROR/RRX */
515 if (shift != 0) {
516 if (flags)
517 shifter_out_im(var, shift - 1);
518 tcg_gen_rori_i32(var, var, shift); break;
519 } else {
d9ba4830 520 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
521 if (flags)
522 shifter_out_im(var, 0);
523 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
524 tcg_gen_shli_i32(tmp, tmp, 31);
525 tcg_gen_or_i32(var, var, tmp);
526 dead_tmp(tmp);
b26eefb6
PB
527 }
528 }
529};
530
8984bd2e
PB
531static inline void gen_arm_shift_reg(TCGv var, int shiftop,
532 TCGv shift, int flags)
533{
534 if (flags) {
535 switch (shiftop) {
536 case 0: gen_helper_shl_cc(var, var, shift); break;
537 case 1: gen_helper_shr_cc(var, var, shift); break;
538 case 2: gen_helper_sar_cc(var, var, shift); break;
539 case 3: gen_helper_ror_cc(var, var, shift); break;
540 }
541 } else {
542 switch (shiftop) {
543 case 0: gen_helper_shl(var, var, shift); break;
544 case 1: gen_helper_shr(var, var, shift); break;
545 case 2: gen_helper_sar(var, var, shift); break;
546 case 3: gen_helper_ror(var, var, shift); break;
547 }
548 }
549 dead_tmp(shift);
550}
551
6ddbc6e4
PB
552#define PAS_OP(pfx) \
553 switch (op2) { \
554 case 0: gen_pas_helper(glue(pfx,add16)); break; \
555 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
556 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
557 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
558 case 4: gen_pas_helper(glue(pfx,add8)); break; \
559 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
560 }
d9ba4830 561static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 562{
a7812ae4 563 TCGv_ptr tmp;
6ddbc6e4
PB
564
565 switch (op1) {
566#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
567 case 1:
a7812ae4 568 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
569 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
570 PAS_OP(s)
571 break;
572 case 5:
a7812ae4 573 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
575 PAS_OP(u)
576 break;
577#undef gen_pas_helper
578#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
579 case 2:
580 PAS_OP(q);
581 break;
582 case 3:
583 PAS_OP(sh);
584 break;
585 case 6:
586 PAS_OP(uq);
587 break;
588 case 7:
589 PAS_OP(uh);
590 break;
591#undef gen_pas_helper
592 }
593}
9ee6e8bb
PB
594#undef PAS_OP
595
6ddbc6e4
PB
596/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
597#define PAS_OP(pfx) \
598 switch (op2) { \
599 case 0: gen_pas_helper(glue(pfx,add8)); break; \
600 case 1: gen_pas_helper(glue(pfx,add16)); break; \
601 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
602 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
603 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
604 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
605 }
d9ba4830 606static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 607{
a7812ae4 608 TCGv_ptr tmp;
6ddbc6e4
PB
609
610 switch (op1) {
611#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
612 case 0:
a7812ae4 613 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
614 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
615 PAS_OP(s)
616 break;
617 case 4:
a7812ae4 618 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
619 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
620 PAS_OP(u)
621 break;
622#undef gen_pas_helper
623#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
624 case 1:
625 PAS_OP(q);
626 break;
627 case 2:
628 PAS_OP(sh);
629 break;
630 case 5:
631 PAS_OP(uq);
632 break;
633 case 6:
634 PAS_OP(uh);
635 break;
636#undef gen_pas_helper
637 }
638}
9ee6e8bb
PB
639#undef PAS_OP
640
d9ba4830
PB
641static void gen_test_cc(int cc, int label)
642{
643 TCGv tmp;
644 TCGv tmp2;
d9ba4830
PB
645 int inv;
646
d9ba4830
PB
647 switch (cc) {
648 case 0: /* eq: Z */
6fbe23d5 649 tmp = load_cpu_field(ZF);
cb63669a 650 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
651 break;
652 case 1: /* ne: !Z */
6fbe23d5 653 tmp = load_cpu_field(ZF);
cb63669a 654 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
655 break;
656 case 2: /* cs: C */
657 tmp = load_cpu_field(CF);
cb63669a 658 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
659 break;
660 case 3: /* cc: !C */
661 tmp = load_cpu_field(CF);
cb63669a 662 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
663 break;
664 case 4: /* mi: N */
6fbe23d5 665 tmp = load_cpu_field(NF);
cb63669a 666 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
667 break;
668 case 5: /* pl: !N */
6fbe23d5 669 tmp = load_cpu_field(NF);
cb63669a 670 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
671 break;
672 case 6: /* vs: V */
673 tmp = load_cpu_field(VF);
cb63669a 674 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
675 break;
676 case 7: /* vc: !V */
677 tmp = load_cpu_field(VF);
cb63669a 678 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
679 break;
680 case 8: /* hi: C && !Z */
681 inv = gen_new_label();
682 tmp = load_cpu_field(CF);
cb63669a 683 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 684 dead_tmp(tmp);
6fbe23d5 685 tmp = load_cpu_field(ZF);
cb63669a 686 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
687 gen_set_label(inv);
688 break;
689 case 9: /* ls: !C || Z */
690 tmp = load_cpu_field(CF);
cb63669a 691 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 692 dead_tmp(tmp);
6fbe23d5 693 tmp = load_cpu_field(ZF);
cb63669a 694 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
695 break;
696 case 10: /* ge: N == V -> N ^ V == 0 */
697 tmp = load_cpu_field(VF);
6fbe23d5 698 tmp2 = load_cpu_field(NF);
d9ba4830
PB
699 tcg_gen_xor_i32(tmp, tmp, tmp2);
700 dead_tmp(tmp2);
cb63669a 701 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
702 break;
703 case 11: /* lt: N != V -> N ^ V != 0 */
704 tmp = load_cpu_field(VF);
6fbe23d5 705 tmp2 = load_cpu_field(NF);
d9ba4830
PB
706 tcg_gen_xor_i32(tmp, tmp, tmp2);
707 dead_tmp(tmp2);
cb63669a 708 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
709 break;
710 case 12: /* gt: !Z && N == V */
711 inv = gen_new_label();
6fbe23d5 712 tmp = load_cpu_field(ZF);
cb63669a 713 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
714 dead_tmp(tmp);
715 tmp = load_cpu_field(VF);
6fbe23d5 716 tmp2 = load_cpu_field(NF);
d9ba4830
PB
717 tcg_gen_xor_i32(tmp, tmp, tmp2);
718 dead_tmp(tmp2);
cb63669a 719 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
720 gen_set_label(inv);
721 break;
722 case 13: /* le: Z || N != V */
6fbe23d5 723 tmp = load_cpu_field(ZF);
cb63669a 724 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
725 dead_tmp(tmp);
726 tmp = load_cpu_field(VF);
6fbe23d5 727 tmp2 = load_cpu_field(NF);
d9ba4830
PB
728 tcg_gen_xor_i32(tmp, tmp, tmp2);
729 dead_tmp(tmp2);
cb63669a 730 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
731 break;
732 default:
733 fprintf(stderr, "Bad condition code 0x%x\n", cc);
734 abort();
735 }
736 dead_tmp(tmp);
737}
2c0262af 738
b1d8e52e 739static const uint8_t table_logic_cc[16] = {
2c0262af
FB
740 1, /* and */
741 1, /* xor */
742 0, /* sub */
743 0, /* rsb */
744 0, /* add */
745 0, /* adc */
746 0, /* sbc */
747 0, /* rsc */
748 1, /* andl */
749 1, /* xorl */
750 0, /* cmp */
751 0, /* cmn */
752 1, /* orr */
753 1, /* mov */
754 1, /* bic */
755 1, /* mvn */
756};
3b46e624 757
d9ba4830
PB
758/* Set PC and Thumb state from an immediate address. */
759static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 760{
b26eefb6 761 TCGv tmp;
99c475ab 762
b26eefb6 763 s->is_jmp = DISAS_UPDATE;
d9ba4830 764 if (s->thumb != (addr & 1)) {
155c3eac 765 tmp = new_tmp();
d9ba4830
PB
766 tcg_gen_movi_i32(tmp, addr & 1);
767 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
155c3eac 768 dead_tmp(tmp);
d9ba4830 769 }
155c3eac 770 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
771}
772
773/* Set PC and Thumb state from var. var is marked as dead. */
774static inline void gen_bx(DisasContext *s, TCGv var)
775{
d9ba4830 776 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
777 tcg_gen_andi_i32(cpu_R[15], var, ~1);
778 tcg_gen_andi_i32(var, var, 1);
779 store_cpu_field(var, thumb);
d9ba4830
PB
780}
781
21aeb343
JR
782/* Variant of store_reg which uses branch&exchange logic when storing
783 to r15 in ARM architecture v7 and above. The source must be a temporary
784 and will be marked as dead. */
785static inline void store_reg_bx(CPUState *env, DisasContext *s,
786 int reg, TCGv var)
787{
788 if (reg == 15 && ENABLE_ARCH_7) {
789 gen_bx(s, var);
790 } else {
791 store_reg(s, reg, var);
792 }
793}
794
b0109805
PB
795static inline TCGv gen_ld8s(TCGv addr, int index)
796{
797 TCGv tmp = new_tmp();
798 tcg_gen_qemu_ld8s(tmp, addr, index);
799 return tmp;
800}
801static inline TCGv gen_ld8u(TCGv addr, int index)
802{
803 TCGv tmp = new_tmp();
804 tcg_gen_qemu_ld8u(tmp, addr, index);
805 return tmp;
806}
807static inline TCGv gen_ld16s(TCGv addr, int index)
808{
809 TCGv tmp = new_tmp();
810 tcg_gen_qemu_ld16s(tmp, addr, index);
811 return tmp;
812}
813static inline TCGv gen_ld16u(TCGv addr, int index)
814{
815 TCGv tmp = new_tmp();
816 tcg_gen_qemu_ld16u(tmp, addr, index);
817 return tmp;
818}
819static inline TCGv gen_ld32(TCGv addr, int index)
820{
821 TCGv tmp = new_tmp();
822 tcg_gen_qemu_ld32u(tmp, addr, index);
823 return tmp;
824}
825static inline void gen_st8(TCGv val, TCGv addr, int index)
826{
827 tcg_gen_qemu_st8(val, addr, index);
828 dead_tmp(val);
829}
830static inline void gen_st16(TCGv val, TCGv addr, int index)
831{
832 tcg_gen_qemu_st16(val, addr, index);
833 dead_tmp(val);
834}
835static inline void gen_st32(TCGv val, TCGv addr, int index)
836{
837 tcg_gen_qemu_st32(val, addr, index);
838 dead_tmp(val);
839}
b5ff1b31 840
2c0262af
FB
841static inline void gen_movl_T0_reg(DisasContext *s, int reg)
842{
b26eefb6 843 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
844}
845
846static inline void gen_movl_T1_reg(DisasContext *s, int reg)
847{
b26eefb6 848 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
849}
850
5e3f878a
PB
851static inline void gen_set_pc_im(uint32_t val)
852{
155c3eac 853 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
854}
855
2c0262af
FB
856static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
857{
b26eefb6
PB
858 TCGv tmp;
859 if (reg == 15) {
860 tmp = new_tmp();
861 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
862 } else {
863 tmp = cpu_T[t];
864 }
155c3eac 865 tcg_gen_mov_i32(cpu_R[reg], tmp);
2c0262af 866 if (reg == 15) {
b26eefb6 867 dead_tmp(tmp);
2c0262af
FB
868 s->is_jmp = DISAS_JUMP;
869 }
870}
871
872static inline void gen_movl_reg_T0(DisasContext *s, int reg)
873{
874 gen_movl_reg_TN(s, reg, 0);
875}
876
877static inline void gen_movl_reg_T1(DisasContext *s, int reg)
878{
879 gen_movl_reg_TN(s, reg, 1);
880}
881
b5ff1b31
FB
882/* Force a TB lookup after an instruction that changes the CPU state. */
883static inline void gen_lookup_tb(DisasContext *s)
884{
a6445c52 885 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
886 s->is_jmp = DISAS_UPDATE;
887}
888
b0109805
PB
889static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
890 TCGv var)
2c0262af 891{
1e8d4eec 892 int val, rm, shift, shiftop;
b26eefb6 893 TCGv offset;
2c0262af
FB
894
895 if (!(insn & (1 << 25))) {
896 /* immediate */
897 val = insn & 0xfff;
898 if (!(insn & (1 << 23)))
899 val = -val;
537730b9 900 if (val != 0)
b0109805 901 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
902 } else {
903 /* shift/register */
904 rm = (insn) & 0xf;
905 shift = (insn >> 7) & 0x1f;
1e8d4eec 906 shiftop = (insn >> 5) & 3;
b26eefb6 907 offset = load_reg(s, rm);
9a119ff6 908 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 909 if (!(insn & (1 << 23)))
b0109805 910 tcg_gen_sub_i32(var, var, offset);
2c0262af 911 else
b0109805 912 tcg_gen_add_i32(var, var, offset);
b26eefb6 913 dead_tmp(offset);
2c0262af
FB
914 }
915}
916
191f9a93 917static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 918 int extra, TCGv var)
2c0262af
FB
919{
920 int val, rm;
b26eefb6 921 TCGv offset;
3b46e624 922
2c0262af
FB
923 if (insn & (1 << 22)) {
924 /* immediate */
925 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
926 if (!(insn & (1 << 23)))
927 val = -val;
18acad92 928 val += extra;
537730b9 929 if (val != 0)
b0109805 930 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
931 } else {
932 /* register */
191f9a93 933 if (extra)
b0109805 934 tcg_gen_addi_i32(var, var, extra);
2c0262af 935 rm = (insn) & 0xf;
b26eefb6 936 offset = load_reg(s, rm);
2c0262af 937 if (!(insn & (1 << 23)))
b0109805 938 tcg_gen_sub_i32(var, var, offset);
2c0262af 939 else
b0109805 940 tcg_gen_add_i32(var, var, offset);
b26eefb6 941 dead_tmp(offset);
2c0262af
FB
942 }
943}
944
4373f3ce
PB
945#define VFP_OP2(name) \
946static inline void gen_vfp_##name(int dp) \
947{ \
948 if (dp) \
949 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
950 else \
951 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
952}
953
4373f3ce
PB
954VFP_OP2(add)
955VFP_OP2(sub)
956VFP_OP2(mul)
957VFP_OP2(div)
958
959#undef VFP_OP2
960
961static inline void gen_vfp_abs(int dp)
962{
963 if (dp)
964 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
965 else
966 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
967}
968
969static inline void gen_vfp_neg(int dp)
970{
971 if (dp)
972 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
973 else
974 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
975}
976
977static inline void gen_vfp_sqrt(int dp)
978{
979 if (dp)
980 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
981 else
982 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
983}
984
985static inline void gen_vfp_cmp(int dp)
986{
987 if (dp)
988 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
989 else
990 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
991}
992
993static inline void gen_vfp_cmpe(int dp)
994{
995 if (dp)
996 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
997 else
998 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
999}
1000
1001static inline void gen_vfp_F1_ld0(int dp)
1002{
1003 if (dp)
5b340b51 1004 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1005 else
5b340b51 1006 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1007}
1008
1009static inline void gen_vfp_uito(int dp)
1010{
1011 if (dp)
1012 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1013 else
1014 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1015}
1016
1017static inline void gen_vfp_sito(int dp)
1018{
1019 if (dp)
66230e0d 1020 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 1021 else
66230e0d 1022 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
1023}
1024
1025static inline void gen_vfp_toui(int dp)
1026{
1027 if (dp)
1028 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1029 else
1030 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1031}
1032
1033static inline void gen_vfp_touiz(int dp)
1034{
1035 if (dp)
1036 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1037 else
1038 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1039}
1040
1041static inline void gen_vfp_tosi(int dp)
1042{
1043 if (dp)
1044 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1045 else
1046 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1047}
1048
1049static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1050{
1051 if (dp)
4373f3ce 1052 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1053 else
4373f3ce
PB
1054 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1055}
1056
1057#define VFP_GEN_FIX(name) \
1058static inline void gen_vfp_##name(int dp, int shift) \
1059{ \
1060 if (dp) \
1061 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1062 else \
1063 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
9ee6e8bb 1064}
4373f3ce
PB
1065VFP_GEN_FIX(tosh)
1066VFP_GEN_FIX(tosl)
1067VFP_GEN_FIX(touh)
1068VFP_GEN_FIX(toul)
1069VFP_GEN_FIX(shto)
1070VFP_GEN_FIX(slto)
1071VFP_GEN_FIX(uhto)
1072VFP_GEN_FIX(ulto)
1073#undef VFP_GEN_FIX
9ee6e8bb 1074
b5ff1b31
FB
1075static inline void gen_vfp_ld(DisasContext *s, int dp)
1076{
1077 if (dp)
4373f3ce 1078 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1079 else
4373f3ce 1080 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1081}
1082
1083static inline void gen_vfp_st(DisasContext *s, int dp)
1084{
1085 if (dp)
4373f3ce 1086 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1087 else
4373f3ce 1088 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1089}
1090
8e96005d
FB
1091static inline long
1092vfp_reg_offset (int dp, int reg)
1093{
1094 if (dp)
1095 return offsetof(CPUARMState, vfp.regs[reg]);
1096 else if (reg & 1) {
1097 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1098 + offsetof(CPU_DoubleU, l.upper);
1099 } else {
1100 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1101 + offsetof(CPU_DoubleU, l.lower);
1102 }
1103}
9ee6e8bb
PB
1104
1105/* Return the offset of a 32-bit piece of a NEON register.
1106 zero is the least significant end of the register. */
1107static inline long
1108neon_reg_offset (int reg, int n)
1109{
1110 int sreg;
1111 sreg = reg * 2 + n;
1112 return vfp_reg_offset(0, sreg);
1113}
1114
8f8e3aa4
PB
1115static TCGv neon_load_reg(int reg, int pass)
1116{
1117 TCGv tmp = new_tmp();
1118 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1119 return tmp;
1120}
1121
1122static void neon_store_reg(int reg, int pass, TCGv var)
1123{
1124 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1125 dead_tmp(var);
1126}
1127
a7812ae4 1128static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1129{
1130 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1131}
1132
a7812ae4 1133static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1134{
1135 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1136}
1137
4373f3ce
PB
1138#define tcg_gen_ld_f32 tcg_gen_ld_i32
1139#define tcg_gen_ld_f64 tcg_gen_ld_i64
1140#define tcg_gen_st_f32 tcg_gen_st_i32
1141#define tcg_gen_st_f64 tcg_gen_st_i64
1142
b7bcbe95
FB
1143static inline void gen_mov_F0_vreg(int dp, int reg)
1144{
1145 if (dp)
4373f3ce 1146 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1147 else
4373f3ce 1148 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1149}
1150
1151static inline void gen_mov_F1_vreg(int dp, int reg)
1152{
1153 if (dp)
4373f3ce 1154 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1155 else
4373f3ce 1156 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1157}
1158
1159static inline void gen_mov_vreg_F0(int dp, int reg)
1160{
1161 if (dp)
4373f3ce 1162 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1163 else
4373f3ce 1164 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1165}
1166
18c9b560
AZ
1167#define ARM_CP_RW_BIT (1 << 20)
1168
a7812ae4 1169static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1170{
1171 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1172}
1173
a7812ae4 1174static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1175{
1176 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1177}
1178
1179static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
1180{
1181 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1182}
1183
1184static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
1185{
1186 tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1187}
1188
1189static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
1190{
1191 tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1192}
1193
1194static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1195{
1196 iwmmxt_store_reg(cpu_M0, rn);
1197}
1198
1199static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1200{
1201 iwmmxt_load_reg(cpu_M0, rn);
1202}
1203
1204static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1205{
1206 iwmmxt_load_reg(cpu_V1, rn);
1207 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1208}
1209
1210static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1211{
1212 iwmmxt_load_reg(cpu_V1, rn);
1213 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1214}
1215
1216static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1217{
1218 iwmmxt_load_reg(cpu_V1, rn);
1219 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1220}
1221
1222#define IWMMXT_OP(name) \
1223static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1224{ \
1225 iwmmxt_load_reg(cpu_V1, rn); \
1226 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1227}
1228
1229#define IWMMXT_OP_ENV(name) \
1230static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1231{ \
1232 iwmmxt_load_reg(cpu_V1, rn); \
1233 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1234}
1235
1236#define IWMMXT_OP_ENV_SIZE(name) \
1237IWMMXT_OP_ENV(name##b) \
1238IWMMXT_OP_ENV(name##w) \
1239IWMMXT_OP_ENV(name##l)
1240
1241#define IWMMXT_OP_ENV1(name) \
1242static inline void gen_op_iwmmxt_##name##_M0(void) \
1243{ \
1244 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1245}
1246
1247IWMMXT_OP(maddsq)
1248IWMMXT_OP(madduq)
1249IWMMXT_OP(sadb)
1250IWMMXT_OP(sadw)
1251IWMMXT_OP(mulslw)
1252IWMMXT_OP(mulshw)
1253IWMMXT_OP(mululw)
1254IWMMXT_OP(muluhw)
1255IWMMXT_OP(macsw)
1256IWMMXT_OP(macuw)
1257
1258IWMMXT_OP_ENV_SIZE(unpackl)
1259IWMMXT_OP_ENV_SIZE(unpackh)
1260
1261IWMMXT_OP_ENV1(unpacklub)
1262IWMMXT_OP_ENV1(unpackluw)
1263IWMMXT_OP_ENV1(unpacklul)
1264IWMMXT_OP_ENV1(unpackhub)
1265IWMMXT_OP_ENV1(unpackhuw)
1266IWMMXT_OP_ENV1(unpackhul)
1267IWMMXT_OP_ENV1(unpacklsb)
1268IWMMXT_OP_ENV1(unpacklsw)
1269IWMMXT_OP_ENV1(unpacklsl)
1270IWMMXT_OP_ENV1(unpackhsb)
1271IWMMXT_OP_ENV1(unpackhsw)
1272IWMMXT_OP_ENV1(unpackhsl)
1273
1274IWMMXT_OP_ENV_SIZE(cmpeq)
1275IWMMXT_OP_ENV_SIZE(cmpgtu)
1276IWMMXT_OP_ENV_SIZE(cmpgts)
1277
1278IWMMXT_OP_ENV_SIZE(mins)
1279IWMMXT_OP_ENV_SIZE(minu)
1280IWMMXT_OP_ENV_SIZE(maxs)
1281IWMMXT_OP_ENV_SIZE(maxu)
1282
1283IWMMXT_OP_ENV_SIZE(subn)
1284IWMMXT_OP_ENV_SIZE(addn)
1285IWMMXT_OP_ENV_SIZE(subu)
1286IWMMXT_OP_ENV_SIZE(addu)
1287IWMMXT_OP_ENV_SIZE(subs)
1288IWMMXT_OP_ENV_SIZE(adds)
1289
1290IWMMXT_OP_ENV(avgb0)
1291IWMMXT_OP_ENV(avgb1)
1292IWMMXT_OP_ENV(avgw0)
1293IWMMXT_OP_ENV(avgw1)
1294
1295IWMMXT_OP(msadb)
1296
1297IWMMXT_OP_ENV(packuw)
1298IWMMXT_OP_ENV(packul)
1299IWMMXT_OP_ENV(packuq)
1300IWMMXT_OP_ENV(packsw)
1301IWMMXT_OP_ENV(packsl)
1302IWMMXT_OP_ENV(packsq)
1303
1304static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1305{
1306 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1307}
1308
1309static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1310{
1311 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1312}
1313
1314static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1315{
1316 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1317}
1318
1319static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
1320{
1321 iwmmxt_load_reg(cpu_V1, rn);
1322 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
1323}
1324
1325static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1326{
1327 TCGv tmp = tcg_const_i32(shift);
1328 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1329}
1330
1331static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
1332{
1333 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1334 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1335 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
1336}
1337
1338static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
1339{
1340 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1341 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1342 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
1343}
1344
1345static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
1346{
1347 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1348 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1349 if (mask != ~0u)
1350 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
1351}
1352
1353static void gen_op_iwmmxt_set_mup(void)
1354{
1355 TCGv tmp;
1356 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1357 tcg_gen_ori_i32(tmp, tmp, 2);
1358 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1359}
1360
1361static void gen_op_iwmmxt_set_cup(void)
1362{
1363 TCGv tmp;
1364 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1365 tcg_gen_ori_i32(tmp, tmp, 1);
1366 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1367}
1368
1369static void gen_op_iwmmxt_setpsr_nz(void)
1370{
1371 TCGv tmp = new_tmp();
1372 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1373 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1374}
1375
1376static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1377{
1378 iwmmxt_load_reg(cpu_V1, rn);
86831435 1379 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1380 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1381}
1382
1383
1384static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1385{
1386 iwmmxt_load_reg(cpu_V0, rn);
1387 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1388 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1389 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1390}
1391
1392static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1393{
36aa55dc 1394 tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
e677137d
PB
1395 iwmmxt_store_reg(cpu_V0, rn);
1396}
1397
18c9b560
AZ
1398static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1399{
1400 int rd;
1401 uint32_t offset;
1402
1403 rd = (insn >> 16) & 0xf;
1404 gen_movl_T1_reg(s, rd);
1405
1406 offset = (insn & 0xff) << ((insn >> 7) & 2);
1407 if (insn & (1 << 24)) {
1408 /* Pre indexed */
1409 if (insn & (1 << 23))
1410 gen_op_addl_T1_im(offset);
1411 else
1412 gen_op_addl_T1_im(-offset);
1413
1414 if (insn & (1 << 21))
1415 gen_movl_reg_T1(s, rd);
1416 } else if (insn & (1 << 21)) {
1417 /* Post indexed */
1418 if (insn & (1 << 23))
1419 gen_op_movl_T0_im(offset);
1420 else
1421 gen_op_movl_T0_im(- offset);
1422 gen_op_addl_T0_T1();
1423 gen_movl_reg_T0(s, rd);
1424 } else if (!(insn & (1 << 23)))
1425 return 1;
1426 return 0;
1427}
1428
1429static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1430{
1431 int rd = (insn >> 0) & 0xf;
1432
1433 if (insn & (1 << 8))
1434 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1435 return 1;
1436 else
1437 gen_op_iwmmxt_movl_T0_wCx(rd);
1438 else
e677137d 1439 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1440
1441 gen_op_movl_T1_im(mask);
1442 gen_op_andl_T0_T1();
1443 return 0;
1444}
1445
1446/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1447 (ie. an undefined instruction). */
1448static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1449{
1450 int rd, wrd;
1451 int rdhi, rdlo, rd0, rd1, i;
b0109805 1452 TCGv tmp;
18c9b560
AZ
1453
1454 if ((insn & 0x0e000e00) == 0x0c000000) {
1455 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1456 wrd = insn & 0xf;
1457 rdlo = (insn >> 12) & 0xf;
1458 rdhi = (insn >> 16) & 0xf;
1459 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1460 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1461 gen_movl_reg_T0(s, rdlo);
1462 gen_movl_reg_T1(s, rdhi);
1463 } else { /* TMCRR */
1464 gen_movl_T0_reg(s, rdlo);
1465 gen_movl_T1_reg(s, rdhi);
e677137d 1466 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1467 gen_op_iwmmxt_set_mup();
1468 }
1469 return 0;
1470 }
1471
1472 wrd = (insn >> 12) & 0xf;
1473 if (gen_iwmmxt_address(s, insn))
1474 return 1;
1475 if (insn & ARM_CP_RW_BIT) {
1476 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1477 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1478 tcg_gen_mov_i32(cpu_T[0], tmp);
1479 dead_tmp(tmp);
18c9b560
AZ
1480 gen_op_iwmmxt_movl_wCx_T0(wrd);
1481 } else {
e677137d
PB
1482 i = 1;
1483 if (insn & (1 << 8)) {
1484 if (insn & (1 << 22)) { /* WLDRD */
1485 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1486 i = 0;
1487 } else { /* WLDRW wRd */
1488 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1489 }
1490 } else {
1491 if (insn & (1 << 22)) { /* WLDRH */
1492 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1493 } else { /* WLDRB */
1494 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1495 }
1496 }
1497 if (i) {
1498 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1499 dead_tmp(tmp);
1500 }
18c9b560
AZ
1501 gen_op_iwmmxt_movq_wRn_M0(wrd);
1502 }
1503 } else {
1504 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1505 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1506 tmp = new_tmp();
1507 tcg_gen_mov_i32(tmp, cpu_T[0]);
1508 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1509 } else {
1510 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1511 tmp = new_tmp();
1512 if (insn & (1 << 8)) {
1513 if (insn & (1 << 22)) { /* WSTRD */
1514 dead_tmp(tmp);
1515 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1516 } else { /* WSTRW wRd */
1517 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1518 gen_st32(tmp, cpu_T[1], IS_USER(s));
1519 }
1520 } else {
1521 if (insn & (1 << 22)) { /* WSTRH */
1522 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1523 gen_st16(tmp, cpu_T[1], IS_USER(s));
1524 } else { /* WSTRB */
1525 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1526 gen_st8(tmp, cpu_T[1], IS_USER(s));
1527 }
1528 }
18c9b560
AZ
1529 }
1530 }
1531 return 0;
1532 }
1533
1534 if ((insn & 0x0f000000) != 0x0e000000)
1535 return 1;
1536
1537 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1538 case 0x000: /* WOR */
1539 wrd = (insn >> 12) & 0xf;
1540 rd0 = (insn >> 0) & 0xf;
1541 rd1 = (insn >> 16) & 0xf;
1542 gen_op_iwmmxt_movq_M0_wRn(rd0);
1543 gen_op_iwmmxt_orq_M0_wRn(rd1);
1544 gen_op_iwmmxt_setpsr_nz();
1545 gen_op_iwmmxt_movq_wRn_M0(wrd);
1546 gen_op_iwmmxt_set_mup();
1547 gen_op_iwmmxt_set_cup();
1548 break;
1549 case 0x011: /* TMCR */
1550 if (insn & 0xf)
1551 return 1;
1552 rd = (insn >> 12) & 0xf;
1553 wrd = (insn >> 16) & 0xf;
1554 switch (wrd) {
1555 case ARM_IWMMXT_wCID:
1556 case ARM_IWMMXT_wCASF:
1557 break;
1558 case ARM_IWMMXT_wCon:
1559 gen_op_iwmmxt_set_cup();
1560 /* Fall through. */
1561 case ARM_IWMMXT_wCSSF:
1562 gen_op_iwmmxt_movl_T0_wCx(wrd);
1563 gen_movl_T1_reg(s, rd);
1564 gen_op_bicl_T0_T1();
1565 gen_op_iwmmxt_movl_wCx_T0(wrd);
1566 break;
1567 case ARM_IWMMXT_wCGR0:
1568 case ARM_IWMMXT_wCGR1:
1569 case ARM_IWMMXT_wCGR2:
1570 case ARM_IWMMXT_wCGR3:
1571 gen_op_iwmmxt_set_cup();
1572 gen_movl_reg_T0(s, rd);
1573 gen_op_iwmmxt_movl_wCx_T0(wrd);
1574 break;
1575 default:
1576 return 1;
1577 }
1578 break;
1579 case 0x100: /* WXOR */
1580 wrd = (insn >> 12) & 0xf;
1581 rd0 = (insn >> 0) & 0xf;
1582 rd1 = (insn >> 16) & 0xf;
1583 gen_op_iwmmxt_movq_M0_wRn(rd0);
1584 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1585 gen_op_iwmmxt_setpsr_nz();
1586 gen_op_iwmmxt_movq_wRn_M0(wrd);
1587 gen_op_iwmmxt_set_mup();
1588 gen_op_iwmmxt_set_cup();
1589 break;
1590 case 0x111: /* TMRC */
1591 if (insn & 0xf)
1592 return 1;
1593 rd = (insn >> 12) & 0xf;
1594 wrd = (insn >> 16) & 0xf;
1595 gen_op_iwmmxt_movl_T0_wCx(wrd);
1596 gen_movl_reg_T0(s, rd);
1597 break;
1598 case 0x300: /* WANDN */
1599 wrd = (insn >> 12) & 0xf;
1600 rd0 = (insn >> 0) & 0xf;
1601 rd1 = (insn >> 16) & 0xf;
1602 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1603 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1604 gen_op_iwmmxt_andq_M0_wRn(rd1);
1605 gen_op_iwmmxt_setpsr_nz();
1606 gen_op_iwmmxt_movq_wRn_M0(wrd);
1607 gen_op_iwmmxt_set_mup();
1608 gen_op_iwmmxt_set_cup();
1609 break;
1610 case 0x200: /* WAND */
1611 wrd = (insn >> 12) & 0xf;
1612 rd0 = (insn >> 0) & 0xf;
1613 rd1 = (insn >> 16) & 0xf;
1614 gen_op_iwmmxt_movq_M0_wRn(rd0);
1615 gen_op_iwmmxt_andq_M0_wRn(rd1);
1616 gen_op_iwmmxt_setpsr_nz();
1617 gen_op_iwmmxt_movq_wRn_M0(wrd);
1618 gen_op_iwmmxt_set_mup();
1619 gen_op_iwmmxt_set_cup();
1620 break;
1621 case 0x810: case 0xa10: /* WMADD */
1622 wrd = (insn >> 12) & 0xf;
1623 rd0 = (insn >> 0) & 0xf;
1624 rd1 = (insn >> 16) & 0xf;
1625 gen_op_iwmmxt_movq_M0_wRn(rd0);
1626 if (insn & (1 << 21))
1627 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1628 else
1629 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1630 gen_op_iwmmxt_movq_wRn_M0(wrd);
1631 gen_op_iwmmxt_set_mup();
1632 break;
1633 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1634 wrd = (insn >> 12) & 0xf;
1635 rd0 = (insn >> 16) & 0xf;
1636 rd1 = (insn >> 0) & 0xf;
1637 gen_op_iwmmxt_movq_M0_wRn(rd0);
1638 switch ((insn >> 22) & 3) {
1639 case 0:
1640 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1641 break;
1642 case 1:
1643 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1644 break;
1645 case 2:
1646 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1647 break;
1648 case 3:
1649 return 1;
1650 }
1651 gen_op_iwmmxt_movq_wRn_M0(wrd);
1652 gen_op_iwmmxt_set_mup();
1653 gen_op_iwmmxt_set_cup();
1654 break;
1655 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1656 wrd = (insn >> 12) & 0xf;
1657 rd0 = (insn >> 16) & 0xf;
1658 rd1 = (insn >> 0) & 0xf;
1659 gen_op_iwmmxt_movq_M0_wRn(rd0);
1660 switch ((insn >> 22) & 3) {
1661 case 0:
1662 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1663 break;
1664 case 1:
1665 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1666 break;
1667 case 2:
1668 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1669 break;
1670 case 3:
1671 return 1;
1672 }
1673 gen_op_iwmmxt_movq_wRn_M0(wrd);
1674 gen_op_iwmmxt_set_mup();
1675 gen_op_iwmmxt_set_cup();
1676 break;
1677 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1678 wrd = (insn >> 12) & 0xf;
1679 rd0 = (insn >> 16) & 0xf;
1680 rd1 = (insn >> 0) & 0xf;
1681 gen_op_iwmmxt_movq_M0_wRn(rd0);
1682 if (insn & (1 << 22))
1683 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1684 else
1685 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1686 if (!(insn & (1 << 20)))
1687 gen_op_iwmmxt_addl_M0_wRn(wrd);
1688 gen_op_iwmmxt_movq_wRn_M0(wrd);
1689 gen_op_iwmmxt_set_mup();
1690 break;
1691 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1692 wrd = (insn >> 12) & 0xf;
1693 rd0 = (insn >> 16) & 0xf;
1694 rd1 = (insn >> 0) & 0xf;
1695 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1696 if (insn & (1 << 21)) {
1697 if (insn & (1 << 20))
1698 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1699 else
1700 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1701 } else {
1702 if (insn & (1 << 20))
1703 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1704 else
1705 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1706 }
18c9b560
AZ
1707 gen_op_iwmmxt_movq_wRn_M0(wrd);
1708 gen_op_iwmmxt_set_mup();
1709 break;
1710 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1711 wrd = (insn >> 12) & 0xf;
1712 rd0 = (insn >> 16) & 0xf;
1713 rd1 = (insn >> 0) & 0xf;
1714 gen_op_iwmmxt_movq_M0_wRn(rd0);
1715 if (insn & (1 << 21))
1716 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1717 else
1718 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1719 if (!(insn & (1 << 20))) {
e677137d
PB
1720 iwmmxt_load_reg(cpu_V1, wrd);
1721 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1722 }
1723 gen_op_iwmmxt_movq_wRn_M0(wrd);
1724 gen_op_iwmmxt_set_mup();
1725 break;
1726 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1727 wrd = (insn >> 12) & 0xf;
1728 rd0 = (insn >> 16) & 0xf;
1729 rd1 = (insn >> 0) & 0xf;
1730 gen_op_iwmmxt_movq_M0_wRn(rd0);
1731 switch ((insn >> 22) & 3) {
1732 case 0:
1733 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1734 break;
1735 case 1:
1736 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1737 break;
1738 case 2:
1739 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1740 break;
1741 case 3:
1742 return 1;
1743 }
1744 gen_op_iwmmxt_movq_wRn_M0(wrd);
1745 gen_op_iwmmxt_set_mup();
1746 gen_op_iwmmxt_set_cup();
1747 break;
1748 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1749 wrd = (insn >> 12) & 0xf;
1750 rd0 = (insn >> 16) & 0xf;
1751 rd1 = (insn >> 0) & 0xf;
1752 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1753 if (insn & (1 << 22)) {
1754 if (insn & (1 << 20))
1755 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1756 else
1757 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1758 } else {
1759 if (insn & (1 << 20))
1760 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1761 else
1762 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1763 }
18c9b560
AZ
1764 gen_op_iwmmxt_movq_wRn_M0(wrd);
1765 gen_op_iwmmxt_set_mup();
1766 gen_op_iwmmxt_set_cup();
1767 break;
1768 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1769 wrd = (insn >> 12) & 0xf;
1770 rd0 = (insn >> 16) & 0xf;
1771 rd1 = (insn >> 0) & 0xf;
1772 gen_op_iwmmxt_movq_M0_wRn(rd0);
1773 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1774 gen_op_movl_T1_im(7);
1775 gen_op_andl_T0_T1();
1776 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1777 gen_op_iwmmxt_movq_wRn_M0(wrd);
1778 gen_op_iwmmxt_set_mup();
1779 break;
1780 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1781 rd = (insn >> 12) & 0xf;
1782 wrd = (insn >> 16) & 0xf;
1783 gen_movl_T0_reg(s, rd);
1784 gen_op_iwmmxt_movq_M0_wRn(wrd);
1785 switch ((insn >> 6) & 3) {
1786 case 0:
1787 gen_op_movl_T1_im(0xff);
1788 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1789 break;
1790 case 1:
1791 gen_op_movl_T1_im(0xffff);
1792 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1793 break;
1794 case 2:
1795 gen_op_movl_T1_im(0xffffffff);
1796 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1797 break;
1798 case 3:
1799 return 1;
1800 }
1801 gen_op_iwmmxt_movq_wRn_M0(wrd);
1802 gen_op_iwmmxt_set_mup();
1803 break;
1804 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1805 rd = (insn >> 12) & 0xf;
1806 wrd = (insn >> 16) & 0xf;
1807 if (rd == 15)
1808 return 1;
1809 gen_op_iwmmxt_movq_M0_wRn(wrd);
1810 switch ((insn >> 22) & 3) {
1811 case 0:
1812 if (insn & 8)
1813 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1814 else {
e677137d 1815 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1816 }
1817 break;
1818 case 1:
1819 if (insn & 8)
1820 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1821 else {
e677137d 1822 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1823 }
1824 break;
1825 case 2:
e677137d 1826 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1827 break;
1828 case 3:
1829 return 1;
1830 }
b26eefb6 1831 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1832 break;
1833 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1834 if ((insn & 0x000ff008) != 0x0003f000)
1835 return 1;
1836 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1837 switch ((insn >> 22) & 3) {
1838 case 0:
1839 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1840 break;
1841 case 1:
1842 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1843 break;
1844 case 2:
1845 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1846 break;
1847 case 3:
1848 return 1;
1849 }
1850 gen_op_shll_T1_im(28);
d9ba4830 1851 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1852 break;
1853 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1854 rd = (insn >> 12) & 0xf;
1855 wrd = (insn >> 16) & 0xf;
1856 gen_movl_T0_reg(s, rd);
1857 switch ((insn >> 6) & 3) {
1858 case 0:
e677137d 1859 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1860 break;
1861 case 1:
e677137d 1862 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1863 break;
1864 case 2:
e677137d 1865 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1866 break;
1867 case 3:
1868 return 1;
1869 }
1870 gen_op_iwmmxt_movq_wRn_M0(wrd);
1871 gen_op_iwmmxt_set_mup();
1872 break;
1873 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1874 if ((insn & 0x000ff00f) != 0x0003f000)
1875 return 1;
1876 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
15bb4eac 1877 gen_op_movl_T0_T1();
18c9b560
AZ
1878 switch ((insn >> 22) & 3) {
1879 case 0:
1880 for (i = 0; i < 7; i ++) {
1881 gen_op_shll_T1_im(4);
1882 gen_op_andl_T0_T1();
1883 }
1884 break;
1885 case 1:
1886 for (i = 0; i < 3; i ++) {
1887 gen_op_shll_T1_im(8);
1888 gen_op_andl_T0_T1();
1889 }
1890 break;
1891 case 2:
1892 gen_op_shll_T1_im(16);
1893 gen_op_andl_T0_T1();
1894 break;
1895 case 3:
1896 return 1;
1897 }
d9ba4830 1898 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1899 break;
1900 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1901 wrd = (insn >> 12) & 0xf;
1902 rd0 = (insn >> 16) & 0xf;
1903 gen_op_iwmmxt_movq_M0_wRn(rd0);
1904 switch ((insn >> 22) & 3) {
1905 case 0:
e677137d 1906 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1907 break;
1908 case 1:
e677137d 1909 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1910 break;
1911 case 2:
e677137d 1912 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1913 break;
1914 case 3:
1915 return 1;
1916 }
1917 gen_op_iwmmxt_movq_wRn_M0(wrd);
1918 gen_op_iwmmxt_set_mup();
1919 break;
1920 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1921 if ((insn & 0x000ff00f) != 0x0003f000)
1922 return 1;
1923 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
15bb4eac 1924 gen_op_movl_T0_T1();
18c9b560
AZ
1925 switch ((insn >> 22) & 3) {
1926 case 0:
1927 for (i = 0; i < 7; i ++) {
1928 gen_op_shll_T1_im(4);
1929 gen_op_orl_T0_T1();
1930 }
1931 break;
1932 case 1:
1933 for (i = 0; i < 3; i ++) {
1934 gen_op_shll_T1_im(8);
1935 gen_op_orl_T0_T1();
1936 }
1937 break;
1938 case 2:
1939 gen_op_shll_T1_im(16);
1940 gen_op_orl_T0_T1();
1941 break;
1942 case 3:
1943 return 1;
1944 }
d9ba4830 1945 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1946 break;
1947 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1948 rd = (insn >> 12) & 0xf;
1949 rd0 = (insn >> 16) & 0xf;
1950 if ((insn & 0xf) != 0)
1951 return 1;
1952 gen_op_iwmmxt_movq_M0_wRn(rd0);
1953 switch ((insn >> 22) & 3) {
1954 case 0:
e677137d 1955 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
1956 break;
1957 case 1:
e677137d 1958 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
1959 break;
1960 case 2:
1961 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
1962 break;
1963 case 3:
1964 return 1;
1965 }
1966 gen_movl_reg_T0(s, rd);
1967 break;
1968 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1969 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 rd1 = (insn >> 0) & 0xf;
1973 gen_op_iwmmxt_movq_M0_wRn(rd0);
1974 switch ((insn >> 22) & 3) {
1975 case 0:
1976 if (insn & (1 << 21))
1977 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1978 else
1979 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1980 break;
1981 case 1:
1982 if (insn & (1 << 21))
1983 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1984 else
1985 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1986 break;
1987 case 2:
1988 if (insn & (1 << 21))
1989 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1990 else
1991 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1992 break;
1993 case 3:
1994 return 1;
1995 }
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
1999 break;
2000 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2001 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2002 wrd = (insn >> 12) & 0xf;
2003 rd0 = (insn >> 16) & 0xf;
2004 gen_op_iwmmxt_movq_M0_wRn(rd0);
2005 switch ((insn >> 22) & 3) {
2006 case 0:
2007 if (insn & (1 << 21))
2008 gen_op_iwmmxt_unpacklsb_M0();
2009 else
2010 gen_op_iwmmxt_unpacklub_M0();
2011 break;
2012 case 1:
2013 if (insn & (1 << 21))
2014 gen_op_iwmmxt_unpacklsw_M0();
2015 else
2016 gen_op_iwmmxt_unpackluw_M0();
2017 break;
2018 case 2:
2019 if (insn & (1 << 21))
2020 gen_op_iwmmxt_unpacklsl_M0();
2021 else
2022 gen_op_iwmmxt_unpacklul_M0();
2023 break;
2024 case 3:
2025 return 1;
2026 }
2027 gen_op_iwmmxt_movq_wRn_M0(wrd);
2028 gen_op_iwmmxt_set_mup();
2029 gen_op_iwmmxt_set_cup();
2030 break;
2031 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2032 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2033 wrd = (insn >> 12) & 0xf;
2034 rd0 = (insn >> 16) & 0xf;
2035 gen_op_iwmmxt_movq_M0_wRn(rd0);
2036 switch ((insn >> 22) & 3) {
2037 case 0:
2038 if (insn & (1 << 21))
2039 gen_op_iwmmxt_unpackhsb_M0();
2040 else
2041 gen_op_iwmmxt_unpackhub_M0();
2042 break;
2043 case 1:
2044 if (insn & (1 << 21))
2045 gen_op_iwmmxt_unpackhsw_M0();
2046 else
2047 gen_op_iwmmxt_unpackhuw_M0();
2048 break;
2049 case 2:
2050 if (insn & (1 << 21))
2051 gen_op_iwmmxt_unpackhsl_M0();
2052 else
2053 gen_op_iwmmxt_unpackhul_M0();
2054 break;
2055 case 3:
2056 return 1;
2057 }
2058 gen_op_iwmmxt_movq_wRn_M0(wrd);
2059 gen_op_iwmmxt_set_mup();
2060 gen_op_iwmmxt_set_cup();
2061 break;
2062 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2063 case 0x214: case 0x614: case 0xa14: case 0xe14:
2064 wrd = (insn >> 12) & 0xf;
2065 rd0 = (insn >> 16) & 0xf;
2066 gen_op_iwmmxt_movq_M0_wRn(rd0);
2067 if (gen_iwmmxt_shift(insn, 0xff))
2068 return 1;
2069 switch ((insn >> 22) & 3) {
2070 case 0:
2071 return 1;
2072 case 1:
2073 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2074 break;
2075 case 2:
2076 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2077 break;
2078 case 3:
2079 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2080 break;
2081 }
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2087 case 0x014: case 0x414: case 0x814: case 0xc14:
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 if (gen_iwmmxt_shift(insn, 0xff))
2092 return 1;
2093 switch ((insn >> 22) & 3) {
2094 case 0:
2095 return 1;
2096 case 1:
2097 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2098 break;
2099 case 2:
2100 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2101 break;
2102 case 3:
2103 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2104 break;
2105 }
2106 gen_op_iwmmxt_movq_wRn_M0(wrd);
2107 gen_op_iwmmxt_set_mup();
2108 gen_op_iwmmxt_set_cup();
2109 break;
2110 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2111 case 0x114: case 0x514: case 0x914: case 0xd14:
2112 wrd = (insn >> 12) & 0xf;
2113 rd0 = (insn >> 16) & 0xf;
2114 gen_op_iwmmxt_movq_M0_wRn(rd0);
2115 if (gen_iwmmxt_shift(insn, 0xff))
2116 return 1;
2117 switch ((insn >> 22) & 3) {
2118 case 0:
2119 return 1;
2120 case 1:
2121 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2122 break;
2123 case 2:
2124 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2125 break;
2126 case 3:
2127 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2128 break;
2129 }
2130 gen_op_iwmmxt_movq_wRn_M0(wrd);
2131 gen_op_iwmmxt_set_mup();
2132 gen_op_iwmmxt_set_cup();
2133 break;
2134 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2135 case 0x314: case 0x714: case 0xb14: case 0xf14:
2136 wrd = (insn >> 12) & 0xf;
2137 rd0 = (insn >> 16) & 0xf;
2138 gen_op_iwmmxt_movq_M0_wRn(rd0);
2139 switch ((insn >> 22) & 3) {
2140 case 0:
2141 return 1;
2142 case 1:
2143 if (gen_iwmmxt_shift(insn, 0xf))
2144 return 1;
2145 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2146 break;
2147 case 2:
2148 if (gen_iwmmxt_shift(insn, 0x1f))
2149 return 1;
2150 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2151 break;
2152 case 3:
2153 if (gen_iwmmxt_shift(insn, 0x3f))
2154 return 1;
2155 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2156 break;
2157 }
2158 gen_op_iwmmxt_movq_wRn_M0(wrd);
2159 gen_op_iwmmxt_set_mup();
2160 gen_op_iwmmxt_set_cup();
2161 break;
2162 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2163 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2164 wrd = (insn >> 12) & 0xf;
2165 rd0 = (insn >> 16) & 0xf;
2166 rd1 = (insn >> 0) & 0xf;
2167 gen_op_iwmmxt_movq_M0_wRn(rd0);
2168 switch ((insn >> 22) & 3) {
2169 case 0:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_minub_M0_wRn(rd1);
2174 break;
2175 case 1:
2176 if (insn & (1 << 21))
2177 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2178 else
2179 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2180 break;
2181 case 2:
2182 if (insn & (1 << 21))
2183 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2184 else
2185 gen_op_iwmmxt_minul_M0_wRn(rd1);
2186 break;
2187 case 3:
2188 return 1;
2189 }
2190 gen_op_iwmmxt_movq_wRn_M0(wrd);
2191 gen_op_iwmmxt_set_mup();
2192 break;
2193 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2194 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2195 wrd = (insn >> 12) & 0xf;
2196 rd0 = (insn >> 16) & 0xf;
2197 rd1 = (insn >> 0) & 0xf;
2198 gen_op_iwmmxt_movq_M0_wRn(rd0);
2199 switch ((insn >> 22) & 3) {
2200 case 0:
2201 if (insn & (1 << 21))
2202 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2203 else
2204 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2205 break;
2206 case 1:
2207 if (insn & (1 << 21))
2208 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2209 else
2210 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2211 break;
2212 case 2:
2213 if (insn & (1 << 21))
2214 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2215 else
2216 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2217 break;
2218 case 3:
2219 return 1;
2220 }
2221 gen_op_iwmmxt_movq_wRn_M0(wrd);
2222 gen_op_iwmmxt_set_mup();
2223 break;
2224 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2225 case 0x402: case 0x502: case 0x602: case 0x702:
2226 wrd = (insn >> 12) & 0xf;
2227 rd0 = (insn >> 16) & 0xf;
2228 rd1 = (insn >> 0) & 0xf;
2229 gen_op_iwmmxt_movq_M0_wRn(rd0);
2230 gen_op_movl_T0_im((insn >> 20) & 3);
2231 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2232 gen_op_iwmmxt_movq_wRn_M0(wrd);
2233 gen_op_iwmmxt_set_mup();
2234 break;
2235 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2236 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2237 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2238 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2239 wrd = (insn >> 12) & 0xf;
2240 rd0 = (insn >> 16) & 0xf;
2241 rd1 = (insn >> 0) & 0xf;
2242 gen_op_iwmmxt_movq_M0_wRn(rd0);
2243 switch ((insn >> 20) & 0xf) {
2244 case 0x0:
2245 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2246 break;
2247 case 0x1:
2248 gen_op_iwmmxt_subub_M0_wRn(rd1);
2249 break;
2250 case 0x3:
2251 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2252 break;
2253 case 0x4:
2254 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2255 break;
2256 case 0x5:
2257 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2258 break;
2259 case 0x7:
2260 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2261 break;
2262 case 0x8:
2263 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2264 break;
2265 case 0x9:
2266 gen_op_iwmmxt_subul_M0_wRn(rd1);
2267 break;
2268 case 0xb:
2269 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2270 break;
2271 default:
2272 return 1;
2273 }
2274 gen_op_iwmmxt_movq_wRn_M0(wrd);
2275 gen_op_iwmmxt_set_mup();
2276 gen_op_iwmmxt_set_cup();
2277 break;
2278 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2279 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2280 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2281 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2282 wrd = (insn >> 12) & 0xf;
2283 rd0 = (insn >> 16) & 0xf;
2284 gen_op_iwmmxt_movq_M0_wRn(rd0);
2285 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
2286 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2287 gen_op_iwmmxt_movq_wRn_M0(wrd);
2288 gen_op_iwmmxt_set_mup();
2289 gen_op_iwmmxt_set_cup();
2290 break;
2291 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2292 case 0x418: case 0x518: case 0x618: case 0x718:
2293 case 0x818: case 0x918: case 0xa18: case 0xb18:
2294 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2295 wrd = (insn >> 12) & 0xf;
2296 rd0 = (insn >> 16) & 0xf;
2297 rd1 = (insn >> 0) & 0xf;
2298 gen_op_iwmmxt_movq_M0_wRn(rd0);
2299 switch ((insn >> 20) & 0xf) {
2300 case 0x0:
2301 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2302 break;
2303 case 0x1:
2304 gen_op_iwmmxt_addub_M0_wRn(rd1);
2305 break;
2306 case 0x3:
2307 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2308 break;
2309 case 0x4:
2310 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2311 break;
2312 case 0x5:
2313 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2314 break;
2315 case 0x7:
2316 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2317 break;
2318 case 0x8:
2319 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2320 break;
2321 case 0x9:
2322 gen_op_iwmmxt_addul_M0_wRn(rd1);
2323 break;
2324 case 0xb:
2325 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2326 break;
2327 default:
2328 return 1;
2329 }
2330 gen_op_iwmmxt_movq_wRn_M0(wrd);
2331 gen_op_iwmmxt_set_mup();
2332 gen_op_iwmmxt_set_cup();
2333 break;
2334 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2335 case 0x408: case 0x508: case 0x608: case 0x708:
2336 case 0x808: case 0x908: case 0xa08: case 0xb08:
2337 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2338 wrd = (insn >> 12) & 0xf;
2339 rd0 = (insn >> 16) & 0xf;
2340 rd1 = (insn >> 0) & 0xf;
2341 gen_op_iwmmxt_movq_M0_wRn(rd0);
2342 if (!(insn & (1 << 20)))
2343 return 1;
2344 switch ((insn >> 22) & 3) {
2345 case 0:
2346 return 1;
2347 case 1:
2348 if (insn & (1 << 21))
2349 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2350 else
2351 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2352 break;
2353 case 2:
2354 if (insn & (1 << 21))
2355 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2356 else
2357 gen_op_iwmmxt_packul_M0_wRn(rd1);
2358 break;
2359 case 3:
2360 if (insn & (1 << 21))
2361 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2362 else
2363 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2364 break;
2365 }
2366 gen_op_iwmmxt_movq_wRn_M0(wrd);
2367 gen_op_iwmmxt_set_mup();
2368 gen_op_iwmmxt_set_cup();
2369 break;
2370 case 0x201: case 0x203: case 0x205: case 0x207:
2371 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2372 case 0x211: case 0x213: case 0x215: case 0x217:
2373 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2374 wrd = (insn >> 5) & 0xf;
2375 rd0 = (insn >> 12) & 0xf;
2376 rd1 = (insn >> 0) & 0xf;
2377 if (rd0 == 0xf || rd1 == 0xf)
2378 return 1;
2379 gen_op_iwmmxt_movq_M0_wRn(wrd);
2380 switch ((insn >> 16) & 0xf) {
2381 case 0x0: /* TMIA */
2382 gen_movl_T0_reg(s, rd0);
2383 gen_movl_T1_reg(s, rd1);
2384 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2385 break;
2386 case 0x8: /* TMIAPH */
2387 gen_movl_T0_reg(s, rd0);
2388 gen_movl_T1_reg(s, rd1);
2389 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2390 break;
2391 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2392 gen_movl_T1_reg(s, rd0);
2393 if (insn & (1 << 16))
2394 gen_op_shrl_T1_im(16);
2395 gen_op_movl_T0_T1();
2396 gen_movl_T1_reg(s, rd1);
2397 if (insn & (1 << 17))
2398 gen_op_shrl_T1_im(16);
2399 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2400 break;
2401 default:
2402 return 1;
2403 }
2404 gen_op_iwmmxt_movq_wRn_M0(wrd);
2405 gen_op_iwmmxt_set_mup();
2406 break;
2407 default:
2408 return 1;
2409 }
2410
2411 return 0;
2412}
2413
2414/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2415 (i.e. an undefined instruction). */
2416static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2417{
2418 int acc, rd0, rd1, rdhi, rdlo;
2419
2420 if ((insn & 0x0ff00f10) == 0x0e200010) {
2421 /* Multiply with Internal Accumulate Format */
2422 rd0 = (insn >> 12) & 0xf;
2423 rd1 = insn & 0xf;
2424 acc = (insn >> 5) & 7;
2425
2426 if (acc != 0)
2427 return 1;
2428
2429 switch ((insn >> 16) & 0xf) {
2430 case 0x0: /* MIA */
2431 gen_movl_T0_reg(s, rd0);
2432 gen_movl_T1_reg(s, rd1);
2433 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2434 break;
2435 case 0x8: /* MIAPH */
2436 gen_movl_T0_reg(s, rd0);
2437 gen_movl_T1_reg(s, rd1);
2438 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2439 break;
2440 case 0xc: /* MIABB */
2441 case 0xd: /* MIABT */
2442 case 0xe: /* MIATB */
2443 case 0xf: /* MIATT */
2444 gen_movl_T1_reg(s, rd0);
2445 if (insn & (1 << 16))
2446 gen_op_shrl_T1_im(16);
2447 gen_op_movl_T0_T1();
2448 gen_movl_T1_reg(s, rd1);
2449 if (insn & (1 << 17))
2450 gen_op_shrl_T1_im(16);
2451 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2452 break;
2453 default:
2454 return 1;
2455 }
2456
2457 gen_op_iwmmxt_movq_wRn_M0(acc);
2458 return 0;
2459 }
2460
2461 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2462 /* Internal Accumulator Access Format */
2463 rdhi = (insn >> 16) & 0xf;
2464 rdlo = (insn >> 12) & 0xf;
2465 acc = insn & 7;
2466
2467 if (acc != 0)
2468 return 1;
2469
2470 if (insn & ARM_CP_RW_BIT) { /* MRA */
2471 gen_iwmmxt_movl_T0_T1_wRn(acc);
2472 gen_movl_reg_T0(s, rdlo);
2473 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2474 gen_op_andl_T0_T1();
2475 gen_movl_reg_T0(s, rdhi);
2476 } else { /* MAR */
2477 gen_movl_T0_reg(s, rdlo);
2478 gen_movl_T1_reg(s, rdhi);
2479 gen_iwmmxt_movl_wRn_T0_T1(acc);
2480 }
2481 return 0;
2482 }
2483
2484 return 1;
2485}
2486
2487/* Disassemble system coprocessor instruction. Return nonzero if
2488 instruction is not defined. */
2489static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2490{
2491 TCGv tmp;
2492 uint32_t rd = (insn >> 12) & 0xf;
2493 uint32_t cp = (insn >> 8) & 0xf;
2494 if (IS_USER(s)) {
2495 return 1;
2496 }
2497
2498 if (insn & ARM_CP_RW_BIT) {
2499 if (!env->cp[cp].cp_read)
2500 return 1;
2501 gen_set_pc_im(s->pc);
2502 tmp = new_tmp();
2503 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2504 store_reg(s, rd, tmp);
2505 } else {
2506 if (!env->cp[cp].cp_write)
2507 return 1;
2508 gen_set_pc_im(s->pc);
2509 tmp = load_reg(s, rd);
2510 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
2511 dead_tmp(tmp);
2512 }
2513 return 0;
2514}
2515
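/* Return nonzero if this cp15 access is permitted in user mode.  Only the
   TLS register accesses and the ISB/DSB/DMB barrier encodings are allowed. */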
2516static int cp15_user_ok(uint32_t insn)
2517{
2518 int cpn = (insn >> 16) & 0xf;
2519 int cpm = insn & 0xf;
2520 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2521
2522 if (cpn == 13 && cpm == 0) {
2523 /* TLS register. */
2524 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2525 return 1;
2526 }
2527 if (cpn == 7) {
2528 /* ISB, DSB, DMB. */
2529 if ((cpm == 5 && op == 4)
2530 || (cpm == 10 && (op == 4 || op == 5)))
2531 return 1;
2532 }
2533 return 0;
2534}
2535
2536/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2537 instruction is not defined. */
2538static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2539{
2540 uint32_t rd;
2541 TCGv tmp;
2542
2543 /* M profile cores use memory mapped registers instead of cp15. */
2544 if (arm_feature(env, ARM_FEATURE_M))
2545 return 1;
2546
2547 if ((insn & (1 << 25)) == 0) {
2548 if (insn & (1 << 20)) {
2549 /* mrrc */
2550 return 1;
2551 }
2552 /* mcrr. Used for block cache operations, so implement as no-op. */
2553 return 0;
2554 }
2555 if ((insn & (1 << 4)) == 0) {
2556 /* cdp */
2557 return 1;
2558 }
2559 if (IS_USER(s) && !cp15_user_ok(insn)) {
2560 return 1;
2561 }
2562 if ((insn & 0x0fff0fff) == 0x0e070f90
2563 || (insn & 0x0fff0fff) == 0x0e070f58) {
2564 /* Wait for interrupt. */
2565 gen_set_pc_im(s->pc);
2566 s->is_jmp = DISAS_WFI;
2567 return 0;
2568 }
2569 rd = (insn >> 12) & 0xf;
2570 if (insn & ARM_CP_RW_BIT) {
2571 tmp = new_tmp();
2572 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
2573 /* If the destination register is r15 then sets condition codes. */
2574 if (rd != 15)
2575 store_reg(s, rd, tmp);
2576 else
2577 dead_tmp(tmp);
2578 } else {
2579 tmp = load_reg(s, rd);
2580 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2581 dead_tmp(tmp);
2582 /* Normally we would always end the TB here, but Linux
2583 * arch/arm/mach-pxa/sleep.S expects two instructions following
2584 * an MMU enable to execute from cache. Imitate this behaviour. */
2585 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2586 (insn & 0x0fff0fff) != 0x0e010f10)
2587 gen_lookup_tb(s);
2588 }
2589 return 0;
2590}
2591
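/* Helpers for extracting VFP register numbers from an instruction.  Single
   precision registers use four bits plus a low bit; double precision
   registers only use the extra (high) bit when VFP3 is present, otherwise a
   set high bit makes the encoding undefined. */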
2592#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2593#define VFP_SREG(insn, bigbit, smallbit) \
2594 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2595#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2596 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2597 reg = (((insn) >> (bigbit)) & 0x0f) \
2598 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2599 } else { \
2600 if (insn & (1 << (smallbit))) \
2601 return 1; \
2602 reg = ((insn) >> (bigbit)) & 0x0f; \
2603 }} while (0)
2604
2605#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2606#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2607#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2608#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2609#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2610#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2611
2612/* Move between integer and VFP cores. */
2613static TCGv gen_vfp_mrs(void)
2614{
2615 TCGv tmp = new_tmp();
2616 tcg_gen_mov_i32(tmp, cpu_F0s);
2617 return tmp;
2618}
2619
2620static void gen_vfp_msr(TCGv tmp)
2621{
2622 tcg_gen_mov_i32(cpu_F0s, tmp);
2623 dead_tmp(tmp);
2624}
2625
2626static inline int
2627vfp_enabled(CPUState * env)
2628{
2629 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2630}
2631
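/* Duplicate the byte at 'shift' in 'var' across all four byte lanes. */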
2632static void gen_neon_dup_u8(TCGv var, int shift)
2633{
2634 TCGv tmp = new_tmp();
2635 if (shift)
2636 tcg_gen_shri_i32(var, var, shift);
2637 tcg_gen_ext8u_i32(var, var);
2638 tcg_gen_shli_i32(tmp, var, 8);
2639 tcg_gen_or_i32(var, var, tmp);
2640 tcg_gen_shli_i32(tmp, var, 16);
2641 tcg_gen_or_i32(var, var, tmp);
2642 dead_tmp(tmp);
2643}
2644
2645static void gen_neon_dup_low16(TCGv var)
2646{
2647 TCGv tmp = new_tmp();
2648 tcg_gen_ext16u_i32(var, var);
2649 tcg_gen_shli_i32(tmp, var, 16);
2650 tcg_gen_or_i32(var, var, tmp);
2651 dead_tmp(tmp);
2652}
2653
2654static void gen_neon_dup_high16(TCGv var)
2655{
2656 TCGv tmp = new_tmp();
2657 tcg_gen_andi_i32(var, var, 0xffff0000);
2658 tcg_gen_shri_i32(tmp, var, 16);
2659 tcg_gen_or_i32(var, var, tmp);
2660 dead_tmp(tmp);
2661}
2662
2663/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2664 (i.e. an undefined instruction). */
2665static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2666{
2667 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2668 int dp, veclen;
2669 TCGv tmp;
2670 TCGv tmp2;
2671
2672 if (!arm_feature(env, ARM_FEATURE_VFP))
2673 return 1;
2674
2675 if (!vfp_enabled(env)) {
2676 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2677 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2678 return 1;
2679 rn = (insn >> 16) & 0xf;
2680 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2681 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2682 return 1;
2683 }
2684 dp = ((insn & 0xf00) == 0xb00);
2685 switch ((insn >> 24) & 0xf) {
2686 case 0xe:
2687 if (insn & (1 << 4)) {
2688 /* single register transfer */
2689 rd = (insn >> 12) & 0xf;
2690 if (dp) {
2691 int size;
2692 int pass;
2693
2694 VFP_DREG_N(rn, insn);
2695 if (insn & 0xf)
2696 return 1;
2697 if (insn & 0x00c00060
2698 && !arm_feature(env, ARM_FEATURE_NEON))
2699 return 1;
2700
2701 pass = (insn >> 21) & 1;
2702 if (insn & (1 << 22)) {
2703 size = 0;
2704 offset = ((insn >> 5) & 3) * 8;
2705 } else if (insn & (1 << 5)) {
2706 size = 1;
2707 offset = (insn & (1 << 6)) ? 16 : 0;
2708 } else {
2709 size = 2;
2710 offset = 0;
2711 }
2712 if (insn & ARM_CP_RW_BIT) {
2713 /* vfp->arm */
2714 tmp = neon_load_reg(rn, pass);
2715 switch (size) {
2716 case 0:
2717 if (offset)
2718 tcg_gen_shri_i32(tmp, tmp, offset);
2719 if (insn & (1 << 23))
2720 gen_uxtb(tmp);
2721 else
2722 gen_sxtb(tmp);
2723 break;
2724 case 1:
2725 if (insn & (1 << 23)) {
2726 if (offset) {
2727 tcg_gen_shri_i32(tmp, tmp, 16);
2728 } else {
2729 gen_uxth(tmp);
2730 }
2731 } else {
2732 if (offset) {
2733 tcg_gen_sari_i32(tmp, tmp, 16);
2734 } else {
2735 gen_sxth(tmp);
2736 }
2737 }
2738 break;
2739 case 2:
2740 break;
2741 }
2742 store_reg(s, rd, tmp);
2743 } else {
2744 /* arm->vfp */
2745 tmp = load_reg(s, rd);
2746 if (insn & (1 << 23)) {
2747 /* VDUP */
2748 if (size == 0) {
2749 gen_neon_dup_u8(tmp, 0);
2750 } else if (size == 1) {
2751 gen_neon_dup_low16(tmp);
2752 }
2753 for (n = 0; n <= pass * 2; n++) {
2754 tmp2 = new_tmp();
2755 tcg_gen_mov_i32(tmp2, tmp);
2756 neon_store_reg(rn, n, tmp2);
2757 }
2758 neon_store_reg(rn, n, tmp);
2759 } else {
2760 /* VMOV */
2761 switch (size) {
2762 case 0:
2763 tmp2 = neon_load_reg(rn, pass);
2764 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2765 dead_tmp(tmp2);
2766 break;
2767 case 1:
2768 tmp2 = neon_load_reg(rn, pass);
2769 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2770 dead_tmp(tmp2);
2771 break;
2772 case 2:
2773 break;
2774 }
2775 neon_store_reg(rn, pass, tmp);
2776 }
2777 }
2778 } else { /* !dp */
2779 if ((insn & 0x6f) != 0x00)
2780 return 1;
2781 rn = VFP_SREG_N(insn);
2782 if (insn & ARM_CP_RW_BIT) {
2783 /* vfp->arm */
2784 if (insn & (1 << 21)) {
2785 /* system register */
2786 rn >>= 1;
2787
2788 switch (rn) {
2789 case ARM_VFP_FPSID:
2790 /* VFP2 allows access to FSID from userspace.
2791 VFP3 restricts all id registers to privileged
2792 accesses. */
2793 if (IS_USER(s)
2794 && arm_feature(env, ARM_FEATURE_VFP3))
2795 return 1;
2796 tmp = load_cpu_field(vfp.xregs[rn]);
2797 break;
2798 case ARM_VFP_FPEXC:
2799 if (IS_USER(s))
2800 return 1;
2801 tmp = load_cpu_field(vfp.xregs[rn]);
2802 break;
2803 case ARM_VFP_FPINST:
2804 case ARM_VFP_FPINST2:
2805 /* Not present in VFP3. */
2806 if (IS_USER(s)
2807 || arm_feature(env, ARM_FEATURE_VFP3))
2808 return 1;
2809 tmp = load_cpu_field(vfp.xregs[rn]);
2810 break;
2811 case ARM_VFP_FPSCR:
2812 if (rd == 15) {
2813 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2814 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2815 } else {
2816 tmp = new_tmp();
2817 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2818 }
2819 break;
2820 case ARM_VFP_MVFR0:
2821 case ARM_VFP_MVFR1:
2822 if (IS_USER(s)
2823 || !arm_feature(env, ARM_FEATURE_VFP3))
2824 return 1;
2825 tmp = load_cpu_field(vfp.xregs[rn]);
2826 break;
2827 default:
2828 return 1;
2829 }
2830 } else {
2831 gen_mov_F0_vreg(0, rn);
2832 tmp = gen_vfp_mrs();
2833 }
2834 if (rd == 15) {
2835 /* Set the 4 flag bits in the CPSR. */
2836 gen_set_nzcv(tmp);
2837 dead_tmp(tmp);
2838 } else {
2839 store_reg(s, rd, tmp);
2840 }
2841 } else {
2842 /* arm->vfp */
2843 tmp = load_reg(s, rd);
2844 if (insn & (1 << 21)) {
2845 rn >>= 1;
2846 /* system register */
2847 switch (rn) {
2848 case ARM_VFP_FPSID:
2849 case ARM_VFP_MVFR0:
2850 case ARM_VFP_MVFR1:
2851 /* Writes are ignored. */
2852 break;
2853 case ARM_VFP_FPSCR:
2854 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2855 dead_tmp(tmp);
2856 gen_lookup_tb(s);
2857 break;
2858 case ARM_VFP_FPEXC:
2859 if (IS_USER(s))
2860 return 1;
2861 store_cpu_field(tmp, vfp.xregs[rn]);
2862 gen_lookup_tb(s);
2863 break;
2864 case ARM_VFP_FPINST:
2865 case ARM_VFP_FPINST2:
2866 store_cpu_field(tmp, vfp.xregs[rn]);
2867 break;
2868 default:
2869 return 1;
2870 }
2871 } else {
2872 gen_vfp_msr(tmp);
2873 gen_mov_vreg_F0(0, rn);
2874 }
2875 }
2876 }
2877 } else {
2878 /* data processing */
2879 /* The opcode is in bits 23, 21, 20 and 6. */
2880 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2881 if (dp) {
2882 if (op == 15) {
2883 /* rn is opcode */
2884 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2885 } else {
2886 /* rn is register number */
2887 VFP_DREG_N(rn, insn);
2888 }
2889
2890 if (op == 15 && (rn == 15 || rn > 17)) {
2891 /* Integer or single precision destination. */
2892 rd = VFP_SREG_D(insn);
2893 } else {
2894 VFP_DREG_D(rd, insn);
2895 }
2896
2897 if (op == 15 && (rn == 16 || rn == 17)) {
2898 /* Integer source. */
2899 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2900 } else {
2901 VFP_DREG_M(rm, insn);
2902 }
2903 } else {
2904 rn = VFP_SREG_N(insn);
2905 if (op == 15 && rn == 15) {
2906 /* Double precision destination. */
2907 VFP_DREG_D(rd, insn);
2908 } else {
2909 rd = VFP_SREG_D(insn);
2910 }
2911 rm = VFP_SREG_M(insn);
2912 }
2913
2914 veclen = env->vfp.vec_len;
2915 if (op == 15 && rn > 3)
2916 veclen = 0;
2917
2918 /* Shut up compiler warnings. */
2919 delta_m = 0;
2920 delta_d = 0;
2921 bank_mask = 0;
2922
2923 if (veclen > 0) {
2924 if (dp)
2925 bank_mask = 0xc;
2926 else
2927 bank_mask = 0x18;
2928
2929 /* Figure out what type of vector operation this is. */
2930 if ((rd & bank_mask) == 0) {
2931 /* scalar */
2932 veclen = 0;
2933 } else {
2934 if (dp)
2935 delta_d = (env->vfp.vec_stride >> 1) + 1;
2936 else
2937 delta_d = env->vfp.vec_stride + 1;
2938
2939 if ((rm & bank_mask) == 0) {
2940 /* mixed scalar/vector */
2941 delta_m = 0;
2942 } else {
2943 /* vector */
2944 delta_m = delta_d;
2945 }
2946 }
2947 }
2948
2949 /* Load the initial operands. */
2950 if (op == 15) {
2951 switch (rn) {
2952 case 16:
2953 case 17:
2954 /* Integer source */
2955 gen_mov_F0_vreg(0, rm);
2956 break;
2957 case 8:
2958 case 9:
2959 /* Compare */
2960 gen_mov_F0_vreg(dp, rd);
2961 gen_mov_F1_vreg(dp, rm);
2962 break;
2963 case 10:
2964 case 11:
2965 /* Compare with zero */
2966 gen_mov_F0_vreg(dp, rd);
2967 gen_vfp_F1_ld0(dp);
2968 break;
2969 case 20:
2970 case 21:
2971 case 22:
2972 case 23:
2973 case 28:
2974 case 29:
2975 case 30:
2976 case 31:
2977 /* Source and destination the same. */
2978 gen_mov_F0_vreg(dp, rd);
2979 break;
2980 default:
2981 /* One source operand. */
2982 gen_mov_F0_vreg(dp, rm);
2983 break;
2984 }
2985 } else {
2986 /* Two source operands. */
2987 gen_mov_F0_vreg(dp, rn);
2988 gen_mov_F1_vreg(dp, rm);
2989 }
2990
2991 for (;;) {
2992 /* Perform the calculation. */
2993 switch (op) {
2994 case 0: /* mac: fd + (fn * fm) */
2995 gen_vfp_mul(dp);
2996 gen_mov_F1_vreg(dp, rd);
2997 gen_vfp_add(dp);
2998 break;
2999 case 1: /* nmac: fd - (fn * fm) */
3000 gen_vfp_mul(dp);
3001 gen_vfp_neg(dp);
3002 gen_mov_F1_vreg(dp, rd);
3003 gen_vfp_add(dp);
3004 break;
3005 case 2: /* msc: -fd + (fn * fm) */
3006 gen_vfp_mul(dp);
3007 gen_mov_F1_vreg(dp, rd);
3008 gen_vfp_sub(dp);
3009 break;
3010 case 3: /* nmsc: -fd - (fn * fm) */
3011 gen_vfp_mul(dp);
3012 gen_vfp_neg(dp);
3013 gen_mov_F1_vreg(dp, rd);
3014 gen_vfp_sub(dp);
3015 break;
3016 case 4: /* mul: fn * fm */
3017 gen_vfp_mul(dp);
3018 break;
3019 case 5: /* nmul: -(fn * fm) */
3020 gen_vfp_mul(dp);
3021 gen_vfp_neg(dp);
3022 break;
3023 case 6: /* add: fn + fm */
3024 gen_vfp_add(dp);
3025 break;
3026 case 7: /* sub: fn - fm */
3027 gen_vfp_sub(dp);
3028 break;
3029 case 8: /* div: fn / fm */
3030 gen_vfp_div(dp);
3031 break;
3032 case 14: /* fconst */
3033 if (!arm_feature(env, ARM_FEATURE_VFP3))
3034 return 1;
3035
3036 n = (insn << 12) & 0x80000000;
3037 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3038 if (dp) {
3039 if (i & 0x40)
3040 i |= 0x3f80;
3041 else
3042 i |= 0x4000;
3043 n |= i << 16;
3044 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3045 } else {
3046 if (i & 0x40)
3047 i |= 0x780;
3048 else
3049 i |= 0x800;
3050 n |= i << 19;
3051 tcg_gen_movi_i32(cpu_F0s, n);
3052 }
3053 break;
3054 case 15: /* extension space */
3055 switch (rn) {
3056 case 0: /* cpy */
3057 /* no-op */
3058 break;
3059 case 1: /* abs */
3060 gen_vfp_abs(dp);
3061 break;
3062 case 2: /* neg */
3063 gen_vfp_neg(dp);
3064 break;
3065 case 3: /* sqrt */
3066 gen_vfp_sqrt(dp);
3067 break;
3068 case 8: /* cmp */
3069 gen_vfp_cmp(dp);
3070 break;
3071 case 9: /* cmpe */
3072 gen_vfp_cmpe(dp);
3073 break;
3074 case 10: /* cmpz */
3075 gen_vfp_cmp(dp);
3076 break;
3077 case 11: /* cmpez */
3078 gen_vfp_F1_ld0(dp);
3079 gen_vfp_cmpe(dp);
3080 break;
3081 case 15: /* single<->double conversion */
3082 if (dp)
3083 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3084 else
3085 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3086 break;
3087 case 16: /* fuito */
3088 gen_vfp_uito(dp);
3089 break;
3090 case 17: /* fsito */
3091 gen_vfp_sito(dp);
3092 break;
3093 case 20: /* fshto */
3094 if (!arm_feature(env, ARM_FEATURE_VFP3))
3095 return 1;
3096 gen_vfp_shto(dp, 16 - rm);
3097 break;
3098 case 21: /* fslto */
3099 if (!arm_feature(env, ARM_FEATURE_VFP3))
3100 return 1;
3101 gen_vfp_slto(dp, 32 - rm);
3102 break;
3103 case 22: /* fuhto */
3104 if (!arm_feature(env, ARM_FEATURE_VFP3))
3105 return 1;
3106 gen_vfp_uhto(dp, 16 - rm);
3107 break;
3108 case 23: /* fulto */
3109 if (!arm_feature(env, ARM_FEATURE_VFP3))
3110 return 1;
3111 gen_vfp_ulto(dp, 32 - rm);
3112 break;
3113 case 24: /* ftoui */
3114 gen_vfp_toui(dp);
3115 break;
3116 case 25: /* ftouiz */
3117 gen_vfp_touiz(dp);
3118 break;
3119 case 26: /* ftosi */
3120 gen_vfp_tosi(dp);
3121 break;
3122 case 27: /* ftosiz */
3123 gen_vfp_tosiz(dp);
3124 break;
3125 case 28: /* ftosh */
3126 if (!arm_feature(env, ARM_FEATURE_VFP3))
3127 return 1;
3128 gen_vfp_tosh(dp, 16 - rm);
3129 break;
3130 case 29: /* ftosl */
3131 if (!arm_feature(env, ARM_FEATURE_VFP3))
3132 return 1;
3133 gen_vfp_tosl(dp, 32 - rm);
3134 break;
3135 case 30: /* ftouh */
3136 if (!arm_feature(env, ARM_FEATURE_VFP3))
3137 return 1;
3138 gen_vfp_touh(dp, 16 - rm);
3139 break;
3140 case 31: /* ftoul */
3141 if (!arm_feature(env, ARM_FEATURE_VFP3))
3142 return 1;
3143 gen_vfp_toul(dp, 32 - rm);
3144 break;
3145 default: /* undefined */
3146 printf ("rn:%d\n", rn);
3147 return 1;
3148 }
3149 break;
3150 default: /* undefined */
3151 printf ("op:%d\n", op);
3152 return 1;
3153 }
3154
3155 /* Write back the result. */
3156 if (op == 15 && (rn >= 8 && rn <= 11))
3157 ; /* Comparison, do nothing. */
3158 else if (op == 15 && rn > 17)
3159 /* Integer result. */
3160 gen_mov_vreg_F0(0, rd);
3161 else if (op == 15 && rn == 15)
3162 /* conversion */
3163 gen_mov_vreg_F0(!dp, rd);
3164 else
3165 gen_mov_vreg_F0(dp, rd);
3166
3167 /* break out of the loop if we have finished */
3168 if (veclen == 0)
3169 break;
3170
3171 if (op == 15 && delta_m == 0) {
3172 /* single source one-many */
3173 while (veclen--) {
3174 rd = ((rd + delta_d) & (bank_mask - 1))
3175 | (rd & bank_mask);
3176 gen_mov_vreg_F0(dp, rd);
3177 }
3178 break;
3179 }
3180 /* Setup the next operands. */
3181 veclen--;
3182 rd = ((rd + delta_d) & (bank_mask - 1))
3183 | (rd & bank_mask);
3184
3185 if (op == 15) {
3186 /* One source operand. */
3187 rm = ((rm + delta_m) & (bank_mask - 1))
3188 | (rm & bank_mask);
3189 gen_mov_F0_vreg(dp, rm);
3190 } else {
3191 /* Two source operands. */
3192 rn = ((rn + delta_d) & (bank_mask - 1))
3193 | (rn & bank_mask);
3194 gen_mov_F0_vreg(dp, rn);
3195 if (delta_m) {
3196 rm = ((rm + delta_m) & (bank_mask - 1))
3197 | (rm & bank_mask);
3198 gen_mov_F1_vreg(dp, rm);
3199 }
3200 }
3201 }
3202 }
3203 break;
3204 case 0xc:
3205 case 0xd:
3206 if (dp && (insn & 0x03e00000) == 0x00400000) {
3207 /* two-register transfer */
3208 rn = (insn >> 16) & 0xf;
3209 rd = (insn >> 12) & 0xf;
3210 if (dp) {
3211 VFP_DREG_M(rm, insn);
3212 } else {
3213 rm = VFP_SREG_M(insn);
3214 }
3215
3216 if (insn & ARM_CP_RW_BIT) {
3217 /* vfp->arm */
3218 if (dp) {
3219 gen_mov_F0_vreg(0, rm * 2);
3220 tmp = gen_vfp_mrs();
3221 store_reg(s, rd, tmp);
3222 gen_mov_F0_vreg(0, rm * 2 + 1);
3223 tmp = gen_vfp_mrs();
3224 store_reg(s, rn, tmp);
3225 } else {
3226 gen_mov_F0_vreg(0, rm);
3227 tmp = gen_vfp_mrs();
3228 store_reg(s, rn, tmp);
3229 gen_mov_F0_vreg(0, rm + 1);
3230 tmp = gen_vfp_mrs();
3231 store_reg(s, rd, tmp);
3232 }
3233 } else {
3234 /* arm->vfp */
3235 if (dp) {
3236 tmp = load_reg(s, rd);
3237 gen_vfp_msr(tmp);
3238 gen_mov_vreg_F0(0, rm * 2);
3239 tmp = load_reg(s, rn);
3240 gen_vfp_msr(tmp);
3241 gen_mov_vreg_F0(0, rm * 2 + 1);
3242 } else {
3243 tmp = load_reg(s, rn);
3244 gen_vfp_msr(tmp);
3245 gen_mov_vreg_F0(0, rm);
3246 tmp = load_reg(s, rd);
3247 gen_vfp_msr(tmp);
3248 gen_mov_vreg_F0(0, rm + 1);
3249 }
3250 }
3251 } else {
3252 /* Load/store */
3253 rn = (insn >> 16) & 0xf;
3254 if (dp)
3255 VFP_DREG_D(rd, insn);
3256 else
3257 rd = VFP_SREG_D(insn);
3258 if (s->thumb && rn == 15) {
3259 gen_op_movl_T1_im(s->pc & ~2);
3260 } else {
3261 gen_movl_T1_reg(s, rn);
3262 }
3263 if ((insn & 0x01200000) == 0x01000000) {
3264 /* Single load/store */
3265 offset = (insn & 0xff) << 2;
3266 if ((insn & (1 << 23)) == 0)
3267 offset = -offset;
3268 gen_op_addl_T1_im(offset);
3269 if (insn & (1 << 20)) {
3270 gen_vfp_ld(s, dp);
3271 gen_mov_vreg_F0(dp, rd);
3272 } else {
3273 gen_mov_F0_vreg(dp, rd);
3274 gen_vfp_st(s, dp);
3275 }
3276 } else {
3277 /* load/store multiple */
3278 if (dp)
3279 n = (insn >> 1) & 0x7f;
3280 else
3281 n = insn & 0xff;
3282
3283 if (insn & (1 << 24)) /* pre-decrement */
3284 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3285
3286 if (dp)
3287 offset = 8;
3288 else
3289 offset = 4;
3290 for (i = 0; i < n; i++) {
3291 if (insn & ARM_CP_RW_BIT) {
3292 /* load */
3293 gen_vfp_ld(s, dp);
3294 gen_mov_vreg_F0(dp, rd + i);
3295 } else {
3296 /* store */
3297 gen_mov_F0_vreg(dp, rd + i);
3298 gen_vfp_st(s, dp);
3299 }
3300 gen_op_addl_T1_im(offset);
3301 }
3302 if (insn & (1 << 21)) {
3303 /* writeback */
3304 if (insn & (1 << 24))
3305 offset = -offset * n;
3306 else if (dp && (insn & 1))
3307 offset = 4;
3308 else
3309 offset = 0;
3310
3311 if (offset != 0)
3312 gen_op_addl_T1_im(offset);
3313 gen_movl_reg_T1(s, rn);
3314 }
3315 }
3316 }
3317 break;
3318 default:
3319 /* Should never happen. */
3320 return 1;
3321 }
3322 return 0;
3323}
3324
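/* Jump to 'dest': if the target lies on the same guest page as this TB it
   is chained directly via goto_tb, otherwise we just set the PC and exit. */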
3325static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3326{
3327 TranslationBlock *tb;
3328
3329 tb = s->tb;
3330 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3331 tcg_gen_goto_tb(n);
3332 gen_set_pc_im(dest);
3333 tcg_gen_exit_tb((long)tb + n);
3334 } else {
3335 gen_set_pc_im(dest);
3336 tcg_gen_exit_tb(0);
3337 }
3338}
3339
3340static inline void gen_jmp (DisasContext *s, uint32_t dest)
3341{
3342 if (unlikely(s->singlestep_enabled)) {
3343 /* An indirect jump so that we still trigger the debug exception. */
3344 if (s->thumb)
3345 dest |= 1;
3346 gen_bx_im(s, dest);
3347 } else {
3348 gen_goto_tb(s, 0, dest);
3349 s->is_jmp = DISAS_TB_JUMP;
3350 }
3351}
3352
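/* Signed 16x16->32 multiply; x and y select the top or bottom halfword of
   t0 and t1 respectively (as used by the SMULxy/SMLAxy style encodings). */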
3353static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3354{
3355 if (x)
3356 tcg_gen_sari_i32(t0, t0, 16);
3357 else
3358 gen_sxth(t0);
3359 if (y)
3360 tcg_gen_sari_i32(t1, t1, 16);
3361 else
3362 gen_sxth(t1);
3363 tcg_gen_mul_i32(t0, t0, t1);
3364}
3365
3366/* Return the mask of PSR bits set by a MSR instruction. */
3367static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3368 uint32_t mask;
3369
3370 mask = 0;
3371 if (flags & (1 << 0))
3372 mask |= 0xff;
3373 if (flags & (1 << 1))
3374 mask |= 0xff00;
3375 if (flags & (1 << 2))
3376 mask |= 0xff0000;
3377 if (flags & (1 << 3))
3378 mask |= 0xff000000;
3379
3380 /* Mask out undefined bits. */
3381 mask &= ~CPSR_RESERVED;
3382 if (!arm_feature(env, ARM_FEATURE_V6))
3383 mask &= ~(CPSR_E | CPSR_GE);
3384 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3385 mask &= ~CPSR_IT;
3386 /* Mask out execution state bits. */
3387 if (!spsr)
3388 mask &= ~CPSR_EXEC;
3389 /* Mask out privileged bits. */
3390 if (IS_USER(s))
3391 mask &= CPSR_USER;
3392 return mask;
3393}
3394
3395/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3396static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3397{
3398 TCGv tmp;
3399 if (spsr) {
3400 /* ??? This is also undefined in system mode. */
3401 if (IS_USER(s))
3402 return 1;
3403
3404 tmp = load_cpu_field(spsr);
3405 tcg_gen_andi_i32(tmp, tmp, ~mask);
3406 tcg_gen_andi_i32(t0, t0, mask);
3407 tcg_gen_or_i32(tmp, tmp, t0);
3408 store_cpu_field(tmp, spsr);
3409 } else {
3410 gen_set_cpsr(t0, mask);
3411 }
3412 dead_tmp(t0);
3413 gen_lookup_tb(s);
3414 return 0;
3415}
3416
3417/* Returns nonzero if access to the PSR is not permitted. */
3418static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3419{
3420 TCGv tmp;
3421 tmp = new_tmp();
3422 tcg_gen_movi_i32(tmp, val);
3423 return gen_set_psr(s, mask, spsr, tmp);
3424}
3425
3426/* Generate an old-style exception return. Marks pc as dead. */
3427static void gen_exception_return(DisasContext *s, TCGv pc)
3428{
3429 TCGv tmp;
3430 store_reg(s, 15, pc);
3431 tmp = load_cpu_field(spsr);
3432 gen_set_cpsr(tmp, 0xffffffff);
3433 dead_tmp(tmp);
3434 s->is_jmp = DISAS_UPDATE;
3435}
3436
3437/* Generate a v6 exception return. Marks both values as dead. */
3438static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3439{
3440 gen_set_cpsr(cpsr, 0xffffffff);
3441 dead_tmp(cpsr);
3442 store_reg(s, 15, pc);
3443 s->is_jmp = DISAS_UPDATE;
3444}
3445
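/* Write the current Thumb-2 conditional execution (IT) state back into the
   condexec_bits CPU field, if any bits are active. */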
3446static inline void
3447gen_set_condexec (DisasContext *s)
3448{
3449 if (s->condexec_mask) {
3450 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3451 TCGv tmp = new_tmp();
3452 tcg_gen_movi_i32(tmp, val);
3453 store_cpu_field(tmp, condexec_bits);
3454 }
3455}
3456
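/* Handle the hint space: WFI ends the TB and waits for an interrupt, while
   WFE and SEV are currently treated as NOPs. */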
3457static void gen_nop_hint(DisasContext *s, int val)
3458{
3459 switch (val) {
3460 case 3: /* wfi */
3461 gen_set_pc_im(s->pc);
3462 s->is_jmp = DISAS_WFI;
3463 break;
3464 case 2: /* wfe */
3465 case 4: /* sev */
3466 /* TODO: Implement SEV and WFE. May help SMP performance. */
3467 default: /* nop */
3468 break;
3469 }
3470}
3471
3472#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3473
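/* Per-lane add of two packed 32-bit values; size selects 8/16/32-bit
   elements.  Returns nonzero for an unsupported size. */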
3474static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3475{
3476 switch (size) {
dd8fbd78
FN
3477 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3478 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3479 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3480 default: return 1;
3481 }
3482 return 0;
3483}
3484
3485static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3486{
3487 switch (size) {
3488 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3489 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3490 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3491 default: return;
3492 }
3493}
3494
3495/* 32-bit pairwise ops end up the same as the elementwise versions. */
3496#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3497#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3498#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3499#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3500
3501/* FIXME: This is wrong. They set the wrong overflow bit. */
3502#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3503#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3504#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3505#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3506
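/* Expand a Neon integer helper call for the current (size, u) combination;
   the _ENV variant is for helpers that also take cpu_env. */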
3507#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3508 switch ((size << 1) | u) { \
3509 case 0: \
3510 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3511 break; \
3512 case 1: \
3513 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3514 break; \
3515 case 2: \
3516 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3517 break; \
3518 case 3: \
3519 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3520 break; \
3521 case 4: \
3522 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3523 break; \
3524 case 5: \
3525 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3526 break; \
3527 default: return 1; \
3528 }} while (0)
3529
3530#define GEN_NEON_INTEGER_OP(name) do { \
3531 switch ((size << 1) | u) { \
3532 case 0: \
3533 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3534 break; \
3535 case 1: \
3536 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3537 break; \
3538 case 2: \
3539 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3540 break; \
3541 case 3: \
3542 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3543 break; \
3544 case 4: \
3545 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3546 break; \
3547 case 5: \
3548 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3549 break; \
3550 default: return 1; \
3551 }} while (0)
3552
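/* Spill/reload Neon intermediate values via the vfp.scratch[] slots in
   CPUARMState. */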
3553static TCGv neon_load_scratch(int scratch)
3554{
3555 TCGv tmp = new_tmp();
3556 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3557 return tmp;
3558}
3559
3560static void neon_store_scratch(int scratch, TCGv var)
3561{
3562 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3563 dead_tmp(var);
3564}
3565
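/* Fetch a Neon scalar operand as a 32-bit value.  For 16-bit scalars the
   selected halfword is duplicated into both halves of the result. */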
3566static inline TCGv neon_get_scalar(int size, int reg)
3567{
3568 TCGv tmp;
3569 if (size == 1) {
3570 tmp = neon_load_reg(reg >> 1, reg & 1);
3571 } else {
3572 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3573 if (reg & 1) {
3574 gen_neon_dup_low16(tmp);
3575 } else {
3576 gen_neon_dup_high16(tmp);
3577 }
3578 }
3579 return tmp;
3580}
3581
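/* Byte-wise unzip of a pair of 32-bit values: even-numbered bytes are
   gathered into t0 and odd-numbered bytes into t1. */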
3582static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3583{
3584 TCGv rd, rm, tmp;
3585
3586 rd = new_tmp();
3587 rm = new_tmp();
3588 tmp = new_tmp();
3589
3590 tcg_gen_andi_i32(rd, t0, 0xff);
3591 tcg_gen_shri_i32(tmp, t0, 8);
3592 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3593 tcg_gen_or_i32(rd, rd, tmp);
3594 tcg_gen_shli_i32(tmp, t1, 16);
3595 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3596 tcg_gen_or_i32(rd, rd, tmp);
3597 tcg_gen_shli_i32(tmp, t1, 8);
3598 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3599 tcg_gen_or_i32(rd, rd, tmp);
3600
3601 tcg_gen_shri_i32(rm, t0, 8);
3602 tcg_gen_andi_i32(rm, rm, 0xff);
3603 tcg_gen_shri_i32(tmp, t0, 16);
3604 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3605 tcg_gen_or_i32(rm, rm, tmp);
3606 tcg_gen_shli_i32(tmp, t1, 8);
3607 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3608 tcg_gen_or_i32(rm, rm, tmp);
3609 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3610 tcg_gen_or_i32(t1, rm, tmp);
3611 tcg_gen_mov_i32(t0, rd);
3612
3613 dead_tmp(tmp);
3614 dead_tmp(rm);
3615 dead_tmp(rd);
3616}
3617
3618static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3619{
3620 TCGv rd, rm, tmp;
3621
3622 rd = new_tmp();
3623 rm = new_tmp();
3624 tmp = new_tmp();
3625
3626 tcg_gen_andi_i32(rd, t0, 0xff);
3627 tcg_gen_shli_i32(tmp, t1, 8);
3628 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3629 tcg_gen_or_i32(rd, rd, tmp);
3630 tcg_gen_shli_i32(tmp, t0, 16);
3631 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3632 tcg_gen_or_i32(rd, rd, tmp);
3633 tcg_gen_shli_i32(tmp, t1, 24);
3634 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3635 tcg_gen_or_i32(rd, rd, tmp);
3636
3637 tcg_gen_andi_i32(rm, t1, 0xff000000);
3638 tcg_gen_shri_i32(tmp, t0, 8);
3639 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3640 tcg_gen_or_i32(rm, rm, tmp);
3641 tcg_gen_shri_i32(tmp, t1, 8);
3642 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3643 tcg_gen_or_i32(rm, rm, tmp);
3644 tcg_gen_shri_i32(tmp, t0, 16);
3645 tcg_gen_andi_i32(tmp, tmp, 0xff);
3646 tcg_gen_or_i32(t1, rm, tmp);
3647 tcg_gen_mov_i32(t0, rd);
3648
3649 dead_tmp(tmp);
3650 dead_tmp(rm);
3651 dead_tmp(rd);
3652}
3653
3654static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3655{
3656 TCGv tmp, tmp2;
3657
3658 tmp = new_tmp();
3659 tmp2 = new_tmp();
3660
3661 tcg_gen_andi_i32(tmp, t0, 0xffff);
3662 tcg_gen_shli_i32(tmp2, t1, 16);
3663 tcg_gen_or_i32(tmp, tmp, tmp2);
3664 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3665 tcg_gen_shri_i32(tmp2, t0, 16);
3666 tcg_gen_or_i32(t1, t1, tmp2);
3667 tcg_gen_mov_i32(t0, tmp);
3668
3669 dead_tmp(tmp2);
3670 dead_tmp(tmp);
3671}
3672
3673static void gen_neon_unzip(int reg, int q, int tmp, int size)
3674{
3675 int n;
3676 TCGv t0, t1;
3677
3678 for (n = 0; n < q + 1; n += 2) {
3679 t0 = neon_load_reg(reg, n);
3680 t1 = neon_load_reg(reg, n + 1);
3681 switch (size) {
3682 case 0: gen_neon_unzip_u8(t0, t1); break;
3683 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
3684 case 2: /* no-op */; break;
3685 default: abort();
3686 }
3687 neon_store_scratch(tmp + n, t0);
3688 neon_store_scratch(tmp + n + 1, t1);
3689 }
3690}
3691
3692static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3693{
3694 TCGv rd, tmp;
3695
3696 rd = new_tmp();
3697 tmp = new_tmp();
3698
3699 tcg_gen_shli_i32(rd, t0, 8);
3700 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3701 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3702 tcg_gen_or_i32(rd, rd, tmp);
3703
3704 tcg_gen_shri_i32(t1, t1, 8);
3705 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3706 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3707 tcg_gen_or_i32(t1, t1, tmp);
3708 tcg_gen_mov_i32(t0, rd);
3709
3710 dead_tmp(tmp);
3711 dead_tmp(rd);
3712}
3713
3714static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3715{
3716 TCGv rd, tmp;
3717
3718 rd = new_tmp();
3719 tmp = new_tmp();
3720
3721 tcg_gen_shli_i32(rd, t0, 16);
3722 tcg_gen_andi_i32(tmp, t1, 0xffff);
3723 tcg_gen_or_i32(rd, rd, tmp);
3724 tcg_gen_shri_i32(t1, t1, 16);
3725 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3726 tcg_gen_or_i32(t1, t1, tmp);
3727 tcg_gen_mov_i32(t0, rd);
3728
3729 dead_tmp(tmp);
3730 dead_tmp(rd);
3731}
3732
3733
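/* Parameters of the Neon "load/store multiple structures" forms, indexed
   by the op field: register count, interleave factor and register spacing. */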
3734static struct {
3735 int nregs;
3736 int interleave;
3737 int spacing;
3738} neon_ls_element_type[11] = {
3739 {4, 4, 1},
3740 {4, 4, 2},
3741 {4, 1, 1},
3742 {4, 2, 1},
3743 {3, 3, 1},
3744 {3, 3, 2},
3745 {3, 1, 1},
3746 {1, 1, 1},
3747 {2, 2, 1},
3748 {2, 2, 2},
3749 {2, 1, 1}
3750};
3751
3752/* Translate a NEON load/store element instruction. Return nonzero if the
3753 instruction is invalid. */
3754static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3755{
3756 int rd, rn, rm;
3757 int op;
3758 int nregs;
3759 int interleave;
3760 int stride;
3761 int size;
3762 int reg;
3763 int pass;
3764 int load;
3765 int shift;
3766 int n;
3767 TCGv addr;
3768 TCGv tmp;
3769 TCGv tmp2;
3770
3771 if (!vfp_enabled(env))
3772 return 1;
3773 VFP_DREG_D(rd, insn);
3774 rn = (insn >> 16) & 0xf;
3775 rm = insn & 0xf;
3776 load = (insn & (1 << 21)) != 0;
3777 addr = new_tmp();
3778 if ((insn & (1 << 23)) == 0) {
3779 /* Load store all elements. */
3780 op = (insn >> 8) & 0xf;
3781 size = (insn >> 6) & 3;
3782 if (op > 10 || size == 3)
3783 return 1;
3784 nregs = neon_ls_element_type[op].nregs;
3785 interleave = neon_ls_element_type[op].interleave;
3786 tcg_gen_mov_i32(addr, cpu_R[rn]);
3787 stride = (1 << size) * interleave;
3788 for (reg = 0; reg < nregs; reg++) {
3789 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3790 tcg_gen_addi_i32(addr, cpu_R[rn], (1 << size) * reg);
3791 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3792 tcg_gen_addi_i32(addr, cpu_R[rn], 1 << size);
3793 }
3794 for (pass = 0; pass < 2; pass++) {
3795 if (size == 2) {
3796 if (load) {
3797 tmp = gen_ld32(addr, IS_USER(s));
3798 neon_store_reg(rd, pass, tmp);
3799 } else {
3800 tmp = neon_load_reg(rd, pass);
3801 gen_st32(tmp, addr, IS_USER(s));
3802 }
3803 tcg_gen_addi_i32(addr, addr, stride);
3804 } else if (size == 1) {
3805 if (load) {
3806 tmp = gen_ld16u(addr, IS_USER(s));
3807 tcg_gen_addi_i32(addr, addr, stride);
3808 tmp2 = gen_ld16u(addr, IS_USER(s));
3809 tcg_gen_addi_i32(addr, addr, stride);
3810 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3811 dead_tmp(tmp2);
3812 neon_store_reg(rd, pass, tmp);
3813 } else {
3814 tmp = neon_load_reg(rd, pass);
3815 tmp2 = new_tmp();
3816 tcg_gen_shri_i32(tmp2, tmp, 16);
3817 gen_st16(tmp, addr, IS_USER(s));
3818 tcg_gen_addi_i32(addr, addr, stride);
3819 gen_st16(tmp2, addr, IS_USER(s));
3820 tcg_gen_addi_i32(addr, addr, stride);
3821 }
3822 } else /* size == 0 */ {
3823 if (load) {
3824 TCGV_UNUSED(tmp2);
3825 for (n = 0; n < 4; n++) {
3826 tmp = gen_ld8u(addr, IS_USER(s));
3827 tcg_gen_addi_i32(addr, addr, stride);
3828 if (n == 0) {
3829 tmp2 = tmp;
3830 } else {
3831 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3832 dead_tmp(tmp);
3833 }
3834 }
3835 neon_store_reg(rd, pass, tmp2);
3836 } else {
3837 tmp2 = neon_load_reg(rd, pass);
3838 for (n = 0; n < 4; n++) {
3839 tmp = new_tmp();
3840 if (n == 0) {
3841 tcg_gen_mov_i32(tmp, tmp2);
3842 } else {
3843 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3844 }
3845 gen_st8(tmp, addr, IS_USER(s));
3846 tcg_gen_addi_i32(addr, addr, stride);
3847 }
3848 dead_tmp(tmp2);
3849 }
3850 }
3851 }
3852 rd += neon_ls_element_type[op].spacing;
3853 }
3854 stride = nregs * 8;
3855 } else {
3856 size = (insn >> 10) & 3;
3857 if (size == 3) {
3858 /* Load single element to all lanes. */
3859 if (!load)
3860 return 1;
3861 size = (insn >> 6) & 3;
3862 nregs = ((insn >> 8) & 3) + 1;
3863 stride = (insn & (1 << 5)) ? 2 : 1;
3864 tcg_gen_mov_i32(addr, cpu_R[rn]);
3865 for (reg = 0; reg < nregs; reg++) {
3866 switch (size) {
3867 case 0:
3868 tmp = gen_ld8u(addr, IS_USER(s));
3869 gen_neon_dup_u8(tmp, 0);
3870 break;
3871 case 1:
3872 tmp = gen_ld16u(addr, IS_USER(s));
3873 gen_neon_dup_low16(tmp);
3874 break;
3875 case 2:
3876 tmp = gen_ld32(addr, IS_USER(s));
3877 break;
3878 case 3:
3879 return 1;
3880 default: /* Avoid compiler warnings. */
3881 abort();
3882 }
3883 tcg_gen_addi_i32(addr, addr, 1 << size);
3884 tmp2 = new_tmp();
3885 tcg_gen_mov_i32(tmp2, tmp);
3886 neon_store_reg(rd, 0, tmp2);
3887 neon_store_reg(rd, 1, tmp);
3888 rd += stride;
3889 }
3890 stride = (1 << size) * nregs;
3891 } else {
3892 /* Single element. */
3893 pass = (insn >> 7) & 1;
3894 switch (size) {
3895 case 0:
3896 shift = ((insn >> 5) & 3) * 8;
3897 stride = 1;
3898 break;
3899 case 1:
3900 shift = ((insn >> 6) & 1) * 16;
3901 stride = (insn & (1 << 5)) ? 2 : 1;
3902 break;
3903 case 2:
3904 shift = 0;
3905 stride = (insn & (1 << 6)) ? 2 : 1;
3906 break;
3907 default:
3908 abort();
3909 }
3910 nregs = ((insn >> 8) & 3) + 1;
3911 tcg_gen_mov_i32(addr, cpu_R[rn]);
3912 for (reg = 0; reg < nregs; reg++) {
3913 if (load) {
3914 switch (size) {
3915 case 0:
3916 tmp = gen_ld8u(addr, IS_USER(s));
3917 break;
3918 case 1:
3919 tmp = gen_ld16u(addr, IS_USER(s));
3920 break;
3921 case 2:
3922 tmp = gen_ld32(addr, IS_USER(s));
3923 break;
3924 default: /* Avoid compiler warnings. */
3925 abort();
3926 }
3927 if (size != 2) {
3928 tmp2 = neon_load_reg(rd, pass);
3929 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3930 dead_tmp(tmp2);
3931 }
3932 neon_store_reg(rd, pass, tmp);
3933 } else { /* Store */
3934 tmp = neon_load_reg(rd, pass);
3935 if (shift)
3936 tcg_gen_shri_i32(tmp, tmp, shift);
3937 switch (size) {
3938 case 0:
3939 gen_st8(tmp, addr, IS_USER(s));
3940 break;
3941 case 1:
3942 gen_st16(tmp, addr, IS_USER(s));
3943 break;
3944 case 2:
3945 gen_st32(tmp, addr, IS_USER(s));
3946 break;
3947 }
3948 }
3949 rd += stride;
3950 tcg_gen_addi_i32(addr, addr, 1 << size);
3951 }
3952 stride = nregs * (1 << size);
3953 }
3954 }
3955 dead_tmp(addr);
3956 if (rm != 15) {
3957 TCGv base;
3958
3959 base = load_reg(s, rn);
9ee6e8bb 3960 if (rm == 13) {
b26eefb6 3961 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3962 } else {
b26eefb6
PB
3963 TCGv index;
3964 index = load_reg(s, rm);
3965 tcg_gen_add_i32(base, base, index);
3966 dead_tmp(index);
9ee6e8bb 3967 }
b26eefb6 3968 store_reg(s, rn, base);
9ee6e8bb
PB
3969 }
3970 return 0;
3971}
3b46e624 3972
8f8e3aa4
PB
3973/* Bitwise select. dest = c ? t : f. Clobbers t and f. */
3974static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3975{
3976 tcg_gen_and_i32(t, t, c);
3977 tcg_gen_bic_i32(f, f, c);
3978 tcg_gen_or_i32(dest, t, f);
3979}
3980
a7812ae4 3981static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3982{
3983 switch (size) {
3984 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3985 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3986 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3987 default: abort();
3988 }
3989}
3990
a7812ae4 3991static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3992{
3993 switch (size) {
3994 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3995 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3996 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3997 default: abort();
3998 }
3999}
4000
a7812ae4 4001static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4002{
4003 switch (size) {
4004 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4005 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4006 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4007 default: abort();
4008 }
4009}
4010
4011static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4012 int q, int u)
4013{
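    /* Rough reading of the parameters (based on the shift-narrow caller
       further down): q selects the rounding forms used by VRSHRN/VQRSHRN,
       u selects the unsigned helpers, and right shifts arrive here as
       negative shift counts. */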
4014 if (q) {
4015 if (u) {
4016 switch (size) {
4017 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4018 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4019 default: abort();
4020 }
4021 } else {
4022 switch (size) {
4023 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4024 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4025 default: abort();
4026 }
4027 }
4028 } else {
4029 if (u) {
4030 switch (size) {
 4031 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
 4032 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4033 default: abort();
4034 }
4035 } else {
4036 switch (size) {
4037 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4038 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4039 default: abort();
4040 }
4041 }
4042 }
4043}
4044
a7812ae4 4045static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4046{
4047 if (u) {
4048 switch (size) {
4049 case 0: gen_helper_neon_widen_u8(dest, src); break;
4050 case 1: gen_helper_neon_widen_u16(dest, src); break;
4051 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4052 default: abort();
4053 }
4054 } else {
4055 switch (size) {
4056 case 0: gen_helper_neon_widen_s8(dest, src); break;
4057 case 1: gen_helper_neon_widen_s16(dest, src); break;
4058 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4059 default: abort();
4060 }
4061 }
4062 dead_tmp(src);
4063}
4064
4065static inline void gen_neon_addl(int size)
4066{
4067 switch (size) {
4068 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4069 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4070 case 2: tcg_gen_add_i64(CPU_V001); break;
4071 default: abort();
4072 }
4073}
4074
4075static inline void gen_neon_subl(int size)
4076{
4077 switch (size) {
4078 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4079 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4080 case 2: tcg_gen_sub_i64(CPU_V001); break;
4081 default: abort();
4082 }
4083}
4084
a7812ae4 4085static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4086{
4087 switch (size) {
4088 case 0: gen_helper_neon_negl_u16(var, var); break;
4089 case 1: gen_helper_neon_negl_u32(var, var); break;
4090 case 2: gen_helper_neon_negl_u64(var, var); break;
4091 default: abort();
4092 }
4093}
4094
a7812ae4 4095static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4096{
4097 switch (size) {
4098 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4099 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4100 default: abort();
4101 }
4102}
4103
a7812ae4 4104static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4105{
a7812ae4 4106 TCGv_i64 tmp;
ad69471c
PB
4107
4108 switch ((size << 1) | u) {
4109 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4110 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4111 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4112 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4113 case 4:
4114 tmp = gen_muls_i64_i32(a, b);
4115 tcg_gen_mov_i64(dest, tmp);
4116 break;
4117 case 5:
4118 tmp = gen_mulu_i64_i32(a, b);
4119 tcg_gen_mov_i64(dest, tmp);
4120 break;
4121 default: abort();
4122 }
ad69471c
PB
4123}
4124
9ee6e8bb
PB
4125/* Translate a NEON data processing instruction. Return nonzero if the
4126 instruction is invalid.
ad69471c
PB
4127 We process data in a mixture of 32-bit and 64-bit chunks.
4128 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4129
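/* A rough sketch of the data layout assumed below: each 64-bit D register
   is processed as two 32-bit "passes", so a D-sized operation loops over
   passes 0..1 and a Q-sized operation (q set) loops over passes 0..3,
   which covers registers rd and rd + 1. */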
9ee6e8bb
PB
4130static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4131{
4132 int op;
4133 int q;
4134 int rd, rn, rm;
4135 int size;
4136 int shift;
4137 int pass;
4138 int count;
4139 int pairwise;
4140 int u;
4141 int n;
4142 uint32_t imm;
8f8e3aa4
PB
4143 TCGv tmp;
4144 TCGv tmp2;
4145 TCGv tmp3;
a7812ae4 4146 TCGv_i64 tmp64;
9ee6e8bb
PB
4147
4148 if (!vfp_enabled(env))
4149 return 1;
4150 q = (insn & (1 << 6)) != 0;
4151 u = (insn >> 24) & 1;
4152 VFP_DREG_D(rd, insn);
4153 VFP_DREG_N(rn, insn);
4154 VFP_DREG_M(rm, insn);
4155 size = (insn >> 20) & 3;
4156 if ((insn & (1 << 23)) == 0) {
4157 /* Three register same length. */
4158 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4159 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4160 || op == 10 || op == 11 || op == 16)) {
4161 /* 64-bit element instructions. */
9ee6e8bb 4162 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4163 neon_load_reg64(cpu_V0, rn + pass);
4164 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4165 switch (op) {
4166 case 1: /* VQADD */
4167 if (u) {
ad69471c 4168 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4169 } else {
ad69471c 4170 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4171 }
9ee6e8bb
PB
4172 break;
4173 case 5: /* VQSUB */
4174 if (u) {
ad69471c
PB
4175 gen_helper_neon_sub_saturate_u64(CPU_V001);
4176 } else {
4177 gen_helper_neon_sub_saturate_s64(CPU_V001);
4178 }
4179 break;
4180 case 8: /* VSHL */
4181 if (u) {
4182 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4183 } else {
4184 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4185 }
4186 break;
4187 case 9: /* VQSHL */
4188 if (u) {
 4189 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
 4190 cpu_V1, cpu_V0);
 4191 } else {
 4192 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
 4193 cpu_V1, cpu_V0);
4194 }
4195 break;
4196 case 10: /* VRSHL */
4197 if (u) {
4198 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4199 } else {
ad69471c
PB
4200 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4201 }
4202 break;
4203 case 11: /* VQRSHL */
4204 if (u) {
4205 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4206 cpu_V1, cpu_V0);
4207 } else {
4208 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4209 cpu_V1, cpu_V0);
1e8d4eec 4210 }
9ee6e8bb
PB
4211 break;
4212 case 16:
4213 if (u) {
ad69471c 4214 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4215 } else {
ad69471c 4216 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4217 }
4218 break;
4219 default:
4220 abort();
2c0262af 4221 }
ad69471c 4222 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4223 }
9ee6e8bb 4224 return 0;
2c0262af 4225 }
9ee6e8bb
PB
4226 switch (op) {
4227 case 8: /* VSHL */
4228 case 9: /* VQSHL */
4229 case 10: /* VRSHL */
ad69471c 4230 case 11: /* VQRSHL */
9ee6e8bb 4231 {
ad69471c
PB
4232 int rtmp;
4233 /* Shift instruction operands are reversed. */
4234 rtmp = rn;
9ee6e8bb 4235 rn = rm;
ad69471c 4236 rm = rtmp;
9ee6e8bb
PB
4237 pairwise = 0;
4238 }
2c0262af 4239 break;
9ee6e8bb
PB
4240 case 20: /* VPMAX */
4241 case 21: /* VPMIN */
4242 case 23: /* VPADD */
4243 pairwise = 1;
2c0262af 4244 break;
9ee6e8bb
PB
4245 case 26: /* VPADD (float) */
4246 pairwise = (u && size < 2);
2c0262af 4247 break;
9ee6e8bb
PB
4248 case 30: /* VPMIN/VPMAX (float) */
4249 pairwise = u;
2c0262af 4250 break;
9ee6e8bb
PB
4251 default:
4252 pairwise = 0;
2c0262af 4253 break;
9ee6e8bb 4254 }
dd8fbd78 4255
9ee6e8bb
PB
4256 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4257
4258 if (pairwise) {
4259 /* Pairwise. */
4260 if (q)
4261 n = (pass & 1) * 2;
2c0262af 4262 else
9ee6e8bb
PB
4263 n = 0;
4264 if (pass < q + 1) {
dd8fbd78
FN
4265 tmp = neon_load_reg(rn, n);
4266 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4267 } else {
dd8fbd78
FN
4268 tmp = neon_load_reg(rm, n);
4269 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4270 }
4271 } else {
4272 /* Elementwise. */
dd8fbd78
FN
4273 tmp = neon_load_reg(rn, pass);
4274 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4275 }
4276 switch (op) {
4277 case 0: /* VHADD */
4278 GEN_NEON_INTEGER_OP(hadd);
4279 break;
4280 case 1: /* VQADD */
ad69471c 4281 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4282 break;
9ee6e8bb
PB
4283 case 2: /* VRHADD */
4284 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4285 break;
9ee6e8bb
PB
4286 case 3: /* Logic ops. */
4287 switch ((u << 2) | size) {
4288 case 0: /* VAND */
dd8fbd78 4289 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4290 break;
4291 case 1: /* BIC */
dd8fbd78 4292 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4293 break;
4294 case 2: /* VORR */
dd8fbd78 4295 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4296 break;
4297 case 3: /* VORN */
dd8fbd78
FN
4298 tcg_gen_not_i32(tmp2, tmp2);
4299 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4300 break;
4301 case 4: /* VEOR */
dd8fbd78 4302 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4303 break;
4304 case 5: /* VBSL */
dd8fbd78
FN
4305 tmp3 = neon_load_reg(rd, pass);
4306 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4307 dead_tmp(tmp3);
9ee6e8bb
PB
4308 break;
4309 case 6: /* VBIT */
dd8fbd78
FN
4310 tmp3 = neon_load_reg(rd, pass);
4311 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4312 dead_tmp(tmp3);
9ee6e8bb
PB
4313 break;
4314 case 7: /* VBIF */
dd8fbd78
FN
4315 tmp3 = neon_load_reg(rd, pass);
4316 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4317 dead_tmp(tmp3);
9ee6e8bb 4318 break;
2c0262af
FB
4319 }
4320 break;
9ee6e8bb
PB
4321 case 4: /* VHSUB */
4322 GEN_NEON_INTEGER_OP(hsub);
4323 break;
4324 case 5: /* VQSUB */
ad69471c 4325 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4326 break;
9ee6e8bb
PB
4327 case 6: /* VCGT */
4328 GEN_NEON_INTEGER_OP(cgt);
4329 break;
4330 case 7: /* VCGE */
4331 GEN_NEON_INTEGER_OP(cge);
4332 break;
4333 case 8: /* VSHL */
ad69471c 4334 GEN_NEON_INTEGER_OP(shl);
2c0262af 4335 break;
9ee6e8bb 4336 case 9: /* VQSHL */
ad69471c 4337 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4338 break;
9ee6e8bb 4339 case 10: /* VRSHL */
ad69471c 4340 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4341 break;
9ee6e8bb 4342 case 11: /* VQRSHL */
ad69471c 4343 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4344 break;
4345 case 12: /* VMAX */
4346 GEN_NEON_INTEGER_OP(max);
4347 break;
4348 case 13: /* VMIN */
4349 GEN_NEON_INTEGER_OP(min);
4350 break;
4351 case 14: /* VABD */
4352 GEN_NEON_INTEGER_OP(abd);
4353 break;
4354 case 15: /* VABA */
4355 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4356 dead_tmp(tmp2);
4357 tmp2 = neon_load_reg(rd, pass);
4358 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4359 break;
4360 case 16:
4361 if (!u) { /* VADD */
dd8fbd78 4362 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4363 return 1;
4364 } else { /* VSUB */
4365 switch (size) {
dd8fbd78
FN
4366 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4367 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4368 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4369 default: return 1;
4370 }
4371 }
4372 break;
4373 case 17:
4374 if (!u) { /* VTST */
4375 switch (size) {
dd8fbd78
FN
4376 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4377 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4378 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4379 default: return 1;
4380 }
4381 } else { /* VCEQ */
4382 switch (size) {
dd8fbd78
FN
4383 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4384 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4385 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4386 default: return 1;
4387 }
4388 }
4389 break;
4390 case 18: /* Multiply. */
4391 switch (size) {
dd8fbd78
FN
4392 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4393 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4394 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4395 default: return 1;
4396 }
dd8fbd78
FN
4397 dead_tmp(tmp2);
4398 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4399 if (u) { /* VMLS */
dd8fbd78 4400 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4401 } else { /* VMLA */
dd8fbd78 4402 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4403 }
4404 break;
4405 case 19: /* VMUL */
4406 if (u) { /* polynomial */
dd8fbd78 4407 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4408 } else { /* Integer */
4409 switch (size) {
dd8fbd78
FN
4410 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4411 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4412 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4413 default: return 1;
4414 }
4415 }
4416 break;
4417 case 20: /* VPMAX */
4418 GEN_NEON_INTEGER_OP(pmax);
4419 break;
4420 case 21: /* VPMIN */
4421 GEN_NEON_INTEGER_OP(pmin);
4422 break;
 4423 case 22: /* Multiply high. */
4424 if (!u) { /* VQDMULH */
4425 switch (size) {
dd8fbd78
FN
4426 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4427 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4428 default: return 1;
4429 }
 4430 } else { /* VQRDMULH */
4431 switch (size) {
dd8fbd78
FN
4432 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4433 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4434 default: return 1;
4435 }
4436 }
4437 break;
4438 case 23: /* VPADD */
4439 if (u)
4440 return 1;
4441 switch (size) {
dd8fbd78
FN
4442 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4443 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4444 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4445 default: return 1;
4446 }
4447 break;
 4448 case 26: /* Floating point arithmetic. */
4449 switch ((u << 2) | size) {
4450 case 0: /* VADD */
dd8fbd78 4451 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4452 break;
4453 case 2: /* VSUB */
dd8fbd78 4454 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4455 break;
4456 case 4: /* VPADD */
dd8fbd78 4457 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4458 break;
4459 case 6: /* VABD */
dd8fbd78 4460 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4461 break;
4462 default:
4463 return 1;
4464 }
4465 break;
4466 case 27: /* Float multiply. */
dd8fbd78 4467 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4468 if (!u) {
dd8fbd78
FN
4469 dead_tmp(tmp2);
4470 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4471 if (size == 0) {
dd8fbd78 4472 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4473 } else {
dd8fbd78 4474 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4475 }
4476 }
4477 break;
4478 case 28: /* Float compare. */
4479 if (!u) {
dd8fbd78 4480 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4481 } else {
9ee6e8bb 4482 if (size == 0)
dd8fbd78 4483 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4484 else
dd8fbd78 4485 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4486 }
2c0262af 4487 break;
9ee6e8bb
PB
4488 case 29: /* Float compare absolute. */
4489 if (!u)
4490 return 1;
4491 if (size == 0)
dd8fbd78 4492 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4493 else
dd8fbd78 4494 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4495 break;
9ee6e8bb
PB
4496 case 30: /* Float min/max. */
4497 if (size == 0)
dd8fbd78 4498 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4499 else
dd8fbd78 4500 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4501 break;
4502 case 31:
4503 if (size == 0)
dd8fbd78 4504 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4505 else
dd8fbd78 4506 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4507 break;
9ee6e8bb
PB
4508 default:
4509 abort();
2c0262af 4510 }
dd8fbd78
FN
4511 dead_tmp(tmp2);
4512
9ee6e8bb
PB
4513 /* Save the result. For elementwise operations we can put it
4514 straight into the destination register. For pairwise operations
4515 we have to be careful to avoid clobbering the source operands. */
4516 if (pairwise && rd == rm) {
dd8fbd78 4517 neon_store_scratch(pass, tmp);
9ee6e8bb 4518 } else {
dd8fbd78 4519 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4520 }
4521
4522 } /* for pass */
4523 if (pairwise && rd == rm) {
4524 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4525 tmp = neon_load_scratch(pass);
4526 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4527 }
4528 }
ad69471c 4529 /* End of 3 register same size operations. */
9ee6e8bb
PB
4530 } else if (insn & (1 << 4)) {
4531 if ((insn & 0x00380080) != 0) {
4532 /* Two registers and shift. */
4533 op = (insn >> 8) & 0xf;
4534 if (insn & (1 << 7)) {
4535 /* 64-bit shift. */
4536 size = 3;
4537 } else {
4538 size = 2;
4539 while ((insn & (1 << (size + 19))) == 0)
4540 size--;
4541 }
4542 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
 4543 /* To avoid excessive duplication of ops we implement shift
4544 by immediate using the variable shift operations. */
4545 if (op < 8) {
4546 /* Shift by immediate:
4547 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4548 /* Right shifts are encoded as N - shift, where N is the
4549 element size in bits. */
4550 if (op <= 4)
4551 shift = shift - (1 << (size + 3));
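                /* Worked example, assuming this decode: with 8-bit elements
                   (size == 0) an encoded field of 5 becomes 5 - 8 = -3, and
                   the variable-shift helpers treat the negative count as a
                   right shift by 3. */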
9ee6e8bb
PB
4552 if (size == 3) {
4553 count = q + 1;
4554 } else {
4555 count = q ? 4: 2;
4556 }
4557 switch (size) {
4558 case 0:
4559 imm = (uint8_t) shift;
4560 imm |= imm << 8;
4561 imm |= imm << 16;
4562 break;
4563 case 1:
4564 imm = (uint16_t) shift;
4565 imm |= imm << 16;
4566 break;
4567 case 2:
4568 case 3:
4569 imm = shift;
4570 break;
4571 default:
4572 abort();
4573 }
4574
4575 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4576 if (size == 3) {
4577 neon_load_reg64(cpu_V0, rm + pass);
4578 tcg_gen_movi_i64(cpu_V1, imm);
4579 switch (op) {
4580 case 0: /* VSHR */
4581 case 1: /* VSRA */
4582 if (u)
4583 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4584 else
ad69471c 4585 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4586 break;
ad69471c
PB
4587 case 2: /* VRSHR */
4588 case 3: /* VRSRA */
4589 if (u)
4590 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4591 else
ad69471c 4592 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4593 break;
ad69471c
PB
4594 case 4: /* VSRI */
4595 if (!u)
4596 return 1;
4597 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4598 break;
4599 case 5: /* VSHL, VSLI */
4600 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4601 break;
4602 case 6: /* VQSHL */
4603 if (u)
4604 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4605 else
ad69471c
PB
4606 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4607 break;
4608 case 7: /* VQSHLU */
4609 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4610 break;
9ee6e8bb 4611 }
ad69471c
PB
4612 if (op == 1 || op == 3) {
4613 /* Accumulate. */
4614 neon_load_reg64(cpu_V0, rd + pass);
4615 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4616 } else if (op == 4 || (op == 5 && u)) {
4617 /* Insert */
4618 cpu_abort(env, "VS[LR]I.64 not implemented");
4619 }
4620 neon_store_reg64(cpu_V0, rd + pass);
4621 } else { /* size < 3 */
 4622 /* Operands in tmp and tmp2. */
dd8fbd78
FN
4623 tmp = neon_load_reg(rm, pass);
4624 tmp2 = new_tmp();
4625 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4626 switch (op) {
4627 case 0: /* VSHR */
4628 case 1: /* VSRA */
4629 GEN_NEON_INTEGER_OP(shl);
4630 break;
4631 case 2: /* VRSHR */
4632 case 3: /* VRSRA */
4633 GEN_NEON_INTEGER_OP(rshl);
4634 break;
4635 case 4: /* VSRI */
4636 if (!u)
4637 return 1;
4638 GEN_NEON_INTEGER_OP(shl);
4639 break;
4640 case 5: /* VSHL, VSLI */
4641 switch (size) {
dd8fbd78
FN
4642 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4643 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4644 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4645 default: return 1;
4646 }
4647 break;
4648 case 6: /* VQSHL */
4649 GEN_NEON_INTEGER_OP_ENV(qshl);
4650 break;
4651 case 7: /* VQSHLU */
4652 switch (size) {
dd8fbd78
FN
4653 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4654 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4655 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4656 default: return 1;
4657 }
4658 break;
4659 }
dd8fbd78 4660 dead_tmp(tmp2);
ad69471c
PB
4661
4662 if (op == 1 || op == 3) {
4663 /* Accumulate. */
dd8fbd78
FN
4664 tmp2 = neon_load_reg(rd, pass);
4665 gen_neon_add(size, tmp2, tmp);
4666 dead_tmp(tmp2);
ad69471c
PB
4667 } else if (op == 4 || (op == 5 && u)) {
4668 /* Insert */
4669 switch (size) {
4670 case 0:
4671 if (op == 4)
4672 imm = 0xff >> -shift;
4673 else
4674 imm = (uint8_t)(0xff << shift);
4675 imm |= imm << 8;
4676 imm |= imm << 16;
4677 break;
4678 case 1:
4679 if (op == 4)
4680 imm = 0xffff >> -shift;
4681 else
4682 imm = (uint16_t)(0xffff << shift);
4683 imm |= imm << 16;
4684 break;
4685 case 2:
4686 if (op == 4)
4687 imm = 0xffffffffu >> -shift;
4688 else
4689 imm = 0xffffffffu << shift;
4690 break;
4691 default:
4692 abort();
4693 }
dd8fbd78
FN
4694 tmp2 = neon_load_reg(rd, pass);
4695 tcg_gen_andi_i32(tmp, tmp, imm);
4696 tcg_gen_andi_i32(tmp2, tmp2, ~imm);
4697 tcg_gen_or_i32(tmp, tmp, tmp2);
4698 dead_tmp(tmp2);
ad69471c 4699 }
dd8fbd78 4700 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4701 }
4702 } /* for pass */
4703 } else if (op < 10) {
ad69471c 4704 /* Shift by immediate and narrow:
9ee6e8bb
PB
4705 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4706 shift = shift - (1 << (size + 3));
4707 size++;
9ee6e8bb
PB
4708 switch (size) {
4709 case 1:
ad69471c 4710 imm = (uint16_t)shift;
9ee6e8bb 4711 imm |= imm << 16;
ad69471c 4712 tmp2 = tcg_const_i32(imm);
a7812ae4 4713 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4714 break;
4715 case 2:
ad69471c
PB
4716 imm = (uint32_t)shift;
4717 tmp2 = tcg_const_i32(imm);
a7812ae4 4718 TCGV_UNUSED_I64(tmp64);
4cc633c3 4719 break;
9ee6e8bb 4720 case 3:
a7812ae4
PB
4721 tmp64 = tcg_const_i64(shift);
4722 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4723 break;
4724 default:
4725 abort();
4726 }
4727
ad69471c
PB
4728 for (pass = 0; pass < 2; pass++) {
4729 if (size == 3) {
4730 neon_load_reg64(cpu_V0, rm + pass);
4731 if (q) {
4732 if (u)
a7812ae4 4733 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4734 else
a7812ae4 4735 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4736 } else {
4737 if (u)
a7812ae4 4738 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4739 else
a7812ae4 4740 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4741 }
2c0262af 4742 } else {
ad69471c
PB
4743 tmp = neon_load_reg(rm + pass, 0);
4744 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4745 tmp3 = neon_load_reg(rm + pass, 1);
4746 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4747 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4748 dead_tmp(tmp);
36aa55dc 4749 dead_tmp(tmp3);
9ee6e8bb 4750 }
ad69471c
PB
4751 tmp = new_tmp();
4752 if (op == 8 && !u) {
4753 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4754 } else {
ad69471c
PB
4755 if (op == 8)
4756 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4757 else
ad69471c
PB
4758 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4759 }
4760 if (pass == 0) {
4761 tmp2 = tmp;
4762 } else {
4763 neon_store_reg(rd, 0, tmp2);
4764 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4765 }
4766 } /* for pass */
4767 } else if (op == 10) {
4768 /* VSHLL */
ad69471c 4769 if (q || size == 3)
9ee6e8bb 4770 return 1;
ad69471c
PB
4771 tmp = neon_load_reg(rm, 0);
4772 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4773 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4774 if (pass == 1)
4775 tmp = tmp2;
4776
4777 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4778
9ee6e8bb
PB
4779 if (shift != 0) {
4780 /* The shift is less than the width of the source
ad69471c
PB
4781 type, so we can just shift the whole register. */
4782 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4783 if (size < 2 || !u) {
4784 uint64_t imm64;
4785 if (size == 0) {
4786 imm = (0xffu >> (8 - shift));
4787 imm |= imm << 16;
4788 } else {
4789 imm = 0xffff >> (16 - shift);
9ee6e8bb 4790 }
ad69471c
PB
4791 imm64 = imm | (((uint64_t)imm) << 32);
 4792 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
4793 }
4794 }
ad69471c 4795 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4796 }
4797 } else if (op == 15 || op == 16) {
4798 /* VCVT fixed-point. */
4799 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4800 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4801 if (op & 1) {
4802 if (u)
4373f3ce 4803 gen_vfp_ulto(0, shift);
9ee6e8bb 4804 else
4373f3ce 4805 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4806 } else {
4807 if (u)
4373f3ce 4808 gen_vfp_toul(0, shift);
9ee6e8bb 4809 else
4373f3ce 4810 gen_vfp_tosl(0, shift);
2c0262af 4811 }
4373f3ce 4812 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4813 }
4814 } else {
9ee6e8bb
PB
4815 return 1;
4816 }
4817 } else { /* (insn & 0x00380080) == 0 */
4818 int invert;
4819
4820 op = (insn >> 8) & 0xf;
4821 /* One register and immediate. */
4822 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4823 invert = (insn & (1 << 5)) != 0;
4824 switch (op) {
4825 case 0: case 1:
4826 /* no-op */
4827 break;
4828 case 2: case 3:
4829 imm <<= 8;
4830 break;
4831 case 4: case 5:
4832 imm <<= 16;
4833 break;
4834 case 6: case 7:
4835 imm <<= 24;
4836 break;
4837 case 8: case 9:
4838 imm |= imm << 16;
4839 break;
4840 case 10: case 11:
4841 imm = (imm << 8) | (imm << 24);
4842 break;
4843 case 12:
 4844 imm = (imm << 8) | 0xff;
4845 break;
4846 case 13:
4847 imm = (imm << 16) | 0xffff;
4848 break;
4849 case 14:
4850 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4851 if (invert)
4852 imm = ~imm;
4853 break;
4854 case 15:
4855 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4856 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4857 break;
4858 }
4859 if (invert)
4860 imm = ~imm;
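            /* Example of the expansion above (one interpretation of the
               cmode cases): op == 12 with an 8-bit value of 0xab yields
               0x0000abff; the same 32-bit constant is then written to every
               pass, and the invert bit selects the complemented (VMVN/VBIC)
               forms. */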
4861
9ee6e8bb
PB
4862 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4863 if (op & 1 && op < 12) {
ad69471c 4864 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4865 if (invert) {
4866 /* The immediate value has already been inverted, so
4867 BIC becomes AND. */
ad69471c 4868 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4869 } else {
ad69471c 4870 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4871 }
9ee6e8bb 4872 } else {
ad69471c
PB
4873 /* VMOV, VMVN. */
4874 tmp = new_tmp();
9ee6e8bb 4875 if (op == 14 && invert) {
ad69471c
PB
4876 uint32_t val;
4877 val = 0;
9ee6e8bb
PB
4878 for (n = 0; n < 4; n++) {
4879 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4880 val |= 0xff << (n * 8);
9ee6e8bb 4881 }
ad69471c
PB
4882 tcg_gen_movi_i32(tmp, val);
4883 } else {
4884 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4885 }
9ee6e8bb 4886 }
ad69471c 4887 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4888 }
4889 }
e4b3861d 4890 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4891 if (size != 3) {
4892 op = (insn >> 8) & 0xf;
4893 if ((insn & (1 << 6)) == 0) {
4894 /* Three registers of different lengths. */
4895 int src1_wide;
4896 int src2_wide;
4897 int prewiden;
4898 /* prewiden, src1_wide, src2_wide */
4899 static const int neon_3reg_wide[16][3] = {
4900 {1, 0, 0}, /* VADDL */
4901 {1, 1, 0}, /* VADDW */
4902 {1, 0, 0}, /* VSUBL */
4903 {1, 1, 0}, /* VSUBW */
4904 {0, 1, 1}, /* VADDHN */
4905 {0, 0, 0}, /* VABAL */
4906 {0, 1, 1}, /* VSUBHN */
4907 {0, 0, 0}, /* VABDL */
4908 {0, 0, 0}, /* VMLAL */
4909 {0, 0, 0}, /* VQDMLAL */
4910 {0, 0, 0}, /* VMLSL */
4911 {0, 0, 0}, /* VQDMLSL */
4912 {0, 0, 0}, /* Integer VMULL */
4913 {0, 0, 0}, /* VQDMULL */
4914 {0, 0, 0} /* Polynomial VMULL */
4915 };
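            /* Reading of the flags above (an interpretation, not part of the
               original table): prewiden means the 32-bit source element is
               widened to 64 bits before the operation, while src1_wide and
               src2_wide mean that operand is already a wide 64-bit value and
               is loaded with neon_load_reg64 below. */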
4916
4917 prewiden = neon_3reg_wide[op][0];
4918 src1_wide = neon_3reg_wide[op][1];
4919 src2_wide = neon_3reg_wide[op][2];
4920
ad69471c
PB
4921 if (size == 0 && (op == 9 || op == 11 || op == 13))
4922 return 1;
4923
9ee6e8bb
PB
4924 /* Avoid overlapping operands. Wide source operands are
4925 always aligned so will never overlap with wide
4926 destinations in problematic ways. */
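            /* For example, a VMLAL with rd == rn would otherwise have pass 0
               overwrite the rn half that pass 1 still needs, so that half is
               parked in scratch slot 2 and reloaded on the second pass. */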
8f8e3aa4 4927 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4928 tmp = neon_load_reg(rm, 1);
4929 neon_store_scratch(2, tmp);
8f8e3aa4 4930 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4931 tmp = neon_load_reg(rn, 1);
4932 neon_store_scratch(2, tmp);
9ee6e8bb 4933 }
a50f5b91 4934 TCGV_UNUSED(tmp3);
9ee6e8bb 4935 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4936 if (src1_wide) {
4937 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4938 TCGV_UNUSED(tmp);
9ee6e8bb 4939 } else {
ad69471c 4940 if (pass == 1 && rd == rn) {
dd8fbd78 4941 tmp = neon_load_scratch(2);
9ee6e8bb 4942 } else {
ad69471c
PB
4943 tmp = neon_load_reg(rn, pass);
4944 }
4945 if (prewiden) {
4946 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4947 }
4948 }
ad69471c
PB
4949 if (src2_wide) {
4950 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4951 TCGV_UNUSED(tmp2);
9ee6e8bb 4952 } else {
ad69471c 4953 if (pass == 1 && rd == rm) {
dd8fbd78 4954 tmp2 = neon_load_scratch(2);
9ee6e8bb 4955 } else {
ad69471c
PB
4956 tmp2 = neon_load_reg(rm, pass);
4957 }
4958 if (prewiden) {
4959 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4960 }
9ee6e8bb
PB
4961 }
4962 switch (op) {
4963 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4964 gen_neon_addl(size);
9ee6e8bb
PB
4965 break;
4966 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
ad69471c 4967 gen_neon_subl(size);
9ee6e8bb
PB
4968 break;
4969 case 5: case 7: /* VABAL, VABDL */
4970 switch ((size << 1) | u) {
ad69471c
PB
4971 case 0:
4972 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4973 break;
4974 case 1:
4975 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4976 break;
4977 case 2:
4978 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4979 break;
4980 case 3:
4981 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4982 break;
4983 case 4:
4984 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4985 break;
4986 case 5:
4987 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4988 break;
9ee6e8bb
PB
4989 default: abort();
4990 }
ad69471c
PB
4991 dead_tmp(tmp2);
4992 dead_tmp(tmp);
9ee6e8bb
PB
4993 break;
4994 case 8: case 9: case 10: case 11: case 12: case 13:
4995 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4996 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
4997 dead_tmp(tmp2);
4998 dead_tmp(tmp);
9ee6e8bb
PB
4999 break;
5000 case 14: /* Polynomial VMULL */
5001 cpu_abort(env, "Polynomial VMULL not implemented");
5002
5003 default: /* 15 is RESERVED. */
5004 return 1;
5005 }
5006 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5007 /* Accumulate. */
5008 if (op == 10 || op == 11) {
ad69471c 5009 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
5010 }
5011
9ee6e8bb 5012 if (op != 13) {
ad69471c 5013 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
5014 }
5015
5016 switch (op) {
5017 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 5018 gen_neon_addl(size);
9ee6e8bb
PB
5019 break;
5020 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5021 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5022 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5023 break;
9ee6e8bb
PB
 5024
5025 case 13: /* VQDMULL */
ad69471c 5026 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5027 break;
5028 default:
5029 abort();
5030 }
ad69471c 5031 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5032 } else if (op == 4 || op == 6) {
5033 /* Narrowing operation. */
ad69471c 5034 tmp = new_tmp();
9ee6e8bb
PB
5035 if (u) {
5036 switch (size) {
ad69471c
PB
5037 case 0:
5038 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5039 break;
5040 case 1:
5041 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5042 break;
5043 case 2:
5044 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5045 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5046 break;
9ee6e8bb
PB
5047 default: abort();
5048 }
5049 } else {
5050 switch (size) {
ad69471c
PB
5051 case 0:
5052 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5053 break;
5054 case 1:
5055 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5056 break;
5057 case 2:
5058 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5059 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5060 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5061 break;
9ee6e8bb
PB
5062 default: abort();
5063 }
5064 }
ad69471c
PB
5065 if (pass == 0) {
5066 tmp3 = tmp;
5067 } else {
5068 neon_store_reg(rd, 0, tmp3);
5069 neon_store_reg(rd, 1, tmp);
5070 }
9ee6e8bb
PB
5071 } else {
5072 /* Write back the result. */
ad69471c 5073 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5074 }
5075 }
5076 } else {
5077 /* Two registers and a scalar. */
5078 switch (op) {
5079 case 0: /* Integer VMLA scalar */
5080 case 1: /* Float VMLA scalar */
5081 case 4: /* Integer VMLS scalar */
5082 case 5: /* Floating point VMLS scalar */
5083 case 8: /* Integer VMUL scalar */
5084 case 9: /* Floating point VMUL scalar */
5085 case 12: /* VQDMULH scalar */
5086 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5087 tmp = neon_get_scalar(size, rm);
5088 neon_store_scratch(0, tmp);
9ee6e8bb 5089 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5090 tmp = neon_load_scratch(0);
5091 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5092 if (op == 12) {
5093 if (size == 1) {
dd8fbd78 5094 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5095 } else {
dd8fbd78 5096 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5097 }
5098 } else if (op == 13) {
5099 if (size == 1) {
dd8fbd78 5100 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5101 } else {
dd8fbd78 5102 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5103 }
5104 } else if (op & 1) {
dd8fbd78 5105 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5106 } else {
5107 switch (size) {
dd8fbd78
FN
5108 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5109 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5110 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5111 default: return 1;
5112 }
5113 }
dd8fbd78 5114 dead_tmp(tmp2);
9ee6e8bb
PB
5115 if (op < 8) {
5116 /* Accumulate. */
dd8fbd78 5117 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5118 switch (op) {
5119 case 0:
dd8fbd78 5120 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5121 break;
5122 case 1:
dd8fbd78 5123 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5124 break;
5125 case 4:
dd8fbd78 5126 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5127 break;
5128 case 5:
dd8fbd78 5129 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5130 break;
5131 default:
5132 abort();
5133 }
dd8fbd78 5134 dead_tmp(tmp2);
9ee6e8bb 5135 }
dd8fbd78 5136 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5137 }
5138 break;
 5139 case 2: /* VMLAL scalar */
5140 case 3: /* VQDMLAL scalar */
5141 case 6: /* VMLSL scalar */
5142 case 7: /* VQDMLSL scalar */
5143 case 10: /* VMULL scalar */
5144 case 11: /* VQDMULL scalar */
ad69471c
PB
5145 if (size == 0 && (op == 3 || op == 7 || op == 11))
5146 return 1;
5147
dd8fbd78
FN
5148 tmp2 = neon_get_scalar(size, rm);
5149 tmp3 = neon_load_reg(rn, 1);
ad69471c 5150
9ee6e8bb 5151 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5152 if (pass == 0) {
5153 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5154 } else {
dd8fbd78 5155 tmp = tmp3;
9ee6e8bb 5156 }
ad69471c 5157 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5158 dead_tmp(tmp);
9ee6e8bb 5159 if (op == 6 || op == 7) {
ad69471c
PB
5160 gen_neon_negl(cpu_V0, size);
5161 }
5162 if (op != 11) {
5163 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5164 }
9ee6e8bb
PB
5165 switch (op) {
5166 case 2: case 6:
ad69471c 5167 gen_neon_addl(size);
9ee6e8bb
PB
5168 break;
5169 case 3: case 7:
ad69471c
PB
5170 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5171 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5172 break;
5173 case 10:
5174 /* no-op */
5175 break;
5176 case 11:
ad69471c 5177 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5178 break;
5179 default:
5180 abort();
5181 }
ad69471c 5182 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5183 }
dd8fbd78
FN
5184
5185 dead_tmp(tmp2);
5186
9ee6e8bb
PB
5187 break;
5188 default: /* 14 and 15 are RESERVED */
5189 return 1;
5190 }
5191 }
5192 } else { /* size == 3 */
5193 if (!u) {
5194 /* Extract. */
9ee6e8bb 5195 imm = (insn >> 8) & 0xf;
ad69471c
PB
5196 count = q + 1;
5197
5198 if (imm > 7 && !q)
5199 return 1;
5200
5201 if (imm == 0) {
5202 neon_load_reg64(cpu_V0, rn);
5203 if (q) {
5204 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5205 }
ad69471c
PB
5206 } else if (imm == 8) {
5207 neon_load_reg64(cpu_V0, rn + 1);
5208 if (q) {
5209 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5210 }
ad69471c 5211 } else if (q) {
a7812ae4 5212 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5213 if (imm < 8) {
5214 neon_load_reg64(cpu_V0, rn);
a7812ae4 5215 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5216 } else {
5217 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5218 neon_load_reg64(tmp64, rm);
ad69471c
PB
5219 }
5220 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5221 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5222 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5223 if (imm < 8) {
5224 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5225 } else {
ad69471c
PB
5226 neon_load_reg64(cpu_V1, rm + 1);
5227 imm -= 8;
9ee6e8bb 5228 }
ad69471c 5229 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5230 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5231 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
ad69471c 5232 } else {
a7812ae4 5233 /* BUGFIX */
ad69471c 5234 neon_load_reg64(cpu_V0, rn);
a7812ae4 5235 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5236 neon_load_reg64(cpu_V1, rm);
a7812ae4 5237 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5238 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5239 }
5240 neon_store_reg64(cpu_V0, rd);
5241 if (q) {
5242 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5243 }
5244 } else if ((insn & (1 << 11)) == 0) {
5245 /* Two register misc. */
5246 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5247 size = (insn >> 18) & 3;
5248 switch (op) {
5249 case 0: /* VREV64 */
5250 if (size == 3)
5251 return 1;
5252 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5253 tmp = neon_load_reg(rm, pass * 2);
5254 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5255 switch (size) {
dd8fbd78
FN
5256 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5257 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5258 case 2: /* no-op */ break;
5259 default: abort();
5260 }
dd8fbd78 5261 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5262 if (size == 2) {
dd8fbd78 5263 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5264 } else {
9ee6e8bb 5265 switch (size) {
dd8fbd78
FN
5266 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5267 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5268 default: abort();
5269 }
dd8fbd78 5270 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5271 }
5272 }
5273 break;
5274 case 4: case 5: /* VPADDL */
5275 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5276 if (size == 3)
5277 return 1;
ad69471c
PB
5278 for (pass = 0; pass < q + 1; pass++) {
5279 tmp = neon_load_reg(rm, pass * 2);
5280 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5281 tmp = neon_load_reg(rm, pass * 2 + 1);
5282 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5283 switch (size) {
5284 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5285 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5286 case 2: tcg_gen_add_i64(CPU_V001); break;
5287 default: abort();
5288 }
9ee6e8bb
PB
5289 if (op >= 12) {
5290 /* Accumulate. */
ad69471c
PB
5291 neon_load_reg64(cpu_V1, rd + pass);
5292 gen_neon_addl(size);
9ee6e8bb 5293 }
ad69471c 5294 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5295 }
5296 break;
5297 case 33: /* VTRN */
5298 if (size == 2) {
5299 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5300 tmp = neon_load_reg(rm, n);
5301 tmp2 = neon_load_reg(rd, n + 1);
5302 neon_store_reg(rm, n, tmp2);
5303 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5304 }
5305 } else {
5306 goto elementwise;
5307 }
5308 break;
5309 case 34: /* VUZP */
 5310 /* Reg   Before        After
 5311    Rd    A3 A2 A1 A0   B2 B0 A2 A0
 5312    Rm    B3 B2 B1 B0   B3 B1 A3 A1
 5313 */
5314 if (size == 3)
5315 return 1;
5316 gen_neon_unzip(rd, q, 0, size);
5317 gen_neon_unzip(rm, q, 4, size);
5318 if (q) {
5319 static int unzip_order_q[8] =
5320 {0, 2, 4, 6, 1, 3, 5, 7};
5321 for (n = 0; n < 8; n++) {
5322 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5323 tmp = neon_load_scratch(unzip_order_q[n]);
5324 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5325 }
5326 } else {
5327 static int unzip_order[4] =
5328 {0, 4, 1, 5};
5329 for (n = 0; n < 4; n++) {
5330 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5331 tmp = neon_load_scratch(unzip_order[n]);
5332 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5333 }
5334 }
5335 break;
5336 case 35: /* VZIP */
 5337 /* Reg   Before        After
 5338    Rd    A3 A2 A1 A0   B1 A1 B0 A0
 5339    Rm    B3 B2 B1 B0   B3 A3 B2 A2
 5340 */
5341 if (size == 3)
5342 return 1;
5343 count = (q ? 4 : 2);
5344 for (n = 0; n < count; n++) {
dd8fbd78
FN
5345 tmp = neon_load_reg(rd, n);
 5346 tmp2 = neon_load_reg(rm, n);
9ee6e8bb 5347 switch (size) {
dd8fbd78
FN
5348 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5349 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5350 case 2: /* no-op */; break;
5351 default: abort();
5352 }
dd8fbd78
FN
5353 neon_store_scratch(n * 2, tmp);
5354 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5355 }
5356 for (n = 0; n < count * 2; n++) {
5357 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5358 tmp = neon_load_scratch(n);
5359 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5360 }
5361 break;
5362 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5363 if (size == 3)
5364 return 1;
a50f5b91 5365 TCGV_UNUSED(tmp2);
9ee6e8bb 5366 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5367 neon_load_reg64(cpu_V0, rm + pass);
5368 tmp = new_tmp();
9ee6e8bb 5369 if (op == 36 && q == 0) {
ad69471c 5370 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5371 } else if (q) {
ad69471c 5372 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5373 } else {
ad69471c
PB
5374 gen_neon_narrow_sats(size, tmp, cpu_V0);
5375 }
5376 if (pass == 0) {
5377 tmp2 = tmp;
5378 } else {
5379 neon_store_reg(rd, 0, tmp2);
5380 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5381 }
9ee6e8bb
PB
5382 }
5383 break;
5384 case 38: /* VSHLL */
ad69471c 5385 if (q || size == 3)
9ee6e8bb 5386 return 1;
ad69471c
PB
5387 tmp = neon_load_reg(rm, 0);
5388 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5389 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5390 if (pass == 1)
5391 tmp = tmp2;
5392 gen_neon_widen(cpu_V0, tmp, size, 1);
5393 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5394 }
5395 break;
5396 default:
5397 elementwise:
5398 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5399 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5400 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5401 neon_reg_offset(rm, pass));
dd8fbd78 5402 TCGV_UNUSED(tmp);
9ee6e8bb 5403 } else {
dd8fbd78 5404 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5405 }
5406 switch (op) {
5407 case 1: /* VREV32 */
5408 switch (size) {
dd8fbd78
FN
5409 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5410 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5411 default: return 1;
5412 }
5413 break;
5414 case 2: /* VREV16 */
5415 if (size != 0)
5416 return 1;
dd8fbd78 5417 gen_rev16(tmp);
9ee6e8bb 5418 break;
9ee6e8bb
PB
5419 case 8: /* CLS */
5420 switch (size) {
dd8fbd78
FN
5421 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5422 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5423 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5424 default: return 1;
5425 }
5426 break;
5427 case 9: /* CLZ */
5428 switch (size) {
dd8fbd78
FN
5429 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5430 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5431 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5432 default: return 1;
5433 }
5434 break;
5435 case 10: /* CNT */
5436 if (size != 0)
5437 return 1;
dd8fbd78 5438 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5439 break;
5440 case 11: /* VNOT */
5441 if (size != 0)
5442 return 1;
dd8fbd78 5443 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5444 break;
5445 case 14: /* VQABS */
5446 switch (size) {
dd8fbd78
FN
5447 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5448 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5449 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5450 default: return 1;
5451 }
5452 break;
5453 case 15: /* VQNEG */
5454 switch (size) {
dd8fbd78
FN
5455 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5456 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5457 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5458 default: return 1;
5459 }
5460 break;
5461 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5462 tmp2 = tcg_const_i32(0);
9ee6e8bb 5463 switch(size) {
dd8fbd78
FN
5464 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5465 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5466 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5467 default: return 1;
5468 }
dd8fbd78 5469 tcg_temp_free(tmp2);
9ee6e8bb 5470 if (op == 19)
dd8fbd78 5471 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5472 break;
5473 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5474 tmp2 = tcg_const_i32(0);
9ee6e8bb 5475 switch(size) {
dd8fbd78
FN
5476 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5477 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5478 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5479 default: return 1;
5480 }
dd8fbd78 5481 tcg_temp_free(tmp2);
9ee6e8bb 5482 if (op == 20)
dd8fbd78 5483 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5484 break;
5485 case 18: /* VCEQ #0 */
dd8fbd78 5486 tmp2 = tcg_const_i32(0);
9ee6e8bb 5487 switch(size) {
dd8fbd78
FN
5488 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5489 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5490 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5491 default: return 1;
5492 }
dd8fbd78 5493 tcg_temp_free(tmp2);
9ee6e8bb
PB
5494 break;
5495 case 22: /* VABS */
5496 switch(size) {
dd8fbd78
FN
5497 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5498 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5499 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5500 default: return 1;
5501 }
5502 break;
5503 case 23: /* VNEG */
ad69471c
PB
5504 if (size == 3)
5505 return 1;
dd8fbd78
FN
5506 tmp2 = tcg_const_i32(0);
5507 gen_neon_rsb(size, tmp, tmp2);
5508 tcg_temp_free(tmp2);
9ee6e8bb
PB
5509 break;
5510 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5511 tmp2 = tcg_const_i32(0);
5512 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5513 tcg_temp_free(tmp2);
9ee6e8bb 5514 if (op == 27)
dd8fbd78 5515 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5516 break;
5517 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5518 tmp2 = tcg_const_i32(0);
5519 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5520 tcg_temp_free(tmp2);
9ee6e8bb 5521 if (op == 28)
dd8fbd78 5522 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5523 break;
5524 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5525 tmp2 = tcg_const_i32(0);
5526 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5527 tcg_temp_free(tmp2);
9ee6e8bb
PB
5528 break;
5529 case 30: /* Float VABS */
4373f3ce 5530 gen_vfp_abs(0);
9ee6e8bb
PB
5531 break;
5532 case 31: /* Float VNEG */
4373f3ce 5533 gen_vfp_neg(0);
9ee6e8bb
PB
5534 break;
5535 case 32: /* VSWP */
dd8fbd78
FN
5536 tmp2 = neon_load_reg(rd, pass);
5537 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5538 break;
5539 case 33: /* VTRN */
dd8fbd78 5540 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5541 switch (size) {
dd8fbd78
FN
5542 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5543 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5544 case 2: abort();
5545 default: return 1;
5546 }
dd8fbd78 5547 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5548 break;
5549 case 56: /* Integer VRECPE */
dd8fbd78 5550 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5551 break;
5552 case 57: /* Integer VRSQRTE */
dd8fbd78 5553 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5554 break;
5555 case 58: /* Float VRECPE */
4373f3ce 5556 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5557 break;
5558 case 59: /* Float VRSQRTE */
4373f3ce 5559 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5560 break;
5561 case 60: /* VCVT.F32.S32 */
4373f3ce 5562 gen_vfp_tosiz(0);
9ee6e8bb
PB
5563 break;
5564 case 61: /* VCVT.F32.U32 */
4373f3ce 5565 gen_vfp_touiz(0);
9ee6e8bb
PB
5566 break;
5567 case 62: /* VCVT.S32.F32 */
4373f3ce 5568 gen_vfp_sito(0);
9ee6e8bb
PB
5569 break;
5570 case 63: /* VCVT.U32.F32 */
4373f3ce 5571 gen_vfp_uito(0);
9ee6e8bb
PB
5572 break;
5573 default:
5574 /* Reserved: 21, 29, 39-56 */
5575 return 1;
5576 }
5577 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5578 tcg_gen_st_f32(cpu_F0s, cpu_env,
5579 neon_reg_offset(rd, pass));
9ee6e8bb 5580 } else {
dd8fbd78 5581 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5582 }
5583 }
5584 break;
5585 }
5586 } else if ((insn & (1 << 10)) == 0) {
5587 /* VTBL, VTBX. */
3018f259 5588 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5589 if (insn & (1 << 6)) {
8f8e3aa4 5590 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5591 } else {
8f8e3aa4
PB
5592 tmp = new_tmp();
5593 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5594 }
8f8e3aa4
PB
5595 tmp2 = neon_load_reg(rm, 0);
5596 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5597 tcg_const_i32(n));
3018f259 5598 dead_tmp(tmp);
9ee6e8bb 5599 if (insn & (1 << 6)) {
8f8e3aa4 5600 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5601 } else {
8f8e3aa4
PB
5602 tmp = new_tmp();
5603 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5604 }
8f8e3aa4
PB
5605 tmp3 = neon_load_reg(rm, 1);
5606 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5607 tcg_const_i32(n));
5608 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5609 neon_store_reg(rd, 1, tmp3);
5610 dead_tmp(tmp);
9ee6e8bb
PB
5611 } else if ((insn & 0x380) == 0) {
5612 /* VDUP */
5613 if (insn & (1 << 19)) {
dd8fbd78 5614 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5615 } else {
dd8fbd78 5616 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5617 }
5618 if (insn & (1 << 16)) {
dd8fbd78 5619 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5620 } else if (insn & (1 << 17)) {
5621 if ((insn >> 18) & 1)
dd8fbd78 5622 gen_neon_dup_high16(tmp);
9ee6e8bb 5623 else
dd8fbd78 5624 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5625 }
5626 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5627 tmp2 = new_tmp();
5628 tcg_gen_mov_i32(tmp2, tmp);
5629 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5630 }
dd8fbd78 5631 dead_tmp(tmp);
9ee6e8bb
PB
5632 } else {
5633 return 1;
5634 }
5635 }
5636 }
5637 return 0;
5638}
5639
fe1479c3
PB
5640static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5641{
5642 int crn = (insn >> 16) & 0xf;
5643 int crm = insn & 0xf;
5644 int op1 = (insn >> 21) & 7;
5645 int op2 = (insn >> 5) & 7;
5646 int rt = (insn >> 12) & 0xf;
5647 TCGv tmp;
5648
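    /* ThumbEE exposes two registers through cp14 here: TEECR is accessible
       only in privileged modes, and TEEHBR is user-accessible unless TEECR
       bit 0 is set, which is what the IS_USER checks below implement. */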
5649 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5650 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5651 /* TEECR */
5652 if (IS_USER(s))
5653 return 1;
5654 tmp = load_cpu_field(teecr);
5655 store_reg(s, rt, tmp);
5656 return 0;
5657 }
5658 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5659 /* TEEHBR */
5660 if (IS_USER(s) && (env->teecr & 1))
5661 return 1;
5662 tmp = load_cpu_field(teehbr);
5663 store_reg(s, rt, tmp);
5664 return 0;
5665 }
5666 }
5667 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5668 op1, crn, crm, op2);
5669 return 1;
5670}
5671
5672static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5673{
5674 int crn = (insn >> 16) & 0xf;
5675 int crm = insn & 0xf;
5676 int op1 = (insn >> 21) & 7;
5677 int op2 = (insn >> 5) & 7;
5678 int rt = (insn >> 12) & 0xf;
5679 TCGv tmp;
5680
5681 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5682 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5683 /* TEECR */
5684 if (IS_USER(s))
5685 return 1;
5686 tmp = load_reg(s, rt);
5687 gen_helper_set_teecr(cpu_env, tmp);
5688 dead_tmp(tmp);
5689 return 0;
5690 }
5691 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5692 /* TEEHBR */
5693 if (IS_USER(s) && (env->teecr & 1))
5694 return 1;
5695 tmp = load_reg(s, rt);
5696 store_cpu_field(tmp, teehbr);
5697 return 0;
5698 }
5699 }
5700 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5701 op1, crn, crm, op2);
5702 return 1;
5703}
5704
9ee6e8bb
PB
5705static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5706{
5707 int cpnum;
5708
5709 cpnum = (insn >> 8) & 0xf;
5710 if (arm_feature(env, ARM_FEATURE_XSCALE)
5711 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5712 return 1;
5713
5714 switch (cpnum) {
5715 case 0:
5716 case 1:
5717 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5718 return disas_iwmmxt_insn(env, s, insn);
5719 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5720 return disas_dsp_insn(env, s, insn);
5721 }
5722 return 1;
5723 case 10:
5724 case 11:
5725 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5726 case 14:
5727 /* Coprocessors 7-15 are architecturally reserved by ARM.
5728 Unfortunately Intel decided to ignore this. */
5729 if (arm_feature(env, ARM_FEATURE_XSCALE))
5730 goto board;
5731 if (insn & (1 << 20))
5732 return disas_cp14_read(env, s, insn);
5733 else
5734 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5735 case 15:
5736 return disas_cp15_insn (env, s, insn);
5737 default:
fe1479c3 5738 board:
9ee6e8bb
PB
5739 /* Unknown coprocessor. See if the board has hooked it. */
5740 return disas_cp_insn (env, s, insn);
5741 }
5742}
5743
5e3f878a
PB
5744
5745/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5746static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5747{
5748 TCGv tmp;
5749 tmp = new_tmp();
5750 tcg_gen_trunc_i64_i32(tmp, val);
5751 store_reg(s, rlow, tmp);
5752 tmp = new_tmp();
5753 tcg_gen_shri_i64(val, val, 32);
5754 tcg_gen_trunc_i64_i32(tmp, val);
5755 store_reg(s, rhigh, tmp);
5756}
5757
5758/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5759static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5760{
a7812ae4 5761 TCGv_i64 tmp;
5e3f878a
PB
5762 TCGv tmp2;
5763
36aa55dc 5764 /* Load value and extend to 64 bits. */
a7812ae4 5765 tmp = tcg_temp_new_i64();
5e3f878a
PB
5766 tmp2 = load_reg(s, rlow);
5767 tcg_gen_extu_i32_i64(tmp, tmp2);
5768 dead_tmp(tmp2);
5769 tcg_gen_add_i64(val, val, tmp);
5770}
5771
5772/* load and add a 64-bit value from a register pair. */
a7812ae4 5773static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5774{
a7812ae4 5775 TCGv_i64 tmp;
36aa55dc
PB
5776 TCGv tmpl;
5777 TCGv tmph;
5e3f878a
PB
5778
5779 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5780 tmpl = load_reg(s, rlow);
5781 tmph = load_reg(s, rhigh);
a7812ae4 5782 tmp = tcg_temp_new_i64();
36aa55dc
PB
5783 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5784 dead_tmp(tmpl);
5785 dead_tmp(tmph);
5e3f878a
PB
5786 tcg_gen_add_i64(val, val, tmp);
5787}
5788
5789/* Set N and Z flags from a 64-bit value. */
a7812ae4 5790static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5791{
5792 TCGv tmp = new_tmp();
5793 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5794 gen_logic_CC(tmp);
5795 dead_tmp(tmp);
5e3f878a
PB
5796}
5797
9ee6e8bb
PB
5798static void disas_arm_insn(CPUState * env, DisasContext *s)
5799{
5800 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5801 TCGv tmp;
3670669c 5802 TCGv tmp2;
6ddbc6e4 5803 TCGv tmp3;
b0109805 5804 TCGv addr;
a7812ae4 5805 TCGv_i64 tmp64;
9ee6e8bb
PB
5806
5807 insn = ldl_code(s->pc);
5808 s->pc += 4;
5809
5810 /* M variants do not implement ARM mode. */
5811 if (IS_M(env))
5812 goto illegal_op;
5813 cond = insn >> 28;
5814 if (cond == 0xf){
5815 /* Unconditional instructions. */
5816 if (((insn >> 25) & 7) == 1) {
5817 /* NEON Data processing. */
5818 if (!arm_feature(env, ARM_FEATURE_NEON))
5819 goto illegal_op;
5820
5821 if (disas_neon_data_insn(env, s, insn))
5822 goto illegal_op;
5823 return;
5824 }
5825 if ((insn & 0x0f100000) == 0x04000000) {
5826 /* NEON load/store. */
5827 if (!arm_feature(env, ARM_FEATURE_NEON))
5828 goto illegal_op;
5829
5830 if (disas_neon_ls_insn(env, s, insn))
5831 goto illegal_op;
5832 return;
5833 }
5834 if ((insn & 0x0d70f000) == 0x0550f000)
5835 return; /* PLD */
5836 else if ((insn & 0x0ffffdff) == 0x01010000) {
5837 ARCH(6);
5838 /* setend */
5839 if (insn & (1 << 9)) {
5840 /* BE8 mode not implemented. */
5841 goto illegal_op;
5842 }
5843 return;
5844 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5845 switch ((insn >> 4) & 0xf) {
5846 case 1: /* clrex */
5847 ARCH(6K);
8f8e3aa4 5848 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5849 return;
5850 case 4: /* dsb */
5851 case 5: /* dmb */
5852 case 6: /* isb */
5853 ARCH(7);
5854 /* We don't emulate caches, so these are no-ops. */
5855 return;
5856 default:
5857 goto illegal_op;
5858 }
5859 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5860 /* srs */
c67b6b71 5861 int32_t offset;
9ee6e8bb
PB
5862 if (IS_USER(s))
5863 goto illegal_op;
5864 ARCH(6);
5865 op1 = (insn & 0x1f);
5866 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5867 addr = load_reg(s, 13);
9ee6e8bb 5868 } else {
b0109805
PB
5869 addr = new_tmp();
5870 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5871 }
5872 i = (insn >> 23) & 3;
5873 switch (i) {
5874 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5875 case 1: offset = 0; break; /* IA */
5876 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
5877 case 3: offset = 4; break; /* IB */
5878 default: abort();
5879 }
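/* A sketch of why these offsets work (assuming the usual STM-style
   addressing): srs stores two words (LR, then SPSR at +4), so e.g.
   DB starts the first store at base - 8 and IB at base + 4, leaving
   the pair at [base-8, base-4] and [base+4, base+8] respectively. */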
5880 if (offset)
b0109805
PB
5881 tcg_gen_addi_i32(addr, addr, offset);
5882 tmp = load_reg(s, 14);
5883 gen_st32(tmp, addr, 0);
c67b6b71 5884 tmp = load_cpu_field(spsr);
b0109805
PB
5885 tcg_gen_addi_i32(addr, addr, 4);
5886 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5887 if (insn & (1 << 21)) {
5888 /* Base writeback. */
5889 switch (i) {
5890 case 0: offset = -8; break;
c67b6b71
FN
5891 case 1: offset = 4; break;
5892 case 2: offset = -4; break;
9ee6e8bb
PB
5893 case 3: offset = 0; break;
5894 default: abort();
5895 }
5896 if (offset)
c67b6b71 5897 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 5898 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 5899 store_reg(s, 13, addr);
9ee6e8bb 5900 } else {
c67b6b71
FN
5901 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
5902 dead_tmp(addr);
9ee6e8bb 5903 }
b0109805
PB
5904 } else {
5905 dead_tmp(addr);
9ee6e8bb
PB
5906 }
5907 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5908 /* rfe */
c67b6b71 5909 int32_t offset;
9ee6e8bb
PB
5910 if (IS_USER(s))
5911 goto illegal_op;
5912 ARCH(6);
5913 rn = (insn >> 16) & 0xf;
b0109805 5914 addr = load_reg(s, rn);
9ee6e8bb
PB
5915 i = (insn >> 23) & 3;
5916 switch (i) {
b0109805 5917 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5918 case 1: offset = 0; break; /* IA */
5919 case 2: offset = -8; break; /* DB */
b0109805 5920 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5921 default: abort();
5922 }
5923 if (offset)
b0109805
PB
5924 tcg_gen_addi_i32(addr, addr, offset);
5925 /* Load PC into tmp and CPSR into tmp2. */
5926 tmp = gen_ld32(addr, 0);
5927 tcg_gen_addi_i32(addr, addr, 4);
5928 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5929 if (insn & (1 << 21)) {
5930 /* Base writeback. */
5931 switch (i) {
b0109805 5932 case 0: offset = -8; break;
c67b6b71
FN
5933 case 1: offset = 4; break;
5934 case 2: offset = -4; break;
b0109805 5935 case 3: offset = 0; break;
9ee6e8bb
PB
5936 default: abort();
5937 }
5938 if (offset)
b0109805
PB
5939 tcg_gen_addi_i32(addr, addr, offset);
5940 store_reg(s, rn, addr);
5941 } else {
5942 dead_tmp(addr);
9ee6e8bb 5943 }
b0109805 5944 gen_rfe(s, tmp, tmp2);
c67b6b71 5945 return;
9ee6e8bb
PB
5946 } else if ((insn & 0x0e000000) == 0x0a000000) {
5947 /* branch link and change to thumb (blx <offset>) */
5948 int32_t offset;
5949
5950 val = (uint32_t)s->pc;
d9ba4830
PB
5951 tmp = new_tmp();
5952 tcg_gen_movi_i32(tmp, val);
5953 store_reg(s, 14, tmp);
9ee6e8bb
PB
5954 /* Sign-extend the 24-bit offset */
5955 offset = (((int32_t)insn) << 8) >> 8;
5956 /* offset * 4 + bit24 * 2 + (thumb bit) */
5957 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5958 /* pipeline offset */
5959 val += 4;
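/* Illustrative arithmetic (example value only, not from the source):
   for insn = 0xfb000000 (imm24 = 0, H bit 24 set) this gives
   val = (insn_addr + 4) + 3 + 4 = insn_addr + 11, i.e. a Thumb-state
   branch to insn_addr + 8 + 2, which matches the BLX(1) definition
   target = PC + SignExtend(imm24 << 2) + (H << 1). */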
d9ba4830 5960 gen_bx_im(s, val);
9ee6e8bb
PB
5961 return;
5962 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5963 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5964 /* iWMMXt register transfer. */
5965 if (env->cp15.c15_cpar & (1 << 1))
5966 if (!disas_iwmmxt_insn(env, s, insn))
5967 return;
5968 }
5969 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5970 /* Coprocessor double register transfer. */
5971 } else if ((insn & 0x0f000010) == 0x0e000010) {
5972 /* Additional coprocessor register transfer. */
7997d92f 5973 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5974 uint32_t mask;
5975 uint32_t val;
5976 /* cps (privileged) */
5977 if (IS_USER(s))
5978 return;
5979 mask = val = 0;
5980 if (insn & (1 << 19)) {
5981 if (insn & (1 << 8))
5982 mask |= CPSR_A;
5983 if (insn & (1 << 7))
5984 mask |= CPSR_I;
5985 if (insn & (1 << 6))
5986 mask |= CPSR_F;
5987 if (insn & (1 << 18))
5988 val |= mask;
5989 }
7997d92f 5990 if (insn & (1 << 17)) {
9ee6e8bb
PB
5991 mask |= CPSR_M;
5992 val |= (insn & 0x1f);
5993 }
5994 if (mask) {
2fbac54b 5995 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
5996 }
5997 return;
5998 }
5999 goto illegal_op;
6000 }
6001 if (cond != 0xe) {
6002 /* If the condition is not "always", generate a conditional jump to
6003 the next instruction. */
6004 s->condlabel = gen_new_label();
d9ba4830 6005 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6006 s->condjmp = 1;
6007 }
6008 if ((insn & 0x0f900000) == 0x03000000) {
6009 if ((insn & (1 << 21)) == 0) {
6010 ARCH(6T2);
6011 rd = (insn >> 12) & 0xf;
6012 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6013 if ((insn & (1 << 22)) == 0) {
6014 /* MOVW */
5e3f878a
PB
6015 tmp = new_tmp();
6016 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6017 } else {
6018 /* MOVT */
5e3f878a 6019 tmp = load_reg(s, rd);
86831435 6020 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6021 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6022 }
5e3f878a 6023 store_reg(s, rd, tmp);
9ee6e8bb
PB
6024 } else {
6025 if (((insn >> 12) & 0xf) != 0xf)
6026 goto illegal_op;
6027 if (((insn >> 16) & 0xf) == 0) {
6028 gen_nop_hint(s, insn & 0xff);
6029 } else {
6030 /* CPSR = immediate */
6031 val = insn & 0xff;
6032 shift = ((insn >> 8) & 0xf) * 2;
6033 if (shift)
6034 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6035 i = ((insn & (1 << 22)) != 0);
2fbac54b 6036 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6037 goto illegal_op;
6038 }
6039 }
6040 } else if ((insn & 0x0f900000) == 0x01000000
6041 && (insn & 0x00000090) != 0x00000090) {
6042 /* miscellaneous instructions */
6043 op1 = (insn >> 21) & 3;
6044 sh = (insn >> 4) & 0xf;
6045 rm = insn & 0xf;
6046 switch (sh) {
6047 case 0x0: /* move program status register */
6048 if (op1 & 1) {
6049 /* PSR = reg */
2fbac54b 6050 tmp = load_reg(s, rm);
9ee6e8bb 6051 i = ((op1 & 2) != 0);
2fbac54b 6052 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6053 goto illegal_op;
6054 } else {
6055 /* reg = PSR */
6056 rd = (insn >> 12) & 0xf;
6057 if (op1 & 2) {
6058 if (IS_USER(s))
6059 goto illegal_op;
d9ba4830 6060 tmp = load_cpu_field(spsr);
9ee6e8bb 6061 } else {
d9ba4830
PB
6062 tmp = new_tmp();
6063 gen_helper_cpsr_read(tmp);
9ee6e8bb 6064 }
d9ba4830 6065 store_reg(s, rd, tmp);
9ee6e8bb
PB
6066 }
6067 break;
6068 case 0x1:
6069 if (op1 == 1) {
6070 /* branch/exchange thumb (bx). */
d9ba4830
PB
6071 tmp = load_reg(s, rm);
6072 gen_bx(s, tmp);
9ee6e8bb
PB
6073 } else if (op1 == 3) {
6074 /* clz */
6075 rd = (insn >> 12) & 0xf;
1497c961
PB
6076 tmp = load_reg(s, rm);
6077 gen_helper_clz(tmp, tmp);
6078 store_reg(s, rd, tmp);
9ee6e8bb
PB
6079 } else {
6080 goto illegal_op;
6081 }
6082 break;
6083 case 0x2:
6084 if (op1 == 1) {
6085 ARCH(5J); /* bxj */
6086 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6087 tmp = load_reg(s, rm);
6088 gen_bx(s, tmp);
9ee6e8bb
PB
6089 } else {
6090 goto illegal_op;
6091 }
6092 break;
6093 case 0x3:
6094 if (op1 != 1)
6095 goto illegal_op;
6096
6097 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6098 tmp = load_reg(s, rm);
6099 tmp2 = new_tmp();
6100 tcg_gen_movi_i32(tmp2, s->pc);
6101 store_reg(s, 14, tmp2);
6102 gen_bx(s, tmp);
9ee6e8bb
PB
6103 break;
6104 case 0x5: /* saturating add/subtract */
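/* Decode sketch (per the architectural qadd/qsub/qdadd/qdsub
   definitions, as read from the code below): op1 bit 1 selects the
   "double" forms, which first saturate 2*Rn, and op1 bit 0 selects
   subtraction instead of addition; saturation sets the Q flag. */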
6105 rd = (insn >> 12) & 0xf;
6106 rn = (insn >> 16) & 0xf;
b40d0353 6107 tmp = load_reg(s, rm);
5e3f878a 6108 tmp2 = load_reg(s, rn);
9ee6e8bb 6109 if (op1 & 2)
5e3f878a 6110 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6111 if (op1 & 1)
5e3f878a 6112 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6113 else
5e3f878a
PB
6114 gen_helper_add_saturate(tmp, tmp, tmp2);
6115 dead_tmp(tmp2);
6116 store_reg(s, rd, tmp);
9ee6e8bb
PB
6117 break;
6118 case 7: /* bkpt */
6119 gen_set_condexec(s);
5e3f878a 6120 gen_set_pc_im(s->pc - 4);
d9ba4830 6121 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6122 s->is_jmp = DISAS_JUMP;
6123 break;
6124 case 0x8: /* signed multiply */
6125 case 0xa:
6126 case 0xc:
6127 case 0xe:
6128 rs = (insn >> 8) & 0xf;
6129 rn = (insn >> 12) & 0xf;
6130 rd = (insn >> 16) & 0xf;
6131 if (op1 == 1) {
6132 /* (32 * 16) >> 16 */
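/* This is smulw<y>/smlaw<y>: bits [47:16] of the 48-bit product of Rm
   and the chosen halfword of Rs are kept; sh bit 2 picks the top
   halfword and, when sh bit 1 is clear, Rn is added with Q-flag
   saturation detection (a summary of the code below, following the
   ARM definitions). */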
5e3f878a
PB
6133 tmp = load_reg(s, rm);
6134 tmp2 = load_reg(s, rs);
9ee6e8bb 6135 if (sh & 4)
5e3f878a 6136 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6137 else
5e3f878a 6138 gen_sxth(tmp2);
a7812ae4
PB
6139 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6140 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6141 tmp = new_tmp();
a7812ae4 6142 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 6143 if ((sh & 2) == 0) {
5e3f878a
PB
6144 tmp2 = load_reg(s, rn);
6145 gen_helper_add_setq(tmp, tmp, tmp2);
6146 dead_tmp(tmp2);
9ee6e8bb 6147 }
5e3f878a 6148 store_reg(s, rd, tmp);
9ee6e8bb
PB
6149 } else {
6150 /* 16 * 16 */
5e3f878a
PB
6151 tmp = load_reg(s, rm);
6152 tmp2 = load_reg(s, rs);
6153 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6154 dead_tmp(tmp2);
9ee6e8bb 6155 if (op1 == 2) {
a7812ae4
PB
6156 tmp64 = tcg_temp_new_i64();
6157 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6158 dead_tmp(tmp);
a7812ae4
PB
6159 gen_addq(s, tmp64, rn, rd);
6160 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6161 } else {
6162 if (op1 == 0) {
5e3f878a
PB
6163 tmp2 = load_reg(s, rn);
6164 gen_helper_add_setq(tmp, tmp, tmp2);
6165 dead_tmp(tmp2);
9ee6e8bb 6166 }
5e3f878a 6167 store_reg(s, rd, tmp);
9ee6e8bb
PB
6168 }
6169 }
6170 break;
6171 default:
6172 goto illegal_op;
6173 }
6174 } else if (((insn & 0x0e000000) == 0 &&
6175 (insn & 0x00000090) != 0x90) ||
6176 ((insn & 0x0e000000) == (1 << 25))) {
6177 int set_cc, logic_cc, shiftop;
6178
6179 op1 = (insn >> 21) & 0xf;
6180 set_cc = (insn >> 20) & 1;
6181 logic_cc = table_logic_cc[op1] & set_cc;
6182
6183 /* data processing instruction */
6184 if (insn & (1 << 25)) {
6185 /* immediate operand */
6186 val = insn & 0xff;
6187 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6188 if (shift) {
9ee6e8bb 6189 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6190 }
6191 tmp2 = new_tmp();
6192 tcg_gen_movi_i32(tmp2, val);
6193 if (logic_cc && shift) {
6194 gen_set_CF_bit31(tmp2);
6195 }
9ee6e8bb
PB
6196 } else {
6197 /* register */
6198 rm = (insn) & 0xf;
e9bb4aa9 6199 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6200 shiftop = (insn >> 5) & 3;
6201 if (!(insn & (1 << 4))) {
6202 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6203 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6204 } else {
6205 rs = (insn >> 8) & 0xf;
8984bd2e 6206 tmp = load_reg(s, rs);
e9bb4aa9 6207 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6208 }
6209 }
6210 if (op1 != 0x0f && op1 != 0x0d) {
6211 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6212 tmp = load_reg(s, rn);
6213 } else {
6214 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6215 }
6216 rd = (insn >> 12) & 0xf;
6217 switch(op1) {
6218 case 0x00:
e9bb4aa9
JR
6219 tcg_gen_and_i32(tmp, tmp, tmp2);
6220 if (logic_cc) {
6221 gen_logic_CC(tmp);
6222 }
21aeb343 6223 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6224 break;
6225 case 0x01:
e9bb4aa9
JR
6226 tcg_gen_xor_i32(tmp, tmp, tmp2);
6227 if (logic_cc) {
6228 gen_logic_CC(tmp);
6229 }
21aeb343 6230 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6231 break;
6232 case 0x02:
6233 if (set_cc && rd == 15) {
6234 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6235 if (IS_USER(s)) {
9ee6e8bb 6236 goto illegal_op;
e9bb4aa9
JR
6237 }
6238 gen_helper_sub_cc(tmp, tmp, tmp2);
6239 gen_exception_return(s, tmp);
9ee6e8bb 6240 } else {
e9bb4aa9
JR
6241 if (set_cc) {
6242 gen_helper_sub_cc(tmp, tmp, tmp2);
6243 } else {
6244 tcg_gen_sub_i32(tmp, tmp, tmp2);
6245 }
21aeb343 6246 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6247 }
6248 break;
6249 case 0x03:
e9bb4aa9
JR
6250 if (set_cc) {
6251 gen_helper_sub_cc(tmp, tmp2, tmp);
6252 } else {
6253 tcg_gen_sub_i32(tmp, tmp2, tmp);
6254 }
21aeb343 6255 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6256 break;
6257 case 0x04:
e9bb4aa9
JR
6258 if (set_cc) {
6259 gen_helper_add_cc(tmp, tmp, tmp2);
6260 } else {
6261 tcg_gen_add_i32(tmp, tmp, tmp2);
6262 }
21aeb343 6263 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6264 break;
6265 case 0x05:
e9bb4aa9
JR
6266 if (set_cc) {
6267 gen_helper_adc_cc(tmp, tmp, tmp2);
6268 } else {
6269 gen_add_carry(tmp, tmp, tmp2);
6270 }
21aeb343 6271 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6272 break;
6273 case 0x06:
e9bb4aa9
JR
6274 if (set_cc) {
6275 gen_helper_sbc_cc(tmp, tmp, tmp2);
6276 } else {
6277 gen_sub_carry(tmp, tmp, tmp2);
6278 }
21aeb343 6279 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6280 break;
6281 case 0x07:
e9bb4aa9
JR
6282 if (set_cc) {
6283 gen_helper_sbc_cc(tmp, tmp2, tmp);
6284 } else {
6285 gen_sub_carry(tmp, tmp2, tmp);
6286 }
21aeb343 6287 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6288 break;
6289 case 0x08:
6290 if (set_cc) {
e9bb4aa9
JR
6291 tcg_gen_and_i32(tmp, tmp, tmp2);
6292 gen_logic_CC(tmp);
9ee6e8bb 6293 }
e9bb4aa9 6294 dead_tmp(tmp);
9ee6e8bb
PB
6295 break;
6296 case 0x09:
6297 if (set_cc) {
e9bb4aa9
JR
6298 tcg_gen_xor_i32(tmp, tmp, tmp2);
6299 gen_logic_CC(tmp);
9ee6e8bb 6300 }
e9bb4aa9 6301 dead_tmp(tmp);
9ee6e8bb
PB
6302 break;
6303 case 0x0a:
6304 if (set_cc) {
e9bb4aa9 6305 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6306 }
e9bb4aa9 6307 dead_tmp(tmp);
9ee6e8bb
PB
6308 break;
6309 case 0x0b:
6310 if (set_cc) {
e9bb4aa9 6311 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6312 }
e9bb4aa9 6313 dead_tmp(tmp);
9ee6e8bb
PB
6314 break;
6315 case 0x0c:
e9bb4aa9
JR
6316 tcg_gen_or_i32(tmp, tmp, tmp2);
6317 if (logic_cc) {
6318 gen_logic_CC(tmp);
6319 }
21aeb343 6320 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6321 break;
6322 case 0x0d:
6323 if (logic_cc && rd == 15) {
6324 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6325 if (IS_USER(s)) {
9ee6e8bb 6326 goto illegal_op;
e9bb4aa9
JR
6327 }
6328 gen_exception_return(s, tmp2);
9ee6e8bb 6329 } else {
e9bb4aa9
JR
6330 if (logic_cc) {
6331 gen_logic_CC(tmp2);
6332 }
21aeb343 6333 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6334 }
6335 break;
6336 case 0x0e:
e9bb4aa9
JR
6337 tcg_gen_bic_i32(tmp, tmp, tmp2);
6338 if (logic_cc) {
6339 gen_logic_CC(tmp);
6340 }
21aeb343 6341 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6342 break;
6343 default:
6344 case 0x0f:
e9bb4aa9
JR
6345 tcg_gen_not_i32(tmp2, tmp2);
6346 if (logic_cc) {
6347 gen_logic_CC(tmp2);
6348 }
21aeb343 6349 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6350 break;
6351 }
e9bb4aa9
JR
6352 if (op1 != 0x0f && op1 != 0x0d) {
6353 dead_tmp(tmp2);
6354 }
9ee6e8bb
PB
6355 } else {
6356 /* other instructions */
6357 op1 = (insn >> 24) & 0xf;
6358 switch(op1) {
6359 case 0x0:
6360 case 0x1:
6361 /* multiplies, extra load/stores */
6362 sh = (insn >> 5) & 3;
6363 if (sh == 0) {
6364 if (op1 == 0x0) {
6365 rd = (insn >> 16) & 0xf;
6366 rn = (insn >> 12) & 0xf;
6367 rs = (insn >> 8) & 0xf;
6368 rm = (insn) & 0xf;
6369 op1 = (insn >> 20) & 0xf;
6370 switch (op1) {
6371 case 0: case 1: case 2: case 3: case 6:
6372 /* 32 bit mul */
5e3f878a
PB
6373 tmp = load_reg(s, rs);
6374 tmp2 = load_reg(s, rm);
6375 tcg_gen_mul_i32(tmp, tmp, tmp2);
6376 dead_tmp(tmp2);
9ee6e8bb
PB
6377 if (insn & (1 << 22)) {
6378 /* Subtract (mls) */
6379 ARCH(6T2);
5e3f878a
PB
6380 tmp2 = load_reg(s, rn);
6381 tcg_gen_sub_i32(tmp, tmp2, tmp);
6382 dead_tmp(tmp2);
9ee6e8bb
PB
6383 } else if (insn & (1 << 21)) {
6384 /* Add */
5e3f878a
PB
6385 tmp2 = load_reg(s, rn);
6386 tcg_gen_add_i32(tmp, tmp, tmp2);
6387 dead_tmp(tmp2);
9ee6e8bb
PB
6388 }
6389 if (insn & (1 << 20))
5e3f878a
PB
6390 gen_logic_CC(tmp);
6391 store_reg(s, rd, tmp);
9ee6e8bb
PB
6392 break;
6393 default:
6394 /* 64 bit mul */
5e3f878a
PB
6395 tmp = load_reg(s, rs);
6396 tmp2 = load_reg(s, rm);
9ee6e8bb 6397 if (insn & (1 << 22))
a7812ae4 6398 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6399 else
a7812ae4 6400 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6401 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6402 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6403 if (!(insn & (1 << 23))) { /* double accumulate */
6404 ARCH(6);
a7812ae4
PB
6405 gen_addq_lo(s, tmp64, rn);
6406 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6407 }
6408 if (insn & (1 << 20))
a7812ae4
PB
6409 gen_logicq_cc(tmp64);
6410 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6411 break;
6412 }
6413 } else {
6414 rn = (insn >> 16) & 0xf;
6415 rd = (insn >> 12) & 0xf;
6416 if (insn & (1 << 23)) {
6417 /* load/store exclusive */
86753403
PB
6418 op1 = (insn >> 21) & 0x3;
6419 if (op1)
a47f43d2 6420 ARCH(6K);
86753403
PB
6421 else
6422 ARCH(6);
3174f8e9
FN
6423 addr = tcg_temp_local_new_i32();
6424 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb 6425 if (insn & (1 << 20)) {
3174f8e9 6426 gen_helper_mark_exclusive(cpu_env, addr);
86753403
PB
6427 switch (op1) {
6428 case 0: /* ldrex */
6429 tmp = gen_ld32(addr, IS_USER(s));
6430 break;
6431 case 1: /* ldrexd */
6432 tmp = gen_ld32(addr, IS_USER(s));
6433 store_reg(s, rd, tmp);
6434 tcg_gen_addi_i32(addr, addr, 4);
6435 tmp = gen_ld32(addr, IS_USER(s));
6436 rd++;
6437 break;
6438 case 2: /* ldrexb */
6439 tmp = gen_ld8u(addr, IS_USER(s));
6440 break;
6441 case 3: /* ldrexh */
6442 tmp = gen_ld16u(addr, IS_USER(s));
6443 break;
6444 default:
6445 abort();
6446 }
8f8e3aa4 6447 store_reg(s, rd, tmp);
9ee6e8bb 6448 } else {
8f8e3aa4 6449 int label = gen_new_label();
9ee6e8bb 6450 rm = insn & 0xf;
3174f8e9
FN
6451 tmp2 = tcg_temp_local_new_i32();
6452 gen_helper_test_exclusive(tmp2, cpu_env, addr);
6453 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 6454 tmp = load_reg(s,rm);
86753403
PB
6455 switch (op1) {
6456 case 0: /* strex */
6457 gen_st32(tmp, addr, IS_USER(s));
6458 break;
6459 case 1: /* strexd */
6460 gen_st32(tmp, addr, IS_USER(s));
6461 tcg_gen_addi_i32(addr, addr, 4);
6462 tmp = load_reg(s, rm + 1);
6463 gen_st32(tmp, addr, IS_USER(s));
6464 break;
6465 case 2: /* strexb */
6466 gen_st8(tmp, addr, IS_USER(s));
6467 break;
6468 case 3: /* strexh */
6469 gen_st16(tmp, addr, IS_USER(s));
6470 break;
6471 default:
6472 abort();
6473 }
2637a3be 6474 gen_set_label(label);
3174f8e9
FN
6475 tcg_gen_mov_i32(cpu_R[rd], tmp2);
6476 tcg_temp_free(tmp2);
9ee6e8bb 6477 }
3174f8e9 6478 tcg_temp_free(addr);
9ee6e8bb
PB
6479 } else {
6480 /* SWP instruction */
6481 rm = (insn) & 0xf;
6482
8984bd2e
PB
6483 /* ??? This is not really atomic. However we know
6484 we never have multiple CPUs running in parallel,
6485 so it is good enough. */
6486 addr = load_reg(s, rn);
6487 tmp = load_reg(s, rm);
9ee6e8bb 6488 if (insn & (1 << 22)) {
8984bd2e
PB
6489 tmp2 = gen_ld8u(addr, IS_USER(s));
6490 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6491 } else {
8984bd2e
PB
6492 tmp2 = gen_ld32(addr, IS_USER(s));
6493 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6494 }
8984bd2e
PB
6495 dead_tmp(addr);
6496 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6497 }
6498 }
6499 } else {
6500 int address_offset;
6501 int load;
6502 /* Misc load/store */
6503 rn = (insn >> 16) & 0xf;
6504 rd = (insn >> 12) & 0xf;
b0109805 6505 addr = load_reg(s, rn);
9ee6e8bb 6506 if (insn & (1 << 24))
b0109805 6507 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6508 address_offset = 0;
6509 if (insn & (1 << 20)) {
6510 /* load */
6511 switch(sh) {
6512 case 1:
b0109805 6513 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6514 break;
6515 case 2:
b0109805 6516 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6517 break;
6518 default:
6519 case 3:
b0109805 6520 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6521 break;
6522 }
6523 load = 1;
6524 } else if (sh & 2) {
6525 /* doubleword */
6526 if (sh & 1) {
6527 /* store */
b0109805
PB
6528 tmp = load_reg(s, rd);
6529 gen_st32(tmp, addr, IS_USER(s));
6530 tcg_gen_addi_i32(addr, addr, 4);
6531 tmp = load_reg(s, rd + 1);
6532 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6533 load = 0;
6534 } else {
6535 /* load */
b0109805
PB
6536 tmp = gen_ld32(addr, IS_USER(s));
6537 store_reg(s, rd, tmp);
6538 tcg_gen_addi_i32(addr, addr, 4);
6539 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6540 rd++;
6541 load = 1;
6542 }
6543 address_offset = -4;
6544 } else {
6545 /* store */
b0109805
PB
6546 tmp = load_reg(s, rd);
6547 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6548 load = 0;
6549 }
6550 /* Perform base writeback before storing the loaded value, to
6551 ensure correct behavior with overlapping index registers.
6552 ldrd with base writeback is undefined if the
6553 destination and index registers overlap. */
6554 if (!(insn & (1 << 24))) {
b0109805
PB
6555 gen_add_datah_offset(s, insn, address_offset, addr);
6556 store_reg(s, rn, addr);
9ee6e8bb
PB
6557 } else if (insn & (1 << 21)) {
6558 if (address_offset)
b0109805
PB
6559 tcg_gen_addi_i32(addr, addr, address_offset);
6560 store_reg(s, rn, addr);
6561 } else {
6562 dead_tmp(addr);
9ee6e8bb
PB
6563 }
6564 if (load) {
6565 /* Complete the load. */
b0109805 6566 store_reg(s, rd, tmp);
9ee6e8bb
PB
6567 }
6568 }
6569 break;
6570 case 0x4:
6571 case 0x5:
6572 goto do_ldst;
6573 case 0x6:
6574 case 0x7:
6575 if (insn & (1 << 4)) {
6576 ARCH(6);
6577 /* ARMv6 media instructions. */
6578 rm = insn & 0xf;
6579 rn = (insn >> 16) & 0xf;
2c0262af 6580 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6581 rs = (insn >> 8) & 0xf;
6582 switch ((insn >> 23) & 3) {
6583 case 0: /* Parallel add/subtract. */
6584 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6585 tmp = load_reg(s, rn);
6586 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6587 sh = (insn >> 5) & 7;
6588 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6589 goto illegal_op;
6ddbc6e4
PB
6590 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6591 dead_tmp(tmp2);
6592 store_reg(s, rd, tmp);
9ee6e8bb
PB
6593 break;
6594 case 1:
6595 if ((insn & 0x00700020) == 0) {
6c95676b 6596 /* Halfword pack. */
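/* Reminder of the intent (per the pkhbt/pkhtb definitions):
   pkhbt keeps Rn[15:0] and the LSL-shifted Rm[31:16];
   pkhtb keeps Rn[31:16] and the ASR-shifted Rm[15:0].
   The masking below implements exactly that split. */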
3670669c
PB
6597 tmp = load_reg(s, rn);
6598 tmp2 = load_reg(s, rm);
9ee6e8bb 6599 shift = (insn >> 7) & 0x1f;
3670669c
PB
6600 if (insn & (1 << 6)) {
6601 /* pkhtb */
22478e79
AZ
6602 if (shift == 0)
6603 shift = 31;
6604 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6605 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6606 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6607 } else {
6608 /* pkhbt */
22478e79
AZ
6609 if (shift)
6610 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6611 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6612 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6613 }
6614 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6615 dead_tmp(tmp2);
3670669c 6616 store_reg(s, rd, tmp);
9ee6e8bb
PB
6617 } else if ((insn & 0x00200020) == 0x00200000) {
6618 /* [us]sat */
6ddbc6e4 6619 tmp = load_reg(s, rm);
9ee6e8bb
PB
6620 shift = (insn >> 7) & 0x1f;
6621 if (insn & (1 << 6)) {
6622 if (shift == 0)
6623 shift = 31;
6ddbc6e4 6624 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6625 } else {
6ddbc6e4 6626 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6627 }
6628 sh = (insn >> 16) & 0x1f;
6629 if (sh != 0) {
6630 if (insn & (1 << 22))
6ddbc6e4 6631 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6632 else
6ddbc6e4 6633 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6634 }
6ddbc6e4 6635 store_reg(s, rd, tmp);
9ee6e8bb
PB
6636 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6637 /* [us]sat16 */
6ddbc6e4 6638 tmp = load_reg(s, rm);
9ee6e8bb
PB
6639 sh = (insn >> 16) & 0x1f;
6640 if (sh != 0) {
6641 if (insn & (1 << 22))
6ddbc6e4 6642 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6643 else
6ddbc6e4 6644 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6645 }
6ddbc6e4 6646 store_reg(s, rd, tmp);
9ee6e8bb
PB
6647 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6648 /* Select bytes. */
6ddbc6e4
PB
6649 tmp = load_reg(s, rn);
6650 tmp2 = load_reg(s, rm);
6651 tmp3 = new_tmp();
6652 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6653 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6654 dead_tmp(tmp3);
6655 dead_tmp(tmp2);
6656 store_reg(s, rd, tmp);
9ee6e8bb 6657 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6658 tmp = load_reg(s, rm);
9ee6e8bb
PB
6659 shift = (insn >> 10) & 3;
6660 /* ??? In many cases it's not necessary to do a
6661 rotate; a shift is sufficient. */
6662 if (shift != 0)
5e3f878a 6663 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6664 op1 = (insn >> 20) & 7;
6665 switch (op1) {
5e3f878a
PB
6666 case 0: gen_sxtb16(tmp); break;
6667 case 2: gen_sxtb(tmp); break;
6668 case 3: gen_sxth(tmp); break;
6669 case 4: gen_uxtb16(tmp); break;
6670 case 6: gen_uxtb(tmp); break;
6671 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6672 default: goto illegal_op;
6673 }
6674 if (rn != 15) {
5e3f878a 6675 tmp2 = load_reg(s, rn);
9ee6e8bb 6676 if ((op1 & 3) == 0) {
5e3f878a 6677 gen_add16(tmp, tmp2);
9ee6e8bb 6678 } else {
5e3f878a
PB
6679 tcg_gen_add_i32(tmp, tmp, tmp2);
6680 dead_tmp(tmp2);
9ee6e8bb
PB
6681 }
6682 }
6c95676b 6683 store_reg(s, rd, tmp);
9ee6e8bb
PB
6684 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6685 /* rev */
b0109805 6686 tmp = load_reg(s, rm);
9ee6e8bb
PB
6687 if (insn & (1 << 22)) {
6688 if (insn & (1 << 7)) {
b0109805 6689 gen_revsh(tmp);
9ee6e8bb
PB
6690 } else {
6691 ARCH(6T2);
b0109805 6692 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6693 }
6694 } else {
6695 if (insn & (1 << 7))
b0109805 6696 gen_rev16(tmp);
9ee6e8bb 6697 else
66896cb8 6698 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6699 }
b0109805 6700 store_reg(s, rd, tmp);
9ee6e8bb
PB
6701 } else {
6702 goto illegal_op;
6703 }
6704 break;
6705 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6706 tmp = load_reg(s, rm);
6707 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6708 if (insn & (1 << 20)) {
6709 /* Signed multiply most significant [accumulate]. */
a7812ae4 6710 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6711 if (insn & (1 << 5))
a7812ae4
PB
6712 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6713 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6714 tmp = new_tmp();
a7812ae4 6715 tcg_gen_trunc_i64_i32(tmp, tmp64);
955a7dd5
AZ
6716 if (rd != 15) {
6717 tmp2 = load_reg(s, rd);
9ee6e8bb 6718 if (insn & (1 << 6)) {
5e3f878a 6719 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6720 } else {
5e3f878a 6721 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6722 }
5e3f878a 6723 dead_tmp(tmp2);
9ee6e8bb 6724 }
955a7dd5 6725 store_reg(s, rn, tmp);
9ee6e8bb
PB
6726 } else {
6727 if (insn & (1 << 5))
5e3f878a
PB
6728 gen_swap_half(tmp2);
6729 gen_smul_dual(tmp, tmp2);
6730 /* This addition cannot overflow. */
6731 if (insn & (1 << 6)) {
6732 tcg_gen_sub_i32(tmp, tmp, tmp2);
6733 } else {
6734 tcg_gen_add_i32(tmp, tmp, tmp2);
6735 }
6736 dead_tmp(tmp2);
9ee6e8bb 6737 if (insn & (1 << 22)) {
5e3f878a 6738 /* smlald, smlsld */
a7812ae4
PB
6739 tmp64 = tcg_temp_new_i64();
6740 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6741 dead_tmp(tmp);
a7812ae4
PB
6742 gen_addq(s, tmp64, rd, rn);
6743 gen_storeq_reg(s, rd, rn, tmp64);
9ee6e8bb 6744 } else {
5e3f878a 6745 /* smuad, smusd, smlad, smlsd */
22478e79 6746 if (rd != 15)
9ee6e8bb 6747 {
22478e79 6748 tmp2 = load_reg(s, rd);
5e3f878a
PB
6749 gen_helper_add_setq(tmp, tmp, tmp2);
6750 dead_tmp(tmp2);
9ee6e8bb 6751 }
22478e79 6752 store_reg(s, rn, tmp);
9ee6e8bb
PB
6753 }
6754 }
6755 break;
6756 case 3:
6757 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6758 switch (op1) {
6759 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6760 ARCH(6);
6761 tmp = load_reg(s, rm);
6762 tmp2 = load_reg(s, rs);
6763 gen_helper_usad8(tmp, tmp, tmp2);
6764 dead_tmp(tmp2);
ded9d295
AZ
6765 if (rd != 15) {
6766 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6767 tcg_gen_add_i32(tmp, tmp, tmp2);
6768 dead_tmp(tmp2);
9ee6e8bb 6769 }
ded9d295 6770 store_reg(s, rn, tmp);
9ee6e8bb
PB
6771 break;
6772 case 0x20: case 0x24: case 0x28: case 0x2c:
6773 /* Bitfield insert/clear. */
6774 ARCH(6T2);
6775 shift = (insn >> 7) & 0x1f;
6776 i = (insn >> 16) & 0x1f;
6777 i = i + 1 - shift;
6778 if (rm == 15) {
5e3f878a
PB
6779 tmp = new_tmp();
6780 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6781 } else {
5e3f878a 6782 tmp = load_reg(s, rm);
9ee6e8bb
PB
6783 }
6784 if (i != 32) {
5e3f878a 6785 tmp2 = load_reg(s, rd);
8f8e3aa4 6786 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6787 dead_tmp(tmp2);
9ee6e8bb 6788 }
5e3f878a 6789 store_reg(s, rd, tmp);
9ee6e8bb
PB
6790 break;
6791 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6792 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6793 ARCH(6T2);
5e3f878a 6794 tmp = load_reg(s, rm);
9ee6e8bb
PB
6795 shift = (insn >> 7) & 0x1f;
6796 i = ((insn >> 16) & 0x1f) + 1;
6797 if (shift + i > 32)
6798 goto illegal_op;
6799 if (i < 32) {
6800 if (op1 & 0x20) {
5e3f878a 6801 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6802 } else {
5e3f878a 6803 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6804 }
6805 }
5e3f878a 6806 store_reg(s, rd, tmp);
9ee6e8bb
PB
6807 break;
6808 default:
6809 goto illegal_op;
6810 }
6811 break;
6812 }
6813 break;
6814 }
6815 do_ldst:
6816 /* Check for undefined extension instructions
6817 * per the ARM Bible, i.e.:
6818 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6819 */
6820 sh = (0xf << 20) | (0xf << 4);
6821 if (op1 == 0x7 && ((insn & sh) == sh))
6822 {
6823 goto illegal_op;
6824 }
6825 /* load/store byte/word */
6826 rn = (insn >> 16) & 0xf;
6827 rd = (insn >> 12) & 0xf;
b0109805 6828 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6829 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6830 if (insn & (1 << 24))
b0109805 6831 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6832 if (insn & (1 << 20)) {
6833 /* load */
9ee6e8bb 6834 if (insn & (1 << 22)) {
b0109805 6835 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6836 } else {
b0109805 6837 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6838 }
9ee6e8bb
PB
6839 } else {
6840 /* store */
b0109805 6841 tmp = load_reg(s, rd);
9ee6e8bb 6842 if (insn & (1 << 22))
b0109805 6843 gen_st8(tmp, tmp2, i);
9ee6e8bb 6844 else
b0109805 6845 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6846 }
6847 if (!(insn & (1 << 24))) {
b0109805
PB
6848 gen_add_data_offset(s, insn, tmp2);
6849 store_reg(s, rn, tmp2);
6850 } else if (insn & (1 << 21)) {
6851 store_reg(s, rn, tmp2);
6852 } else {
6853 dead_tmp(tmp2);
9ee6e8bb
PB
6854 }
6855 if (insn & (1 << 20)) {
6856 /* Complete the load. */
6857 if (rd == 15)
b0109805 6858 gen_bx(s, tmp);
9ee6e8bb 6859 else
b0109805 6860 store_reg(s, rd, tmp);
9ee6e8bb
PB
6861 }
6862 break;
6863 case 0x08:
6864 case 0x09:
6865 {
6866 int j, n, user, loaded_base;
b0109805 6867 TCGv loaded_var;
9ee6e8bb
PB
6868 /* load/store multiple words */
6869 /* XXX: store correct base if write back */
6870 user = 0;
6871 if (insn & (1 << 22)) {
6872 if (IS_USER(s))
6873 goto illegal_op; /* only usable in supervisor mode */
6874
6875 if ((insn & (1 << 15)) == 0)
6876 user = 1;
6877 }
6878 rn = (insn >> 16) & 0xf;
b0109805 6879 addr = load_reg(s, rn);
9ee6e8bb
PB
6880
6881 /* compute total size */
6882 loaded_base = 0;
a50f5b91 6883 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6884 n = 0;
6885 for(i=0;i<16;i++) {
6886 if (insn & (1 << i))
6887 n++;
6888 }
6889 /* XXX: test the invalid n == 0 case? */
6890 if (insn & (1 << 23)) {
6891 if (insn & (1 << 24)) {
6892 /* pre increment */
b0109805 6893 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6894 } else {
6895 /* post increment */
6896 }
6897 } else {
6898 if (insn & (1 << 24)) {
6899 /* pre decrement */
b0109805 6900 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6901 } else {
6902 /* post decrement */
6903 if (n != 1)
b0109805 6904 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6905 }
6906 }
6907 j = 0;
6908 for(i=0;i<16;i++) {
6909 if (insn & (1 << i)) {
6910 if (insn & (1 << 20)) {
6911 /* load */
b0109805 6912 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6913 if (i == 15) {
b0109805 6914 gen_bx(s, tmp);
9ee6e8bb 6915 } else if (user) {
b0109805
PB
6916 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6917 dead_tmp(tmp);
9ee6e8bb 6918 } else if (i == rn) {
b0109805 6919 loaded_var = tmp;
9ee6e8bb
PB
6920 loaded_base = 1;
6921 } else {
b0109805 6922 store_reg(s, i, tmp);
9ee6e8bb
PB
6923 }
6924 } else {
6925 /* store */
6926 if (i == 15) {
6927 /* special case: r15 = PC + 8 */
6928 val = (long)s->pc + 4;
b0109805
PB
6929 tmp = new_tmp();
6930 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6931 } else if (user) {
b0109805
PB
6932 tmp = new_tmp();
6933 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6934 } else {
b0109805 6935 tmp = load_reg(s, i);
9ee6e8bb 6936 }
b0109805 6937 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6938 }
6939 j++;
6940 /* no need to add after the last transfer */
6941 if (j != n)
b0109805 6942 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6943 }
6944 }
6945 if (insn & (1 << 21)) {
6946 /* write back */
6947 if (insn & (1 << 23)) {
6948 if (insn & (1 << 24)) {
6949 /* pre increment */
6950 } else {
6951 /* post increment */
b0109805 6952 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6953 }
6954 } else {
6955 if (insn & (1 << 24)) {
6956 /* pre decrement */
6957 if (n != 1)
b0109805 6958 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6959 } else {
6960 /* post decrement */
b0109805 6961 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6962 }
6963 }
b0109805
PB
6964 store_reg(s, rn, addr);
6965 } else {
6966 dead_tmp(addr);
9ee6e8bb
PB
6967 }
6968 if (loaded_base) {
b0109805 6969 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6970 }
6971 if ((insn & (1 << 22)) && !user) {
6972 /* Restore CPSR from SPSR. */
d9ba4830
PB
6973 tmp = load_cpu_field(spsr);
6974 gen_set_cpsr(tmp, 0xffffffff);
6975 dead_tmp(tmp);
9ee6e8bb
PB
6976 s->is_jmp = DISAS_UPDATE;
6977 }
6978 }
6979 break;
6980 case 0xa:
6981 case 0xb:
6982 {
6983 int32_t offset;
6984
6985 /* branch (and link) */
6986 val = (int32_t)s->pc;
6987 if (insn & (1 << 24)) {
5e3f878a
PB
6988 tmp = new_tmp();
6989 tcg_gen_movi_i32(tmp, val);
6990 store_reg(s, 14, tmp);
9ee6e8bb
PB
6991 }
6992 offset = (((int32_t)insn << 8) >> 8);
6993 val += (offset << 2) + 4;
6994 gen_jmp(s, val);
6995 }
6996 break;
6997 case 0xc:
6998 case 0xd:
6999 case 0xe:
7000 /* Coprocessor. */
7001 if (disas_coproc_insn(env, s, insn))
7002 goto illegal_op;
7003 break;
7004 case 0xf:
7005 /* swi */
5e3f878a 7006 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7007 s->is_jmp = DISAS_SWI;
7008 break;
7009 default:
7010 illegal_op:
7011 gen_set_condexec(s);
5e3f878a 7012 gen_set_pc_im(s->pc - 4);
d9ba4830 7013 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7014 s->is_jmp = DISAS_JUMP;
7015 break;
7016 }
7017 }
7018}
7019
7020/* Return true if this is a Thumb-2 logical op. */
7021static int
7022thumb2_logic_op(int op)
7023{
7024 return (op < 8);
7025}
7026
7027/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7028 then set condition code flags based on the result of the operation.
7029 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7030 to the high bit of T1.
7031 Returns zero if the opcode is valid. */
7032
7033static int
396e467c 7034gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7035{
7036 int logic_cc;
7037
7038 logic_cc = 0;
7039 switch (op) {
7040 case 0: /* and */
396e467c 7041 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7042 logic_cc = conds;
7043 break;
7044 case 1: /* bic */
396e467c 7045 tcg_gen_bic_i32(t0, t0, t1);
9ee6e8bb
PB
7046 logic_cc = conds;
7047 break;
7048 case 2: /* orr */
396e467c 7049 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7050 logic_cc = conds;
7051 break;
7052 case 3: /* orn */
396e467c
FN
7053 tcg_gen_not_i32(t1, t1);
7054 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7055 logic_cc = conds;
7056 break;
7057 case 4: /* eor */
396e467c 7058 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7059 logic_cc = conds;
7060 break;
7061 case 8: /* add */
7062 if (conds)
396e467c 7063 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7064 else
396e467c 7065 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7066 break;
7067 case 10: /* adc */
7068 if (conds)
396e467c 7069 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7070 else
396e467c 7071 gen_adc(t0, t1);
9ee6e8bb
PB
7072 break;
7073 case 11: /* sbc */
7074 if (conds)
396e467c 7075 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7076 else
396e467c 7077 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7078 break;
7079 case 13: /* sub */
7080 if (conds)
396e467c 7081 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7082 else
396e467c 7083 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7084 break;
7085 case 14: /* rsb */
7086 if (conds)
396e467c 7087 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7088 else
396e467c 7089 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7090 break;
7091 default: /* 5, 6, 7, 9, 12, 15. */
7092 return 1;
7093 }
7094 if (logic_cc) {
396e467c 7095 gen_logic_CC(t0);
9ee6e8bb 7096 if (shifter_out)
396e467c 7097 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7098 }
7099 return 0;
7100}
7101
7102/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7103 is not legal. */
7104static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7105{
b0109805 7106 uint32_t insn, imm, shift, offset;
9ee6e8bb 7107 uint32_t rd, rn, rm, rs;
b26eefb6 7108 TCGv tmp;
6ddbc6e4
PB
7109 TCGv tmp2;
7110 TCGv tmp3;
b0109805 7111 TCGv addr;
a7812ae4 7112 TCGv_i64 tmp64;
9ee6e8bb
PB
7113 int op;
7114 int shiftop;
7115 int conds;
7116 int logic_cc;
7117
7118 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7119 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7120 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7121 16-bit instructions to get correct prefetch abort behavior. */
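/* Sketch of how the two halves combine (standard Thumb BL/BLX encoding):
   the first half sets LR = PC + SignExtend(imm11 << 12); the second half
   branches to LR + (imm11 << 1) (word-aligned, and switching to ARM state
   for BLX) and rewrites LR with the return address | 1. */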
7122 insn = insn_hw1;
7123 if ((insn & (1 << 12)) == 0) {
7124 /* Second half of blx. */
7125 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7126 tmp = load_reg(s, 14);
7127 tcg_gen_addi_i32(tmp, tmp, offset);
7128 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7129
d9ba4830 7130 tmp2 = new_tmp();
b0109805 7131 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7132 store_reg(s, 14, tmp2);
7133 gen_bx(s, tmp);
9ee6e8bb
PB
7134 return 0;
7135 }
7136 if (insn & (1 << 11)) {
7137 /* Second half of bl. */
7138 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7139 tmp = load_reg(s, 14);
6a0d8a1d 7140 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7141
d9ba4830 7142 tmp2 = new_tmp();
b0109805 7143 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7144 store_reg(s, 14, tmp2);
7145 gen_bx(s, tmp);
9ee6e8bb
PB
7146 return 0;
7147 }
7148 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7149 /* Instruction spans a page boundary. Implement it as two
7150 16-bit instructions in case the second half causes a
7151 prefetch abort. */
7152 offset = ((int32_t)insn << 21) >> 9;
396e467c 7153 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7154 return 0;
7155 }
7156 /* Fall through to 32-bit decode. */
7157 }
7158
7159 insn = lduw_code(s->pc);
7160 s->pc += 2;
7161 insn |= (uint32_t)insn_hw1 << 16;
7162
7163 if ((insn & 0xf800e800) != 0xf000e800) {
7164 ARCH(6T2);
7165 }
7166
7167 rn = (insn >> 16) & 0xf;
7168 rs = (insn >> 12) & 0xf;
7169 rd = (insn >> 8) & 0xf;
7170 rm = insn & 0xf;
7171 switch ((insn >> 25) & 0xf) {
7172 case 0: case 1: case 2: case 3:
7173 /* 16-bit instructions. Should never happen. */
7174 abort();
7175 case 4:
7176 if (insn & (1 << 22)) {
7177 /* Other load/store, table branch. */
7178 if (insn & 0x01200000) {
7179 /* Load/store doubleword. */
7180 if (rn == 15) {
b0109805
PB
7181 addr = new_tmp();
7182 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7183 } else {
b0109805 7184 addr = load_reg(s, rn);
9ee6e8bb
PB
7185 }
7186 offset = (insn & 0xff) * 4;
7187 if ((insn & (1 << 23)) == 0)
7188 offset = -offset;
7189 if (insn & (1 << 24)) {
b0109805 7190 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7191 offset = 0;
7192 }
7193 if (insn & (1 << 20)) {
7194 /* ldrd */
b0109805
PB
7195 tmp = gen_ld32(addr, IS_USER(s));
7196 store_reg(s, rs, tmp);
7197 tcg_gen_addi_i32(addr, addr, 4);
7198 tmp = gen_ld32(addr, IS_USER(s));
7199 store_reg(s, rd, tmp);
9ee6e8bb
PB
7200 } else {
7201 /* strd */
b0109805
PB
7202 tmp = load_reg(s, rs);
7203 gen_st32(tmp, addr, IS_USER(s));
7204 tcg_gen_addi_i32(addr, addr, 4);
7205 tmp = load_reg(s, rd);
7206 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7207 }
7208 if (insn & (1 << 21)) {
7209 /* Base writeback. */
7210 if (rn == 15)
7211 goto illegal_op;
b0109805
PB
7212 tcg_gen_addi_i32(addr, addr, offset - 4);
7213 store_reg(s, rn, addr);
7214 } else {
7215 dead_tmp(addr);
9ee6e8bb
PB
7216 }
7217 } else if ((insn & (1 << 23)) == 0) {
7218 /* Load/store exclusive word. */
3174f8e9
FN
7219 addr = tcg_temp_local_new();
7220 tcg_gen_mov_i32(addr, cpu_R[rn]);
2c0262af 7221 if (insn & (1 << 20)) {
3174f8e9 7222 gen_helper_mark_exclusive(cpu_env, addr);
8f8e3aa4
PB
7223 tmp = gen_ld32(addr, IS_USER(s));
7224 store_reg(s, rd, tmp);
9ee6e8bb 7225 } else {
8f8e3aa4 7226 int label = gen_new_label();
3174f8e9
FN
7227 tmp2 = tcg_temp_local_new();
7228 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7229 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7230 tmp = load_reg(s, rs);
3174f8e9 7231 gen_st32(tmp, addr, IS_USER(s));
8f8e3aa4 7232 gen_set_label(label);
3174f8e9
FN
7233 tcg_gen_mov_i32(cpu_R[rd], tmp2);
7234 tcg_temp_free(tmp2);
9ee6e8bb 7235 }
3174f8e9 7236 tcg_temp_free(addr);
9ee6e8bb
PB
7237 } else if ((insn & (1 << 6)) == 0) {
7238 /* Table Branch. */
7239 if (rn == 15) {
b0109805
PB
7240 addr = new_tmp();
7241 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7242 } else {
b0109805 7243 addr = load_reg(s, rn);
9ee6e8bb 7244 }
b26eefb6 7245 tmp = load_reg(s, rm);
b0109805 7246 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7247 if (insn & (1 << 4)) {
7248 /* tbh */
b0109805 7249 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7250 dead_tmp(tmp);
b0109805 7251 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7252 } else { /* tbb */
b26eefb6 7253 dead_tmp(tmp);
b0109805 7254 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7255 }
b0109805
PB
7256 dead_tmp(addr);
7257 tcg_gen_shli_i32(tmp, tmp, 1);
7258 tcg_gen_addi_i32(tmp, tmp, s->pc);
7259 store_reg(s, 15, tmp);
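/* Net effect (per the tbb/tbh definitions): the branch target is
   PC + 2 * table[Rn + Rm] for byte entries, or
   PC + 2 * table[Rn + 2*Rm] for halfword entries, which is why the
   loaded entry is shifted left by one before adding s->pc. */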
9ee6e8bb
PB
7260 } else {
7261 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7262 /* ??? These are not really atomic. However we know
7263 we never have multiple CPUs running in parallel,
7264 so it is good enough. */
9ee6e8bb 7265 op = (insn >> 4) & 0x3;
3174f8e9
FN
7266 addr = tcg_temp_local_new();
7267 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb 7268 if (insn & (1 << 20)) {
8f8e3aa4 7269 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7270 switch (op) {
7271 case 0:
8f8e3aa4 7272 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7273 break;
2c0262af 7274 case 1:
8f8e3aa4 7275 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7276 break;
9ee6e8bb 7277 case 3:
8f8e3aa4
PB
7278 tmp = gen_ld32(addr, IS_USER(s));
7279 tcg_gen_addi_i32(addr, addr, 4);
7280 tmp2 = gen_ld32(addr, IS_USER(s));
7281 store_reg(s, rd, tmp2);
2c0262af
FB
7282 break;
7283 default:
9ee6e8bb
PB
7284 goto illegal_op;
7285 }
8f8e3aa4 7286 store_reg(s, rs, tmp);
9ee6e8bb 7287 } else {
8f8e3aa4 7288 int label = gen_new_label();
3174f8e9
FN
7289 tmp2 = tcg_temp_local_new();
7290 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7291 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7292 tmp = load_reg(s, rs);
9ee6e8bb
PB
7293 switch (op) {
7294 case 0:
8f8e3aa4 7295 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7296 break;
7297 case 1:
8f8e3aa4 7298 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7299 break;
2c0262af 7300 case 3:
8f8e3aa4
PB
7301 gen_st32(tmp, addr, IS_USER(s));
7302 tcg_gen_addi_i32(addr, addr, 4);
7303 tmp = load_reg(s, rd);
7304 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7305 break;
9ee6e8bb
PB
7306 default:
7307 goto illegal_op;
2c0262af 7308 }
8f8e3aa4 7309 gen_set_label(label);
3174f8e9
FN
7310 tcg_gen_mov_i32(cpu_R[rm], tmp2);
7311 tcg_temp_free(tmp2);
9ee6e8bb 7312 }
3174f8e9 7313 tcg_temp_free(addr);
9ee6e8bb
PB
7314 }
7315 } else {
7316 /* Load/store multiple, RFE, SRS. */
7317 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7318 /* Not available in user mode. */
b0109805 7319 if (IS_USER(s))
9ee6e8bb
PB
7320 goto illegal_op;
7321 if (insn & (1 << 20)) {
7322 /* rfe */
b0109805
PB
7323 addr = load_reg(s, rn);
7324 if ((insn & (1 << 24)) == 0)
7325 tcg_gen_addi_i32(addr, addr, -8);
7326 /* Load PC into tmp and CPSR into tmp2. */
7327 tmp = gen_ld32(addr, 0);
7328 tcg_gen_addi_i32(addr, addr, 4);
7329 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7330 if (insn & (1 << 21)) {
7331 /* Base writeback. */
b0109805
PB
7332 if (insn & (1 << 24)) {
7333 tcg_gen_addi_i32(addr, addr, 4);
7334 } else {
7335 tcg_gen_addi_i32(addr, addr, -4);
7336 }
7337 store_reg(s, rn, addr);
7338 } else {
7339 dead_tmp(addr);
9ee6e8bb 7340 }
b0109805 7341 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7342 } else {
7343 /* srs */
7344 op = (insn & 0x1f);
7345 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7346 addr = load_reg(s, 13);
9ee6e8bb 7347 } else {
b0109805
PB
7348 addr = new_tmp();
7349 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7350 }
7351 if ((insn & (1 << 24)) == 0) {
b0109805 7352 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7353 }
b0109805
PB
7354 tmp = load_reg(s, 14);
7355 gen_st32(tmp, addr, 0);
7356 tcg_gen_addi_i32(addr, addr, 4);
7357 tmp = new_tmp();
7358 gen_helper_cpsr_read(tmp);
7359 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7360 if (insn & (1 << 21)) {
7361 if ((insn & (1 << 24)) == 0) {
b0109805 7362 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7363 } else {
b0109805 7364 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7365 }
7366 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7367 store_reg(s, 13, addr);
9ee6e8bb 7368 } else {
b0109805
PB
7369 gen_helper_set_r13_banked(cpu_env,
7370 tcg_const_i32(op), addr);
9ee6e8bb 7371 }
b0109805
PB
7372 } else {
7373 dead_tmp(addr);
9ee6e8bb
PB
7374 }
7375 }
7376 } else {
7377 int i;
7378 /* Load/store multiple. */
b0109805 7379 addr = load_reg(s, rn);
9ee6e8bb
PB
7380 offset = 0;
7381 for (i = 0; i < 16; i++) {
7382 if (insn & (1 << i))
7383 offset += 4;
7384 }
7385 if (insn & (1 << 24)) {
b0109805 7386 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7387 }
7388
7389 for (i = 0; i < 16; i++) {
7390 if ((insn & (1 << i)) == 0)
7391 continue;
7392 if (insn & (1 << 20)) {
7393 /* Load. */
b0109805 7394 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7395 if (i == 15) {
b0109805 7396 gen_bx(s, tmp);
9ee6e8bb 7397 } else {
b0109805 7398 store_reg(s, i, tmp);
9ee6e8bb
PB
7399 }
7400 } else {
7401 /* Store. */
b0109805
PB
7402 tmp = load_reg(s, i);
7403 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7404 }
b0109805 7405 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7406 }
7407 if (insn & (1 << 21)) {
7408 /* Base register writeback. */
7409 if (insn & (1 << 24)) {
b0109805 7410 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7411 }
7412 /* Fault if writeback register is in register list. */
7413 if (insn & (1 << rn))
7414 goto illegal_op;
b0109805
PB
7415 store_reg(s, rn, addr);
7416 } else {
7417 dead_tmp(addr);
9ee6e8bb
PB
7418 }
7419 }
7420 }
7421 break;
7422 case 5: /* Data processing register constant shift. */
3174f8e9
FN
7423 if (rn == 15) {
7424 tmp = new_tmp();
7425 tcg_gen_movi_i32(tmp, 0);
7426 } else {
7427 tmp = load_reg(s, rn);
7428 }
7429 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7430 op = (insn >> 21) & 0xf;
7431 shiftop = (insn >> 4) & 3;
7432 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7433 conds = (insn & (1 << 20)) != 0;
7434 logic_cc = (conds && thumb2_logic_op(op));
3174f8e9
FN
7435 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7436 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9ee6e8bb 7437 goto illegal_op;
3174f8e9
FN
7438 dead_tmp(tmp2);
7439 if (rd != 15) {
7440 store_reg(s, rd, tmp);
7441 } else {
7442 dead_tmp(tmp);
7443 }
9ee6e8bb
PB
7444 break;
7445 case 13: /* Misc data processing. */
7446 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7447 if (op < 4 && (insn & 0xf000) != 0xf000)
7448 goto illegal_op;
7449 switch (op) {
7450 case 0: /* Register controlled shift. */
8984bd2e
PB
7451 tmp = load_reg(s, rn);
7452 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7453 if ((insn & 0x70) != 0)
7454 goto illegal_op;
7455 op = (insn >> 21) & 3;
8984bd2e
PB
7456 logic_cc = (insn & (1 << 20)) != 0;
7457 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7458 if (logic_cc)
7459 gen_logic_CC(tmp);
21aeb343 7460 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7461 break;
7462 case 1: /* Sign/zero extend. */
5e3f878a 7463 tmp = load_reg(s, rm);
9ee6e8bb
PB
7464 shift = (insn >> 4) & 3;
7465 /* ??? In many cases it's not necessary to do a
7466 rotate; a shift is sufficient. */
7467 if (shift != 0)
5e3f878a 7468 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7469 op = (insn >> 20) & 7;
7470 switch (op) {
5e3f878a
PB
7471 case 0: gen_sxth(tmp); break;
7472 case 1: gen_uxth(tmp); break;
7473 case 2: gen_sxtb16(tmp); break;
7474 case 3: gen_uxtb16(tmp); break;
7475 case 4: gen_sxtb(tmp); break;
7476 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7477 default: goto illegal_op;
7478 }
7479 if (rn != 15) {
5e3f878a 7480 tmp2 = load_reg(s, rn);
9ee6e8bb 7481 if ((op >> 1) == 1) {
5e3f878a 7482 gen_add16(tmp, tmp2);
9ee6e8bb 7483 } else {
5e3f878a
PB
7484 tcg_gen_add_i32(tmp, tmp, tmp2);
7485 dead_tmp(tmp2);
9ee6e8bb
PB
7486 }
7487 }
5e3f878a 7488 store_reg(s, rd, tmp);
9ee6e8bb
PB
7489 break;
7490 case 2: /* SIMD add/subtract. */
7491 op = (insn >> 20) & 7;
7492 shift = (insn >> 4) & 7;
7493 if ((op & 3) == 3 || (shift & 3) == 3)
7494 goto illegal_op;
6ddbc6e4
PB
7495 tmp = load_reg(s, rn);
7496 tmp2 = load_reg(s, rm);
7497 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7498 dead_tmp(tmp2);
7499 store_reg(s, rd, tmp);
9ee6e8bb
PB
7500 break;
7501 case 3: /* Other data processing. */
7502 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7503 if (op < 4) {
7504 /* Saturating add/subtract. */
d9ba4830
PB
7505 tmp = load_reg(s, rn);
7506 tmp2 = load_reg(s, rm);
9ee6e8bb 7507 if (op & 2)
d9ba4830 7508 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7509 if (op & 1)
d9ba4830 7510 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7511 else
d9ba4830
PB
7512 gen_helper_add_saturate(tmp, tmp, tmp2);
7513 dead_tmp(tmp2);
9ee6e8bb 7514 } else {
d9ba4830 7515 tmp = load_reg(s, rn);
9ee6e8bb
PB
7516 switch (op) {
7517 case 0x0a: /* rbit */
d9ba4830 7518 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7519 break;
7520 case 0x08: /* rev */
66896cb8 7521 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7522 break;
7523 case 0x09: /* rev16 */
d9ba4830 7524 gen_rev16(tmp);
9ee6e8bb
PB
7525 break;
7526 case 0x0b: /* revsh */
d9ba4830 7527 gen_revsh(tmp);
9ee6e8bb
PB
7528 break;
7529 case 0x10: /* sel */
d9ba4830 7530 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7531 tmp3 = new_tmp();
7532 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7533 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7534 dead_tmp(tmp3);
d9ba4830 7535 dead_tmp(tmp2);
9ee6e8bb
PB
7536 break;
7537 case 0x18: /* clz */
d9ba4830 7538 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7539 break;
7540 default:
7541 goto illegal_op;
7542 }
7543 }
d9ba4830 7544 store_reg(s, rd, tmp);
9ee6e8bb
PB
7545 break;
7546 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7547 op = (insn >> 4) & 0xf;
d9ba4830
PB
7548 tmp = load_reg(s, rn);
7549 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7550 switch ((insn >> 20) & 7) {
7551 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7552 tcg_gen_mul_i32(tmp, tmp, tmp2);
7553 dead_tmp(tmp2);
9ee6e8bb 7554 if (rs != 15) {
d9ba4830 7555 tmp2 = load_reg(s, rs);
9ee6e8bb 7556 if (op)
d9ba4830 7557 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7558 else
d9ba4830
PB
7559 tcg_gen_add_i32(tmp, tmp, tmp2);
7560 dead_tmp(tmp2);
9ee6e8bb 7561 }
9ee6e8bb
PB
7562 break;
7563 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7564 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7565 dead_tmp(tmp2);
9ee6e8bb 7566 if (rs != 15) {
d9ba4830
PB
7567 tmp2 = load_reg(s, rs);
7568 gen_helper_add_setq(tmp, tmp, tmp2);
7569 dead_tmp(tmp2);
9ee6e8bb 7570 }
9ee6e8bb
PB
7571 break;
7572 case 2: /* Dual multiply add. */
7573 case 4: /* Dual multiply subtract. */
7574 if (op)
d9ba4830
PB
7575 gen_swap_half(tmp2);
7576 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7577 /* This addition cannot overflow. */
7578 if (insn & (1 << 22)) {
d9ba4830 7579 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7580 } else {
d9ba4830 7581 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7582 }
d9ba4830 7583 dead_tmp(tmp2);
9ee6e8bb
PB
7584 if (rs != 15)
7585 {
d9ba4830
PB
7586 tmp2 = load_reg(s, rs);
7587 gen_helper_add_setq(tmp, tmp, tmp2);
7588 dead_tmp(tmp2);
9ee6e8bb 7589 }
9ee6e8bb
PB
7590 break;
7591 case 3: /* 32 * 16 -> 32msb */
7592 if (op)
d9ba4830 7593 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7594 else
d9ba4830 7595 gen_sxth(tmp2);
a7812ae4
PB
7596 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7597 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7598 tmp = new_tmp();
a7812ae4 7599 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb
PB
7600 if (rs != 15)
7601 {
d9ba4830
PB
7602 tmp2 = load_reg(s, rs);
7603 gen_helper_add_setq(tmp, tmp, tmp2);
7604 dead_tmp(tmp2);
9ee6e8bb 7605 }
9ee6e8bb
PB
7606 break;
7607 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7608 gen_imull(tmp, tmp2);
7609 if (insn & (1 << 5)) {
7610 gen_roundqd(tmp, tmp2);
7611 dead_tmp(tmp2);
7612 } else {
7613 dead_tmp(tmp);
7614 tmp = tmp2;
7615 }
9ee6e8bb 7616 if (rs != 15) {
d9ba4830 7617 tmp2 = load_reg(s, rs);
9ee6e8bb 7618 if (insn & (1 << 21)) {
d9ba4830 7619 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7620 } else {
d9ba4830 7621 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7622 }
d9ba4830 7623 dead_tmp(tmp2);
2c0262af 7624 }
9ee6e8bb
PB
7625 break;
7626 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7627 gen_helper_usad8(tmp, tmp, tmp2);
7628 dead_tmp(tmp2);
9ee6e8bb 7629 if (rs != 15) {
d9ba4830
PB
7630 tmp2 = load_reg(s, rs);
7631 tcg_gen_add_i32(tmp, tmp, tmp2);
7632 dead_tmp(tmp2);
5fd46862 7633 }
9ee6e8bb 7634 break;
2c0262af 7635 }
d9ba4830 7636 store_reg(s, rd, tmp);
2c0262af 7637 break;
9ee6e8bb
PB
7638 case 6: case 7: /* 64-bit multiply, Divide. */
7639 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7640 tmp = load_reg(s, rn);
7641 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7642 if ((op & 0x50) == 0x10) {
7643 /* sdiv, udiv */
7644 if (!arm_feature(env, ARM_FEATURE_DIV))
7645 goto illegal_op;
7646 if (op & 0x20)
5e3f878a 7647 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7648 else
5e3f878a
PB
7649 gen_helper_sdiv(tmp, tmp, tmp2);
7650 dead_tmp(tmp2);
7651 store_reg(s, rd, tmp);
9ee6e8bb
PB
7652 } else if ((op & 0xe) == 0xc) {
7653 /* Dual multiply accumulate long. */
7654 if (op & 1)
5e3f878a
PB
7655 gen_swap_half(tmp2);
7656 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7657 if (op & 0x10) {
5e3f878a 7658 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7659 } else {
5e3f878a 7660 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7661 }
5e3f878a 7662 dead_tmp(tmp2);
a7812ae4
PB
7663 /* BUGFIX */
7664 tmp64 = tcg_temp_new_i64();
7665 tcg_gen_ext_i32_i64(tmp64, tmp);
7666 dead_tmp(tmp);
7667 gen_addq(s, tmp64, rs, rd);
7668 gen_storeq_reg(s, rs, rd, tmp64);
2c0262af 7669 } else {
9ee6e8bb
PB
7670 if (op & 0x20) {
7671 /* Unsigned 64-bit multiply */
a7812ae4 7672 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7673 } else {
9ee6e8bb
PB
7674 if (op & 8) {
7675 /* smlalxy */
5e3f878a
PB
7676 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7677 dead_tmp(tmp2);
a7812ae4
PB
7678 tmp64 = tcg_temp_new_i64();
7679 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7680 dead_tmp(tmp);
9ee6e8bb
PB
7681 } else {
7682 /* Signed 64-bit multiply */
a7812ae4 7683 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7684 }
b5ff1b31 7685 }
9ee6e8bb
PB
7686 if (op & 4) {
7687 /* umaal */
a7812ae4
PB
7688 gen_addq_lo(s, tmp64, rs);
7689 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7690 } else if (op & 0x40) {
7691 /* 64-bit accumulate. */
a7812ae4 7692 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7693 }
a7812ae4 7694 gen_storeq_reg(s, rs, rd, tmp64);
5fd46862 7695 }
2c0262af 7696 break;
9ee6e8bb
PB
7697 }
7698 break;
7699 case 6: case 7: case 14: case 15:
7700 /* Coprocessor. */
7701 if (((insn >> 24) & 3) == 3) {
7702 /* Translate into the equivalent ARM encoding. */
7703 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7704 if (disas_neon_data_insn(env, s, insn))
7705 goto illegal_op;
7706 } else {
7707 if (insn & (1 << 28))
7708 goto illegal_op;
7709 if (disas_coproc_insn (env, s, insn))
7710 goto illegal_op;
7711 }
7712 break;
7713 case 8: case 9: case 10: case 11:
7714 if (insn & (1 << 15)) {
7715 /* Branches, misc control. */
7716 if (insn & 0x5000) {
7717 /* Unconditional branch. */
7719 /* signextend(hw1[10:0]) -> offset[31:12]. */
7719 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7721 /* hw2[10:0] -> offset[11:1]. */
7721 offset |= (insn & 0x7ff) << 1;
7722 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7723 offset[24:22] already have the same value because of the
7724 sign extension above. */
7725 offset ^= ((~insn) & (1 << 13)) << 10;
7726 offset ^= ((~insn) & (1 << 11)) << 11;
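                /* The two XORs give offset[23] = ~(hw2[13] ^ S) and
                   offset[22] = ~(hw2[11] ^ S), where S is the sign bit the
                   arithmetic shift above copied into offset[24:22]. */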
7727
9ee6e8bb
PB
7728 if (insn & (1 << 14)) {
7729 /* Branch and link. */
3174f8e9 7730 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7731 }
3b46e624 7732
b0109805 7733 offset += s->pc;
9ee6e8bb
PB
7734 if (insn & (1 << 12)) {
7735 /* b/bl */
b0109805 7736 gen_jmp(s, offset);
9ee6e8bb
PB
7737 } else {
7738 /* blx */
b0109805
PB
7739 offset &= ~(uint32_t)2;
7740 gen_bx_im(s, offset);
2c0262af 7741 }
9ee6e8bb
PB
7742 } else if (((insn >> 23) & 7) == 7) {
7743 /* Misc control */
7744 if (insn & (1 << 13))
7745 goto illegal_op;
7746
7747 if (insn & (1 << 26)) {
7748 /* Secure monitor call (v6Z) */
7749 goto illegal_op; /* not implemented. */
2c0262af 7750 } else {
9ee6e8bb
PB
7751 op = (insn >> 20) & 7;
7752 switch (op) {
7753 case 0: /* msr cpsr. */
7754 if (IS_M(env)) {
8984bd2e
PB
7755 tmp = load_reg(s, rn);
7756 addr = tcg_const_i32(insn & 0xff);
7757 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7758 gen_lookup_tb(s);
7759 break;
7760 }
7761 /* fall through */
7762 case 1: /* msr spsr. */
7763 if (IS_M(env))
7764 goto illegal_op;
2fbac54b
FN
7765 tmp = load_reg(s, rn);
7766 if (gen_set_psr(s,
9ee6e8bb 7767 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7768 op == 1, tmp))
9ee6e8bb
PB
7769 goto illegal_op;
7770 break;
7771 case 2: /* cps, nop-hint. */
7772 if (((insn >> 8) & 7) == 0) {
7773 gen_nop_hint(s, insn & 0xff);
7774 }
7775 /* Implemented as NOP in user mode. */
7776 if (IS_USER(s))
7777 break;
7778 offset = 0;
7779 imm = 0;
7780 if (insn & (1 << 10)) {
7781 if (insn & (1 << 7))
7782 offset |= CPSR_A;
7783 if (insn & (1 << 6))
7784 offset |= CPSR_I;
7785 if (insn & (1 << 5))
7786 offset |= CPSR_F;
7787 if (insn & (1 << 9))
7788 imm = CPSR_A | CPSR_I | CPSR_F;
7789 }
7790 if (insn & (1 << 8)) {
7791 offset |= 0x1f;
7792 imm |= (insn & 0x1f);
7793 }
7794 if (offset) {
2fbac54b 7795 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
7796 }
7797 break;
7798 case 3: /* Special control operations. */
7799 op = (insn >> 4) & 0xf;
7800 switch (op) {
7801 case 2: /* clrex */
8f8e3aa4 7802 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7803 break;
7804 case 4: /* dsb */
7805 case 5: /* dmb */
7806 case 6: /* isb */
7807 /* These execute as NOPs. */
7808 ARCH(7);
7809 break;
7810 default:
7811 goto illegal_op;
7812 }
7813 break;
7814 case 4: /* bxj */
7815 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7816 tmp = load_reg(s, rn);
7817 gen_bx(s, tmp);
9ee6e8bb
PB
7818 break;
7819 case 5: /* Exception return. */
7820 /* Unpredictable in user mode. */
7821 goto illegal_op;
7822 case 6: /* mrs cpsr. */
8984bd2e 7823 tmp = new_tmp();
9ee6e8bb 7824 if (IS_M(env)) {
8984bd2e
PB
7825 addr = tcg_const_i32(insn & 0xff);
7826 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7827 } else {
8984bd2e 7828 gen_helper_cpsr_read(tmp);
9ee6e8bb 7829 }
8984bd2e 7830 store_reg(s, rd, tmp);
9ee6e8bb
PB
7831 break;
7832 case 7: /* mrs spsr. */
7833 /* Not accessible in user mode. */
7834 if (IS_USER(s) || IS_M(env))
7835 goto illegal_op;
d9ba4830
PB
7836 tmp = load_cpu_field(spsr);
7837 store_reg(s, rd, tmp);
9ee6e8bb 7838 break;
2c0262af
FB
7839 }
7840 }
9ee6e8bb
PB
7841 } else {
7842 /* Conditional branch. */
7843 op = (insn >> 22) & 0xf;
7844 /* Generate a conditional jump to next instruction. */
7845 s->condlabel = gen_new_label();
d9ba4830 7846 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7847 s->condjmp = 1;
7848
7849 /* offset[11:1] = insn[10:0] */
7850 offset = (insn & 0x7ff) << 1;
7851 /* offset[17:12] = insn[21:16]. */
7852 offset |= (insn & 0x003f0000) >> 4;
7853 /* offset[31:20] = insn[26]. */
7854 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7855 /* offset[18] = insn[13]. */
7856 offset |= (insn & (1 << 13)) << 5;
7857 /* offset[19] = insn[11]. */
7858 offset |= (insn & (1 << 11)) << 8;
7859
7860 /* jump to the offset */
b0109805 7861 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7862 }
7863 } else {
7864 /* Data processing immediate. */
7865 if (insn & (1 << 25)) {
7866 if (insn & (1 << 24)) {
7867 if (insn & (1 << 20))
7868 goto illegal_op;
7869 /* Bitfield/Saturate. */
7870 op = (insn >> 21) & 7;
7871 imm = insn & 0x1f;
7872 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7873 if (rn == 15) {
7874 tmp = new_tmp();
7875 tcg_gen_movi_i32(tmp, 0);
7876 } else {
7877 tmp = load_reg(s, rn);
7878 }
9ee6e8bb
PB
7879 switch (op) {
7880 case 2: /* Signed bitfield extract. */
7881 imm++;
7882 if (shift + imm > 32)
7883 goto illegal_op;
7884 if (imm < 32)
6ddbc6e4 7885 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7886 break;
7887 case 6: /* Unsigned bitfield extract. */
7888 imm++;
7889 if (shift + imm > 32)
7890 goto illegal_op;
7891 if (imm < 32)
6ddbc6e4 7892 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7893 break;
7894 case 3: /* Bitfield insert/clear. */
7895 if (imm < shift)
7896 goto illegal_op;
7897 imm = imm + 1 - shift;
7898 if (imm != 32) {
6ddbc6e4 7899 tmp2 = load_reg(s, rd);
8f8e3aa4 7900 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7901 dead_tmp(tmp2);
9ee6e8bb
PB
7902 }
7903 break;
7904 case 7:
7905 goto illegal_op;
7906 default: /* Saturate. */
9ee6e8bb
PB
7907 if (shift) {
7908 if (op & 1)
6ddbc6e4 7909 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7910 else
6ddbc6e4 7911 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7912 }
6ddbc6e4 7913 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7914 if (op & 4) {
7915 /* Unsigned. */
9ee6e8bb 7916 if ((op & 1) && shift == 0)
6ddbc6e4 7917 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7918 else
6ddbc6e4 7919 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7920 } else {
9ee6e8bb 7921 /* Signed. */
9ee6e8bb 7922 if ((op & 1) && shift == 0)
6ddbc6e4 7923 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7924 else
6ddbc6e4 7925 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7926 }
9ee6e8bb 7927 break;
2c0262af 7928 }
6ddbc6e4 7929 store_reg(s, rd, tmp);
9ee6e8bb
PB
7930 } else {
7931 imm = ((insn & 0x04000000) >> 15)
7932 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7933 if (insn & (1 << 22)) {
7934 /* 16-bit immediate. */
7935 imm |= (insn >> 4) & 0xf000;
7936 if (insn & (1 << 23)) {
7937 /* movt */
5e3f878a 7938 tmp = load_reg(s, rd);
86831435 7939 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7940 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7941 } else {
9ee6e8bb 7942 /* movw */
5e3f878a
PB
7943 tmp = new_tmp();
7944 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7945 }
7946 } else {
9ee6e8bb
PB
7947 /* Add/sub 12-bit immediate. */
7948 if (rn == 15) {
b0109805 7949 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7950 if (insn & (1 << 23))
b0109805 7951 offset -= imm;
9ee6e8bb 7952 else
b0109805 7953 offset += imm;
5e3f878a
PB
7954 tmp = new_tmp();
7955 tcg_gen_movi_i32(tmp, offset);
2c0262af 7956 } else {
5e3f878a 7957 tmp = load_reg(s, rn);
9ee6e8bb 7958 if (insn & (1 << 23))
5e3f878a 7959 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7960 else
5e3f878a 7961 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7962 }
9ee6e8bb 7963 }
5e3f878a 7964 store_reg(s, rd, tmp);
191abaa2 7965 }
9ee6e8bb
PB
7966 } else {
7967 int shifter_out = 0;
7968 /* modified 12-bit immediate. */
7969 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7970 imm = (insn & 0xff);
7971 switch (shift) {
7972 case 0: /* XY */
7973 /* Nothing to do. */
7974 break;
7975 case 1: /* 00XY00XY */
7976 imm |= imm << 16;
7977 break;
7978 case 2: /* XY00XY00 */
7979 imm |= imm << 16;
7980 imm <<= 8;
7981 break;
7982 case 3: /* XYXYXYXY */
7983 imm |= imm << 16;
7984 imm |= imm << 8;
7985 break;
7986 default: /* Rotated constant. */
7987 shift = (shift << 1) | (imm >> 7);
7988 imm |= 0x80;
7989 imm = imm << (32 - shift);
7990 shifter_out = 1;
7991 break;
b5ff1b31 7992 }
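                /* Worked example for the rotated-constant case: with
                   shift == 4 and imm == 0x2a on entry, shift becomes
                   (4 << 1) | (0x2a >> 7) == 8, imm becomes 0xaa, and the
                   final value is 0xaa << (32 - 8) == 0xaa000000. */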
3174f8e9
FN
7993 tmp2 = new_tmp();
7994 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 7995 rn = (insn >> 16) & 0xf;
3174f8e9
FN
7996 if (rn == 15) {
7997 tmp = new_tmp();
7998 tcg_gen_movi_i32(tmp, 0);
7999 } else {
8000 tmp = load_reg(s, rn);
8001 }
9ee6e8bb
PB
8002 op = (insn >> 21) & 0xf;
8003 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8004 shifter_out, tmp, tmp2))
9ee6e8bb 8005 goto illegal_op;
3174f8e9 8006 dead_tmp(tmp2);
9ee6e8bb
PB
8007 rd = (insn >> 8) & 0xf;
8008 if (rd != 15) {
3174f8e9
FN
8009 store_reg(s, rd, tmp);
8010 } else {
8011 dead_tmp(tmp);
2c0262af 8012 }
2c0262af 8013 }
9ee6e8bb
PB
8014 }
8015 break;
8016 case 12: /* Load/store single data item. */
8017 {
8018 int postinc = 0;
8019 int writeback = 0;
b0109805 8020 int user;
9ee6e8bb
PB
8021 if ((insn & 0x01100000) == 0x01000000) {
8022 if (disas_neon_ls_insn(env, s, insn))
c1713132 8023 goto illegal_op;
9ee6e8bb
PB
8024 break;
8025 }
b0109805 8026 user = IS_USER(s);
9ee6e8bb 8027 if (rn == 15) {
b0109805 8028 addr = new_tmp();
9ee6e8bb
PB
8029 /* PC relative. */
8030 /* s->pc has already been incremented by 4. */
8031 imm = s->pc & 0xfffffffc;
8032 if (insn & (1 << 23))
8033 imm += insn & 0xfff;
8034 else
8035 imm -= insn & 0xfff;
b0109805 8036 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8037 } else {
b0109805 8038 addr = load_reg(s, rn);
9ee6e8bb
PB
8039 if (insn & (1 << 23)) {
8040 /* Positive offset. */
8041 imm = insn & 0xfff;
b0109805 8042 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8043 } else {
8044 op = (insn >> 8) & 7;
8045 imm = insn & 0xff;
8046 switch (op) {
8047 case 0: case 8: /* Shifted Register. */
8048 shift = (insn >> 4) & 0xf;
8049 if (shift > 3)
18c9b560 8050 goto illegal_op;
b26eefb6 8051 tmp = load_reg(s, rm);
9ee6e8bb 8052 if (shift)
b26eefb6 8053 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8054 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8055 dead_tmp(tmp);
9ee6e8bb
PB
8056 break;
8057 case 4: /* Negative offset. */
b0109805 8058 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8059 break;
8060 case 6: /* User privilege. */
b0109805
PB
8061 tcg_gen_addi_i32(addr, addr, imm);
8062 user = 1;
9ee6e8bb
PB
8063 break;
8064 case 1: /* Post-decrement. */
8065 imm = -imm;
8066 /* Fall through. */
8067 case 3: /* Post-increment. */
9ee6e8bb
PB
8068 postinc = 1;
8069 writeback = 1;
8070 break;
8071 case 5: /* Pre-decrement. */
8072 imm = -imm;
8073 /* Fall through. */
8074 case 7: /* Pre-increment. */
b0109805 8075 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8076 writeback = 1;
8077 break;
8078 default:
b7bcbe95 8079 goto illegal_op;
9ee6e8bb
PB
8080 }
8081 }
8082 }
8083 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
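        /* op[1:0] is the access size from insn[22:21] (0 = byte,
           1 = halfword, 2 = word) and op[2] is insn[24], which selects
           the sign-extending loads below. */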
8084 if (insn & (1 << 20)) {
8085 /* Load. */
8086 if (rs == 15 && op != 2) {
8087 if (op & 2)
b5ff1b31 8088 goto illegal_op;
9ee6e8bb
PB
8089 /* Memory hint. Implemented as NOP. */
8090 } else {
8091 switch (op) {
b0109805
PB
8092 case 0: tmp = gen_ld8u(addr, user); break;
8093 case 4: tmp = gen_ld8s(addr, user); break;
8094 case 1: tmp = gen_ld16u(addr, user); break;
8095 case 5: tmp = gen_ld16s(addr, user); break;
8096 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8097 default: goto illegal_op;
8098 }
8099 if (rs == 15) {
b0109805 8100 gen_bx(s, tmp);
9ee6e8bb 8101 } else {
b0109805 8102 store_reg(s, rs, tmp);
9ee6e8bb
PB
8103 }
8104 }
8105 } else {
8106 /* Store. */
8107 if (rs == 15)
b7bcbe95 8108 goto illegal_op;
b0109805 8109 tmp = load_reg(s, rs);
9ee6e8bb 8110 switch (op) {
b0109805
PB
8111 case 0: gen_st8(tmp, addr, user); break;
8112 case 1: gen_st16(tmp, addr, user); break;
8113 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8114 default: goto illegal_op;
b7bcbe95 8115 }
2c0262af 8116 }
9ee6e8bb 8117 if (postinc)
b0109805
PB
8118 tcg_gen_addi_i32(addr, addr, imm);
8119 if (writeback) {
8120 store_reg(s, rn, addr);
8121 } else {
8122 dead_tmp(addr);
8123 }
9ee6e8bb
PB
8124 }
8125 break;
8126 default:
8127 goto illegal_op;
2c0262af 8128 }
9ee6e8bb
PB
8129 return 0;
8130illegal_op:
8131 return 1;
2c0262af
FB
8132}
8133
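/* Illustrative sketch only (not used by the translator): a standalone
   helper, under an invented name, that mirrors the B/BL/BLX offset
   reconstruction done inline in disas_thumb2_insn above.  insn is the
   combined instruction word with hw1 in the upper and hw2 in the lower
   halfword. */
#if 0
static int32_t thumb2_branch_offset_sketch(uint32_t insn)
{
    int32_t offset;

    /* sign-extend hw1[10:0] into offset[31:12] */
    offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
    /* hw2[10:0] -> offset[11:1] */
    offset |= (insn & 0x7ff) << 1;
    /* offset[23] = ~(hw2[13] ^ S), offset[22] = ~(hw2[11] ^ S) */
    offset ^= ((~insn) & (1 << 13)) << 10;
    offset ^= ((~insn) & (1 << 11)) << 11;
    return offset;
}
#endif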
9ee6e8bb 8134static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8135{
8136 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8137 int32_t offset;
8138 int i;
b26eefb6 8139 TCGv tmp;
d9ba4830 8140 TCGv tmp2;
b0109805 8141 TCGv addr;
99c475ab 8142
9ee6e8bb
PB
8143 if (s->condexec_mask) {
8144 cond = s->condexec_cond;
8145 s->condlabel = gen_new_label();
d9ba4830 8146 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8147 s->condjmp = 1;
8148 }
8149
b5ff1b31 8150 insn = lduw_code(s->pc);
99c475ab 8151 s->pc += 2;
b5ff1b31 8152
99c475ab
FB
8153 switch (insn >> 12) {
8154 case 0: case 1:
396e467c 8155
99c475ab
FB
8156 rd = insn & 7;
8157 op = (insn >> 11) & 3;
8158 if (op == 3) {
8159 /* add/subtract */
8160 rn = (insn >> 3) & 7;
396e467c 8161 tmp = load_reg(s, rn);
99c475ab
FB
8162 if (insn & (1 << 10)) {
8163 /* immediate */
396e467c
FN
8164 tmp2 = new_tmp();
8165 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8166 } else {
8167 /* reg */
8168 rm = (insn >> 6) & 7;
396e467c 8169 tmp2 = load_reg(s, rm);
99c475ab 8170 }
9ee6e8bb
PB
8171 if (insn & (1 << 9)) {
8172 if (s->condexec_mask)
396e467c 8173 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8174 else
396e467c 8175 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8176 } else {
8177 if (s->condexec_mask)
396e467c 8178 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8179 else
396e467c 8180 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8181 }
396e467c
FN
8182 dead_tmp(tmp2);
8183 store_reg(s, rd, tmp);
99c475ab
FB
8184 } else {
8185 /* shift immediate */
8186 rm = (insn >> 3) & 7;
8187 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8188 tmp = load_reg(s, rm);
8189 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8190 if (!s->condexec_mask)
8191 gen_logic_CC(tmp);
8192 store_reg(s, rd, tmp);
99c475ab
FB
8193 }
8194 break;
8195 case 2: case 3:
8196 /* arithmetic large immediate */
8197 op = (insn >> 11) & 3;
8198 rd = (insn >> 8) & 0x7;
396e467c
FN
8199 if (op == 0) { /* mov */
8200 tmp = new_tmp();
8201 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8202 if (!s->condexec_mask)
396e467c
FN
8203 gen_logic_CC(tmp);
8204 store_reg(s, rd, tmp);
8205 } else {
8206 tmp = load_reg(s, rd);
8207 tmp2 = new_tmp();
8208 tcg_gen_movi_i32(tmp2, insn & 0xff);
8209 switch (op) {
8210 case 1: /* cmp */
8211 gen_helper_sub_cc(tmp, tmp, tmp2);
8212 dead_tmp(tmp);
8213 dead_tmp(tmp2);
8214 break;
8215 case 2: /* add */
8216 if (s->condexec_mask)
8217 tcg_gen_add_i32(tmp, tmp, tmp2);
8218 else
8219 gen_helper_add_cc(tmp, tmp, tmp2);
8220 dead_tmp(tmp2);
8221 store_reg(s, rd, tmp);
8222 break;
8223 case 3: /* sub */
8224 if (s->condexec_mask)
8225 tcg_gen_sub_i32(tmp, tmp, tmp2);
8226 else
8227 gen_helper_sub_cc(tmp, tmp, tmp2);
8228 dead_tmp(tmp2);
8229 store_reg(s, rd, tmp);
8230 break;
8231 }
99c475ab 8232 }
99c475ab
FB
8233 break;
8234 case 4:
8235 if (insn & (1 << 11)) {
8236 rd = (insn >> 8) & 7;
5899f386
FB
8237 /* load pc-relative. Bit 1 of PC is ignored. */
8238 val = s->pc + 2 + ((insn & 0xff) * 4);
8239 val &= ~(uint32_t)2;
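        /* e.g. for the load at address 0x1002, s->pc is already 0x1004
           here, so with an 8-bit immediate of 1 the load address is
           (0x1004 + 2 + 4) & ~2 == 0x1008. */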
b0109805
PB
8240 addr = new_tmp();
8241 tcg_gen_movi_i32(addr, val);
8242 tmp = gen_ld32(addr, IS_USER(s));
8243 dead_tmp(addr);
8244 store_reg(s, rd, tmp);
99c475ab
FB
8245 break;
8246 }
8247 if (insn & (1 << 10)) {
8248 /* data processing extended or blx */
8249 rd = (insn & 7) | ((insn >> 4) & 8);
8250 rm = (insn >> 3) & 0xf;
8251 op = (insn >> 8) & 3;
8252 switch (op) {
8253 case 0: /* add */
396e467c
FN
8254 tmp = load_reg(s, rd);
8255 tmp2 = load_reg(s, rm);
8256 tcg_gen_add_i32(tmp, tmp, tmp2);
8257 dead_tmp(tmp2);
8258 store_reg(s, rd, tmp);
99c475ab
FB
8259 break;
8260 case 1: /* cmp */
396e467c
FN
8261 tmp = load_reg(s, rd);
8262 tmp2 = load_reg(s, rm);
8263 gen_helper_sub_cc(tmp, tmp, tmp2);
8264 dead_tmp(tmp2);
8265 dead_tmp(tmp);
99c475ab
FB
8266 break;
8267 case 2: /* mov/cpy */
396e467c
FN
8268 tmp = load_reg(s, rm);
8269 store_reg(s, rd, tmp);
99c475ab
FB
8270 break;
8271 case 3:/* branch [and link] exchange thumb register */
b0109805 8272 tmp = load_reg(s, rm);
99c475ab
FB
8273 if (insn & (1 << 7)) {
8274 val = (uint32_t)s->pc | 1;
b0109805
PB
8275 tmp2 = new_tmp();
8276 tcg_gen_movi_i32(tmp2, val);
8277 store_reg(s, 14, tmp2);
99c475ab 8278 }
d9ba4830 8279 gen_bx(s, tmp);
99c475ab
FB
8280 break;
8281 }
8282 break;
8283 }
8284
8285 /* data processing register */
8286 rd = insn & 7;
8287 rm = (insn >> 3) & 7;
8288 op = (insn >> 6) & 0xf;
8289 if (op == 2 || op == 3 || op == 4 || op == 7) {
8290 /* the shift/rotate ops want the operands backwards */
8291 val = rm;
8292 rm = rd;
8293 rd = val;
8294 val = 1;
8295 } else {
8296 val = 0;
8297 }
8298
396e467c
FN
8299 if (op == 9) { /* neg */
8300 tmp = new_tmp();
8301 tcg_gen_movi_i32(tmp, 0);
8302 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8303 tmp = load_reg(s, rd);
8304 } else {
8305 TCGV_UNUSED(tmp);
8306 }
99c475ab 8307
396e467c 8308 tmp2 = load_reg(s, rm);
5899f386 8309 switch (op) {
99c475ab 8310 case 0x0: /* and */
396e467c 8311 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8312 if (!s->condexec_mask)
396e467c 8313 gen_logic_CC(tmp);
99c475ab
FB
8314 break;
8315 case 0x1: /* eor */
396e467c 8316 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8317 if (!s->condexec_mask)
396e467c 8318 gen_logic_CC(tmp);
99c475ab
FB
8319 break;
8320 case 0x2: /* lsl */
9ee6e8bb 8321 if (s->condexec_mask) {
396e467c 8322 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8323 } else {
396e467c
FN
8324 gen_helper_shl_cc(tmp2, tmp2, tmp);
8325 gen_logic_CC(tmp2);
9ee6e8bb 8326 }
99c475ab
FB
8327 break;
8328 case 0x3: /* lsr */
9ee6e8bb 8329 if (s->condexec_mask) {
396e467c 8330 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8331 } else {
396e467c
FN
8332 gen_helper_shr_cc(tmp2, tmp2, tmp);
8333 gen_logic_CC(tmp2);
9ee6e8bb 8334 }
99c475ab
FB
8335 break;
8336 case 0x4: /* asr */
9ee6e8bb 8337 if (s->condexec_mask) {
396e467c 8338 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8339 } else {
396e467c
FN
8340 gen_helper_sar_cc(tmp2, tmp2, tmp);
8341 gen_logic_CC(tmp2);
9ee6e8bb 8342 }
99c475ab
FB
8343 break;
8344 case 0x5: /* adc */
9ee6e8bb 8345 if (s->condexec_mask)
396e467c 8346 gen_adc(tmp, tmp2);
9ee6e8bb 8347 else
396e467c 8348 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8349 break;
8350 case 0x6: /* sbc */
9ee6e8bb 8351 if (s->condexec_mask)
396e467c 8352 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8353 else
396e467c 8354 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8355 break;
8356 case 0x7: /* ror */
9ee6e8bb 8357 if (s->condexec_mask) {
396e467c 8358 gen_helper_ror(tmp2, tmp2, tmp);
9ee6e8bb 8359 } else {
396e467c
FN
8360 gen_helper_ror_cc(tmp2, tmp2, tmp);
8361 gen_logic_CC(tmp2);
9ee6e8bb 8362 }
99c475ab
FB
8363 break;
8364 case 0x8: /* tst */
396e467c
FN
8365 tcg_gen_and_i32(tmp, tmp, tmp2);
8366 gen_logic_CC(tmp);
99c475ab 8367 rd = 16;
5899f386 8368 break;
99c475ab 8369 case 0x9: /* neg */
9ee6e8bb 8370 if (s->condexec_mask)
396e467c 8371 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8372 else
396e467c 8373 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8374 break;
8375 case 0xa: /* cmp */
396e467c 8376 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8377 rd = 16;
8378 break;
8379 case 0xb: /* cmn */
396e467c 8380 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8381 rd = 16;
8382 break;
8383 case 0xc: /* orr */
396e467c 8384 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8385 if (!s->condexec_mask)
396e467c 8386 gen_logic_CC(tmp);
99c475ab
FB
8387 break;
8388 case 0xd: /* mul */
396e467c 8389 gen_mull(tmp, tmp2);
9ee6e8bb 8390 if (!s->condexec_mask)
396e467c 8391 gen_logic_CC(tmp);
99c475ab
FB
8392 break;
8393 case 0xe: /* bic */
396e467c 8394 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb 8395 if (!s->condexec_mask)
396e467c 8396 gen_logic_CC(tmp);
99c475ab
FB
8397 break;
8398 case 0xf: /* mvn */
396e467c 8399 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8400 if (!s->condexec_mask)
396e467c 8401 gen_logic_CC(tmp2);
99c475ab 8402 val = 1;
5899f386 8403 rm = rd;
99c475ab
FB
8404 break;
8405 }
8406 if (rd != 16) {
396e467c
FN
8407 if (val) {
8408 store_reg(s, rm, tmp2);
8409 if (op != 0xf)
8410 dead_tmp(tmp);
8411 } else {
8412 store_reg(s, rd, tmp);
8413 dead_tmp(tmp2);
8414 }
8415 } else {
8416 dead_tmp(tmp);
8417 dead_tmp(tmp2);
99c475ab
FB
8418 }
8419 break;
8420
8421 case 5:
8422 /* load/store register offset. */
8423 rd = insn & 7;
8424 rn = (insn >> 3) & 7;
8425 rm = (insn >> 6) & 7;
8426 op = (insn >> 9) & 7;
b0109805 8427 addr = load_reg(s, rn);
b26eefb6 8428 tmp = load_reg(s, rm);
b0109805 8429 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8430 dead_tmp(tmp);
99c475ab
FB
8431
8432 if (op < 3) /* store */
b0109805 8433 tmp = load_reg(s, rd);
99c475ab
FB
8434
8435 switch (op) {
8436 case 0: /* str */
b0109805 8437 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8438 break;
8439 case 1: /* strh */
b0109805 8440 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8441 break;
8442 case 2: /* strb */
b0109805 8443 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8444 break;
8445 case 3: /* ldrsb */
b0109805 8446 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8447 break;
8448 case 4: /* ldr */
b0109805 8449 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8450 break;
8451 case 5: /* ldrh */
b0109805 8452 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8453 break;
8454 case 6: /* ldrb */
b0109805 8455 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8456 break;
8457 case 7: /* ldrsh */
b0109805 8458 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8459 break;
8460 }
8461 if (op >= 3) /* load */
b0109805
PB
8462 store_reg(s, rd, tmp);
8463 dead_tmp(addr);
99c475ab
FB
8464 break;
8465
8466 case 6:
8467 /* load/store word immediate offset */
8468 rd = insn & 7;
8469 rn = (insn >> 3) & 7;
b0109805 8470 addr = load_reg(s, rn);
99c475ab 8471 val = (insn >> 4) & 0x7c;
b0109805 8472 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8473
8474 if (insn & (1 << 11)) {
8475 /* load */
b0109805
PB
8476 tmp = gen_ld32(addr, IS_USER(s));
8477 store_reg(s, rd, tmp);
99c475ab
FB
8478 } else {
8479 /* store */
b0109805
PB
8480 tmp = load_reg(s, rd);
8481 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8482 }
b0109805 8483 dead_tmp(addr);
99c475ab
FB
8484 break;
8485
8486 case 7:
8487 /* load/store byte immediate offset */
8488 rd = insn & 7;
8489 rn = (insn >> 3) & 7;
b0109805 8490 addr = load_reg(s, rn);
99c475ab 8491 val = (insn >> 6) & 0x1f;
b0109805 8492 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8493
8494 if (insn & (1 << 11)) {
8495 /* load */
b0109805
PB
8496 tmp = gen_ld8u(addr, IS_USER(s));
8497 store_reg(s, rd, tmp);
99c475ab
FB
8498 } else {
8499 /* store */
b0109805
PB
8500 tmp = load_reg(s, rd);
8501 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8502 }
b0109805 8503 dead_tmp(addr);
99c475ab
FB
8504 break;
8505
8506 case 8:
8507 /* load/store halfword immediate offset */
8508 rd = insn & 7;
8509 rn = (insn >> 3) & 7;
b0109805 8510 addr = load_reg(s, rn);
99c475ab 8511 val = (insn >> 5) & 0x3e;
b0109805 8512 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8513
8514 if (insn & (1 << 11)) {
8515 /* load */
b0109805
PB
8516 tmp = gen_ld16u(addr, IS_USER(s));
8517 store_reg(s, rd, tmp);
99c475ab
FB
8518 } else {
8519 /* store */
b0109805
PB
8520 tmp = load_reg(s, rd);
8521 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8522 }
b0109805 8523 dead_tmp(addr);
99c475ab
FB
8524 break;
8525
8526 case 9:
8527 /* load/store from stack */
8528 rd = (insn >> 8) & 7;
b0109805 8529 addr = load_reg(s, 13);
99c475ab 8530 val = (insn & 0xff) * 4;
b0109805 8531 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8532
8533 if (insn & (1 << 11)) {
8534 /* load */
b0109805
PB
8535 tmp = gen_ld32(addr, IS_USER(s));
8536 store_reg(s, rd, tmp);
99c475ab
FB
8537 } else {
8538 /* store */
b0109805
PB
8539 tmp = load_reg(s, rd);
8540 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8541 }
b0109805 8542 dead_tmp(addr);
99c475ab
FB
8543 break;
8544
8545 case 10:
8546 /* add to high reg */
8547 rd = (insn >> 8) & 7;
5899f386
FB
8548 if (insn & (1 << 11)) {
8549 /* SP */
5e3f878a 8550 tmp = load_reg(s, 13);
5899f386
FB
8551 } else {
8552 /* PC. bit 1 is ignored. */
5e3f878a
PB
8553 tmp = new_tmp();
8554 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8555 }
99c475ab 8556 val = (insn & 0xff) * 4;
5e3f878a
PB
8557 tcg_gen_addi_i32(tmp, tmp, val);
8558 store_reg(s, rd, tmp);
99c475ab
FB
8559 break;
8560
8561 case 11:
8562 /* misc */
8563 op = (insn >> 8) & 0xf;
8564 switch (op) {
8565 case 0:
8566 /* adjust stack pointer */
b26eefb6 8567 tmp = load_reg(s, 13);
99c475ab
FB
8568 val = (insn & 0x7f) * 4;
8569 if (insn & (1 << 7))
6a0d8a1d 8570 val = -(int32_t)val;
b26eefb6
PB
8571 tcg_gen_addi_i32(tmp, tmp, val);
8572 store_reg(s, 13, tmp);
99c475ab
FB
8573 break;
8574
9ee6e8bb
PB
8575 case 2: /* sign/zero extend. */
8576 ARCH(6);
8577 rd = insn & 7;
8578 rm = (insn >> 3) & 7;
b0109805 8579 tmp = load_reg(s, rm);
9ee6e8bb 8580 switch ((insn >> 6) & 3) {
b0109805
PB
8581 case 0: gen_sxth(tmp); break;
8582 case 1: gen_sxtb(tmp); break;
8583 case 2: gen_uxth(tmp); break;
8584 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8585 }
b0109805 8586 store_reg(s, rd, tmp);
9ee6e8bb 8587 break;
99c475ab
FB
8588 case 4: case 5: case 0xc: case 0xd:
8589 /* push/pop */
b0109805 8590 addr = load_reg(s, 13);
5899f386
FB
8591 if (insn & (1 << 8))
8592 offset = 4;
99c475ab 8593 else
5899f386
FB
8594 offset = 0;
8595 for (i = 0; i < 8; i++) {
8596 if (insn & (1 << i))
8597 offset += 4;
8598 }
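            /* e.g. PUSH {r0, r2, lr}: bits 0, 2 and 8 are set, so
               offset == 12 and sp is dropped by 12 before the stores. */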
8599 if ((insn & (1 << 11)) == 0) {
b0109805 8600 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8601 }
99c475ab
FB
8602 for (i = 0; i < 8; i++) {
8603 if (insn & (1 << i)) {
8604 if (insn & (1 << 11)) {
8605 /* pop */
b0109805
PB
8606 tmp = gen_ld32(addr, IS_USER(s));
8607 store_reg(s, i, tmp);
99c475ab
FB
8608 } else {
8609 /* push */
b0109805
PB
8610 tmp = load_reg(s, i);
8611 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8612 }
5899f386 8613 /* advance to the next address. */
b0109805 8614 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8615 }
8616 }
a50f5b91 8617 TCGV_UNUSED(tmp);
99c475ab
FB
8618 if (insn & (1 << 8)) {
8619 if (insn & (1 << 11)) {
8620 /* pop pc */
b0109805 8621 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8622 /* don't set the pc until the rest of the instruction
8623 has completed */
8624 } else {
8625 /* push lr */
b0109805
PB
8626 tmp = load_reg(s, 14);
8627 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8628 }
b0109805 8629 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8630 }
5899f386 8631 if ((insn & (1 << 11)) == 0) {
b0109805 8632 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8633 }
99c475ab 8634 /* write back the new stack pointer */
b0109805 8635 store_reg(s, 13, addr);
99c475ab
FB
8636 /* set the new PC value */
8637 if ((insn & 0x0900) == 0x0900)
b0109805 8638 gen_bx(s, tmp);
99c475ab
FB
8639 break;
8640
9ee6e8bb
PB
8641 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8642 rm = insn & 7;
d9ba4830 8643 tmp = load_reg(s, rm);
9ee6e8bb
PB
8644 s->condlabel = gen_new_label();
8645 s->condjmp = 1;
8646 if (insn & (1 << 11))
cb63669a 8647 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8648 else
cb63669a 8649 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8650 dead_tmp(tmp);
9ee6e8bb
PB
8651 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8652 val = (uint32_t)s->pc + 2;
8653 val += offset;
8654 gen_jmp(s, val);
8655 break;
8656
8657 case 15: /* IT, nop-hint. */
8658 if ((insn & 0xf) == 0) {
8659 gen_nop_hint(s, (insn >> 4) & 0xf);
8660 break;
8661 }
8662 /* If Then. */
8663 s->condexec_cond = (insn >> 4) & 0xe;
8664 s->condexec_mask = insn & 0x1f;
8665 /* No actual code generated for this insn, just set up state. */
8666 break;
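            /* The low bit of the condition is carried in bit 4 of
               condexec_mask; the per-insn advance in
               gen_intermediate_code_internal ORs it back into
               condexec_cond and shifts the mask left. */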
8667
06c949e6 8668 case 0xe: /* bkpt */
9ee6e8bb 8669 gen_set_condexec(s);
5e3f878a 8670 gen_set_pc_im(s->pc - 2);
d9ba4830 8671 gen_exception(EXCP_BKPT);
06c949e6
PB
8672 s->is_jmp = DISAS_JUMP;
8673 break;
8674
9ee6e8bb
PB
8675 case 0xa: /* rev */
8676 ARCH(6);
8677 rn = (insn >> 3) & 0x7;
8678 rd = insn & 0x7;
b0109805 8679 tmp = load_reg(s, rn);
9ee6e8bb 8680 switch ((insn >> 6) & 3) {
66896cb8 8681 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8682 case 1: gen_rev16(tmp); break;
8683 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8684 default: goto illegal_op;
8685 }
b0109805 8686 store_reg(s, rd, tmp);
9ee6e8bb
PB
8687 break;
8688
8689 case 6: /* cps */
8690 ARCH(6);
8691 if (IS_USER(s))
8692 break;
8693 if (IS_M(env)) {
8984bd2e 8694 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8695 /* PRIMASK */
8984bd2e
PB
8696 if (insn & 1) {
8697 addr = tcg_const_i32(16);
8698 gen_helper_v7m_msr(cpu_env, addr, tmp);
8699 }
9ee6e8bb 8700 /* FAULTMASK */
8984bd2e
PB
8701 if (insn & 2) {
8702 addr = tcg_const_i32(17);
8703 gen_helper_v7m_msr(cpu_env, addr, tmp);
8704 }
9ee6e8bb
PB
8705 gen_lookup_tb(s);
8706 } else {
8707 if (insn & (1 << 4))
8708 shift = CPSR_A | CPSR_I | CPSR_F;
8709 else
8710 shift = 0;
2fbac54b 8711 gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
9ee6e8bb
PB
8712 }
8713 break;
8714
99c475ab
FB
8715 default:
8716 goto undef;
8717 }
8718 break;
8719
8720 case 12:
8721 /* load/store multiple */
8722 rn = (insn >> 8) & 0x7;
b0109805 8723 addr = load_reg(s, rn);
99c475ab
FB
8724 for (i = 0; i < 8; i++) {
8725 if (insn & (1 << i)) {
99c475ab
FB
8726 if (insn & (1 << 11)) {
8727 /* load */
b0109805
PB
8728 tmp = gen_ld32(addr, IS_USER(s));
8729 store_reg(s, i, tmp);
99c475ab
FB
8730 } else {
8731 /* store */
b0109805
PB
8732 tmp = load_reg(s, i);
8733 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8734 }
5899f386 8735 /* advance to the next address */
b0109805 8736 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8737 }
8738 }
5899f386 8739 /* Base register writeback. */
b0109805
PB
8740 if ((insn & (1 << rn)) == 0) {
8741 store_reg(s, rn, addr);
8742 } else {
8743 dead_tmp(addr);
8744 }
99c475ab
FB
8745 break;
8746
8747 case 13:
8748 /* conditional branch or swi */
8749 cond = (insn >> 8) & 0xf;
8750 if (cond == 0xe)
8751 goto undef;
8752
8753 if (cond == 0xf) {
8754 /* swi */
9ee6e8bb 8755 gen_set_condexec(s);
422ebf69 8756 gen_set_pc_im(s->pc);
9ee6e8bb 8757 s->is_jmp = DISAS_SWI;
99c475ab
FB
8758 break;
8759 }
8760 /* generate a conditional jump to next instruction */
e50e6a20 8761 s->condlabel = gen_new_label();
d9ba4830 8762 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8763 s->condjmp = 1;
99c475ab
FB
8764
8765 /* jump to the offset */
5899f386 8766 val = (uint32_t)s->pc + 2;
99c475ab 8767 offset = ((int32_t)insn << 24) >> 24;
5899f386 8768 val += offset << 1;
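        /* offset is the sign-extended 8-bit immediate in halfwords;
           e.g. 0xfe gives offset == -2 and val == s->pc + 2 - 4. */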
8aaca4c0 8769 gen_jmp(s, val);
99c475ab
FB
8770 break;
8771
8772 case 14:
358bf29e 8773 if (insn & (1 << 11)) {
9ee6e8bb
PB
8774 if (disas_thumb2_insn(env, s, insn))
8775 goto undef32;
358bf29e
PB
8776 break;
8777 }
9ee6e8bb 8778 /* unconditional branch */
99c475ab
FB
8779 val = (uint32_t)s->pc;
8780 offset = ((int32_t)insn << 21) >> 21;
8781 val += (offset << 1) + 2;
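        /* 11-bit signed immediate in halfwords; e.g. 0x7fe (-2) makes
           val the address of this insn, i.e. a branch-to-self. */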
8aaca4c0 8782 gen_jmp(s, val);
99c475ab
FB
8783 break;
8784
8785 case 15:
9ee6e8bb 8786 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8787 goto undef32;
9ee6e8bb 8788 break;
99c475ab
FB
8789 }
8790 return;
9ee6e8bb
PB
8791undef32:
8792 gen_set_condexec(s);
5e3f878a 8793 gen_set_pc_im(s->pc - 4);
d9ba4830 8794 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8795 s->is_jmp = DISAS_JUMP;
8796 return;
8797illegal_op:
99c475ab 8798undef:
9ee6e8bb 8799 gen_set_condexec(s);
5e3f878a 8800 gen_set_pc_im(s->pc - 2);
d9ba4830 8801 gen_exception(EXCP_UDEF);
99c475ab
FB
8802 s->is_jmp = DISAS_JUMP;
8803}
8804
2c0262af
FB
8805/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8806 basic block 'tb'. If search_pc is TRUE, also generate PC
8807 information for each intermediate instruction. */
2cfc5f17
TS
8808static inline void gen_intermediate_code_internal(CPUState *env,
8809 TranslationBlock *tb,
8810 int search_pc)
2c0262af
FB
8811{
8812 DisasContext dc1, *dc = &dc1;
a1d1bb31 8813 CPUBreakpoint *bp;
2c0262af
FB
8814 uint16_t *gen_opc_end;
8815 int j, lj;
0fa85d43 8816 target_ulong pc_start;
b5ff1b31 8817 uint32_t next_page_start;
2e70f6ef
PB
8818 int num_insns;
8819 int max_insns;
3b46e624 8820
2c0262af 8821 /* generate intermediate code */
b26eefb6 8822 num_temps = 0;
b26eefb6 8823
0fa85d43 8824 pc_start = tb->pc;
3b46e624 8825
2c0262af
FB
8826 dc->tb = tb;
8827
2c0262af 8828 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8829
8830 dc->is_jmp = DISAS_NEXT;
8831 dc->pc = pc_start;
8aaca4c0 8832 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8833 dc->condjmp = 0;
5899f386 8834 dc->thumb = env->thumb;
9ee6e8bb
PB
8835 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8836 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 8837#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8838 if (IS_M(env)) {
8839 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8840 } else {
8841 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8842 }
b5ff1b31 8843#endif
a7812ae4
PB
8844 cpu_F0s = tcg_temp_new_i32();
8845 cpu_F1s = tcg_temp_new_i32();
8846 cpu_F0d = tcg_temp_new_i64();
8847 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8848 cpu_V0 = cpu_F0d;
8849 cpu_V1 = cpu_F1d;
e677137d 8850 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8851 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8852 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8853 lj = -1;
2e70f6ef
PB
8854 num_insns = 0;
8855 max_insns = tb->cflags & CF_COUNT_MASK;
8856 if (max_insns == 0)
8857 max_insns = CF_COUNT_MASK;
8858
8859 gen_icount_start();
9ee6e8bb
PB
8860 /* Reset the conditional execution bits immediately. This avoids
8861 complications trying to do it at the end of the block. */
8862 if (env->condexec_bits)
8f01245e
PB
8863 {
8864 TCGv tmp = new_tmp();
8865 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8866 store_cpu_field(tmp, condexec_bits);
8f01245e 8867 }
2c0262af 8868 do {
fbb4a2e3
PB
8869#ifdef CONFIG_USER_ONLY
8870 /* Intercept jump to the magic kernel page. */
8871 if (dc->pc >= 0xffff0000) {
8872 /* We always get here via a jump, so know we are not in a
8873 conditional execution block. */
8874 gen_exception(EXCP_KERNEL_TRAP);
8875 dc->is_jmp = DISAS_UPDATE;
8876 break;
8877 }
8878#else
9ee6e8bb
PB
8879 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8880 /* We always get here via a jump, so know we are not in a
8881 conditional execution block. */
d9ba4830 8882 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8883 dc->is_jmp = DISAS_UPDATE;
8884 break;
9ee6e8bb
PB
8885 }
8886#endif
8887
72cf2d4f
BS
8888 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8889 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8890 if (bp->pc == dc->pc) {
9ee6e8bb 8891 gen_set_condexec(dc);
5e3f878a 8892 gen_set_pc_im(dc->pc);
d9ba4830 8893 gen_exception(EXCP_DEBUG);
1fddef4b 8894 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8895 /* Advance PC so that clearing the breakpoint will
8896 invalidate this TB. */
8897 dc->pc += 2;
8898 goto done_generating;
1fddef4b
FB
8899 break;
8900 }
8901 }
8902 }
2c0262af
FB
8903 if (search_pc) {
8904 j = gen_opc_ptr - gen_opc_buf;
8905 if (lj < j) {
8906 lj++;
8907 while (lj < j)
8908 gen_opc_instr_start[lj++] = 0;
8909 }
0fa85d43 8910 gen_opc_pc[lj] = dc->pc;
2c0262af 8911 gen_opc_instr_start[lj] = 1;
2e70f6ef 8912 gen_opc_icount[lj] = num_insns;
2c0262af 8913 }
e50e6a20 8914
2e70f6ef
PB
8915 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8916 gen_io_start();
8917
9ee6e8bb
PB
8918 if (env->thumb) {
8919 disas_thumb_insn(env, dc);
8920 if (dc->condexec_mask) {
8921 dc->condexec_cond = (dc->condexec_cond & 0xe)
8922 | ((dc->condexec_mask >> 4) & 1);
8923 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8924 if (dc->condexec_mask == 0) {
8925 dc->condexec_cond = 0;
8926 }
8927 }
8928 } else {
8929 disas_arm_insn(env, dc);
8930 }
b26eefb6
PB
8931 if (num_temps) {
8932 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8933 num_temps = 0;
8934 }
e50e6a20
FB
8935
8936 if (dc->condjmp && !dc->is_jmp) {
8937 gen_set_label(dc->condlabel);
8938 dc->condjmp = 0;
8939 }
aaf2d97d 8940 /* Translation stops when a conditional branch is encountered.
e50e6a20 8941 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8942 * Also stop translation when a page boundary is reached. This
bf20dc07 8943 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8944 num_insns ++;
1fddef4b
FB
8945 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8946 !env->singlestep_enabled &&
1b530a6d 8947 !singlestep &&
2e70f6ef
PB
8948 dc->pc < next_page_start &&
8949 num_insns < max_insns);
8950
8951 if (tb->cflags & CF_LAST_IO) {
8952 if (dc->condjmp) {
8953 /* FIXME: This can theoretically happen with self-modifying
8954 code. */
8955 cpu_abort(env, "IO on conditional branch instruction");
8956 }
8957 gen_io_end();
8958 }
9ee6e8bb 8959
b5ff1b31 8960 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8961 instruction was a conditional branch or trap, and the PC has
8962 already been written. */
551bd27f 8963 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8964 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8965 if (dc->condjmp) {
9ee6e8bb
PB
8966 gen_set_condexec(dc);
8967 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8968 gen_exception(EXCP_SWI);
9ee6e8bb 8969 } else {
d9ba4830 8970 gen_exception(EXCP_DEBUG);
9ee6e8bb 8971 }
e50e6a20
FB
8972 gen_set_label(dc->condlabel);
8973 }
8974 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8975 gen_set_pc_im(dc->pc);
e50e6a20 8976 dc->condjmp = 0;
8aaca4c0 8977 }
9ee6e8bb
PB
8978 gen_set_condexec(dc);
8979 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8980 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8981 } else {
8982 /* FIXME: Single stepping a WFI insn will not halt
8983 the CPU. */
d9ba4830 8984 gen_exception(EXCP_DEBUG);
9ee6e8bb 8985 }
8aaca4c0 8986 } else {
9ee6e8bb
PB
8987 /* While branches must always occur at the end of an IT block,
8988 there are a few other things that can cause us to terminate
8989 the TB in the middle of an IT block:
8990 - Exception generating instructions (bkpt, swi, undefined).
8991 - Page boundaries.
8992 - Hardware watchpoints.
8993 Hardware breakpoints have already been handled and skip this code.
8994 */
8995 gen_set_condexec(dc);
8aaca4c0 8996 switch(dc->is_jmp) {
8aaca4c0 8997 case DISAS_NEXT:
6e256c93 8998 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8999 break;
9000 default:
9001 case DISAS_JUMP:
9002 case DISAS_UPDATE:
9003 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9004 tcg_gen_exit_tb(0);
8aaca4c0
FB
9005 break;
9006 case DISAS_TB_JUMP:
9007 /* nothing more to generate */
9008 break;
9ee6e8bb 9009 case DISAS_WFI:
d9ba4830 9010 gen_helper_wfi();
9ee6e8bb
PB
9011 break;
9012 case DISAS_SWI:
d9ba4830 9013 gen_exception(EXCP_SWI);
9ee6e8bb 9014 break;
8aaca4c0 9015 }
e50e6a20
FB
9016 if (dc->condjmp) {
9017 gen_set_label(dc->condlabel);
9ee6e8bb 9018 gen_set_condexec(dc);
6e256c93 9019 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9020 dc->condjmp = 0;
9021 }
2c0262af 9022 }
2e70f6ef 9023
9ee6e8bb 9024done_generating:
2e70f6ef 9025 gen_icount_end(tb, num_insns);
2c0262af
FB
9026 *gen_opc_ptr = INDEX_op_end;
9027
9028#ifdef DEBUG_DISAS
8fec2b8c 9029 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9030 qemu_log("----------------\n");
9031 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9032 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
9033 qemu_log("\n");
2c0262af
FB
9034 }
9035#endif
b5ff1b31
FB
9036 if (search_pc) {
9037 j = gen_opc_ptr - gen_opc_buf;
9038 lj++;
9039 while (lj <= j)
9040 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9041 } else {
2c0262af 9042 tb->size = dc->pc - pc_start;
2e70f6ef 9043 tb->icount = num_insns;
b5ff1b31 9044 }
2c0262af
FB
9045}
9046
2cfc5f17 9047void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9048{
2cfc5f17 9049 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9050}
9051
2cfc5f17 9052void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9053{
2cfc5f17 9054 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9055}
9056
b5ff1b31
FB
9057static const char *cpu_mode_names[16] = {
9058 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9059 "???", "???", "???", "und", "???", "???", "???", "sys"
9060};
9ee6e8bb 9061
5fafdf24 9062void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
9063 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
9064 int flags)
2c0262af
FB
9065{
9066 int i;
06e80fc9 9067#if 0
bc380d17 9068 union {
b7bcbe95
FB
9069 uint32_t i;
9070 float s;
9071 } s0, s1;
9072 CPU_DoubleU d;
a94a6abf
PB
9073 /* ??? This assumes float64 and double have the same layout.
9074 Oh well, it's only debug dumps. */
9075 union {
9076 float64 f64;
9077 double d;
9078 } d0;
06e80fc9 9079#endif
b5ff1b31 9080 uint32_t psr;
2c0262af
FB
9081
9082 for(i=0;i<16;i++) {
7fe48483 9083 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9084 if ((i % 4) == 3)
7fe48483 9085 cpu_fprintf(f, "\n");
2c0262af 9086 else
7fe48483 9087 cpu_fprintf(f, " ");
2c0262af 9088 }
b5ff1b31 9089 psr = cpsr_read(env);
687fa640
TS
9090 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9091 psr,
b5ff1b31
FB
9092 psr & (1 << 31) ? 'N' : '-',
9093 psr & (1 << 30) ? 'Z' : '-',
9094 psr & (1 << 29) ? 'C' : '-',
9095 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9096 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9097 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9098
5e3f878a 9099#if 0
b7bcbe95 9100 for (i = 0; i < 16; i++) {
8e96005d
FB
9101 d.d = env->vfp.regs[i];
9102 s0.i = d.l.lower;
9103 s1.i = d.l.upper;
a94a6abf
PB
9104 d0.f64 = d.d;
9105 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9106 i * 2, (int)s0.i, s0.s,
a94a6abf 9107 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9108 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9109 d0.d);
b7bcbe95 9110 }
40f137e1 9111 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9112#endif
2c0262af 9113}
a6b025d3 9114
d2856f1a
AJ
9115void gen_pc_load(CPUState *env, TranslationBlock *tb,
9116 unsigned long searched_pc, int pc_pos, void *puc)
9117{
9118 env->regs[15] = gen_opc_pc[pc_pos];
9119}