/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];

/* FIXME:  These should be removed.  */
static TCGv cpu_T[2];
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#define ICOUNT_TEMP cpu_T[0]
#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}


/* Basic operations.  */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)

#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])

#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])

#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

/* Dual signed 16x16->32 multiply: a = lo(a) * lo(b), b = hi(a) * hi(b).  */
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Sign extend the masked field: xor then subtract the sign bit
           maps values with the sign bit set onto their negative range.  */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value, i.e. add bit 31 of the low
   word (a) into the high word (b); the result is left in a.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

/* Unsigned 32x32->64 multiply.  */
static void gen_mull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

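/* The CPSR flags are cached in separate CPUState fields: CF holds the
   carry as 0 or 1, bit 31 of NF and VF gives N and V respectively, and
   ZF holds a value that is zero exactly when the Z flag is set (see
   gen_logic_CC and gen_test_cc below).  */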
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* t0 += t1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = t0 + t1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = t0 - t1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* T0 &= ~T1.  Clobbers T1.  */
/* FIXME: Implement bic natively.  */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}
static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME:  Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift);
            break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

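/* ARMv6 parallel add/subtract.  PAS_OP expands to a dispatch on op2 for
   one prefix: the signed/unsigned modulo forms (s, u) also update the GE
   flags, so their helpers take a pointer to CPUState.GE, while the
   saturating (q, uq) and halving (sh, uh) forms do not.  */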
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

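/* Emit a conditional branch to `label` that is taken when condition code
   `cc` (the ARM condition field, 0..13) holds, testing the cached flag
   fields (ZF/NF/CF/VF) in CPUState.  */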
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

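/* Indexed by the 4-bit data processing opcode: 1 for the ops that set N
   and Z directly from the logical result.  */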
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* tst */
    1, /* teq */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a
   temporary and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[0], reg);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[1], reg);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;
    if (reg == 15) {
        tmp = new_tmp();
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    } else {
        tmp = cpu_T[t];
    }
    tcg_gen_mov_i32(cpu_R[reg], tmp);
    if (reg == 15) {
        dead_tmp(tmp);
        s->is_jmp = DISAS_JUMP;
    }
}

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

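/* VFP load/store helpers: cpu_T[1] holds the address, and the data goes
   through cpu_F0s (single precision) or cpu_F0d (double precision).  */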
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)

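/* iwMMXt support.  The 64-bit working value lives in cpu_M0, with cpu_V0
   and cpu_V1 used as 64-bit scratch operands; wRn are the iwMMXt data
   registers (iwmmxt.regs) and wCx the control registers (iwmmxt.cregs).  */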
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}

static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
{
    TCGv tmp = tcg_const_i32(shift);
    gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
}

static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V0, rn);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}

static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
    tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
    iwmmxt_store_reg(cpu_V0, rn);
}

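/* Compute the address for an iwMMXt load/store into cpu_T[1], handling
   the immediate pre/post-indexed forms and register writeback; returns
   nonzero for an addressing mode this decoder does not handle.  */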
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;
    gen_movl_T1_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(- offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
{
    int rd = (insn >> 0) & 0xf;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
            return 1;
        else
            gen_op_iwmmxt_movl_T0_wCx(rd);
    } else {
        gen_iwmmxt_movl_T0_T1_wRn(rd);
    }

    gen_op_movl_T1_im(mask);
    gen_op_andl_T0_T1();
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv tmp;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                gen_iwmmxt_movl_T0_T1_wRn(wrd);
                gen_movl_reg_T0(s, rdlo);
                gen_movl_reg_T1(s, rdhi);
            } else { /* TMCRR */
                gen_movl_T0_reg(s, rdlo);
                gen_movl_T1_reg(s, rdhi);
                gen_iwmmxt_movl_wRn_T0_T1(wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        if (gen_iwmmxt_address(s, insn))
            return 1;
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = gen_ld32(cpu_T[1], IS_USER(s));
                tcg_gen_mov_i32(cpu_T[0], tmp);
                dead_tmp(tmp);
                gen_op_iwmmxt_movl_wCx_T0(wrd);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                gen_op_iwmmxt_movl_T0_wCx(wrd);
                tmp = new_tmp();
                tcg_gen_mov_i32(tmp, cpu_T[0]);
                gen_st32(tmp, cpu_T[1], IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, cpu_T[1], IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            gen_op_iwmmxt_movl_T0_wCx(wrd);
            gen_movl_T1_reg(s, rd);
            gen_op_bicl_T0_T1();
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            gen_movl_reg_T0(s, rd);
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movl_T0_wCx(wrd);
        gen_movl_reg_T0(s, rd);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        gen_op_movl_T1_im(7);
        gen_op_andl_T0_T1();
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
1763 break;
1764 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1765 rd = (insn >> 12) & 0xf;
1766 wrd = (insn >> 16) & 0xf;
1767 gen_movl_T0_reg(s, rd);
1768 gen_op_iwmmxt_movq_M0_wRn(wrd);
1769 switch ((insn >> 6) & 3) {
1770 case 0:
1771 gen_op_movl_T1_im(0xff);
1772 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1773 break;
1774 case 1:
1775 gen_op_movl_T1_im(0xffff);
1776 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1777 break;
1778 case 2:
1779 gen_op_movl_T1_im(0xffffffff);
1780 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1781 break;
1782 case 3:
1783 return 1;
1784 }
1785 gen_op_iwmmxt_movq_wRn_M0(wrd);
1786 gen_op_iwmmxt_set_mup();
1787 break;
1788 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1789 rd = (insn >> 12) & 0xf;
1790 wrd = (insn >> 16) & 0xf;
1791 if (rd == 15)
1792 return 1;
1793 gen_op_iwmmxt_movq_M0_wRn(wrd);
1794 switch ((insn >> 22) & 3) {
1795 case 0:
1796 if (insn & 8)
1797 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1798 else {
e677137d 1799 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1800 }
1801 break;
1802 case 1:
1803 if (insn & 8)
1804 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1805 else {
e677137d 1806 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1807 }
1808 break;
1809 case 2:
e677137d 1810 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1811 break;
1812 case 3:
1813 return 1;
1814 }
b26eefb6 1815 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1816 break;
1817 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1818 if ((insn & 0x000ff008) != 0x0003f000)
1819 return 1;
1820 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1821 switch ((insn >> 22) & 3) {
1822 case 0:
1823 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1824 break;
1825 case 1:
1826 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1827 break;
1828 case 2:
1829 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1830 break;
1831 case 3:
1832 return 1;
1833 }
1834 gen_op_shll_T1_im(28);
d9ba4830 1835 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1836 break;
1837 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1838 rd = (insn >> 12) & 0xf;
1839 wrd = (insn >> 16) & 0xf;
1840 gen_movl_T0_reg(s, rd);
1841 switch ((insn >> 6) & 3) {
1842 case 0:
e677137d 1843 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1844 break;
1845 case 1:
e677137d 1846 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1847 break;
1848 case 2:
e677137d 1849 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1850 break;
1851 case 3:
1852 return 1;
1853 }
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 break;
1857 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1858 if ((insn & 0x000ff00f) != 0x0003f000)
1859 return 1;
1860 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
15bb4eac 1861 gen_op_movl_T0_T1();
18c9b560
AZ
1862 switch ((insn >> 22) & 3) {
1863 case 0:
1864 for (i = 0; i < 7; i ++) {
1865 gen_op_shll_T1_im(4);
1866 gen_op_andl_T0_T1();
1867 }
1868 break;
1869 case 1:
1870 for (i = 0; i < 3; i ++) {
1871 gen_op_shll_T1_im(8);
1872 gen_op_andl_T0_T1();
1873 }
1874 break;
1875 case 2:
1876 gen_op_shll_T1_im(16);
1877 gen_op_andl_T0_T1();
1878 break;
1879 case 3:
1880 return 1;
1881 }
d9ba4830 1882 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1883 break;
1884 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1885 wrd = (insn >> 12) & 0xf;
1886 rd0 = (insn >> 16) & 0xf;
1887 gen_op_iwmmxt_movq_M0_wRn(rd0);
1888 switch ((insn >> 22) & 3) {
1889 case 0:
e677137d 1890 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1891 break;
1892 case 1:
e677137d 1893 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1894 break;
1895 case 2:
e677137d 1896 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1897 break;
1898 case 3:
1899 return 1;
1900 }
1901 gen_op_iwmmxt_movq_wRn_M0(wrd);
1902 gen_op_iwmmxt_set_mup();
1903 break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        gen_op_movl_T0_T1();
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_orl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_orl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_orl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
2046 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2047 case 0x214: case 0x614: case 0xa14: case 0xe14:
2048 wrd = (insn >> 12) & 0xf;
2049 rd0 = (insn >> 16) & 0xf;
2050 gen_op_iwmmxt_movq_M0_wRn(rd0);
2051 if (gen_iwmmxt_shift(insn, 0xff))
2052 return 1;
2053 switch ((insn >> 22) & 3) {
2054 case 0:
2055 return 1;
2056 case 1:
e677137d 2057 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2058 break;
2059 case 2:
e677137d 2060 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2061 break;
2062 case 3:
e677137d 2063 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2064 break;
2065 }
2066 gen_op_iwmmxt_movq_wRn_M0(wrd);
2067 gen_op_iwmmxt_set_mup();
2068 gen_op_iwmmxt_set_cup();
2069 break;
2070 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2071 case 0x014: case 0x414: case 0x814: case 0xc14:
2072 wrd = (insn >> 12) & 0xf;
2073 rd0 = (insn >> 16) & 0xf;
2074 gen_op_iwmmxt_movq_M0_wRn(rd0);
2075 if (gen_iwmmxt_shift(insn, 0xff))
2076 return 1;
2077 switch ((insn >> 22) & 3) {
2078 case 0:
2079 return 1;
2080 case 1:
e677137d 2081 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2082 break;
2083 case 2:
e677137d 2084 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2085 break;
2086 case 3:
e677137d 2087 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2088 break;
2089 }
2090 gen_op_iwmmxt_movq_wRn_M0(wrd);
2091 gen_op_iwmmxt_set_mup();
2092 gen_op_iwmmxt_set_cup();
2093 break;
2094 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2095 case 0x114: case 0x514: case 0x914: case 0xd14:
2096 wrd = (insn >> 12) & 0xf;
2097 rd0 = (insn >> 16) & 0xf;
2098 gen_op_iwmmxt_movq_M0_wRn(rd0);
2099 if (gen_iwmmxt_shift(insn, 0xff))
2100 return 1;
2101 switch ((insn >> 22) & 3) {
2102 case 0:
2103 return 1;
2104 case 1:
e677137d 2105 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2106 break;
2107 case 2:
e677137d 2108 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2109 break;
2110 case 3:
e677137d 2111 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2112 break;
2113 }
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 gen_op_iwmmxt_set_cup();
2117 break;
2118 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2119 case 0x314: case 0x714: case 0xb14: case 0xf14:
2120 wrd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
2123 switch ((insn >> 22) & 3) {
2124 case 0:
2125 return 1;
2126 case 1:
2127 if (gen_iwmmxt_shift(insn, 0xf))
2128 return 1;
e677137d 2129 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2130 break;
2131 case 2:
2132 if (gen_iwmmxt_shift(insn, 0x1f))
2133 return 1;
e677137d 2134 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2135 break;
2136 case 3:
2137 if (gen_iwmmxt_shift(insn, 0x3f))
2138 return 1;
e677137d 2139 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2140 break;
2141 }
2142 gen_op_iwmmxt_movq_wRn_M0(wrd);
2143 gen_op_iwmmxt_set_mup();
2144 gen_op_iwmmxt_set_cup();
2145 break;
2146 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2147 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2148 wrd = (insn >> 12) & 0xf;
2149 rd0 = (insn >> 16) & 0xf;
2150 rd1 = (insn >> 0) & 0xf;
2151 gen_op_iwmmxt_movq_M0_wRn(rd0);
2152 switch ((insn >> 22) & 3) {
2153 case 0:
2154 if (insn & (1 << 21))
2155 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2156 else
2157 gen_op_iwmmxt_minub_M0_wRn(rd1);
2158 break;
2159 case 1:
2160 if (insn & (1 << 21))
2161 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2162 else
2163 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2164 break;
2165 case 2:
2166 if (insn & (1 << 21))
2167 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2168 else
2169 gen_op_iwmmxt_minul_M0_wRn(rd1);
2170 break;
2171 case 3:
2172 return 1;
2173 }
2174 gen_op_iwmmxt_movq_wRn_M0(wrd);
2175 gen_op_iwmmxt_set_mup();
2176 break;
2177 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2178 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2179 wrd = (insn >> 12) & 0xf;
2180 rd0 = (insn >> 16) & 0xf;
2181 rd1 = (insn >> 0) & 0xf;
2182 gen_op_iwmmxt_movq_M0_wRn(rd0);
2183 switch ((insn >> 22) & 3) {
2184 case 0:
2185 if (insn & (1 << 21))
2186 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2187 else
2188 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2189 break;
2190 case 1:
2191 if (insn & (1 << 21))
2192 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2193 else
2194 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2195 break;
2196 case 2:
2197 if (insn & (1 << 21))
2198 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2199 else
2200 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2201 break;
2202 case 3:
2203 return 1;
2204 }
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 break;
2208 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2209 case 0x402: case 0x502: case 0x602: case 0x702:
2210 wrd = (insn >> 12) & 0xf;
2211 rd0 = (insn >> 16) & 0xf;
2212 rd1 = (insn >> 0) & 0xf;
2213 gen_op_iwmmxt_movq_M0_wRn(rd0);
2214 gen_op_movl_T0_im((insn >> 20) & 3);
2215 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2216 gen_op_iwmmxt_movq_wRn_M0(wrd);
2217 gen_op_iwmmxt_set_mup();
2218 break;
2219 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2220 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2221 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2222 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2223 wrd = (insn >> 12) & 0xf;
2224 rd0 = (insn >> 16) & 0xf;
2225 rd1 = (insn >> 0) & 0xf;
2226 gen_op_iwmmxt_movq_M0_wRn(rd0);
2227 switch ((insn >> 20) & 0xf) {
2228 case 0x0:
2229 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2230 break;
2231 case 0x1:
2232 gen_op_iwmmxt_subub_M0_wRn(rd1);
2233 break;
2234 case 0x3:
2235 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2236 break;
2237 case 0x4:
2238 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2239 break;
2240 case 0x5:
2241 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2242 break;
2243 case 0x7:
2244 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2245 break;
2246 case 0x8:
2247 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2248 break;
2249 case 0x9:
2250 gen_op_iwmmxt_subul_M0_wRn(rd1);
2251 break;
2252 case 0xb:
2253 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2254 break;
2255 default:
2256 return 1;
2257 }
2258 gen_op_iwmmxt_movq_wRn_M0(wrd);
2259 gen_op_iwmmxt_set_mup();
2260 gen_op_iwmmxt_set_cup();
2261 break;
2262 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2263 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2264 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2265 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2266 wrd = (insn >> 12) & 0xf;
2267 rd0 = (insn >> 16) & 0xf;
2268 gen_op_iwmmxt_movq_M0_wRn(rd0);
2269 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2270 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2271 gen_op_iwmmxt_movq_wRn_M0(wrd);
2272 gen_op_iwmmxt_set_mup();
2273 gen_op_iwmmxt_set_cup();
2274 break;
2275 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2276 case 0x418: case 0x518: case 0x618: case 0x718:
2277 case 0x818: case 0x918: case 0xa18: case 0xb18:
2278 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2279 wrd = (insn >> 12) & 0xf;
2280 rd0 = (insn >> 16) & 0xf;
2281 rd1 = (insn >> 0) & 0xf;
2282 gen_op_iwmmxt_movq_M0_wRn(rd0);
2283 switch ((insn >> 20) & 0xf) {
2284 case 0x0:
2285 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2286 break;
2287 case 0x1:
2288 gen_op_iwmmxt_addub_M0_wRn(rd1);
2289 break;
2290 case 0x3:
2291 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2292 break;
2293 case 0x4:
2294 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2295 break;
2296 case 0x5:
2297 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2298 break;
2299 case 0x7:
2300 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2301 break;
2302 case 0x8:
2303 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2304 break;
2305 case 0x9:
2306 gen_op_iwmmxt_addul_M0_wRn(rd1);
2307 break;
2308 case 0xb:
2309 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2310 break;
2311 default:
2312 return 1;
2313 }
2314 gen_op_iwmmxt_movq_wRn_M0(wrd);
2315 gen_op_iwmmxt_set_mup();
2316 gen_op_iwmmxt_set_cup();
2317 break;
2318 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2319 case 0x408: case 0x508: case 0x608: case 0x708:
2320 case 0x808: case 0x908: case 0xa08: case 0xb08:
2321 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2322 wrd = (insn >> 12) & 0xf;
2323 rd0 = (insn >> 16) & 0xf;
2324 rd1 = (insn >> 0) & 0xf;
2325 gen_op_iwmmxt_movq_M0_wRn(rd0);
2326 if (!(insn & (1 << 20)))
2327 return 1;
2328 switch ((insn >> 22) & 3) {
2329 case 0:
2330 return 1;
2331 case 1:
2332 if (insn & (1 << 21))
2333 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2334 else
2335 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2336 break;
2337 case 2:
2338 if (insn & (1 << 21))
2339 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2340 else
2341 gen_op_iwmmxt_packul_M0_wRn(rd1);
2342 break;
2343 case 3:
2344 if (insn & (1 << 21))
2345 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2346 else
2347 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2348 break;
2349 }
2350 gen_op_iwmmxt_movq_wRn_M0(wrd);
2351 gen_op_iwmmxt_set_mup();
2352 gen_op_iwmmxt_set_cup();
2353 break;
2354 case 0x201: case 0x203: case 0x205: case 0x207:
2355 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2356 case 0x211: case 0x213: case 0x215: case 0x217:
2357 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2358 wrd = (insn >> 5) & 0xf;
2359 rd0 = (insn >> 12) & 0xf;
2360 rd1 = (insn >> 0) & 0xf;
2361 if (rd0 == 0xf || rd1 == 0xf)
2362 return 1;
2363 gen_op_iwmmxt_movq_M0_wRn(wrd);
2364 switch ((insn >> 16) & 0xf) {
2365 case 0x0: /* TMIA */
b26eefb6
PB
2366 gen_movl_T0_reg(s, rd0);
2367 gen_movl_T1_reg(s, rd1);
3a554c0f 2368 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
18c9b560
AZ
2369 break;
2370 case 0x8: /* TMIAPH */
b26eefb6
PB
2371 gen_movl_T0_reg(s, rd0);
2372 gen_movl_T1_reg(s, rd1);
3a554c0f 2373 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
18c9b560
AZ
2374 break;
2375 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2376 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2377 if (insn & (1 << 16))
2378 gen_op_shrl_T1_im(16);
2379 gen_op_movl_T0_T1();
b26eefb6 2380 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2381 if (insn & (1 << 17))
2382 gen_op_shrl_T1_im(16);
3a554c0f 2383 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
18c9b560
AZ
2384 break;
2385 default:
2386 return 1;
2387 }
2388 gen_op_iwmmxt_movq_wRn_M0(wrd);
2389 gen_op_iwmmxt_set_mup();
2390 break;
2391 default:
2392 return 1;
2393 }
2394
2395 return 0;
2396}
2397
2398/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2399 (i.e. an undefined instruction). */
2400static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2401{
2402 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2403 TCGv tmp, tmp2;
18c9b560
AZ
2404
2405 if ((insn & 0x0ff00f10) == 0x0e200010) {
2406 /* Multiply with Internal Accumulate Format */
2407 rd0 = (insn >> 12) & 0xf;
2408 rd1 = insn & 0xf;
2409 acc = (insn >> 5) & 7;
2410
2411 if (acc != 0)
2412 return 1;
2413
3a554c0f
FN
2414 tmp = load_reg(s, rd0);
2415 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2416 switch ((insn >> 16) & 0xf) {
2417 case 0x0: /* MIA */
3a554c0f 2418 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2419 break;
2420 case 0x8: /* MIAPH */
3a554c0f 2421 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2422 break;
2423 case 0xc: /* MIABB */
2424 case 0xd: /* MIABT */
2425 case 0xe: /* MIATB */
2426 case 0xf: /* MIATT */
18c9b560 2427 if (insn & (1 << 16))
3a554c0f 2428 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2429 if (insn & (1 << 17))
3a554c0f
FN
2430 tcg_gen_shri_i32(tmp2, tmp2, 16);
2431 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2432 break;
2433 default:
2434 return 1;
2435 }
3a554c0f
FN
2436 dead_tmp(tmp2);
2437 dead_tmp(tmp);
18c9b560
AZ
2438
2439 gen_op_iwmmxt_movq_wRn_M0(acc);
2440 return 0;
2441 }
2442
2443 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2444 /* Internal Accumulator Access Format */
2445 rdhi = (insn >> 16) & 0xf;
2446 rdlo = (insn >> 12) & 0xf;
2447 acc = insn & 7;
2448
2449 if (acc != 0)
2450 return 1;
2451
2452 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2453 iwmmxt_load_reg(cpu_V0, acc);
2454 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2455 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2456 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2457 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2458 } else { /* MAR */
3a554c0f
FN
2459 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2460 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2461 }
2462 return 0;
2463 }
2464
2465 return 1;
2466}
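/* A minimal standalone sketch (not part of the translator) of the arithmetic
 * behind the MRA/MAR handling above: the 40-bit accumulator is split into
 * RdLo (bits 31:0) and RdHi (bits 39:32, hence the (1 << (40 - 32)) - 1 mask),
 * and MAR simply concatenates the two halves again.  The accumulator value
 * below is a made-up example; compile this on its own to check the arithmetic. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t acc = 0x96ffffffffULL;                          /* hypothetical 40-bit value */
    uint32_t rdlo = (uint32_t)acc;                           /* MRA: low 32 bits */
    uint32_t rdhi = (uint32_t)(acc >> 32) & ((1 << (40 - 32)) - 1); /* bits 39:32 */
    uint64_t back = ((uint64_t)rdhi << 32) | rdlo;           /* MAR: recombine */
    printf("acc=%#llx rdlo=%#x rdhi=%#x back=%#llx\n",
           (unsigned long long)acc, rdlo, rdhi, (unsigned long long)back);
    return 0;
}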
2467
c1713132
AZ
2468/* Disassemble a system coprocessor instruction. Return nonzero if the
2469 instruction is not defined. */
2470static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2471{
8984bd2e 2472 TCGv tmp;
c1713132
AZ
2473 uint32_t rd = (insn >> 12) & 0xf;
2474 uint32_t cp = (insn >> 8) & 0xf;
2475 if (IS_USER(s)) {
2476 return 1;
2477 }
2478
18c9b560 2479 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2480 if (!env->cp[cp].cp_read)
2481 return 1;
8984bd2e
PB
2482 gen_set_pc_im(s->pc);
2483 tmp = new_tmp();
2484 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2485 store_reg(s, rd, tmp);
c1713132
AZ
2486 } else {
2487 if (!env->cp[cp].cp_write)
2488 return 1;
8984bd2e
PB
2489 gen_set_pc_im(s->pc);
2490 tmp = load_reg(s, rd);
2491 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
a60de947 2492 dead_tmp(tmp);
c1713132
AZ
2493 }
2494 return 0;
2495}
2496
9ee6e8bb
PB
2497static int cp15_user_ok(uint32_t insn)
2498{
2499 int cpn = (insn >> 16) & 0xf;
2500 int cpm = insn & 0xf;
2501 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2502
2503 if (cpn == 13 && cpm == 0) {
2504 /* TLS register. */
2505 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2506 return 1;
2507 }
2508 if (cpn == 7) {
2509 /* ISB, DSB, DMB. */
2510 if ((cpm == 5 && op == 4)
2511 || (cpm == 10 && (op == 4 || op == 5)))
2512 return 1;
2513 }
2514 return 0;
2515}
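/* A minimal standalone sketch (not part of the translator) of the field
 * extraction cp15_user_ok() relies on: CRn from bits 19:16, CRm from bits 3:0,
 * and a combined opcode value from bits 7:5 and 23:21.  The instruction word
 * below is a made-up example, not a real encoding. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t insn = 0x000d0040;  /* hypothetical: CRn=13, CRm=0, op=2 (TLS register case) */
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
    printf("CRn=%d CRm=%d op=%d\n", cpn, cpm, op);   /* prints CRn=13 CRm=0 op=2 */
    return 0;
}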
2516
b5ff1b31
FB
2517/* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2518 instruction is not defined. */
a90b7318 2519static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2520{
2521 uint32_t rd;
8984bd2e 2522 TCGv tmp;
b5ff1b31 2523
9ee6e8bb
PB
2524 /* M profile cores use memory mapped registers instead of cp15. */
2525 if (arm_feature(env, ARM_FEATURE_M))
2526 return 1;
2527
2528 if ((insn & (1 << 25)) == 0) {
2529 if (insn & (1 << 20)) {
2530 /* mrrc */
2531 return 1;
2532 }
2533 /* mcrr. Used for block cache operations, so implement as no-op. */
2534 return 0;
2535 }
2536 if ((insn & (1 << 4)) == 0) {
2537 /* cdp */
2538 return 1;
2539 }
2540 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2541 return 1;
2542 }
9332f9da
FB
2543 if ((insn & 0x0fff0fff) == 0x0e070f90
2544 || (insn & 0x0fff0fff) == 0x0e070f58) {
2545 /* Wait for interrupt. */
8984bd2e 2546 gen_set_pc_im(s->pc);
9ee6e8bb 2547 s->is_jmp = DISAS_WFI;
9332f9da
FB
2548 return 0;
2549 }
b5ff1b31 2550 rd = (insn >> 12) & 0xf;
18c9b560 2551 if (insn & ARM_CP_RW_BIT) {
8984bd2e
PB
2552 tmp = new_tmp();
2553 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
b5ff1b31
FB
2554 /* If the destination register is r15 then the condition codes are set. */
2555 if (rd != 15)
8984bd2e
PB
2556 store_reg(s, rd, tmp);
2557 else
2558 dead_tmp(tmp);
b5ff1b31 2559 } else {
8984bd2e
PB
2560 tmp = load_reg(s, rd);
2561 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2562 dead_tmp(tmp);
a90b7318
AZ
2563 /* Normally we would always end the TB here, but Linux
2564 * arch/arm/mach-pxa/sleep.S expects two instructions following
2565 * an MMU enable to execute from cache. Imitate this behaviour. */
2566 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2567 (insn & 0x0fff0fff) != 0x0e010f10)
2568 gen_lookup_tb(s);
b5ff1b31 2569 }
b5ff1b31
FB
2570 return 0;
2571}
2572
9ee6e8bb
PB
2573#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2574#define VFP_SREG(insn, bigbit, smallbit) \
2575 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2576#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2577 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2578 reg = (((insn) >> (bigbit)) & 0x0f) \
2579 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2580 } else { \
2581 if (insn & (1 << (smallbit))) \
2582 return 1; \
2583 reg = ((insn) >> (bigbit)) & 0x0f; \
2584 }} while (0)
2585
2586#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2587#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2588#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2589#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2590#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2591#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2592
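/* A minimal standalone sketch (not part of the translator) of the register
 * numbering the macros above implement: single-precision registers carry
 * their low bit in the "small" bit of the encoding, and VFP3 double-precision
 * registers gain a fifth bit above the 4-bit field.  The instruction word is
 * a made-up example, not a real encoding. */
#include <stdint.h>
#include <stdio.h>

static int vfp_sreg_d(uint32_t insn)      /* as VFP_SREG_D: bigbit=12, smallbit=22 */
{
    return ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
}

static int vfp_dreg_d_vfp3(uint32_t insn) /* as VFP_DREG_D with ARM_FEATURE_VFP3 */
{
    return ((insn >> 12) & 0x0f) | ((insn >> (22 - 4)) & 0x10);
}

int main(void)
{
    uint32_t insn = 0x00403000;  /* hypothetical bits: D=1, Vd=3 */
    printf("Sd=%d Dd=%d\n", vfp_sreg_d(insn), vfp_dreg_d_vfp3(insn));  /* Sd=7 Dd=19 */
    return 0;
}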
4373f3ce
PB
2593/* Move between integer and VFP cores. */
2594static TCGv gen_vfp_mrs(void)
2595{
2596 TCGv tmp = new_tmp();
2597 tcg_gen_mov_i32(tmp, cpu_F0s);
2598 return tmp;
2599}
2600
2601static void gen_vfp_msr(TCGv tmp)
2602{
2603 tcg_gen_mov_i32(cpu_F0s, tmp);
2604 dead_tmp(tmp);
2605}
2606
9ee6e8bb
PB
2607static inline int
2608vfp_enabled(CPUState * env)
2609{
2610 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2611}
2612
ad69471c
PB
2613static void gen_neon_dup_u8(TCGv var, int shift)
2614{
2615 TCGv tmp = new_tmp();
2616 if (shift)
2617 tcg_gen_shri_i32(var, var, shift);
86831435 2618 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2619 tcg_gen_shli_i32(tmp, var, 8);
2620 tcg_gen_or_i32(var, var, tmp);
2621 tcg_gen_shli_i32(tmp, var, 16);
2622 tcg_gen_or_i32(var, var, tmp);
2623 dead_tmp(tmp);
2624}
2625
2626static void gen_neon_dup_low16(TCGv var)
2627{
2628 TCGv tmp = new_tmp();
86831435 2629 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2630 tcg_gen_shli_i32(tmp, var, 16);
2631 tcg_gen_or_i32(var, var, tmp);
2632 dead_tmp(tmp);
2633}
2634
2635static void gen_neon_dup_high16(TCGv var)
2636{
2637 TCGv tmp = new_tmp();
2638 tcg_gen_andi_i32(var, var, 0xffff0000);
2639 tcg_gen_shri_i32(tmp, var, 16);
2640 tcg_gen_or_i32(var, var, tmp);
2641 dead_tmp(tmp);
2642}
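/* A minimal standalone sketch (not part of the translator) of what the
 * duplication helpers above compute: gen_neon_dup_u8 splats one byte across
 * a 32-bit lane and gen_neon_dup_low16 splats the low halfword.  Input values
 * are arbitrary examples. */
#include <stdint.h>
#include <stdio.h>

static uint32_t dup_u8(uint32_t var, int shift)
{
    var = (var >> shift) & 0xff;   /* extract the byte */
    var |= var << 8;               /* 0x000000ab -> 0x0000abab */
    var |= var << 16;              /* -> 0xabababab */
    return var;
}

static uint32_t dup_low16(uint32_t var)
{
    var &= 0xffff;
    return var | (var << 16);      /* 0x00001234 -> 0x12341234 */
}

int main(void)
{
    printf("%#x %#x\n", dup_u8(0xab, 0), dup_low16(0x1234));
    return 0;
}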
2643
b7bcbe95
FB
2644/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2645 (i.e. an undefined instruction). */
2646static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2647{
2648 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2649 int dp, veclen;
4373f3ce 2650 TCGv tmp;
ad69471c 2651 TCGv tmp2;
b7bcbe95 2652
40f137e1
PB
2653 if (!arm_feature(env, ARM_FEATURE_VFP))
2654 return 1;
2655
9ee6e8bb
PB
2656 if (!vfp_enabled(env)) {
2657 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2658 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2659 return 1;
2660 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2661 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2662 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2663 return 1;
2664 }
b7bcbe95
FB
2665 dp = ((insn & 0xf00) == 0xb00);
2666 switch ((insn >> 24) & 0xf) {
2667 case 0xe:
2668 if (insn & (1 << 4)) {
2669 /* single register transfer */
b7bcbe95
FB
2670 rd = (insn >> 12) & 0xf;
2671 if (dp) {
9ee6e8bb
PB
2672 int size;
2673 int pass;
2674
2675 VFP_DREG_N(rn, insn);
2676 if (insn & 0xf)
b7bcbe95 2677 return 1;
9ee6e8bb
PB
2678 if (insn & 0x00c00060
2679 && !arm_feature(env, ARM_FEATURE_NEON))
2680 return 1;
2681
2682 pass = (insn >> 21) & 1;
2683 if (insn & (1 << 22)) {
2684 size = 0;
2685 offset = ((insn >> 5) & 3) * 8;
2686 } else if (insn & (1 << 5)) {
2687 size = 1;
2688 offset = (insn & (1 << 6)) ? 16 : 0;
2689 } else {
2690 size = 2;
2691 offset = 0;
2692 }
18c9b560 2693 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2694 /* vfp->arm */
ad69471c 2695 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2696 switch (size) {
2697 case 0:
9ee6e8bb 2698 if (offset)
ad69471c 2699 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2700 if (insn & (1 << 23))
ad69471c 2701 gen_uxtb(tmp);
9ee6e8bb 2702 else
ad69471c 2703 gen_sxtb(tmp);
9ee6e8bb
PB
2704 break;
2705 case 1:
9ee6e8bb
PB
2706 if (insn & (1 << 23)) {
2707 if (offset) {
ad69471c 2708 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2709 } else {
ad69471c 2710 gen_uxth(tmp);
9ee6e8bb
PB
2711 }
2712 } else {
2713 if (offset) {
ad69471c 2714 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2715 } else {
ad69471c 2716 gen_sxth(tmp);
9ee6e8bb
PB
2717 }
2718 }
2719 break;
2720 case 2:
9ee6e8bb
PB
2721 break;
2722 }
ad69471c 2723 store_reg(s, rd, tmp);
b7bcbe95
FB
2724 } else {
2725 /* arm->vfp */
ad69471c 2726 tmp = load_reg(s, rd);
9ee6e8bb
PB
2727 if (insn & (1 << 23)) {
2728 /* VDUP */
2729 if (size == 0) {
ad69471c 2730 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2731 } else if (size == 1) {
ad69471c 2732 gen_neon_dup_low16(tmp);
9ee6e8bb 2733 }
cbbccffc
PB
2734 for (n = 0; n <= pass * 2; n++) {
2735 tmp2 = new_tmp();
2736 tcg_gen_mov_i32(tmp2, tmp);
2737 neon_store_reg(rn, n, tmp2);
2738 }
2739 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2740 } else {
2741 /* VMOV */
2742 switch (size) {
2743 case 0:
ad69471c
PB
2744 tmp2 = neon_load_reg(rn, pass);
2745 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2746 dead_tmp(tmp2);
9ee6e8bb
PB
2747 break;
2748 case 1:
ad69471c
PB
2749 tmp2 = neon_load_reg(rn, pass);
2750 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2751 dead_tmp(tmp2);
9ee6e8bb
PB
2752 break;
2753 case 2:
9ee6e8bb
PB
2754 break;
2755 }
ad69471c 2756 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2757 }
b7bcbe95 2758 }
9ee6e8bb
PB
2759 } else { /* !dp */
2760 if ((insn & 0x6f) != 0x00)
2761 return 1;
2762 rn = VFP_SREG_N(insn);
18c9b560 2763 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2764 /* vfp->arm */
2765 if (insn & (1 << 21)) {
2766 /* system register */
40f137e1 2767 rn >>= 1;
9ee6e8bb 2768
b7bcbe95 2769 switch (rn) {
40f137e1 2770 case ARM_VFP_FPSID:
4373f3ce 2771 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2772 VFP3 restricts all id registers to privileged
2773 accesses. */
2774 if (IS_USER(s)
2775 && arm_feature(env, ARM_FEATURE_VFP3))
2776 return 1;
4373f3ce 2777 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2778 break;
40f137e1 2779 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2780 if (IS_USER(s))
2781 return 1;
4373f3ce 2782 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2783 break;
40f137e1
PB
2784 case ARM_VFP_FPINST:
2785 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2786 /* Not present in VFP3. */
2787 if (IS_USER(s)
2788 || arm_feature(env, ARM_FEATURE_VFP3))
2789 return 1;
4373f3ce 2790 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2791 break;
40f137e1 2792 case ARM_VFP_FPSCR:
601d70b9 2793 if (rd == 15) {
4373f3ce
PB
2794 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2795 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2796 } else {
2797 tmp = new_tmp();
2798 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2799 }
b7bcbe95 2800 break;
9ee6e8bb
PB
2801 case ARM_VFP_MVFR0:
2802 case ARM_VFP_MVFR1:
2803 if (IS_USER(s)
2804 || !arm_feature(env, ARM_FEATURE_VFP3))
2805 return 1;
4373f3ce 2806 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2807 break;
b7bcbe95
FB
2808 default:
2809 return 1;
2810 }
2811 } else {
2812 gen_mov_F0_vreg(0, rn);
4373f3ce 2813 tmp = gen_vfp_mrs();
b7bcbe95
FB
2814 }
2815 if (rd == 15) {
b5ff1b31 2816 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2817 gen_set_nzcv(tmp);
2818 dead_tmp(tmp);
2819 } else {
2820 store_reg(s, rd, tmp);
2821 }
b7bcbe95
FB
2822 } else {
2823 /* arm->vfp */
4373f3ce 2824 tmp = load_reg(s, rd);
b7bcbe95 2825 if (insn & (1 << 21)) {
40f137e1 2826 rn >>= 1;
b7bcbe95
FB
2827 /* system register */
2828 switch (rn) {
40f137e1 2829 case ARM_VFP_FPSID:
9ee6e8bb
PB
2830 case ARM_VFP_MVFR0:
2831 case ARM_VFP_MVFR1:
b7bcbe95
FB
2832 /* Writes are ignored. */
2833 break;
40f137e1 2834 case ARM_VFP_FPSCR:
4373f3ce
PB
2835 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2836 dead_tmp(tmp);
b5ff1b31 2837 gen_lookup_tb(s);
b7bcbe95 2838 break;
40f137e1 2839 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2840 if (IS_USER(s))
2841 return 1;
4373f3ce 2842 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2843 gen_lookup_tb(s);
2844 break;
2845 case ARM_VFP_FPINST:
2846 case ARM_VFP_FPINST2:
4373f3ce 2847 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2848 break;
b7bcbe95
FB
2849 default:
2850 return 1;
2851 }
2852 } else {
4373f3ce 2853 gen_vfp_msr(tmp);
b7bcbe95
FB
2854 gen_mov_vreg_F0(0, rn);
2855 }
2856 }
2857 }
2858 } else {
2859 /* data processing */
2860 /* The opcode is in bits 23, 21, 20 and 6. */
2861 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2862 if (dp) {
2863 if (op == 15) {
2864 /* rn is opcode */
2865 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2866 } else {
2867 /* rn is register number */
9ee6e8bb 2868 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2869 }
2870
2871 if (op == 15 && (rn == 15 || rn > 17)) {
2872 /* Integer or single precision destination. */
9ee6e8bb 2873 rd = VFP_SREG_D(insn);
b7bcbe95 2874 } else {
9ee6e8bb 2875 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2876 }
2877
2878 if (op == 15 && (rn == 16 || rn == 17)) {
2879 /* Integer source. */
2880 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2881 } else {
9ee6e8bb 2882 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2883 }
2884 } else {
9ee6e8bb 2885 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2886 if (op == 15 && rn == 15) {
2887 /* Double precision destination. */
9ee6e8bb
PB
2888 VFP_DREG_D(rd, insn);
2889 } else {
2890 rd = VFP_SREG_D(insn);
2891 }
2892 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2893 }
2894
2895 veclen = env->vfp.vec_len;
2896 if (op == 15 && rn > 3)
2897 veclen = 0;
2898
2899 /* Shut up compiler warnings. */
2900 delta_m = 0;
2901 delta_d = 0;
2902 bank_mask = 0;
3b46e624 2903
b7bcbe95
FB
2904 if (veclen > 0) {
2905 if (dp)
2906 bank_mask = 0xc;
2907 else
2908 bank_mask = 0x18;
2909
2910 /* Figure out what type of vector operation this is. */
2911 if ((rd & bank_mask) == 0) {
2912 /* scalar */
2913 veclen = 0;
2914 } else {
2915 if (dp)
2916 delta_d = (env->vfp.vec_stride >> 1) + 1;
2917 else
2918 delta_d = env->vfp.vec_stride + 1;
2919
2920 if ((rm & bank_mask) == 0) {
2921 /* mixed scalar/vector */
2922 delta_m = 0;
2923 } else {
2924 /* vector */
2925 delta_m = delta_d;
2926 }
2927 }
2928 }
2929
2930 /* Load the initial operands. */
2931 if (op == 15) {
2932 switch (rn) {
2933 case 16:
2934 case 17:
2935 /* Integer source */
2936 gen_mov_F0_vreg(0, rm);
2937 break;
2938 case 8:
2939 case 9:
2940 /* Compare */
2941 gen_mov_F0_vreg(dp, rd);
2942 gen_mov_F1_vreg(dp, rm);
2943 break;
2944 case 10:
2945 case 11:
2946 /* Compare with zero */
2947 gen_mov_F0_vreg(dp, rd);
2948 gen_vfp_F1_ld0(dp);
2949 break;
9ee6e8bb
PB
2950 case 20:
2951 case 21:
2952 case 22:
2953 case 23:
644ad806
PB
2954 case 28:
2955 case 29:
2956 case 30:
2957 case 31:
9ee6e8bb
PB
2958 /* Source and destination the same. */
2959 gen_mov_F0_vreg(dp, rd);
2960 break;
b7bcbe95
FB
2961 default:
2962 /* One source operand. */
2963 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2964 break;
b7bcbe95
FB
2965 }
2966 } else {
2967 /* Two source operands. */
2968 gen_mov_F0_vreg(dp, rn);
2969 gen_mov_F1_vreg(dp, rm);
2970 }
2971
2972 for (;;) {
2973 /* Perform the calculation. */
2974 switch (op) {
2975 case 0: /* mac: fd + (fn * fm) */
2976 gen_vfp_mul(dp);
2977 gen_mov_F1_vreg(dp, rd);
2978 gen_vfp_add(dp);
2979 break;
2980 case 1: /* nmac: fd - (fn * fm) */
2981 gen_vfp_mul(dp);
2982 gen_vfp_neg(dp);
2983 gen_mov_F1_vreg(dp, rd);
2984 gen_vfp_add(dp);
2985 break;
2986 case 2: /* msc: -fd + (fn * fm) */
2987 gen_vfp_mul(dp);
2988 gen_mov_F1_vreg(dp, rd);
2989 gen_vfp_sub(dp);
2990 break;
2991 case 3: /* nmsc: -fd - (fn * fm) */
2992 gen_vfp_mul(dp);
b7bcbe95 2993 gen_vfp_neg(dp);
c9fb531a
PB
2994 gen_mov_F1_vreg(dp, rd);
2995 gen_vfp_sub(dp);
b7bcbe95
FB
2996 break;
2997 case 4: /* mul: fn * fm */
2998 gen_vfp_mul(dp);
2999 break;
3000 case 5: /* nmul: -(fn * fm) */
3001 gen_vfp_mul(dp);
3002 gen_vfp_neg(dp);
3003 break;
3004 case 6: /* add: fn + fm */
3005 gen_vfp_add(dp);
3006 break;
3007 case 7: /* sub: fn - fm */
3008 gen_vfp_sub(dp);
3009 break;
3010 case 8: /* div: fn / fm */
3011 gen_vfp_div(dp);
3012 break;
9ee6e8bb
PB
3013 case 14: /* fconst */
3014 if (!arm_feature(env, ARM_FEATURE_VFP3))
3015 return 1;
3016
3017 n = (insn << 12) & 0x80000000;
3018 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3019 if (dp) {
3020 if (i & 0x40)
3021 i |= 0x3f80;
3022 else
3023 i |= 0x4000;
3024 n |= i << 16;
4373f3ce 3025 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3026 } else {
3027 if (i & 0x40)
3028 i |= 0x780;
3029 else
3030 i |= 0x800;
3031 n |= i << 19;
5b340b51 3032 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3033 }
9ee6e8bb 3034 break;
b7bcbe95
FB
3035 case 15: /* extension space */
3036 switch (rn) {
3037 case 0: /* cpy */
3038 /* no-op */
3039 break;
3040 case 1: /* abs */
3041 gen_vfp_abs(dp);
3042 break;
3043 case 2: /* neg */
3044 gen_vfp_neg(dp);
3045 break;
3046 case 3: /* sqrt */
3047 gen_vfp_sqrt(dp);
3048 break;
3049 case 8: /* cmp */
3050 gen_vfp_cmp(dp);
3051 break;
3052 case 9: /* cmpe */
3053 gen_vfp_cmpe(dp);
3054 break;
3055 case 10: /* cmpz */
3056 gen_vfp_cmp(dp);
3057 break;
3058 case 11: /* cmpez */
3059 gen_vfp_F1_ld0(dp);
3060 gen_vfp_cmpe(dp);
3061 break;
3062 case 15: /* single<->double conversion */
3063 if (dp)
4373f3ce 3064 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3065 else
4373f3ce 3066 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3067 break;
3068 case 16: /* fuito */
3069 gen_vfp_uito(dp);
3070 break;
3071 case 17: /* fsito */
3072 gen_vfp_sito(dp);
3073 break;
9ee6e8bb
PB
3074 case 20: /* fshto */
3075 if (!arm_feature(env, ARM_FEATURE_VFP3))
3076 return 1;
644ad806 3077 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3078 break;
3079 case 21: /* fslto */
3080 if (!arm_feature(env, ARM_FEATURE_VFP3))
3081 return 1;
644ad806 3082 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3083 break;
3084 case 22: /* fuhto */
3085 if (!arm_feature(env, ARM_FEATURE_VFP3))
3086 return 1;
644ad806 3087 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3088 break;
3089 case 23: /* fulto */
3090 if (!arm_feature(env, ARM_FEATURE_VFP3))
3091 return 1;
644ad806 3092 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3093 break;
b7bcbe95
FB
3094 case 24: /* ftoui */
3095 gen_vfp_toui(dp);
3096 break;
3097 case 25: /* ftouiz */
3098 gen_vfp_touiz(dp);
3099 break;
3100 case 26: /* ftosi */
3101 gen_vfp_tosi(dp);
3102 break;
3103 case 27: /* ftosiz */
3104 gen_vfp_tosiz(dp);
3105 break;
9ee6e8bb
PB
3106 case 28: /* ftosh */
3107 if (!arm_feature(env, ARM_FEATURE_VFP3))
3108 return 1;
644ad806 3109 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3110 break;
3111 case 29: /* ftosl */
3112 if (!arm_feature(env, ARM_FEATURE_VFP3))
3113 return 1;
644ad806 3114 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3115 break;
3116 case 30: /* ftouh */
3117 if (!arm_feature(env, ARM_FEATURE_VFP3))
3118 return 1;
644ad806 3119 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3120 break;
3121 case 31: /* ftoul */
3122 if (!arm_feature(env, ARM_FEATURE_VFP3))
3123 return 1;
644ad806 3124 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3125 break;
b7bcbe95
FB
3126 default: /* undefined */
3127 printf ("rn:%d\n", rn);
3128 return 1;
3129 }
3130 break;
3131 default: /* undefined */
3132 printf ("op:%d\n", op);
3133 return 1;
3134 }
3135
3136 /* Write back the result. */
3137 if (op == 15 && (rn >= 8 && rn <= 11))
3138 ; /* Comparison, do nothing. */
3139 else if (op == 15 && rn > 17)
3140 /* Integer result. */
3141 gen_mov_vreg_F0(0, rd);
3142 else if (op == 15 && rn == 15)
3143 /* conversion */
3144 gen_mov_vreg_F0(!dp, rd);
3145 else
3146 gen_mov_vreg_F0(dp, rd);
3147
3148 /* break out of the loop if we have finished */
3149 if (veclen == 0)
3150 break;
3151
3152 if (op == 15 && delta_m == 0) {
3153 /* single source one-many */
3154 while (veclen--) {
3155 rd = ((rd + delta_d) & (bank_mask - 1))
3156 | (rd & bank_mask);
3157 gen_mov_vreg_F0(dp, rd);
3158 }
3159 break;
3160 }
3161 /* Set up the next operands. */
3162 veclen--;
3163 rd = ((rd + delta_d) & (bank_mask - 1))
3164 | (rd & bank_mask);
3165
3166 if (op == 15) {
3167 /* One source operand. */
3168 rm = ((rm + delta_m) & (bank_mask - 1))
3169 | (rm & bank_mask);
3170 gen_mov_F0_vreg(dp, rm);
3171 } else {
3172 /* Two source operands. */
3173 rn = ((rn + delta_d) & (bank_mask - 1))
3174 | (rn & bank_mask);
3175 gen_mov_F0_vreg(dp, rn);
3176 if (delta_m) {
3177 rm = ((rm + delta_m) & (bank_mask - 1))
3178 | (rm & bank_mask);
3179 gen_mov_F1_vreg(dp, rm);
3180 }
3181 }
3182 }
3183 }
3184 break;
3185 case 0xc:
3186 case 0xd:
9ee6e8bb 3187 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3188 /* two-register transfer */
3189 rn = (insn >> 16) & 0xf;
3190 rd = (insn >> 12) & 0xf;
3191 if (dp) {
9ee6e8bb
PB
3192 VFP_DREG_M(rm, insn);
3193 } else {
3194 rm = VFP_SREG_M(insn);
3195 }
b7bcbe95 3196
18c9b560 3197 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3198 /* vfp->arm */
3199 if (dp) {
4373f3ce
PB
3200 gen_mov_F0_vreg(0, rm * 2);
3201 tmp = gen_vfp_mrs();
3202 store_reg(s, rd, tmp);
3203 gen_mov_F0_vreg(0, rm * 2 + 1);
3204 tmp = gen_vfp_mrs();
3205 store_reg(s, rn, tmp);
b7bcbe95
FB
3206 } else {
3207 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3208 tmp = gen_vfp_mrs();
3209 store_reg(s, rn, tmp);
b7bcbe95 3210 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3211 tmp = gen_vfp_mrs();
3212 store_reg(s, rd, tmp);
b7bcbe95
FB
3213 }
3214 } else {
3215 /* arm->vfp */
3216 if (dp) {
4373f3ce
PB
3217 tmp = load_reg(s, rd);
3218 gen_vfp_msr(tmp);
3219 gen_mov_vreg_F0(0, rm * 2);
3220 tmp = load_reg(s, rn);
3221 gen_vfp_msr(tmp);
3222 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3223 } else {
4373f3ce
PB
3224 tmp = load_reg(s, rn);
3225 gen_vfp_msr(tmp);
b7bcbe95 3226 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3227 tmp = load_reg(s, rd);
3228 gen_vfp_msr(tmp);
b7bcbe95
FB
3229 gen_mov_vreg_F0(0, rm + 1);
3230 }
3231 }
3232 } else {
3233 /* Load/store */
3234 rn = (insn >> 16) & 0xf;
3235 if (dp)
9ee6e8bb 3236 VFP_DREG_D(rd, insn);
b7bcbe95 3237 else
9ee6e8bb
PB
3238 rd = VFP_SREG_D(insn);
3239 if (s->thumb && rn == 15) {
3240 gen_op_movl_T1_im(s->pc & ~2);
3241 } else {
3242 gen_movl_T1_reg(s, rn);
3243 }
b7bcbe95
FB
3244 if ((insn & 0x01200000) == 0x01000000) {
3245 /* Single load/store */
3246 offset = (insn & 0xff) << 2;
3247 if ((insn & (1 << 23)) == 0)
3248 offset = -offset;
3249 gen_op_addl_T1_im(offset);
3250 if (insn & (1 << 20)) {
b5ff1b31 3251 gen_vfp_ld(s, dp);
b7bcbe95
FB
3252 gen_mov_vreg_F0(dp, rd);
3253 } else {
3254 gen_mov_F0_vreg(dp, rd);
b5ff1b31 3255 gen_vfp_st(s, dp);
b7bcbe95
FB
3256 }
3257 } else {
3258 /* load/store multiple */
3259 if (dp)
3260 n = (insn >> 1) & 0x7f;
3261 else
3262 n = insn & 0xff;
3263
3264 if (insn & (1 << 24)) /* pre-decrement */
3265 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3266
3267 if (dp)
3268 offset = 8;
3269 else
3270 offset = 4;
3271 for (i = 0; i < n; i++) {
18c9b560 3272 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3273 /* load */
b5ff1b31 3274 gen_vfp_ld(s, dp);
b7bcbe95
FB
3275 gen_mov_vreg_F0(dp, rd + i);
3276 } else {
3277 /* store */
3278 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 3279 gen_vfp_st(s, dp);
b7bcbe95
FB
3280 }
3281 gen_op_addl_T1_im(offset);
3282 }
3283 if (insn & (1 << 21)) {
3284 /* writeback */
3285 if (insn & (1 << 24))
3286 offset = -offset * n;
3287 else if (dp && (insn & 1))
3288 offset = 4;
3289 else
3290 offset = 0;
3291
3292 if (offset != 0)
3293 gen_op_addl_T1_im(offset);
3294 gen_movl_reg_T1(s, rn);
3295 }
3296 }
3297 }
3298 break;
3299 default:
3300 /* Should never happen. */
3301 return 1;
3302 }
3303 return 0;
3304}
3305
6e256c93 3306static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3307{
6e256c93
FB
3308 TranslationBlock *tb;
3309
3310 tb = s->tb;
3311 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3312 tcg_gen_goto_tb(n);
8984bd2e 3313 gen_set_pc_im(dest);
57fec1fe 3314 tcg_gen_exit_tb((long)tb + n);
6e256c93 3315 } else {
8984bd2e 3316 gen_set_pc_im(dest);
57fec1fe 3317 tcg_gen_exit_tb(0);
6e256c93 3318 }
c53be334
FB
3319}
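/* A minimal standalone sketch (not part of the translator) of the same-page
 * test gen_goto_tb() uses to decide whether a branch target may be chained
 * directly with goto_tb or must exit to the main loop.  The 4K page size is
 * an assumption standing in for TARGET_PAGE_MASK. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_MASK (~(uint32_t)0xfff)   /* assume 4K guest pages */

static int same_page(uint32_t tb_pc, uint32_t dest)
{
    return (tb_pc & EXAMPLE_PAGE_MASK) == (dest & EXAMPLE_PAGE_MASK);
}

int main(void)
{
    printf("%d %d\n", same_page(0x8004, 0x8ffc), same_page(0x8ffc, 0x9000));  /* 1 0 */
    return 0;
}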
3320
8aaca4c0
FB
3321static inline void gen_jmp (DisasContext *s, uint32_t dest)
3322{
551bd27f 3323 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3324 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3325 if (s->thumb)
d9ba4830
PB
3326 dest |= 1;
3327 gen_bx_im(s, dest);
8aaca4c0 3328 } else {
6e256c93 3329 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3330 s->is_jmp = DISAS_TB_JUMP;
3331 }
3332}
3333
d9ba4830 3334static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3335{
ee097184 3336 if (x)
d9ba4830 3337 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3338 else
d9ba4830 3339 gen_sxth(t0);
ee097184 3340 if (y)
d9ba4830 3341 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3342 else
d9ba4830
PB
3343 gen_sxth(t1);
3344 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3345}
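/* A minimal standalone sketch (not part of the translator) of the halfword
 * multiply gen_mulxy() generates for the SMULBB/SMULBT/SMULTB/SMULTT family:
 * x and y pick the top or bottom halfword of each operand, which is
 * sign-extended before the 32-bit multiply.  Operand values are arbitrary
 * examples. */
#include <stdint.h>
#include <stdio.h>

static int32_t mulxy(uint32_t t0, uint32_t t1, int x, int y)
{
    int32_t a = (int16_t)(x ? (t0 >> 16) : t0);   /* pick and sign-extend a halfword */
    int32_t b = (int16_t)(y ? (t1 >> 16) : t1);
    return a * b;
}

int main(void)
{
    printf("%d\n", mulxy(0xfffe0002u, 0x00030004u, 1, 0));   /* (-2) * 4 = -8 */
    return 0;
}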
3346
3347/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3348static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3349 uint32_t mask;
3350
3351 mask = 0;
3352 if (flags & (1 << 0))
3353 mask |= 0xff;
3354 if (flags & (1 << 1))
3355 mask |= 0xff00;
3356 if (flags & (1 << 2))
3357 mask |= 0xff0000;
3358 if (flags & (1 << 3))
3359 mask |= 0xff000000;
9ee6e8bb 3360
2ae23e75 3361 /* Mask out undefined bits. */
9ee6e8bb
PB
3362 mask &= ~CPSR_RESERVED;
3363 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3364 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3365 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3366 mask &= ~CPSR_IT;
9ee6e8bb 3367 /* Mask out execution state bits. */
2ae23e75 3368 if (!spsr)
e160c51c 3369 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3370 /* Mask out privileged bits. */
3371 if (IS_USER(s))
9ee6e8bb 3372 mask &= CPSR_USER;
b5ff1b31
FB
3373 return mask;
3374}
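/* A minimal standalone sketch (not part of the translator) of the byte-mask
 * construction at the top of msr_mask(): each of the four MSR field bits
 * (c, x, s, f) selects one byte of the PSR.  The later feature and privilege
 * masking is deliberately omitted here. */
#include <stdint.h>
#include <stdio.h>

static uint32_t msr_field_mask(int flags)
{
    uint32_t mask = 0;
    if (flags & (1 << 0)) mask |= 0x000000ff;  /* c: control field */
    if (flags & (1 << 1)) mask |= 0x0000ff00;  /* x: extension field */
    if (flags & (1 << 2)) mask |= 0x00ff0000;  /* s: status field */
    if (flags & (1 << 3)) mask |= 0xff000000;  /* f: flags field */
    return mask;
}

int main(void)
{
    printf("%#x\n", msr_field_mask(0x9));   /* c and f -> 0xff0000ff */
    return 0;
}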
3375
2fbac54b
FN
3376/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3377static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3378{
d9ba4830 3379 TCGv tmp;
b5ff1b31
FB
3380 if (spsr) {
3381 /* ??? This is also undefined in system mode. */
3382 if (IS_USER(s))
3383 return 1;
d9ba4830
PB
3384
3385 tmp = load_cpu_field(spsr);
3386 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3387 tcg_gen_andi_i32(t0, t0, mask);
3388 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3389 store_cpu_field(tmp, spsr);
b5ff1b31 3390 } else {
2fbac54b 3391 gen_set_cpsr(t0, mask);
b5ff1b31 3392 }
2fbac54b 3393 dead_tmp(t0);
b5ff1b31
FB
3394 gen_lookup_tb(s);
3395 return 0;
3396}
3397
2fbac54b
FN
3398/* Returns nonzero if access to the PSR is not permitted. */
3399static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3400{
3401 TCGv tmp;
3402 tmp = new_tmp();
3403 tcg_gen_movi_i32(tmp, val);
3404 return gen_set_psr(s, mask, spsr, tmp);
3405}
3406
e9bb4aa9
JR
3407/* Generate an old-style exception return. Marks pc as dead. */
3408static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3409{
d9ba4830 3410 TCGv tmp;
e9bb4aa9 3411 store_reg(s, 15, pc);
d9ba4830
PB
3412 tmp = load_cpu_field(spsr);
3413 gen_set_cpsr(tmp, 0xffffffff);
3414 dead_tmp(tmp);
b5ff1b31
FB
3415 s->is_jmp = DISAS_UPDATE;
3416}
3417
b0109805
PB
3418/* Generate a v6 exception return. Marks both values as dead. */
3419static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3420{
b0109805
PB
3421 gen_set_cpsr(cpsr, 0xffffffff);
3422 dead_tmp(cpsr);
3423 store_reg(s, 15, pc);
9ee6e8bb
PB
3424 s->is_jmp = DISAS_UPDATE;
3425}
3b46e624 3426
9ee6e8bb
PB
3427static inline void
3428gen_set_condexec (DisasContext *s)
3429{
3430 if (s->condexec_mask) {
8f01245e
PB
3431 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3432 TCGv tmp = new_tmp();
3433 tcg_gen_movi_i32(tmp, val);
d9ba4830 3434 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3435 }
3436}
3b46e624 3437
9ee6e8bb
PB
3438static void gen_nop_hint(DisasContext *s, int val)
3439{
3440 switch (val) {
3441 case 3: /* wfi */
8984bd2e 3442 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3443 s->is_jmp = DISAS_WFI;
3444 break;
3445 case 2: /* wfe */
3446 case 4: /* sev */
3447 /* TODO: Implement SEV and WFE. May help SMP performance. */
3448 default: /* nop */
3449 break;
3450 }
3451}
99c475ab 3452
ad69471c 3453#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3454
dd8fbd78 3455static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3456{
3457 switch (size) {
dd8fbd78
FN
3458 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3459 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3460 case 2: tcg_gen_add_i32(t0, t0, t1); break;
9ee6e8bb
PB
3461 default: return 1;
3462 }
3463 return 0;
3464}
3465
dd8fbd78 3466static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3467{
3468 switch (size) {
dd8fbd78
FN
3469 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3470 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3471 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3472 default: return;
3473 }
3474}
3475
3476/* 32-bit pairwise ops end up the same as the elementwise versions. */
3477#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3478#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3479#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3480#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3481
3482/* FIXME: This is wrong. They set the wrong overflow bit. */
3483#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3484#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3485#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3486#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3487
3488#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3489 switch ((size << 1) | u) { \
3490 case 0: \
dd8fbd78 3491 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3492 break; \
3493 case 1: \
dd8fbd78 3494 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3495 break; \
3496 case 2: \
dd8fbd78 3497 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3498 break; \
3499 case 3: \
dd8fbd78 3500 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3501 break; \
3502 case 4: \
dd8fbd78 3503 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3504 break; \
3505 case 5: \
dd8fbd78 3506 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3507 break; \
3508 default: return 1; \
3509 }} while (0)
9ee6e8bb
PB
3510
3511#define GEN_NEON_INTEGER_OP(name) do { \
3512 switch ((size << 1) | u) { \
ad69471c 3513 case 0: \
dd8fbd78 3514 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3515 break; \
3516 case 1: \
dd8fbd78 3517 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3518 break; \
3519 case 2: \
dd8fbd78 3520 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3521 break; \
3522 case 3: \
dd8fbd78 3523 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3524 break; \
3525 case 4: \
dd8fbd78 3526 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3527 break; \
3528 case 5: \
dd8fbd78 3529 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3530 break; \
9ee6e8bb
PB
3531 default: return 1; \
3532 }} while (0)
3533
dd8fbd78 3534static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3535{
dd8fbd78
FN
3536 TCGv tmp = new_tmp();
3537 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3538 return tmp;
9ee6e8bb
PB
3539}
3540
dd8fbd78 3541static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3542{
dd8fbd78
FN
3543 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3544 dead_tmp(var);
9ee6e8bb
PB
3545}
3546
dd8fbd78 3547static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3548{
dd8fbd78 3549 TCGv tmp;
9ee6e8bb 3550 if (size == 1) {
dd8fbd78 3551 tmp = neon_load_reg(reg >> 1, reg & 1);
9ee6e8bb 3552 } else {
dd8fbd78
FN
3553 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3554 if (reg & 1) {
3555 gen_neon_dup_low16(tmp);
3556 } else {
3557 gen_neon_dup_high16(tmp);
3558 }
9ee6e8bb 3559 }
dd8fbd78 3560 return tmp;
9ee6e8bb
PB
3561}
3562
19457615
FN
3563static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3564{
3565 TCGv rd, rm, tmp;
3566
3567 rd = new_tmp();
3568 rm = new_tmp();
3569 tmp = new_tmp();
3570
3571 tcg_gen_andi_i32(rd, t0, 0xff);
3572 tcg_gen_shri_i32(tmp, t0, 8);
3573 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3574 tcg_gen_or_i32(rd, rd, tmp);
3575 tcg_gen_shli_i32(tmp, t1, 16);
3576 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3577 tcg_gen_or_i32(rd, rd, tmp);
3578 tcg_gen_shli_i32(tmp, t1, 8);
3579 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3580 tcg_gen_or_i32(rd, rd, tmp);
3581
3582 tcg_gen_shri_i32(rm, t0, 8);
3583 tcg_gen_andi_i32(rm, rm, 0xff);
3584 tcg_gen_shri_i32(tmp, t0, 16);
3585 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3586 tcg_gen_or_i32(rm, rm, tmp);
3587 tcg_gen_shli_i32(tmp, t1, 8);
3588 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3589 tcg_gen_or_i32(rm, rm, tmp);
3590 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3591 tcg_gen_or_i32(t1, rm, tmp);
3592 tcg_gen_mov_i32(t0, rd);
3593
3594 dead_tmp(tmp);
3595 dead_tmp(rm);
3596 dead_tmp(rd);
3597}
3598
3599static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3600{
3601 TCGv rd, rm, tmp;
3602
3603 rd = new_tmp();
3604 rm = new_tmp();
3605 tmp = new_tmp();
3606
3607 tcg_gen_andi_i32(rd, t0, 0xff);
3608 tcg_gen_shli_i32(tmp, t1, 8);
3609 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3610 tcg_gen_or_i32(rd, rd, tmp);
3611 tcg_gen_shli_i32(tmp, t0, 16);
3612 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3613 tcg_gen_or_i32(rd, rd, tmp);
3614 tcg_gen_shli_i32(tmp, t1, 24);
3615 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3616 tcg_gen_or_i32(rd, rd, tmp);
3617
3618 tcg_gen_andi_i32(rm, t1, 0xff000000);
3619 tcg_gen_shri_i32(tmp, t0, 8);
3620 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3621 tcg_gen_or_i32(rm, rm, tmp);
3622 tcg_gen_shri_i32(tmp, t1, 8);
3623 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3624 tcg_gen_or_i32(rm, rm, tmp);
3625 tcg_gen_shri_i32(tmp, t0, 16);
3626 tcg_gen_andi_i32(tmp, tmp, 0xff);
3627 tcg_gen_or_i32(t1, rm, tmp);
3628 tcg_gen_mov_i32(t0, rd);
3629
3630 dead_tmp(tmp);
3631 dead_tmp(rm);
3632 dead_tmp(rd);
3633}
3634
3635static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3636{
3637 TCGv tmp, tmp2;
3638
3639 tmp = new_tmp();
3640 tmp2 = new_tmp();
3641
3642 tcg_gen_andi_i32(tmp, t0, 0xffff);
3643 tcg_gen_shli_i32(tmp2, t1, 16);
3644 tcg_gen_or_i32(tmp, tmp, tmp2);
3645 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3646 tcg_gen_shri_i32(tmp2, t0, 16);
3647 tcg_gen_or_i32(t1, t1, tmp2);
3648 tcg_gen_mov_i32(t0, tmp);
3649
3650 dead_tmp(tmp2);
3651 dead_tmp(tmp);
3652}
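/* A minimal standalone sketch (not part of the translator) of the halfword
 * interleave gen_neon_zip_u16() performs: lanes [a0,a1] and [b0,b1] become
 * [a0,b0] and [a1,b1] (lanes listed low to high).  Values are arbitrary
 * examples. */
#include <stdint.h>
#include <stdio.h>

static void zip_u16(uint32_t *t0, uint32_t *t1)
{
    uint32_t lo = (*t0 & 0xffff) | (*t1 << 16);       /* a0 in the low lane, b0 above it */
    uint32_t hi = (*t1 & 0xffff0000) | (*t0 >> 16);   /* a1 in the low lane, b1 above it */
    *t0 = lo;
    *t1 = hi;
}

int main(void)
{
    uint32_t t0 = 0xa1a1a0a0, t1 = 0xb1b1b0b0;
    zip_u16(&t0, &t1);
    printf("%#x %#x\n", t0, t1);   /* 0xb0b0a0a0 0xb1b1a1a1 */
    return 0;
}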
3653
9ee6e8bb
PB
3654static void gen_neon_unzip(int reg, int q, int tmp, int size)
3655{
3656 int n;
dd8fbd78 3657 TCGv t0, t1;
9ee6e8bb
PB
3658
3659 for (n = 0; n < q + 1; n += 2) {
dd8fbd78
FN
3660 t0 = neon_load_reg(reg, n);
3661 t1 = neon_load_reg(reg, n + 1);
9ee6e8bb 3662 switch (size) {
dd8fbd78
FN
3663 case 0: gen_neon_unzip_u8(t0, t1); break;
3664 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3665 case 2: /* no-op */; break;
3666 default: abort();
3667 }
dd8fbd78
FN
3668 neon_store_scratch(tmp + n, t0);
3669 neon_store_scratch(tmp + n + 1, t1);
9ee6e8bb
PB
3670 }
3671}
3672
19457615
FN
3673static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3674{
3675 TCGv rd, tmp;
3676
3677 rd = new_tmp();
3678 tmp = new_tmp();
3679
3680 tcg_gen_shli_i32(rd, t0, 8);
3681 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3682 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3683 tcg_gen_or_i32(rd, rd, tmp);
3684
3685 tcg_gen_shri_i32(t1, t1, 8);
3686 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3687 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3688 tcg_gen_or_i32(t1, t1, tmp);
3689 tcg_gen_mov_i32(t0, rd);
3690
3691 dead_tmp(tmp);
3692 dead_tmp(rd);
3693}
3694
3695static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3696{
3697 TCGv rd, tmp;
3698
3699 rd = new_tmp();
3700 tmp = new_tmp();
3701
3702 tcg_gen_shli_i32(rd, t0, 16);
3703 tcg_gen_andi_i32(tmp, t1, 0xffff);
3704 tcg_gen_or_i32(rd, rd, tmp);
3705 tcg_gen_shri_i32(t1, t1, 16);
3706 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3707 tcg_gen_or_i32(t1, t1, tmp);
3708 tcg_gen_mov_i32(t0, rd);
3709
3710 dead_tmp(tmp);
3711 dead_tmp(rd);
3712}
3713
3714
9ee6e8bb
PB
3715static struct {
3716 int nregs;
3717 int interleave;
3718 int spacing;
3719} neon_ls_element_type[11] = {
3720 {4, 4, 1},
3721 {4, 4, 2},
3722 {4, 1, 1},
3723 {4, 2, 1},
3724 {3, 3, 1},
3725 {3, 3, 2},
3726 {3, 1, 1},
3727 {1, 1, 1},
3728 {2, 2, 1},
3729 {2, 2, 2},
3730 {2, 1, 1}
3731};
3732
3733/* Translate a NEON load/store element instruction. Return nonzero if the
3734 instruction is invalid. */
3735static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3736{
3737 int rd, rn, rm;
3738 int op;
3739 int nregs;
3740 int interleave;
3741 int stride;
3742 int size;
3743 int reg;
3744 int pass;
3745 int load;
3746 int shift;
9ee6e8bb 3747 int n;
1b2b1e54 3748 TCGv addr;
b0109805 3749 TCGv tmp;
8f8e3aa4 3750 TCGv tmp2;
9ee6e8bb
PB
3751
3752 if (!vfp_enabled(env))
3753 return 1;
3754 VFP_DREG_D(rd, insn);
3755 rn = (insn >> 16) & 0xf;
3756 rm = insn & 0xf;
3757 load = (insn & (1 << 21)) != 0;
1b2b1e54 3758 addr = new_tmp();
9ee6e8bb
PB
3759 if ((insn & (1 << 23)) == 0) {
3760 /* Load store all elements. */
3761 op = (insn >> 8) & 0xf;
3762 size = (insn >> 6) & 3;
3763 if (op > 10 || size == 3)
3764 return 1;
3765 nregs = neon_ls_element_type[op].nregs;
3766 interleave = neon_ls_element_type[op].interleave;
1b2b1e54 3767 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb
PB
3768 stride = (1 << size) * interleave;
3769 for (reg = 0; reg < nregs; reg++) {
3770 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
1b2b1e54 3771 tcg_gen_addi_i32(addr, cpu_R[rn], (1 << size) * reg);
9ee6e8bb 3772 } else if (interleave == 2 && nregs == 4 && reg == 2) {
1b2b1e54 3773 tcg_gen_addi_i32(addr, cpu_R[rn], 1 << size);
9ee6e8bb
PB
3774 }
3775 for (pass = 0; pass < 2; pass++) {
3776 if (size == 2) {
3777 if (load) {
1b2b1e54 3778 tmp = gen_ld32(addr, IS_USER(s));
ad69471c 3779 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3780 } else {
ad69471c 3781 tmp = neon_load_reg(rd, pass);
1b2b1e54 3782 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3783 }
1b2b1e54 3784 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb
PB
3785 } else if (size == 1) {
3786 if (load) {
1b2b1e54
FN
3787 tmp = gen_ld16u(addr, IS_USER(s));
3788 tcg_gen_addi_i32(addr, addr, stride);
3789 tmp2 = gen_ld16u(addr, IS_USER(s));
3790 tcg_gen_addi_i32(addr, addr, stride);
8f8e3aa4
PB
3791 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3792 dead_tmp(tmp2);
3793 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3794 } else {
8f8e3aa4
PB
3795 tmp = neon_load_reg(rd, pass);
3796 tmp2 = new_tmp();
3797 tcg_gen_shri_i32(tmp2, tmp, 16);
1b2b1e54
FN
3798 gen_st16(tmp, addr, IS_USER(s));
3799 tcg_gen_addi_i32(addr, addr, stride);
3800 gen_st16(tmp2, addr, IS_USER(s));
3801 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb
PB
3802 }
3803 } else /* size == 0 */ {
3804 if (load) {
a50f5b91 3805 TCGV_UNUSED(tmp2);
9ee6e8bb 3806 for (n = 0; n < 4; n++) {
1b2b1e54
FN
3807 tmp = gen_ld8u(addr, IS_USER(s));
3808 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3809 if (n == 0) {
8f8e3aa4 3810 tmp2 = tmp;
9ee6e8bb 3811 } else {
8f8e3aa4
PB
3812 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3813 dead_tmp(tmp);
9ee6e8bb 3814 }
9ee6e8bb 3815 }
8f8e3aa4 3816 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3817 } else {
8f8e3aa4 3818 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3819 for (n = 0; n < 4; n++) {
8f8e3aa4 3820 tmp = new_tmp();
9ee6e8bb 3821 if (n == 0) {
8f8e3aa4 3822 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3823 } else {
8f8e3aa4 3824 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3825 }
1b2b1e54
FN
3826 gen_st8(tmp, addr, IS_USER(s));
3827 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3828 }
8f8e3aa4 3829 dead_tmp(tmp2);
9ee6e8bb
PB
3830 }
3831 }
3832 }
3833 rd += neon_ls_element_type[op].spacing;
3834 }
3835 stride = nregs * 8;
3836 } else {
3837 size = (insn >> 10) & 3;
3838 if (size == 3) {
3839 /* Load single element to all lanes. */
3840 if (!load)
3841 return 1;
3842 size = (insn >> 6) & 3;
3843 nregs = ((insn >> 8) & 3) + 1;
3844 stride = (insn & (1 << 5)) ? 2 : 1;
1b2b1e54 3845 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb
PB
3846 for (reg = 0; reg < nregs; reg++) {
3847 switch (size) {
3848 case 0:
1b2b1e54 3849 tmp = gen_ld8u(addr, IS_USER(s));
ad69471c 3850 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3851 break;
3852 case 1:
1b2b1e54 3853 tmp = gen_ld16u(addr, IS_USER(s));
ad69471c 3854 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3855 break;
3856 case 2:
1b2b1e54 3857 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
3858 break;
3859 case 3:
3860 return 1;
a50f5b91
PB
3861 default: /* Avoid compiler warnings. */
3862 abort();
99c475ab 3863 }
1b2b1e54 3864 tcg_gen_addi_i32(addr, addr, 1 << size);
ad69471c
PB
3865 tmp2 = new_tmp();
3866 tcg_gen_mov_i32(tmp2, tmp);
3867 neon_store_reg(rd, 0, tmp2);
3018f259 3868 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3869 rd += stride;
3870 }
3871 stride = (1 << size) * nregs;
3872 } else {
3873 /* Single element. */
3874 pass = (insn >> 7) & 1;
3875 switch (size) {
3876 case 0:
3877 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3878 stride = 1;
3879 break;
3880 case 1:
3881 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3882 stride = (insn & (1 << 5)) ? 2 : 1;
3883 break;
3884 case 2:
3885 shift = 0;
9ee6e8bb
PB
3886 stride = (insn & (1 << 6)) ? 2 : 1;
3887 break;
3888 default:
3889 abort();
3890 }
3891 nregs = ((insn >> 8) & 3) + 1;
1b2b1e54 3892 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb
PB
3893 for (reg = 0; reg < nregs; reg++) {
3894 if (load) {
9ee6e8bb
PB
3895 switch (size) {
3896 case 0:
1b2b1e54 3897 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
3898 break;
3899 case 1:
1b2b1e54 3900 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
3901 break;
3902 case 2:
1b2b1e54 3903 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 3904 break;
a50f5b91
PB
3905 default: /* Avoid compiler warnings. */
3906 abort();
9ee6e8bb
PB
3907 }
3908 if (size != 2) {
8f8e3aa4
PB
3909 tmp2 = neon_load_reg(rd, pass);
3910 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3911 dead_tmp(tmp2);
9ee6e8bb 3912 }
8f8e3aa4 3913 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3914 } else { /* Store */
8f8e3aa4
PB
3915 tmp = neon_load_reg(rd, pass);
3916 if (shift)
3917 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3918 switch (size) {
3919 case 0:
1b2b1e54 3920 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3921 break;
3922 case 1:
1b2b1e54 3923 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
3924 break;
3925 case 2:
1b2b1e54 3926 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 3927 break;
99c475ab 3928 }
99c475ab 3929 }
9ee6e8bb 3930 rd += stride;
1b2b1e54 3931 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 3932 }
9ee6e8bb 3933 stride = nregs * (1 << size);
99c475ab 3934 }
9ee6e8bb 3935 }
1b2b1e54 3936 dead_tmp(addr);
9ee6e8bb 3937 if (rm != 15) {
b26eefb6
PB
3938 TCGv base;
3939
3940 base = load_reg(s, rn);
9ee6e8bb 3941 if (rm == 13) {
b26eefb6 3942 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3943 } else {
b26eefb6
PB
3944 TCGv index;
3945 index = load_reg(s, rm);
3946 tcg_gen_add_i32(base, base, index);
3947 dead_tmp(index);
9ee6e8bb 3948 }
b26eefb6 3949 store_reg(s, rn, base);
9ee6e8bb
PB
3950 }
3951 return 0;
3952}
3b46e624 3953
8f8e3aa4
PB
3954/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3955static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3956{
3957 tcg_gen_and_i32(t, t, c);
3958 tcg_gen_bic_i32(f, f, c);
3959 tcg_gen_or_i32(dest, t, f);
3960}
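/* Note: the VBSL, VBIT and VBIF cases of the three-register-same-length
   group below all reuse this helper; they differ only in which operand
   supplies the select mask c. */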
3961
a7812ae4 3962static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3963{
3964 switch (size) {
3965 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3966 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3967 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3968 default: abort();
3969 }
3970}
3971
a7812ae4 3972static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3973{
3974 switch (size) {
3975 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3976 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3977 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3978 default: abort();
3979 }
3980}
3981
a7812ae4 3982static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3983{
3984 switch (size) {
3985 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3986 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3987 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3988 default: abort();
3989 }
3990}
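/* The saturating narrow helpers take cpu_env so they can record
   saturation in the cumulative QC flag when the value does not fit. */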
3991
3992static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3993 int q, int u)
3994{
3995 if (q) {
3996 if (u) {
3997 switch (size) {
3998 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3999 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4000 default: abort();
4001 }
4002 } else {
4003 switch (size) {
4004 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4005 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4006 default: abort();
4007 }
4008 }
4009 } else {
4010 if (u) {
4011 switch (size) {
4012 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4013 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4014 default: abort();
4015 }
4016 } else {
4017 switch (size) {
4018 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4019 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4020 default: abort();
4021 }
4022 }
4023 }
4024}
4025
a7812ae4 4026static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4027{
4028 if (u) {
4029 switch (size) {
4030 case 0: gen_helper_neon_widen_u8(dest, src); break;
4031 case 1: gen_helper_neon_widen_u16(dest, src); break;
4032 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4033 default: abort();
4034 }
4035 } else {
4036 switch (size) {
4037 case 0: gen_helper_neon_widen_s8(dest, src); break;
4038 case 1: gen_helper_neon_widen_s16(dest, src); break;
4039 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4040 default: abort();
4041 }
4042 }
4043 dead_tmp(src);
4044}
4045
4046static inline void gen_neon_addl(int size)
4047{
4048 switch (size) {
4049 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4050 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4051 case 2: tcg_gen_add_i64(CPU_V001); break;
4052 default: abort();
4053 }
4054}
4055
4056static inline void gen_neon_subl(int size)
4057{
4058 switch (size) {
4059 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4060 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4061 case 2: tcg_gen_sub_i64(CPU_V001); break;
4062 default: abort();
4063 }
4064}
4065
a7812ae4 4066static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4067{
4068 switch (size) {
4069 case 0: gen_helper_neon_negl_u16(var, var); break;
4070 case 1: gen_helper_neon_negl_u32(var, var); break;
4071 case 2: gen_helper_neon_negl_u64(var, var); break;
4072 default: abort();
4073 }
4074}
4075
a7812ae4 4076static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4077{
4078 switch (size) {
4079 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4080 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4081 default: abort();
4082 }
4083}
4084
a7812ae4 4085static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4086{
a7812ae4 4087 TCGv_i64 tmp;
ad69471c
PB
4088
4089 switch ((size << 1) | u) {
4090 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4091 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4092 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4093 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4094 case 4:
4095 tmp = gen_muls_i64_i32(a, b);
4096 tcg_gen_mov_i64(dest, tmp);
4097 break;
4098 case 5:
4099 tmp = gen_mulu_i64_i32(a, b);
4100 tcg_gen_mov_i64(dest, tmp);
4101 break;
4102 default: abort();
4103 }
ad69471c
PB
4104}
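/* The (size << 1) | u index picks a signed or unsigned widening multiply;
   for 32-bit elements (cases 4 and 5) the generic 64-bit multiply helpers
   are used instead of NEON-specific ones. */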
4105
9ee6e8bb
PB
4106/* Translate a NEON data processing instruction. Return nonzero if the
4107 instruction is invalid.
ad69471c
PB
4108 We process data in a mixture of 32-bit and 64-bit chunks.
4109 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
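/* A "pass" below is one 32-bit chunk of a D register (or one 64-bit chunk
   for the 64-bit element cases), so a D register is two passes and a
   Q register four. */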
2c0262af 4110
9ee6e8bb
PB
4111static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4112{
4113 int op;
4114 int q;
4115 int rd, rn, rm;
4116 int size;
4117 int shift;
4118 int pass;
4119 int count;
4120 int pairwise;
4121 int u;
4122 int n;
4123 uint32_t imm;
8f8e3aa4
PB
4124 TCGv tmp;
4125 TCGv tmp2;
4126 TCGv tmp3;
a7812ae4 4127 TCGv_i64 tmp64;
9ee6e8bb
PB
4128
4129 if (!vfp_enabled(env))
4130 return 1;
4131 q = (insn & (1 << 6)) != 0;
4132 u = (insn >> 24) & 1;
4133 VFP_DREG_D(rd, insn);
4134 VFP_DREG_N(rn, insn);
4135 VFP_DREG_M(rm, insn);
4136 size = (insn >> 20) & 3;
4137 if ((insn & (1 << 23)) == 0) {
4138 /* Three register same length. */
4139 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4140 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4141 || op == 10 || op == 11 || op == 16)) {
4142 /* 64-bit element instructions. */
9ee6e8bb 4143 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4144 neon_load_reg64(cpu_V0, rn + pass);
4145 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4146 switch (op) {
4147 case 1: /* VQADD */
4148 if (u) {
ad69471c 4149 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4150 } else {
ad69471c 4151 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4152 }
9ee6e8bb
PB
4153 break;
4154 case 5: /* VQSUB */
4155 if (u) {
ad69471c
PB
4156 gen_helper_neon_sub_saturate_u64(CPU_V001);
4157 } else {
4158 gen_helper_neon_sub_saturate_s64(CPU_V001);
4159 }
4160 break;
4161 case 8: /* VSHL */
4162 if (u) {
4163 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4164 } else {
4165 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4166 }
4167 break;
4168 case 9: /* VQSHL */
4169 if (u) {
4170 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4171 cpu_V0, cpu_V0);
4172 } else {
4173 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4174 cpu_V1, cpu_V0);
4175 }
4176 break;
4177 case 10: /* VRSHL */
4178 if (u) {
4179 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4180 } else {
ad69471c
PB
4181 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4182 }
4183 break;
4184 case 11: /* VQRSHL */
4185 if (u) {
4186 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4187 cpu_V1, cpu_V0);
4188 } else {
4189 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4190 cpu_V1, cpu_V0);
1e8d4eec 4191 }
9ee6e8bb
PB
4192 break;
4193 case 16:
4194 if (u) {
ad69471c 4195 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4196 } else {
ad69471c 4197 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4198 }
4199 break;
4200 default:
4201 abort();
2c0262af 4202 }
ad69471c 4203 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4204 }
9ee6e8bb 4205 return 0;
2c0262af 4206 }
9ee6e8bb
PB
4207 switch (op) {
4208 case 8: /* VSHL */
4209 case 9: /* VQSHL */
4210 case 10: /* VRSHL */
ad69471c 4211 case 11: /* VQRSHL */
9ee6e8bb 4212 {
ad69471c
PB
4213 int rtmp;
4214 /* Shift instruction operands are reversed. */
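/* The value to be shifted comes from Vm and the shift count from Vn,
   the reverse of the usual operand order, so swap rn and rm before
   the common per-pass code runs. */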
4215 rtmp = rn;
9ee6e8bb 4216 rn = rm;
ad69471c 4217 rm = rtmp;
9ee6e8bb
PB
4218 pairwise = 0;
4219 }
2c0262af 4220 break;
9ee6e8bb
PB
4221 case 20: /* VPMAX */
4222 case 21: /* VPMIN */
4223 case 23: /* VPADD */
4224 pairwise = 1;
2c0262af 4225 break;
9ee6e8bb
PB
4226 case 26: /* VPADD (float) */
4227 pairwise = (u && size < 2);
2c0262af 4228 break;
9ee6e8bb
PB
4229 case 30: /* VPMIN/VPMAX (float) */
4230 pairwise = u;
2c0262af 4231 break;
9ee6e8bb
PB
4232 default:
4233 pairwise = 0;
2c0262af 4234 break;
9ee6e8bb 4235 }
dd8fbd78 4236
9ee6e8bb
PB
4237 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4238
4239 if (pairwise) {
4240 /* Pairwise. */
4241 if (q)
4242 n = (pass & 1) * 2;
2c0262af 4243 else
9ee6e8bb
PB
4244 n = 0;
4245 if (pass < q + 1) {
dd8fbd78
FN
4246 tmp = neon_load_reg(rn, n);
4247 tmp2 = neon_load_reg(rn, n + 1);
9ee6e8bb 4248 } else {
dd8fbd78
FN
4249 tmp = neon_load_reg(rm, n);
4250 tmp2 = neon_load_reg(rm, n + 1);
9ee6e8bb
PB
4251 }
4252 } else {
4253 /* Elementwise. */
dd8fbd78
FN
4254 tmp = neon_load_reg(rn, pass);
4255 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4256 }
4257 switch (op) {
4258 case 0: /* VHADD */
4259 GEN_NEON_INTEGER_OP(hadd);
4260 break;
4261 case 1: /* VQADD */
ad69471c 4262 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4263 break;
9ee6e8bb
PB
4264 case 2: /* VRHADD */
4265 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4266 break;
9ee6e8bb
PB
4267 case 3: /* Logic ops. */
4268 switch ((u << 2) | size) {
4269 case 0: /* VAND */
dd8fbd78 4270 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4271 break;
4272 case 1: /* BIC */
dd8fbd78 4273 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4274 break;
4275 case 2: /* VORR */
dd8fbd78 4276 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4277 break;
4278 case 3: /* VORN */
dd8fbd78
FN
4279 tcg_gen_not_i32(tmp2, tmp2);
4280 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4281 break;
4282 case 4: /* VEOR */
dd8fbd78 4283 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4284 break;
4285 case 5: /* VBSL */
dd8fbd78
FN
4286 tmp3 = neon_load_reg(rd, pass);
4287 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4288 dead_tmp(tmp3);
9ee6e8bb
PB
4289 break;
4290 case 6: /* VBIT */
dd8fbd78
FN
4291 tmp3 = neon_load_reg(rd, pass);
4292 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4293 dead_tmp(tmp3);
9ee6e8bb
PB
4294 break;
4295 case 7: /* VBIF */
dd8fbd78
FN
4296 tmp3 = neon_load_reg(rd, pass);
4297 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4298 dead_tmp(tmp3);
9ee6e8bb 4299 break;
2c0262af
FB
4300 }
4301 break;
9ee6e8bb
PB
4302 case 4: /* VHSUB */
4303 GEN_NEON_INTEGER_OP(hsub);
4304 break;
4305 case 5: /* VQSUB */
ad69471c 4306 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4307 break;
9ee6e8bb
PB
4308 case 6: /* VCGT */
4309 GEN_NEON_INTEGER_OP(cgt);
4310 break;
4311 case 7: /* VCGE */
4312 GEN_NEON_INTEGER_OP(cge);
4313 break;
4314 case 8: /* VSHL */
ad69471c 4315 GEN_NEON_INTEGER_OP(shl);
2c0262af 4316 break;
9ee6e8bb 4317 case 9: /* VQSHL */
ad69471c 4318 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4319 break;
9ee6e8bb 4320 case 10: /* VRSHL */
ad69471c 4321 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4322 break;
9ee6e8bb 4323 case 11: /* VQRSHL */
ad69471c 4324 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4325 break;
4326 case 12: /* VMAX */
4327 GEN_NEON_INTEGER_OP(max);
4328 break;
4329 case 13: /* VMIN */
4330 GEN_NEON_INTEGER_OP(min);
4331 break;
4332 case 14: /* VABD */
4333 GEN_NEON_INTEGER_OP(abd);
4334 break;
4335 case 15: /* VABA */
4336 GEN_NEON_INTEGER_OP(abd);
dd8fbd78
FN
4337 dead_tmp(tmp2);
4338 tmp2 = neon_load_reg(rd, pass);
4339 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4340 break;
4341 case 16:
4342 if (!u) { /* VADD */
dd8fbd78 4343 if (gen_neon_add(size, tmp, tmp2))
9ee6e8bb
PB
4344 return 1;
4345 } else { /* VSUB */
4346 switch (size) {
dd8fbd78
FN
4347 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4348 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4349 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4350 default: return 1;
4351 }
4352 }
4353 break;
4354 case 17:
4355 if (!u) { /* VTST */
4356 switch (size) {
dd8fbd78
FN
4357 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4358 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4359 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4360 default: return 1;
4361 }
4362 } else { /* VCEQ */
4363 switch (size) {
dd8fbd78
FN
4364 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4365 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4366 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4367 default: return 1;
4368 }
4369 }
4370 break;
4371 case 18: /* Multiply. */
4372 switch (size) {
dd8fbd78
FN
4373 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4374 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4375 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4376 default: return 1;
4377 }
dd8fbd78
FN
4378 dead_tmp(tmp2);
4379 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4380 if (u) { /* VMLS */
dd8fbd78 4381 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4382 } else { /* VMLA */
dd8fbd78 4383 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4384 }
4385 break;
4386 case 19: /* VMUL */
4387 if (u) { /* polynomial */
dd8fbd78 4388 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4389 } else { /* Integer */
4390 switch (size) {
dd8fbd78
FN
4391 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4392 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4393 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4394 default: return 1;
4395 }
4396 }
4397 break;
4398 case 20: /* VPMAX */
4399 GEN_NEON_INTEGER_OP(pmax);
4400 break;
4401 case 21: /* VPMIN */
4402 GEN_NEON_INTEGER_OP(pmin);
4403 break;
4404 case 22: /* Multiply high. */
4405 if (!u) { /* VQDMULH */
4406 switch (size) {
dd8fbd78
FN
4407 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4408 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4409 default: return 1;
4410 }
4411 } else { /* VQRDMULH */
4412 switch (size) {
dd8fbd78
FN
4413 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4414 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
9ee6e8bb
PB
4415 default: return 1;
4416 }
4417 }
4418 break;
4419 case 23: /* VPADD */
4420 if (u)
4421 return 1;
4422 switch (size) {
dd8fbd78
FN
4423 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4424 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4425 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
4426 default: return 1;
4427 }
4428 break;
4429 case 26: /* Floating point arithmetic. */
4430 switch ((u << 2) | size) {
4431 case 0: /* VADD */
dd8fbd78 4432 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4433 break;
4434 case 2: /* VSUB */
dd8fbd78 4435 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4436 break;
4437 case 4: /* VPADD */
dd8fbd78 4438 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4439 break;
4440 case 6: /* VABD */
dd8fbd78 4441 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4442 break;
4443 default:
4444 return 1;
4445 }
4446 break;
4447 case 27: /* Float multiply. */
dd8fbd78 4448 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4449 if (!u) {
dd8fbd78
FN
4450 dead_tmp(tmp2);
4451 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4452 if (size == 0) {
dd8fbd78 4453 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4454 } else {
dd8fbd78 4455 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4456 }
4457 }
4458 break;
4459 case 28: /* Float compare. */
4460 if (!u) {
dd8fbd78 4461 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4462 } else {
9ee6e8bb 4463 if (size == 0)
dd8fbd78 4464 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4465 else
dd8fbd78 4466 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4467 }
2c0262af 4468 break;
9ee6e8bb
PB
4469 case 29: /* Float compare absolute. */
4470 if (!u)
4471 return 1;
4472 if (size == 0)
dd8fbd78 4473 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4474 else
dd8fbd78 4475 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4476 break;
9ee6e8bb
PB
4477 case 30: /* Float min/max. */
4478 if (size == 0)
dd8fbd78 4479 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4480 else
dd8fbd78 4481 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4482 break;
4483 case 31:
4484 if (size == 0)
dd8fbd78 4485 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4486 else
dd8fbd78 4487 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4488 break;
9ee6e8bb
PB
4489 default:
4490 abort();
2c0262af 4491 }
dd8fbd78
FN
4492 dead_tmp(tmp2);
4493
9ee6e8bb
PB
4494 /* Save the result. For elementwise operations we can put it
4495 straight into the destination register. For pairwise operations
4496 we have to be careful to avoid clobbering the source operands. */
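/* The scratch slots persist across passes, so for pairwise ops rd is
   only overwritten (below) once every input element has been read. */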
4497 if (pairwise && rd == rm) {
dd8fbd78 4498 neon_store_scratch(pass, tmp);
9ee6e8bb 4499 } else {
dd8fbd78 4500 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4501 }
4502
4503 } /* for pass */
4504 if (pairwise && rd == rm) {
4505 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4506 tmp = neon_load_scratch(pass);
4507 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4508 }
4509 }
ad69471c 4510 /* End of 3 register same size operations. */
9ee6e8bb
PB
4511 } else if (insn & (1 << 4)) {
4512 if ((insn & 0x00380080) != 0) {
4513 /* Two registers and shift. */
4514 op = (insn >> 8) & 0xf;
4515 if (insn & (1 << 7)) {
4516 /* 64-bit shift. */
4517 size = 3;
4518 } else {
4519 size = 2;
4520 while ((insn & (1 << (size + 19))) == 0)
4521 size--;
4522 }
4523 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4524 /* To avoid excessive duplication of ops we implement shift
4525 by immediate using the variable shift operations. */
4526 if (op < 8) {
4527 /* Shift by immediate:
4528 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4529 /* Right shifts are encoded as N - shift, where N is the
4530 element size in bits. */
4531 if (op <= 4)
4532 shift = shift - (1 << (size + 3));
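/* e.g. with 8-bit elements (size == 0) an immediate field of 7 encodes
   a right shift by 1: shift becomes -1 here, and the variable shift
   helpers interpret a negative count as a right shift. */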
9ee6e8bb
PB
4533 if (size == 3) {
4534 count = q + 1;
4535 } else {
4536 count = q ? 4: 2;
4537 }
4538 switch (size) {
4539 case 0:
4540 imm = (uint8_t) shift;
4541 imm |= imm << 8;
4542 imm |= imm << 16;
4543 break;
4544 case 1:
4545 imm = (uint16_t) shift;
4546 imm |= imm << 16;
4547 break;
4548 case 2:
4549 case 3:
4550 imm = shift;
4551 break;
4552 default:
4553 abort();
4554 }
4555
4556 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4557 if (size == 3) {
4558 neon_load_reg64(cpu_V0, rm + pass);
4559 tcg_gen_movi_i64(cpu_V1, imm);
4560 switch (op) {
4561 case 0: /* VSHR */
4562 case 1: /* VSRA */
4563 if (u)
4564 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4565 else
ad69471c 4566 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4567 break;
ad69471c
PB
4568 case 2: /* VRSHR */
4569 case 3: /* VRSRA */
4570 if (u)
4571 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4572 else
ad69471c 4573 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4574 break;
ad69471c
PB
4575 case 4: /* VSRI */
4576 if (!u)
4577 return 1;
4578 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4579 break;
4580 case 5: /* VSHL, VSLI */
4581 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4582 break;
4583 case 6: /* VQSHL */
4584 if (u)
4585 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4586 else
ad69471c
PB
4587 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4588 break;
4589 case 7: /* VQSHLU */
4590 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4591 break;
9ee6e8bb 4592 }
ad69471c
PB
4593 if (op == 1 || op == 3) {
4594 /* Accumulate. */
4595 neon_load_reg64(cpu_V0, rd + pass);
4596 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4597 } else if (op == 4 || (op == 5 && u)) {
4598 /* Insert */
4599 cpu_abort(env, "VS[LR]I.64 not implemented");
4600 }
4601 neon_store_reg64(cpu_V0, rd + pass);
4602 } else { /* size < 3 */
4603 /* Operands in T0 and T1. */
dd8fbd78
FN
4604 tmp = neon_load_reg(rm, pass);
4605 tmp2 = new_tmp();
4606 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4607 switch (op) {
4608 case 0: /* VSHR */
4609 case 1: /* VSRA */
4610 GEN_NEON_INTEGER_OP(shl);
4611 break;
4612 case 2: /* VRSHR */
4613 case 3: /* VRSRA */
4614 GEN_NEON_INTEGER_OP(rshl);
4615 break;
4616 case 4: /* VSRI */
4617 if (!u)
4618 return 1;
4619 GEN_NEON_INTEGER_OP(shl);
4620 break;
4621 case 5: /* VSHL, VSLI */
4622 switch (size) {
dd8fbd78
FN
4623 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4624 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4625 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
ad69471c
PB
4626 default: return 1;
4627 }
4628 break;
4629 case 6: /* VQSHL */
4630 GEN_NEON_INTEGER_OP_ENV(qshl);
4631 break;
4632 case 7: /* VQSHLU */
4633 switch (size) {
dd8fbd78
FN
4634 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4635 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4636 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
ad69471c
PB
4637 default: return 1;
4638 }
4639 break;
4640 }
dd8fbd78 4641 dead_tmp(tmp2);
ad69471c
PB
4642
4643 if (op == 1 || op == 3) {
4644 /* Accumulate. */
dd8fbd78
FN
4645 tmp2 = neon_load_reg(rd, pass);
4646 gen_neon_add(size, tmp2, tmp);
4647 dead_tmp(tmp2);
ad69471c
PB
4648 } else if (op == 4 || (op == 5 && u)) {
4649 /* Insert */
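/* imm is the mask of destination bits supplied by the shifted value;
   the remaining bits of the destination are preserved below via
   tmp2 & ~imm. */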
4650 switch (size) {
4651 case 0:
4652 if (op == 4)
4653 imm = 0xff >> -shift;
4654 else
4655 imm = (uint8_t)(0xff << shift);
4656 imm |= imm << 8;
4657 imm |= imm << 16;
4658 break;
4659 case 1:
4660 if (op == 4)
4661 imm = 0xffff >> -shift;
4662 else
4663 imm = (uint16_t)(0xffff << shift);
4664 imm |= imm << 16;
4665 break;
4666 case 2:
4667 if (op == 4)
4668 imm = 0xffffffffu >> -shift;
4669 else
4670 imm = 0xffffffffu << shift;
4671 break;
4672 default:
4673 abort();
4674 }
dd8fbd78
FN
4675 tmp2 = neon_load_reg(rd, pass);
4676 tcg_gen_andi_i32(tmp, tmp, imm);
4677 tcg_gen_andi_i32(tmp2, tmp2, ~imm);
4678 tcg_gen_or_i32(tmp, tmp, tmp2);
4679 dead_tmp(tmp2);
ad69471c 4680 }
dd8fbd78 4681 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4682 }
4683 } /* for pass */
4684 } else if (op < 10) {
ad69471c 4685 /* Shift by immediate and narrow:
9ee6e8bb
PB
4686 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4687 shift = shift - (1 << (size + 3));
4688 size++;
9ee6e8bb
PB
4689 switch (size) {
4690 case 1:
ad69471c 4691 imm = (uint16_t)shift;
9ee6e8bb 4692 imm |= imm << 16;
ad69471c 4693 tmp2 = tcg_const_i32(imm);
a7812ae4 4694 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4695 break;
4696 case 2:
ad69471c
PB
4697 imm = (uint32_t)shift;
4698 tmp2 = tcg_const_i32(imm);
a7812ae4 4699 TCGV_UNUSED_I64(tmp64);
4cc633c3 4700 break;
9ee6e8bb 4701 case 3:
a7812ae4
PB
4702 tmp64 = tcg_const_i64(shift);
4703 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4704 break;
4705 default:
4706 abort();
4707 }
4708
ad69471c
PB
4709 for (pass = 0; pass < 2; pass++) {
4710 if (size == 3) {
4711 neon_load_reg64(cpu_V0, rm + pass);
4712 if (q) {
4713 if (u)
a7812ae4 4714 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4715 else
a7812ae4 4716 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4717 } else {
4718 if (u)
a7812ae4 4719 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4720 else
a7812ae4 4721 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4722 }
2c0262af 4723 } else {
ad69471c
PB
4724 tmp = neon_load_reg(rm + pass, 0);
4725 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4726 tmp3 = neon_load_reg(rm + pass, 1);
4727 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4728 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4729 dead_tmp(tmp);
36aa55dc 4730 dead_tmp(tmp3);
9ee6e8bb 4731 }
ad69471c
PB
4732 tmp = new_tmp();
4733 if (op == 8 && !u) {
4734 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4735 } else {
ad69471c
PB
4736 if (op == 8)
4737 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4738 else
ad69471c
PB
4739 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4740 }
4741 if (pass == 0) {
4742 tmp2 = tmp;
4743 } else {
4744 neon_store_reg(rd, 0, tmp2);
4745 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4746 }
4747 } /* for pass */
4748 } else if (op == 10) {
4749 /* VSHLL */
ad69471c 4750 if (q || size == 3)
9ee6e8bb 4751 return 1;
ad69471c
PB
4752 tmp = neon_load_reg(rm, 0);
4753 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4754 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4755 if (pass == 1)
4756 tmp = tmp2;
4757
4758 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4759
9ee6e8bb
PB
4760 if (shift != 0) {
4761 /* The shift is less than the width of the source
ad69471c
PB
4762 type, so we can just shift the whole register. */
4763 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4764 if (size < 2 || !u) {
4765 uint64_t imm64;
4766 if (size == 0) {
4767 imm = (0xffu >> (8 - shift));
4768 imm |= imm << 16;
4769 } else {
4770 imm = 0xffff >> (16 - shift);
9ee6e8bb 4771 }
ad69471c
PB
4772 imm64 = imm | (((uint64_t)imm) << 32);
4773 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4774 }
4775 }
ad69471c 4776 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4777 }
4778 } else if (op == 15 || op == 16) {
4779 /* VCVT fixed-point. */
4780 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4781 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4782 if (op & 1) {
4783 if (u)
4373f3ce 4784 gen_vfp_ulto(0, shift);
9ee6e8bb 4785 else
4373f3ce 4786 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4787 } else {
4788 if (u)
4373f3ce 4789 gen_vfp_toul(0, shift);
9ee6e8bb 4790 else
4373f3ce 4791 gen_vfp_tosl(0, shift);
2c0262af 4792 }
4373f3ce 4793 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4794 }
4795 } else {
9ee6e8bb
PB
4796 return 1;
4797 }
4798 } else { /* (insn & 0x00380080) == 0 */
4799 int invert;
4800
4801 op = (insn >> 8) & 0xf;
4802 /* One register and immediate. */
4803 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4804 invert = (insn & (1 << 5)) != 0;
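/* Expand the 8-bit immediate according to cmode (held in op); the
   "invert" bit distinguishes the VMOV/VORR encodings from the
   VMVN/VBIC ones. */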
4805 switch (op) {
4806 case 0: case 1:
4807 /* no-op */
4808 break;
4809 case 2: case 3:
4810 imm <<= 8;
4811 break;
4812 case 4: case 5:
4813 imm <<= 16;
4814 break;
4815 case 6: case 7:
4816 imm <<= 24;
4817 break;
4818 case 8: case 9:
4819 imm |= imm << 16;
4820 break;
4821 case 10: case 11:
4822 imm = (imm << 8) | (imm << 24);
4823 break;
4824 case 12:
4825 imm = (imm << 8) | 0xff;
4826 break;
4827 case 13:
4828 imm = (imm << 16) | 0xffff;
4829 break;
4830 case 14:
4831 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4832 if (invert)
4833 imm = ~imm;
4834 break;
4835 case 15:
4836 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4837 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4838 break;
4839 }
4840 if (invert)
4841 imm = ~imm;
4842
9ee6e8bb
PB
4843 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4844 if (op & 1 && op < 12) {
ad69471c 4845 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4846 if (invert) {
4847 /* The immediate value has already been inverted, so
4848 BIC becomes AND. */
ad69471c 4849 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4850 } else {
ad69471c 4851 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4852 }
9ee6e8bb 4853 } else {
ad69471c
PB
4854 /* VMOV, VMVN. */
4855 tmp = new_tmp();
9ee6e8bb 4856 if (op == 14 && invert) {
ad69471c
PB
4857 uint32_t val;
4858 val = 0;
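/* cmode 1110 with invert set: each bit of the 8-bit immediate is
   replicated into a whole byte; build the 32-bit chunk for this pass. */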
9ee6e8bb
PB
4859 for (n = 0; n < 4; n++) {
4860 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4861 val |= 0xff << (n * 8);
9ee6e8bb 4862 }
ad69471c
PB
4863 tcg_gen_movi_i32(tmp, val);
4864 } else {
4865 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4866 }
9ee6e8bb 4867 }
ad69471c 4868 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4869 }
4870 }
e4b3861d 4871 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4872 if (size != 3) {
4873 op = (insn >> 8) & 0xf;
4874 if ((insn & (1 << 6)) == 0) {
4875 /* Three registers of different lengths. */
4876 int src1_wide;
4877 int src2_wide;
4878 int prewiden;
4879 /* prewiden, src1_wide, src2_wide */
4880 static const int neon_3reg_wide[16][3] = {
4881 {1, 0, 0}, /* VADDL */
4882 {1, 1, 0}, /* VADDW */
4883 {1, 0, 0}, /* VSUBL */
4884 {1, 1, 0}, /* VSUBW */
4885 {0, 1, 1}, /* VADDHN */
4886 {0, 0, 0}, /* VABAL */
4887 {0, 1, 1}, /* VSUBHN */
4888 {0, 0, 0}, /* VABDL */
4889 {0, 0, 0}, /* VMLAL */
4890 {0, 0, 0}, /* VQDMLAL */
4891 {0, 0, 0}, /* VMLSL */
4892 {0, 0, 0}, /* VQDMLSL */
4893 {0, 0, 0}, /* Integer VMULL */
4894 {0, 0, 0}, /* VQDMULL */
4895 {0, 0, 0} /* Polynomial VMULL */
4896 };
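/* prewiden: the narrow (D) source is widened element by element before
   the op; srcN_wide: operand N is already a wide (Q) operand, read
   64 bits per pass. */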
4897
4898 prewiden = neon_3reg_wide[op][0];
4899 src1_wide = neon_3reg_wide[op][1];
4900 src2_wide = neon_3reg_wide[op][2];
4901
ad69471c
PB
4902 if (size == 0 && (op == 9 || op == 11 || op == 13))
4903 return 1;
4904
9ee6e8bb
PB
4905 /* Avoid overlapping operands. Wide source operands are
4906 always aligned so will never overlap with wide
4907 destinations in problematic ways. */
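/* When the narrow destination overlaps a narrow source, stash the second
   half of that source in scratch slot 2 so that writing pass 0 of rd
   cannot clobber it before pass 1 reads it. */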
8f8e3aa4 4908 if (rd == rm && !src2_wide) {
dd8fbd78
FN
4909 tmp = neon_load_reg(rm, 1);
4910 neon_store_scratch(2, tmp);
8f8e3aa4 4911 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
4912 tmp = neon_load_reg(rn, 1);
4913 neon_store_scratch(2, tmp);
9ee6e8bb 4914 }
a50f5b91 4915 TCGV_UNUSED(tmp3);
9ee6e8bb 4916 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4917 if (src1_wide) {
4918 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4919 TCGV_UNUSED(tmp);
9ee6e8bb 4920 } else {
ad69471c 4921 if (pass == 1 && rd == rn) {
dd8fbd78 4922 tmp = neon_load_scratch(2);
9ee6e8bb 4923 } else {
ad69471c
PB
4924 tmp = neon_load_reg(rn, pass);
4925 }
4926 if (prewiden) {
4927 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4928 }
4929 }
ad69471c
PB
4930 if (src2_wide) {
4931 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4932 TCGV_UNUSED(tmp2);
9ee6e8bb 4933 } else {
ad69471c 4934 if (pass == 1 && rd == rm) {
dd8fbd78 4935 tmp2 = neon_load_scratch(2);
9ee6e8bb 4936 } else {
ad69471c
PB
4937 tmp2 = neon_load_reg(rm, pass);
4938 }
4939 if (prewiden) {
4940 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4941 }
9ee6e8bb
PB
4942 }
4943 switch (op) {
4944 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4945 gen_neon_addl(size);
9ee6e8bb
PB
4946 break;
4947 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 4948 gen_neon_subl(size);
9ee6e8bb
PB
4949 break;
4950 case 5: case 7: /* VABAL, VABDL */
4951 switch ((size << 1) | u) {
ad69471c
PB
4952 case 0:
4953 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4954 break;
4955 case 1:
4956 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4957 break;
4958 case 2:
4959 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4960 break;
4961 case 3:
4962 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4963 break;
4964 case 4:
4965 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4966 break;
4967 case 5:
4968 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4969 break;
9ee6e8bb
PB
4970 default: abort();
4971 }
ad69471c
PB
4972 dead_tmp(tmp2);
4973 dead_tmp(tmp);
9ee6e8bb
PB
4974 break;
4975 case 8: case 9: case 10: case 11: case 12: case 13:
4976 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4977 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78
FN
4978 dead_tmp(tmp2);
4979 dead_tmp(tmp);
9ee6e8bb
PB
4980 break;
4981 case 14: /* Polynomial VMULL */
4982 cpu_abort(env, "Polynomial VMULL not implemented");
4983
4984 default: /* 15 is RESERVED. */
4985 return 1;
4986 }
4987 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4988 /* Accumulate. */
4989 if (op == 10 || op == 11) {
ad69471c 4990 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4991 }
4992
9ee6e8bb 4993 if (op != 13) {
ad69471c 4994 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4995 }
4996
4997 switch (op) {
4998 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4999 gen_neon_addl(size);
9ee6e8bb
PB
5000 break;
5001 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5002 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5003 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5004 break;
9ee6e8bb
PB
5005 /* Fall through. */
5006 case 13: /* VQDMULL */
ad69471c 5007 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5008 break;
5009 default:
5010 abort();
5011 }
ad69471c 5012 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5013 } else if (op == 4 || op == 6) {
5014 /* Narrowing operation. */
ad69471c 5015 tmp = new_tmp();
9ee6e8bb
PB
5016 if (u) {
5017 switch (size) {
ad69471c
PB
5018 case 0:
5019 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5020 break;
5021 case 1:
5022 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5023 break;
5024 case 2:
5025 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5026 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5027 break;
9ee6e8bb
PB
5028 default: abort();
5029 }
5030 } else {
5031 switch (size) {
ad69471c
PB
5032 case 0:
5033 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5034 break;
5035 case 1:
5036 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5037 break;
5038 case 2:
5039 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5040 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5041 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5042 break;
9ee6e8bb
PB
5043 default: abort();
5044 }
5045 }
ad69471c
PB
5046 if (pass == 0) {
5047 tmp3 = tmp;
5048 } else {
5049 neon_store_reg(rd, 0, tmp3);
5050 neon_store_reg(rd, 1, tmp);
5051 }
9ee6e8bb
PB
5052 } else {
5053 /* Write back the result. */
ad69471c 5054 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5055 }
5056 }
5057 } else {
5058 /* Two registers and a scalar. */
5059 switch (op) {
5060 case 0: /* Integer VMLA scalar */
5061 case 1: /* Float VMLA scalar */
5062 case 4: /* Integer VMLS scalar */
5063 case 5: /* Floating point VMLS scalar */
5064 case 8: /* Integer VMUL scalar */
5065 case 9: /* Floating point VMUL scalar */
5066 case 12: /* VQDMULH scalar */
5067 case 13: /* VQRDMULH scalar */
dd8fbd78
FN
5068 tmp = neon_get_scalar(size, rm);
5069 neon_store_scratch(0, tmp);
9ee6e8bb 5070 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5071 tmp = neon_load_scratch(0);
5072 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5073 if (op == 12) {
5074 if (size == 1) {
dd8fbd78 5075 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5076 } else {
dd8fbd78 5077 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5078 }
5079 } else if (op == 13) {
5080 if (size == 1) {
dd8fbd78 5081 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5082 } else {
dd8fbd78 5083 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5084 }
5085 } else if (op & 1) {
dd8fbd78 5086 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5087 } else {
5088 switch (size) {
dd8fbd78
FN
5089 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5090 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5091 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5092 default: return 1;
5093 }
5094 }
dd8fbd78 5095 dead_tmp(tmp2);
9ee6e8bb
PB
5096 if (op < 8) {
5097 /* Accumulate. */
dd8fbd78 5098 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5099 switch (op) {
5100 case 0:
dd8fbd78 5101 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5102 break;
5103 case 1:
dd8fbd78 5104 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5105 break;
5106 case 4:
dd8fbd78 5107 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5108 break;
5109 case 5:
dd8fbd78 5110 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5111 break;
5112 default:
5113 abort();
5114 }
dd8fbd78 5115 dead_tmp(tmp2);
9ee6e8bb 5116 }
dd8fbd78 5117 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5118 }
5119 break;
5120 case 2: /* VMLAL scalar */
5121 case 3: /* VQDMLAL scalar */
5122 case 6: /* VMLSL scalar */
5123 case 7: /* VQDMLSL scalar */
5124 case 10: /* VMULL scalar */
5125 case 11: /* VQDMULL scalar */
ad69471c
PB
5126 if (size == 0 && (op == 3 || op == 7 || op == 11))
5127 return 1;
5128
dd8fbd78
FN
5129 tmp2 = neon_get_scalar(size, rm);
5130 tmp3 = neon_load_reg(rn, 1);
ad69471c 5131
9ee6e8bb 5132 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5133 if (pass == 0) {
5134 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5135 } else {
dd8fbd78 5136 tmp = tmp3;
9ee6e8bb 5137 }
ad69471c 5138 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
dd8fbd78 5139 dead_tmp(tmp);
9ee6e8bb 5140 if (op == 6 || op == 7) {
ad69471c
PB
5141 gen_neon_negl(cpu_V0, size);
5142 }
5143 if (op != 11) {
5144 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5145 }
9ee6e8bb
PB
5146 switch (op) {
5147 case 2: case 6:
ad69471c 5148 gen_neon_addl(size);
9ee6e8bb
PB
5149 break;
5150 case 3: case 7:
ad69471c
PB
5151 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5152 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5153 break;
5154 case 10:
5155 /* no-op */
5156 break;
5157 case 11:
ad69471c 5158 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5159 break;
5160 default:
5161 abort();
5162 }
ad69471c 5163 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5164 }
dd8fbd78
FN
5165
5166 dead_tmp(tmp2);
5167
9ee6e8bb
PB
5168 break;
5169 default: /* 14 and 15 are RESERVED */
5170 return 1;
5171 }
5172 }
5173 } else { /* size == 3 */
5174 if (!u) {
5175 /* Extract. */
9ee6e8bb 5176 imm = (insn >> 8) & 0xf;
ad69471c
PB
5177 count = q + 1;
5178
5179 if (imm > 7 && !q)
5180 return 1;
5181
5182 if (imm == 0) {
5183 neon_load_reg64(cpu_V0, rn);
5184 if (q) {
5185 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5186 }
ad69471c
PB
5187 } else if (imm == 8) {
5188 neon_load_reg64(cpu_V0, rn + 1);
5189 if (q) {
5190 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5191 }
ad69471c 5192 } else if (q) {
a7812ae4 5193 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5194 if (imm < 8) {
5195 neon_load_reg64(cpu_V0, rn);
a7812ae4 5196 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5197 } else {
5198 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5199 neon_load_reg64(tmp64, rm);
ad69471c
PB
5200 }
5201 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5202 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5203 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5204 if (imm < 8) {
5205 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5206 } else {
ad69471c
PB
5207 neon_load_reg64(cpu_V1, rm + 1);
5208 imm -= 8;
9ee6e8bb 5209 }
ad69471c 5210 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5211 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5212 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
ad69471c 5213 } else {
a7812ae4 5214 /* BUGFIX */
ad69471c 5215 neon_load_reg64(cpu_V0, rn);
a7812ae4 5216 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5217 neon_load_reg64(cpu_V1, rm);
a7812ae4 5218 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5219 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5220 }
5221 neon_store_reg64(cpu_V0, rd);
5222 if (q) {
5223 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5224 }
5225 } else if ((insn & (1 << 11)) == 0) {
5226 /* Two register misc. */
5227 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5228 size = (insn >> 18) & 3;
5229 switch (op) {
5230 case 0: /* VREV64 */
5231 if (size == 3)
5232 return 1;
5233 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5234 tmp = neon_load_reg(rm, pass * 2);
5235 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5236 switch (size) {
dd8fbd78
FN
5237 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5238 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5239 case 2: /* no-op */ break;
5240 default: abort();
5241 }
dd8fbd78 5242 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5243 if (size == 2) {
dd8fbd78 5244 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5245 } else {
9ee6e8bb 5246 switch (size) {
dd8fbd78
FN
5247 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5248 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5249 default: abort();
5250 }
dd8fbd78 5251 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5252 }
5253 }
5254 break;
5255 case 4: case 5: /* VPADDL */
5256 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5257 if (size == 3)
5258 return 1;
ad69471c
PB
5259 for (pass = 0; pass < q + 1; pass++) {
5260 tmp = neon_load_reg(rm, pass * 2);
5261 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5262 tmp = neon_load_reg(rm, pass * 2 + 1);
5263 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5264 switch (size) {
5265 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5266 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5267 case 2: tcg_gen_add_i64(CPU_V001); break;
5268 default: abort();
5269 }
9ee6e8bb
PB
5270 if (op >= 12) {
5271 /* Accumulate. */
ad69471c
PB
5272 neon_load_reg64(cpu_V1, rd + pass);
5273 gen_neon_addl(size);
9ee6e8bb 5274 }
ad69471c 5275 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5276 }
5277 break;
5278 case 33: /* VTRN */
5279 if (size == 2) {
5280 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5281 tmp = neon_load_reg(rm, n);
5282 tmp2 = neon_load_reg(rd, n + 1);
5283 neon_store_reg(rm, n, tmp2);
5284 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5285 }
5286 } else {
5287 goto elementwise;
5288 }
5289 break;
5290 case 34: /* VUZP */
5291 /* Reg Before After
5292 Rd A3 A2 A1 A0 B2 B0 A2 A0
5293 Rm B3 B2 B1 B0 B3 B1 A3 A1
5294 */
5295 if (size == 3)
5296 return 1;
5297 gen_neon_unzip(rd, q, 0, size);
5298 gen_neon_unzip(rm, q, 4, size);
5299 if (q) {
5300 static int unzip_order_q[8] =
5301 {0, 2, 4, 6, 1, 3, 5, 7};
5302 for (n = 0; n < 8; n++) {
5303 int reg = (n < 4) ? rd : rm;
dd8fbd78
FN
5304 tmp = neon_load_scratch(unzip_order_q[n]);
5305 neon_store_reg(reg, n % 4, tmp);
9ee6e8bb
PB
5306 }
5307 } else {
5308 static int unzip_order[4] =
5309 {0, 4, 1, 5};
5310 for (n = 0; n < 4; n++) {
5311 int reg = (n < 2) ? rd : rm;
dd8fbd78
FN
5312 tmp = neon_load_scratch(unzip_order[n]);
5313 neon_store_reg(reg, n % 2, tmp);
9ee6e8bb
PB
5314 }
5315 }
5316 break;
5317 case 35: /* VZIP */
5318 /* Reg Before After
5319 Rd A3 A2 A1 A0 B1 A1 B0 A0
5320 Rm B3 B2 B1 B0 B3 A3 B2 A2
5321 */
5322 if (size == 3)
5323 return 1;
5324 count = (q ? 4 : 2);
5325 for (n = 0; n < count; n++) {
dd8fbd78
FN
5326 tmp = neon_load_reg(rd, n);
5327 tmp2 = neon_load_reg(rm, n);
9ee6e8bb 5328 switch (size) {
dd8fbd78
FN
5329 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5330 case 1: gen_neon_zip_u16(tmp, tmp2); break;
9ee6e8bb
PB
5331 case 2: /* no-op */; break;
5332 default: abort();
5333 }
dd8fbd78
FN
5334 neon_store_scratch(n * 2, tmp);
5335 neon_store_scratch(n * 2 + 1, tmp2);
9ee6e8bb
PB
5336 }
5337 for (n = 0; n < count * 2; n++) {
5338 int reg = (n < count) ? rd : rm;
dd8fbd78
FN
5339 tmp = neon_load_scratch(n);
5340 neon_store_reg(reg, n % count, tmp);
9ee6e8bb
PB
5341 }
5342 break;
5343 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5344 if (size == 3)
5345 return 1;
a50f5b91 5346 TCGV_UNUSED(tmp2);
9ee6e8bb 5347 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5348 neon_load_reg64(cpu_V0, rm + pass);
5349 tmp = new_tmp();
9ee6e8bb 5350 if (op == 36 && q == 0) {
ad69471c 5351 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5352 } else if (q) {
ad69471c 5353 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5354 } else {
ad69471c
PB
5355 gen_neon_narrow_sats(size, tmp, cpu_V0);
5356 }
5357 if (pass == 0) {
5358 tmp2 = tmp;
5359 } else {
5360 neon_store_reg(rd, 0, tmp2);
5361 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5362 }
9ee6e8bb
PB
5363 }
5364 break;
5365 case 38: /* VSHLL */
ad69471c 5366 if (q || size == 3)
9ee6e8bb 5367 return 1;
ad69471c
PB
5368 tmp = neon_load_reg(rm, 0);
5369 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5370 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5371 if (pass == 1)
5372 tmp = tmp2;
5373 gen_neon_widen(cpu_V0, tmp, size, 1);
5374 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5375 }
5376 break;
5377 default:
5378 elementwise:
5379 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5380 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5381 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5382 neon_reg_offset(rm, pass));
dd8fbd78 5383 TCGV_UNUSED(tmp);
9ee6e8bb 5384 } else {
dd8fbd78 5385 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5386 }
5387 switch (op) {
5388 case 1: /* VREV32 */
5389 switch (size) {
dd8fbd78
FN
5390 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5391 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5392 default: return 1;
5393 }
5394 break;
5395 case 2: /* VREV16 */
5396 if (size != 0)
5397 return 1;
dd8fbd78 5398 gen_rev16(tmp);
9ee6e8bb 5399 break;
9ee6e8bb
PB
5400 case 8: /* CLS */
5401 switch (size) {
dd8fbd78
FN
5402 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5403 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5404 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5405 default: return 1;
5406 }
5407 break;
5408 case 9: /* CLZ */
5409 switch (size) {
dd8fbd78
FN
5410 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5411 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5412 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5413 default: return 1;
5414 }
5415 break;
5416 case 10: /* CNT */
5417 if (size != 0)
5418 return 1;
dd8fbd78 5419 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5420 break;
5421 case 11: /* VNOT */
5422 if (size != 0)
5423 return 1;
dd8fbd78 5424 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5425 break;
5426 case 14: /* VQABS */
5427 switch (size) {
dd8fbd78
FN
5428 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5429 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5430 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5431 default: return 1;
5432 }
5433 break;
5434 case 15: /* VQNEG */
5435 switch (size) {
dd8fbd78
FN
5436 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5437 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5438 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
9ee6e8bb
PB
5439 default: return 1;
5440 }
5441 break;
5442 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5443 tmp2 = tcg_const_i32(0);
9ee6e8bb 5444 switch(size) {
dd8fbd78
FN
5445 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5446 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5447 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5448 default: return 1;
5449 }
dd8fbd78 5450 tcg_temp_free(tmp2);
9ee6e8bb 5451 if (op == 19)
dd8fbd78 5452 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5453 break;
5454 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5455 tmp2 = tcg_const_i32(0);
9ee6e8bb 5456 switch(size) {
dd8fbd78
FN
5457 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5458 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5459 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5460 default: return 1;
5461 }
dd8fbd78 5462 tcg_temp_free(tmp2);
9ee6e8bb 5463 if (op == 20)
dd8fbd78 5464 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5465 break;
5466 case 18: /* VCEQ #0 */
dd8fbd78 5467 tmp2 = tcg_const_i32(0);
9ee6e8bb 5468 switch(size) {
dd8fbd78
FN
5469 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5470 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5471 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5472 default: return 1;
5473 }
dd8fbd78 5474 tcg_temp_free(tmp2);
9ee6e8bb
PB
5475 break;
5476 case 22: /* VABS */
5477 switch(size) {
dd8fbd78
FN
5478 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5479 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5480 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5481 default: return 1;
5482 }
5483 break;
5484 case 23: /* VNEG */
ad69471c
PB
5485 if (size == 3)
5486 return 1;
dd8fbd78
FN
5487 tmp2 = tcg_const_i32(0);
5488 gen_neon_rsb(size, tmp, tmp2);
5489 tcg_temp_free(tmp2);
9ee6e8bb
PB
5490 break;
5491 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
dd8fbd78
FN
5492 tmp2 = tcg_const_i32(0);
5493 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5494 tcg_temp_free(tmp2);
9ee6e8bb 5495 if (op == 27)
dd8fbd78 5496 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5497 break;
5498 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
dd8fbd78
FN
5499 tmp2 = tcg_const_i32(0);
5500 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5501 tcg_temp_free(tmp2);
9ee6e8bb 5502 if (op == 28)
dd8fbd78 5503 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5504 break;
5505 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5506 tmp2 = tcg_const_i32(0);
5507 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5508 tcg_temp_free(tmp2);
9ee6e8bb
PB
5509 break;
5510 case 30: /* Float VABS */
4373f3ce 5511 gen_vfp_abs(0);
9ee6e8bb
PB
5512 break;
5513 case 31: /* Float VNEG */
4373f3ce 5514 gen_vfp_neg(0);
9ee6e8bb
PB
5515 break;
5516 case 32: /* VSWP */
dd8fbd78
FN
5517 tmp2 = neon_load_reg(rd, pass);
5518 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5519 break;
5520 case 33: /* VTRN */
dd8fbd78 5521 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5522 switch (size) {
dd8fbd78
FN
5523 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5524 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5525 case 2: abort();
5526 default: return 1;
5527 }
dd8fbd78 5528 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5529 break;
5530 case 56: /* Integer VRECPE */
dd8fbd78 5531 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5532 break;
5533 case 57: /* Integer VRSQRTE */
dd8fbd78 5534 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5535 break;
5536 case 58: /* Float VRECPE */
4373f3ce 5537 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5538 break;
5539 case 59: /* Float VRSQRTE */
4373f3ce 5540 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5541 break;
5542 case 60: /* VCVT.F32.S32 */
4373f3ce 5543 gen_vfp_tosiz(0);
9ee6e8bb
PB
5544 break;
5545 case 61: /* VCVT.F32.U32 */
4373f3ce 5546 gen_vfp_touiz(0);
9ee6e8bb
PB
5547 break;
5548 case 62: /* VCVT.S32.F32 */
4373f3ce 5549 gen_vfp_sito(0);
9ee6e8bb
PB
5550 break;
5551 case 63: /* VCVT.U32.F32 */
4373f3ce 5552 gen_vfp_uito(0);
9ee6e8bb
PB
5553 break;
5554 default:
5555 /* Reserved: 21, 29, 39-56 */
5556 return 1;
5557 }
5558 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5559 tcg_gen_st_f32(cpu_F0s, cpu_env,
5560 neon_reg_offset(rd, pass));
9ee6e8bb 5561 } else {
dd8fbd78 5562 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5563 }
5564 }
5565 break;
5566 }
5567 } else if ((insn & (1 << 10)) == 0) {
5568 /* VTBL, VTBX. */
3018f259 5569 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5570 if (insn & (1 << 6)) {
8f8e3aa4 5571 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5572 } else {
8f8e3aa4
PB
5573 tmp = new_tmp();
5574 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5575 }
8f8e3aa4
PB
5576 tmp2 = neon_load_reg(rm, 0);
5577 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5578 tcg_const_i32(n));
3018f259 5579 dead_tmp(tmp);
9ee6e8bb 5580 if (insn & (1 << 6)) {
8f8e3aa4 5581 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5582 } else {
8f8e3aa4
PB
5583 tmp = new_tmp();
5584 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5585 }
8f8e3aa4
PB
5586 tmp3 = neon_load_reg(rm, 1);
5587 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5588 tcg_const_i32(n));
5589 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5590 neon_store_reg(rd, 1, tmp3);
5591 dead_tmp(tmp);
9ee6e8bb
PB
5592 } else if ((insn & 0x380) == 0) {
5593 /* VDUP */
5594 if (insn & (1 << 19)) {
dd8fbd78 5595 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5596 } else {
dd8fbd78 5597 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5598 }
5599 if (insn & (1 << 16)) {
dd8fbd78 5600 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5601 } else if (insn & (1 << 17)) {
5602 if ((insn >> 18) & 1)
dd8fbd78 5603 gen_neon_dup_high16(tmp);
9ee6e8bb 5604 else
dd8fbd78 5605 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5606 }
5607 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5608 tmp2 = new_tmp();
5609 tcg_gen_mov_i32(tmp2, tmp);
5610 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5611 }
dd8fbd78 5612 dead_tmp(tmp);
9ee6e8bb
PB
5613 } else {
5614 return 1;
5615 }
5616 }
5617 }
5618 return 0;
5619}
5620
fe1479c3
PB
5621static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5622{
5623 int crn = (insn >> 16) & 0xf;
5624 int crm = insn & 0xf;
5625 int op1 = (insn >> 21) & 7;
5626 int op2 = (insn >> 5) & 7;
5627 int rt = (insn >> 12) & 0xf;
5628 TCGv tmp;
5629
5630 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5631 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5632 /* TEECR */
5633 if (IS_USER(s))
5634 return 1;
5635 tmp = load_cpu_field(teecr);
5636 store_reg(s, rt, tmp);
5637 return 0;
5638 }
5639 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5640 /* TEEHBR */
5641 if (IS_USER(s) && (env->teecr & 1))
5642 return 1;
5643 tmp = load_cpu_field(teehbr);
5644 store_reg(s, rt, tmp);
5645 return 0;
5646 }
5647 }
5648 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5649 op1, crn, crm, op2);
5650 return 1;
5651}
5652
5653static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5654{
5655 int crn = (insn >> 16) & 0xf;
5656 int crm = insn & 0xf;
5657 int op1 = (insn >> 21) & 7;
5658 int op2 = (insn >> 5) & 7;
5659 int rt = (insn >> 12) & 0xf;
5660 TCGv tmp;
5661
5662 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5663 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5664 /* TEECR */
5665 if (IS_USER(s))
5666 return 1;
5667 tmp = load_reg(s, rt);
5668 gen_helper_set_teecr(cpu_env, tmp);
5669 dead_tmp(tmp);
5670 return 0;
5671 }
5672 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5673 /* TEEHBR */
5674 if (IS_USER(s) && (env->teecr & 1))
5675 return 1;
5676 tmp = load_reg(s, rt);
5677 store_cpu_field(tmp, teehbr);
5678 return 0;
5679 }
5680 }
5681 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5682 op1, crn, crm, op2);
5683 return 1;
5684}
5685
9ee6e8bb
PB
5686static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5687{
5688 int cpnum;
5689
5690 cpnum = (insn >> 8) & 0xf;
5691 if (arm_feature(env, ARM_FEATURE_XSCALE)
5692 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5693 return 1;
5694
5695 switch (cpnum) {
5696 case 0:
5697 case 1:
5698 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5699 return disas_iwmmxt_insn(env, s, insn);
5700 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5701 return disas_dsp_insn(env, s, insn);
5702 }
5703 return 1;
5704 case 10:
5705 case 11:
5706 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5707 case 14:
5708 /* Coprocessors 7-15 are architecturally reserved by ARM.
5709 Unfortunately Intel decided to ignore this. */
5710 if (arm_feature(env, ARM_FEATURE_XSCALE))
5711 goto board;
5712 if (insn & (1 << 20))
5713 return disas_cp14_read(env, s, insn);
5714 else
5715 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5716 case 15:
5717 return disas_cp15_insn (env, s, insn);
5718 default:
fe1479c3 5719 board:
9ee6e8bb
PB
5720 /* Unknown coprocessor. See if the board has hooked it. */
5721 return disas_cp_insn (env, s, insn);
5722 }
5723}
5724
5e3f878a
PB
5725
5726/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5727static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5728{
5729 TCGv tmp;
5730 tmp = new_tmp();
5731 tcg_gen_trunc_i64_i32(tmp, val);
5732 store_reg(s, rlow, tmp);
5733 tmp = new_tmp();
5734 tcg_gen_shri_i64(val, val, 32);
5735 tcg_gen_trunc_i64_i32(tmp, val);
5736 store_reg(s, rhigh, tmp);
5737}
5738
5739/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5740static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5741{
a7812ae4 5742 TCGv_i64 tmp;
5e3f878a
PB
5743 TCGv tmp2;
5744
36aa55dc 5745 /* Load value and extend to 64 bits. */
a7812ae4 5746 tmp = tcg_temp_new_i64();
5e3f878a
PB
5747 tmp2 = load_reg(s, rlow);
5748 tcg_gen_extu_i32_i64(tmp, tmp2);
5749 dead_tmp(tmp2);
5750 tcg_gen_add_i64(val, val, tmp);
5751}
5752
5753/* load and add a 64-bit value from a register pair. */
a7812ae4 5754static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5755{
a7812ae4 5756 TCGv_i64 tmp;
36aa55dc
PB
5757 TCGv tmpl;
5758 TCGv tmph;
5e3f878a
PB
5759
5760 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5761 tmpl = load_reg(s, rlow);
5762 tmph = load_reg(s, rhigh);
a7812ae4 5763 tmp = tcg_temp_new_i64();
36aa55dc
PB
5764 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5765 dead_tmp(tmpl);
5766 dead_tmp(tmph);
5e3f878a
PB
5767 tcg_gen_add_i64(val, val, tmp);
5768}
5769
5770/* Set N and Z flags from a 64-bit value. */
a7812ae4 5771static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5772{
5773 TCGv tmp = new_tmp();
5774 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5775 gen_logic_CC(tmp);
5776 dead_tmp(tmp);
5e3f878a
PB
5777}
5778
9ee6e8bb
PB
5779static void disas_arm_insn(CPUState * env, DisasContext *s)
5780{
5781 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5782 TCGv tmp;
3670669c 5783 TCGv tmp2;
6ddbc6e4 5784 TCGv tmp3;
b0109805 5785 TCGv addr;
a7812ae4 5786 TCGv_i64 tmp64;
9ee6e8bb
PB
5787
5788 insn = ldl_code(s->pc);
5789 s->pc += 4;
5790
5791 /* M variants do not implement ARM mode. */
5792 if (IS_M(env))
5793 goto illegal_op;
5794 cond = insn >> 28;
5795 if (cond == 0xf){
5796 /* Unconditional instructions. */
5797 if (((insn >> 25) & 7) == 1) {
5798 /* NEON Data processing. */
5799 if (!arm_feature(env, ARM_FEATURE_NEON))
5800 goto illegal_op;
5801
5802 if (disas_neon_data_insn(env, s, insn))
5803 goto illegal_op;
5804 return;
5805 }
5806 if ((insn & 0x0f100000) == 0x04000000) {
5807 /* NEON load/store. */
5808 if (!arm_feature(env, ARM_FEATURE_NEON))
5809 goto illegal_op;
5810
5811 if (disas_neon_ls_insn(env, s, insn))
5812 goto illegal_op;
5813 return;
5814 }
5815 if ((insn & 0x0d70f000) == 0x0550f000)
5816 return; /* PLD */
5817 else if ((insn & 0x0ffffdff) == 0x01010000) {
5818 ARCH(6);
5819 /* setend */
5820 if (insn & (1 << 9)) {
5821 /* BE8 mode not implemented. */
5822 goto illegal_op;
5823 }
5824 return;
5825 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5826 switch ((insn >> 4) & 0xf) {
5827 case 1: /* clrex */
5828 ARCH(6K);
8f8e3aa4 5829 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5830 return;
5831 case 4: /* dsb */
5832 case 5: /* dmb */
5833 case 6: /* isb */
5834 ARCH(7);
5835                /* We don't emulate caches, so these are no-ops. */
5836 return;
5837 default:
5838 goto illegal_op;
5839 }
5840 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5841 /* srs */
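            /* SRS stores r14 and the SPSR of the current mode to the stack
               of the mode given by insn[4:0]; bits [24:23] select the
               DA/IA/DB/IB addressing mode, translated into the pre-adjust
               and writeback offsets below. */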
c67b6b71 5842 int32_t offset;
9ee6e8bb
PB
5843 if (IS_USER(s))
5844 goto illegal_op;
5845 ARCH(6);
5846 op1 = (insn & 0x1f);
5847 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5848 addr = load_reg(s, 13);
9ee6e8bb 5849 } else {
b0109805
PB
5850 addr = new_tmp();
5851 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5852 }
5853 i = (insn >> 23) & 3;
5854 switch (i) {
5855 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5856 case 1: offset = 0; break; /* IA */
5857 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
5858 case 3: offset = 4; break; /* IB */
5859 default: abort();
5860 }
5861 if (offset)
b0109805
PB
5862 tcg_gen_addi_i32(addr, addr, offset);
5863 tmp = load_reg(s, 14);
5864 gen_st32(tmp, addr, 0);
c67b6b71 5865 tmp = load_cpu_field(spsr);
b0109805
PB
5866 tcg_gen_addi_i32(addr, addr, 4);
5867 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5868 if (insn & (1 << 21)) {
5869 /* Base writeback. */
5870 switch (i) {
5871 case 0: offset = -8; break;
c67b6b71
FN
5872 case 1: offset = 4; break;
5873 case 2: offset = -4; break;
9ee6e8bb
PB
5874 case 3: offset = 0; break;
5875 default: abort();
5876 }
5877 if (offset)
c67b6b71 5878 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 5879 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 5880 store_reg(s, 13, addr);
9ee6e8bb 5881 } else {
c67b6b71
FN
5882 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
5883 dead_tmp(addr);
9ee6e8bb 5884 }
b0109805
PB
5885 } else {
5886 dead_tmp(addr);
9ee6e8bb
PB
5887 }
5888 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5889 /* rfe */
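            /* RFE loads the return PC and then the saved CPSR from two
               consecutive words at [Rn], using the same DA/IA/DB/IB offset
               scheme as SRS; gen_rfe() writes CPSR and branches. */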
c67b6b71 5890 int32_t offset;
9ee6e8bb
PB
5891 if (IS_USER(s))
5892 goto illegal_op;
5893 ARCH(6);
5894 rn = (insn >> 16) & 0xf;
b0109805 5895 addr = load_reg(s, rn);
9ee6e8bb
PB
5896 i = (insn >> 23) & 3;
5897 switch (i) {
b0109805 5898 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5899 case 1: offset = 0; break; /* IA */
5900 case 2: offset = -8; break; /* DB */
b0109805 5901 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5902 default: abort();
5903 }
5904 if (offset)
b0109805
PB
5905 tcg_gen_addi_i32(addr, addr, offset);
5906 /* Load PC into tmp and CPSR into tmp2. */
5907 tmp = gen_ld32(addr, 0);
5908 tcg_gen_addi_i32(addr, addr, 4);
5909 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5910 if (insn & (1 << 21)) {
5911 /* Base writeback. */
5912 switch (i) {
b0109805 5913 case 0: offset = -8; break;
c67b6b71
FN
5914 case 1: offset = 4; break;
5915 case 2: offset = -4; break;
b0109805 5916 case 3: offset = 0; break;
9ee6e8bb
PB
5917 default: abort();
5918 }
5919 if (offset)
b0109805
PB
5920 tcg_gen_addi_i32(addr, addr, offset);
5921 store_reg(s, rn, addr);
5922 } else {
5923 dead_tmp(addr);
9ee6e8bb 5924 }
b0109805 5925 gen_rfe(s, tmp, tmp2);
c67b6b71 5926 return;
9ee6e8bb
PB
5927 } else if ((insn & 0x0e000000) == 0x0a000000) {
5928 /* branch link and change to thumb (blx <offset>) */
5929 int32_t offset;
5930
5931 val = (uint32_t)s->pc;
d9ba4830
PB
5932 tmp = new_tmp();
5933 tcg_gen_movi_i32(tmp, val);
5934 store_reg(s, 14, tmp);
9ee6e8bb
PB
5935 /* Sign-extend the 24-bit offset */
5936 offset = (((int32_t)insn) << 8) >> 8;
5937 /* offset * 4 + bit24 * 2 + (thumb bit) */
5938 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5939 /* pipeline offset */
5940 val += 4;
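            /* s->pc has already been advanced past this insn, so val ends up
               as insn_addr + 8 + imm24 * 4 + H * 2, with bit 0 set to request
               the switch to Thumb state in gen_bx_im(). */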
d9ba4830 5941 gen_bx_im(s, val);
9ee6e8bb
PB
5942 return;
5943 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5944 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5945 /* iWMMXt register transfer. */
5946 if (env->cp15.c15_cpar & (1 << 1))
5947 if (!disas_iwmmxt_insn(env, s, insn))
5948 return;
5949 }
5950 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5951 /* Coprocessor double register transfer. */
5952 } else if ((insn & 0x0f000010) == 0x0e000010) {
5953 /* Additional coprocessor register transfer. */
7997d92f 5954 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5955 uint32_t mask;
5956 uint32_t val;
5957 /* cps (privileged) */
5958 if (IS_USER(s))
5959 return;
5960 mask = val = 0;
5961 if (insn & (1 << 19)) {
5962 if (insn & (1 << 8))
5963 mask |= CPSR_A;
5964 if (insn & (1 << 7))
5965 mask |= CPSR_I;
5966 if (insn & (1 << 6))
5967 mask |= CPSR_F;
5968 if (insn & (1 << 18))
5969 val |= mask;
5970 }
7997d92f 5971 if (insn & (1 << 17)) {
9ee6e8bb
PB
5972 mask |= CPSR_M;
5973 val |= (insn & 0x1f);
5974 }
5975 if (mask) {
2fbac54b 5976 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
5977 }
5978 return;
5979 }
5980 goto illegal_op;
5981 }
5982 if (cond != 0xe) {
5983 /* if not always execute, we generate a conditional jump to
5984 next instruction */
5985 s->condlabel = gen_new_label();
d9ba4830 5986 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5987 s->condjmp = 1;
5988 }
5989 if ((insn & 0x0f900000) == 0x03000000) {
5990 if ((insn & (1 << 21)) == 0) {
5991 ARCH(6T2);
5992 rd = (insn >> 12) & 0xf;
5993 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5994 if ((insn & (1 << 22)) == 0) {
5995 /* MOVW */
5e3f878a
PB
5996 tmp = new_tmp();
5997 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5998 } else {
5999 /* MOVT */
5e3f878a 6000 tmp = load_reg(s, rd);
86831435 6001 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6002 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6003 }
5e3f878a 6004 store_reg(s, rd, tmp);
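                /* MOVW zero-extends a 16-bit immediate into rd; MOVT keeps the
                   low halfword of rd and replaces the top halfword, so the pair
                   can build an arbitrary 32-bit constant. */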
9ee6e8bb
PB
6005 } else {
6006 if (((insn >> 12) & 0xf) != 0xf)
6007 goto illegal_op;
6008 if (((insn >> 16) & 0xf) == 0) {
6009 gen_nop_hint(s, insn & 0xff);
6010 } else {
6011 /* CPSR = immediate */
6012 val = insn & 0xff;
6013 shift = ((insn >> 8) & 0xf) * 2;
6014 if (shift)
6015 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6016 i = ((insn & (1 << 22)) != 0);
2fbac54b 6017 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6018 goto illegal_op;
6019 }
6020 }
6021 } else if ((insn & 0x0f900000) == 0x01000000
6022 && (insn & 0x00000090) != 0x00000090) {
6023 /* miscellaneous instructions */
6024 op1 = (insn >> 21) & 3;
6025 sh = (insn >> 4) & 0xf;
6026 rm = insn & 0xf;
6027 switch (sh) {
6028 case 0x0: /* move program status register */
6029 if (op1 & 1) {
6030 /* PSR = reg */
2fbac54b 6031 tmp = load_reg(s, rm);
9ee6e8bb 6032 i = ((op1 & 2) != 0);
2fbac54b 6033 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6034 goto illegal_op;
6035 } else {
6036 /* reg = PSR */
6037 rd = (insn >> 12) & 0xf;
6038 if (op1 & 2) {
6039 if (IS_USER(s))
6040 goto illegal_op;
d9ba4830 6041 tmp = load_cpu_field(spsr);
9ee6e8bb 6042 } else {
d9ba4830
PB
6043 tmp = new_tmp();
6044 gen_helper_cpsr_read(tmp);
9ee6e8bb 6045 }
d9ba4830 6046 store_reg(s, rd, tmp);
9ee6e8bb
PB
6047 }
6048 break;
6049 case 0x1:
6050 if (op1 == 1) {
6051 /* branch/exchange thumb (bx). */
d9ba4830
PB
6052 tmp = load_reg(s, rm);
6053 gen_bx(s, tmp);
9ee6e8bb
PB
6054 } else if (op1 == 3) {
6055 /* clz */
6056 rd = (insn >> 12) & 0xf;
1497c961
PB
6057 tmp = load_reg(s, rm);
6058 gen_helper_clz(tmp, tmp);
6059 store_reg(s, rd, tmp);
9ee6e8bb
PB
6060 } else {
6061 goto illegal_op;
6062 }
6063 break;
6064 case 0x2:
6065 if (op1 == 1) {
6066 ARCH(5J); /* bxj */
6067 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6068 tmp = load_reg(s, rm);
6069 gen_bx(s, tmp);
9ee6e8bb
PB
6070 } else {
6071 goto illegal_op;
6072 }
6073 break;
6074 case 0x3:
6075 if (op1 != 1)
6076 goto illegal_op;
6077
6078 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6079 tmp = load_reg(s, rm);
6080 tmp2 = new_tmp();
6081 tcg_gen_movi_i32(tmp2, s->pc);
6082 store_reg(s, 14, tmp2);
6083 gen_bx(s, tmp);
9ee6e8bb
PB
6084 break;
6085 case 0x5: /* saturating add/subtract */
6086 rd = (insn >> 12) & 0xf;
6087 rn = (insn >> 16) & 0xf;
b40d0353 6088 tmp = load_reg(s, rm);
5e3f878a 6089 tmp2 = load_reg(s, rn);
9ee6e8bb 6090 if (op1 & 2)
5e3f878a 6091 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6092 if (op1 & 1)
5e3f878a 6093 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6094 else
5e3f878a
PB
6095 gen_helper_add_saturate(tmp, tmp, tmp2);
6096 dead_tmp(tmp2);
6097 store_reg(s, rd, tmp);
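            /* This covers QADD/QSUB/QDADD/QDSUB: op1 bit 1 doubles (with
               saturation) the rn operand first, op1 bit 0 selects subtract.
               The helpers set the Q flag when saturation occurs. */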
9ee6e8bb
PB
6098 break;
6099 case 7: /* bkpt */
6100 gen_set_condexec(s);
5e3f878a 6101 gen_set_pc_im(s->pc - 4);
d9ba4830 6102 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6103 s->is_jmp = DISAS_JUMP;
6104 break;
6105 case 0x8: /* signed multiply */
6106 case 0xa:
6107 case 0xc:
6108 case 0xe:
6109 rs = (insn >> 8) & 0xf;
6110 rn = (insn >> 12) & 0xf;
6111 rd = (insn >> 16) & 0xf;
6112 if (op1 == 1) {
6113 /* (32 * 16) >> 16 */
5e3f878a
PB
6114 tmp = load_reg(s, rm);
6115 tmp2 = load_reg(s, rs);
9ee6e8bb 6116 if (sh & 4)
5e3f878a 6117 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6118 else
5e3f878a 6119 gen_sxth(tmp2);
a7812ae4
PB
6120 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6121 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6122 tmp = new_tmp();
a7812ae4 6123 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 6124 if ((sh & 2) == 0) {
5e3f878a
PB
6125 tmp2 = load_reg(s, rn);
6126 gen_helper_add_setq(tmp, tmp, tmp2);
6127 dead_tmp(tmp2);
9ee6e8bb 6128 }
5e3f878a 6129 store_reg(s, rd, tmp);
9ee6e8bb
PB
6130 } else {
6131 /* 16 * 16 */
5e3f878a
PB
6132 tmp = load_reg(s, rm);
6133 tmp2 = load_reg(s, rs);
6134 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6135 dead_tmp(tmp2);
9ee6e8bb 6136 if (op1 == 2) {
a7812ae4
PB
6137 tmp64 = tcg_temp_new_i64();
6138 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6139 dead_tmp(tmp);
a7812ae4
PB
6140 gen_addq(s, tmp64, rn, rd);
6141 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6142 } else {
6143 if (op1 == 0) {
5e3f878a
PB
6144 tmp2 = load_reg(s, rn);
6145 gen_helper_add_setq(tmp, tmp, tmp2);
6146 dead_tmp(tmp2);
9ee6e8bb 6147 }
5e3f878a 6148 store_reg(s, rd, tmp);
9ee6e8bb
PB
6149 }
6150 }
6151 break;
6152 default:
6153 goto illegal_op;
6154 }
6155 } else if (((insn & 0x0e000000) == 0 &&
6156 (insn & 0x00000090) != 0x90) ||
6157 ((insn & 0x0e000000) == (1 << 25))) {
6158 int set_cc, logic_cc, shiftop;
6159
6160 op1 = (insn >> 21) & 0xf;
6161 set_cc = (insn >> 20) & 1;
6162 logic_cc = table_logic_cc[op1] & set_cc;
6163
6164 /* data processing instruction */
6165 if (insn & (1 << 25)) {
6166 /* immediate operand */
6167 val = insn & 0xff;
6168 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6169 if (shift) {
9ee6e8bb 6170 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6171 }
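            /* ARM immediates are an 8-bit value rotated right by twice the
               4-bit rotate field; for flag-setting logical ops with a nonzero
               rotate, the shifter carry out is bit 31 of the rotated value,
               hence gen_set_CF_bit31() below. */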
6172 tmp2 = new_tmp();
6173 tcg_gen_movi_i32(tmp2, val);
6174 if (logic_cc && shift) {
6175 gen_set_CF_bit31(tmp2);
6176 }
9ee6e8bb
PB
6177 } else {
6178 /* register */
6179 rm = (insn) & 0xf;
e9bb4aa9 6180 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6181 shiftop = (insn >> 5) & 3;
6182 if (!(insn & (1 << 4))) {
6183 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6184 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6185 } else {
6186 rs = (insn >> 8) & 0xf;
8984bd2e 6187 tmp = load_reg(s, rs);
e9bb4aa9 6188 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6189 }
6190 }
6191 if (op1 != 0x0f && op1 != 0x0d) {
6192 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6193 tmp = load_reg(s, rn);
6194 } else {
6195 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6196 }
6197 rd = (insn >> 12) & 0xf;
6198 switch(op1) {
6199 case 0x00:
e9bb4aa9
JR
6200 tcg_gen_and_i32(tmp, tmp, tmp2);
6201 if (logic_cc) {
6202 gen_logic_CC(tmp);
6203 }
21aeb343 6204 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6205 break;
6206 case 0x01:
e9bb4aa9
JR
6207 tcg_gen_xor_i32(tmp, tmp, tmp2);
6208 if (logic_cc) {
6209 gen_logic_CC(tmp);
6210 }
21aeb343 6211 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6212 break;
6213 case 0x02:
6214 if (set_cc && rd == 15) {
6215 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6216 if (IS_USER(s)) {
9ee6e8bb 6217 goto illegal_op;
e9bb4aa9
JR
6218 }
6219 gen_helper_sub_cc(tmp, tmp, tmp2);
6220 gen_exception_return(s, tmp);
9ee6e8bb 6221 } else {
e9bb4aa9
JR
6222 if (set_cc) {
6223 gen_helper_sub_cc(tmp, tmp, tmp2);
6224 } else {
6225 tcg_gen_sub_i32(tmp, tmp, tmp2);
6226 }
21aeb343 6227 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6228 }
6229 break;
6230 case 0x03:
e9bb4aa9
JR
6231 if (set_cc) {
6232 gen_helper_sub_cc(tmp, tmp2, tmp);
6233 } else {
6234 tcg_gen_sub_i32(tmp, tmp2, tmp);
6235 }
21aeb343 6236 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6237 break;
6238 case 0x04:
e9bb4aa9
JR
6239 if (set_cc) {
6240 gen_helper_add_cc(tmp, tmp, tmp2);
6241 } else {
6242 tcg_gen_add_i32(tmp, tmp, tmp2);
6243 }
21aeb343 6244 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6245 break;
6246 case 0x05:
e9bb4aa9
JR
6247 if (set_cc) {
6248 gen_helper_adc_cc(tmp, tmp, tmp2);
6249 } else {
6250 gen_add_carry(tmp, tmp, tmp2);
6251 }
21aeb343 6252 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6253 break;
6254 case 0x06:
e9bb4aa9
JR
6255 if (set_cc) {
6256 gen_helper_sbc_cc(tmp, tmp, tmp2);
6257 } else {
6258 gen_sub_carry(tmp, tmp, tmp2);
6259 }
21aeb343 6260 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6261 break;
6262 case 0x07:
e9bb4aa9
JR
6263 if (set_cc) {
6264 gen_helper_sbc_cc(tmp, tmp2, tmp);
6265 } else {
6266 gen_sub_carry(tmp, tmp2, tmp);
6267 }
21aeb343 6268 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6269 break;
6270 case 0x08:
6271 if (set_cc) {
e9bb4aa9
JR
6272 tcg_gen_and_i32(tmp, tmp, tmp2);
6273 gen_logic_CC(tmp);
9ee6e8bb 6274 }
e9bb4aa9 6275 dead_tmp(tmp);
9ee6e8bb
PB
6276 break;
6277 case 0x09:
6278 if (set_cc) {
e9bb4aa9
JR
6279 tcg_gen_xor_i32(tmp, tmp, tmp2);
6280 gen_logic_CC(tmp);
9ee6e8bb 6281 }
e9bb4aa9 6282 dead_tmp(tmp);
9ee6e8bb
PB
6283 break;
6284 case 0x0a:
6285 if (set_cc) {
e9bb4aa9 6286 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6287 }
e9bb4aa9 6288 dead_tmp(tmp);
9ee6e8bb
PB
6289 break;
6290 case 0x0b:
6291 if (set_cc) {
e9bb4aa9 6292 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6293 }
e9bb4aa9 6294 dead_tmp(tmp);
9ee6e8bb
PB
6295 break;
6296 case 0x0c:
e9bb4aa9
JR
6297 tcg_gen_or_i32(tmp, tmp, tmp2);
6298 if (logic_cc) {
6299 gen_logic_CC(tmp);
6300 }
21aeb343 6301 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6302 break;
6303 case 0x0d:
6304 if (logic_cc && rd == 15) {
6305 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6306 if (IS_USER(s)) {
9ee6e8bb 6307 goto illegal_op;
e9bb4aa9
JR
6308 }
6309 gen_exception_return(s, tmp2);
9ee6e8bb 6310 } else {
e9bb4aa9
JR
6311 if (logic_cc) {
6312 gen_logic_CC(tmp2);
6313 }
21aeb343 6314 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6315 }
6316 break;
6317 case 0x0e:
e9bb4aa9
JR
6318 tcg_gen_bic_i32(tmp, tmp, tmp2);
6319 if (logic_cc) {
6320 gen_logic_CC(tmp);
6321 }
21aeb343 6322 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6323 break;
6324 default:
6325 case 0x0f:
e9bb4aa9
JR
6326 tcg_gen_not_i32(tmp2, tmp2);
6327 if (logic_cc) {
6328 gen_logic_CC(tmp2);
6329 }
21aeb343 6330 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6331 break;
6332 }
e9bb4aa9
JR
6333 if (op1 != 0x0f && op1 != 0x0d) {
6334 dead_tmp(tmp2);
6335 }
9ee6e8bb
PB
6336 } else {
6337 /* other instructions */
6338 op1 = (insn >> 24) & 0xf;
6339 switch(op1) {
6340 case 0x0:
6341 case 0x1:
6342 /* multiplies, extra load/stores */
6343 sh = (insn >> 5) & 3;
6344 if (sh == 0) {
6345 if (op1 == 0x0) {
6346 rd = (insn >> 16) & 0xf;
6347 rn = (insn >> 12) & 0xf;
6348 rs = (insn >> 8) & 0xf;
6349 rm = (insn) & 0xf;
6350 op1 = (insn >> 20) & 0xf;
6351 switch (op1) {
6352 case 0: case 1: case 2: case 3: case 6:
6353 /* 32 bit mul */
5e3f878a
PB
6354 tmp = load_reg(s, rs);
6355 tmp2 = load_reg(s, rm);
6356 tcg_gen_mul_i32(tmp, tmp, tmp2);
6357 dead_tmp(tmp2);
9ee6e8bb
PB
6358 if (insn & (1 << 22)) {
6359 /* Subtract (mls) */
6360 ARCH(6T2);
5e3f878a
PB
6361 tmp2 = load_reg(s, rn);
6362 tcg_gen_sub_i32(tmp, tmp2, tmp);
6363 dead_tmp(tmp2);
9ee6e8bb
PB
6364 } else if (insn & (1 << 21)) {
6365 /* Add */
5e3f878a
PB
6366 tmp2 = load_reg(s, rn);
6367 tcg_gen_add_i32(tmp, tmp, tmp2);
6368 dead_tmp(tmp2);
9ee6e8bb
PB
6369 }
6370 if (insn & (1 << 20))
5e3f878a
PB
6371 gen_logic_CC(tmp);
6372 store_reg(s, rd, tmp);
9ee6e8bb
PB
6373 break;
6374 default:
6375 /* 64 bit mul */
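                        /* UMULL/SMULL/UMLAL/SMLAL: bit 22 selects signed, bit 21
                           accumulate, bit 20 flag setting. Bit 23 clear gives
                           UMAAL, which adds both halves of the destination pair
                           to the product. */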
5e3f878a
PB
6376 tmp = load_reg(s, rs);
6377 tmp2 = load_reg(s, rm);
9ee6e8bb 6378 if (insn & (1 << 22))
a7812ae4 6379 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6380 else
a7812ae4 6381 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6382 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6383 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6384 if (!(insn & (1 << 23))) { /* double accumulate */
6385 ARCH(6);
a7812ae4
PB
6386 gen_addq_lo(s, tmp64, rn);
6387 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6388 }
6389 if (insn & (1 << 20))
a7812ae4
PB
6390 gen_logicq_cc(tmp64);
6391 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6392 break;
6393 }
6394 } else {
6395 rn = (insn >> 16) & 0xf;
6396 rd = (insn >> 12) & 0xf;
6397 if (insn & (1 << 23)) {
6398 /* load/store exclusive */
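                        /* Exclusives are modelled with the mark_exclusive and
                           test_exclusive helpers: ldrex* records the monitored
                           address, and strex* performs the store only while the
                           monitor still matches, writing the 0/1 status into rd
                           via the branch-over sequence below. */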
86753403
PB
6399 op1 = (insn >> 21) & 0x3;
6400 if (op1)
a47f43d2 6401 ARCH(6K);
86753403
PB
6402 else
6403 ARCH(6);
3174f8e9
FN
6404 addr = tcg_temp_local_new_i32();
6405 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb 6406 if (insn & (1 << 20)) {
3174f8e9 6407 gen_helper_mark_exclusive(cpu_env, addr);
86753403
PB
6408 switch (op1) {
6409 case 0: /* ldrex */
6410 tmp = gen_ld32(addr, IS_USER(s));
6411 break;
6412 case 1: /* ldrexd */
6413 tmp = gen_ld32(addr, IS_USER(s));
6414 store_reg(s, rd, tmp);
6415 tcg_gen_addi_i32(addr, addr, 4);
6416 tmp = gen_ld32(addr, IS_USER(s));
6417 rd++;
6418 break;
6419 case 2: /* ldrexb */
6420 tmp = gen_ld8u(addr, IS_USER(s));
6421 break;
6422 case 3: /* ldrexh */
6423 tmp = gen_ld16u(addr, IS_USER(s));
6424 break;
6425 default:
6426 abort();
6427 }
8f8e3aa4 6428 store_reg(s, rd, tmp);
9ee6e8bb 6429 } else {
8f8e3aa4 6430 int label = gen_new_label();
9ee6e8bb 6431 rm = insn & 0xf;
3174f8e9
FN
6432 tmp2 = tcg_temp_local_new_i32();
6433 gen_helper_test_exclusive(tmp2, cpu_env, addr);
6434 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 6435 tmp = load_reg(s,rm);
86753403
PB
6436 switch (op1) {
6437 case 0: /* strex */
6438 gen_st32(tmp, addr, IS_USER(s));
6439 break;
6440 case 1: /* strexd */
6441 gen_st32(tmp, addr, IS_USER(s));
6442 tcg_gen_addi_i32(addr, addr, 4);
6443 tmp = load_reg(s, rm + 1);
6444 gen_st32(tmp, addr, IS_USER(s));
6445 break;
6446 case 2: /* strexb */
6447 gen_st8(tmp, addr, IS_USER(s));
6448 break;
6449 case 3: /* strexh */
6450 gen_st16(tmp, addr, IS_USER(s));
6451 break;
6452 default:
6453 abort();
6454 }
2637a3be 6455 gen_set_label(label);
3174f8e9
FN
6456 tcg_gen_mov_i32(cpu_R[rd], tmp2);
6457 tcg_temp_free(tmp2);
9ee6e8bb 6458 }
3174f8e9 6459 tcg_temp_free(addr);
9ee6e8bb
PB
6460 } else {
6461 /* SWP instruction */
6462 rm = (insn) & 0xf;
6463
8984bd2e
PB
6464 /* ??? This is not really atomic. However we know
6465 we never have multiple CPUs running in parallel,
6466 so it is good enough. */
6467 addr = load_reg(s, rn);
6468 tmp = load_reg(s, rm);
9ee6e8bb 6469 if (insn & (1 << 22)) {
8984bd2e
PB
6470 tmp2 = gen_ld8u(addr, IS_USER(s));
6471 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6472 } else {
8984bd2e
PB
6473 tmp2 = gen_ld32(addr, IS_USER(s));
6474 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6475 }
8984bd2e
PB
6476 dead_tmp(addr);
6477 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6478 }
6479 }
6480 } else {
6481 int address_offset;
6482 int load;
6483 /* Misc load/store */
6484 rn = (insn >> 16) & 0xf;
6485 rd = (insn >> 12) & 0xf;
b0109805 6486 addr = load_reg(s, rn);
9ee6e8bb 6487 if (insn & (1 << 24))
b0109805 6488 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6489 address_offset = 0;
6490 if (insn & (1 << 20)) {
6491 /* load */
6492 switch(sh) {
6493 case 1:
b0109805 6494 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6495 break;
6496 case 2:
b0109805 6497 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6498 break;
6499 default:
6500 case 3:
b0109805 6501 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6502 break;
6503 }
6504 load = 1;
6505 } else if (sh & 2) {
6506 /* doubleword */
6507 if (sh & 1) {
6508 /* store */
b0109805
PB
6509 tmp = load_reg(s, rd);
6510 gen_st32(tmp, addr, IS_USER(s));
6511 tcg_gen_addi_i32(addr, addr, 4);
6512 tmp = load_reg(s, rd + 1);
6513 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6514 load = 0;
6515 } else {
6516 /* load */
b0109805
PB
6517 tmp = gen_ld32(addr, IS_USER(s));
6518 store_reg(s, rd, tmp);
6519 tcg_gen_addi_i32(addr, addr, 4);
6520 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6521 rd++;
6522 load = 1;
6523 }
6524 address_offset = -4;
6525 } else {
6526 /* store */
b0109805
PB
6527 tmp = load_reg(s, rd);
6528 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6529 load = 0;
6530 }
6531                /* Perform base writeback before storing the loaded value to
6532                   ensure correct behavior with overlapping index registers.
6533                   ldrd with base writeback is undefined if the
6534                   destination and index registers overlap. */
6535 if (!(insn & (1 << 24))) {
b0109805
PB
6536 gen_add_datah_offset(s, insn, address_offset, addr);
6537 store_reg(s, rn, addr);
9ee6e8bb
PB
6538 } else if (insn & (1 << 21)) {
6539 if (address_offset)
b0109805
PB
6540 tcg_gen_addi_i32(addr, addr, address_offset);
6541 store_reg(s, rn, addr);
6542 } else {
6543 dead_tmp(addr);
9ee6e8bb
PB
6544 }
6545 if (load) {
6546 /* Complete the load. */
b0109805 6547 store_reg(s, rd, tmp);
9ee6e8bb
PB
6548 }
6549 }
6550 break;
6551 case 0x4:
6552 case 0x5:
6553 goto do_ldst;
6554 case 0x6:
6555 case 0x7:
6556 if (insn & (1 << 4)) {
6557 ARCH(6);
6558 /* Armv6 Media instructions. */
6559 rm = insn & 0xf;
6560 rn = (insn >> 16) & 0xf;
2c0262af 6561 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6562 rs = (insn >> 8) & 0xf;
6563 switch ((insn >> 23) & 3) {
6564 case 0: /* Parallel add/subtract. */
6565 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6566 tmp = load_reg(s, rn);
6567 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6568 sh = (insn >> 5) & 7;
6569 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6570 goto illegal_op;
6ddbc6e4
PB
6571 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6572 dead_tmp(tmp2);
6573 store_reg(s, rd, tmp);
9ee6e8bb
PB
6574 break;
6575 case 1:
6576 if ((insn & 0x00700020) == 0) {
6c95676b 6577 /* Halfword pack. */
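                    /* PKHBT: rd = (rm LSL shift)[31:16] : rn[15:0].
                       PKHTB: rd = rn[31:16] : (rm ASR shift)[15:0], where a
                       shift field of 0 encodes ASR #32 (ASR #31 below gives
                       the same low halfword). */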
3670669c
PB
6578 tmp = load_reg(s, rn);
6579 tmp2 = load_reg(s, rm);
9ee6e8bb 6580 shift = (insn >> 7) & 0x1f;
3670669c
PB
6581 if (insn & (1 << 6)) {
6582 /* pkhtb */
22478e79
AZ
6583 if (shift == 0)
6584 shift = 31;
6585 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6586 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6587 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6588 } else {
6589 /* pkhbt */
22478e79
AZ
6590 if (shift)
6591 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6592 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6593 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6594 }
6595 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6596 dead_tmp(tmp2);
3670669c 6597 store_reg(s, rd, tmp);
9ee6e8bb
PB
6598 } else if ((insn & 0x00200020) == 0x00200000) {
6599 /* [us]sat */
6ddbc6e4 6600 tmp = load_reg(s, rm);
9ee6e8bb
PB
6601 shift = (insn >> 7) & 0x1f;
6602 if (insn & (1 << 6)) {
6603 if (shift == 0)
6604 shift = 31;
6ddbc6e4 6605 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6606 } else {
6ddbc6e4 6607 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6608 }
6609 sh = (insn >> 16) & 0x1f;
6610 if (sh != 0) {
6611 if (insn & (1 << 22))
6ddbc6e4 6612 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6613 else
6ddbc6e4 6614 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6615 }
6ddbc6e4 6616 store_reg(s, rd, tmp);
9ee6e8bb
PB
6617 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6618 /* [us]sat16 */
6ddbc6e4 6619 tmp = load_reg(s, rm);
9ee6e8bb
PB
6620 sh = (insn >> 16) & 0x1f;
6621 if (sh != 0) {
6622 if (insn & (1 << 22))
6ddbc6e4 6623 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6624 else
6ddbc6e4 6625 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6626 }
6ddbc6e4 6627 store_reg(s, rd, tmp);
9ee6e8bb
PB
6628 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6629 /* Select bytes. */
6ddbc6e4
PB
6630 tmp = load_reg(s, rn);
6631 tmp2 = load_reg(s, rm);
6632 tmp3 = new_tmp();
6633 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6634 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6635 dead_tmp(tmp3);
6636 dead_tmp(tmp2);
6637 store_reg(s, rd, tmp);
9ee6e8bb 6638 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6639 tmp = load_reg(s, rm);
9ee6e8bb
PB
6640 shift = (insn >> 10) & 3;
6641                    /* ??? In many cases it's not necessary to do a
6642 rotate, a shift is sufficient. */
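                    /* SXTB16/SXTB/SXTH/UXTB16/UXTB/UXTH, selected by op1; when
                       rn != 15 this is the accumulating SXTAB/UXTAB etc. form,
                       with gen_add16() doing the per-halfword addition needed
                       by the B16 variants. */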
6643 if (shift != 0)
5e3f878a 6644 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6645 op1 = (insn >> 20) & 7;
6646 switch (op1) {
5e3f878a
PB
6647 case 0: gen_sxtb16(tmp); break;
6648 case 2: gen_sxtb(tmp); break;
6649 case 3: gen_sxth(tmp); break;
6650 case 4: gen_uxtb16(tmp); break;
6651 case 6: gen_uxtb(tmp); break;
6652 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6653 default: goto illegal_op;
6654 }
6655 if (rn != 15) {
5e3f878a 6656 tmp2 = load_reg(s, rn);
9ee6e8bb 6657 if ((op1 & 3) == 0) {
5e3f878a 6658 gen_add16(tmp, tmp2);
9ee6e8bb 6659 } else {
5e3f878a
PB
6660 tcg_gen_add_i32(tmp, tmp, tmp2);
6661 dead_tmp(tmp2);
9ee6e8bb
PB
6662 }
6663 }
6c95676b 6664 store_reg(s, rd, tmp);
9ee6e8bb
PB
6665 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6666 /* rev */
b0109805 6667 tmp = load_reg(s, rm);
9ee6e8bb
PB
6668 if (insn & (1 << 22)) {
6669 if (insn & (1 << 7)) {
b0109805 6670 gen_revsh(tmp);
9ee6e8bb
PB
6671 } else {
6672 ARCH(6T2);
b0109805 6673 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6674 }
6675 } else {
6676 if (insn & (1 << 7))
b0109805 6677 gen_rev16(tmp);
9ee6e8bb 6678 else
66896cb8 6679 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6680 }
b0109805 6681 store_reg(s, rd, tmp);
9ee6e8bb
PB
6682 } else {
6683 goto illegal_op;
6684 }
6685 break;
6686 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6687 tmp = load_reg(s, rm);
6688 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6689 if (insn & (1 << 20)) {
6690 /* Signed multiply most significant [accumulate]. */
a7812ae4 6691 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6692 if (insn & (1 << 5))
a7812ae4
PB
6693 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6694 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6695 tmp = new_tmp();
a7812ae4 6696 tcg_gen_trunc_i64_i32(tmp, tmp64);
955a7dd5
AZ
6697 if (rd != 15) {
6698 tmp2 = load_reg(s, rd);
9ee6e8bb 6699 if (insn & (1 << 6)) {
5e3f878a 6700 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6701 } else {
5e3f878a 6702 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6703 }
5e3f878a 6704 dead_tmp(tmp2);
9ee6e8bb 6705 }
955a7dd5 6706 store_reg(s, rn, tmp);
9ee6e8bb
PB
6707 } else {
6708 if (insn & (1 << 5))
5e3f878a
PB
6709 gen_swap_half(tmp2);
6710 gen_smul_dual(tmp, tmp2);
6711 /* This addition cannot overflow. */
6712 if (insn & (1 << 6)) {
6713 tcg_gen_sub_i32(tmp, tmp, tmp2);
6714 } else {
6715 tcg_gen_add_i32(tmp, tmp, tmp2);
6716 }
6717 dead_tmp(tmp2);
9ee6e8bb 6718 if (insn & (1 << 22)) {
5e3f878a 6719 /* smlald, smlsld */
a7812ae4
PB
6720 tmp64 = tcg_temp_new_i64();
6721 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6722 dead_tmp(tmp);
a7812ae4
PB
6723 gen_addq(s, tmp64, rd, rn);
6724 gen_storeq_reg(s, rd, rn, tmp64);
9ee6e8bb 6725 } else {
5e3f878a 6726 /* smuad, smusd, smlad, smlsd */
22478e79 6727 if (rd != 15)
9ee6e8bb 6728 {
22478e79 6729 tmp2 = load_reg(s, rd);
5e3f878a
PB
6730 gen_helper_add_setq(tmp, tmp, tmp2);
6731 dead_tmp(tmp2);
9ee6e8bb 6732 }
22478e79 6733 store_reg(s, rn, tmp);
9ee6e8bb
PB
6734 }
6735 }
6736 break;
6737 case 3:
6738 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6739 switch (op1) {
6740 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6741 ARCH(6);
6742 tmp = load_reg(s, rm);
6743 tmp2 = load_reg(s, rs);
6744 gen_helper_usad8(tmp, tmp, tmp2);
6745 dead_tmp(tmp2);
ded9d295
AZ
6746 if (rd != 15) {
6747 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6748 tcg_gen_add_i32(tmp, tmp, tmp2);
6749 dead_tmp(tmp2);
9ee6e8bb 6750 }
ded9d295 6751 store_reg(s, rn, tmp);
9ee6e8bb
PB
6752 break;
6753 case 0x20: case 0x24: case 0x28: case 0x2c:
6754 /* Bitfield insert/clear. */
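                /* BFI/BFC: shift is the lsb position and i works out to the
                   field width (msb - lsb + 1); rm == 15 encodes BFC, i.e.
                   inserting a zero field. gen_bfi() merges the low i bits of
                   the source into rd at that position. */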
6755 ARCH(6T2);
6756 shift = (insn >> 7) & 0x1f;
6757 i = (insn >> 16) & 0x1f;
6758 i = i + 1 - shift;
6759 if (rm == 15) {
5e3f878a
PB
6760 tmp = new_tmp();
6761 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6762 } else {
5e3f878a 6763 tmp = load_reg(s, rm);
9ee6e8bb
PB
6764 }
6765 if (i != 32) {
5e3f878a 6766 tmp2 = load_reg(s, rd);
8f8e3aa4 6767 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6768 dead_tmp(tmp2);
9ee6e8bb 6769 }
5e3f878a 6770 store_reg(s, rd, tmp);
9ee6e8bb
PB
6771 break;
6772 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6773 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6774 ARCH(6T2);
5e3f878a 6775 tmp = load_reg(s, rm);
9ee6e8bb
PB
6776 shift = (insn >> 7) & 0x1f;
6777 i = ((insn >> 16) & 0x1f) + 1;
6778 if (shift + i > 32)
6779 goto illegal_op;
6780 if (i < 32) {
6781 if (op1 & 0x20) {
5e3f878a 6782 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6783 } else {
5e3f878a 6784 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6785 }
6786 }
5e3f878a 6787 store_reg(s, rd, tmp);
9ee6e8bb
PB
6788 break;
6789 default:
6790 goto illegal_op;
6791 }
6792 break;
6793 }
6794 break;
6795 }
6796 do_ldst:
6797 /* Check for undefined extension instructions
6798         * per the ARM Bible, i.e.:
6799 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6800 */
6801 sh = (0xf << 20) | (0xf << 4);
6802 if (op1 == 0x7 && ((insn & sh) == sh))
6803 {
6804 goto illegal_op;
6805 }
6806 /* load/store byte/word */
6807 rn = (insn >> 16) & 0xf;
6808 rd = (insn >> 12) & 0xf;
b0109805 6809 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6810 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6811 if (insn & (1 << 24))
b0109805 6812 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6813 if (insn & (1 << 20)) {
6814 /* load */
9ee6e8bb 6815 if (insn & (1 << 22)) {
b0109805 6816 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6817 } else {
b0109805 6818 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6819 }
9ee6e8bb
PB
6820 } else {
6821 /* store */
b0109805 6822 tmp = load_reg(s, rd);
9ee6e8bb 6823 if (insn & (1 << 22))
b0109805 6824 gen_st8(tmp, tmp2, i);
9ee6e8bb 6825 else
b0109805 6826 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6827 }
6828 if (!(insn & (1 << 24))) {
b0109805
PB
6829 gen_add_data_offset(s, insn, tmp2);
6830 store_reg(s, rn, tmp2);
6831 } else if (insn & (1 << 21)) {
6832 store_reg(s, rn, tmp2);
6833 } else {
6834 dead_tmp(tmp2);
9ee6e8bb
PB
6835 }
6836 if (insn & (1 << 20)) {
6837 /* Complete the load. */
6838 if (rd == 15)
b0109805 6839 gen_bx(s, tmp);
9ee6e8bb 6840 else
b0109805 6841 store_reg(s, rd, tmp);
9ee6e8bb
PB
6842 }
6843 break;
6844 case 0x08:
6845 case 0x09:
6846 {
6847 int j, n, user, loaded_base;
b0109805 6848 TCGv loaded_var;
9ee6e8bb
PB
6849 /* load/store multiple words */
6850 /* XXX: store correct base if write back */
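        /* The four LDM/STM addressing modes (IA/IB/DA/DB, bits 24:23) are
           normalised here: the decrement forms first move the base down by
           n*4 or (n-1)*4, so the transfer loop can always walk upwards in
           steps of 4. */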
6851 user = 0;
6852 if (insn & (1 << 22)) {
6853 if (IS_USER(s))
6854 goto illegal_op; /* only usable in supervisor mode */
6855
6856 if ((insn & (1 << 15)) == 0)
6857 user = 1;
6858 }
6859 rn = (insn >> 16) & 0xf;
b0109805 6860 addr = load_reg(s, rn);
9ee6e8bb
PB
6861
6862 /* compute total size */
6863 loaded_base = 0;
a50f5b91 6864 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6865 n = 0;
6866 for(i=0;i<16;i++) {
6867 if (insn & (1 << i))
6868 n++;
6869 }
6870 /* XXX: test invalid n == 0 case ? */
6871 if (insn & (1 << 23)) {
6872 if (insn & (1 << 24)) {
6873 /* pre increment */
b0109805 6874 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6875 } else {
6876 /* post increment */
6877 }
6878 } else {
6879 if (insn & (1 << 24)) {
6880 /* pre decrement */
b0109805 6881 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6882 } else {
6883 /* post decrement */
6884 if (n != 1)
b0109805 6885 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6886 }
6887 }
6888 j = 0;
6889 for(i=0;i<16;i++) {
6890 if (insn & (1 << i)) {
6891 if (insn & (1 << 20)) {
6892 /* load */
b0109805 6893 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6894 if (i == 15) {
b0109805 6895 gen_bx(s, tmp);
9ee6e8bb 6896 } else if (user) {
b0109805
PB
6897 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6898 dead_tmp(tmp);
9ee6e8bb 6899 } else if (i == rn) {
b0109805 6900 loaded_var = tmp;
9ee6e8bb
PB
6901 loaded_base = 1;
6902 } else {
b0109805 6903 store_reg(s, i, tmp);
9ee6e8bb
PB
6904 }
6905 } else {
6906 /* store */
6907 if (i == 15) {
6908 /* special case: r15 = PC + 8 */
6909 val = (long)s->pc + 4;
b0109805
PB
6910 tmp = new_tmp();
6911 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6912 } else if (user) {
b0109805
PB
6913 tmp = new_tmp();
6914 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6915 } else {
b0109805 6916 tmp = load_reg(s, i);
9ee6e8bb 6917 }
b0109805 6918 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6919 }
6920 j++;
6921 /* no need to add after the last transfer */
6922 if (j != n)
b0109805 6923 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6924 }
6925 }
6926 if (insn & (1 << 21)) {
6927 /* write back */
6928 if (insn & (1 << 23)) {
6929 if (insn & (1 << 24)) {
6930 /* pre increment */
6931 } else {
6932 /* post increment */
b0109805 6933 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6934 }
6935 } else {
6936 if (insn & (1 << 24)) {
6937 /* pre decrement */
6938 if (n != 1)
b0109805 6939 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6940 } else {
6941 /* post decrement */
b0109805 6942 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6943 }
6944 }
b0109805
PB
6945 store_reg(s, rn, addr);
6946 } else {
6947 dead_tmp(addr);
9ee6e8bb
PB
6948 }
6949 if (loaded_base) {
b0109805 6950 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6951 }
6952 if ((insn & (1 << 22)) && !user) {
6953 /* Restore CPSR from SPSR. */
d9ba4830
PB
6954 tmp = load_cpu_field(spsr);
6955 gen_set_cpsr(tmp, 0xffffffff);
6956 dead_tmp(tmp);
9ee6e8bb
PB
6957 s->is_jmp = DISAS_UPDATE;
6958 }
6959 }
6960 break;
6961 case 0xa:
6962 case 0xb:
6963 {
6964 int32_t offset;
6965
6966 /* branch (and link) */
6967 val = (int32_t)s->pc;
6968 if (insn & (1 << 24)) {
5e3f878a
PB
6969 tmp = new_tmp();
6970 tcg_gen_movi_i32(tmp, val);
6971 store_reg(s, 14, tmp);
9ee6e8bb
PB
6972 }
6973 offset = (((int32_t)insn << 8) >> 8);
6974 val += (offset << 2) + 4;
6975 gen_jmp(s, val);
6976 }
6977 break;
6978 case 0xc:
6979 case 0xd:
6980 case 0xe:
6981 /* Coprocessor. */
6982 if (disas_coproc_insn(env, s, insn))
6983 goto illegal_op;
6984 break;
6985 case 0xf:
6986 /* swi */
5e3f878a 6987 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6988 s->is_jmp = DISAS_SWI;
6989 break;
6990 default:
6991 illegal_op:
6992 gen_set_condexec(s);
5e3f878a 6993 gen_set_pc_im(s->pc - 4);
d9ba4830 6994 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6995 s->is_jmp = DISAS_JUMP;
6996 break;
6997 }
6998 }
6999}
7000
7001/* Return true if this is a Thumb-2 logical op. */
7002static int
7003thumb2_logic_op(int op)
7004{
7005 return (op < 8);
7006}
7007
7008/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7009 then set condition code flags based on the result of the operation.
7010 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7011 to the high bit of T1.
7012 Returns zero if the opcode is valid. */
7013
7014static int
396e467c 7015gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7016{
7017 int logic_cc;
7018
7019 logic_cc = 0;
7020 switch (op) {
7021 case 0: /* and */
396e467c 7022 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7023 logic_cc = conds;
7024 break;
7025 case 1: /* bic */
396e467c 7026 tcg_gen_bic_i32(t0, t0, t1);
9ee6e8bb
PB
7027 logic_cc = conds;
7028 break;
7029 case 2: /* orr */
396e467c 7030 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7031 logic_cc = conds;
7032 break;
7033 case 3: /* orn */
396e467c
FN
7034 tcg_gen_not_i32(t1, t1);
7035 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7036 logic_cc = conds;
7037 break;
7038 case 4: /* eor */
396e467c 7039 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7040 logic_cc = conds;
7041 break;
7042 case 8: /* add */
7043 if (conds)
396e467c 7044 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7045 else
396e467c 7046 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7047 break;
7048 case 10: /* adc */
7049 if (conds)
396e467c 7050 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7051 else
396e467c 7052 gen_adc(t0, t1);
9ee6e8bb
PB
7053 break;
7054 case 11: /* sbc */
7055 if (conds)
396e467c 7056 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7057 else
396e467c 7058 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7059 break;
7060 case 13: /* sub */
7061 if (conds)
396e467c 7062 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7063 else
396e467c 7064 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7065 break;
7066 case 14: /* rsb */
7067 if (conds)
396e467c 7068 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7069 else
396e467c 7070 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7071 break;
7072 default: /* 5, 6, 7, 9, 12, 15. */
7073 return 1;
7074 }
7075 if (logic_cc) {
396e467c 7076 gen_logic_CC(t0);
9ee6e8bb 7077 if (shifter_out)
396e467c 7078 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7079 }
7080 return 0;
7081}
7082
7083/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7084 is not legal. */
7085static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7086{
b0109805 7087 uint32_t insn, imm, shift, offset;
9ee6e8bb 7088 uint32_t rd, rn, rm, rs;
b26eefb6 7089 TCGv tmp;
6ddbc6e4
PB
7090 TCGv tmp2;
7091 TCGv tmp3;
b0109805 7092 TCGv addr;
a7812ae4 7093 TCGv_i64 tmp64;
9ee6e8bb
PB
7094 int op;
7095 int shiftop;
7096 int conds;
7097 int logic_cc;
7098
7099 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7100 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7101 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7102 16-bit instructions to get correct prefetch abort behavior. */
7103 insn = insn_hw1;
7104 if ((insn & (1 << 12)) == 0) {
7105 /* Second half of blx. */
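            /* The first half left the upper part of the target in r14; add
               the low offset from this half, force word alignment for the
               switch to ARM state, and set r14 to the return address with
               bit 0 set. */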
7106 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7107 tmp = load_reg(s, 14);
7108 tcg_gen_addi_i32(tmp, tmp, offset);
7109 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7110
d9ba4830 7111 tmp2 = new_tmp();
b0109805 7112 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7113 store_reg(s, 14, tmp2);
7114 gen_bx(s, tmp);
9ee6e8bb
PB
7115 return 0;
7116 }
7117 if (insn & (1 << 11)) {
7118 /* Second half of bl. */
7119 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7120 tmp = load_reg(s, 14);
6a0d8a1d 7121 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7122
d9ba4830 7123 tmp2 = new_tmp();
b0109805 7124 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7125 store_reg(s, 14, tmp2);
7126 gen_bx(s, tmp);
9ee6e8bb
PB
7127 return 0;
7128 }
7129 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7130 /* Instruction spans a page boundary. Implement it as two
7131           16-bit instructions in case the second half causes a
7132 prefetch abort. */
7133 offset = ((int32_t)insn << 21) >> 9;
396e467c 7134 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7135 return 0;
7136 }
7137 /* Fall through to 32-bit decode. */
7138 }
7139
7140 insn = lduw_code(s->pc);
7141 s->pc += 2;
7142 insn |= (uint32_t)insn_hw1 << 16;
7143
7144 if ((insn & 0xf800e800) != 0xf000e800) {
7145 ARCH(6T2);
7146 }
7147
7148 rn = (insn >> 16) & 0xf;
7149 rs = (insn >> 12) & 0xf;
7150 rd = (insn >> 8) & 0xf;
7151 rm = insn & 0xf;
7152 switch ((insn >> 25) & 0xf) {
7153 case 0: case 1: case 2: case 3:
7154 /* 16-bit instructions. Should never happen. */
7155 abort();
7156 case 4:
7157 if (insn & (1 << 22)) {
7158 /* Other load/store, table branch. */
7159 if (insn & 0x01200000) {
7160 /* Load/store doubleword. */
7161 if (rn == 15) {
b0109805
PB
7162 addr = new_tmp();
7163 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7164 } else {
b0109805 7165 addr = load_reg(s, rn);
9ee6e8bb
PB
7166 }
7167 offset = (insn & 0xff) * 4;
7168 if ((insn & (1 << 23)) == 0)
7169 offset = -offset;
7170 if (insn & (1 << 24)) {
b0109805 7171 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7172 offset = 0;
7173 }
7174 if (insn & (1 << 20)) {
7175 /* ldrd */
b0109805
PB
7176 tmp = gen_ld32(addr, IS_USER(s));
7177 store_reg(s, rs, tmp);
7178 tcg_gen_addi_i32(addr, addr, 4);
7179 tmp = gen_ld32(addr, IS_USER(s));
7180 store_reg(s, rd, tmp);
9ee6e8bb
PB
7181 } else {
7182 /* strd */
b0109805
PB
7183 tmp = load_reg(s, rs);
7184 gen_st32(tmp, addr, IS_USER(s));
7185 tcg_gen_addi_i32(addr, addr, 4);
7186 tmp = load_reg(s, rd);
7187 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7188 }
7189 if (insn & (1 << 21)) {
7190 /* Base writeback. */
7191 if (rn == 15)
7192 goto illegal_op;
b0109805
PB
7193 tcg_gen_addi_i32(addr, addr, offset - 4);
7194 store_reg(s, rn, addr);
7195 } else {
7196 dead_tmp(addr);
9ee6e8bb
PB
7197 }
7198 } else if ((insn & (1 << 23)) == 0) {
7199 /* Load/store exclusive word. */
3174f8e9
FN
7200 addr = tcg_temp_local_new();
7201 tcg_gen_mov_i32(addr, cpu_R[rn]);
2c0262af 7202 if (insn & (1 << 20)) {
3174f8e9 7203 gen_helper_mark_exclusive(cpu_env, addr);
8f8e3aa4
PB
7204 tmp = gen_ld32(addr, IS_USER(s));
7205 store_reg(s, rd, tmp);
9ee6e8bb 7206 } else {
8f8e3aa4 7207 int label = gen_new_label();
3174f8e9
FN
7208 tmp2 = tcg_temp_local_new();
7209 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7210 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7211 tmp = load_reg(s, rs);
3174f8e9 7212 gen_st32(tmp, addr, IS_USER(s));
8f8e3aa4 7213 gen_set_label(label);
3174f8e9
FN
7214 tcg_gen_mov_i32(cpu_R[rd], tmp2);
7215 tcg_temp_free(tmp2);
9ee6e8bb 7216 }
3174f8e9 7217 tcg_temp_free(addr);
9ee6e8bb
PB
7218 } else if ((insn & (1 << 6)) == 0) {
7219 /* Table Branch. */
7220 if (rn == 15) {
b0109805
PB
7221 addr = new_tmp();
7222 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7223 } else {
b0109805 7224 addr = load_reg(s, rn);
9ee6e8bb 7225 }
b26eefb6 7226 tmp = load_reg(s, rm);
b0109805 7227 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7228 if (insn & (1 << 4)) {
7229 /* tbh */
b0109805 7230 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7231 dead_tmp(tmp);
b0109805 7232 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7233 } else { /* tbb */
b26eefb6 7234 dead_tmp(tmp);
b0109805 7235 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7236 }
b0109805
PB
7237 dead_tmp(addr);
7238 tcg_gen_shli_i32(tmp, tmp, 1);
7239 tcg_gen_addi_i32(tmp, tmp, s->pc);
7240 store_reg(s, 15, tmp);
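                /* TBB/TBH: an unsigned byte offset is loaded from [rn + rm]
                   (TBB) or a halfword from [rn + 2*rm] (TBH), doubled, and
                   added to the address of the next instruction to form the
                   branch target. */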
9ee6e8bb
PB
7241 } else {
7242 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7243 /* ??? These are not really atomic. However we know
7244 we never have multiple CPUs running in parallel,
7245 so it is good enough. */
9ee6e8bb 7246 op = (insn >> 4) & 0x3;
3174f8e9
FN
7247 addr = tcg_temp_local_new();
7248 tcg_gen_mov_i32(addr, cpu_R[rn]);
9ee6e8bb 7249 if (insn & (1 << 20)) {
8f8e3aa4 7250 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7251 switch (op) {
7252 case 0:
8f8e3aa4 7253 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7254 break;
2c0262af 7255 case 1:
8f8e3aa4 7256 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7257 break;
9ee6e8bb 7258 case 3:
8f8e3aa4
PB
7259 tmp = gen_ld32(addr, IS_USER(s));
7260 tcg_gen_addi_i32(addr, addr, 4);
7261 tmp2 = gen_ld32(addr, IS_USER(s));
7262 store_reg(s, rd, tmp2);
2c0262af
FB
7263 break;
7264 default:
9ee6e8bb
PB
7265 goto illegal_op;
7266 }
8f8e3aa4 7267 store_reg(s, rs, tmp);
9ee6e8bb 7268 } else {
8f8e3aa4 7269 int label = gen_new_label();
3174f8e9
FN
7270 tmp2 = tcg_temp_local_new();
7271 gen_helper_test_exclusive(tmp2, cpu_env, addr);
7272 tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
8f8e3aa4 7273 tmp = load_reg(s, rs);
9ee6e8bb
PB
7274 switch (op) {
7275 case 0:
8f8e3aa4 7276 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7277 break;
7278 case 1:
8f8e3aa4 7279 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7280 break;
2c0262af 7281 case 3:
8f8e3aa4
PB
7282 gen_st32(tmp, addr, IS_USER(s));
7283 tcg_gen_addi_i32(addr, addr, 4);
7284 tmp = load_reg(s, rd);
7285 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7286 break;
9ee6e8bb
PB
7287 default:
7288 goto illegal_op;
2c0262af 7289 }
8f8e3aa4 7290 gen_set_label(label);
3174f8e9
FN
7291 tcg_gen_mov_i32(cpu_R[rm], tmp2);
7292 tcg_temp_free(tmp2);
9ee6e8bb 7293 }
3174f8e9 7294 tcg_temp_free(addr);
9ee6e8bb
PB
7295 }
7296 } else {
7297 /* Load/store multiple, RFE, SRS. */
7298 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7299 /* Not available in user mode. */
b0109805 7300 if (IS_USER(s))
9ee6e8bb
PB
7301 goto illegal_op;
7302 if (insn & (1 << 20)) {
7303 /* rfe */
b0109805
PB
7304 addr = load_reg(s, rn);
7305 if ((insn & (1 << 24)) == 0)
7306 tcg_gen_addi_i32(addr, addr, -8);
7307 /* Load PC into tmp and CPSR into tmp2. */
7308 tmp = gen_ld32(addr, 0);
7309 tcg_gen_addi_i32(addr, addr, 4);
7310 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7311 if (insn & (1 << 21)) {
7312 /* Base writeback. */
b0109805
PB
7313 if (insn & (1 << 24)) {
7314 tcg_gen_addi_i32(addr, addr, 4);
7315 } else {
7316 tcg_gen_addi_i32(addr, addr, -4);
7317 }
7318 store_reg(s, rn, addr);
7319 } else {
7320 dead_tmp(addr);
9ee6e8bb 7321 }
b0109805 7322 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7323 } else {
7324 /* srs */
7325 op = (insn & 0x1f);
7326 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7327 addr = load_reg(s, 13);
9ee6e8bb 7328 } else {
b0109805
PB
7329 addr = new_tmp();
7330 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7331 }
7332 if ((insn & (1 << 24)) == 0) {
b0109805 7333 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7334 }
b0109805
PB
7335 tmp = load_reg(s, 14);
7336 gen_st32(tmp, addr, 0);
7337 tcg_gen_addi_i32(addr, addr, 4);
7338 tmp = new_tmp();
7339 gen_helper_cpsr_read(tmp);
7340 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7341 if (insn & (1 << 21)) {
7342 if ((insn & (1 << 24)) == 0) {
b0109805 7343 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7344 } else {
b0109805 7345 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7346 }
7347 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7348 store_reg(s, 13, addr);
9ee6e8bb 7349 } else {
b0109805
PB
7350 gen_helper_set_r13_banked(cpu_env,
7351 tcg_const_i32(op), addr);
9ee6e8bb 7352 }
b0109805
PB
7353 } else {
7354 dead_tmp(addr);
9ee6e8bb
PB
7355 }
7356 }
7357 } else {
7358 int i;
7359 /* Load/store multiple. */
b0109805 7360 addr = load_reg(s, rn);
9ee6e8bb
PB
7361 offset = 0;
7362 for (i = 0; i < 16; i++) {
7363 if (insn & (1 << i))
7364 offset += 4;
7365 }
7366 if (insn & (1 << 24)) {
b0109805 7367 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7368 }
7369
7370 for (i = 0; i < 16; i++) {
7371 if ((insn & (1 << i)) == 0)
7372 continue;
7373 if (insn & (1 << 20)) {
7374 /* Load. */
b0109805 7375 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7376 if (i == 15) {
b0109805 7377 gen_bx(s, tmp);
9ee6e8bb 7378 } else {
b0109805 7379 store_reg(s, i, tmp);
9ee6e8bb
PB
7380 }
7381 } else {
7382 /* Store. */
b0109805
PB
7383 tmp = load_reg(s, i);
7384 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7385 }
b0109805 7386 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7387 }
7388 if (insn & (1 << 21)) {
7389 /* Base register writeback. */
7390 if (insn & (1 << 24)) {
b0109805 7391 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7392 }
7393 /* Fault if writeback register is in register list. */
7394 if (insn & (1 << rn))
7395 goto illegal_op;
b0109805
PB
7396 store_reg(s, rn, addr);
7397 } else {
7398 dead_tmp(addr);
9ee6e8bb
PB
7399 }
7400 }
7401 }
7402 break;
7403 case 5: /* Data processing register constant shift. */
3174f8e9
FN
7404 if (rn == 15) {
7405 tmp = new_tmp();
7406 tcg_gen_movi_i32(tmp, 0);
7407 } else {
7408 tmp = load_reg(s, rn);
7409 }
7410 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7411 op = (insn >> 21) & 0xf;
7412 shiftop = (insn >> 4) & 3;
7413 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7414 conds = (insn & (1 << 20)) != 0;
7415 logic_cc = (conds && thumb2_logic_op(op));
3174f8e9
FN
7416 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7417 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9ee6e8bb 7418 goto illegal_op;
3174f8e9
FN
7419 dead_tmp(tmp2);
7420 if (rd != 15) {
7421 store_reg(s, rd, tmp);
7422 } else {
7423 dead_tmp(tmp);
7424 }
9ee6e8bb
PB
7425 break;
7426 case 13: /* Misc data processing. */
7427 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7428 if (op < 4 && (insn & 0xf000) != 0xf000)
7429 goto illegal_op;
7430 switch (op) {
7431 case 0: /* Register controlled shift. */
8984bd2e
PB
7432 tmp = load_reg(s, rn);
7433 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7434 if ((insn & 0x70) != 0)
7435 goto illegal_op;
7436 op = (insn >> 21) & 3;
8984bd2e
PB
7437 logic_cc = (insn & (1 << 20)) != 0;
7438 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7439 if (logic_cc)
7440 gen_logic_CC(tmp);
21aeb343 7441 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7442 break;
7443 case 1: /* Sign/zero extend. */
5e3f878a 7444 tmp = load_reg(s, rm);
9ee6e8bb
PB
7445 shift = (insn >> 4) & 3;
7446            /* ??? In many cases it's not necessary to do a
7447 rotate, a shift is sufficient. */
7448 if (shift != 0)
5e3f878a 7449 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7450 op = (insn >> 20) & 7;
7451 switch (op) {
5e3f878a
PB
7452 case 0: gen_sxth(tmp); break;
7453 case 1: gen_uxth(tmp); break;
7454 case 2: gen_sxtb16(tmp); break;
7455 case 3: gen_uxtb16(tmp); break;
7456 case 4: gen_sxtb(tmp); break;
7457 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7458 default: goto illegal_op;
7459 }
7460 if (rn != 15) {
5e3f878a 7461 tmp2 = load_reg(s, rn);
9ee6e8bb 7462 if ((op >> 1) == 1) {
5e3f878a 7463 gen_add16(tmp, tmp2);
9ee6e8bb 7464 } else {
5e3f878a
PB
7465 tcg_gen_add_i32(tmp, tmp, tmp2);
7466 dead_tmp(tmp2);
9ee6e8bb
PB
7467 }
7468 }
5e3f878a 7469 store_reg(s, rd, tmp);
9ee6e8bb
PB
7470 break;
7471 case 2: /* SIMD add/subtract. */
7472 op = (insn >> 20) & 7;
7473 shift = (insn >> 4) & 7;
7474 if ((op & 3) == 3 || (shift & 3) == 3)
7475 goto illegal_op;
6ddbc6e4
PB
7476 tmp = load_reg(s, rn);
7477 tmp2 = load_reg(s, rm);
7478 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7479 dead_tmp(tmp2);
7480 store_reg(s, rd, tmp);
9ee6e8bb
PB
7481 break;
7482 case 3: /* Other data processing. */
7483 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7484 if (op < 4) {
7485 /* Saturating add/subtract. */
d9ba4830
PB
7486 tmp = load_reg(s, rn);
7487 tmp2 = load_reg(s, rm);
9ee6e8bb 7488 if (op & 2)
d9ba4830 7489 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7490 if (op & 1)
d9ba4830 7491 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7492 else
d9ba4830
PB
7493 gen_helper_add_saturate(tmp, tmp, tmp2);
7494 dead_tmp(tmp2);
9ee6e8bb 7495 } else {
d9ba4830 7496 tmp = load_reg(s, rn);
9ee6e8bb
PB
7497 switch (op) {
7498 case 0x0a: /* rbit */
d9ba4830 7499 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7500 break;
7501 case 0x08: /* rev */
66896cb8 7502 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7503 break;
7504 case 0x09: /* rev16 */
d9ba4830 7505 gen_rev16(tmp);
9ee6e8bb
PB
7506 break;
7507 case 0x0b: /* revsh */
d9ba4830 7508 gen_revsh(tmp);
9ee6e8bb
PB
7509 break;
7510 case 0x10: /* sel */
d9ba4830 7511 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7512 tmp3 = new_tmp();
7513 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7514 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7515 dead_tmp(tmp3);
d9ba4830 7516 dead_tmp(tmp2);
9ee6e8bb
PB
7517 break;
7518 case 0x18: /* clz */
d9ba4830 7519 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7520 break;
7521 default:
7522 goto illegal_op;
7523 }
7524 }
d9ba4830 7525 store_reg(s, rd, tmp);
9ee6e8bb
PB
7526 break;
7527 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7528 op = (insn >> 4) & 0xf;
d9ba4830
PB
7529 tmp = load_reg(s, rn);
7530 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7531 switch ((insn >> 20) & 7) {
7532 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7533 tcg_gen_mul_i32(tmp, tmp, tmp2);
7534 dead_tmp(tmp2);
9ee6e8bb 7535 if (rs != 15) {
d9ba4830 7536 tmp2 = load_reg(s, rs);
9ee6e8bb 7537 if (op)
d9ba4830 7538 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7539 else
d9ba4830
PB
7540 tcg_gen_add_i32(tmp, tmp, tmp2);
7541 dead_tmp(tmp2);
9ee6e8bb 7542 }
9ee6e8bb
PB
7543 break;
7544 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7545 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7546 dead_tmp(tmp2);
9ee6e8bb 7547 if (rs != 15) {
d9ba4830
PB
7548 tmp2 = load_reg(s, rs);
7549 gen_helper_add_setq(tmp, tmp, tmp2);
7550 dead_tmp(tmp2);
9ee6e8bb 7551 }
9ee6e8bb
PB
7552 break;
7553 case 2: /* Dual multiply add. */
7554 case 4: /* Dual multiply subtract. */
7555 if (op)
d9ba4830
PB
7556 gen_swap_half(tmp2);
7557 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7558 /* This addition cannot overflow. */
7559 if (insn & (1 << 22)) {
d9ba4830 7560 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7561 } else {
d9ba4830 7562 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7563 }
d9ba4830 7564 dead_tmp(tmp2);
9ee6e8bb
PB
7565 if (rs != 15)
7566 {
d9ba4830
PB
7567 tmp2 = load_reg(s, rs);
7568 gen_helper_add_setq(tmp, tmp, tmp2);
7569 dead_tmp(tmp2);
9ee6e8bb 7570 }
9ee6e8bb
PB
7571 break;
7572 case 3: /* 32 * 16 -> 32msb */
7573 if (op)
d9ba4830 7574 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7575 else
d9ba4830 7576 gen_sxth(tmp2);
a7812ae4
PB
7577 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7578 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7579 tmp = new_tmp();
a7812ae4 7580 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb
PB
7581 if (rs != 15)
7582 {
d9ba4830
PB
7583 tmp2 = load_reg(s, rs);
7584 gen_helper_add_setq(tmp, tmp, tmp2);
7585 dead_tmp(tmp2);
9ee6e8bb 7586 }
9ee6e8bb
PB
7587 break;
7588 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7589 gen_imull(tmp, tmp2);
7590 if (insn & (1 << 5)) {
7591 gen_roundqd(tmp, tmp2);
7592 dead_tmp(tmp2);
7593 } else {
7594 dead_tmp(tmp);
7595 tmp = tmp2;
7596 }
9ee6e8bb 7597 if (rs != 15) {
d9ba4830 7598 tmp2 = load_reg(s, rs);
9ee6e8bb 7599 if (insn & (1 << 21)) {
d9ba4830 7600 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7601 } else {
d9ba4830 7602 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7603 }
d9ba4830 7604 dead_tmp(tmp2);
2c0262af 7605 }
9ee6e8bb
PB
7606 break;
7607 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7608 gen_helper_usad8(tmp, tmp, tmp2);
7609 dead_tmp(tmp2);
9ee6e8bb 7610 if (rs != 15) {
d9ba4830
PB
7611 tmp2 = load_reg(s, rs);
7612 tcg_gen_add_i32(tmp, tmp, tmp2);
7613 dead_tmp(tmp2);
5fd46862 7614 }
9ee6e8bb 7615 break;
2c0262af 7616 }
d9ba4830 7617 store_reg(s, rd, tmp);
2c0262af 7618 break;
9ee6e8bb
PB
7619 case 6: case 7: /* 64-bit multiply, Divide. */
7620 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7621 tmp = load_reg(s, rn);
7622 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7623 if ((op & 0x50) == 0x10) {
7624 /* sdiv, udiv */
7625 if (!arm_feature(env, ARM_FEATURE_DIV))
7626 goto illegal_op;
7627 if (op & 0x20)
5e3f878a 7628 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7629 else
5e3f878a
PB
7630 gen_helper_sdiv(tmp, tmp, tmp2);
7631 dead_tmp(tmp2);
7632 store_reg(s, rd, tmp);
9ee6e8bb
PB
7633 } else if ((op & 0xe) == 0xc) {
7634 /* Dual multiply accumulate long. */
7635 if (op & 1)
5e3f878a
PB
7636 gen_swap_half(tmp2);
7637 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7638 if (op & 0x10) {
5e3f878a 7639 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7640 } else {
5e3f878a 7641 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7642 }
5e3f878a 7643 dead_tmp(tmp2);
a7812ae4
PB
7644 /* BUGFIX */
7645 tmp64 = tcg_temp_new_i64();
7646 tcg_gen_ext_i32_i64(tmp64, tmp);
7647 dead_tmp(tmp);
7648 gen_addq(s, tmp64, rs, rd);
7649 gen_storeq_reg(s, rs, rd, tmp64);
2c0262af 7650 } else {
9ee6e8bb
PB
7651 if (op & 0x20) {
7652 /* Unsigned 64-bit multiply */
a7812ae4 7653 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7654 } else {
9ee6e8bb
PB
7655 if (op & 8) {
7656 /* smlalxy */
5e3f878a
PB
7657 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7658 dead_tmp(tmp2);
a7812ae4
PB
7659 tmp64 = tcg_temp_new_i64();
7660 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7661 dead_tmp(tmp);
9ee6e8bb
PB
7662 } else {
7663 /* Signed 64-bit multiply */
a7812ae4 7664 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7665 }
b5ff1b31 7666 }
9ee6e8bb
PB
7667 if (op & 4) {
7668 /* umaal */
a7812ae4
PB
7669 gen_addq_lo(s, tmp64, rs);
7670 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7671 } else if (op & 0x40) {
7672 /* 64-bit accumulate. */
a7812ae4 7673 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7674 }
a7812ae4 7675 gen_storeq_reg(s, rs, rd, tmp64);
5fd46862 7676 }
2c0262af 7677 break;
9ee6e8bb
PB
7678 }
7679 break;
7680 case 6: case 7: case 14: case 15:
7681 /* Coprocessor. */
7682 if (((insn >> 24) & 3) == 3) {
7683 /* Translate into the equivalent ARM encoding. */
7684 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7685 if (disas_neon_data_insn(env, s, insn))
7686 goto illegal_op;
7687 } else {
7688 if (insn & (1 << 28))
7689 goto illegal_op;
7690 if (disas_coproc_insn (env, s, insn))
7691 goto illegal_op;
7692 }
7693 break;
7694 case 8: case 9: case 10: case 11:
7695 if (insn & (1 << 15)) {
7696 /* Branches, misc control. */
7697 if (insn & 0x5000) {
7698 /* Unconditional branch. */
7699 /* signextend(hw1[10:0]) -> offset[:12]. */
7700 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7701 /* hw2[10:0] -> offset[11:1]. */
7702 offset |= (insn & 0x7ff) << 1;
7703 /* offset[23] = ~hw2[13] ^ offset[24], offset[22] = ~hw2[11] ^ offset[24];
7704 offset[24:22] already have the same value because of the
7705 sign extension above. */
7706 offset ^= ((~insn) & (1 << 13)) << 10;
7707 offset ^= ((~insn) & (1 << 11)) << 11;
7708
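 /* Editor's note (not in the original source): worked example of the
 assembly above. For insn = 0xf001f800 (S=0, imm10=1, J1=J2=1, imm11=0)
 the steps above yield offset = 0x1000, i.e. a +4KiB branch-and-link
 once s->pc is added below. */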
9ee6e8bb
PB
7709 if (insn & (1 << 14)) {
7710 /* Branch and link. */
3174f8e9 7711 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 7712 }
3b46e624 7713
b0109805 7714 offset += s->pc;
9ee6e8bb
PB
7715 if (insn & (1 << 12)) {
7716 /* b/bl */
b0109805 7717 gen_jmp(s, offset);
9ee6e8bb
PB
7718 } else {
7719 /* blx */
b0109805
PB
7720 offset &= ~(uint32_t)2;
7721 gen_bx_im(s, offset);
2c0262af 7722 }
9ee6e8bb
PB
7723 } else if (((insn >> 23) & 7) == 7) {
7724 /* Misc control */
7725 if (insn & (1 << 13))
7726 goto illegal_op;
7727
7728 if (insn & (1 << 26)) {
7729 /* Secure monitor call (v6Z) */
7730 goto illegal_op; /* not implemented. */
2c0262af 7731 } else {
9ee6e8bb
PB
7732 op = (insn >> 20) & 7;
7733 switch (op) {
7734 case 0: /* msr cpsr. */
7735 if (IS_M(env)) {
8984bd2e
PB
7736 tmp = load_reg(s, rn);
7737 addr = tcg_const_i32(insn & 0xff);
7738 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7739 gen_lookup_tb(s);
7740 break;
7741 }
7742 /* fall through */
7743 case 1: /* msr spsr. */
7744 if (IS_M(env))
7745 goto illegal_op;
2fbac54b
FN
7746 tmp = load_reg(s, rn);
7747 if (gen_set_psr(s,
9ee6e8bb 7748 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7749 op == 1, tmp))
9ee6e8bb
PB
7750 goto illegal_op;
7751 break;
7752 case 2: /* cps, nop-hint. */
7753 if (((insn >> 8) & 7) == 0) {
7754 gen_nop_hint(s, insn & 0xff);
7755 }
7756 /* Implemented as NOP in user mode. */
7757 if (IS_USER(s))
7758 break;
7759 offset = 0;
7760 imm = 0;
7761 if (insn & (1 << 10)) {
7762 if (insn & (1 << 7))
7763 offset |= CPSR_A;
7764 if (insn & (1 << 6))
7765 offset |= CPSR_I;
7766 if (insn & (1 << 5))
7767 offset |= CPSR_F;
7768 if (insn & (1 << 9))
7769 imm = CPSR_A | CPSR_I | CPSR_F;
7770 }
7771 if (insn & (1 << 8)) {
7772 offset |= 0x1f;
7773 imm |= (insn & 0x1f);
7774 }
7775 if (offset) {
2fbac54b 7776 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
7777 }
7778 break;
7779 case 3: /* Special control operations. */
7780 op = (insn >> 4) & 0xf;
7781 switch (op) {
7782 case 2: /* clrex */
8f8e3aa4 7783 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7784 break;
7785 case 4: /* dsb */
7786 case 5: /* dmb */
7787 case 6: /* isb */
7788 /* These execute as NOPs. */
7789 ARCH(7);
7790 break;
7791 default:
7792 goto illegal_op;
7793 }
7794 break;
7795 case 4: /* bxj */
7796 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7797 tmp = load_reg(s, rn);
7798 gen_bx(s, tmp);
9ee6e8bb
PB
7799 break;
7800 case 5: /* Exception return. */
7801 /* Unpredictable in user mode. */
7802 goto illegal_op;
7803 case 6: /* mrs cpsr. */
8984bd2e 7804 tmp = new_tmp();
9ee6e8bb 7805 if (IS_M(env)) {
8984bd2e
PB
7806 addr = tcg_const_i32(insn & 0xff);
7807 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7808 } else {
8984bd2e 7809 gen_helper_cpsr_read(tmp);
9ee6e8bb 7810 }
8984bd2e 7811 store_reg(s, rd, tmp);
9ee6e8bb
PB
7812 break;
7813 case 7: /* mrs spsr. */
7814 /* Not accessible in user mode. */
7815 if (IS_USER(s) || IS_M(env))
7816 goto illegal_op;
d9ba4830
PB
7817 tmp = load_cpu_field(spsr);
7818 store_reg(s, rd, tmp);
9ee6e8bb 7819 break;
2c0262af
FB
7820 }
7821 }
9ee6e8bb
PB
7822 } else {
7823 /* Conditional branch. */
7824 op = (insn >> 22) & 0xf;
7825 /* Generate a conditional jump to next instruction. */
7826 s->condlabel = gen_new_label();
d9ba4830 7827 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7828 s->condjmp = 1;
7829
7830 /* offset[11:1] = insn[10:0] */
7831 offset = (insn & 0x7ff) << 1;
7832 /* offset[17:12] = insn[21:16]. */
7833 offset |= (insn & 0x003f0000) >> 4;
7834 /* offset[31:20] = insn[26]. */
7835 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7836 /* offset[18] = insn[13]. */
7837 offset |= (insn & (1 << 13)) << 5;
7838 /* offset[19] = insn[11]. */
7839 offset |= (insn & (1 << 11)) << 8;
7840
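 /* Editor's note (not in the original source): the assembled value is
 SignExtend(S:J2:J1:imm6:imm11:'0') from the T3 conditional-branch
 encoding, i.e. a signed, halfword-aligned 21-bit offset. */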
7841 /* jump to the offset */
b0109805 7842 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7843 }
7844 } else {
7845 /* Data processing immediate. */
7846 if (insn & (1 << 25)) {
7847 if (insn & (1 << 24)) {
7848 if (insn & (1 << 20))
7849 goto illegal_op;
7850 /* Bitfield/Saturate. */
7851 op = (insn >> 21) & 7;
7852 imm = insn & 0x1f;
7853 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7854 if (rn == 15) {
7855 tmp = new_tmp();
7856 tcg_gen_movi_i32(tmp, 0);
7857 } else {
7858 tmp = load_reg(s, rn);
7859 }
9ee6e8bb
PB
7860 switch (op) {
7861 case 2: /* Signed bitfield extract. */
7862 imm++;
7863 if (shift + imm > 32)
7864 goto illegal_op;
7865 if (imm < 32)
6ddbc6e4 7866 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7867 break;
7868 case 6: /* Unsigned bitfield extract. */
7869 imm++;
7870 if (shift + imm > 32)
7871 goto illegal_op;
7872 if (imm < 32)
6ddbc6e4 7873 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7874 break;
7875 case 3: /* Bitfield insert/clear. */
7876 if (imm < shift)
7877 goto illegal_op;
7878 imm = imm + 1 - shift;
7879 if (imm != 32) {
6ddbc6e4 7880 tmp2 = load_reg(s, rd);
8f8e3aa4 7881 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7882 dead_tmp(tmp2);
9ee6e8bb
PB
7883 }
7884 break;
7885 case 7:
7886 goto illegal_op;
7887 default: /* Saturate. */
9ee6e8bb
PB
7888 if (shift) {
7889 if (op & 1)
6ddbc6e4 7890 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7891 else
6ddbc6e4 7892 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7893 }
6ddbc6e4 7894 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7895 if (op & 4) {
7896 /* Unsigned. */
9ee6e8bb 7897 if ((op & 1) && shift == 0)
6ddbc6e4 7898 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7899 else
6ddbc6e4 7900 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7901 } else {
9ee6e8bb 7902 /* Signed. */
9ee6e8bb 7903 if ((op & 1) && shift == 0)
6ddbc6e4 7904 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7905 else
6ddbc6e4 7906 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7907 }
9ee6e8bb 7908 break;
2c0262af 7909 }
6ddbc6e4 7910 store_reg(s, rd, tmp);
9ee6e8bb
PB
7911 } else {
7912 imm = ((insn & 0x04000000) >> 15)
7913 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7914 if (insn & (1 << 22)) {
7915 /* 16-bit immediate. */
7916 imm |= (insn >> 4) & 0xf000;
7917 if (insn & (1 << 23)) {
7918 /* movt */
5e3f878a 7919 tmp = load_reg(s, rd);
86831435 7920 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7921 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7922 } else {
9ee6e8bb 7923 /* movw */
5e3f878a
PB
7924 tmp = new_tmp();
7925 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7926 }
7927 } else {
9ee6e8bb
PB
7928 /* Add/sub 12-bit immediate. */
7929 if (rn == 15) {
b0109805 7930 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7931 if (insn & (1 << 23))
b0109805 7932 offset -= imm;
9ee6e8bb 7933 else
b0109805 7934 offset += imm;
5e3f878a
PB
7935 tmp = new_tmp();
7936 tcg_gen_movi_i32(tmp, offset);
2c0262af 7937 } else {
5e3f878a 7938 tmp = load_reg(s, rn);
9ee6e8bb 7939 if (insn & (1 << 23))
5e3f878a 7940 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7941 else
5e3f878a 7942 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7943 }
9ee6e8bb 7944 }
5e3f878a 7945 store_reg(s, rd, tmp);
191abaa2 7946 }
9ee6e8bb
PB
7947 } else {
7948 int shifter_out = 0;
7949 /* modified 12-bit immediate. */
7950 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7951 imm = (insn & 0xff);
7952 switch (shift) {
7953 case 0: /* XY */
7954 /* Nothing to do. */
7955 break;
7956 case 1: /* 00XY00XY */
7957 imm |= imm << 16;
7958 break;
7959 case 2: /* XY00XY00 */
7960 imm |= imm << 16;
7961 imm <<= 8;
7962 break;
7963 case 3: /* XYXYXYXY */
7964 imm |= imm << 16;
7965 imm |= imm << 8;
7966 break;
7967 default: /* Rotated constant. */
7968 shift = (shift << 1) | (imm >> 7);
7969 imm |= 0x80;
7970 imm = imm << (32 - shift);
7971 shifter_out = 1;
7972 break;
b5ff1b31 7973 }
3174f8e9
FN
7974 tmp2 = new_tmp();
7975 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 7976 rn = (insn >> 16) & 0xf;
3174f8e9
FN
7977 if (rn == 15) {
7978 tmp = new_tmp();
7979 tcg_gen_movi_i32(tmp, 0);
7980 } else {
7981 tmp = load_reg(s, rn);
7982 }
9ee6e8bb
PB
7983 op = (insn >> 21) & 0xf;
7984 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 7985 shifter_out, tmp, tmp2))
9ee6e8bb 7986 goto illegal_op;
3174f8e9 7987 dead_tmp(tmp2);
9ee6e8bb
PB
7988 rd = (insn >> 8) & 0xf;
7989 if (rd != 15) {
3174f8e9
FN
7990 store_reg(s, rd, tmp);
7991 } else {
7992 dead_tmp(tmp);
2c0262af 7993 }
2c0262af 7994 }
9ee6e8bb
PB
7995 }
7996 break;
7997 case 12: /* Load/store single data item. */
7998 {
7999 int postinc = 0;
8000 int writeback = 0;
b0109805 8001 int user;
9ee6e8bb
PB
8002 if ((insn & 0x01100000) == 0x01000000) {
8003 if (disas_neon_ls_insn(env, s, insn))
c1713132 8004 goto illegal_op;
9ee6e8bb
PB
8005 break;
8006 }
b0109805 8007 user = IS_USER(s);
9ee6e8bb 8008 if (rn == 15) {
b0109805 8009 addr = new_tmp();
9ee6e8bb
PB
8010 /* PC relative. */
8011 /* s->pc has already been incremented by 4. */
8012 imm = s->pc & 0xfffffffc;
8013 if (insn & (1 << 23))
8014 imm += insn & 0xfff;
8015 else
8016 imm -= insn & 0xfff;
b0109805 8017 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8018 } else {
b0109805 8019 addr = load_reg(s, rn);
9ee6e8bb
PB
8020 if (insn & (1 << 23)) {
8021 /* Positive offset. */
8022 imm = insn & 0xfff;
b0109805 8023 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8024 } else {
8025 op = (insn >> 8) & 7;
8026 imm = insn & 0xff;
8027 switch (op) {
8028 case 0: case 8: /* Shifted Register. */
8029 shift = (insn >> 4) & 0xf;
8030 if (shift > 3)
18c9b560 8031 goto illegal_op;
b26eefb6 8032 tmp = load_reg(s, rm);
9ee6e8bb 8033 if (shift)
b26eefb6 8034 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8035 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8036 dead_tmp(tmp);
9ee6e8bb
PB
8037 break;
8038 case 4: /* Negative offset. */
b0109805 8039 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8040 break;
8041 case 6: /* User privilege. */
b0109805
PB
8042 tcg_gen_addi_i32(addr, addr, imm);
8043 user = 1;
9ee6e8bb
PB
8044 break;
8045 case 1: /* Post-decrement. */
8046 imm = -imm;
8047 /* Fall through. */
8048 case 3: /* Post-increment. */
9ee6e8bb
PB
8049 postinc = 1;
8050 writeback = 1;
8051 break;
8052 case 5: /* Pre-decrement. */
8053 imm = -imm;
8054 /* Fall through. */
8055 case 7: /* Pre-increment. */
b0109805 8056 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8057 writeback = 1;
8058 break;
8059 default:
b7bcbe95 8060 goto illegal_op;
9ee6e8bb
PB
8061 }
8062 }
8063 }
8064 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
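 /* Editor's note (not in the original source): op[1:0] = insn[22:21] is the
 transfer size (0 = byte, 1 = halfword, 2 = word) and op[2] = insn[24]
 selects sign extension, matching the load cases below. */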
8065 if (insn & (1 << 20)) {
8066 /* Load. */
8067 if (rs == 15 && op != 2) {
8068 if (op & 2)
b5ff1b31 8069 goto illegal_op;
9ee6e8bb
PB
8070 /* Memory hint. Implemented as NOP. */
8071 } else {
8072 switch (op) {
b0109805
PB
8073 case 0: tmp = gen_ld8u(addr, user); break;
8074 case 4: tmp = gen_ld8s(addr, user); break;
8075 case 1: tmp = gen_ld16u(addr, user); break;
8076 case 5: tmp = gen_ld16s(addr, user); break;
8077 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8078 default: goto illegal_op;
8079 }
8080 if (rs == 15) {
b0109805 8081 gen_bx(s, tmp);
9ee6e8bb 8082 } else {
b0109805 8083 store_reg(s, rs, tmp);
9ee6e8bb
PB
8084 }
8085 }
8086 } else {
8087 /* Store. */
8088 if (rs == 15)
b7bcbe95 8089 goto illegal_op;
b0109805 8090 tmp = load_reg(s, rs);
9ee6e8bb 8091 switch (op) {
b0109805
PB
8092 case 0: gen_st8(tmp, addr, user); break;
8093 case 1: gen_st16(tmp, addr, user); break;
8094 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8095 default: goto illegal_op;
b7bcbe95 8096 }
2c0262af 8097 }
9ee6e8bb 8098 if (postinc)
b0109805
PB
8099 tcg_gen_addi_i32(addr, addr, imm);
8100 if (writeback) {
8101 store_reg(s, rn, addr);
8102 } else {
8103 dead_tmp(addr);
8104 }
9ee6e8bb
PB
8105 }
8106 break;
8107 default:
8108 goto illegal_op;
2c0262af 8109 }
9ee6e8bb
PB
8110 return 0;
8111illegal_op:
8112 return 1;
2c0262af
FB
8113}
8114
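/* Editor's sketch (not part of the original QEMU source): a standalone
   helper that expands a Thumb-2 "modified immediate" the same way the
   switch statement over `shift` in disas_thumb2_insn above does.  The
   function name and parameter names are hypothetical and exist only to
   document the encoding; the fixed-width types are already available in
   this file via its includes. */
static inline uint32_t thumb2_expand_imm_sketch(uint32_t i, uint32_t imm3,
                                                uint32_t imm8)
{
    uint32_t shift = (i << 3) | imm3;   /* 4-bit selector, as in the code above */
    uint32_t imm = imm8;

    switch (shift) {
    case 0:                             /* 000000XY */
        break;
    case 1:                             /* 00XY00XY */
        imm |= imm << 16;
        break;
    case 2:                             /* XY00XY00 */
        imm |= imm << 16;
        imm <<= 8;
        break;
    case 3:                             /* XYXYXYXY */
        imm |= imm << 16;
        imm |= imm << 8;
        break;
    default:                            /* rotated constant: ror(1bcdefgh, i:imm3:imm8[7]) */
        shift = (shift << 1) | (imm >> 7);
        imm |= 0x80;
        /* Bit 7 is set and shift >= 8, so the left shift below is
           equivalent to a rotate right by `shift`. */
        imm = imm << (32 - shift);
        break;
    }
    return imm;
}
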
9ee6e8bb 8115static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8116{
8117 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8118 int32_t offset;
8119 int i;
b26eefb6 8120 TCGv tmp;
d9ba4830 8121 TCGv tmp2;
b0109805 8122 TCGv addr;
99c475ab 8123
9ee6e8bb
PB
8124 if (s->condexec_mask) {
8125 cond = s->condexec_cond;
8126 s->condlabel = gen_new_label();
d9ba4830 8127 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8128 s->condjmp = 1;
8129 }
8130
b5ff1b31 8131 insn = lduw_code(s->pc);
99c475ab 8132 s->pc += 2;
b5ff1b31 8133
99c475ab
FB
8134 switch (insn >> 12) {
8135 case 0: case 1:
396e467c 8136
99c475ab
FB
8137 rd = insn & 7;
8138 op = (insn >> 11) & 3;
8139 if (op == 3) {
8140 /* add/subtract */
8141 rn = (insn >> 3) & 7;
396e467c 8142 tmp = load_reg(s, rn);
99c475ab
FB
8143 if (insn & (1 << 10)) {
8144 /* immediate */
396e467c
FN
8145 tmp2 = new_tmp();
8146 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8147 } else {
8148 /* reg */
8149 rm = (insn >> 6) & 7;
396e467c 8150 tmp2 = load_reg(s, rm);
99c475ab 8151 }
9ee6e8bb
PB
8152 if (insn & (1 << 9)) {
8153 if (s->condexec_mask)
396e467c 8154 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8155 else
396e467c 8156 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8157 } else {
8158 if (s->condexec_mask)
396e467c 8159 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8160 else
396e467c 8161 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8162 }
396e467c
FN
8163 dead_tmp(tmp2);
8164 store_reg(s, rd, tmp);
99c475ab
FB
8165 } else {
8166 /* shift immediate */
8167 rm = (insn >> 3) & 7;
8168 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8169 tmp = load_reg(s, rm);
8170 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8171 if (!s->condexec_mask)
8172 gen_logic_CC(tmp);
8173 store_reg(s, rd, tmp);
99c475ab
FB
8174 }
8175 break;
8176 case 2: case 3:
8177 /* arithmetic large immediate */
8178 op = (insn >> 11) & 3;
8179 rd = (insn >> 8) & 0x7;
396e467c
FN
8180 if (op == 0) { /* mov */
8181 tmp = new_tmp();
8182 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8183 if (!s->condexec_mask)
396e467c
FN
8184 gen_logic_CC(tmp);
8185 store_reg(s, rd, tmp);
8186 } else {
8187 tmp = load_reg(s, rd);
8188 tmp2 = new_tmp();
8189 tcg_gen_movi_i32(tmp2, insn & 0xff);
8190 switch (op) {
8191 case 1: /* cmp */
8192 gen_helper_sub_cc(tmp, tmp, tmp2);
8193 dead_tmp(tmp);
8194 dead_tmp(tmp2);
8195 break;
8196 case 2: /* add */
8197 if (s->condexec_mask)
8198 tcg_gen_add_i32(tmp, tmp, tmp2);
8199 else
8200 gen_helper_add_cc(tmp, tmp, tmp2);
8201 dead_tmp(tmp2);
8202 store_reg(s, rd, tmp);
8203 break;
8204 case 3: /* sub */
8205 if (s->condexec_mask)
8206 tcg_gen_sub_i32(tmp, tmp, tmp2);
8207 else
8208 gen_helper_sub_cc(tmp, tmp, tmp2);
8209 dead_tmp(tmp2);
8210 store_reg(s, rd, tmp);
8211 break;
8212 }
99c475ab 8213 }
99c475ab
FB
8214 break;
8215 case 4:
8216 if (insn & (1 << 11)) {
8217 rd = (insn >> 8) & 7;
5899f386
FB
8218 /* load pc-relative. Bit 1 of PC is ignored. */
8219 val = s->pc + 2 + ((insn & 0xff) * 4);
8220 val &= ~(uint32_t)2;
b0109805
PB
8221 addr = new_tmp();
8222 tcg_gen_movi_i32(addr, val);
8223 tmp = gen_ld32(addr, IS_USER(s));
8224 dead_tmp(addr);
8225 store_reg(s, rd, tmp);
99c475ab
FB
8226 break;
8227 }
8228 if (insn & (1 << 10)) {
8229 /* data processing extended or blx */
8230 rd = (insn & 7) | ((insn >> 4) & 8);
8231 rm = (insn >> 3) & 0xf;
8232 op = (insn >> 8) & 3;
8233 switch (op) {
8234 case 0: /* add */
396e467c
FN
8235 tmp = load_reg(s, rd);
8236 tmp2 = load_reg(s, rm);
8237 tcg_gen_add_i32(tmp, tmp, tmp2);
8238 dead_tmp(tmp2);
8239 store_reg(s, rd, tmp);
99c475ab
FB
8240 break;
8241 case 1: /* cmp */
396e467c
FN
8242 tmp = load_reg(s, rd);
8243 tmp2 = load_reg(s, rm);
8244 gen_helper_sub_cc(tmp, tmp, tmp2);
8245 dead_tmp(tmp2);
8246 dead_tmp(tmp);
99c475ab
FB
8247 break;
8248 case 2: /* mov/cpy */
396e467c
FN
8249 tmp = load_reg(s, rm);
8250 store_reg(s, rd, tmp);
99c475ab
FB
8251 break;
8252 case 3:/* branch [and link] exchange thumb register */
b0109805 8253 tmp = load_reg(s, rm);
99c475ab
FB
8254 if (insn & (1 << 7)) {
8255 val = (uint32_t)s->pc | 1;
b0109805
PB
8256 tmp2 = new_tmp();
8257 tcg_gen_movi_i32(tmp2, val);
8258 store_reg(s, 14, tmp2);
99c475ab 8259 }
d9ba4830 8260 gen_bx(s, tmp);
99c475ab
FB
8261 break;
8262 }
8263 break;
8264 }
8265
8266 /* data processing register */
8267 rd = insn & 7;
8268 rm = (insn >> 3) & 7;
8269 op = (insn >> 6) & 0xf;
8270 if (op == 2 || op == 3 || op == 4 || op == 7) {
8271 /* the shift/rotate ops want the operands backwards */
8272 val = rm;
8273 rm = rd;
8274 rd = val;
8275 val = 1;
8276 } else {
8277 val = 0;
8278 }
8279
396e467c
FN
8280 if (op == 9) { /* neg */
8281 tmp = new_tmp();
8282 tcg_gen_movi_i32(tmp, 0);
8283 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8284 tmp = load_reg(s, rd);
8285 } else {
8286 TCGV_UNUSED(tmp);
8287 }
99c475ab 8288
396e467c 8289 tmp2 = load_reg(s, rm);
5899f386 8290 switch (op) {
99c475ab 8291 case 0x0: /* and */
396e467c 8292 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8293 if (!s->condexec_mask)
396e467c 8294 gen_logic_CC(tmp);
99c475ab
FB
8295 break;
8296 case 0x1: /* eor */
396e467c 8297 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8298 if (!s->condexec_mask)
396e467c 8299 gen_logic_CC(tmp);
99c475ab
FB
8300 break;
8301 case 0x2: /* lsl */
9ee6e8bb 8302 if (s->condexec_mask) {
396e467c 8303 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8304 } else {
396e467c
FN
8305 gen_helper_shl_cc(tmp2, tmp2, tmp);
8306 gen_logic_CC(tmp2);
9ee6e8bb 8307 }
99c475ab
FB
8308 break;
8309 case 0x3: /* lsr */
9ee6e8bb 8310 if (s->condexec_mask) {
396e467c 8311 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8312 } else {
396e467c
FN
8313 gen_helper_shr_cc(tmp2, tmp2, tmp);
8314 gen_logic_CC(tmp2);
9ee6e8bb 8315 }
99c475ab
FB
8316 break;
8317 case 0x4: /* asr */
9ee6e8bb 8318 if (s->condexec_mask) {
396e467c 8319 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8320 } else {
396e467c
FN
8321 gen_helper_sar_cc(tmp2, tmp2, tmp);
8322 gen_logic_CC(tmp2);
9ee6e8bb 8323 }
99c475ab
FB
8324 break;
8325 case 0x5: /* adc */
9ee6e8bb 8326 if (s->condexec_mask)
396e467c 8327 gen_adc(tmp, tmp2);
9ee6e8bb 8328 else
396e467c 8329 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8330 break;
8331 case 0x6: /* sbc */
9ee6e8bb 8332 if (s->condexec_mask)
396e467c 8333 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8334 else
396e467c 8335 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8336 break;
8337 case 0x7: /* ror */
9ee6e8bb 8338 if (s->condexec_mask) {
396e467c 8339 gen_helper_ror(tmp2, tmp2, tmp);
9ee6e8bb 8340 } else {
396e467c
FN
8341 gen_helper_ror_cc(tmp2, tmp2, tmp);
8342 gen_logic_CC(tmp2);
9ee6e8bb 8343 }
99c475ab
FB
8344 break;
8345 case 0x8: /* tst */
396e467c
FN
8346 tcg_gen_and_i32(tmp, tmp, tmp2);
8347 gen_logic_CC(tmp);
99c475ab 8348 rd = 16;
5899f386 8349 break;
99c475ab 8350 case 0x9: /* neg */
9ee6e8bb 8351 if (s->condexec_mask)
396e467c 8352 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8353 else
396e467c 8354 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8355 break;
8356 case 0xa: /* cmp */
396e467c 8357 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8358 rd = 16;
8359 break;
8360 case 0xb: /* cmn */
396e467c 8361 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8362 rd = 16;
8363 break;
8364 case 0xc: /* orr */
396e467c 8365 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8366 if (!s->condexec_mask)
396e467c 8367 gen_logic_CC(tmp);
99c475ab
FB
8368 break;
8369 case 0xd: /* mul */
396e467c 8370 gen_mull(tmp, tmp2);
9ee6e8bb 8371 if (!s->condexec_mask)
396e467c 8372 gen_logic_CC(tmp);
99c475ab
FB
8373 break;
8374 case 0xe: /* bic */
396e467c 8375 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb 8376 if (!s->condexec_mask)
396e467c 8377 gen_logic_CC(tmp);
99c475ab
FB
8378 break;
8379 case 0xf: /* mvn */
396e467c 8380 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8381 if (!s->condexec_mask)
396e467c 8382 gen_logic_CC(tmp2);
99c475ab 8383 val = 1;
5899f386 8384 rm = rd;
99c475ab
FB
8385 break;
8386 }
8387 if (rd != 16) {
396e467c
FN
8388 if (val) {
8389 store_reg(s, rm, tmp2);
8390 if (op != 0xf)
8391 dead_tmp(tmp);
8392 } else {
8393 store_reg(s, rd, tmp);
8394 dead_tmp(tmp2);
8395 }
8396 } else {
8397 dead_tmp(tmp);
8398 dead_tmp(tmp2);
99c475ab
FB
8399 }
8400 break;
8401
8402 case 5:
8403 /* load/store register offset. */
8404 rd = insn & 7;
8405 rn = (insn >> 3) & 7;
8406 rm = (insn >> 6) & 7;
8407 op = (insn >> 9) & 7;
b0109805 8408 addr = load_reg(s, rn);
b26eefb6 8409 tmp = load_reg(s, rm);
b0109805 8410 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8411 dead_tmp(tmp);
99c475ab
FB
8412
8413 if (op < 3) /* store */
b0109805 8414 tmp = load_reg(s, rd);
99c475ab
FB
8415
8416 switch (op) {
8417 case 0: /* str */
b0109805 8418 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8419 break;
8420 case 1: /* strh */
b0109805 8421 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8422 break;
8423 case 2: /* strb */
b0109805 8424 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8425 break;
8426 case 3: /* ldrsb */
b0109805 8427 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8428 break;
8429 case 4: /* ldr */
b0109805 8430 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8431 break;
8432 case 5: /* ldrh */
b0109805 8433 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8434 break;
8435 case 6: /* ldrb */
b0109805 8436 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8437 break;
8438 case 7: /* ldrsh */
b0109805 8439 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8440 break;
8441 }
8442 if (op >= 3) /* load */
b0109805
PB
8443 store_reg(s, rd, tmp);
8444 dead_tmp(addr);
99c475ab
FB
8445 break;
8446
8447 case 6:
8448 /* load/store word immediate offset */
8449 rd = insn & 7;
8450 rn = (insn >> 3) & 7;
b0109805 8451 addr = load_reg(s, rn);
99c475ab 8452 val = (insn >> 4) & 0x7c;
b0109805 8453 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8454
8455 if (insn & (1 << 11)) {
8456 /* load */
b0109805
PB
8457 tmp = gen_ld32(addr, IS_USER(s));
8458 store_reg(s, rd, tmp);
99c475ab
FB
8459 } else {
8460 /* store */
b0109805
PB
8461 tmp = load_reg(s, rd);
8462 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8463 }
b0109805 8464 dead_tmp(addr);
99c475ab
FB
8465 break;
8466
8467 case 7:
8468 /* load/store byte immediate offset */
8469 rd = insn & 7;
8470 rn = (insn >> 3) & 7;
b0109805 8471 addr = load_reg(s, rn);
99c475ab 8472 val = (insn >> 6) & 0x1f;
b0109805 8473 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8474
8475 if (insn & (1 << 11)) {
8476 /* load */
b0109805
PB
8477 tmp = gen_ld8u(addr, IS_USER(s));
8478 store_reg(s, rd, tmp);
99c475ab
FB
8479 } else {
8480 /* store */
b0109805
PB
8481 tmp = load_reg(s, rd);
8482 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8483 }
b0109805 8484 dead_tmp(addr);
99c475ab
FB
8485 break;
8486
8487 case 8:
8488 /* load/store halfword immediate offset */
8489 rd = insn & 7;
8490 rn = (insn >> 3) & 7;
b0109805 8491 addr = load_reg(s, rn);
99c475ab 8492 val = (insn >> 5) & 0x3e;
b0109805 8493 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8494
8495 if (insn & (1 << 11)) {
8496 /* load */
b0109805
PB
8497 tmp = gen_ld16u(addr, IS_USER(s));
8498 store_reg(s, rd, tmp);
99c475ab
FB
8499 } else {
8500 /* store */
b0109805
PB
8501 tmp = load_reg(s, rd);
8502 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8503 }
b0109805 8504 dead_tmp(addr);
99c475ab
FB
8505 break;
8506
8507 case 9:
8508 /* load/store from stack */
8509 rd = (insn >> 8) & 7;
b0109805 8510 addr = load_reg(s, 13);
99c475ab 8511 val = (insn & 0xff) * 4;
b0109805 8512 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8513
8514 if (insn & (1 << 11)) {
8515 /* load */
b0109805
PB
8516 tmp = gen_ld32(addr, IS_USER(s));
8517 store_reg(s, rd, tmp);
99c475ab
FB
8518 } else {
8519 /* store */
b0109805
PB
8520 tmp = load_reg(s, rd);
8521 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8522 }
b0109805 8523 dead_tmp(addr);
99c475ab
FB
8524 break;
8525
8526 case 10:
8527 /* add to high reg */
8528 rd = (insn >> 8) & 7;
5899f386
FB
8529 if (insn & (1 << 11)) {
8530 /* SP */
5e3f878a 8531 tmp = load_reg(s, 13);
5899f386
FB
8532 } else {
8533 /* PC. bit 1 is ignored. */
5e3f878a
PB
8534 tmp = new_tmp();
8535 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8536 }
99c475ab 8537 val = (insn & 0xff) * 4;
5e3f878a
PB
8538 tcg_gen_addi_i32(tmp, tmp, val);
8539 store_reg(s, rd, tmp);
99c475ab
FB
8540 break;
8541
8542 case 11:
8543 /* misc */
8544 op = (insn >> 8) & 0xf;
8545 switch (op) {
8546 case 0:
8547 /* adjust stack pointer */
b26eefb6 8548 tmp = load_reg(s, 13);
99c475ab
FB
8549 val = (insn & 0x7f) * 4;
8550 if (insn & (1 << 7))
6a0d8a1d 8551 val = -(int32_t)val;
b26eefb6
PB
8552 tcg_gen_addi_i32(tmp, tmp, val);
8553 store_reg(s, 13, tmp);
99c475ab
FB
8554 break;
8555
9ee6e8bb
PB
8556 case 2: /* sign/zero extend. */
8557 ARCH(6);
8558 rd = insn & 7;
8559 rm = (insn >> 3) & 7;
b0109805 8560 tmp = load_reg(s, rm);
9ee6e8bb 8561 switch ((insn >> 6) & 3) {
b0109805
PB
8562 case 0: gen_sxth(tmp); break;
8563 case 1: gen_sxtb(tmp); break;
8564 case 2: gen_uxth(tmp); break;
8565 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8566 }
b0109805 8567 store_reg(s, rd, tmp);
9ee6e8bb 8568 break;
99c475ab
FB
8569 case 4: case 5: case 0xc: case 0xd:
8570 /* push/pop */
b0109805 8571 addr = load_reg(s, 13);
5899f386
FB
8572 if (insn & (1 << 8))
8573 offset = 4;
99c475ab 8574 else
5899f386
FB
8575 offset = 0;
8576 for (i = 0; i < 8; i++) {
8577 if (insn & (1 << i))
8578 offset += 4;
8579 }
8580 if ((insn & (1 << 11)) == 0) {
b0109805 8581 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8582 }
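 /* Editor's note (not in the original source): for push (bit 11 clear) the
 base is lowered by the whole transfer size up front, so the registers,
 and lr below, are stored at ascending addresses. */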
99c475ab
FB
8583 for (i = 0; i < 8; i++) {
8584 if (insn & (1 << i)) {
8585 if (insn & (1 << 11)) {
8586 /* pop */
b0109805
PB
8587 tmp = gen_ld32(addr, IS_USER(s));
8588 store_reg(s, i, tmp);
99c475ab
FB
8589 } else {
8590 /* push */
b0109805
PB
8591 tmp = load_reg(s, i);
8592 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8593 }
5899f386 8594 /* advance to the next address. */
b0109805 8595 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8596 }
8597 }
a50f5b91 8598 TCGV_UNUSED(tmp);
99c475ab
FB
8599 if (insn & (1 << 8)) {
8600 if (insn & (1 << 11)) {
8601 /* pop pc */
b0109805 8602 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8603 /* don't set the pc until the rest of the instruction
8604 has completed */
8605 } else {
8606 /* push lr */
b0109805
PB
8607 tmp = load_reg(s, 14);
8608 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8609 }
b0109805 8610 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8611 }
5899f386 8612 if ((insn & (1 << 11)) == 0) {
b0109805 8613 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8614 }
99c475ab 8615 /* write back the new stack pointer */
b0109805 8616 store_reg(s, 13, addr);
99c475ab
FB
8617 /* set the new PC value */
8618 if ((insn & 0x0900) == 0x0900)
b0109805 8619 gen_bx(s, tmp);
99c475ab
FB
8620 break;
8621
9ee6e8bb
PB
8622 case 1: case 3: case 9: case 11: /* czb */
8623 rm = insn & 7;
d9ba4830 8624 tmp = load_reg(s, rm);
9ee6e8bb
PB
8625 s->condlabel = gen_new_label();
8626 s->condjmp = 1;
8627 if (insn & (1 << 11))
cb63669a 8628 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8629 else
cb63669a 8630 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8631 dead_tmp(tmp);
9ee6e8bb
PB
8632 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
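 /* Editor's note (not in the original source): this is ZeroExtend(i:imm5:'0'):
 insn[9] -> offset[6], insn[7:3] -> offset[5:1]. */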
8633 val = (uint32_t)s->pc + 2;
8634 val += offset;
8635 gen_jmp(s, val);
8636 break;
8637
8638 case 15: /* IT, nop-hint. */
8639 if ((insn & 0xf) == 0) {
8640 gen_nop_hint(s, (insn >> 4) & 0xf);
8641 break;
8642 }
8643 /* If Then. */
8644 s->condexec_cond = (insn >> 4) & 0xe;
8645 s->condexec_mask = insn & 0x1f;
8646 /* No actual code generated for this insn, just setup state. */
8647 break;
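 /* Editor's note (not in the original source): only firstcond[3:1] is kept
 in condexec_cond here; its low bit is re-derived from the top of the
 mask as each conditional instruction is translated (see the condexec
 update in gen_intermediate_code_internal below). */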
8648
06c949e6 8649 case 0xe: /* bkpt */
9ee6e8bb 8650 gen_set_condexec(s);
5e3f878a 8651 gen_set_pc_im(s->pc - 2);
d9ba4830 8652 gen_exception(EXCP_BKPT);
06c949e6
PB
8653 s->is_jmp = DISAS_JUMP;
8654 break;
8655
9ee6e8bb
PB
8656 case 0xa: /* rev */
8657 ARCH(6);
8658 rn = (insn >> 3) & 0x7;
8659 rd = insn & 0x7;
b0109805 8660 tmp = load_reg(s, rn);
9ee6e8bb 8661 switch ((insn >> 6) & 3) {
66896cb8 8662 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8663 case 1: gen_rev16(tmp); break;
8664 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8665 default: goto illegal_op;
8666 }
b0109805 8667 store_reg(s, rd, tmp);
9ee6e8bb
PB
8668 break;
8669
8670 case 6: /* cps */
8671 ARCH(6);
8672 if (IS_USER(s))
8673 break;
8674 if (IS_M(env)) {
8984bd2e 8675 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8676 /* PRIMASK */
8984bd2e
PB
8677 if (insn & 1) {
8678 addr = tcg_const_i32(16);
8679 gen_helper_v7m_msr(cpu_env, addr, tmp);
8680 }
9ee6e8bb 8681 /* FAULTMASK */
8984bd2e
PB
8682 if (insn & 2) {
8683 addr = tcg_const_i32(17);
8684 gen_helper_v7m_msr(cpu_env, addr, tmp);
8685 }
9ee6e8bb
PB
8686 gen_lookup_tb(s);
8687 } else {
8688 if (insn & (1 << 4))
8689 shift = CPSR_A | CPSR_I | CPSR_F;
8690 else
8691 shift = 0;
2fbac54b 8692 gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
9ee6e8bb
PB
8693 }
8694 break;
8695
99c475ab
FB
8696 default:
8697 goto undef;
8698 }
8699 break;
8700
8701 case 12:
8702 /* load/store multiple */
8703 rn = (insn >> 8) & 0x7;
b0109805 8704 addr = load_reg(s, rn);
99c475ab
FB
8705 for (i = 0; i < 8; i++) {
8706 if (insn & (1 << i)) {
99c475ab
FB
8707 if (insn & (1 << 11)) {
8708 /* load */
b0109805
PB
8709 tmp = gen_ld32(addr, IS_USER(s));
8710 store_reg(s, i, tmp);
99c475ab
FB
8711 } else {
8712 /* store */
b0109805
PB
8713 tmp = load_reg(s, i);
8714 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8715 }
5899f386 8716 /* advance to the next address */
b0109805 8717 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8718 }
8719 }
5899f386 8720 /* Base register writeback. */
b0109805
PB
8721 if ((insn & (1 << rn)) == 0) {
8722 store_reg(s, rn, addr);
8723 } else {
8724 dead_tmp(addr);
8725 }
99c475ab
FB
8726 break;
8727
8728 case 13:
8729 /* conditional branch or swi */
8730 cond = (insn >> 8) & 0xf;
8731 if (cond == 0xe)
8732 goto undef;
8733
8734 if (cond == 0xf) {
8735 /* swi */
9ee6e8bb 8736 gen_set_condexec(s);
422ebf69 8737 gen_set_pc_im(s->pc);
9ee6e8bb 8738 s->is_jmp = DISAS_SWI;
99c475ab
FB
8739 break;
8740 }
8741 /* generate a conditional jump to next instruction */
e50e6a20 8742 s->condlabel = gen_new_label();
d9ba4830 8743 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8744 s->condjmp = 1;
99c475ab
FB
8745
8746 /* jump to the offset */
5899f386 8747 val = (uint32_t)s->pc + 2;
99c475ab 8748 offset = ((int32_t)insn << 24) >> 24;
5899f386 8749 val += offset << 1;
8aaca4c0 8750 gen_jmp(s, val);
99c475ab
FB
8751 break;
8752
8753 case 14:
358bf29e 8754 if (insn & (1 << 11)) {
9ee6e8bb
PB
8755 if (disas_thumb2_insn(env, s, insn))
8756 goto undef32;
358bf29e
PB
8757 break;
8758 }
9ee6e8bb 8759 /* unconditional branch */
99c475ab
FB
8760 val = (uint32_t)s->pc;
8761 offset = ((int32_t)insn << 21) >> 21;
8762 val += (offset << 1) + 2;
8aaca4c0 8763 gen_jmp(s, val);
99c475ab
FB
8764 break;
8765
8766 case 15:
9ee6e8bb 8767 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8768 goto undef32;
9ee6e8bb 8769 break;
99c475ab
FB
8770 }
8771 return;
9ee6e8bb
PB
8772undef32:
8773 gen_set_condexec(s);
5e3f878a 8774 gen_set_pc_im(s->pc - 4);
d9ba4830 8775 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8776 s->is_jmp = DISAS_JUMP;
8777 return;
8778illegal_op:
99c475ab 8779undef:
9ee6e8bb 8780 gen_set_condexec(s);
5e3f878a 8781 gen_set_pc_im(s->pc - 2);
d9ba4830 8782 gen_exception(EXCP_UDEF);
99c475ab
FB
8783 s->is_jmp = DISAS_JUMP;
8784}
8785
2c0262af
FB
8786/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8787 basic block 'tb'. If search_pc is TRUE, also generate PC
8788 information for each intermediate instruction. */
2cfc5f17
TS
8789static inline void gen_intermediate_code_internal(CPUState *env,
8790 TranslationBlock *tb,
8791 int search_pc)
2c0262af
FB
8792{
8793 DisasContext dc1, *dc = &dc1;
a1d1bb31 8794 CPUBreakpoint *bp;
2c0262af
FB
8795 uint16_t *gen_opc_end;
8796 int j, lj;
0fa85d43 8797 target_ulong pc_start;
b5ff1b31 8798 uint32_t next_page_start;
2e70f6ef
PB
8799 int num_insns;
8800 int max_insns;
3b46e624 8801
2c0262af 8802 /* generate intermediate code */
b26eefb6 8803 num_temps = 0;
b26eefb6 8804
0fa85d43 8805 pc_start = tb->pc;
3b46e624 8806
2c0262af
FB
8807 dc->tb = tb;
8808
2c0262af 8809 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8810
8811 dc->is_jmp = DISAS_NEXT;
8812 dc->pc = pc_start;
8aaca4c0 8813 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8814 dc->condjmp = 0;
5899f386 8815 dc->thumb = env->thumb;
9ee6e8bb
PB
8816 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8817 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 8818#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8819 if (IS_M(env)) {
8820 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8821 } else {
8822 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8823 }
b5ff1b31 8824#endif
a7812ae4
PB
8825 cpu_F0s = tcg_temp_new_i32();
8826 cpu_F1s = tcg_temp_new_i32();
8827 cpu_F0d = tcg_temp_new_i64();
8828 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8829 cpu_V0 = cpu_F0d;
8830 cpu_V1 = cpu_F1d;
e677137d 8831 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8832 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8833 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8834 lj = -1;
2e70f6ef
PB
8835 num_insns = 0;
8836 max_insns = tb->cflags & CF_COUNT_MASK;
8837 if (max_insns == 0)
8838 max_insns = CF_COUNT_MASK;
8839
8840 gen_icount_start();
9ee6e8bb
PB
8841 /* Reset the conditional execution bits immediately. This avoids
8842 complications trying to do it at the end of the block. */
8843 if (env->condexec_bits)
8f01245e
PB
8844 {
8845 TCGv tmp = new_tmp();
8846 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8847 store_cpu_field(tmp, condexec_bits);
8f01245e 8848 }
2c0262af 8849 do {
fbb4a2e3
PB
8850#ifdef CONFIG_USER_ONLY
8851 /* Intercept jump to the magic kernel page. */
8852 if (dc->pc >= 0xffff0000) {
8853 /* We always get here via a jump, so know we are not in a
8854 conditional execution block. */
8855 gen_exception(EXCP_KERNEL_TRAP);
8856 dc->is_jmp = DISAS_UPDATE;
8857 break;
8858 }
8859#else
9ee6e8bb
PB
8860 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8861 /* We always get here via a jump, so know we are not in a
8862 conditional execution block. */
d9ba4830 8863 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8864 dc->is_jmp = DISAS_UPDATE;
8865 break;
9ee6e8bb
PB
8866 }
8867#endif
8868
72cf2d4f
BS
8869 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8870 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8871 if (bp->pc == dc->pc) {
9ee6e8bb 8872 gen_set_condexec(dc);
5e3f878a 8873 gen_set_pc_im(dc->pc);
d9ba4830 8874 gen_exception(EXCP_DEBUG);
1fddef4b 8875 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8876 /* Advance PC so that clearing the breakpoint will
8877 invalidate this TB. */
8878 dc->pc += 2;
8879 goto done_generating;
1fddef4b
FB
8880 break;
8881 }
8882 }
8883 }
2c0262af
FB
8884 if (search_pc) {
8885 j = gen_opc_ptr - gen_opc_buf;
8886 if (lj < j) {
8887 lj++;
8888 while (lj < j)
8889 gen_opc_instr_start[lj++] = 0;
8890 }
0fa85d43 8891 gen_opc_pc[lj] = dc->pc;
2c0262af 8892 gen_opc_instr_start[lj] = 1;
2e70f6ef 8893 gen_opc_icount[lj] = num_insns;
2c0262af 8894 }
e50e6a20 8895
2e70f6ef
PB
8896 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8897 gen_io_start();
8898
9ee6e8bb
PB
8899 if (env->thumb) {
8900 disas_thumb_insn(env, dc);
8901 if (dc->condexec_mask) {
8902 dc->condexec_cond = (dc->condexec_cond & 0xe)
8903 | ((dc->condexec_mask >> 4) & 1);
8904 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8905 if (dc->condexec_mask == 0) {
8906 dc->condexec_cond = 0;
8907 }
8908 }
8909 } else {
8910 disas_arm_insn(env, dc);
8911 }
b26eefb6
PB
8912 if (num_temps) {
8913 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8914 num_temps = 0;
8915 }
e50e6a20
FB
8916
8917 if (dc->condjmp && !dc->is_jmp) {
8918 gen_set_label(dc->condlabel);
8919 dc->condjmp = 0;
8920 }
aaf2d97d 8921 /* Translation stops when a conditional branch is encountered.
e50e6a20 8922 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8923 * Also stop translation when a page boundary is reached. This
bf20dc07 8924 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8925 num_insns ++;
1fddef4b
FB
8926 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8927 !env->singlestep_enabled &&
1b530a6d 8928 !singlestep &&
2e70f6ef
PB
8929 dc->pc < next_page_start &&
8930 num_insns < max_insns);
8931
8932 if (tb->cflags & CF_LAST_IO) {
8933 if (dc->condjmp) {
8934 /* FIXME: This can theoretically happen with self-modifying
8935 code. */
8936 cpu_abort(env, "IO on conditional branch instruction");
8937 }
8938 gen_io_end();
8939 }
9ee6e8bb 8940
b5ff1b31 8941 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8942 instruction was a conditional branch or trap, and the PC has
8943 already been written. */
551bd27f 8944 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8945 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8946 if (dc->condjmp) {
9ee6e8bb
PB
8947 gen_set_condexec(dc);
8948 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8949 gen_exception(EXCP_SWI);
9ee6e8bb 8950 } else {
d9ba4830 8951 gen_exception(EXCP_DEBUG);
9ee6e8bb 8952 }
e50e6a20
FB
8953 gen_set_label(dc->condlabel);
8954 }
8955 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8956 gen_set_pc_im(dc->pc);
e50e6a20 8957 dc->condjmp = 0;
8aaca4c0 8958 }
9ee6e8bb
PB
8959 gen_set_condexec(dc);
8960 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8961 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8962 } else {
8963 /* FIXME: Single stepping a WFI insn will not halt
8964 the CPU. */
d9ba4830 8965 gen_exception(EXCP_DEBUG);
9ee6e8bb 8966 }
8aaca4c0 8967 } else {
9ee6e8bb
PB
8968 /* While branches must always occur at the end of an IT block,
8969 there are a few other things that can cause us to terminate
8970 the TB in the middle of an IT block:
8971 - Exception generating instructions (bkpt, swi, undefined).
8972 - Page boundaries.
8973 - Hardware watchpoints.
8974 Hardware breakpoints have already been handled and skip this code.
8975 */
8976 gen_set_condexec(dc);
8aaca4c0 8977 switch(dc->is_jmp) {
8aaca4c0 8978 case DISAS_NEXT:
6e256c93 8979 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8980 break;
8981 default:
8982 case DISAS_JUMP:
8983 case DISAS_UPDATE:
8984 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8985 tcg_gen_exit_tb(0);
8aaca4c0
FB
8986 break;
8987 case DISAS_TB_JUMP:
8988 /* nothing more to generate */
8989 break;
9ee6e8bb 8990 case DISAS_WFI:
d9ba4830 8991 gen_helper_wfi();
9ee6e8bb
PB
8992 break;
8993 case DISAS_SWI:
d9ba4830 8994 gen_exception(EXCP_SWI);
9ee6e8bb 8995 break;
8aaca4c0 8996 }
e50e6a20
FB
8997 if (dc->condjmp) {
8998 gen_set_label(dc->condlabel);
9ee6e8bb 8999 gen_set_condexec(dc);
6e256c93 9000 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9001 dc->condjmp = 0;
9002 }
2c0262af 9003 }
2e70f6ef 9004
9ee6e8bb 9005done_generating:
2e70f6ef 9006 gen_icount_end(tb, num_insns);
2c0262af
FB
9007 *gen_opc_ptr = INDEX_op_end;
9008
9009#ifdef DEBUG_DISAS
8fec2b8c 9010 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9011 qemu_log("----------------\n");
9012 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9013 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
9014 qemu_log("\n");
2c0262af
FB
9015 }
9016#endif
b5ff1b31
FB
9017 if (search_pc) {
9018 j = gen_opc_ptr - gen_opc_buf;
9019 lj++;
9020 while (lj <= j)
9021 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9022 } else {
2c0262af 9023 tb->size = dc->pc - pc_start;
2e70f6ef 9024 tb->icount = num_insns;
b5ff1b31 9025 }
2c0262af
FB
9026}
9027
2cfc5f17 9028void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9029{
2cfc5f17 9030 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9031}
9032
2cfc5f17 9033void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9034{
2cfc5f17 9035 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9036}
9037
b5ff1b31
FB
9038static const char *cpu_mode_names[16] = {
9039 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9040 "???", "???", "???", "und", "???", "???", "???", "sys"
9041};
9ee6e8bb 9042
5fafdf24 9043void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
9044 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
9045 int flags)
2c0262af
FB
9046{
9047 int i;
06e80fc9 9048#if 0
bc380d17 9049 union {
b7bcbe95
FB
9050 uint32_t i;
9051 float s;
9052 } s0, s1;
9053 CPU_DoubleU d;
a94a6abf
PB
9054 /* ??? This assumes float64 and double have the same layout.
9055 Oh well, it's only debug dumps. */
9056 union {
9057 float64 f64;
9058 double d;
9059 } d0;
06e80fc9 9060#endif
b5ff1b31 9061 uint32_t psr;
2c0262af
FB
9062
9063 for(i=0;i<16;i++) {
7fe48483 9064 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9065 if ((i % 4) == 3)
7fe48483 9066 cpu_fprintf(f, "\n");
2c0262af 9067 else
7fe48483 9068 cpu_fprintf(f, " ");
2c0262af 9069 }
b5ff1b31 9070 psr = cpsr_read(env);
687fa640
TS
9071 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9072 psr,
b5ff1b31
FB
9073 psr & (1 << 31) ? 'N' : '-',
9074 psr & (1 << 30) ? 'Z' : '-',
9075 psr & (1 << 29) ? 'C' : '-',
9076 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9077 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9078 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9079
5e3f878a 9080#if 0
b7bcbe95 9081 for (i = 0; i < 16; i++) {
8e96005d
FB
9082 d.d = env->vfp.regs[i];
9083 s0.i = d.l.lower;
9084 s1.i = d.l.upper;
a94a6abf
PB
9085 d0.f64 = d.d;
9086 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9087 i * 2, (int)s0.i, s0.s,
a94a6abf 9088 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9089 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9090 d0.d);
b7bcbe95 9091 }
40f137e1 9092 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9093#endif
2c0262af 9094}
a6b025d3 9095
d2856f1a
AJ
9096void gen_pc_load(CPUState *env, TranslationBlock *tb,
9097 unsigned long searched_pc, int pc_pos, void *puc)
9098{
9099 env->regs[15] = gen_opc_pc[pc_pos];
9100}