]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
target-arm: replace thumb usage of cpu_T registers by proper register allocations
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
28#include "exec-all.h"
29#include "disas.h"
57fec1fe 30#include "tcg-op.h"
79383c9c 31#include "qemu-log.h"
1497c961 32
a7812ae4 33#include "helpers.h"
1497c961 34#define GEN_HELPER 1
b26eefb6 35#include "helpers.h"
2c0262af 36
9ee6e8bb
PB
37#define ENABLE_ARCH_5J 0
38#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 42
86753403 43#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 44
2c0262af
FB
45/* internal defines */
46typedef struct DisasContext {
0fa85d43 47 target_ulong pc;
2c0262af 48 int is_jmp;
e50e6a20
FB
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
9ee6e8bb
PB
53 /* Thumb-2 condtional execution bits. */
54 int condexec_mask;
55 int condexec_cond;
2c0262af 56 struct TranslationBlock *tb;
8aaca4c0 57 int singlestep_enabled;
5899f386 58 int thumb;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
2c0262af
FB
62} DisasContext;
63
b5ff1b31
FB
64#if defined(CONFIG_USER_ONLY)
65#define IS_USER(s) 1
66#else
67#define IS_USER(s) (s->user)
68#endif
69
9ee6e8bb
PB
70/* These instructions trap after executing, so defer them until after the
71 conditional executions state has been updated. */
72#define DISAS_WFI 4
73#define DISAS_SWI 5
2c0262af 74
a7812ae4 75static TCGv_ptr cpu_env;
ad69471c 76/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 77static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 78static TCGv_i32 cpu_R[16];
ad69471c 79
b26eefb6 80/* FIXME: These should be removed. */
8f8e3aa4 81static TCGv cpu_T[2];
a7812ae4
PB
82static TCGv cpu_F0s, cpu_F1s;
83static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 84
2e70f6ef
PB
85#define ICOUNT_TEMP cpu_T[0]
86#include "gen-icount.h"
87
155c3eac
FN
88static const char *regnames[] =
89 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
90 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
91
b26eefb6
PB
92/* initialize TCG globals. */
93void arm_translate_init(void)
94{
155c3eac
FN
95 int i;
96
a7812ae4
PB
97 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
98
99 cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
100 cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
b26eefb6 101
155c3eac
FN
102 for (i = 0; i < 16; i++) {
103 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
104 offsetof(CPUState, regs[i]),
105 regnames[i]);
106 }
107
a7812ae4
PB
108#define GEN_HELPER 2
109#include "helpers.h"
b26eefb6
PB
110}
111
b26eefb6 112static int num_temps;
b26eefb6
PB
113
114/* Allocate a temporary variable. */
a7812ae4 115static TCGv_i32 new_tmp(void)
b26eefb6 116{
12edd4f2
FN
117 num_temps++;
118 return tcg_temp_new_i32();
b26eefb6
PB
119}
120
121/* Release a temporary variable. */
122static void dead_tmp(TCGv tmp)
123{
12edd4f2 124 tcg_temp_free(tmp);
b26eefb6 125 num_temps--;
b26eefb6
PB
126}
127
d9ba4830
PB
128static inline TCGv load_cpu_offset(int offset)
129{
130 TCGv tmp = new_tmp();
131 tcg_gen_ld_i32(tmp, cpu_env, offset);
132 return tmp;
133}
134
135#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
136
137static inline void store_cpu_offset(TCGv var, int offset)
138{
139 tcg_gen_st_i32(var, cpu_env, offset);
140 dead_tmp(var);
141}
142
143#define store_cpu_field(var, name) \
144 store_cpu_offset(var, offsetof(CPUState, name))
145
b26eefb6
PB
146/* Set a variable to the value of a CPU register. */
147static void load_reg_var(DisasContext *s, TCGv var, int reg)
148{
149 if (reg == 15) {
150 uint32_t addr;
151 /* normaly, since we updated PC, we need only to add one insn */
152 if (s->thumb)
153 addr = (long)s->pc + 2;
154 else
155 addr = (long)s->pc + 4;
156 tcg_gen_movi_i32(var, addr);
157 } else {
155c3eac 158 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
159 }
160}
161
162/* Create a new temporary and set it to the value of a CPU register. */
163static inline TCGv load_reg(DisasContext *s, int reg)
164{
165 TCGv tmp = new_tmp();
166 load_reg_var(s, tmp, reg);
167 return tmp;
168}
169
170/* Set a CPU register. The source must be a temporary and will be
171 marked as dead. */
172static void store_reg(DisasContext *s, int reg, TCGv var)
173{
174 if (reg == 15) {
175 tcg_gen_andi_i32(var, var, ~1);
176 s->is_jmp = DISAS_JUMP;
177 }
155c3eac 178 tcg_gen_mov_i32(cpu_R[reg], var);
b26eefb6
PB
179 dead_tmp(var);
180}
181
182
/* Basic operations on the cpu_T shadow registers.  */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)

#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])

#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])

#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
216
217static void gen_exception(int excp)
218{
219 TCGv tmp = new_tmp();
220 tcg_gen_movi_i32(tmp, excp);
221 gen_helper_exception(tmp);
222 dead_tmp(tmp);
223}
224
3670669c
PB
225static void gen_smul_dual(TCGv a, TCGv b)
226{
227 TCGv tmp1 = new_tmp();
228 TCGv tmp2 = new_tmp();
22478e79
AZ
229 tcg_gen_ext16s_i32(tmp1, a);
230 tcg_gen_ext16s_i32(tmp2, b);
3670669c
PB
231 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
232 dead_tmp(tmp2);
233 tcg_gen_sari_i32(a, a, 16);
234 tcg_gen_sari_i32(b, b, 16);
235 tcg_gen_mul_i32(b, b, a);
236 tcg_gen_mov_i32(a, tmp1);
237 dead_tmp(tmp1);
238}
239
240/* Byteswap each halfword. */
241static void gen_rev16(TCGv var)
242{
243 TCGv tmp = new_tmp();
244 tcg_gen_shri_i32(tmp, var, 8);
245 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
246 tcg_gen_shli_i32(var, var, 8);
247 tcg_gen_andi_i32(var, var, 0xff00ff00);
248 tcg_gen_or_i32(var, var, tmp);
249 dead_tmp(tmp);
250}
251
252/* Byteswap low halfword and sign extend. */
253static void gen_revsh(TCGv var)
254{
255 TCGv tmp = new_tmp();
256 tcg_gen_shri_i32(tmp, var, 8);
257 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
258 tcg_gen_shli_i32(var, var, 8);
259 tcg_gen_ext8s_i32(var, var);
260 tcg_gen_or_i32(var, var, tmp);
261 dead_tmp(tmp);
262}
263
264/* Unsigned bitfield extract. */
265static void gen_ubfx(TCGv var, int shift, uint32_t mask)
266{
267 if (shift)
268 tcg_gen_shri_i32(var, var, shift);
269 tcg_gen_andi_i32(var, var, mask);
270}
271
272/* Signed bitfield extract. */
273static void gen_sbfx(TCGv var, int shift, int width)
274{
275 uint32_t signbit;
276
277 if (shift)
278 tcg_gen_sari_i32(var, var, shift);
279 if (shift + width < 32) {
280 signbit = 1u << (width - 1);
281 tcg_gen_andi_i32(var, var, (1u << width) - 1);
282 tcg_gen_xori_i32(var, var, signbit);
283 tcg_gen_subi_i32(var, var, signbit);
284 }
285}
286
287/* Bitfield insertion. Insert val into base. Clobbers base and val. */
288static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
289{
3670669c 290 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
291 tcg_gen_shli_i32(val, val, shift);
292 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
293 tcg_gen_or_i32(dest, base, val);
294}
295
d9ba4830
PB
296/* Round the top 32 bits of a 64-bit value. */
297static void gen_roundqd(TCGv a, TCGv b)
3670669c 298{
d9ba4830
PB
299 tcg_gen_shri_i32(a, a, 31);
300 tcg_gen_add_i32(a, a, b);
3670669c
PB
301}
302
8f01245e
PB
303/* FIXME: Most targets have native widening multiplication.
304 It would be good to use that instead of a full wide multiply. */
5e3f878a 305/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 306static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 307{
a7812ae4
PB
308 TCGv_i64 tmp1 = tcg_temp_new_i64();
309 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
310
311 tcg_gen_extu_i32_i64(tmp1, a);
312 dead_tmp(a);
313 tcg_gen_extu_i32_i64(tmp2, b);
314 dead_tmp(b);
315 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
316 return tmp1;
317}
318
a7812ae4 319static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 320{
a7812ae4
PB
321 TCGv_i64 tmp1 = tcg_temp_new_i64();
322 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
323
324 tcg_gen_ext_i32_i64(tmp1, a);
325 dead_tmp(a);
326 tcg_gen_ext_i32_i64(tmp2, b);
327 dead_tmp(b);
328 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
329 return tmp1;
330}
331
8f01245e 332/* Unsigned 32x32->64 multiply. */
396e467c 333static void gen_mull(TCGv a, TCGv b)
8f01245e 334{
a7812ae4
PB
335 TCGv_i64 tmp1 = tcg_temp_new_i64();
336 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e 337
396e467c
FN
338 tcg_gen_extu_i32_i64(tmp1, a);
339 tcg_gen_extu_i32_i64(tmp2, b);
8f01245e 340 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
396e467c 341 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 342 tcg_gen_shri_i64(tmp1, tmp1, 32);
396e467c 343 tcg_gen_trunc_i64_i32(b, tmp1);
8f01245e
PB
344}
345
346/* Signed 32x32->64 multiply. */
d9ba4830 347static void gen_imull(TCGv a, TCGv b)
8f01245e 348{
a7812ae4
PB
349 TCGv_i64 tmp1 = tcg_temp_new_i64();
350 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e 351
d9ba4830
PB
352 tcg_gen_ext_i32_i64(tmp1, a);
353 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 354 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 355 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 356 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
357 tcg_gen_trunc_i64_i32(b, tmp1);
358}
d9ba4830 359
8f01245e
PB
360/* Swap low and high halfwords. */
361static void gen_swap_half(TCGv var)
362{
363 TCGv tmp = new_tmp();
364 tcg_gen_shri_i32(tmp, var, 16);
365 tcg_gen_shli_i32(var, var, 16);
366 tcg_gen_or_i32(var, var, tmp);
3670669c 367 dead_tmp(tmp);
8f01245e
PB
368}
369
b26eefb6
PB
370/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
371 tmp = (t0 ^ t1) & 0x8000;
372 t0 &= ~0x8000;
373 t1 &= ~0x8000;
374 t0 = (t0 + t1) ^ tmp;
375 */
376
377static void gen_add16(TCGv t0, TCGv t1)
378{
379 TCGv tmp = new_tmp();
380 tcg_gen_xor_i32(tmp, t0, t1);
381 tcg_gen_andi_i32(tmp, tmp, 0x8000);
382 tcg_gen_andi_i32(t0, t0, ~0x8000);
383 tcg_gen_andi_i32(t1, t1, ~0x8000);
384 tcg_gen_add_i32(t0, t0, t1);
385 tcg_gen_xor_i32(t0, t0, tmp);
386 dead_tmp(tmp);
387 dead_tmp(t1);
388}
389
9a119ff6
PB
390#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
391
b26eefb6
PB
392/* Set CF to the top bit of var. */
393static void gen_set_CF_bit31(TCGv var)
394{
395 TCGv tmp = new_tmp();
396 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 397 gen_set_CF(tmp);
b26eefb6
PB
398 dead_tmp(tmp);
399}
400
401/* Set N and Z flags from var. */
402static inline void gen_logic_CC(TCGv var)
403{
6fbe23d5
PB
404 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
405 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
406}
407
408/* T0 += T1 + CF. */
396e467c 409static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 410{
d9ba4830 411 TCGv tmp;
396e467c 412 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 413 tmp = load_cpu_field(CF);
396e467c 414 tcg_gen_add_i32(t0, t0, tmp);
b26eefb6
PB
415 dead_tmp(tmp);
416}
417
e9bb4aa9
JR
418/* dest = T0 + T1 + CF. */
419static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
420{
421 TCGv tmp;
422 tcg_gen_add_i32(dest, t0, t1);
423 tmp = load_cpu_field(CF);
424 tcg_gen_add_i32(dest, dest, tmp);
425 dead_tmp(tmp);
426}
427
3670669c
PB
428/* dest = T0 - T1 + CF - 1. */
429static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
430{
d9ba4830 431 TCGv tmp;
3670669c 432 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 433 tmp = load_cpu_field(CF);
3670669c
PB
434 tcg_gen_add_i32(dest, dest, tmp);
435 tcg_gen_subi_i32(dest, dest, 1);
436 dead_tmp(tmp);
437}
438
b26eefb6
PB
439/* T0 &= ~T1. Clobbers T1. */
440/* FIXME: Implement bic natively. */
8f8e3aa4
PB
441static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
442{
443 TCGv tmp = new_tmp();
444 tcg_gen_not_i32(tmp, t1);
445 tcg_gen_and_i32(dest, t0, tmp);
446 dead_tmp(tmp);
447}
b26eefb6
PB
448static inline void gen_op_bicl_T0_T1(void)
449{
450 gen_op_notl_T1();
451 gen_op_andl_T0_T1();
452}
453
ad69471c
PB
454/* FIXME: Implement this natively. */
455#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
456
b26eefb6
PB
457/* FIXME: Implement this natively. */
458static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
459{
460 TCGv tmp;
461
462 if (i == 0)
463 return;
464
465 tmp = new_tmp();
466 tcg_gen_shri_i32(tmp, t1, i);
467 tcg_gen_shli_i32(t1, t1, 32 - i);
468 tcg_gen_or_i32(t0, t1, tmp);
469 dead_tmp(tmp);
470}
471
9a119ff6 472static void shifter_out_im(TCGv var, int shift)
b26eefb6 473{
9a119ff6
PB
474 TCGv tmp = new_tmp();
475 if (shift == 0) {
476 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 477 } else {
9a119ff6 478 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 479 if (shift != 31)
9a119ff6
PB
480 tcg_gen_andi_i32(tmp, tmp, 1);
481 }
482 gen_set_CF(tmp);
483 dead_tmp(tmp);
484}
b26eefb6 485
9a119ff6
PB
486/* Shift by immediate. Includes special handling for shift == 0. */
487static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
488{
489 switch (shiftop) {
490 case 0: /* LSL */
491 if (shift != 0) {
492 if (flags)
493 shifter_out_im(var, 32 - shift);
494 tcg_gen_shli_i32(var, var, shift);
495 }
496 break;
497 case 1: /* LSR */
498 if (shift == 0) {
499 if (flags) {
500 tcg_gen_shri_i32(var, var, 31);
501 gen_set_CF(var);
502 }
503 tcg_gen_movi_i32(var, 0);
504 } else {
505 if (flags)
506 shifter_out_im(var, shift - 1);
507 tcg_gen_shri_i32(var, var, shift);
508 }
509 break;
510 case 2: /* ASR */
511 if (shift == 0)
512 shift = 32;
513 if (flags)
514 shifter_out_im(var, shift - 1);
515 if (shift == 32)
516 shift = 31;
517 tcg_gen_sari_i32(var, var, shift);
518 break;
519 case 3: /* ROR/RRX */
520 if (shift != 0) {
521 if (flags)
522 shifter_out_im(var, shift - 1);
523 tcg_gen_rori_i32(var, var, shift); break;
524 } else {
d9ba4830 525 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
526 if (flags)
527 shifter_out_im(var, 0);
528 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
529 tcg_gen_shli_i32(tmp, tmp, 31);
530 tcg_gen_or_i32(var, var, tmp);
531 dead_tmp(tmp);
b26eefb6
PB
532 }
533 }
534};
535
8984bd2e
PB
536static inline void gen_arm_shift_reg(TCGv var, int shiftop,
537 TCGv shift, int flags)
538{
539 if (flags) {
540 switch (shiftop) {
541 case 0: gen_helper_shl_cc(var, var, shift); break;
542 case 1: gen_helper_shr_cc(var, var, shift); break;
543 case 2: gen_helper_sar_cc(var, var, shift); break;
544 case 3: gen_helper_ror_cc(var, var, shift); break;
545 }
546 } else {
547 switch (shiftop) {
548 case 0: gen_helper_shl(var, var, shift); break;
549 case 1: gen_helper_shr(var, var, shift); break;
550 case 2: gen_helper_sar(var, var, shift); break;
551 case 3: gen_helper_ror(var, var, shift); break;
552 }
553 }
554 dead_tmp(shift);
555}
556
6ddbc6e4
PB
557#define PAS_OP(pfx) \
558 switch (op2) { \
559 case 0: gen_pas_helper(glue(pfx,add16)); break; \
560 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
561 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
562 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
563 case 4: gen_pas_helper(glue(pfx,add8)); break; \
564 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
565 }
d9ba4830 566static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 567{
a7812ae4 568 TCGv_ptr tmp;
6ddbc6e4
PB
569
570 switch (op1) {
571#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
572 case 1:
a7812ae4 573 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
575 PAS_OP(s)
576 break;
577 case 5:
a7812ae4 578 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
579 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
580 PAS_OP(u)
581 break;
582#undef gen_pas_helper
583#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
584 case 2:
585 PAS_OP(q);
586 break;
587 case 3:
588 PAS_OP(sh);
589 break;
590 case 6:
591 PAS_OP(uq);
592 break;
593 case 7:
594 PAS_OP(uh);
595 break;
596#undef gen_pas_helper
597 }
598}
9ee6e8bb
PB
599#undef PAS_OP
600
6ddbc6e4
PB
601/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
602#define PAS_OP(pfx) \
603 switch (op2) { \
604 case 0: gen_pas_helper(glue(pfx,add8)); break; \
605 case 1: gen_pas_helper(glue(pfx,add16)); break; \
606 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
607 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
608 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
609 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
610 }
d9ba4830 611static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 612{
a7812ae4 613 TCGv_ptr tmp;
6ddbc6e4
PB
614
615 switch (op1) {
616#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
617 case 0:
a7812ae4 618 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
619 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
620 PAS_OP(s)
621 break;
622 case 4:
a7812ae4 623 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
624 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
625 PAS_OP(u)
626 break;
627#undef gen_pas_helper
628#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
629 case 1:
630 PAS_OP(q);
631 break;
632 case 2:
633 PAS_OP(sh);
634 break;
635 case 5:
636 PAS_OP(uq);
637 break;
638 case 6:
639 PAS_OP(uh);
640 break;
641#undef gen_pas_helper
642 }
643}
9ee6e8bb
PB
644#undef PAS_OP
645
d9ba4830
PB
646static void gen_test_cc(int cc, int label)
647{
648 TCGv tmp;
649 TCGv tmp2;
d9ba4830
PB
650 int inv;
651
d9ba4830
PB
652 switch (cc) {
653 case 0: /* eq: Z */
6fbe23d5 654 tmp = load_cpu_field(ZF);
cb63669a 655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
656 break;
657 case 1: /* ne: !Z */
6fbe23d5 658 tmp = load_cpu_field(ZF);
cb63669a 659 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
660 break;
661 case 2: /* cs: C */
662 tmp = load_cpu_field(CF);
cb63669a 663 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
664 break;
665 case 3: /* cc: !C */
666 tmp = load_cpu_field(CF);
cb63669a 667 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
668 break;
669 case 4: /* mi: N */
6fbe23d5 670 tmp = load_cpu_field(NF);
cb63669a 671 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
672 break;
673 case 5: /* pl: !N */
6fbe23d5 674 tmp = load_cpu_field(NF);
cb63669a 675 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
676 break;
677 case 6: /* vs: V */
678 tmp = load_cpu_field(VF);
cb63669a 679 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
680 break;
681 case 7: /* vc: !V */
682 tmp = load_cpu_field(VF);
cb63669a 683 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
684 break;
685 case 8: /* hi: C && !Z */
686 inv = gen_new_label();
687 tmp = load_cpu_field(CF);
cb63669a 688 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 689 dead_tmp(tmp);
6fbe23d5 690 tmp = load_cpu_field(ZF);
cb63669a 691 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
692 gen_set_label(inv);
693 break;
694 case 9: /* ls: !C || Z */
695 tmp = load_cpu_field(CF);
cb63669a 696 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 697 dead_tmp(tmp);
6fbe23d5 698 tmp = load_cpu_field(ZF);
cb63669a 699 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
700 break;
701 case 10: /* ge: N == V -> N ^ V == 0 */
702 tmp = load_cpu_field(VF);
6fbe23d5 703 tmp2 = load_cpu_field(NF);
d9ba4830
PB
704 tcg_gen_xor_i32(tmp, tmp, tmp2);
705 dead_tmp(tmp2);
cb63669a 706 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
707 break;
708 case 11: /* lt: N != V -> N ^ V != 0 */
709 tmp = load_cpu_field(VF);
6fbe23d5 710 tmp2 = load_cpu_field(NF);
d9ba4830
PB
711 tcg_gen_xor_i32(tmp, tmp, tmp2);
712 dead_tmp(tmp2);
cb63669a 713 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
714 break;
715 case 12: /* gt: !Z && N == V */
716 inv = gen_new_label();
6fbe23d5 717 tmp = load_cpu_field(ZF);
cb63669a 718 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
719 dead_tmp(tmp);
720 tmp = load_cpu_field(VF);
6fbe23d5 721 tmp2 = load_cpu_field(NF);
d9ba4830
PB
722 tcg_gen_xor_i32(tmp, tmp, tmp2);
723 dead_tmp(tmp2);
cb63669a 724 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
725 gen_set_label(inv);
726 break;
727 case 13: /* le: Z || N != V */
6fbe23d5 728 tmp = load_cpu_field(ZF);
cb63669a 729 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
730 dead_tmp(tmp);
731 tmp = load_cpu_field(VF);
6fbe23d5 732 tmp2 = load_cpu_field(NF);
d9ba4830
PB
733 tcg_gen_xor_i32(tmp, tmp, tmp2);
734 dead_tmp(tmp2);
cb63669a 735 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
736 break;
737 default:
738 fprintf(stderr, "Bad condition code 0x%x\n", cc);
739 abort();
740 }
741 dead_tmp(tmp);
742}
2c0262af 743
b1d8e52e 744static const uint8_t table_logic_cc[16] = {
2c0262af
FB
745 1, /* and */
746 1, /* xor */
747 0, /* sub */
748 0, /* rsb */
749 0, /* add */
750 0, /* adc */
751 0, /* sbc */
752 0, /* rsc */
753 1, /* andl */
754 1, /* xorl */
755 0, /* cmp */
756 0, /* cmn */
757 1, /* orr */
758 1, /* mov */
759 1, /* bic */
760 1, /* mvn */
761};
3b46e624 762
d9ba4830
PB
763/* Set PC and Thumb state from an immediate address. */
764static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 765{
b26eefb6 766 TCGv tmp;
99c475ab 767
b26eefb6 768 s->is_jmp = DISAS_UPDATE;
d9ba4830 769 if (s->thumb != (addr & 1)) {
155c3eac 770 tmp = new_tmp();
d9ba4830
PB
771 tcg_gen_movi_i32(tmp, addr & 1);
772 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
155c3eac 773 dead_tmp(tmp);
d9ba4830 774 }
155c3eac 775 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
776}
777
778/* Set PC and Thumb state from var. var is marked as dead. */
779static inline void gen_bx(DisasContext *s, TCGv var)
780{
d9ba4830 781 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
782 tcg_gen_andi_i32(cpu_R[15], var, ~1);
783 tcg_gen_andi_i32(var, var, 1);
784 store_cpu_field(var, thumb);
d9ba4830
PB
785}
786
21aeb343
JR
787/* Variant of store_reg which uses branch&exchange logic when storing
788 to r15 in ARM architecture v7 and above. The source must be a temporary
789 and will be marked as dead. */
790static inline void store_reg_bx(CPUState *env, DisasContext *s,
791 int reg, TCGv var)
792{
793 if (reg == 15 && ENABLE_ARCH_7) {
794 gen_bx(s, var);
795 } else {
796 store_reg(s, reg, var);
797 }
798}
799
b0109805
PB
800static inline TCGv gen_ld8s(TCGv addr, int index)
801{
802 TCGv tmp = new_tmp();
803 tcg_gen_qemu_ld8s(tmp, addr, index);
804 return tmp;
805}
806static inline TCGv gen_ld8u(TCGv addr, int index)
807{
808 TCGv tmp = new_tmp();
809 tcg_gen_qemu_ld8u(tmp, addr, index);
810 return tmp;
811}
812static inline TCGv gen_ld16s(TCGv addr, int index)
813{
814 TCGv tmp = new_tmp();
815 tcg_gen_qemu_ld16s(tmp, addr, index);
816 return tmp;
817}
818static inline TCGv gen_ld16u(TCGv addr, int index)
819{
820 TCGv tmp = new_tmp();
821 tcg_gen_qemu_ld16u(tmp, addr, index);
822 return tmp;
823}
824static inline TCGv gen_ld32(TCGv addr, int index)
825{
826 TCGv tmp = new_tmp();
827 tcg_gen_qemu_ld32u(tmp, addr, index);
828 return tmp;
829}
830static inline void gen_st8(TCGv val, TCGv addr, int index)
831{
832 tcg_gen_qemu_st8(val, addr, index);
833 dead_tmp(val);
834}
835static inline void gen_st16(TCGv val, TCGv addr, int index)
836{
837 tcg_gen_qemu_st16(val, addr, index);
838 dead_tmp(val);
839}
840static inline void gen_st32(TCGv val, TCGv addr, int index)
841{
842 tcg_gen_qemu_st32(val, addr, index);
843 dead_tmp(val);
844}
b5ff1b31 845
2c0262af
FB
846static inline void gen_movl_T0_reg(DisasContext *s, int reg)
847{
b26eefb6 848 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
849}
850
851static inline void gen_movl_T1_reg(DisasContext *s, int reg)
852{
b26eefb6 853 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
854}
855
5e3f878a
PB
856static inline void gen_set_pc_im(uint32_t val)
857{
155c3eac 858 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
859}
860
2c0262af
FB
861static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
862{
b26eefb6
PB
863 TCGv tmp;
864 if (reg == 15) {
865 tmp = new_tmp();
866 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
867 } else {
868 tmp = cpu_T[t];
869 }
155c3eac 870 tcg_gen_mov_i32(cpu_R[reg], tmp);
2c0262af 871 if (reg == 15) {
b26eefb6 872 dead_tmp(tmp);
2c0262af
FB
873 s->is_jmp = DISAS_JUMP;
874 }
875}
876
877static inline void gen_movl_reg_T0(DisasContext *s, int reg)
878{
879 gen_movl_reg_TN(s, reg, 0);
880}
881
882static inline void gen_movl_reg_T1(DisasContext *s, int reg)
883{
884 gen_movl_reg_TN(s, reg, 1);
885}
886
b5ff1b31
FB
887/* Force a TB lookup after an instruction that changes the CPU state. */
888static inline void gen_lookup_tb(DisasContext *s)
889{
a6445c52 890 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
891 s->is_jmp = DISAS_UPDATE;
892}
893
b0109805
PB
894static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
895 TCGv var)
2c0262af 896{
1e8d4eec 897 int val, rm, shift, shiftop;
b26eefb6 898 TCGv offset;
2c0262af
FB
899
900 if (!(insn & (1 << 25))) {
901 /* immediate */
902 val = insn & 0xfff;
903 if (!(insn & (1 << 23)))
904 val = -val;
537730b9 905 if (val != 0)
b0109805 906 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
907 } else {
908 /* shift/register */
909 rm = (insn) & 0xf;
910 shift = (insn >> 7) & 0x1f;
1e8d4eec 911 shiftop = (insn >> 5) & 3;
b26eefb6 912 offset = load_reg(s, rm);
9a119ff6 913 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 914 if (!(insn & (1 << 23)))
b0109805 915 tcg_gen_sub_i32(var, var, offset);
2c0262af 916 else
b0109805 917 tcg_gen_add_i32(var, var, offset);
b26eefb6 918 dead_tmp(offset);
2c0262af
FB
919 }
920}
921
191f9a93 922static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 923 int extra, TCGv var)
2c0262af
FB
924{
925 int val, rm;
b26eefb6 926 TCGv offset;
3b46e624 927
2c0262af
FB
928 if (insn & (1 << 22)) {
929 /* immediate */
930 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
931 if (!(insn & (1 << 23)))
932 val = -val;
18acad92 933 val += extra;
537730b9 934 if (val != 0)
b0109805 935 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
936 } else {
937 /* register */
191f9a93 938 if (extra)
b0109805 939 tcg_gen_addi_i32(var, var, extra);
2c0262af 940 rm = (insn) & 0xf;
b26eefb6 941 offset = load_reg(s, rm);
2c0262af 942 if (!(insn & (1 << 23)))
b0109805 943 tcg_gen_sub_i32(var, var, offset);
2c0262af 944 else
b0109805 945 tcg_gen_add_i32(var, var, offset);
b26eefb6 946 dead_tmp(offset);
2c0262af
FB
947 }
948}
949
4373f3ce
PB
950#define VFP_OP2(name) \
951static inline void gen_vfp_##name(int dp) \
952{ \
953 if (dp) \
954 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
955 else \
956 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
957}
958
4373f3ce
PB
959VFP_OP2(add)
960VFP_OP2(sub)
961VFP_OP2(mul)
962VFP_OP2(div)
963
964#undef VFP_OP2
965
966static inline void gen_vfp_abs(int dp)
967{
968 if (dp)
969 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
970 else
971 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
972}
973
974static inline void gen_vfp_neg(int dp)
975{
976 if (dp)
977 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
978 else
979 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
980}
981
982static inline void gen_vfp_sqrt(int dp)
983{
984 if (dp)
985 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
986 else
987 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
988}
989
990static inline void gen_vfp_cmp(int dp)
991{
992 if (dp)
993 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
994 else
995 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
996}
997
998static inline void gen_vfp_cmpe(int dp)
999{
1000 if (dp)
1001 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1002 else
1003 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1004}
1005
1006static inline void gen_vfp_F1_ld0(int dp)
1007{
1008 if (dp)
5b340b51 1009 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1010 else
5b340b51 1011 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1012}
1013
1014static inline void gen_vfp_uito(int dp)
1015{
1016 if (dp)
1017 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1018 else
1019 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1020}
1021
1022static inline void gen_vfp_sito(int dp)
1023{
1024 if (dp)
66230e0d 1025 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 1026 else
66230e0d 1027 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
1028}
1029
1030static inline void gen_vfp_toui(int dp)
1031{
1032 if (dp)
1033 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1034 else
1035 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1036}
1037
1038static inline void gen_vfp_touiz(int dp)
1039{
1040 if (dp)
1041 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1042 else
1043 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1044}
1045
1046static inline void gen_vfp_tosi(int dp)
1047{
1048 if (dp)
1049 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1050 else
1051 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1052}
1053
1054static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1055{
1056 if (dp)
4373f3ce 1057 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1058 else
4373f3ce
PB
1059 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1060}
1061
1062#define VFP_GEN_FIX(name) \
1063static inline void gen_vfp_##name(int dp, int shift) \
1064{ \
1065 if (dp) \
1066 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1067 else \
1068 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
9ee6e8bb 1069}
4373f3ce
PB
1070VFP_GEN_FIX(tosh)
1071VFP_GEN_FIX(tosl)
1072VFP_GEN_FIX(touh)
1073VFP_GEN_FIX(toul)
1074VFP_GEN_FIX(shto)
1075VFP_GEN_FIX(slto)
1076VFP_GEN_FIX(uhto)
1077VFP_GEN_FIX(ulto)
1078#undef VFP_GEN_FIX
9ee6e8bb 1079
b5ff1b31
FB
/* Load the F0 scratch register from guest memory at the address in
   cpu_T[1] (64-bit if dp, else zero-extended 32-bit).  */
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}
1087
/* Store the F0 scratch register to guest memory at the address in
   cpu_T[1] (64-bit if dp, else 32-bit).  */
static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}
1095
8e96005d
FB
/* Byte offset of VFP register 'reg' within CPUARMState.
   dp selects a double-precision register; single-precision registers
   are addressed as the two 32-bit halves of the overlapping double
   register, odd numbers mapping to the upper word.  */
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
9ee6e8bb
PB
1109
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.
   Each NEON D register is addressed as two single-precision slots.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
1119
ad69471c
PB
/* FIXME: Remove these. */
/* Legacy accessors that stage 32-bit NEON register pieces through the
   cpu_T scratch registers.  */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
8f8e3aa4
PB
/* Load a 32-bit piece of a NEON register into a fresh temporary.
   Ownership passes to the caller, who must release it (dead_tmp).  */
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1134
/* Store var into a 32-bit piece of a NEON register and consume the
   temporary (var is dead after this call).  */
static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}
1140
/* Load a whole 64-bit NEON D register into var.  */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1145
/* Store var into a whole 64-bit NEON D register.  */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1150
4373f3ce
PB
/* VFP values live in plain i32/i64 TCG registers, so the f32/f64
   load/store ops are simple aliases of the integer ones.  */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1155
b7bcbe95
FB
/* Copy VFP register 'reg' into the F0 scratch register
   (width selected by dp).  */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1163
/* Copy VFP register 'reg' into the F1 scratch register
   (width selected by dp).  */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}
1171
/* Copy the F0 scratch register back into VFP register 'reg'
   (width selected by dp).  */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1179
18c9b560
AZ
/* Coprocessor instruction bit 20: set for the read/load forms
   (see the TMRRC/WLDR uses below).  */
#define ARM_CP_RW_BIT	(1 << 20)
1181
/* Load 64-bit iwMMXt data register wRn into var.  */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1186
/* Store var into 64-bit iwMMXt data register wRn.  */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1191
/* Move cpu_T[0] into iwMMXt control register wCx[reg].  */
static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

/* Move iwMMXt control register wCx[reg] into cpu_T[0].  */
static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

/* Move iwMMXt control register wCx[reg] into cpu_T[1].  */
static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
1206
/* Write the M0 accumulator back to data register wRn.  */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

/* Load data register wRn into the M0 accumulator.  */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1216
/* M0 |= wRn  */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* M0 &= wRn  */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* M0 ^= wRn  */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1234
/* Emit a call to an iwMMXt helper: M0 = helper(M0, wRn).  */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also receives cpu_env.  */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long variants of an env-taking op.  */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary env-taking op: M0 = helper(env, M0).  */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1259
/* Binary ops whose helpers take no cpu_env.  */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Binary ops whose helpers additionally receive cpu_env.  */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Unary unpack ops (operate on M0 alone, with cpu_env).  */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
1316
/* Multiply-accumulate helpers: M0 = helper(M0, T0, T1)
   (sl/sw/swl variants; see the iwmmxt helper definitions).  */
static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}
1331
/* M0 = align(M0, wRn, T0): extract an aligned value spanning M0 and
   wRn, with the byte offset taken from cpu_T[0].  */
static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}
1337
1338static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1339{
1340 TCGv tmp = tcg_const_i32(shift);
1341 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1342}
1343
/* Extract a signed byte of M0 at bit position 'shift' into T0.  */
static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}

/* Extract a signed halfword of M0 at bit position 'shift' into T0.  */
static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}

/* Extract an unsigned field of M0 at bit position 'shift' into T0,
   applying 'mask'; the AND is skipped for the full-word case.  */
static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}
1365
/* Set the MUP bit (bit 1) in wCon: data registers were updated.  */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set the CUP bit (bit 0) in wCon: control registers were updated.  */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1381
/* Compute the SIMD N/Z flags from M0 (via helper) and store them
   into the wCASF status register.  */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1388
/* M0 += low 32 bits of wRn, zero-extended (used by the WSAD
   accumulating form).  */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1395
1396
/* Split 64-bit wRn into T0 (low word) and T1 (high word).  */
static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V0, rn);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}
1404
/* Combine T0 (low word) and T1 (high word) into 64-bit wRn.  */
static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
    tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
    iwmmxt_store_reg(cpu_V0, rn);
}
1410
18c9b560
AZ
/* Decode the addressing mode of an iwMMXt load/store and leave the
   effective address in cpu_T[1].  The 8-bit immediate offset is
   scaled by 1 or 4 (insn bit 8).  Handles pre-indexed (with optional
   writeback) and post-indexed forms.  Returns 1 for an invalid
   encoding (neither indexed nor offset-add), else 0.  */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;
    gen_movl_T1_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(- offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1441
1442static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1443{
1444 int rd = (insn >> 0) & 0xf;
1445
1446 if (insn & (1 << 8))
1447 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1448 return 1;
1449 else
1450 gen_op_iwmmxt_movl_T0_wCx(rd);
1451 else
e677137d 1452 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1453
1454 gen_op_movl_T1_im(mask);
1455 gen_op_andl_T0_T1();
1456 return 0;
1457}
1458
1459/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1460 (ie. an undefined instruction). */
1461static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1462{
1463 int rd, wrd;
1464 int rdhi, rdlo, rd0, rd1, i;
b0109805 1465 TCGv tmp;
18c9b560
AZ
1466
1467 if ((insn & 0x0e000e00) == 0x0c000000) {
1468 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1469 wrd = insn & 0xf;
1470 rdlo = (insn >> 12) & 0xf;
1471 rdhi = (insn >> 16) & 0xf;
1472 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1473 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1474 gen_movl_reg_T0(s, rdlo);
1475 gen_movl_reg_T1(s, rdhi);
1476 } else { /* TMCRR */
1477 gen_movl_T0_reg(s, rdlo);
1478 gen_movl_T1_reg(s, rdhi);
e677137d 1479 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1480 gen_op_iwmmxt_set_mup();
1481 }
1482 return 0;
1483 }
1484
1485 wrd = (insn >> 12) & 0xf;
1486 if (gen_iwmmxt_address(s, insn))
1487 return 1;
1488 if (insn & ARM_CP_RW_BIT) {
1489 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1490 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1491 tcg_gen_mov_i32(cpu_T[0], tmp);
1492 dead_tmp(tmp);
18c9b560
AZ
1493 gen_op_iwmmxt_movl_wCx_T0(wrd);
1494 } else {
e677137d
PB
1495 i = 1;
1496 if (insn & (1 << 8)) {
1497 if (insn & (1 << 22)) { /* WLDRD */
1498 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1499 i = 0;
1500 } else { /* WLDRW wRd */
1501 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1502 }
1503 } else {
1504 if (insn & (1 << 22)) { /* WLDRH */
1505 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1506 } else { /* WLDRB */
1507 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1508 }
1509 }
1510 if (i) {
1511 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1512 dead_tmp(tmp);
1513 }
18c9b560
AZ
1514 gen_op_iwmmxt_movq_wRn_M0(wrd);
1515 }
1516 } else {
1517 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1518 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1519 tmp = new_tmp();
1520 tcg_gen_mov_i32(tmp, cpu_T[0]);
1521 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1522 } else {
1523 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1524 tmp = new_tmp();
1525 if (insn & (1 << 8)) {
1526 if (insn & (1 << 22)) { /* WSTRD */
1527 dead_tmp(tmp);
1528 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1529 } else { /* WSTRW wRd */
1530 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1531 gen_st32(tmp, cpu_T[1], IS_USER(s));
1532 }
1533 } else {
1534 if (insn & (1 << 22)) { /* WSTRH */
1535 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1536 gen_st16(tmp, cpu_T[1], IS_USER(s));
1537 } else { /* WSTRB */
1538 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1539 gen_st8(tmp, cpu_T[1], IS_USER(s));
1540 }
1541 }
18c9b560
AZ
1542 }
1543 }
1544 return 0;
1545 }
1546
1547 if ((insn & 0x0f000000) != 0x0e000000)
1548 return 1;
1549
1550 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1551 case 0x000: /* WOR */
1552 wrd = (insn >> 12) & 0xf;
1553 rd0 = (insn >> 0) & 0xf;
1554 rd1 = (insn >> 16) & 0xf;
1555 gen_op_iwmmxt_movq_M0_wRn(rd0);
1556 gen_op_iwmmxt_orq_M0_wRn(rd1);
1557 gen_op_iwmmxt_setpsr_nz();
1558 gen_op_iwmmxt_movq_wRn_M0(wrd);
1559 gen_op_iwmmxt_set_mup();
1560 gen_op_iwmmxt_set_cup();
1561 break;
1562 case 0x011: /* TMCR */
1563 if (insn & 0xf)
1564 return 1;
1565 rd = (insn >> 12) & 0xf;
1566 wrd = (insn >> 16) & 0xf;
1567 switch (wrd) {
1568 case ARM_IWMMXT_wCID:
1569 case ARM_IWMMXT_wCASF:
1570 break;
1571 case ARM_IWMMXT_wCon:
1572 gen_op_iwmmxt_set_cup();
1573 /* Fall through. */
1574 case ARM_IWMMXT_wCSSF:
1575 gen_op_iwmmxt_movl_T0_wCx(wrd);
1576 gen_movl_T1_reg(s, rd);
1577 gen_op_bicl_T0_T1();
1578 gen_op_iwmmxt_movl_wCx_T0(wrd);
1579 break;
1580 case ARM_IWMMXT_wCGR0:
1581 case ARM_IWMMXT_wCGR1:
1582 case ARM_IWMMXT_wCGR2:
1583 case ARM_IWMMXT_wCGR3:
1584 gen_op_iwmmxt_set_cup();
1585 gen_movl_reg_T0(s, rd);
1586 gen_op_iwmmxt_movl_wCx_T0(wrd);
1587 break;
1588 default:
1589 return 1;
1590 }
1591 break;
1592 case 0x100: /* WXOR */
1593 wrd = (insn >> 12) & 0xf;
1594 rd0 = (insn >> 0) & 0xf;
1595 rd1 = (insn >> 16) & 0xf;
1596 gen_op_iwmmxt_movq_M0_wRn(rd0);
1597 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1598 gen_op_iwmmxt_setpsr_nz();
1599 gen_op_iwmmxt_movq_wRn_M0(wrd);
1600 gen_op_iwmmxt_set_mup();
1601 gen_op_iwmmxt_set_cup();
1602 break;
1603 case 0x111: /* TMRC */
1604 if (insn & 0xf)
1605 return 1;
1606 rd = (insn >> 12) & 0xf;
1607 wrd = (insn >> 16) & 0xf;
1608 gen_op_iwmmxt_movl_T0_wCx(wrd);
1609 gen_movl_reg_T0(s, rd);
1610 break;
1611 case 0x300: /* WANDN */
1612 wrd = (insn >> 12) & 0xf;
1613 rd0 = (insn >> 0) & 0xf;
1614 rd1 = (insn >> 16) & 0xf;
1615 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1616 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1617 gen_op_iwmmxt_andq_M0_wRn(rd1);
1618 gen_op_iwmmxt_setpsr_nz();
1619 gen_op_iwmmxt_movq_wRn_M0(wrd);
1620 gen_op_iwmmxt_set_mup();
1621 gen_op_iwmmxt_set_cup();
1622 break;
1623 case 0x200: /* WAND */
1624 wrd = (insn >> 12) & 0xf;
1625 rd0 = (insn >> 0) & 0xf;
1626 rd1 = (insn >> 16) & 0xf;
1627 gen_op_iwmmxt_movq_M0_wRn(rd0);
1628 gen_op_iwmmxt_andq_M0_wRn(rd1);
1629 gen_op_iwmmxt_setpsr_nz();
1630 gen_op_iwmmxt_movq_wRn_M0(wrd);
1631 gen_op_iwmmxt_set_mup();
1632 gen_op_iwmmxt_set_cup();
1633 break;
1634 case 0x810: case 0xa10: /* WMADD */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 0) & 0xf;
1637 rd1 = (insn >> 16) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 if (insn & (1 << 21))
1640 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1643 gen_op_iwmmxt_movq_wRn_M0(wrd);
1644 gen_op_iwmmxt_set_mup();
1645 break;
1646 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1647 wrd = (insn >> 12) & 0xf;
1648 rd0 = (insn >> 16) & 0xf;
1649 rd1 = (insn >> 0) & 0xf;
1650 gen_op_iwmmxt_movq_M0_wRn(rd0);
1651 switch ((insn >> 22) & 3) {
1652 case 0:
1653 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1654 break;
1655 case 1:
1656 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1657 break;
1658 case 2:
1659 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1660 break;
1661 case 3:
1662 return 1;
1663 }
1664 gen_op_iwmmxt_movq_wRn_M0(wrd);
1665 gen_op_iwmmxt_set_mup();
1666 gen_op_iwmmxt_set_cup();
1667 break;
1668 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1669 wrd = (insn >> 12) & 0xf;
1670 rd0 = (insn >> 16) & 0xf;
1671 rd1 = (insn >> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0);
1673 switch ((insn >> 22) & 3) {
1674 case 0:
1675 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1676 break;
1677 case 1:
1678 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1679 break;
1680 case 2:
1681 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1682 break;
1683 case 3:
1684 return 1;
1685 }
1686 gen_op_iwmmxt_movq_wRn_M0(wrd);
1687 gen_op_iwmmxt_set_mup();
1688 gen_op_iwmmxt_set_cup();
1689 break;
1690 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1691 wrd = (insn >> 12) & 0xf;
1692 rd0 = (insn >> 16) & 0xf;
1693 rd1 = (insn >> 0) & 0xf;
1694 gen_op_iwmmxt_movq_M0_wRn(rd0);
1695 if (insn & (1 << 22))
1696 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1697 else
1698 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1699 if (!(insn & (1 << 20)))
1700 gen_op_iwmmxt_addl_M0_wRn(wrd);
1701 gen_op_iwmmxt_movq_wRn_M0(wrd);
1702 gen_op_iwmmxt_set_mup();
1703 break;
1704 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1705 wrd = (insn >> 12) & 0xf;
1706 rd0 = (insn >> 16) & 0xf;
1707 rd1 = (insn >> 0) & 0xf;
1708 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1709 if (insn & (1 << 21)) {
1710 if (insn & (1 << 20))
1711 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1712 else
1713 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1714 } else {
1715 if (insn & (1 << 20))
1716 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1717 else
1718 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1719 }
18c9b560
AZ
1720 gen_op_iwmmxt_movq_wRn_M0(wrd);
1721 gen_op_iwmmxt_set_mup();
1722 break;
1723 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1724 wrd = (insn >> 12) & 0xf;
1725 rd0 = (insn >> 16) & 0xf;
1726 rd1 = (insn >> 0) & 0xf;
1727 gen_op_iwmmxt_movq_M0_wRn(rd0);
1728 if (insn & (1 << 21))
1729 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1730 else
1731 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1732 if (!(insn & (1 << 20))) {
e677137d
PB
1733 iwmmxt_load_reg(cpu_V1, wrd);
1734 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1735 }
1736 gen_op_iwmmxt_movq_wRn_M0(wrd);
1737 gen_op_iwmmxt_set_mup();
1738 break;
1739 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1740 wrd = (insn >> 12) & 0xf;
1741 rd0 = (insn >> 16) & 0xf;
1742 rd1 = (insn >> 0) & 0xf;
1743 gen_op_iwmmxt_movq_M0_wRn(rd0);
1744 switch ((insn >> 22) & 3) {
1745 case 0:
1746 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1747 break;
1748 case 1:
1749 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1750 break;
1751 case 2:
1752 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1753 break;
1754 case 3:
1755 return 1;
1756 }
1757 gen_op_iwmmxt_movq_wRn_M0(wrd);
1758 gen_op_iwmmxt_set_mup();
1759 gen_op_iwmmxt_set_cup();
1760 break;
1761 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1762 wrd = (insn >> 12) & 0xf;
1763 rd0 = (insn >> 16) & 0xf;
1764 rd1 = (insn >> 0) & 0xf;
1765 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1766 if (insn & (1 << 22)) {
1767 if (insn & (1 << 20))
1768 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1769 else
1770 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1771 } else {
1772 if (insn & (1 << 20))
1773 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1774 else
1775 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1776 }
18c9b560
AZ
1777 gen_op_iwmmxt_movq_wRn_M0(wrd);
1778 gen_op_iwmmxt_set_mup();
1779 gen_op_iwmmxt_set_cup();
1780 break;
1781 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1782 wrd = (insn >> 12) & 0xf;
1783 rd0 = (insn >> 16) & 0xf;
1784 rd1 = (insn >> 0) & 0xf;
1785 gen_op_iwmmxt_movq_M0_wRn(rd0);
1786 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1787 gen_op_movl_T1_im(7);
1788 gen_op_andl_T0_T1();
1789 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1790 gen_op_iwmmxt_movq_wRn_M0(wrd);
1791 gen_op_iwmmxt_set_mup();
1792 break;
1793 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1794 rd = (insn >> 12) & 0xf;
1795 wrd = (insn >> 16) & 0xf;
1796 gen_movl_T0_reg(s, rd);
1797 gen_op_iwmmxt_movq_M0_wRn(wrd);
1798 switch ((insn >> 6) & 3) {
1799 case 0:
1800 gen_op_movl_T1_im(0xff);
1801 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1802 break;
1803 case 1:
1804 gen_op_movl_T1_im(0xffff);
1805 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1806 break;
1807 case 2:
1808 gen_op_movl_T1_im(0xffffffff);
1809 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1810 break;
1811 case 3:
1812 return 1;
1813 }
1814 gen_op_iwmmxt_movq_wRn_M0(wrd);
1815 gen_op_iwmmxt_set_mup();
1816 break;
1817 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1818 rd = (insn >> 12) & 0xf;
1819 wrd = (insn >> 16) & 0xf;
1820 if (rd == 15)
1821 return 1;
1822 gen_op_iwmmxt_movq_M0_wRn(wrd);
1823 switch ((insn >> 22) & 3) {
1824 case 0:
1825 if (insn & 8)
1826 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1827 else {
e677137d 1828 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1829 }
1830 break;
1831 case 1:
1832 if (insn & 8)
1833 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1834 else {
e677137d 1835 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1836 }
1837 break;
1838 case 2:
e677137d 1839 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1840 break;
1841 case 3:
1842 return 1;
1843 }
b26eefb6 1844 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1845 break;
1846 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1847 if ((insn & 0x000ff008) != 0x0003f000)
1848 return 1;
1849 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1850 switch ((insn >> 22) & 3) {
1851 case 0:
1852 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1853 break;
1854 case 1:
1855 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1856 break;
1857 case 2:
1858 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1859 break;
1860 case 3:
1861 return 1;
1862 }
1863 gen_op_shll_T1_im(28);
d9ba4830 1864 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1865 break;
1866 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1867 rd = (insn >> 12) & 0xf;
1868 wrd = (insn >> 16) & 0xf;
1869 gen_movl_T0_reg(s, rd);
1870 switch ((insn >> 6) & 3) {
1871 case 0:
e677137d 1872 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1873 break;
1874 case 1:
e677137d 1875 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1876 break;
1877 case 2:
e677137d 1878 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1879 break;
1880 case 3:
1881 return 1;
1882 }
1883 gen_op_iwmmxt_movq_wRn_M0(wrd);
1884 gen_op_iwmmxt_set_mup();
1885 break;
1886 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1887 if ((insn & 0x000ff00f) != 0x0003f000)
1888 return 1;
1889 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
15bb4eac 1890 gen_op_movl_T0_T1();
18c9b560
AZ
1891 switch ((insn >> 22) & 3) {
1892 case 0:
1893 for (i = 0; i < 7; i ++) {
1894 gen_op_shll_T1_im(4);
1895 gen_op_andl_T0_T1();
1896 }
1897 break;
1898 case 1:
1899 for (i = 0; i < 3; i ++) {
1900 gen_op_shll_T1_im(8);
1901 gen_op_andl_T0_T1();
1902 }
1903 break;
1904 case 2:
1905 gen_op_shll_T1_im(16);
1906 gen_op_andl_T0_T1();
1907 break;
1908 case 3:
1909 return 1;
1910 }
d9ba4830 1911 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1912 break;
1913 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1914 wrd = (insn >> 12) & 0xf;
1915 rd0 = (insn >> 16) & 0xf;
1916 gen_op_iwmmxt_movq_M0_wRn(rd0);
1917 switch ((insn >> 22) & 3) {
1918 case 0:
e677137d 1919 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1920 break;
1921 case 1:
e677137d 1922 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1923 break;
1924 case 2:
e677137d 1925 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1926 break;
1927 case 3:
1928 return 1;
1929 }
1930 gen_op_iwmmxt_movq_wRn_M0(wrd);
1931 gen_op_iwmmxt_set_mup();
1932 break;
1933 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1934 if ((insn & 0x000ff00f) != 0x0003f000)
1935 return 1;
1936 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
15bb4eac 1937 gen_op_movl_T0_T1();
18c9b560
AZ
1938 switch ((insn >> 22) & 3) {
1939 case 0:
1940 for (i = 0; i < 7; i ++) {
1941 gen_op_shll_T1_im(4);
1942 gen_op_orl_T0_T1();
1943 }
1944 break;
1945 case 1:
1946 for (i = 0; i < 3; i ++) {
1947 gen_op_shll_T1_im(8);
1948 gen_op_orl_T0_T1();
1949 }
1950 break;
1951 case 2:
1952 gen_op_shll_T1_im(16);
1953 gen_op_orl_T0_T1();
1954 break;
1955 case 3:
1956 return 1;
1957 }
d9ba4830 1958 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1959 break;
1960 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1961 rd = (insn >> 12) & 0xf;
1962 rd0 = (insn >> 16) & 0xf;
1963 if ((insn & 0xf) != 0)
1964 return 1;
1965 gen_op_iwmmxt_movq_M0_wRn(rd0);
1966 switch ((insn >> 22) & 3) {
1967 case 0:
e677137d 1968 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
1969 break;
1970 case 1:
e677137d 1971 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
1972 break;
1973 case 2:
e677137d 1974 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
18c9b560
AZ
1975 break;
1976 case 3:
1977 return 1;
1978 }
1979 gen_movl_reg_T0(s, rd);
1980 break;
1981 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1982 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1983 wrd = (insn >> 12) & 0xf;
1984 rd0 = (insn >> 16) & 0xf;
1985 rd1 = (insn >> 0) & 0xf;
1986 gen_op_iwmmxt_movq_M0_wRn(rd0);
1987 switch ((insn >> 22) & 3) {
1988 case 0:
1989 if (insn & (1 << 21))
1990 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1991 else
1992 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1993 break;
1994 case 1:
1995 if (insn & (1 << 21))
1996 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1997 else
1998 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1999 break;
2000 case 2:
2001 if (insn & (1 << 21))
2002 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2003 else
2004 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2005 break;
2006 case 3:
2007 return 1;
2008 }
2009 gen_op_iwmmxt_movq_wRn_M0(wrd);
2010 gen_op_iwmmxt_set_mup();
2011 gen_op_iwmmxt_set_cup();
2012 break;
2013 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2014 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2015 wrd = (insn >> 12) & 0xf;
2016 rd0 = (insn >> 16) & 0xf;
2017 gen_op_iwmmxt_movq_M0_wRn(rd0);
2018 switch ((insn >> 22) & 3) {
2019 case 0:
2020 if (insn & (1 << 21))
2021 gen_op_iwmmxt_unpacklsb_M0();
2022 else
2023 gen_op_iwmmxt_unpacklub_M0();
2024 break;
2025 case 1:
2026 if (insn & (1 << 21))
2027 gen_op_iwmmxt_unpacklsw_M0();
2028 else
2029 gen_op_iwmmxt_unpackluw_M0();
2030 break;
2031 case 2:
2032 if (insn & (1 << 21))
2033 gen_op_iwmmxt_unpacklsl_M0();
2034 else
2035 gen_op_iwmmxt_unpacklul_M0();
2036 break;
2037 case 3:
2038 return 1;
2039 }
2040 gen_op_iwmmxt_movq_wRn_M0(wrd);
2041 gen_op_iwmmxt_set_mup();
2042 gen_op_iwmmxt_set_cup();
2043 break;
2044 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2045 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2046 wrd = (insn >> 12) & 0xf;
2047 rd0 = (insn >> 16) & 0xf;
2048 gen_op_iwmmxt_movq_M0_wRn(rd0);
2049 switch ((insn >> 22) & 3) {
2050 case 0:
2051 if (insn & (1 << 21))
2052 gen_op_iwmmxt_unpackhsb_M0();
2053 else
2054 gen_op_iwmmxt_unpackhub_M0();
2055 break;
2056 case 1:
2057 if (insn & (1 << 21))
2058 gen_op_iwmmxt_unpackhsw_M0();
2059 else
2060 gen_op_iwmmxt_unpackhuw_M0();
2061 break;
2062 case 2:
2063 if (insn & (1 << 21))
2064 gen_op_iwmmxt_unpackhsl_M0();
2065 else
2066 gen_op_iwmmxt_unpackhul_M0();
2067 break;
2068 case 3:
2069 return 1;
2070 }
2071 gen_op_iwmmxt_movq_wRn_M0(wrd);
2072 gen_op_iwmmxt_set_mup();
2073 gen_op_iwmmxt_set_cup();
2074 break;
2075 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2076 case 0x214: case 0x614: case 0xa14: case 0xe14:
2077 wrd = (insn >> 12) & 0xf;
2078 rd0 = (insn >> 16) & 0xf;
2079 gen_op_iwmmxt_movq_M0_wRn(rd0);
2080 if (gen_iwmmxt_shift(insn, 0xff))
2081 return 1;
2082 switch ((insn >> 22) & 3) {
2083 case 0:
2084 return 1;
2085 case 1:
e677137d 2086 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2087 break;
2088 case 2:
e677137d 2089 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2090 break;
2091 case 3:
e677137d 2092 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2093 break;
2094 }
2095 gen_op_iwmmxt_movq_wRn_M0(wrd);
2096 gen_op_iwmmxt_set_mup();
2097 gen_op_iwmmxt_set_cup();
2098 break;
2099 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2100 case 0x014: case 0x414: case 0x814: case 0xc14:
2101 wrd = (insn >> 12) & 0xf;
2102 rd0 = (insn >> 16) & 0xf;
2103 gen_op_iwmmxt_movq_M0_wRn(rd0);
2104 if (gen_iwmmxt_shift(insn, 0xff))
2105 return 1;
2106 switch ((insn >> 22) & 3) {
2107 case 0:
2108 return 1;
2109 case 1:
e677137d 2110 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2111 break;
2112 case 2:
e677137d 2113 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2114 break;
2115 case 3:
e677137d 2116 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2117 break;
2118 }
2119 gen_op_iwmmxt_movq_wRn_M0(wrd);
2120 gen_op_iwmmxt_set_mup();
2121 gen_op_iwmmxt_set_cup();
2122 break;
2123 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2124 case 0x114: case 0x514: case 0x914: case 0xd14:
2125 wrd = (insn >> 12) & 0xf;
2126 rd0 = (insn >> 16) & 0xf;
2127 gen_op_iwmmxt_movq_M0_wRn(rd0);
2128 if (gen_iwmmxt_shift(insn, 0xff))
2129 return 1;
2130 switch ((insn >> 22) & 3) {
2131 case 0:
2132 return 1;
2133 case 1:
e677137d 2134 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2135 break;
2136 case 2:
e677137d 2137 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2138 break;
2139 case 3:
e677137d 2140 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2141 break;
2142 }
2143 gen_op_iwmmxt_movq_wRn_M0(wrd);
2144 gen_op_iwmmxt_set_mup();
2145 gen_op_iwmmxt_set_cup();
2146 break;
2147 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2148 case 0x314: case 0x714: case 0xb14: case 0xf14:
2149 wrd = (insn >> 12) & 0xf;
2150 rd0 = (insn >> 16) & 0xf;
2151 gen_op_iwmmxt_movq_M0_wRn(rd0);
2152 switch ((insn >> 22) & 3) {
2153 case 0:
2154 return 1;
2155 case 1:
2156 if (gen_iwmmxt_shift(insn, 0xf))
2157 return 1;
e677137d 2158 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2159 break;
2160 case 2:
2161 if (gen_iwmmxt_shift(insn, 0x1f))
2162 return 1;
e677137d 2163 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2164 break;
2165 case 3:
2166 if (gen_iwmmxt_shift(insn, 0x3f))
2167 return 1;
e677137d 2168 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2169 break;
2170 }
2171 gen_op_iwmmxt_movq_wRn_M0(wrd);
2172 gen_op_iwmmxt_set_mup();
2173 gen_op_iwmmxt_set_cup();
2174 break;
2175 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2176 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2177 wrd = (insn >> 12) & 0xf;
2178 rd0 = (insn >> 16) & 0xf;
2179 rd1 = (insn >> 0) & 0xf;
2180 gen_op_iwmmxt_movq_M0_wRn(rd0);
2181 switch ((insn >> 22) & 3) {
2182 case 0:
2183 if (insn & (1 << 21))
2184 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2185 else
2186 gen_op_iwmmxt_minub_M0_wRn(rd1);
2187 break;
2188 case 1:
2189 if (insn & (1 << 21))
2190 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2191 else
2192 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2193 break;
2194 case 2:
2195 if (insn & (1 << 21))
2196 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2197 else
2198 gen_op_iwmmxt_minul_M0_wRn(rd1);
2199 break;
2200 case 3:
2201 return 1;
2202 }
2203 gen_op_iwmmxt_movq_wRn_M0(wrd);
2204 gen_op_iwmmxt_set_mup();
2205 break;
2206 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2207 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 rd1 = (insn >> 0) & 0xf;
2211 gen_op_iwmmxt_movq_M0_wRn(rd0);
2212 switch ((insn >> 22) & 3) {
2213 case 0:
2214 if (insn & (1 << 21))
2215 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2216 else
2217 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2218 break;
2219 case 1:
2220 if (insn & (1 << 21))
2221 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2222 else
2223 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2224 break;
2225 case 2:
2226 if (insn & (1 << 21))
2227 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2228 else
2229 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2230 break;
2231 case 3:
2232 return 1;
2233 }
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 break;
2237 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2238 case 0x402: case 0x502: case 0x602: case 0x702:
2239 wrd = (insn >> 12) & 0xf;
2240 rd0 = (insn >> 16) & 0xf;
2241 rd1 = (insn >> 0) & 0xf;
2242 gen_op_iwmmxt_movq_M0_wRn(rd0);
2243 gen_op_movl_T0_im((insn >> 20) & 3);
2244 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2245 gen_op_iwmmxt_movq_wRn_M0(wrd);
2246 gen_op_iwmmxt_set_mup();
2247 break;
2248 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2249 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2250 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2251 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2252 wrd = (insn >> 12) & 0xf;
2253 rd0 = (insn >> 16) & 0xf;
2254 rd1 = (insn >> 0) & 0xf;
2255 gen_op_iwmmxt_movq_M0_wRn(rd0);
2256 switch ((insn >> 20) & 0xf) {
2257 case 0x0:
2258 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2259 break;
2260 case 0x1:
2261 gen_op_iwmmxt_subub_M0_wRn(rd1);
2262 break;
2263 case 0x3:
2264 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2265 break;
2266 case 0x4:
2267 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2268 break;
2269 case 0x5:
2270 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2271 break;
2272 case 0x7:
2273 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2274 break;
2275 case 0x8:
2276 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2277 break;
2278 case 0x9:
2279 gen_op_iwmmxt_subul_M0_wRn(rd1);
2280 break;
2281 case 0xb:
2282 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2283 break;
2284 default:
2285 return 1;
2286 }
2287 gen_op_iwmmxt_movq_wRn_M0(wrd);
2288 gen_op_iwmmxt_set_mup();
2289 gen_op_iwmmxt_set_cup();
2290 break;
2291 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2292 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2293 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2294 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2295 wrd = (insn >> 12) & 0xf;
2296 rd0 = (insn >> 16) & 0xf;
2297 gen_op_iwmmxt_movq_M0_wRn(rd0);
2298 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2299 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2300 gen_op_iwmmxt_movq_wRn_M0(wrd);
2301 gen_op_iwmmxt_set_mup();
2302 gen_op_iwmmxt_set_cup();
2303 break;
2304 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2305 case 0x418: case 0x518: case 0x618: case 0x718:
2306 case 0x818: case 0x918: case 0xa18: case 0xb18:
2307 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2308 wrd = (insn >> 12) & 0xf;
2309 rd0 = (insn >> 16) & 0xf;
2310 rd1 = (insn >> 0) & 0xf;
2311 gen_op_iwmmxt_movq_M0_wRn(rd0);
2312 switch ((insn >> 20) & 0xf) {
2313 case 0x0:
2314 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2315 break;
2316 case 0x1:
2317 gen_op_iwmmxt_addub_M0_wRn(rd1);
2318 break;
2319 case 0x3:
2320 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2321 break;
2322 case 0x4:
2323 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2324 break;
2325 case 0x5:
2326 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2327 break;
2328 case 0x7:
2329 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2330 break;
2331 case 0x8:
2332 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2333 break;
2334 case 0x9:
2335 gen_op_iwmmxt_addul_M0_wRn(rd1);
2336 break;
2337 case 0xb:
2338 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2339 break;
2340 default:
2341 return 1;
2342 }
2343 gen_op_iwmmxt_movq_wRn_M0(wrd);
2344 gen_op_iwmmxt_set_mup();
2345 gen_op_iwmmxt_set_cup();
2346 break;
2347 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2348 case 0x408: case 0x508: case 0x608: case 0x708:
2349 case 0x808: case 0x908: case 0xa08: case 0xb08:
2350 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2351 wrd = (insn >> 12) & 0xf;
2352 rd0 = (insn >> 16) & 0xf;
2353 rd1 = (insn >> 0) & 0xf;
2354 gen_op_iwmmxt_movq_M0_wRn(rd0);
2355 if (!(insn & (1 << 20)))
2356 return 1;
2357 switch ((insn >> 22) & 3) {
2358 case 0:
2359 return 1;
2360 case 1:
2361 if (insn & (1 << 21))
2362 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2363 else
2364 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2365 break;
2366 case 2:
2367 if (insn & (1 << 21))
2368 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2369 else
2370 gen_op_iwmmxt_packul_M0_wRn(rd1);
2371 break;
2372 case 3:
2373 if (insn & (1 << 21))
2374 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2375 else
2376 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2377 break;
2378 }
2379 gen_op_iwmmxt_movq_wRn_M0(wrd);
2380 gen_op_iwmmxt_set_mup();
2381 gen_op_iwmmxt_set_cup();
2382 break;
2383 case 0x201: case 0x203: case 0x205: case 0x207:
2384 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2385 case 0x211: case 0x213: case 0x215: case 0x217:
2386 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2387 wrd = (insn >> 5) & 0xf;
2388 rd0 = (insn >> 12) & 0xf;
2389 rd1 = (insn >> 0) & 0xf;
2390 if (rd0 == 0xf || rd1 == 0xf)
2391 return 1;
2392 gen_op_iwmmxt_movq_M0_wRn(wrd);
2393 switch ((insn >> 16) & 0xf) {
2394 case 0x0: /* TMIA */
b26eefb6
PB
2395 gen_movl_T0_reg(s, rd0);
2396 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2397 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2398 break;
2399 case 0x8: /* TMIAPH */
b26eefb6
PB
2400 gen_movl_T0_reg(s, rd0);
2401 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2402 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2403 break;
2404 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2405 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2406 if (insn & (1 << 16))
2407 gen_op_shrl_T1_im(16);
2408 gen_op_movl_T0_T1();
b26eefb6 2409 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2410 if (insn & (1 << 17))
2411 gen_op_shrl_T1_im(16);
2412 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2413 break;
2414 default:
2415 return 1;
2416 }
2417 gen_op_iwmmxt_movq_wRn_M0(wrd);
2418 gen_op_iwmmxt_set_mup();
2419 break;
2420 default:
2421 return 1;
2422 }
2423
2424 return 0;
2425}
2426
2427/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2428 (ie. an undefined instruction). */
2429static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2430{
2431 int acc, rd0, rd1, rdhi, rdlo;
2432
2433 if ((insn & 0x0ff00f10) == 0x0e200010) {
2434 /* Multiply with Internal Accumulate Format */
2435 rd0 = (insn >> 12) & 0xf;
2436 rd1 = insn & 0xf;
2437 acc = (insn >> 5) & 7;
2438
2439 if (acc != 0)
2440 return 1;
2441
2442 switch ((insn >> 16) & 0xf) {
2443 case 0x0: /* MIA */
b26eefb6
PB
2444 gen_movl_T0_reg(s, rd0);
2445 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2446 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2447 break;
2448 case 0x8: /* MIAPH */
b26eefb6
PB
2449 gen_movl_T0_reg(s, rd0);
2450 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2451 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2452 break;
2453 case 0xc: /* MIABB */
2454 case 0xd: /* MIABT */
2455 case 0xe: /* MIATB */
2456 case 0xf: /* MIATT */
b26eefb6 2457 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2458 if (insn & (1 << 16))
2459 gen_op_shrl_T1_im(16);
2460 gen_op_movl_T0_T1();
b26eefb6 2461 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2462 if (insn & (1 << 17))
2463 gen_op_shrl_T1_im(16);
2464 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2465 break;
2466 default:
2467 return 1;
2468 }
2469
2470 gen_op_iwmmxt_movq_wRn_M0(acc);
2471 return 0;
2472 }
2473
2474 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2475 /* Internal Accumulator Access Format */
2476 rdhi = (insn >> 16) & 0xf;
2477 rdlo = (insn >> 12) & 0xf;
2478 acc = insn & 7;
2479
2480 if (acc != 0)
2481 return 1;
2482
2483 if (insn & ARM_CP_RW_BIT) { /* MRA */
e677137d 2484 gen_iwmmxt_movl_T0_T1_wRn(acc);
b26eefb6 2485 gen_movl_reg_T0(s, rdlo);
18c9b560
AZ
2486 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2487 gen_op_andl_T0_T1();
b26eefb6 2488 gen_movl_reg_T0(s, rdhi);
18c9b560 2489 } else { /* MAR */
b26eefb6
PB
2490 gen_movl_T0_reg(s, rdlo);
2491 gen_movl_T1_reg(s, rdhi);
e677137d 2492 gen_iwmmxt_movl_wRn_T0_T1(acc);
18c9b560
AZ
2493 }
2494 return 0;
2495 }
2496
2497 return 1;
2498}
2499
c1713132
AZ
2500/* Disassemble system coprocessor instruction. Return nonzero if
2501 instruction is not defined. */
2502static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2503{
8984bd2e 2504 TCGv tmp;
c1713132
AZ
2505 uint32_t rd = (insn >> 12) & 0xf;
2506 uint32_t cp = (insn >> 8) & 0xf;
2507 if (IS_USER(s)) {
2508 return 1;
2509 }
2510
18c9b560 2511 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2512 if (!env->cp[cp].cp_read)
2513 return 1;
8984bd2e
PB
2514 gen_set_pc_im(s->pc);
2515 tmp = new_tmp();
2516 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2517 store_reg(s, rd, tmp);
c1713132
AZ
2518 } else {
2519 if (!env->cp[cp].cp_write)
2520 return 1;
8984bd2e
PB
2521 gen_set_pc_im(s->pc);
2522 tmp = load_reg(s, rd);
2523 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
a60de947 2524 dead_tmp(tmp);
c1713132
AZ
2525 }
2526 return 0;
2527}
2528
9ee6e8bb
PB
2529static int cp15_user_ok(uint32_t insn)
2530{
2531 int cpn = (insn >> 16) & 0xf;
2532 int cpm = insn & 0xf;
2533 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2534
2535 if (cpn == 13 && cpm == 0) {
2536 /* TLS register. */
2537 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2538 return 1;
2539 }
2540 if (cpn == 7) {
2541 /* ISB, DSB, DMB. */
2542 if ((cpm == 5 && op == 4)
2543 || (cpm == 10 && (op == 4 || op == 5)))
2544 return 1;
2545 }
2546 return 0;
2547}
2548
b5ff1b31
FB
2549/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2550 instruction is not defined. */
a90b7318 2551static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2552{
2553 uint32_t rd;
8984bd2e 2554 TCGv tmp;
b5ff1b31 2555
9ee6e8bb
PB
2556 /* M profile cores use memory mapped registers instead of cp15. */
2557 if (arm_feature(env, ARM_FEATURE_M))
2558 return 1;
2559
2560 if ((insn & (1 << 25)) == 0) {
2561 if (insn & (1 << 20)) {
2562 /* mrrc */
2563 return 1;
2564 }
2565 /* mcrr. Used for block cache operations, so implement as no-op. */
2566 return 0;
2567 }
2568 if ((insn & (1 << 4)) == 0) {
2569 /* cdp */
2570 return 1;
2571 }
2572 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2573 return 1;
2574 }
9332f9da
FB
2575 if ((insn & 0x0fff0fff) == 0x0e070f90
2576 || (insn & 0x0fff0fff) == 0x0e070f58) {
2577 /* Wait for interrupt. */
8984bd2e 2578 gen_set_pc_im(s->pc);
9ee6e8bb 2579 s->is_jmp = DISAS_WFI;
9332f9da
FB
2580 return 0;
2581 }
b5ff1b31 2582 rd = (insn >> 12) & 0xf;
18c9b560 2583 if (insn & ARM_CP_RW_BIT) {
8984bd2e
PB
2584 tmp = new_tmp();
2585 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
b5ff1b31
FB
2586 /* If the destination register is r15 then sets condition codes. */
2587 if (rd != 15)
8984bd2e
PB
2588 store_reg(s, rd, tmp);
2589 else
2590 dead_tmp(tmp);
b5ff1b31 2591 } else {
8984bd2e
PB
2592 tmp = load_reg(s, rd);
2593 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2594 dead_tmp(tmp);
a90b7318
AZ
2595 /* Normally we would always end the TB here, but Linux
2596 * arch/arm/mach-pxa/sleep.S expects two instructions following
2597 * an MMU enable to execute from cache. Imitate this behaviour. */
2598 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2599 (insn & 0x0fff0fff) != 0x0e010f10)
2600 gen_lookup_tb(s);
b5ff1b31 2601 }
b5ff1b31
FB
2602 return 0;
2603}
2604
9ee6e8bb
PB
/* Shift x right by n bits; a negative n shifts left instead, so one
   macro handles fields on either side of the target position.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: the 4-bit field at
   'bigbit' gives the upper bits, the single bit at 'smallbit' the LSB.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into 'reg'.  VFP3 adds a
   fifth register-number bit at 'smallbit'; on pre-VFP3 that bit must be
   zero.  NOTE: expands to 'return 1' on the error path, so this is only
   usable inside the disas_* functions.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Destination (D), first operand (N) and second operand (M) register
   fields, for single (S) and double (D) precision encodings.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
4373f3ce
PB
2625/* Move between integer and VFP cores. */
2626static TCGv gen_vfp_mrs(void)
2627{
2628 TCGv tmp = new_tmp();
2629 tcg_gen_mov_i32(tmp, cpu_F0s);
2630 return tmp;
2631}
2632
2633static void gen_vfp_msr(TCGv tmp)
2634{
2635 tcg_gen_mov_i32(cpu_F0s, tmp);
2636 dead_tmp(tmp);
2637}
2638
9ee6e8bb
PB
2639static inline int
2640vfp_enabled(CPUState * env)
2641{
2642 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2643}
2644
ad69471c
PB
2645static void gen_neon_dup_u8(TCGv var, int shift)
2646{
2647 TCGv tmp = new_tmp();
2648 if (shift)
2649 tcg_gen_shri_i32(var, var, shift);
86831435 2650 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2651 tcg_gen_shli_i32(tmp, var, 8);
2652 tcg_gen_or_i32(var, var, tmp);
2653 tcg_gen_shli_i32(tmp, var, 16);
2654 tcg_gen_or_i32(var, var, tmp);
2655 dead_tmp(tmp);
2656}
2657
2658static void gen_neon_dup_low16(TCGv var)
2659{
2660 TCGv tmp = new_tmp();
86831435 2661 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2662 tcg_gen_shli_i32(tmp, var, 16);
2663 tcg_gen_or_i32(var, var, tmp);
2664 dead_tmp(tmp);
2665}
2666
2667static void gen_neon_dup_high16(TCGv var)
2668{
2669 TCGv tmp = new_tmp();
2670 tcg_gen_andi_i32(var, var, 0xffff0000);
2671 tcg_gen_shri_i32(tmp, var, 16);
2672 tcg_gen_or_i32(var, var, tmp);
2673 dead_tmp(tmp);
2674}
2675
b7bcbe95
FB
2676/* Disassemble a VFP instruction. Returns nonzero if an error occured
2677 (ie. an undefined instruction). */
2678static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2679{
2680 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2681 int dp, veclen;
4373f3ce 2682 TCGv tmp;
ad69471c 2683 TCGv tmp2;
b7bcbe95 2684
40f137e1
PB
2685 if (!arm_feature(env, ARM_FEATURE_VFP))
2686 return 1;
2687
9ee6e8bb
PB
2688 if (!vfp_enabled(env)) {
2689 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2690 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2691 return 1;
2692 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2693 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2694 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2695 return 1;
2696 }
b7bcbe95
FB
2697 dp = ((insn & 0xf00) == 0xb00);
2698 switch ((insn >> 24) & 0xf) {
2699 case 0xe:
2700 if (insn & (1 << 4)) {
2701 /* single register transfer */
b7bcbe95
FB
2702 rd = (insn >> 12) & 0xf;
2703 if (dp) {
9ee6e8bb
PB
2704 int size;
2705 int pass;
2706
2707 VFP_DREG_N(rn, insn);
2708 if (insn & 0xf)
b7bcbe95 2709 return 1;
9ee6e8bb
PB
2710 if (insn & 0x00c00060
2711 && !arm_feature(env, ARM_FEATURE_NEON))
2712 return 1;
2713
2714 pass = (insn >> 21) & 1;
2715 if (insn & (1 << 22)) {
2716 size = 0;
2717 offset = ((insn >> 5) & 3) * 8;
2718 } else if (insn & (1 << 5)) {
2719 size = 1;
2720 offset = (insn & (1 << 6)) ? 16 : 0;
2721 } else {
2722 size = 2;
2723 offset = 0;
2724 }
18c9b560 2725 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2726 /* vfp->arm */
ad69471c 2727 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2728 switch (size) {
2729 case 0:
9ee6e8bb 2730 if (offset)
ad69471c 2731 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2732 if (insn & (1 << 23))
ad69471c 2733 gen_uxtb(tmp);
9ee6e8bb 2734 else
ad69471c 2735 gen_sxtb(tmp);
9ee6e8bb
PB
2736 break;
2737 case 1:
9ee6e8bb
PB
2738 if (insn & (1 << 23)) {
2739 if (offset) {
ad69471c 2740 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2741 } else {
ad69471c 2742 gen_uxth(tmp);
9ee6e8bb
PB
2743 }
2744 } else {
2745 if (offset) {
ad69471c 2746 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2747 } else {
ad69471c 2748 gen_sxth(tmp);
9ee6e8bb
PB
2749 }
2750 }
2751 break;
2752 case 2:
9ee6e8bb
PB
2753 break;
2754 }
ad69471c 2755 store_reg(s, rd, tmp);
b7bcbe95
FB
2756 } else {
2757 /* arm->vfp */
ad69471c 2758 tmp = load_reg(s, rd);
9ee6e8bb
PB
2759 if (insn & (1 << 23)) {
2760 /* VDUP */
2761 if (size == 0) {
ad69471c 2762 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2763 } else if (size == 1) {
ad69471c 2764 gen_neon_dup_low16(tmp);
9ee6e8bb 2765 }
cbbccffc
PB
2766 for (n = 0; n <= pass * 2; n++) {
2767 tmp2 = new_tmp();
2768 tcg_gen_mov_i32(tmp2, tmp);
2769 neon_store_reg(rn, n, tmp2);
2770 }
2771 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2772 } else {
2773 /* VMOV */
2774 switch (size) {
2775 case 0:
ad69471c
PB
2776 tmp2 = neon_load_reg(rn, pass);
2777 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2778 dead_tmp(tmp2);
9ee6e8bb
PB
2779 break;
2780 case 1:
ad69471c
PB
2781 tmp2 = neon_load_reg(rn, pass);
2782 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2783 dead_tmp(tmp2);
9ee6e8bb
PB
2784 break;
2785 case 2:
9ee6e8bb
PB
2786 break;
2787 }
ad69471c 2788 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2789 }
b7bcbe95 2790 }
9ee6e8bb
PB
2791 } else { /* !dp */
2792 if ((insn & 0x6f) != 0x00)
2793 return 1;
2794 rn = VFP_SREG_N(insn);
18c9b560 2795 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2796 /* vfp->arm */
2797 if (insn & (1 << 21)) {
2798 /* system register */
40f137e1 2799 rn >>= 1;
9ee6e8bb 2800
b7bcbe95 2801 switch (rn) {
40f137e1 2802 case ARM_VFP_FPSID:
4373f3ce 2803 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2804 VFP3 restricts all id registers to privileged
2805 accesses. */
2806 if (IS_USER(s)
2807 && arm_feature(env, ARM_FEATURE_VFP3))
2808 return 1;
4373f3ce 2809 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2810 break;
40f137e1 2811 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2812 if (IS_USER(s))
2813 return 1;
4373f3ce 2814 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2815 break;
40f137e1
PB
2816 case ARM_VFP_FPINST:
2817 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2818 /* Not present in VFP3. */
2819 if (IS_USER(s)
2820 || arm_feature(env, ARM_FEATURE_VFP3))
2821 return 1;
4373f3ce 2822 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2823 break;
40f137e1 2824 case ARM_VFP_FPSCR:
601d70b9 2825 if (rd == 15) {
4373f3ce
PB
2826 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2827 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2828 } else {
2829 tmp = new_tmp();
2830 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2831 }
b7bcbe95 2832 break;
9ee6e8bb
PB
2833 case ARM_VFP_MVFR0:
2834 case ARM_VFP_MVFR1:
2835 if (IS_USER(s)
2836 || !arm_feature(env, ARM_FEATURE_VFP3))
2837 return 1;
4373f3ce 2838 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2839 break;
b7bcbe95
FB
2840 default:
2841 return 1;
2842 }
2843 } else {
2844 gen_mov_F0_vreg(0, rn);
4373f3ce 2845 tmp = gen_vfp_mrs();
b7bcbe95
FB
2846 }
2847 if (rd == 15) {
b5ff1b31 2848 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2849 gen_set_nzcv(tmp);
2850 dead_tmp(tmp);
2851 } else {
2852 store_reg(s, rd, tmp);
2853 }
b7bcbe95
FB
2854 } else {
2855 /* arm->vfp */
4373f3ce 2856 tmp = load_reg(s, rd);
b7bcbe95 2857 if (insn & (1 << 21)) {
40f137e1 2858 rn >>= 1;
b7bcbe95
FB
2859 /* system register */
2860 switch (rn) {
40f137e1 2861 case ARM_VFP_FPSID:
9ee6e8bb
PB
2862 case ARM_VFP_MVFR0:
2863 case ARM_VFP_MVFR1:
b7bcbe95
FB
2864 /* Writes are ignored. */
2865 break;
40f137e1 2866 case ARM_VFP_FPSCR:
4373f3ce
PB
2867 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2868 dead_tmp(tmp);
b5ff1b31 2869 gen_lookup_tb(s);
b7bcbe95 2870 break;
40f137e1 2871 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2872 if (IS_USER(s))
2873 return 1;
4373f3ce 2874 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2875 gen_lookup_tb(s);
2876 break;
2877 case ARM_VFP_FPINST:
2878 case ARM_VFP_FPINST2:
4373f3ce 2879 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2880 break;
b7bcbe95
FB
2881 default:
2882 return 1;
2883 }
2884 } else {
4373f3ce 2885 gen_vfp_msr(tmp);
b7bcbe95
FB
2886 gen_mov_vreg_F0(0, rn);
2887 }
2888 }
2889 }
2890 } else {
2891 /* data processing */
2892 /* The opcode is in bits 23, 21, 20 and 6. */
2893 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2894 if (dp) {
2895 if (op == 15) {
2896 /* rn is opcode */
2897 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2898 } else {
2899 /* rn is register number */
9ee6e8bb 2900 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2901 }
2902
2903 if (op == 15 && (rn == 15 || rn > 17)) {
2904 /* Integer or single precision destination. */
9ee6e8bb 2905 rd = VFP_SREG_D(insn);
b7bcbe95 2906 } else {
9ee6e8bb 2907 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2908 }
2909
2910 if (op == 15 && (rn == 16 || rn == 17)) {
2911 /* Integer source. */
2912 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2913 } else {
9ee6e8bb 2914 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2915 }
2916 } else {
9ee6e8bb 2917 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2918 if (op == 15 && rn == 15) {
2919 /* Double precision destination. */
9ee6e8bb
PB
2920 VFP_DREG_D(rd, insn);
2921 } else {
2922 rd = VFP_SREG_D(insn);
2923 }
2924 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2925 }
2926
2927 veclen = env->vfp.vec_len;
2928 if (op == 15 && rn > 3)
2929 veclen = 0;
2930
2931 /* Shut up compiler warnings. */
2932 delta_m = 0;
2933 delta_d = 0;
2934 bank_mask = 0;
3b46e624 2935
b7bcbe95
FB
2936 if (veclen > 0) {
2937 if (dp)
2938 bank_mask = 0xc;
2939 else
2940 bank_mask = 0x18;
2941
2942 /* Figure out what type of vector operation this is. */
2943 if ((rd & bank_mask) == 0) {
2944 /* scalar */
2945 veclen = 0;
2946 } else {
2947 if (dp)
2948 delta_d = (env->vfp.vec_stride >> 1) + 1;
2949 else
2950 delta_d = env->vfp.vec_stride + 1;
2951
2952 if ((rm & bank_mask) == 0) {
2953 /* mixed scalar/vector */
2954 delta_m = 0;
2955 } else {
2956 /* vector */
2957 delta_m = delta_d;
2958 }
2959 }
2960 }
2961
2962 /* Load the initial operands. */
2963 if (op == 15) {
2964 switch (rn) {
2965 case 16:
2966 case 17:
2967 /* Integer source */
2968 gen_mov_F0_vreg(0, rm);
2969 break;
2970 case 8:
2971 case 9:
2972 /* Compare */
2973 gen_mov_F0_vreg(dp, rd);
2974 gen_mov_F1_vreg(dp, rm);
2975 break;
2976 case 10:
2977 case 11:
2978 /* Compare with zero */
2979 gen_mov_F0_vreg(dp, rd);
2980 gen_vfp_F1_ld0(dp);
2981 break;
9ee6e8bb
PB
2982 case 20:
2983 case 21:
2984 case 22:
2985 case 23:
644ad806
PB
2986 case 28:
2987 case 29:
2988 case 30:
2989 case 31:
9ee6e8bb
PB
2990 /* Source and destination the same. */
2991 gen_mov_F0_vreg(dp, rd);
2992 break;
b7bcbe95
FB
2993 default:
2994 /* One source operand. */
2995 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2996 break;
b7bcbe95
FB
2997 }
2998 } else {
2999 /* Two source operands. */
3000 gen_mov_F0_vreg(dp, rn);
3001 gen_mov_F1_vreg(dp, rm);
3002 }
3003
3004 for (;;) {
3005 /* Perform the calculation. */
3006 switch (op) {
3007 case 0: /* mac: fd + (fn * fm) */
3008 gen_vfp_mul(dp);
3009 gen_mov_F1_vreg(dp, rd);
3010 gen_vfp_add(dp);
3011 break;
3012 case 1: /* nmac: fd - (fn * fm) */
3013 gen_vfp_mul(dp);
3014 gen_vfp_neg(dp);
3015 gen_mov_F1_vreg(dp, rd);
3016 gen_vfp_add(dp);
3017 break;
3018 case 2: /* msc: -fd + (fn * fm) */
3019 gen_vfp_mul(dp);
3020 gen_mov_F1_vreg(dp, rd);
3021 gen_vfp_sub(dp);
3022 break;
3023 case 3: /* nmsc: -fd - (fn * fm) */
3024 gen_vfp_mul(dp);
b7bcbe95 3025 gen_vfp_neg(dp);
c9fb531a
PB
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_sub(dp);
b7bcbe95
FB
3028 break;
3029 case 4: /* mul: fn * fm */
3030 gen_vfp_mul(dp);
3031 break;
3032 case 5: /* nmul: -(fn * fm) */
3033 gen_vfp_mul(dp);
3034 gen_vfp_neg(dp);
3035 break;
3036 case 6: /* add: fn + fm */
3037 gen_vfp_add(dp);
3038 break;
3039 case 7: /* sub: fn - fm */
3040 gen_vfp_sub(dp);
3041 break;
3042 case 8: /* div: fn / fm */
3043 gen_vfp_div(dp);
3044 break;
9ee6e8bb
PB
3045 case 14: /* fconst */
3046 if (!arm_feature(env, ARM_FEATURE_VFP3))
3047 return 1;
3048
3049 n = (insn << 12) & 0x80000000;
3050 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3051 if (dp) {
3052 if (i & 0x40)
3053 i |= 0x3f80;
3054 else
3055 i |= 0x4000;
3056 n |= i << 16;
4373f3ce 3057 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3058 } else {
3059 if (i & 0x40)
3060 i |= 0x780;
3061 else
3062 i |= 0x800;
3063 n |= i << 19;
5b340b51 3064 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3065 }
9ee6e8bb 3066 break;
b7bcbe95
FB
3067 case 15: /* extension space */
3068 switch (rn) {
3069 case 0: /* cpy */
3070 /* no-op */
3071 break;
3072 case 1: /* abs */
3073 gen_vfp_abs(dp);
3074 break;
3075 case 2: /* neg */
3076 gen_vfp_neg(dp);
3077 break;
3078 case 3: /* sqrt */
3079 gen_vfp_sqrt(dp);
3080 break;
3081 case 8: /* cmp */
3082 gen_vfp_cmp(dp);
3083 break;
3084 case 9: /* cmpe */
3085 gen_vfp_cmpe(dp);
3086 break;
3087 case 10: /* cmpz */
3088 gen_vfp_cmp(dp);
3089 break;
3090 case 11: /* cmpez */
3091 gen_vfp_F1_ld0(dp);
3092 gen_vfp_cmpe(dp);
3093 break;
3094 case 15: /* single<->double conversion */
3095 if (dp)
4373f3ce 3096 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3097 else
4373f3ce 3098 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3099 break;
3100 case 16: /* fuito */
3101 gen_vfp_uito(dp);
3102 break;
3103 case 17: /* fsito */
3104 gen_vfp_sito(dp);
3105 break;
9ee6e8bb
PB
3106 case 20: /* fshto */
3107 if (!arm_feature(env, ARM_FEATURE_VFP3))
3108 return 1;
644ad806 3109 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3110 break;
3111 case 21: /* fslto */
3112 if (!arm_feature(env, ARM_FEATURE_VFP3))
3113 return 1;
644ad806 3114 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3115 break;
3116 case 22: /* fuhto */
3117 if (!arm_feature(env, ARM_FEATURE_VFP3))
3118 return 1;
644ad806 3119 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3120 break;
3121 case 23: /* fulto */
3122 if (!arm_feature(env, ARM_FEATURE_VFP3))
3123 return 1;
644ad806 3124 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3125 break;
b7bcbe95
FB
3126 case 24: /* ftoui */
3127 gen_vfp_toui(dp);
3128 break;
3129 case 25: /* ftouiz */
3130 gen_vfp_touiz(dp);
3131 break;
3132 case 26: /* ftosi */
3133 gen_vfp_tosi(dp);
3134 break;
3135 case 27: /* ftosiz */
3136 gen_vfp_tosiz(dp);
3137 break;
9ee6e8bb
PB
3138 case 28: /* ftosh */
3139 if (!arm_feature(env, ARM_FEATURE_VFP3))
3140 return 1;
644ad806 3141 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3142 break;
3143 case 29: /* ftosl */
3144 if (!arm_feature(env, ARM_FEATURE_VFP3))
3145 return 1;
644ad806 3146 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3147 break;
3148 case 30: /* ftouh */
3149 if (!arm_feature(env, ARM_FEATURE_VFP3))
3150 return 1;
644ad806 3151 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3152 break;
3153 case 31: /* ftoul */
3154 if (!arm_feature(env, ARM_FEATURE_VFP3))
3155 return 1;
644ad806 3156 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3157 break;
b7bcbe95
FB
3158 default: /* undefined */
3159 printf ("rn:%d\n", rn);
3160 return 1;
3161 }
3162 break;
3163 default: /* undefined */
3164 printf ("op:%d\n", op);
3165 return 1;
3166 }
3167
3168 /* Write back the result. */
3169 if (op == 15 && (rn >= 8 && rn <= 11))
3170 ; /* Comparison, do nothing. */
3171 else if (op == 15 && rn > 17)
3172 /* Integer result. */
3173 gen_mov_vreg_F0(0, rd);
3174 else if (op == 15 && rn == 15)
3175 /* conversion */
3176 gen_mov_vreg_F0(!dp, rd);
3177 else
3178 gen_mov_vreg_F0(dp, rd);
3179
3180 /* break out of the loop if we have finished */
3181 if (veclen == 0)
3182 break;
3183
3184 if (op == 15 && delta_m == 0) {
3185 /* single source one-many */
3186 while (veclen--) {
3187 rd = ((rd + delta_d) & (bank_mask - 1))
3188 | (rd & bank_mask);
3189 gen_mov_vreg_F0(dp, rd);
3190 }
3191 break;
3192 }
3193 /* Setup the next operands. */
3194 veclen--;
3195 rd = ((rd + delta_d) & (bank_mask - 1))
3196 | (rd & bank_mask);
3197
3198 if (op == 15) {
3199 /* One source operand. */
3200 rm = ((rm + delta_m) & (bank_mask - 1))
3201 | (rm & bank_mask);
3202 gen_mov_F0_vreg(dp, rm);
3203 } else {
3204 /* Two source operands. */
3205 rn = ((rn + delta_d) & (bank_mask - 1))
3206 | (rn & bank_mask);
3207 gen_mov_F0_vreg(dp, rn);
3208 if (delta_m) {
3209 rm = ((rm + delta_m) & (bank_mask - 1))
3210 | (rm & bank_mask);
3211 gen_mov_F1_vreg(dp, rm);
3212 }
3213 }
3214 }
3215 }
3216 break;
3217 case 0xc:
3218 case 0xd:
9ee6e8bb 3219 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3220 /* two-register transfer */
3221 rn = (insn >> 16) & 0xf;
3222 rd = (insn >> 12) & 0xf;
3223 if (dp) {
9ee6e8bb
PB
3224 VFP_DREG_M(rm, insn);
3225 } else {
3226 rm = VFP_SREG_M(insn);
3227 }
b7bcbe95 3228
18c9b560 3229 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3230 /* vfp->arm */
3231 if (dp) {
4373f3ce
PB
3232 gen_mov_F0_vreg(0, rm * 2);
3233 tmp = gen_vfp_mrs();
3234 store_reg(s, rd, tmp);
3235 gen_mov_F0_vreg(0, rm * 2 + 1);
3236 tmp = gen_vfp_mrs();
3237 store_reg(s, rn, tmp);
b7bcbe95
FB
3238 } else {
3239 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3240 tmp = gen_vfp_mrs();
3241 store_reg(s, rn, tmp);
b7bcbe95 3242 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3243 tmp = gen_vfp_mrs();
3244 store_reg(s, rd, tmp);
b7bcbe95
FB
3245 }
3246 } else {
3247 /* arm->vfp */
3248 if (dp) {
4373f3ce
PB
3249 tmp = load_reg(s, rd);
3250 gen_vfp_msr(tmp);
3251 gen_mov_vreg_F0(0, rm * 2);
3252 tmp = load_reg(s, rn);
3253 gen_vfp_msr(tmp);
3254 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3255 } else {
4373f3ce
PB
3256 tmp = load_reg(s, rn);
3257 gen_vfp_msr(tmp);
b7bcbe95 3258 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3259 tmp = load_reg(s, rd);
3260 gen_vfp_msr(tmp);
b7bcbe95
FB
3261 gen_mov_vreg_F0(0, rm + 1);
3262 }
3263 }
3264 } else {
3265 /* Load/store */
3266 rn = (insn >> 16) & 0xf;
3267 if (dp)
9ee6e8bb 3268 VFP_DREG_D(rd, insn);
b7bcbe95 3269 else
9ee6e8bb
PB
3270 rd = VFP_SREG_D(insn);
3271 if (s->thumb && rn == 15) {
3272 gen_op_movl_T1_im(s->pc & ~2);
3273 } else {
3274 gen_movl_T1_reg(s, rn);
3275 }
b7bcbe95
FB
3276 if ((insn & 0x01200000) == 0x01000000) {
3277 /* Single load/store */
3278 offset = (insn & 0xff) << 2;
3279 if ((insn & (1 << 23)) == 0)
3280 offset = -offset;
3281 gen_op_addl_T1_im(offset);
3282 if (insn & (1 << 20)) {
b5ff1b31 3283 gen_vfp_ld(s, dp);
b7bcbe95
FB
3284 gen_mov_vreg_F0(dp, rd);
3285 } else {
3286 gen_mov_F0_vreg(dp, rd);
b5ff1b31 3287 gen_vfp_st(s, dp);
b7bcbe95
FB
3288 }
3289 } else {
3290 /* load/store multiple */
3291 if (dp)
3292 n = (insn >> 1) & 0x7f;
3293 else
3294 n = insn & 0xff;
3295
3296 if (insn & (1 << 24)) /* pre-decrement */
3297 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3298
3299 if (dp)
3300 offset = 8;
3301 else
3302 offset = 4;
3303 for (i = 0; i < n; i++) {
18c9b560 3304 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3305 /* load */
b5ff1b31 3306 gen_vfp_ld(s, dp);
b7bcbe95
FB
3307 gen_mov_vreg_F0(dp, rd + i);
3308 } else {
3309 /* store */
3310 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 3311 gen_vfp_st(s, dp);
b7bcbe95
FB
3312 }
3313 gen_op_addl_T1_im(offset);
3314 }
3315 if (insn & (1 << 21)) {
3316 /* writeback */
3317 if (insn & (1 << 24))
3318 offset = -offset * n;
3319 else if (dp && (insn & 1))
3320 offset = 4;
3321 else
3322 offset = 0;
3323
3324 if (offset != 0)
3325 gen_op_addl_T1_im(offset);
3326 gen_movl_reg_T1(s, rn);
3327 }
3328 }
3329 }
3330 break;
3331 default:
3332 /* Should never happen. */
3333 return 1;
3334 }
3335 return 0;
3336}
3337
6e256c93 3338static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3339{
6e256c93
FB
3340 TranslationBlock *tb;
3341
3342 tb = s->tb;
3343 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3344 tcg_gen_goto_tb(n);
8984bd2e 3345 gen_set_pc_im(dest);
57fec1fe 3346 tcg_gen_exit_tb((long)tb + n);
6e256c93 3347 } else {
8984bd2e 3348 gen_set_pc_im(dest);
57fec1fe 3349 tcg_gen_exit_tb(0);
6e256c93 3350 }
c53be334
FB
3351}
3352
8aaca4c0
FB
3353static inline void gen_jmp (DisasContext *s, uint32_t dest)
3354{
551bd27f 3355 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3356 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3357 if (s->thumb)
d9ba4830
PB
3358 dest |= 1;
3359 gen_bx_im(s, dest);
8aaca4c0 3360 } else {
6e256c93 3361 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3362 s->is_jmp = DISAS_TB_JUMP;
3363 }
3364}
3365
d9ba4830 3366static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3367{
ee097184 3368 if (x)
d9ba4830 3369 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3370 else
d9ba4830 3371 gen_sxth(t0);
ee097184 3372 if (y)
d9ba4830 3373 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3374 else
d9ba4830
PB
3375 gen_sxth(t1);
3376 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3377}
3378
3379/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3380static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3381 uint32_t mask;
3382
3383 mask = 0;
3384 if (flags & (1 << 0))
3385 mask |= 0xff;
3386 if (flags & (1 << 1))
3387 mask |= 0xff00;
3388 if (flags & (1 << 2))
3389 mask |= 0xff0000;
3390 if (flags & (1 << 3))
3391 mask |= 0xff000000;
9ee6e8bb 3392
2ae23e75 3393 /* Mask out undefined bits. */
9ee6e8bb
PB
3394 mask &= ~CPSR_RESERVED;
3395 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3396 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3397 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3398 mask &= ~CPSR_IT;
9ee6e8bb 3399 /* Mask out execution state bits. */
2ae23e75 3400 if (!spsr)
e160c51c 3401 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3402 /* Mask out privileged bits. */
3403 if (IS_USER(s))
9ee6e8bb 3404 mask &= CPSR_USER;
b5ff1b31
FB
3405 return mask;
3406}
3407
2fbac54b
FN
3408/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3409static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3410{
d9ba4830 3411 TCGv tmp;
b5ff1b31
FB
3412 if (spsr) {
3413 /* ??? This is also undefined in system mode. */
3414 if (IS_USER(s))
3415 return 1;
d9ba4830
PB
3416
3417 tmp = load_cpu_field(spsr);
3418 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3419 tcg_gen_andi_i32(t0, t0, mask);
3420 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3421 store_cpu_field(tmp, spsr);
b5ff1b31 3422 } else {
2fbac54b 3423 gen_set_cpsr(t0, mask);
b5ff1b31 3424 }
2fbac54b 3425 dead_tmp(t0);
b5ff1b31
FB
3426 gen_lookup_tb(s);
3427 return 0;
3428}
3429
2fbac54b
FN
3430/* Returns nonzero if access to the PSR is not permitted. */
3431static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3432{
3433 TCGv tmp;
3434 tmp = new_tmp();
3435 tcg_gen_movi_i32(tmp, val);
3436 return gen_set_psr(s, mask, spsr, tmp);
3437}
3438
e9bb4aa9
JR
3439/* Generate an old-style exception return. Marks pc as dead. */
3440static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3441{
d9ba4830 3442 TCGv tmp;
e9bb4aa9 3443 store_reg(s, 15, pc);
d9ba4830
PB
3444 tmp = load_cpu_field(spsr);
3445 gen_set_cpsr(tmp, 0xffffffff);
3446 dead_tmp(tmp);
b5ff1b31
FB
3447 s->is_jmp = DISAS_UPDATE;
3448}
3449
b0109805
PB
3450/* Generate a v6 exception return. Marks both values as dead. */
3451static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3452{
b0109805
PB
3453 gen_set_cpsr(cpsr, 0xffffffff);
3454 dead_tmp(cpsr);
3455 store_reg(s, 15, pc);
9ee6e8bb
PB
3456 s->is_jmp = DISAS_UPDATE;
3457}
3b46e624 3458
9ee6e8bb
PB
3459static inline void
3460gen_set_condexec (DisasContext *s)
3461{
3462 if (s->condexec_mask) {
8f01245e
PB
3463 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3464 TCGv tmp = new_tmp();
3465 tcg_gen_movi_i32(tmp, val);
d9ba4830 3466 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3467 }
3468}
3b46e624 3469
9ee6e8bb
PB
3470static void gen_nop_hint(DisasContext *s, int val)
3471{
3472 switch (val) {
3473 case 3: /* wfi */
8984bd2e 3474 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3475 s->is_jmp = DISAS_WFI;
3476 break;
3477 case 2: /* wfe */
3478 case 4: /* sev */
3479 /* TODO: Implement SEV and WFE. May help SMP performance. */
3480 default: /* nop */
3481 break;
3482 }
3483}
99c475ab 3484
ad69471c
PB
/* These macros help make the code more readable when migrating from the
   old dyngen helpers.  They should probably be removed when
   T0/T1 are removed.  */
#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb
PB
3492
3493static inline int gen_neon_add(int size)
3494{
3495 switch (size) {
ad69471c
PB
3496 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3497 case 1: gen_helper_neon_add_u16(CPU_T001); break;
9ee6e8bb
PB
3498 case 2: gen_op_addl_T0_T1(); break;
3499 default: return 1;
3500 }
3501 return 0;
3502}
3503
ad69471c
PB
3504static inline void gen_neon_rsb(int size)
3505{
3506 switch (size) {
3507 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3508 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3509 case 2: gen_op_rsbl_T0_T1(); break;
3510 default: return;
3511 }
3512}
3513
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* FIXME: This is wrong. They set the wrong overflow bit.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3525
/* Invoke the s8/u8/s16/u16/s32/u32 variant of a NEON helper taking
   cpu_env, selected by the surrounding 'size' and 'u'; result in T0.  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 1: gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 2: gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 3: gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 4: gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 5: gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3548
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 1: gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 2: gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 3: gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 4: gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 5: gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    default: return 1; \
    }} while (0)
3571
3572static inline void
3573gen_neon_movl_scratch_T0(int scratch)
3574{
3575 uint32_t offset;
3576
3577 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3578 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3579}
3580
3581static inline void
3582gen_neon_movl_scratch_T1(int scratch)
3583{
3584 uint32_t offset;
3585
3586 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3587 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3588}
3589
3590static inline void
3591gen_neon_movl_T0_scratch(int scratch)
3592{
3593 uint32_t offset;
3594
3595 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3596 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3597}
3598
3599static inline void
3600gen_neon_movl_T1_scratch(int scratch)
3601{
3602 uint32_t offset;
3603
3604 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3605 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3606}
3607
3608static inline void gen_neon_get_scalar(int size, int reg)
3609{
3610 if (size == 1) {
3611 NEON_GET_REG(T0, reg >> 1, reg & 1);
3612 } else {
3613 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3614 if (reg & 1)
ad69471c 3615 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb 3616 else
ad69471c 3617 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb
PB
3618 }
3619}
3620
19457615
FN
3621static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3622{
3623 TCGv rd, rm, tmp;
3624
3625 rd = new_tmp();
3626 rm = new_tmp();
3627 tmp = new_tmp();
3628
3629 tcg_gen_andi_i32(rd, t0, 0xff);
3630 tcg_gen_shri_i32(tmp, t0, 8);
3631 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3632 tcg_gen_or_i32(rd, rd, tmp);
3633 tcg_gen_shli_i32(tmp, t1, 16);
3634 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3635 tcg_gen_or_i32(rd, rd, tmp);
3636 tcg_gen_shli_i32(tmp, t1, 8);
3637 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3638 tcg_gen_or_i32(rd, rd, tmp);
3639
3640 tcg_gen_shri_i32(rm, t0, 8);
3641 tcg_gen_andi_i32(rm, rm, 0xff);
3642 tcg_gen_shri_i32(tmp, t0, 16);
3643 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3644 tcg_gen_or_i32(rm, rm, tmp);
3645 tcg_gen_shli_i32(tmp, t1, 8);
3646 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3647 tcg_gen_or_i32(rm, rm, tmp);
3648 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3649 tcg_gen_or_i32(t1, rm, tmp);
3650 tcg_gen_mov_i32(t0, rd);
3651
3652 dead_tmp(tmp);
3653 dead_tmp(rm);
3654 dead_tmp(rd);
3655}
3656
3657static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3658{
3659 TCGv rd, rm, tmp;
3660
3661 rd = new_tmp();
3662 rm = new_tmp();
3663 tmp = new_tmp();
3664
3665 tcg_gen_andi_i32(rd, t0, 0xff);
3666 tcg_gen_shli_i32(tmp, t1, 8);
3667 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3668 tcg_gen_or_i32(rd, rd, tmp);
3669 tcg_gen_shli_i32(tmp, t0, 16);
3670 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3671 tcg_gen_or_i32(rd, rd, tmp);
3672 tcg_gen_shli_i32(tmp, t1, 24);
3673 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3674 tcg_gen_or_i32(rd, rd, tmp);
3675
3676 tcg_gen_andi_i32(rm, t1, 0xff000000);
3677 tcg_gen_shri_i32(tmp, t0, 8);
3678 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3679 tcg_gen_or_i32(rm, rm, tmp);
3680 tcg_gen_shri_i32(tmp, t1, 8);
3681 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3682 tcg_gen_or_i32(rm, rm, tmp);
3683 tcg_gen_shri_i32(tmp, t0, 16);
3684 tcg_gen_andi_i32(tmp, tmp, 0xff);
3685 tcg_gen_or_i32(t1, rm, tmp);
3686 tcg_gen_mov_i32(t0, rd);
3687
3688 dead_tmp(tmp);
3689 dead_tmp(rm);
3690 dead_tmp(rd);
3691}
3692
3693static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3694{
3695 TCGv tmp, tmp2;
3696
3697 tmp = new_tmp();
3698 tmp2 = new_tmp();
3699
3700 tcg_gen_andi_i32(tmp, t0, 0xffff);
3701 tcg_gen_shli_i32(tmp2, t1, 16);
3702 tcg_gen_or_i32(tmp, tmp, tmp2);
3703 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3704 tcg_gen_shri_i32(tmp2, t0, 16);
3705 tcg_gen_or_i32(t1, t1, tmp2);
3706 tcg_gen_mov_i32(t0, tmp);
3707
3708 dead_tmp(tmp2);
3709 dead_tmp(tmp);
3710}
3711
9ee6e8bb
PB
3712static void gen_neon_unzip(int reg, int q, int tmp, int size)
3713{
3714 int n;
3715
3716 for (n = 0; n < q + 1; n += 2) {
3717 NEON_GET_REG(T0, reg, n);
19457615 3718 NEON_GET_REG(T1, reg, n + 1);
9ee6e8bb 3719 switch (size) {
19457615
FN
3720 case 0: gen_neon_unzip_u8(cpu_T[0], cpu_T[1]); break;
3721 case 1: gen_neon_zip_u16(cpu_T[0], cpu_T[1]); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3722 case 2: /* no-op */; break;
3723 default: abort();
3724 }
19457615
FN
3725 gen_neon_movl_T0_scratch(tmp + n);
3726 gen_neon_movl_T1_scratch(tmp + n + 1);
9ee6e8bb
PB
3727 }
3728}
3729
19457615
FN
3730static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3731{
3732 TCGv rd, tmp;
3733
3734 rd = new_tmp();
3735 tmp = new_tmp();
3736
3737 tcg_gen_shli_i32(rd, t0, 8);
3738 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3739 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3740 tcg_gen_or_i32(rd, rd, tmp);
3741
3742 tcg_gen_shri_i32(t1, t1, 8);
3743 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3744 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3745 tcg_gen_or_i32(t1, t1, tmp);
3746 tcg_gen_mov_i32(t0, rd);
3747
3748 dead_tmp(tmp);
3749 dead_tmp(rd);
3750}
3751
3752static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3753{
3754 TCGv rd, tmp;
3755
3756 rd = new_tmp();
3757 tmp = new_tmp();
3758
3759 tcg_gen_shli_i32(rd, t0, 16);
3760 tcg_gen_andi_i32(tmp, t1, 0xffff);
3761 tcg_gen_or_i32(rd, rd, tmp);
3762 tcg_gen_shri_i32(t1, t1, 16);
3763 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3764 tcg_gen_or_i32(t1, t1, tmp);
3765 tcg_gen_mov_i32(t0, rd);
3766
3767 dead_tmp(tmp);
3768 dead_tmp(rd);
3769}
3770
3771
9ee6e8bb
PB
/* Decode table for the 'type' field of NEON VLDn/VSTn (multiple
   structures): number of registers transferred, element interleave
   factor, and register spacing.  Read-only, hence const.  */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3789
3790/* Translate a NEON load/store element instruction. Return nonzero if the
3791 instruction is invalid. */
3792static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3793{
3794 int rd, rn, rm;
3795 int op;
3796 int nregs;
3797 int interleave;
3798 int stride;
3799 int size;
3800 int reg;
3801 int pass;
3802 int load;
3803 int shift;
9ee6e8bb 3804 int n;
b0109805 3805 TCGv tmp;
8f8e3aa4 3806 TCGv tmp2;
9ee6e8bb
PB
3807
3808 if (!vfp_enabled(env))
3809 return 1;
3810 VFP_DREG_D(rd, insn);
3811 rn = (insn >> 16) & 0xf;
3812 rm = insn & 0xf;
3813 load = (insn & (1 << 21)) != 0;
3814 if ((insn & (1 << 23)) == 0) {
3815 /* Load store all elements. */
3816 op = (insn >> 8) & 0xf;
3817 size = (insn >> 6) & 3;
3818 if (op > 10 || size == 3)
3819 return 1;
3820 nregs = neon_ls_element_type[op].nregs;
3821 interleave = neon_ls_element_type[op].interleave;
3822 gen_movl_T1_reg(s, rn);
3823 stride = (1 << size) * interleave;
3824 for (reg = 0; reg < nregs; reg++) {
3825 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3826 gen_movl_T1_reg(s, rn);
3827 gen_op_addl_T1_im((1 << size) * reg);
3828 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3829 gen_movl_T1_reg(s, rn);
3830 gen_op_addl_T1_im(1 << size);
3831 }
3832 for (pass = 0; pass < 2; pass++) {
3833 if (size == 2) {
3834 if (load) {
b0109805 3835 tmp = gen_ld32(cpu_T[1], IS_USER(s));
ad69471c 3836 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3837 } else {
ad69471c 3838 tmp = neon_load_reg(rd, pass);
b0109805 3839 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3840 }
3841 gen_op_addl_T1_im(stride);
3842 } else if (size == 1) {
3843 if (load) {
b0109805 3844 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3845 gen_op_addl_T1_im(stride);
8f8e3aa4 3846 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3847 gen_op_addl_T1_im(stride);
8f8e3aa4
PB
3848 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3849 dead_tmp(tmp2);
3850 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3851 } else {
8f8e3aa4
PB
3852 tmp = neon_load_reg(rd, pass);
3853 tmp2 = new_tmp();
3854 tcg_gen_shri_i32(tmp2, tmp, 16);
b0109805 3855 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3856 gen_op_addl_T1_im(stride);
8f8e3aa4 3857 gen_st16(tmp2, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3858 gen_op_addl_T1_im(stride);
3859 }
3860 } else /* size == 0 */ {
3861 if (load) {
a50f5b91 3862 TCGV_UNUSED(tmp2);
9ee6e8bb 3863 for (n = 0; n < 4; n++) {
b0109805 3864 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3865 gen_op_addl_T1_im(stride);
3866 if (n == 0) {
8f8e3aa4 3867 tmp2 = tmp;
9ee6e8bb 3868 } else {
8f8e3aa4
PB
3869 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3870 dead_tmp(tmp);
9ee6e8bb 3871 }
9ee6e8bb 3872 }
8f8e3aa4 3873 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3874 } else {
8f8e3aa4 3875 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3876 for (n = 0; n < 4; n++) {
8f8e3aa4 3877 tmp = new_tmp();
9ee6e8bb 3878 if (n == 0) {
8f8e3aa4 3879 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3880 } else {
8f8e3aa4 3881 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3882 }
b0109805 3883 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3884 gen_op_addl_T1_im(stride);
9ee6e8bb 3885 }
8f8e3aa4 3886 dead_tmp(tmp2);
9ee6e8bb
PB
3887 }
3888 }
3889 }
3890 rd += neon_ls_element_type[op].spacing;
3891 }
3892 stride = nregs * 8;
3893 } else {
3894 size = (insn >> 10) & 3;
3895 if (size == 3) {
3896 /* Load single element to all lanes. */
3897 if (!load)
3898 return 1;
3899 size = (insn >> 6) & 3;
3900 nregs = ((insn >> 8) & 3) + 1;
3901 stride = (insn & (1 << 5)) ? 2 : 1;
ff8263a9 3902 gen_movl_T1_reg(s, rn);
9ee6e8bb
PB
3903 for (reg = 0; reg < nregs; reg++) {
3904 switch (size) {
3905 case 0:
b0109805 3906 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
ad69471c 3907 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3908 break;
3909 case 1:
b0109805 3910 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
ad69471c 3911 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3912 break;
3913 case 2:
b0109805 3914 tmp = gen_ld32(cpu_T[0], IS_USER(s));
9ee6e8bb
PB
3915 break;
3916 case 3:
3917 return 1;
a50f5b91
PB
3918 default: /* Avoid compiler warnings. */
3919 abort();
99c475ab 3920 }
9ee6e8bb 3921 gen_op_addl_T1_im(1 << size);
ad69471c
PB
3922 tmp2 = new_tmp();
3923 tcg_gen_mov_i32(tmp2, tmp);
3924 neon_store_reg(rd, 0, tmp2);
3018f259 3925 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3926 rd += stride;
3927 }
3928 stride = (1 << size) * nregs;
3929 } else {
3930 /* Single element. */
3931 pass = (insn >> 7) & 1;
3932 switch (size) {
3933 case 0:
3934 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3935 stride = 1;
3936 break;
3937 case 1:
3938 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3939 stride = (insn & (1 << 5)) ? 2 : 1;
3940 break;
3941 case 2:
3942 shift = 0;
9ee6e8bb
PB
3943 stride = (insn & (1 << 6)) ? 2 : 1;
3944 break;
3945 default:
3946 abort();
3947 }
3948 nregs = ((insn >> 8) & 3) + 1;
3949 gen_movl_T1_reg(s, rn);
3950 for (reg = 0; reg < nregs; reg++) {
3951 if (load) {
9ee6e8bb
PB
3952 switch (size) {
3953 case 0:
b0109805 3954 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3955 break;
3956 case 1:
b0109805 3957 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3958 break;
3959 case 2:
b0109805 3960 tmp = gen_ld32(cpu_T[1], IS_USER(s));
9ee6e8bb 3961 break;
a50f5b91
PB
3962 default: /* Avoid compiler warnings. */
3963 abort();
9ee6e8bb
PB
3964 }
3965 if (size != 2) {
8f8e3aa4
PB
3966 tmp2 = neon_load_reg(rd, pass);
3967 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3968 dead_tmp(tmp2);
9ee6e8bb 3969 }
8f8e3aa4 3970 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3971 } else { /* Store */
8f8e3aa4
PB
3972 tmp = neon_load_reg(rd, pass);
3973 if (shift)
3974 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3975 switch (size) {
3976 case 0:
b0109805 3977 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3978 break;
3979 case 1:
b0109805 3980 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3981 break;
3982 case 2:
b0109805 3983 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3984 break;
99c475ab 3985 }
99c475ab 3986 }
9ee6e8bb
PB
3987 rd += stride;
3988 gen_op_addl_T1_im(1 << size);
99c475ab 3989 }
9ee6e8bb 3990 stride = nregs * (1 << size);
99c475ab 3991 }
9ee6e8bb
PB
3992 }
3993 if (rm != 15) {
b26eefb6
PB
3994 TCGv base;
3995
3996 base = load_reg(s, rn);
9ee6e8bb 3997 if (rm == 13) {
b26eefb6 3998 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3999 } else {
b26eefb6
PB
4000 TCGv index;
4001 index = load_reg(s, rm);
4002 tcg_gen_add_i32(base, base, index);
4003 dead_tmp(index);
9ee6e8bb 4004 }
b26eefb6 4005 store_reg(s, rn, base);
9ee6e8bb
PB
4006 }
4007 return 0;
4008}
3b46e624 4009
8f8e3aa4
PB
4010/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4011static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4012{
4013 tcg_gen_and_i32(t, t, c);
4014 tcg_gen_bic_i32(f, f, c);
4015 tcg_gen_or_i32(dest, t, f);
4016}
4017
a7812ae4 4018static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4019{
4020 switch (size) {
4021 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4022 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4023 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4024 default: abort();
4025 }
4026}
4027
a7812ae4 4028static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4029{
4030 switch (size) {
4031 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4032 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4033 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4034 default: abort();
4035 }
4036}
4037
a7812ae4 4038static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4039{
4040 switch (size) {
4041 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4042 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4043 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4044 default: abort();
4045 }
4046}
4047
4048static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4049 int q, int u)
4050{
4051 if (q) {
4052 if (u) {
4053 switch (size) {
4054 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4055 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4056 default: abort();
4057 }
4058 } else {
4059 switch (size) {
4060 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4061 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4062 default: abort();
4063 }
4064 }
4065 } else {
4066 if (u) {
4067 switch (size) {
4068 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4069 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4070 default: abort();
4071 }
4072 } else {
4073 switch (size) {
4074 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4075 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4076 default: abort();
4077 }
4078 }
4079 }
4080}
4081
a7812ae4 4082static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4083{
4084 if (u) {
4085 switch (size) {
4086 case 0: gen_helper_neon_widen_u8(dest, src); break;
4087 case 1: gen_helper_neon_widen_u16(dest, src); break;
4088 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4089 default: abort();
4090 }
4091 } else {
4092 switch (size) {
4093 case 0: gen_helper_neon_widen_s8(dest, src); break;
4094 case 1: gen_helper_neon_widen_s16(dest, src); break;
4095 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4096 default: abort();
4097 }
4098 }
4099 dead_tmp(src);
4100}
4101
4102static inline void gen_neon_addl(int size)
4103{
4104 switch (size) {
4105 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4106 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4107 case 2: tcg_gen_add_i64(CPU_V001); break;
4108 default: abort();
4109 }
4110}
4111
4112static inline void gen_neon_subl(int size)
4113{
4114 switch (size) {
4115 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4116 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4117 case 2: tcg_gen_sub_i64(CPU_V001); break;
4118 default: abort();
4119 }
4120}
4121
a7812ae4 4122static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4123{
4124 switch (size) {
4125 case 0: gen_helper_neon_negl_u16(var, var); break;
4126 case 1: gen_helper_neon_negl_u32(var, var); break;
4127 case 2: gen_helper_neon_negl_u64(var, var); break;
4128 default: abort();
4129 }
4130}
4131
a7812ae4 4132static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4133{
4134 switch (size) {
4135 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4136 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4137 default: abort();
4138 }
4139}
4140
a7812ae4 4141static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4142{
a7812ae4 4143 TCGv_i64 tmp;
ad69471c
PB
4144
4145 switch ((size << 1) | u) {
4146 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4147 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4148 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4149 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4150 case 4:
4151 tmp = gen_muls_i64_i32(a, b);
4152 tcg_gen_mov_i64(dest, tmp);
4153 break;
4154 case 5:
4155 tmp = gen_mulu_i64_i32(a, b);
4156 tcg_gen_mov_i64(dest, tmp);
4157 break;
4158 default: abort();
4159 }
4160 if (size < 2) {
4161 dead_tmp(b);
4162 dead_tmp(a);
4163 }
4164}
4165
9ee6e8bb
PB
4166/* Translate a NEON data processing instruction. Return nonzero if the
4167 instruction is invalid.
ad69471c
PB
4168 We process data in a mixture of 32-bit and 64-bit chunks.
4169 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4170
9ee6e8bb
PB
4171static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4172{
4173 int op;
4174 int q;
4175 int rd, rn, rm;
4176 int size;
4177 int shift;
4178 int pass;
4179 int count;
4180 int pairwise;
4181 int u;
4182 int n;
4183 uint32_t imm;
8f8e3aa4
PB
4184 TCGv tmp;
4185 TCGv tmp2;
4186 TCGv tmp3;
a7812ae4 4187 TCGv_i64 tmp64;
9ee6e8bb
PB
4188
4189 if (!vfp_enabled(env))
4190 return 1;
4191 q = (insn & (1 << 6)) != 0;
4192 u = (insn >> 24) & 1;
4193 VFP_DREG_D(rd, insn);
4194 VFP_DREG_N(rn, insn);
4195 VFP_DREG_M(rm, insn);
4196 size = (insn >> 20) & 3;
4197 if ((insn & (1 << 23)) == 0) {
4198 /* Three register same length. */
4199 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4200 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4201 || op == 10 || op == 11 || op == 16)) {
4202 /* 64-bit element instructions. */
9ee6e8bb 4203 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4204 neon_load_reg64(cpu_V0, rn + pass);
4205 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4206 switch (op) {
4207 case 1: /* VQADD */
4208 if (u) {
ad69471c 4209 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4210 } else {
ad69471c 4211 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4212 }
9ee6e8bb
PB
4213 break;
4214 case 5: /* VQSUB */
4215 if (u) {
ad69471c
PB
4216 gen_helper_neon_sub_saturate_u64(CPU_V001);
4217 } else {
4218 gen_helper_neon_sub_saturate_s64(CPU_V001);
4219 }
4220 break;
4221 case 8: /* VSHL */
4222 if (u) {
4223 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4224 } else {
4225 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4226 }
4227 break;
4228 case 9: /* VQSHL */
4229 if (u) {
4230 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4231 cpu_V0, cpu_V0);
4232 } else {
4233 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4234 cpu_V1, cpu_V0);
4235 }
4236 break;
4237 case 10: /* VRSHL */
4238 if (u) {
4239 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4240 } else {
ad69471c
PB
4241 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4242 }
4243 break;
4244 case 11: /* VQRSHL */
4245 if (u) {
4246 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4247 cpu_V1, cpu_V0);
4248 } else {
4249 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4250 cpu_V1, cpu_V0);
1e8d4eec 4251 }
9ee6e8bb
PB
4252 break;
4253 case 16:
4254 if (u) {
ad69471c 4255 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4256 } else {
ad69471c 4257 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4258 }
4259 break;
4260 default:
4261 abort();
2c0262af 4262 }
ad69471c 4263 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4264 }
9ee6e8bb 4265 return 0;
2c0262af 4266 }
9ee6e8bb
PB
4267 switch (op) {
4268 case 8: /* VSHL */
4269 case 9: /* VQSHL */
4270 case 10: /* VRSHL */
ad69471c 4271 case 11: /* VQRSHL */
9ee6e8bb 4272 {
ad69471c
PB
4273 int rtmp;
4274 /* Shift instruction operands are reversed. */
4275 rtmp = rn;
9ee6e8bb 4276 rn = rm;
ad69471c 4277 rm = rtmp;
9ee6e8bb
PB
4278 pairwise = 0;
4279 }
2c0262af 4280 break;
9ee6e8bb
PB
4281 case 20: /* VPMAX */
4282 case 21: /* VPMIN */
4283 case 23: /* VPADD */
4284 pairwise = 1;
2c0262af 4285 break;
9ee6e8bb
PB
4286 case 26: /* VPADD (float) */
4287 pairwise = (u && size < 2);
2c0262af 4288 break;
9ee6e8bb
PB
4289 case 30: /* VPMIN/VPMAX (float) */
4290 pairwise = u;
2c0262af 4291 break;
9ee6e8bb
PB
4292 default:
4293 pairwise = 0;
2c0262af 4294 break;
9ee6e8bb
PB
4295 }
4296 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4297
4298 if (pairwise) {
4299 /* Pairwise. */
4300 if (q)
4301 n = (pass & 1) * 2;
2c0262af 4302 else
9ee6e8bb
PB
4303 n = 0;
4304 if (pass < q + 1) {
4305 NEON_GET_REG(T0, rn, n);
4306 NEON_GET_REG(T1, rn, n + 1);
4307 } else {
4308 NEON_GET_REG(T0, rm, n);
4309 NEON_GET_REG(T1, rm, n + 1);
4310 }
4311 } else {
4312 /* Elementwise. */
4313 NEON_GET_REG(T0, rn, pass);
4314 NEON_GET_REG(T1, rm, pass);
4315 }
4316 switch (op) {
4317 case 0: /* VHADD */
4318 GEN_NEON_INTEGER_OP(hadd);
4319 break;
4320 case 1: /* VQADD */
ad69471c 4321 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4322 break;
9ee6e8bb
PB
4323 case 2: /* VRHADD */
4324 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4325 break;
9ee6e8bb
PB
4326 case 3: /* Logic ops. */
4327 switch ((u << 2) | size) {
4328 case 0: /* VAND */
2c0262af 4329 gen_op_andl_T0_T1();
9ee6e8bb
PB
4330 break;
4331 case 1: /* BIC */
4332 gen_op_bicl_T0_T1();
4333 break;
4334 case 2: /* VORR */
4335 gen_op_orl_T0_T1();
4336 break;
4337 case 3: /* VORN */
4338 gen_op_notl_T1();
4339 gen_op_orl_T0_T1();
4340 break;
4341 case 4: /* VEOR */
4342 gen_op_xorl_T0_T1();
4343 break;
4344 case 5: /* VBSL */
8f8e3aa4
PB
4345 tmp = neon_load_reg(rd, pass);
4346 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4347 dead_tmp(tmp);
9ee6e8bb
PB
4348 break;
4349 case 6: /* VBIT */
8f8e3aa4
PB
4350 tmp = neon_load_reg(rd, pass);
4351 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4352 dead_tmp(tmp);
9ee6e8bb
PB
4353 break;
4354 case 7: /* VBIF */
8f8e3aa4
PB
4355 tmp = neon_load_reg(rd, pass);
4356 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4357 dead_tmp(tmp);
9ee6e8bb 4358 break;
2c0262af
FB
4359 }
4360 break;
9ee6e8bb
PB
4361 case 4: /* VHSUB */
4362 GEN_NEON_INTEGER_OP(hsub);
4363 break;
4364 case 5: /* VQSUB */
ad69471c 4365 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4366 break;
9ee6e8bb
PB
4367 case 6: /* VCGT */
4368 GEN_NEON_INTEGER_OP(cgt);
4369 break;
4370 case 7: /* VCGE */
4371 GEN_NEON_INTEGER_OP(cge);
4372 break;
4373 case 8: /* VSHL */
ad69471c 4374 GEN_NEON_INTEGER_OP(shl);
2c0262af 4375 break;
9ee6e8bb 4376 case 9: /* VQSHL */
ad69471c 4377 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4378 break;
9ee6e8bb 4379 case 10: /* VRSHL */
ad69471c 4380 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4381 break;
9ee6e8bb 4382 case 11: /* VQRSHL */
ad69471c 4383 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4384 break;
4385 case 12: /* VMAX */
4386 GEN_NEON_INTEGER_OP(max);
4387 break;
4388 case 13: /* VMIN */
4389 GEN_NEON_INTEGER_OP(min);
4390 break;
4391 case 14: /* VABD */
4392 GEN_NEON_INTEGER_OP(abd);
4393 break;
4394 case 15: /* VABA */
4395 GEN_NEON_INTEGER_OP(abd);
4396 NEON_GET_REG(T1, rd, pass);
4397 gen_neon_add(size);
4398 break;
4399 case 16:
4400 if (!u) { /* VADD */
4401 if (gen_neon_add(size))
4402 return 1;
4403 } else { /* VSUB */
4404 switch (size) {
ad69471c
PB
4405 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4406 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
9ee6e8bb
PB
4407 case 2: gen_op_subl_T0_T1(); break;
4408 default: return 1;
4409 }
4410 }
4411 break;
4412 case 17:
4413 if (!u) { /* VTST */
4414 switch (size) {
ad69471c
PB
4415 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4416 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4417 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
9ee6e8bb
PB
4418 default: return 1;
4419 }
4420 } else { /* VCEQ */
4421 switch (size) {
ad69471c
PB
4422 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4423 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4424 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
4425 default: return 1;
4426 }
4427 }
4428 break;
4429 case 18: /* Multiply. */
4430 switch (size) {
ad69471c
PB
4431 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4432 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4433 case 2: gen_op_mul_T0_T1(); break;
4434 default: return 1;
4435 }
4436 NEON_GET_REG(T1, rd, pass);
4437 if (u) { /* VMLS */
ad69471c 4438 gen_neon_rsb(size);
9ee6e8bb
PB
4439 } else { /* VMLA */
4440 gen_neon_add(size);
4441 }
4442 break;
4443 case 19: /* VMUL */
4444 if (u) { /* polynomial */
ad69471c 4445 gen_helper_neon_mul_p8(CPU_T001);
9ee6e8bb
PB
4446 } else { /* Integer */
4447 switch (size) {
ad69471c
PB
4448 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4449 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4450 case 2: gen_op_mul_T0_T1(); break;
4451 default: return 1;
4452 }
4453 }
4454 break;
4455 case 20: /* VPMAX */
4456 GEN_NEON_INTEGER_OP(pmax);
4457 break;
4458 case 21: /* VPMIN */
4459 GEN_NEON_INTEGER_OP(pmin);
4460 break;
4462 case 22: /* Multiply high. */
4462 if (!u) { /* VQDMULH */
4463 switch (size) {
ad69471c
PB
4464 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4465 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4466 default: return 1;
4467 }
4468 } else { /* VQRDMULH */
4469 switch (size) {
ad69471c
PB
4470 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4471 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4472 default: return 1;
4473 }
4474 }
4475 break;
4476 case 23: /* VPADD */
4477 if (u)
4478 return 1;
4479 switch (size) {
ad69471c
PB
4480 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4481 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
9ee6e8bb
PB
4482 case 2: gen_op_addl_T0_T1(); break;
4483 default: return 1;
4484 }
4485 break;
4486 case 26: /* Floating point arithmetic. */
4487 switch ((u << 2) | size) {
4488 case 0: /* VADD */
ad69471c 4489 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4490 break;
4491 case 2: /* VSUB */
ad69471c 4492 gen_helper_neon_sub_f32(CPU_T001);
9ee6e8bb
PB
4493 break;
4494 case 4: /* VPADD */
ad69471c 4495 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4496 break;
4497 case 6: /* VABD */
ad69471c 4498 gen_helper_neon_abd_f32(CPU_T001);
9ee6e8bb
PB
4499 break;
4500 default:
4501 return 1;
4502 }
4503 break;
4504 case 27: /* Float multiply. */
ad69471c 4505 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
4506 if (!u) {
4507 NEON_GET_REG(T1, rd, pass);
4508 if (size == 0) {
ad69471c 4509 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb 4510 } else {
ad69471c 4511 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
4512 }
4513 }
4514 break;
4515 case 28: /* Float compare. */
4516 if (!u) {
ad69471c 4517 gen_helper_neon_ceq_f32(CPU_T001);
b5ff1b31 4518 } else {
9ee6e8bb 4519 if (size == 0)
ad69471c 4520 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb 4521 else
ad69471c 4522 gen_helper_neon_cgt_f32(CPU_T001);
b5ff1b31 4523 }
2c0262af 4524 break;
9ee6e8bb
PB
4525 case 29: /* Float compare absolute. */
4526 if (!u)
4527 return 1;
4528 if (size == 0)
ad69471c 4529 gen_helper_neon_acge_f32(CPU_T001);
9ee6e8bb 4530 else
ad69471c 4531 gen_helper_neon_acgt_f32(CPU_T001);
2c0262af 4532 break;
9ee6e8bb
PB
4533 case 30: /* Float min/max. */
4534 if (size == 0)
ad69471c 4535 gen_helper_neon_max_f32(CPU_T001);
9ee6e8bb 4536 else
ad69471c 4537 gen_helper_neon_min_f32(CPU_T001);
9ee6e8bb
PB
4538 break;
4539 case 31:
4540 if (size == 0)
4373f3ce 4541 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4542 else
4373f3ce 4543 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4544 break;
9ee6e8bb
PB
4545 default:
4546 abort();
2c0262af 4547 }
9ee6e8bb
PB
4548 /* Save the result. For elementwise operations we can put it
4549 straight into the destination register. For pairwise operations
4550 we have to be careful to avoid clobbering the source operands. */
4551 if (pairwise && rd == rm) {
4552 gen_neon_movl_scratch_T0(pass);
4553 } else {
4554 NEON_SET_REG(T0, rd, pass);
4555 }
4556
4557 } /* for pass */
4558 if (pairwise && rd == rm) {
4559 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4560 gen_neon_movl_T0_scratch(pass);
4561 NEON_SET_REG(T0, rd, pass);
4562 }
4563 }
ad69471c 4564 /* End of 3 register same size operations. */
9ee6e8bb
PB
4565 } else if (insn & (1 << 4)) {
4566 if ((insn & 0x00380080) != 0) {
4567 /* Two registers and shift. */
4568 op = (insn >> 8) & 0xf;
4569 if (insn & (1 << 7)) {
4570 /* 64-bit shift. */
4571 size = 3;
4572 } else {
4573 size = 2;
4574 while ((insn & (1 << (size + 19))) == 0)
4575 size--;
4576 }
4577 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4578 /* To avoid excessive duplication of ops we implement shift
4579 by immediate using the variable shift operations. */
4580 if (op < 8) {
4581 /* Shift by immediate:
4582 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4583 /* Right shifts are encoded as N - shift, where N is the
4584 element size in bits. */
4585 if (op <= 4)
4586 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4587 if (size == 3) {
4588 count = q + 1;
4589 } else {
4590 count = q ? 4: 2;
4591 }
4592 switch (size) {
4593 case 0:
4594 imm = (uint8_t) shift;
4595 imm |= imm << 8;
4596 imm |= imm << 16;
4597 break;
4598 case 1:
4599 imm = (uint16_t) shift;
4600 imm |= imm << 16;
4601 break;
4602 case 2:
4603 case 3:
4604 imm = shift;
4605 break;
4606 default:
4607 abort();
4608 }
4609
4610 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4611 if (size == 3) {
4612 neon_load_reg64(cpu_V0, rm + pass);
4613 tcg_gen_movi_i64(cpu_V1, imm);
4614 switch (op) {
4615 case 0: /* VSHR */
4616 case 1: /* VSRA */
4617 if (u)
4618 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4619 else
ad69471c 4620 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4621 break;
ad69471c
PB
4622 case 2: /* VRSHR */
4623 case 3: /* VRSRA */
4624 if (u)
4625 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4626 else
ad69471c 4627 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4628 break;
ad69471c
PB
4629 case 4: /* VSRI */
4630 if (!u)
4631 return 1;
4632 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4633 break;
4634 case 5: /* VSHL, VSLI */
4635 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4636 break;
4637 case 6: /* VQSHL */
4638 if (u)
4639 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4640 else
ad69471c
PB
4641 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4642 break;
4643 case 7: /* VQSHLU */
4644 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4645 break;
9ee6e8bb 4646 }
ad69471c
PB
4647 if (op == 1 || op == 3) {
4648 /* Accumulate. */
4649 neon_load_reg64(cpu_V0, rd + pass);
4650 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4651 } else if (op == 4 || (op == 5 && u)) {
4652 /* Insert */
4653 cpu_abort(env, "VS[LR]I.64 not implemented");
4654 }
4655 neon_store_reg64(cpu_V0, rd + pass);
4656 } else { /* size < 3 */
4657 /* Operands in T0 and T1. */
4658 gen_op_movl_T1_im(imm);
4659 NEON_GET_REG(T0, rm, pass);
4660 switch (op) {
4661 case 0: /* VSHR */
4662 case 1: /* VSRA */
4663 GEN_NEON_INTEGER_OP(shl);
4664 break;
4665 case 2: /* VRSHR */
4666 case 3: /* VRSRA */
4667 GEN_NEON_INTEGER_OP(rshl);
4668 break;
4669 case 4: /* VSRI */
4670 if (!u)
4671 return 1;
4672 GEN_NEON_INTEGER_OP(shl);
4673 break;
4674 case 5: /* VSHL, VSLI */
4675 switch (size) {
4676 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4677 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4678 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4679 default: return 1;
4680 }
4681 break;
4682 case 6: /* VQSHL */
4683 GEN_NEON_INTEGER_OP_ENV(qshl);
4684 break;
4685 case 7: /* VQSHLU */
4686 switch (size) {
4687 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4688 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4689 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4690 default: return 1;
4691 }
4692 break;
4693 }
4694
4695 if (op == 1 || op == 3) {
4696 /* Accumulate. */
4697 NEON_GET_REG(T1, rd, pass);
4698 gen_neon_add(size);
4699 } else if (op == 4 || (op == 5 && u)) {
4700 /* Insert */
4701 switch (size) {
4702 case 0:
4703 if (op == 4)
4704 imm = 0xff >> -shift;
4705 else
4706 imm = (uint8_t)(0xff << shift);
4707 imm |= imm << 8;
4708 imm |= imm << 16;
4709 break;
4710 case 1:
4711 if (op == 4)
4712 imm = 0xffff >> -shift;
4713 else
4714 imm = (uint16_t)(0xffff << shift);
4715 imm |= imm << 16;
4716 break;
4717 case 2:
4718 if (op == 4)
4719 imm = 0xffffffffu >> -shift;
4720 else
4721 imm = 0xffffffffu << shift;
4722 break;
4723 default:
4724 abort();
4725 }
4726 tmp = neon_load_reg(rd, pass);
4727 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4728 tcg_gen_andi_i32(tmp, tmp, ~imm);
4729 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4730 }
9ee6e8bb
PB
4731 NEON_SET_REG(T0, rd, pass);
4732 }
4733 } /* for pass */
4734 } else if (op < 10) {
ad69471c 4735 /* Shift by immediate and narrow:
9ee6e8bb
PB
4736 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4737 shift = shift - (1 << (size + 3));
4738 size++;
9ee6e8bb
PB
4739 switch (size) {
4740 case 1:
ad69471c 4741 imm = (uint16_t)shift;
9ee6e8bb 4742 imm |= imm << 16;
ad69471c 4743 tmp2 = tcg_const_i32(imm);
a7812ae4 4744 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4745 break;
4746 case 2:
ad69471c
PB
4747 imm = (uint32_t)shift;
4748 tmp2 = tcg_const_i32(imm);
a7812ae4 4749 TCGV_UNUSED_I64(tmp64);
4cc633c3 4750 break;
9ee6e8bb 4751 case 3:
a7812ae4
PB
4752 tmp64 = tcg_const_i64(shift);
4753 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4754 break;
4755 default:
4756 abort();
4757 }
4758
ad69471c
PB
4759 for (pass = 0; pass < 2; pass++) {
4760 if (size == 3) {
4761 neon_load_reg64(cpu_V0, rm + pass);
4762 if (q) {
4763 if (u)
a7812ae4 4764 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4765 else
a7812ae4 4766 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4767 } else {
4768 if (u)
a7812ae4 4769 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4770 else
a7812ae4 4771 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4772 }
2c0262af 4773 } else {
ad69471c
PB
4774 tmp = neon_load_reg(rm + pass, 0);
4775 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4776 tmp3 = neon_load_reg(rm + pass, 1);
4777 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4778 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4779 dead_tmp(tmp);
36aa55dc 4780 dead_tmp(tmp3);
9ee6e8bb 4781 }
ad69471c
PB
4782 tmp = new_tmp();
4783 if (op == 8 && !u) {
4784 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4785 } else {
ad69471c
PB
4786 if (op == 8)
4787 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4788 else
ad69471c
PB
4789 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4790 }
4791 if (pass == 0) {
4792 tmp2 = tmp;
4793 } else {
4794 neon_store_reg(rd, 0, tmp2);
4795 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4796 }
4797 } /* for pass */
4798 } else if (op == 10) {
4799 /* VSHLL */
ad69471c 4800 if (q || size == 3)
9ee6e8bb 4801 return 1;
ad69471c
PB
4802 tmp = neon_load_reg(rm, 0);
4803 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4804 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4805 if (pass == 1)
4806 tmp = tmp2;
4807
4808 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4809
9ee6e8bb
PB
4810 if (shift != 0) {
4811 /* The shift is less than the width of the source
ad69471c
PB
4812 type, so we can just shift the whole register. */
4813 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4814 if (size < 2 || !u) {
4815 uint64_t imm64;
4816 if (size == 0) {
4817 imm = (0xffu >> (8 - shift));
4818 imm |= imm << 16;
4819 } else {
4820 imm = 0xffff >> (16 - shift);
9ee6e8bb 4821 }
ad69471c
PB
4822 imm64 = imm | (((uint64_t)imm) << 32);
4823 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4824 }
4825 }
ad69471c 4826 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4827 }
4828 } else if (op == 15 || op == 16) {
4829 /* VCVT fixed-point. */
4830 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4831 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4832 if (op & 1) {
4833 if (u)
4373f3ce 4834 gen_vfp_ulto(0, shift);
9ee6e8bb 4835 else
4373f3ce 4836 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4837 } else {
4838 if (u)
4373f3ce 4839 gen_vfp_toul(0, shift);
9ee6e8bb 4840 else
4373f3ce 4841 gen_vfp_tosl(0, shift);
2c0262af 4842 }
4373f3ce 4843 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4844 }
4845 } else {
9ee6e8bb
PB
4846 return 1;
4847 }
4848 } else { /* (insn & 0x00380080) == 0 */
4849 int invert;
4850
4851 op = (insn >> 8) & 0xf;
4852 /* One register and immediate. */
4853 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4854 invert = (insn & (1 << 5)) != 0;
4855 switch (op) {
4856 case 0: case 1:
4857 /* no-op */
4858 break;
4859 case 2: case 3:
4860 imm <<= 8;
4861 break;
4862 case 4: case 5:
4863 imm <<= 16;
4864 break;
4865 case 6: case 7:
4866 imm <<= 24;
4867 break;
4868 case 8: case 9:
4869 imm |= imm << 16;
4870 break;
4871 case 10: case 11:
4872 imm = (imm << 8) | (imm << 24);
4873 break;
4874 case 12:
4875 imm = (imm < 8) | 0xff; /* NOTE(review): looks like a bug — cmode 12 should shift, i.e. (imm << 8) | 0xff, mirroring case 13's (imm << 16) | 0xffff; confirm against the ARM ARM modified-immediate table */
4876 break;
4877 case 13:
4878 imm = (imm << 16) | 0xffff;
4879 break;
4880 case 14:
4881 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4882 if (invert)
4883 imm = ~imm;
4884 break;
4885 case 15:
4886 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4887 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4888 break;
4889 }
4890 if (invert)
4891 imm = ~imm;
4892
4893 if (op != 14 || !invert)
4894 gen_op_movl_T1_im(imm);
4895
4896 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4897 if (op & 1 && op < 12) {
ad69471c 4898 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4899 if (invert) {
4900 /* The immediate value has already been inverted, so
4901 BIC becomes AND. */
ad69471c 4902 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4903 } else {
ad69471c 4904 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4905 }
9ee6e8bb 4906 } else {
ad69471c
PB
4907 /* VMOV, VMVN. */
4908 tmp = new_tmp();
9ee6e8bb 4909 if (op == 14 && invert) {
ad69471c
PB
4910 uint32_t val;
4911 val = 0;
9ee6e8bb
PB
4912 for (n = 0; n < 4; n++) {
4913 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4914 val |= 0xff << (n * 8);
9ee6e8bb 4915 }
ad69471c
PB
4916 tcg_gen_movi_i32(tmp, val);
4917 } else {
4918 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4919 }
9ee6e8bb 4920 }
ad69471c 4921 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4922 }
4923 }
e4b3861d 4924 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4925 if (size != 3) {
4926 op = (insn >> 8) & 0xf;
4927 if ((insn & (1 << 6)) == 0) {
4928 /* Three registers of different lengths. */
4929 int src1_wide;
4930 int src2_wide;
4931 int prewiden;
4932 /* prewiden, src1_wide, src2_wide */
4933 static const int neon_3reg_wide[16][3] = {
4934 {1, 0, 0}, /* VADDL */
4935 {1, 1, 0}, /* VADDW */
4936 {1, 0, 0}, /* VSUBL */
4937 {1, 1, 0}, /* VSUBW */
4938 {0, 1, 1}, /* VADDHN */
4939 {0, 0, 0}, /* VABAL */
4940 {0, 1, 1}, /* VSUBHN */
4941 {0, 0, 0}, /* VABDL */
4942 {0, 0, 0}, /* VMLAL */
4943 {0, 0, 0}, /* VQDMLAL */
4944 {0, 0, 0}, /* VMLSL */
4945 {0, 0, 0}, /* VQDMLSL */
4946 {0, 0, 0}, /* Integer VMULL */
4947 {0, 0, 0}, /* VQDMULL */
4948 {0, 0, 0} /* Polynomial VMULL */
4949 };
4950
4951 prewiden = neon_3reg_wide[op][0];
4952 src1_wide = neon_3reg_wide[op][1];
4953 src2_wide = neon_3reg_wide[op][2];
4954
ad69471c
PB
4955 if (size == 0 && (op == 9 || op == 11 || op == 13))
4956 return 1;
4957
9ee6e8bb
PB
4958 /* Avoid overlapping operands. Wide source operands are
4959 always aligned so will never overlap with wide
4960 destinations in problematic ways. */
8f8e3aa4
PB
4961 if (rd == rm && !src2_wide) {
4962 NEON_GET_REG(T0, rm, 1);
4963 gen_neon_movl_scratch_T0(2);
4964 } else if (rd == rn && !src1_wide) {
4965 NEON_GET_REG(T0, rn, 1);
4966 gen_neon_movl_scratch_T0(2);
9ee6e8bb 4967 }
a50f5b91 4968 TCGV_UNUSED(tmp3);
9ee6e8bb 4969 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4970 if (src1_wide) {
4971 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4972 TCGV_UNUSED(tmp);
9ee6e8bb 4973 } else {
ad69471c
PB
4974 if (pass == 1 && rd == rn) {
4975 gen_neon_movl_T0_scratch(2);
4976 tmp = new_tmp();
4977 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb 4978 } else {
ad69471c
PB
4979 tmp = neon_load_reg(rn, pass);
4980 }
4981 if (prewiden) {
4982 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4983 }
4984 }
ad69471c
PB
4985 if (src2_wide) {
4986 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4987 TCGV_UNUSED(tmp2);
9ee6e8bb 4988 } else {
ad69471c 4989 if (pass == 1 && rd == rm) {
8f8e3aa4 4990 gen_neon_movl_T0_scratch(2);
ad69471c
PB
4991 tmp2 = new_tmp();
4992 tcg_gen_mov_i32(tmp2, cpu_T[0]);
9ee6e8bb 4993 } else {
ad69471c
PB
4994 tmp2 = neon_load_reg(rm, pass);
4995 }
4996 if (prewiden) {
4997 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4998 }
9ee6e8bb
PB
4999 }
5000 switch (op) {
5001 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5002 gen_neon_addl(size);
9ee6e8bb
PB
5003 break;
5004 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5005 gen_neon_subl(size);
9ee6e8bb
PB
5006 break;
5007 case 5: case 7: /* VABAL, VABDL */
5008 switch ((size << 1) | u) {
ad69471c
PB
5009 case 0:
5010 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5011 break;
5012 case 1:
5013 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5014 break;
5015 case 2:
5016 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5017 break;
5018 case 3:
5019 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5020 break;
5021 case 4:
5022 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5023 break;
5024 case 5:
5025 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5026 break;
9ee6e8bb
PB
5027 default: abort();
5028 }
ad69471c
PB
5029 dead_tmp(tmp2);
5030 dead_tmp(tmp);
9ee6e8bb
PB
5031 break;
5032 case 8: case 9: case 10: case 11: case 12: case 13:
5033 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5034 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5035 break;
5036 case 14: /* Polynomial VMULL */
5037 cpu_abort(env, "Polynomial VMULL not implemented");
5038
5039 default: /* 15 is RESERVED. */
5040 return 1;
5041 }
5042 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5043 /* Accumulate. */
5044 if (op == 10 || op == 11) {
ad69471c 5045 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
5046 }
5047
9ee6e8bb 5048 if (op != 13) {
ad69471c 5049 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
5050 }
5051
5052 switch (op) {
5053 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 5054 gen_neon_addl(size);
9ee6e8bb
PB
5055 break;
5056 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
5057 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5058 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5059 break;
9ee6e8bb
PB
5060 /* Fall through. */
5061 case 13: /* VQDMULL */
ad69471c 5062 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5063 break;
5064 default:
5065 abort();
5066 }
ad69471c 5067 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5068 } else if (op == 4 || op == 6) {
5069 /* Narrowing operation. */
ad69471c 5070 tmp = new_tmp();
9ee6e8bb
PB
5071 if (u) {
5072 switch (size) {
ad69471c
PB
5073 case 0:
5074 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5075 break;
5076 case 1:
5077 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5078 break;
5079 case 2:
5080 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5081 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5082 break;
9ee6e8bb
PB
5083 default: abort();
5084 }
5085 } else {
5086 switch (size) {
ad69471c
PB
5087 case 0:
5088 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5089 break;
5090 case 1:
5091 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5092 break;
5093 case 2:
5094 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5095 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5096 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5097 break;
9ee6e8bb
PB
5098 default: abort();
5099 }
5100 }
ad69471c
PB
5101 if (pass == 0) {
5102 tmp3 = tmp;
5103 } else {
5104 neon_store_reg(rd, 0, tmp3);
5105 neon_store_reg(rd, 1, tmp);
5106 }
9ee6e8bb
PB
5107 } else {
5108 /* Write back the result. */
ad69471c 5109 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5110 }
5111 }
5112 } else {
5113 /* Two registers and a scalar. */
5114 switch (op) {
5115 case 0: /* Integer VMLA scalar */
5116 case 1: /* Float VMLA scalar */
5117 case 4: /* Integer VMLS scalar */
5118 case 5: /* Floating point VMLS scalar */
5119 case 8: /* Integer VMUL scalar */
5120 case 9: /* Floating point VMUL scalar */
5121 case 12: /* VQDMULH scalar */
5122 case 13: /* VQRDMULH scalar */
5123 gen_neon_get_scalar(size, rm);
8f8e3aa4 5124 gen_neon_movl_scratch_T0(0);
9ee6e8bb
PB
5125 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5126 if (pass != 0)
8f8e3aa4 5127 gen_neon_movl_T0_scratch(0);
9ee6e8bb
PB
5128 NEON_GET_REG(T1, rn, pass);
5129 if (op == 12) {
5130 if (size == 1) {
ad69471c 5131 gen_helper_neon_qdmulh_s16(CPU_T0E01);
9ee6e8bb 5132 } else {
ad69471c 5133 gen_helper_neon_qdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5134 }
5135 } else if (op == 13) {
5136 if (size == 1) {
ad69471c 5137 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
9ee6e8bb 5138 } else {
ad69471c 5139 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5140 }
5141 } else if (op & 1) {
ad69471c 5142 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
5143 } else {
5144 switch (size) {
ad69471c
PB
5145 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5146 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
5147 case 2: gen_op_mul_T0_T1(); break;
5148 default: return 1;
5149 }
5150 }
5151 if (op < 8) {
5152 /* Accumulate. */
5153 NEON_GET_REG(T1, rd, pass);
5154 switch (op) {
5155 case 0:
5156 gen_neon_add(size);
5157 break;
5158 case 1:
ad69471c 5159 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
5160 break;
5161 case 4:
ad69471c 5162 gen_neon_rsb(size);
9ee6e8bb
PB
5163 break;
5164 case 5:
ad69471c 5165 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
5166 break;
5167 default:
5168 abort();
5169 }
5170 }
5171 NEON_SET_REG(T0, rd, pass);
5172 }
5173 break;
5174 case 2: /* VMLAL scalar */
5175 case 3: /* VQDMLAL scalar */
5176 case 6: /* VMLSL scalar */
5177 case 7: /* VQDMLSL scalar */
5178 case 10: /* VMULL scalar */
5179 case 11: /* VQDMULL scalar */
ad69471c
PB
5180 if (size == 0 && (op == 3 || op == 7 || op == 11))
5181 return 1;
5182
9ee6e8bb 5183 gen_neon_get_scalar(size, rm);
ad69471c
PB
5184 NEON_GET_REG(T1, rn, 1);
5185
9ee6e8bb 5186 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5187 if (pass == 0) {
5188 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5189 } else {
ad69471c
PB
5190 tmp = new_tmp();
5191 tcg_gen_mov_i32(tmp, cpu_T[1]);
9ee6e8bb 5192 }
ad69471c
PB
5193 tmp2 = new_tmp();
5194 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5195 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb 5196 if (op == 6 || op == 7) {
ad69471c
PB
5197 gen_neon_negl(cpu_V0, size);
5198 }
5199 if (op != 11) {
5200 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5201 }
9ee6e8bb
PB
5202 switch (op) {
5203 case 2: case 6:
ad69471c 5204 gen_neon_addl(size);
9ee6e8bb
PB
5205 break;
5206 case 3: case 7:
ad69471c
PB
5207 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5208 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5209 break;
5210 case 10:
5211 /* no-op */
5212 break;
5213 case 11:
ad69471c 5214 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5215 break;
5216 default:
5217 abort();
5218 }
ad69471c 5219 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5220 }
5221 break;
5222 default: /* 14 and 15 are RESERVED */
5223 return 1;
5224 }
5225 }
5226 } else { /* size == 3 */
5227 if (!u) {
5228 /* Extract. */
9ee6e8bb 5229 imm = (insn >> 8) & 0xf;
ad69471c
PB
5230 count = q + 1;
5231
5232 if (imm > 7 && !q)
5233 return 1;
5234
5235 if (imm == 0) {
5236 neon_load_reg64(cpu_V0, rn);
5237 if (q) {
5238 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5239 }
ad69471c
PB
5240 } else if (imm == 8) {
5241 neon_load_reg64(cpu_V0, rn + 1);
5242 if (q) {
5243 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5244 }
ad69471c 5245 } else if (q) {
a7812ae4 5246 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5247 if (imm < 8) {
5248 neon_load_reg64(cpu_V0, rn);
a7812ae4 5249 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5250 } else {
5251 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5252 neon_load_reg64(tmp64, rm);
ad69471c
PB
5253 }
5254 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5255 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5256 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5257 if (imm < 8) {
5258 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5259 } else {
ad69471c
PB
5260 neon_load_reg64(cpu_V1, rm + 1);
5261 imm -= 8;
9ee6e8bb 5262 }
ad69471c 5263 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5264 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5265 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
ad69471c 5266 } else {
a7812ae4 5267 /* BUGFIX */
ad69471c 5268 neon_load_reg64(cpu_V0, rn);
a7812ae4 5269 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5270 neon_load_reg64(cpu_V1, rm);
a7812ae4 5271 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5272 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5273 }
5274 neon_store_reg64(cpu_V0, rd);
5275 if (q) {
5276 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5277 }
5278 } else if ((insn & (1 << 11)) == 0) {
5279 /* Two register misc. */
5280 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5281 size = (insn >> 18) & 3;
5282 switch (op) {
5283 case 0: /* VREV64 */
5284 if (size == 3)
5285 return 1;
5286 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5287 NEON_GET_REG(T0, rm, pass * 2);
5288 NEON_GET_REG(T1, rm, pass * 2 + 1);
5289 switch (size) {
66896cb8 5290 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5291 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5292 case 2: /* no-op */ break;
5293 default: abort();
5294 }
5295 NEON_SET_REG(T0, rd, pass * 2 + 1);
5296 if (size == 2) {
5297 NEON_SET_REG(T1, rd, pass * 2);
5298 } else {
5299 gen_op_movl_T0_T1();
5300 switch (size) {
66896cb8 5301 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5302 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5303 default: abort();
5304 }
5305 NEON_SET_REG(T0, rd, pass * 2);
5306 }
5307 }
5308 break;
5309 case 4: case 5: /* VPADDL */
5310 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5311 if (size == 3)
5312 return 1;
ad69471c
PB
5313 for (pass = 0; pass < q + 1; pass++) {
5314 tmp = neon_load_reg(rm, pass * 2);
5315 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5316 tmp = neon_load_reg(rm, pass * 2 + 1);
5317 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5318 switch (size) {
5319 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5320 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5321 case 2: tcg_gen_add_i64(CPU_V001); break;
5322 default: abort();
5323 }
9ee6e8bb
PB
5324 if (op >= 12) {
5325 /* Accumulate. */
ad69471c
PB
5326 neon_load_reg64(cpu_V1, rd + pass);
5327 gen_neon_addl(size);
9ee6e8bb 5328 }
ad69471c 5329 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5330 }
5331 break;
5332 case 33: /* VTRN */
5333 if (size == 2) {
5334 for (n = 0; n < (q ? 4 : 2); n += 2) {
5335 NEON_GET_REG(T0, rm, n);
5336 NEON_GET_REG(T1, rd, n + 1);
5337 NEON_SET_REG(T1, rm, n);
5338 NEON_SET_REG(T0, rd, n + 1);
5339 }
5340 } else {
5341 goto elementwise;
5342 }
5343 break;
5344 case 34: /* VUZP */
5345 /* Reg Before After
5346 Rd A3 A2 A1 A0 B2 B0 A2 A0
5347 Rm B3 B2 B1 B0 B3 B1 A3 A1
5348 */
5349 if (size == 3)
5350 return 1;
5351 gen_neon_unzip(rd, q, 0, size);
5352 gen_neon_unzip(rm, q, 4, size);
5353 if (q) {
5354 static int unzip_order_q[8] =
5355 {0, 2, 4, 6, 1, 3, 5, 7};
5356 for (n = 0; n < 8; n++) {
5357 int reg = (n < 4) ? rd : rm;
5358 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5359 NEON_SET_REG(T0, reg, n % 4);
5360 }
5361 } else {
5362 static int unzip_order[4] =
5363 {0, 4, 1, 5};
5364 for (n = 0; n < 4; n++) {
5365 int reg = (n < 2) ? rd : rm;
5366 gen_neon_movl_T0_scratch(unzip_order[n]);
5367 NEON_SET_REG(T0, reg, n % 2);
5368 }
5369 }
5370 break;
5371 case 35: /* VZIP */
5372 /* Reg Before After
5373 Rd A3 A2 A1 A0 B1 A1 B0 A0
5374 Rm B3 B2 B1 B0 B3 A3 B2 A2
5375 */
5376 if (size == 3)
5377 return 1;
5378 count = (q ? 4 : 2);
5379 for (n = 0; n < count; n++) {
5380 NEON_GET_REG(T0, rd, n);
5381 NEON_GET_REG(T1, rd, n);
5382 switch (size) {
19457615
FN
5383 case 0: gen_neon_zip_u8(cpu_T[0], cpu_T[1]); break;
5384 case 1: gen_neon_zip_u16(cpu_T[0], cpu_T[1]); break;
9ee6e8bb
PB
5385 case 2: /* no-op */; break;
5386 default: abort();
5387 }
5388 gen_neon_movl_scratch_T0(n * 2);
5389 gen_neon_movl_scratch_T1(n * 2 + 1);
5390 }
5391 for (n = 0; n < count * 2; n++) {
5392 int reg = (n < count) ? rd : rm;
5393 gen_neon_movl_T0_scratch(n);
5394 NEON_SET_REG(T0, reg, n % count);
5395 }
5396 break;
5397 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5398 if (size == 3)
5399 return 1;
a50f5b91 5400 TCGV_UNUSED(tmp2);
9ee6e8bb 5401 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5402 neon_load_reg64(cpu_V0, rm + pass);
5403 tmp = new_tmp();
9ee6e8bb 5404 if (op == 36 && q == 0) {
ad69471c 5405 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5406 } else if (q) {
ad69471c 5407 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5408 } else {
ad69471c
PB
5409 gen_neon_narrow_sats(size, tmp, cpu_V0);
5410 }
5411 if (pass == 0) {
5412 tmp2 = tmp;
5413 } else {
5414 neon_store_reg(rd, 0, tmp2);
5415 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5416 }
9ee6e8bb
PB
5417 }
5418 break;
5419 case 38: /* VSHLL */
ad69471c 5420 if (q || size == 3)
9ee6e8bb 5421 return 1;
ad69471c
PB
5422 tmp = neon_load_reg(rm, 0);
5423 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5424 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5425 if (pass == 1)
5426 tmp = tmp2;
5427 gen_neon_widen(cpu_V0, tmp, size, 1);
5428 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5429 }
5430 break;
5431 default:
5432 elementwise:
5433 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5434 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5435 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5436 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5437 } else {
5438 NEON_GET_REG(T0, rm, pass);
5439 }
5440 switch (op) {
5441 case 1: /* VREV32 */
5442 switch (size) {
66896cb8 5443 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5444 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5445 default: return 1;
5446 }
5447 break;
5448 case 2: /* VREV16 */
5449 if (size != 0)
5450 return 1;
3670669c 5451 gen_rev16(cpu_T[0]);
9ee6e8bb 5452 break;
9ee6e8bb
PB
5453 case 8: /* CLS */
5454 switch (size) {
ad69471c
PB
5455 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5456 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5457 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5458 default: return 1;
5459 }
5460 break;
5461 case 9: /* CLZ */
5462 switch (size) {
ad69471c
PB
5463 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5464 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
1497c961 5465 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5466 default: return 1;
5467 }
5468 break;
5469 case 10: /* CNT */
5470 if (size != 0)
5471 return 1;
ad69471c 5472 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5473 break;
5474 case 11: /* VNOT */
5475 if (size != 0)
5476 return 1;
5477 gen_op_notl_T0();
5478 break;
5479 case 14: /* VQABS */
5480 switch (size) {
ad69471c
PB
5481 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5482 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5483 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5484 default: return 1;
5485 }
5486 break;
5487 case 15: /* VQNEG */
5488 switch (size) {
ad69471c
PB
5489 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5490 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5491 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5492 default: return 1;
5493 }
5494 break;
5495 case 16: case 19: /* VCGT #0, VCLE #0 */
5496 gen_op_movl_T1_im(0);
5497 switch(size) {
ad69471c
PB
5498 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5499 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5500 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
9ee6e8bb
PB
5501 default: return 1;
5502 }
5503 if (op == 19)
5504 gen_op_notl_T0();
5505 break;
5506 case 17: case 20: /* VCGE #0, VCLT #0 */
5507 gen_op_movl_T1_im(0);
5508 switch(size) {
ad69471c
PB
5509 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5510 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5511 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
9ee6e8bb
PB
5512 default: return 1;
5513 }
5514 if (op == 20)
5515 gen_op_notl_T0();
5516 break;
5517 case 18: /* VCEQ #0 */
5518 gen_op_movl_T1_im(0);
5519 switch(size) {
ad69471c
PB
5520 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5521 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5522 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
5523 default: return 1;
5524 }
5525 break;
5526 case 22: /* VABS */
5527 switch(size) {
ad69471c
PB
5528 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5529 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5530 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5531 default: return 1;
5532 }
5533 break;
5534 case 23: /* VNEG */
5535 gen_op_movl_T1_im(0);
ad69471c
PB
5536 if (size == 3)
5537 return 1;
5538 gen_neon_rsb(size);
9ee6e8bb
PB
5539 break;
5540 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5541 gen_op_movl_T1_im(0);
ad69471c 5542 gen_helper_neon_cgt_f32(CPU_T001);
9ee6e8bb
PB
5543 if (op == 27)
5544 gen_op_notl_T0();
5545 break;
5546 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5547 gen_op_movl_T1_im(0);
ad69471c 5548 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb
PB
5549 if (op == 28)
5550 gen_op_notl_T0();
5551 break;
5552 case 26: /* Float VCEQ #0 */
5553 gen_op_movl_T1_im(0);
ad69471c 5554 gen_helper_neon_ceq_f32(CPU_T001);
9ee6e8bb
PB
5555 break;
5556 case 30: /* Float VABS */
4373f3ce 5557 gen_vfp_abs(0);
9ee6e8bb
PB
5558 break;
5559 case 31: /* Float VNEG */
4373f3ce 5560 gen_vfp_neg(0);
9ee6e8bb
PB
5561 break;
5562 case 32: /* VSWP */
5563 NEON_GET_REG(T1, rd, pass);
5564 NEON_SET_REG(T1, rm, pass);
5565 break;
5566 case 33: /* VTRN */
5567 NEON_GET_REG(T1, rd, pass);
5568 switch (size) {
19457615
FN
5569 case 0: gen_neon_trn_u8(cpu_T[0], cpu_T[1]); break;
5570 case 1: gen_neon_trn_u16(cpu_T[0], cpu_T[1]); break;
9ee6e8bb
PB
5571 case 2: abort();
5572 default: return 1;
5573 }
5574 NEON_SET_REG(T1, rm, pass);
5575 break;
5576 case 56: /* Integer VRECPE */
4373f3ce 5577 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5578 break;
5579 case 57: /* Integer VRSQRTE */
4373f3ce 5580 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5581 break;
5582 case 58: /* Float VRECPE */
4373f3ce 5583 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5584 break;
5585 case 59: /* Float VRSQRTE */
4373f3ce 5586 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5587 break;
5588 case 60: /* VCVT.F32.S32 */
4373f3ce 5589 gen_vfp_tosiz(0);
9ee6e8bb
PB
5590 break;
5591 case 61: /* VCVT.F32.U32 */
4373f3ce 5592 gen_vfp_touiz(0);
9ee6e8bb
PB
5593 break;
5594 case 62: /* VCVT.S32.F32 */
4373f3ce 5595 gen_vfp_sito(0);
9ee6e8bb
PB
5596 break;
5597 case 63: /* VCVT.U32.F32 */
4373f3ce 5598 gen_vfp_uito(0);
9ee6e8bb
PB
5599 break;
5600 default:
5601 /* Reserved: 21, 29, 39-56 */
5602 return 1;
5603 }
5604 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5605 tcg_gen_st_f32(cpu_F0s, cpu_env,
5606 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5607 } else {
5608 NEON_SET_REG(T0, rd, pass);
5609 }
5610 }
5611 break;
5612 }
5613 } else if ((insn & (1 << 10)) == 0) {
5614 /* VTBL, VTBX. */
3018f259 5615 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5616 if (insn & (1 << 6)) {
8f8e3aa4 5617 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5618 } else {
8f8e3aa4
PB
5619 tmp = new_tmp();
5620 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5621 }
8f8e3aa4
PB
5622 tmp2 = neon_load_reg(rm, 0);
5623 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5624 tcg_const_i32(n));
3018f259 5625 dead_tmp(tmp);
9ee6e8bb 5626 if (insn & (1 << 6)) {
8f8e3aa4 5627 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5628 } else {
8f8e3aa4
PB
5629 tmp = new_tmp();
5630 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5631 }
8f8e3aa4
PB
5632 tmp3 = neon_load_reg(rm, 1);
5633 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5634 tcg_const_i32(n));
5635 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5636 neon_store_reg(rd, 1, tmp3);
5637 dead_tmp(tmp);
9ee6e8bb
PB
5638 } else if ((insn & 0x380) == 0) {
5639 /* VDUP */
5640 if (insn & (1 << 19)) {
5641 NEON_SET_REG(T0, rm, 1);
5642 } else {
5643 NEON_SET_REG(T0, rm, 0);
5644 }
5645 if (insn & (1 << 16)) {
ad69471c 5646 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5647 } else if (insn & (1 << 17)) {
5648 if ((insn >> 18) & 1)
ad69471c 5649 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb 5650 else
ad69471c 5651 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb
PB
5652 }
5653 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5654 NEON_SET_REG(T0, rd, pass);
5655 }
5656 } else {
5657 return 1;
5658 }
5659 }
5660 }
5661 return 0;
5662}
5663
fe1479c3
PB
5664static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5665{
5666 int crn = (insn >> 16) & 0xf;
5667 int crm = insn & 0xf;
5668 int op1 = (insn >> 21) & 7;
5669 int op2 = (insn >> 5) & 7;
5670 int rt = (insn >> 12) & 0xf;
5671 TCGv tmp;
5672
5673 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5674 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5675 /* TEECR */
5676 if (IS_USER(s))
5677 return 1;
5678 tmp = load_cpu_field(teecr);
5679 store_reg(s, rt, tmp);
5680 return 0;
5681 }
5682 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5683 /* TEEHBR */
5684 if (IS_USER(s) && (env->teecr & 1))
5685 return 1;
5686 tmp = load_cpu_field(teehbr);
5687 store_reg(s, rt, tmp);
5688 return 0;
5689 }
5690 }
5691 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5692 op1, crn, crm, op2);
5693 return 1;
5694}
5695
5696static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5697{
5698 int crn = (insn >> 16) & 0xf;
5699 int crm = insn & 0xf;
5700 int op1 = (insn >> 21) & 7;
5701 int op2 = (insn >> 5) & 7;
5702 int rt = (insn >> 12) & 0xf;
5703 TCGv tmp;
5704
5705 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5706 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5707 /* TEECR */
5708 if (IS_USER(s))
5709 return 1;
5710 tmp = load_reg(s, rt);
5711 gen_helper_set_teecr(cpu_env, tmp);
5712 dead_tmp(tmp);
5713 return 0;
5714 }
5715 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5716 /* TEEHBR */
5717 if (IS_USER(s) && (env->teecr & 1))
5718 return 1;
5719 tmp = load_reg(s, rt);
5720 store_cpu_field(tmp, teehbr);
5721 return 0;
5722 }
5723 }
5724 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5725 op1, crn, crm, op2);
5726 return 1;
5727}
5728
9ee6e8bb
PB
5729static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5730{
5731 int cpnum;
5732
5733 cpnum = (insn >> 8) & 0xf;
5734 if (arm_feature(env, ARM_FEATURE_XSCALE)
5735 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5736 return 1;
5737
5738 switch (cpnum) {
5739 case 0:
5740 case 1:
5741 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5742 return disas_iwmmxt_insn(env, s, insn);
5743 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5744 return disas_dsp_insn(env, s, insn);
5745 }
5746 return 1;
5747 case 10:
5748 case 11:
5749 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5750 case 14:
5751 /* Coprocessors 7-15 are architecturally reserved by ARM.
5752 Unfortunately Intel decided to ignore this. */
5753 if (arm_feature(env, ARM_FEATURE_XSCALE))
5754 goto board;
5755 if (insn & (1 << 20))
5756 return disas_cp14_read(env, s, insn);
5757 else
5758 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5759 case 15:
5760 return disas_cp15_insn (env, s, insn);
5761 default:
fe1479c3 5762 board:
9ee6e8bb
PB
5763 /* Unknown coprocessor. See if the board has hooked it. */
5764 return disas_cp_insn (env, s, insn);
5765 }
5766}
5767
5e3f878a
PB
5768
5769/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5770static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5771{
5772 TCGv tmp;
5773 tmp = new_tmp();
5774 tcg_gen_trunc_i64_i32(tmp, val);
5775 store_reg(s, rlow, tmp);
5776 tmp = new_tmp();
5777 tcg_gen_shri_i64(val, val, 32);
5778 tcg_gen_trunc_i64_i32(tmp, val);
5779 store_reg(s, rhigh, tmp);
5780}
5781
5782/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5783static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5784{
a7812ae4 5785 TCGv_i64 tmp;
5e3f878a
PB
5786 TCGv tmp2;
5787
36aa55dc 5788 /* Load value and extend to 64 bits. */
a7812ae4 5789 tmp = tcg_temp_new_i64();
5e3f878a
PB
5790 tmp2 = load_reg(s, rlow);
5791 tcg_gen_extu_i32_i64(tmp, tmp2);
5792 dead_tmp(tmp2);
5793 tcg_gen_add_i64(val, val, tmp);
5794}
5795
5796/* load and add a 64-bit value from a register pair. */
a7812ae4 5797static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5798{
a7812ae4 5799 TCGv_i64 tmp;
36aa55dc
PB
5800 TCGv tmpl;
5801 TCGv tmph;
5e3f878a
PB
5802
5803 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5804 tmpl = load_reg(s, rlow);
5805 tmph = load_reg(s, rhigh);
a7812ae4 5806 tmp = tcg_temp_new_i64();
36aa55dc
PB
5807 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5808 dead_tmp(tmpl);
5809 dead_tmp(tmph);
5e3f878a
PB
5810 tcg_gen_add_i64(val, val, tmp);
5811}
5812
5813/* Set N and Z flags from a 64-bit value. */
a7812ae4 5814static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5815{
5816 TCGv tmp = new_tmp();
5817 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5818 gen_logic_CC(tmp);
5819 dead_tmp(tmp);
5e3f878a
PB
5820}
5821
9ee6e8bb
PB
5822static void disas_arm_insn(CPUState * env, DisasContext *s)
5823{
5824 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5825 TCGv tmp;
3670669c 5826 TCGv tmp2;
6ddbc6e4 5827 TCGv tmp3;
b0109805 5828 TCGv addr;
a7812ae4 5829 TCGv_i64 tmp64;
9ee6e8bb
PB
5830
5831 insn = ldl_code(s->pc);
5832 s->pc += 4;
5833
5834 /* M variants do not implement ARM mode. */
5835 if (IS_M(env))
5836 goto illegal_op;
5837 cond = insn >> 28;
5838 if (cond == 0xf){
5839 /* Unconditional instructions. */
5840 if (((insn >> 25) & 7) == 1) {
5841 /* NEON Data processing. */
5842 if (!arm_feature(env, ARM_FEATURE_NEON))
5843 goto illegal_op;
5844
5845 if (disas_neon_data_insn(env, s, insn))
5846 goto illegal_op;
5847 return;
5848 }
5849 if ((insn & 0x0f100000) == 0x04000000) {
5850 /* NEON load/store. */
5851 if (!arm_feature(env, ARM_FEATURE_NEON))
5852 goto illegal_op;
5853
5854 if (disas_neon_ls_insn(env, s, insn))
5855 goto illegal_op;
5856 return;
5857 }
5858 if ((insn & 0x0d70f000) == 0x0550f000)
5859 return; /* PLD */
5860 else if ((insn & 0x0ffffdff) == 0x01010000) {
5861 ARCH(6);
5862 /* setend */
5863 if (insn & (1 << 9)) {
5864 /* BE8 mode not implemented. */
5865 goto illegal_op;
5866 }
5867 return;
5868 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5869 switch ((insn >> 4) & 0xf) {
5870 case 1: /* clrex */
5871 ARCH(6K);
8f8e3aa4 5872 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5873 return;
5874 case 4: /* dsb */
5875 case 5: /* dmb */
5876 case 6: /* isb */
5877 ARCH(7);
5878 /* We don't emulate caches so these are a no-op. */
5879 return;
5880 default:
5881 goto illegal_op;
5882 }
5883 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5884 /* srs */
c67b6b71 5885 int32_t offset;
9ee6e8bb
PB
5886 if (IS_USER(s))
5887 goto illegal_op;
5888 ARCH(6);
5889 op1 = (insn & 0x1f);
5890 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5891 addr = load_reg(s, 13);
9ee6e8bb 5892 } else {
b0109805
PB
5893 addr = new_tmp();
5894 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5895 }
5896 i = (insn >> 23) & 3;
5897 switch (i) {
5898 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5899 case 1: offset = 0; break; /* IA */
5900 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
5901 case 3: offset = 4; break; /* IB */
5902 default: abort();
5903 }
5904 if (offset)
b0109805
PB
5905 tcg_gen_addi_i32(addr, addr, offset);
5906 tmp = load_reg(s, 14);
5907 gen_st32(tmp, addr, 0);
c67b6b71 5908 tmp = load_cpu_field(spsr);
b0109805
PB
5909 tcg_gen_addi_i32(addr, addr, 4);
5910 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5911 if (insn & (1 << 21)) {
5912 /* Base writeback. */
5913 switch (i) {
5914 case 0: offset = -8; break;
c67b6b71
FN
5915 case 1: offset = 4; break;
5916 case 2: offset = -4; break;
9ee6e8bb
PB
5917 case 3: offset = 0; break;
5918 default: abort();
5919 }
5920 if (offset)
c67b6b71 5921 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb 5922 if (op1 == (env->uncached_cpsr & CPSR_M)) {
c67b6b71 5923 store_reg(s, 13, addr);
9ee6e8bb 5924 } else {
c67b6b71
FN
5925 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
5926 dead_tmp(addr);
9ee6e8bb 5927 }
b0109805
PB
5928 } else {
5929 dead_tmp(addr);
9ee6e8bb
PB
5930 }
5931 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5932 /* rfe */
c67b6b71 5933 int32_t offset;
9ee6e8bb
PB
5934 if (IS_USER(s))
5935 goto illegal_op;
5936 ARCH(6);
5937 rn = (insn >> 16) & 0xf;
b0109805 5938 addr = load_reg(s, rn);
9ee6e8bb
PB
5939 i = (insn >> 23) & 3;
5940 switch (i) {
b0109805 5941 case 0: offset = -4; break; /* DA */
c67b6b71
FN
5942 case 1: offset = 0; break; /* IA */
5943 case 2: offset = -8; break; /* DB */
b0109805 5944 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5945 default: abort();
5946 }
5947 if (offset)
b0109805
PB
5948 tcg_gen_addi_i32(addr, addr, offset);
5949 /* Load PC into tmp and CPSR into tmp2. */
5950 tmp = gen_ld32(addr, 0);
5951 tcg_gen_addi_i32(addr, addr, 4);
5952 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5953 if (insn & (1 << 21)) {
5954 /* Base writeback. */
5955 switch (i) {
b0109805 5956 case 0: offset = -8; break;
c67b6b71
FN
5957 case 1: offset = 4; break;
5958 case 2: offset = -4; break;
b0109805 5959 case 3: offset = 0; break;
9ee6e8bb
PB
5960 default: abort();
5961 }
5962 if (offset)
b0109805
PB
5963 tcg_gen_addi_i32(addr, addr, offset);
5964 store_reg(s, rn, addr);
5965 } else {
5966 dead_tmp(addr);
9ee6e8bb 5967 }
b0109805 5968 gen_rfe(s, tmp, tmp2);
c67b6b71 5969 return;
9ee6e8bb
PB
5970 } else if ((insn & 0x0e000000) == 0x0a000000) {
5971 /* branch link and change to thumb (blx <offset>) */
5972 int32_t offset;
5973
5974 val = (uint32_t)s->pc;
d9ba4830
PB
5975 tmp = new_tmp();
5976 tcg_gen_movi_i32(tmp, val);
5977 store_reg(s, 14, tmp);
9ee6e8bb
PB
5978 /* Sign-extend the 24-bit offset */
5979 offset = (((int32_t)insn) << 8) >> 8;
5980 /* offset * 4 + bit24 * 2 + (thumb bit) */
5981 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5982 /* pipeline offset */
5983 val += 4;
d9ba4830 5984 gen_bx_im(s, val);
9ee6e8bb
PB
5985 return;
5986 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5987 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5988 /* iWMMXt register transfer. */
5989 if (env->cp15.c15_cpar & (1 << 1))
5990 if (!disas_iwmmxt_insn(env, s, insn))
5991 return;
5992 }
5993 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5994 /* Coprocessor double register transfer. */
5995 } else if ((insn & 0x0f000010) == 0x0e000010) {
5996 /* Additional coprocessor register transfer. */
7997d92f 5997 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5998 uint32_t mask;
5999 uint32_t val;
6000 /* cps (privileged) */
6001 if (IS_USER(s))
6002 return;
6003 mask = val = 0;
6004 if (insn & (1 << 19)) {
6005 if (insn & (1 << 8))
6006 mask |= CPSR_A;
6007 if (insn & (1 << 7))
6008 mask |= CPSR_I;
6009 if (insn & (1 << 6))
6010 mask |= CPSR_F;
6011 if (insn & (1 << 18))
6012 val |= mask;
6013 }
7997d92f 6014 if (insn & (1 << 17)) {
9ee6e8bb
PB
6015 mask |= CPSR_M;
6016 val |= (insn & 0x1f);
6017 }
6018 if (mask) {
2fbac54b 6019 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6020 }
6021 return;
6022 }
6023 goto illegal_op;
6024 }
6025 if (cond != 0xe) {
6026 /* if not always execute, we generate a conditional jump to
6027 next instruction */
6028 s->condlabel = gen_new_label();
d9ba4830 6029 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6030 s->condjmp = 1;
6031 }
6032 if ((insn & 0x0f900000) == 0x03000000) {
6033 if ((insn & (1 << 21)) == 0) {
6034 ARCH(6T2);
6035 rd = (insn >> 12) & 0xf;
6036 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6037 if ((insn & (1 << 22)) == 0) {
6038 /* MOVW */
5e3f878a
PB
6039 tmp = new_tmp();
6040 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6041 } else {
6042 /* MOVT */
5e3f878a 6043 tmp = load_reg(s, rd);
86831435 6044 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6045 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6046 }
5e3f878a 6047 store_reg(s, rd, tmp);
9ee6e8bb
PB
6048 } else {
6049 if (((insn >> 12) & 0xf) != 0xf)
6050 goto illegal_op;
6051 if (((insn >> 16) & 0xf) == 0) {
6052 gen_nop_hint(s, insn & 0xff);
6053 } else {
6054 /* CPSR = immediate */
6055 val = insn & 0xff;
6056 shift = ((insn >> 8) & 0xf) * 2;
6057 if (shift)
6058 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6059 i = ((insn & (1 << 22)) != 0);
2fbac54b 6060 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6061 goto illegal_op;
6062 }
6063 }
6064 } else if ((insn & 0x0f900000) == 0x01000000
6065 && (insn & 0x00000090) != 0x00000090) {
6066 /* miscellaneous instructions */
6067 op1 = (insn >> 21) & 3;
6068 sh = (insn >> 4) & 0xf;
6069 rm = insn & 0xf;
6070 switch (sh) {
6071 case 0x0: /* move program status register */
6072 if (op1 & 1) {
6073 /* PSR = reg */
2fbac54b 6074 tmp = load_reg(s, rm);
9ee6e8bb 6075 i = ((op1 & 2) != 0);
2fbac54b 6076 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6077 goto illegal_op;
6078 } else {
6079 /* reg = PSR */
6080 rd = (insn >> 12) & 0xf;
6081 if (op1 & 2) {
6082 if (IS_USER(s))
6083 goto illegal_op;
d9ba4830 6084 tmp = load_cpu_field(spsr);
9ee6e8bb 6085 } else {
d9ba4830
PB
6086 tmp = new_tmp();
6087 gen_helper_cpsr_read(tmp);
9ee6e8bb 6088 }
d9ba4830 6089 store_reg(s, rd, tmp);
9ee6e8bb
PB
6090 }
6091 break;
6092 case 0x1:
6093 if (op1 == 1) {
6094 /* branch/exchange thumb (bx). */
d9ba4830
PB
6095 tmp = load_reg(s, rm);
6096 gen_bx(s, tmp);
9ee6e8bb
PB
6097 } else if (op1 == 3) {
6098 /* clz */
6099 rd = (insn >> 12) & 0xf;
1497c961
PB
6100 tmp = load_reg(s, rm);
6101 gen_helper_clz(tmp, tmp);
6102 store_reg(s, rd, tmp);
9ee6e8bb
PB
6103 } else {
6104 goto illegal_op;
6105 }
6106 break;
6107 case 0x2:
6108 if (op1 == 1) {
6109 ARCH(5J); /* bxj */
6110 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6111 tmp = load_reg(s, rm);
6112 gen_bx(s, tmp);
9ee6e8bb
PB
6113 } else {
6114 goto illegal_op;
6115 }
6116 break;
6117 case 0x3:
6118 if (op1 != 1)
6119 goto illegal_op;
6120
6121 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6122 tmp = load_reg(s, rm);
6123 tmp2 = new_tmp();
6124 tcg_gen_movi_i32(tmp2, s->pc);
6125 store_reg(s, 14, tmp2);
6126 gen_bx(s, tmp);
9ee6e8bb
PB
6127 break;
6128 case 0x5: /* saturating add/subtract */
6129 rd = (insn >> 12) & 0xf;
6130 rn = (insn >> 16) & 0xf;
b40d0353 6131 tmp = load_reg(s, rm);
5e3f878a 6132 tmp2 = load_reg(s, rn);
9ee6e8bb 6133 if (op1 & 2)
5e3f878a 6134 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6135 if (op1 & 1)
5e3f878a 6136 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6137 else
5e3f878a
PB
6138 gen_helper_add_saturate(tmp, tmp, tmp2);
6139 dead_tmp(tmp2);
6140 store_reg(s, rd, tmp);
9ee6e8bb
PB
6141 break;
6142 case 7: /* bkpt */
6143 gen_set_condexec(s);
5e3f878a 6144 gen_set_pc_im(s->pc - 4);
d9ba4830 6145 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6146 s->is_jmp = DISAS_JUMP;
6147 break;
6148 case 0x8: /* signed multiply */
6149 case 0xa:
6150 case 0xc:
6151 case 0xe:
6152 rs = (insn >> 8) & 0xf;
6153 rn = (insn >> 12) & 0xf;
6154 rd = (insn >> 16) & 0xf;
6155 if (op1 == 1) {
6156 /* (32 * 16) >> 16 */
5e3f878a
PB
6157 tmp = load_reg(s, rm);
6158 tmp2 = load_reg(s, rs);
9ee6e8bb 6159 if (sh & 4)
5e3f878a 6160 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6161 else
5e3f878a 6162 gen_sxth(tmp2);
a7812ae4
PB
6163 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6164 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6165 tmp = new_tmp();
a7812ae4 6166 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 6167 if ((sh & 2) == 0) {
5e3f878a
PB
6168 tmp2 = load_reg(s, rn);
6169 gen_helper_add_setq(tmp, tmp, tmp2);
6170 dead_tmp(tmp2);
9ee6e8bb 6171 }
5e3f878a 6172 store_reg(s, rd, tmp);
9ee6e8bb
PB
6173 } else {
6174 /* 16 * 16 */
5e3f878a
PB
6175 tmp = load_reg(s, rm);
6176 tmp2 = load_reg(s, rs);
6177 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6178 dead_tmp(tmp2);
9ee6e8bb 6179 if (op1 == 2) {
a7812ae4
PB
6180 tmp64 = tcg_temp_new_i64();
6181 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6182 dead_tmp(tmp);
a7812ae4
PB
6183 gen_addq(s, tmp64, rn, rd);
6184 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6185 } else {
6186 if (op1 == 0) {
5e3f878a
PB
6187 tmp2 = load_reg(s, rn);
6188 gen_helper_add_setq(tmp, tmp, tmp2);
6189 dead_tmp(tmp2);
9ee6e8bb 6190 }
5e3f878a 6191 store_reg(s, rd, tmp);
9ee6e8bb
PB
6192 }
6193 }
6194 break;
6195 default:
6196 goto illegal_op;
6197 }
6198 } else if (((insn & 0x0e000000) == 0 &&
6199 (insn & 0x00000090) != 0x90) ||
6200 ((insn & 0x0e000000) == (1 << 25))) {
6201 int set_cc, logic_cc, shiftop;
6202
6203 op1 = (insn >> 21) & 0xf;
6204 set_cc = (insn >> 20) & 1;
6205 logic_cc = table_logic_cc[op1] & set_cc;
6206
6207 /* data processing instruction */
6208 if (insn & (1 << 25)) {
6209 /* immediate operand */
6210 val = insn & 0xff;
6211 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6212 if (shift) {
9ee6e8bb 6213 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6214 }
6215 tmp2 = new_tmp();
6216 tcg_gen_movi_i32(tmp2, val);
6217 if (logic_cc && shift) {
6218 gen_set_CF_bit31(tmp2);
6219 }
9ee6e8bb
PB
6220 } else {
6221 /* register */
6222 rm = (insn) & 0xf;
e9bb4aa9 6223 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6224 shiftop = (insn >> 5) & 3;
6225 if (!(insn & (1 << 4))) {
6226 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6227 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6228 } else {
6229 rs = (insn >> 8) & 0xf;
8984bd2e 6230 tmp = load_reg(s, rs);
e9bb4aa9 6231 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6232 }
6233 }
6234 if (op1 != 0x0f && op1 != 0x0d) {
6235 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6236 tmp = load_reg(s, rn);
6237 } else {
6238 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6239 }
6240 rd = (insn >> 12) & 0xf;
6241 switch(op1) {
6242 case 0x00:
e9bb4aa9
JR
6243 tcg_gen_and_i32(tmp, tmp, tmp2);
6244 if (logic_cc) {
6245 gen_logic_CC(tmp);
6246 }
21aeb343 6247 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6248 break;
6249 case 0x01:
e9bb4aa9
JR
6250 tcg_gen_xor_i32(tmp, tmp, tmp2);
6251 if (logic_cc) {
6252 gen_logic_CC(tmp);
6253 }
21aeb343 6254 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6255 break;
6256 case 0x02:
6257 if (set_cc && rd == 15) {
6258 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6259 if (IS_USER(s)) {
9ee6e8bb 6260 goto illegal_op;
e9bb4aa9
JR
6261 }
6262 gen_helper_sub_cc(tmp, tmp, tmp2);
6263 gen_exception_return(s, tmp);
9ee6e8bb 6264 } else {
e9bb4aa9
JR
6265 if (set_cc) {
6266 gen_helper_sub_cc(tmp, tmp, tmp2);
6267 } else {
6268 tcg_gen_sub_i32(tmp, tmp, tmp2);
6269 }
21aeb343 6270 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6271 }
6272 break;
6273 case 0x03:
e9bb4aa9
JR
6274 if (set_cc) {
6275 gen_helper_sub_cc(tmp, tmp2, tmp);
6276 } else {
6277 tcg_gen_sub_i32(tmp, tmp2, tmp);
6278 }
21aeb343 6279 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6280 break;
6281 case 0x04:
e9bb4aa9
JR
6282 if (set_cc) {
6283 gen_helper_add_cc(tmp, tmp, tmp2);
6284 } else {
6285 tcg_gen_add_i32(tmp, tmp, tmp2);
6286 }
21aeb343 6287 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6288 break;
6289 case 0x05:
e9bb4aa9
JR
6290 if (set_cc) {
6291 gen_helper_adc_cc(tmp, tmp, tmp2);
6292 } else {
6293 gen_add_carry(tmp, tmp, tmp2);
6294 }
21aeb343 6295 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6296 break;
6297 case 0x06:
e9bb4aa9
JR
6298 if (set_cc) {
6299 gen_helper_sbc_cc(tmp, tmp, tmp2);
6300 } else {
6301 gen_sub_carry(tmp, tmp, tmp2);
6302 }
21aeb343 6303 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6304 break;
6305 case 0x07:
e9bb4aa9
JR
6306 if (set_cc) {
6307 gen_helper_sbc_cc(tmp, tmp2, tmp);
6308 } else {
6309 gen_sub_carry(tmp, tmp2, tmp);
6310 }
21aeb343 6311 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6312 break;
6313 case 0x08:
6314 if (set_cc) {
e9bb4aa9
JR
6315 tcg_gen_and_i32(tmp, tmp, tmp2);
6316 gen_logic_CC(tmp);
9ee6e8bb 6317 }
e9bb4aa9 6318 dead_tmp(tmp);
9ee6e8bb
PB
6319 break;
6320 case 0x09:
6321 if (set_cc) {
e9bb4aa9
JR
6322 tcg_gen_xor_i32(tmp, tmp, tmp2);
6323 gen_logic_CC(tmp);
9ee6e8bb 6324 }
e9bb4aa9 6325 dead_tmp(tmp);
9ee6e8bb
PB
6326 break;
6327 case 0x0a:
6328 if (set_cc) {
e9bb4aa9 6329 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6330 }
e9bb4aa9 6331 dead_tmp(tmp);
9ee6e8bb
PB
6332 break;
6333 case 0x0b:
6334 if (set_cc) {
e9bb4aa9 6335 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6336 }
e9bb4aa9 6337 dead_tmp(tmp);
9ee6e8bb
PB
6338 break;
6339 case 0x0c:
e9bb4aa9
JR
6340 tcg_gen_or_i32(tmp, tmp, tmp2);
6341 if (logic_cc) {
6342 gen_logic_CC(tmp);
6343 }
21aeb343 6344 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6345 break;
6346 case 0x0d:
6347 if (logic_cc && rd == 15) {
6348 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6349 if (IS_USER(s)) {
9ee6e8bb 6350 goto illegal_op;
e9bb4aa9
JR
6351 }
6352 gen_exception_return(s, tmp2);
9ee6e8bb 6353 } else {
e9bb4aa9
JR
6354 if (logic_cc) {
6355 gen_logic_CC(tmp2);
6356 }
21aeb343 6357 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6358 }
6359 break;
6360 case 0x0e:
e9bb4aa9
JR
6361 tcg_gen_bic_i32(tmp, tmp, tmp2);
6362 if (logic_cc) {
6363 gen_logic_CC(tmp);
6364 }
21aeb343 6365 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6366 break;
6367 default:
6368 case 0x0f:
e9bb4aa9
JR
6369 tcg_gen_not_i32(tmp2, tmp2);
6370 if (logic_cc) {
6371 gen_logic_CC(tmp2);
6372 }
21aeb343 6373 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6374 break;
6375 }
e9bb4aa9
JR
6376 if (op1 != 0x0f && op1 != 0x0d) {
6377 dead_tmp(tmp2);
6378 }
9ee6e8bb
PB
6379 } else {
6380 /* other instructions */
6381 op1 = (insn >> 24) & 0xf;
6382 switch(op1) {
6383 case 0x0:
6384 case 0x1:
6385 /* multiplies, extra load/stores */
6386 sh = (insn >> 5) & 3;
6387 if (sh == 0) {
6388 if (op1 == 0x0) {
6389 rd = (insn >> 16) & 0xf;
6390 rn = (insn >> 12) & 0xf;
6391 rs = (insn >> 8) & 0xf;
6392 rm = (insn) & 0xf;
6393 op1 = (insn >> 20) & 0xf;
6394 switch (op1) {
6395 case 0: case 1: case 2: case 3: case 6:
6396 /* 32 bit mul */
5e3f878a
PB
6397 tmp = load_reg(s, rs);
6398 tmp2 = load_reg(s, rm);
6399 tcg_gen_mul_i32(tmp, tmp, tmp2);
6400 dead_tmp(tmp2);
9ee6e8bb
PB
6401 if (insn & (1 << 22)) {
6402 /* Subtract (mls) */
6403 ARCH(6T2);
5e3f878a
PB
6404 tmp2 = load_reg(s, rn);
6405 tcg_gen_sub_i32(tmp, tmp2, tmp);
6406 dead_tmp(tmp2);
9ee6e8bb
PB
6407 } else if (insn & (1 << 21)) {
6408 /* Add */
5e3f878a
PB
6409 tmp2 = load_reg(s, rn);
6410 tcg_gen_add_i32(tmp, tmp, tmp2);
6411 dead_tmp(tmp2);
9ee6e8bb
PB
6412 }
6413 if (insn & (1 << 20))
5e3f878a
PB
6414 gen_logic_CC(tmp);
6415 store_reg(s, rd, tmp);
9ee6e8bb
PB
6416 break;
6417 default:
6418 /* 64 bit mul */
5e3f878a
PB
6419 tmp = load_reg(s, rs);
6420 tmp2 = load_reg(s, rm);
9ee6e8bb 6421 if (insn & (1 << 22))
a7812ae4 6422 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6423 else
a7812ae4 6424 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6425 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6426 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6427 if (!(insn & (1 << 23))) { /* double accumulate */
6428 ARCH(6);
a7812ae4
PB
6429 gen_addq_lo(s, tmp64, rn);
6430 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6431 }
6432 if (insn & (1 << 20))
a7812ae4
PB
6433 gen_logicq_cc(tmp64);
6434 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6435 break;
6436 }
6437 } else {
6438 rn = (insn >> 16) & 0xf;
6439 rd = (insn >> 12) & 0xf;
6440 if (insn & (1 << 23)) {
6441 /* load/store exclusive */
86753403
PB
6442 op1 = (insn >> 21) & 0x3;
6443 if (op1)
a47f43d2 6444 ARCH(6K);
86753403
PB
6445 else
6446 ARCH(6);
9ee6e8bb 6447 gen_movl_T1_reg(s, rn);
72f1c62f 6448 addr = cpu_T[1];
9ee6e8bb 6449 if (insn & (1 << 20)) {
8f8e3aa4 6450 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
86753403
PB
6451 switch (op1) {
6452 case 0: /* ldrex */
6453 tmp = gen_ld32(addr, IS_USER(s));
6454 break;
6455 case 1: /* ldrexd */
6456 tmp = gen_ld32(addr, IS_USER(s));
6457 store_reg(s, rd, tmp);
6458 tcg_gen_addi_i32(addr, addr, 4);
6459 tmp = gen_ld32(addr, IS_USER(s));
6460 rd++;
6461 break;
6462 case 2: /* ldrexb */
6463 tmp = gen_ld8u(addr, IS_USER(s));
6464 break;
6465 case 3: /* ldrexh */
6466 tmp = gen_ld16u(addr, IS_USER(s));
6467 break;
6468 default:
6469 abort();
6470 }
8f8e3aa4 6471 store_reg(s, rd, tmp);
9ee6e8bb 6472 } else {
8f8e3aa4 6473 int label = gen_new_label();
9ee6e8bb 6474 rm = insn & 0xf;
8f8e3aa4 6475 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
6476 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6477 0, label);
8f8e3aa4 6478 tmp = load_reg(s,rm);
86753403
PB
6479 switch (op1) {
6480 case 0: /* strex */
6481 gen_st32(tmp, addr, IS_USER(s));
6482 break;
6483 case 1: /* strexd */
6484 gen_st32(tmp, addr, IS_USER(s));
6485 tcg_gen_addi_i32(addr, addr, 4);
6486 tmp = load_reg(s, rm + 1);
6487 gen_st32(tmp, addr, IS_USER(s));
6488 break;
6489 case 2: /* strexb */
6490 gen_st8(tmp, addr, IS_USER(s));
6491 break;
6492 case 3: /* strexh */
6493 gen_st16(tmp, addr, IS_USER(s));
6494 break;
6495 default:
6496 abort();
6497 }
2637a3be 6498 gen_set_label(label);
8f8e3aa4 6499 gen_movl_reg_T0(s, rd);
9ee6e8bb 6500 }
9ee6e8bb
PB
6501 } else {
6502 /* SWP instruction */
6503 rm = (insn) & 0xf;
6504
8984bd2e
PB
6505 /* ??? This is not really atomic. However we know
6506 we never have multiple CPUs running in parallel,
6507 so it is good enough. */
6508 addr = load_reg(s, rn);
6509 tmp = load_reg(s, rm);
9ee6e8bb 6510 if (insn & (1 << 22)) {
8984bd2e
PB
6511 tmp2 = gen_ld8u(addr, IS_USER(s));
6512 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6513 } else {
8984bd2e
PB
6514 tmp2 = gen_ld32(addr, IS_USER(s));
6515 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6516 }
8984bd2e
PB
6517 dead_tmp(addr);
6518 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6519 }
6520 }
6521 } else {
6522 int address_offset;
6523 int load;
6524 /* Misc load/store */
6525 rn = (insn >> 16) & 0xf;
6526 rd = (insn >> 12) & 0xf;
b0109805 6527 addr = load_reg(s, rn);
9ee6e8bb 6528 if (insn & (1 << 24))
b0109805 6529 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6530 address_offset = 0;
6531 if (insn & (1 << 20)) {
6532 /* load */
6533 switch(sh) {
6534 case 1:
b0109805 6535 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6536 break;
6537 case 2:
b0109805 6538 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6539 break;
6540 default:
6541 case 3:
b0109805 6542 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6543 break;
6544 }
6545 load = 1;
6546 } else if (sh & 2) {
6547 /* doubleword */
6548 if (sh & 1) {
6549 /* store */
b0109805
PB
6550 tmp = load_reg(s, rd);
6551 gen_st32(tmp, addr, IS_USER(s));
6552 tcg_gen_addi_i32(addr, addr, 4);
6553 tmp = load_reg(s, rd + 1);
6554 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6555 load = 0;
6556 } else {
6557 /* load */
b0109805
PB
6558 tmp = gen_ld32(addr, IS_USER(s));
6559 store_reg(s, rd, tmp);
6560 tcg_gen_addi_i32(addr, addr, 4);
6561 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6562 rd++;
6563 load = 1;
6564 }
6565 address_offset = -4;
6566 } else {
6567 /* store */
b0109805
PB
6568 tmp = load_reg(s, rd);
6569 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6570 load = 0;
6571 }
6572 /* Perform base writeback before the loaded value to
6573 ensure correct behavior with overlapping index registers.
6574 ldrd with base writeback is is undefined if the
6575 destination and index registers overlap. */
6576 if (!(insn & (1 << 24))) {
b0109805
PB
6577 gen_add_datah_offset(s, insn, address_offset, addr);
6578 store_reg(s, rn, addr);
9ee6e8bb
PB
6579 } else if (insn & (1 << 21)) {
6580 if (address_offset)
b0109805
PB
6581 tcg_gen_addi_i32(addr, addr, address_offset);
6582 store_reg(s, rn, addr);
6583 } else {
6584 dead_tmp(addr);
9ee6e8bb
PB
6585 }
6586 if (load) {
6587 /* Complete the load. */
b0109805 6588 store_reg(s, rd, tmp);
9ee6e8bb
PB
6589 }
6590 }
6591 break;
6592 case 0x4:
6593 case 0x5:
6594 goto do_ldst;
6595 case 0x6:
6596 case 0x7:
6597 if (insn & (1 << 4)) {
6598 ARCH(6);
6599 /* Armv6 Media instructions. */
6600 rm = insn & 0xf;
6601 rn = (insn >> 16) & 0xf;
2c0262af 6602 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6603 rs = (insn >> 8) & 0xf;
6604 switch ((insn >> 23) & 3) {
6605 case 0: /* Parallel add/subtract. */
6606 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6607 tmp = load_reg(s, rn);
6608 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6609 sh = (insn >> 5) & 7;
6610 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6611 goto illegal_op;
6ddbc6e4
PB
6612 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6613 dead_tmp(tmp2);
6614 store_reg(s, rd, tmp);
9ee6e8bb
PB
6615 break;
6616 case 1:
6617 if ((insn & 0x00700020) == 0) {
6c95676b 6618 /* Halfword pack. */
3670669c
PB
6619 tmp = load_reg(s, rn);
6620 tmp2 = load_reg(s, rm);
9ee6e8bb 6621 shift = (insn >> 7) & 0x1f;
3670669c
PB
6622 if (insn & (1 << 6)) {
6623 /* pkhtb */
22478e79
AZ
6624 if (shift == 0)
6625 shift = 31;
6626 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6627 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6628 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6629 } else {
6630 /* pkhbt */
22478e79
AZ
6631 if (shift)
6632 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6633 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6634 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6635 }
6636 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6637 dead_tmp(tmp2);
3670669c 6638 store_reg(s, rd, tmp);
9ee6e8bb
PB
6639 } else if ((insn & 0x00200020) == 0x00200000) {
6640 /* [us]sat */
6ddbc6e4 6641 tmp = load_reg(s, rm);
9ee6e8bb
PB
6642 shift = (insn >> 7) & 0x1f;
6643 if (insn & (1 << 6)) {
6644 if (shift == 0)
6645 shift = 31;
6ddbc6e4 6646 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6647 } else {
6ddbc6e4 6648 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6649 }
6650 sh = (insn >> 16) & 0x1f;
6651 if (sh != 0) {
6652 if (insn & (1 << 22))
6ddbc6e4 6653 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6654 else
6ddbc6e4 6655 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6656 }
6ddbc6e4 6657 store_reg(s, rd, tmp);
9ee6e8bb
PB
6658 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6659 /* [us]sat16 */
6ddbc6e4 6660 tmp = load_reg(s, rm);
9ee6e8bb
PB
6661 sh = (insn >> 16) & 0x1f;
6662 if (sh != 0) {
6663 if (insn & (1 << 22))
6ddbc6e4 6664 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6665 else
6ddbc6e4 6666 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6667 }
6ddbc6e4 6668 store_reg(s, rd, tmp);
9ee6e8bb
PB
6669 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6670 /* Select bytes. */
6ddbc6e4
PB
6671 tmp = load_reg(s, rn);
6672 tmp2 = load_reg(s, rm);
6673 tmp3 = new_tmp();
6674 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6675 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6676 dead_tmp(tmp3);
6677 dead_tmp(tmp2);
6678 store_reg(s, rd, tmp);
9ee6e8bb 6679 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6680 tmp = load_reg(s, rm);
9ee6e8bb
PB
6681 shift = (insn >> 10) & 3;
6682 /* ??? In many cases it's not necessary to do a
6683 rotate, a shift is sufficient. */
6684 if (shift != 0)
5e3f878a 6685 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6686 op1 = (insn >> 20) & 7;
6687 switch (op1) {
5e3f878a
PB
6688 case 0: gen_sxtb16(tmp); break;
6689 case 2: gen_sxtb(tmp); break;
6690 case 3: gen_sxth(tmp); break;
6691 case 4: gen_uxtb16(tmp); break;
6692 case 6: gen_uxtb(tmp); break;
6693 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6694 default: goto illegal_op;
6695 }
6696 if (rn != 15) {
5e3f878a 6697 tmp2 = load_reg(s, rn);
9ee6e8bb 6698 if ((op1 & 3) == 0) {
5e3f878a 6699 gen_add16(tmp, tmp2);
9ee6e8bb 6700 } else {
5e3f878a
PB
6701 tcg_gen_add_i32(tmp, tmp, tmp2);
6702 dead_tmp(tmp2);
9ee6e8bb
PB
6703 }
6704 }
6c95676b 6705 store_reg(s, rd, tmp);
9ee6e8bb
PB
6706 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6707 /* rev */
b0109805 6708 tmp = load_reg(s, rm);
9ee6e8bb
PB
6709 if (insn & (1 << 22)) {
6710 if (insn & (1 << 7)) {
b0109805 6711 gen_revsh(tmp);
9ee6e8bb
PB
6712 } else {
6713 ARCH(6T2);
b0109805 6714 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6715 }
6716 } else {
6717 if (insn & (1 << 7))
b0109805 6718 gen_rev16(tmp);
9ee6e8bb 6719 else
66896cb8 6720 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6721 }
b0109805 6722 store_reg(s, rd, tmp);
9ee6e8bb
PB
6723 } else {
6724 goto illegal_op;
6725 }
6726 break;
6727 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6728 tmp = load_reg(s, rm);
6729 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6730 if (insn & (1 << 20)) {
6731 /* Signed multiply most significant [accumulate]. */
a7812ae4 6732 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6733 if (insn & (1 << 5))
a7812ae4
PB
6734 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6735 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6736 tmp = new_tmp();
a7812ae4 6737 tcg_gen_trunc_i64_i32(tmp, tmp64);
955a7dd5
AZ
6738 if (rd != 15) {
6739 tmp2 = load_reg(s, rd);
9ee6e8bb 6740 if (insn & (1 << 6)) {
5e3f878a 6741 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6742 } else {
5e3f878a 6743 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6744 }
5e3f878a 6745 dead_tmp(tmp2);
9ee6e8bb 6746 }
955a7dd5 6747 store_reg(s, rn, tmp);
9ee6e8bb
PB
6748 } else {
6749 if (insn & (1 << 5))
5e3f878a
PB
6750 gen_swap_half(tmp2);
6751 gen_smul_dual(tmp, tmp2);
6752 /* This addition cannot overflow. */
6753 if (insn & (1 << 6)) {
6754 tcg_gen_sub_i32(tmp, tmp, tmp2);
6755 } else {
6756 tcg_gen_add_i32(tmp, tmp, tmp2);
6757 }
6758 dead_tmp(tmp2);
9ee6e8bb 6759 if (insn & (1 << 22)) {
5e3f878a 6760 /* smlald, smlsld */
a7812ae4
PB
6761 tmp64 = tcg_temp_new_i64();
6762 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6763 dead_tmp(tmp);
a7812ae4
PB
6764 gen_addq(s, tmp64, rd, rn);
6765 gen_storeq_reg(s, rd, rn, tmp64);
9ee6e8bb 6766 } else {
5e3f878a 6767 /* smuad, smusd, smlad, smlsd */
22478e79 6768 if (rd != 15)
9ee6e8bb 6769 {
22478e79 6770 tmp2 = load_reg(s, rd);
5e3f878a
PB
6771 gen_helper_add_setq(tmp, tmp, tmp2);
6772 dead_tmp(tmp2);
9ee6e8bb 6773 }
22478e79 6774 store_reg(s, rn, tmp);
9ee6e8bb
PB
6775 }
6776 }
6777 break;
6778 case 3:
6779 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6780 switch (op1) {
6781 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6782 ARCH(6);
6783 tmp = load_reg(s, rm);
6784 tmp2 = load_reg(s, rs);
6785 gen_helper_usad8(tmp, tmp, tmp2);
6786 dead_tmp(tmp2);
ded9d295
AZ
6787 if (rd != 15) {
6788 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6789 tcg_gen_add_i32(tmp, tmp, tmp2);
6790 dead_tmp(tmp2);
9ee6e8bb 6791 }
ded9d295 6792 store_reg(s, rn, tmp);
9ee6e8bb
PB
6793 break;
6794 case 0x20: case 0x24: case 0x28: case 0x2c:
6795 /* Bitfield insert/clear. */
6796 ARCH(6T2);
6797 shift = (insn >> 7) & 0x1f;
6798 i = (insn >> 16) & 0x1f;
6799 i = i + 1 - shift;
6800 if (rm == 15) {
5e3f878a
PB
6801 tmp = new_tmp();
6802 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6803 } else {
5e3f878a 6804 tmp = load_reg(s, rm);
9ee6e8bb
PB
6805 }
6806 if (i != 32) {
5e3f878a 6807 tmp2 = load_reg(s, rd);
8f8e3aa4 6808 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6809 dead_tmp(tmp2);
9ee6e8bb 6810 }
5e3f878a 6811 store_reg(s, rd, tmp);
9ee6e8bb
PB
6812 break;
6813 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6814 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6815 ARCH(6T2);
5e3f878a 6816 tmp = load_reg(s, rm);
9ee6e8bb
PB
6817 shift = (insn >> 7) & 0x1f;
6818 i = ((insn >> 16) & 0x1f) + 1;
6819 if (shift + i > 32)
6820 goto illegal_op;
6821 if (i < 32) {
6822 if (op1 & 0x20) {
5e3f878a 6823 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6824 } else {
5e3f878a 6825 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6826 }
6827 }
5e3f878a 6828 store_reg(s, rd, tmp);
9ee6e8bb
PB
6829 break;
6830 default:
6831 goto illegal_op;
6832 }
6833 break;
6834 }
6835 break;
6836 }
6837 do_ldst:
6838 /* Check for undefined extension instructions
6839 * per the ARM Bible IE:
6840 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6841 */
6842 sh = (0xf << 20) | (0xf << 4);
6843 if (op1 == 0x7 && ((insn & sh) == sh))
6844 {
6845 goto illegal_op;
6846 }
6847 /* load/store byte/word */
6848 rn = (insn >> 16) & 0xf;
6849 rd = (insn >> 12) & 0xf;
b0109805 6850 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6851 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6852 if (insn & (1 << 24))
b0109805 6853 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6854 if (insn & (1 << 20)) {
6855 /* load */
9ee6e8bb 6856 if (insn & (1 << 22)) {
b0109805 6857 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6858 } else {
b0109805 6859 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6860 }
9ee6e8bb
PB
6861 } else {
6862 /* store */
b0109805 6863 tmp = load_reg(s, rd);
9ee6e8bb 6864 if (insn & (1 << 22))
b0109805 6865 gen_st8(tmp, tmp2, i);
9ee6e8bb 6866 else
b0109805 6867 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6868 }
6869 if (!(insn & (1 << 24))) {
b0109805
PB
6870 gen_add_data_offset(s, insn, tmp2);
6871 store_reg(s, rn, tmp2);
6872 } else if (insn & (1 << 21)) {
6873 store_reg(s, rn, tmp2);
6874 } else {
6875 dead_tmp(tmp2);
9ee6e8bb
PB
6876 }
6877 if (insn & (1 << 20)) {
6878 /* Complete the load. */
6879 if (rd == 15)
b0109805 6880 gen_bx(s, tmp);
9ee6e8bb 6881 else
b0109805 6882 store_reg(s, rd, tmp);
9ee6e8bb
PB
6883 }
6884 break;
6885 case 0x08:
6886 case 0x09:
6887 {
6888 int j, n, user, loaded_base;
b0109805 6889 TCGv loaded_var;
9ee6e8bb
PB
6890 /* load/store multiple words */
6891 /* XXX: store correct base if write back */
6892 user = 0;
6893 if (insn & (1 << 22)) {
6894 if (IS_USER(s))
6895 goto illegal_op; /* only usable in supervisor mode */
6896
6897 if ((insn & (1 << 15)) == 0)
6898 user = 1;
6899 }
6900 rn = (insn >> 16) & 0xf;
b0109805 6901 addr = load_reg(s, rn);
9ee6e8bb
PB
6902
6903 /* compute total size */
6904 loaded_base = 0;
a50f5b91 6905 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6906 n = 0;
6907 for(i=0;i<16;i++) {
6908 if (insn & (1 << i))
6909 n++;
6910 }
6911 /* XXX: test invalid n == 0 case ? */
6912 if (insn & (1 << 23)) {
6913 if (insn & (1 << 24)) {
6914 /* pre increment */
b0109805 6915 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6916 } else {
6917 /* post increment */
6918 }
6919 } else {
6920 if (insn & (1 << 24)) {
6921 /* pre decrement */
b0109805 6922 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6923 } else {
6924 /* post decrement */
6925 if (n != 1)
b0109805 6926 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6927 }
6928 }
6929 j = 0;
6930 for(i=0;i<16;i++) {
6931 if (insn & (1 << i)) {
6932 if (insn & (1 << 20)) {
6933 /* load */
b0109805 6934 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6935 if (i == 15) {
b0109805 6936 gen_bx(s, tmp);
9ee6e8bb 6937 } else if (user) {
b0109805
PB
6938 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6939 dead_tmp(tmp);
9ee6e8bb 6940 } else if (i == rn) {
b0109805 6941 loaded_var = tmp;
9ee6e8bb
PB
6942 loaded_base = 1;
6943 } else {
b0109805 6944 store_reg(s, i, tmp);
9ee6e8bb
PB
6945 }
6946 } else {
6947 /* store */
6948 if (i == 15) {
6949 /* special case: r15 = PC + 8 */
6950 val = (long)s->pc + 4;
b0109805
PB
6951 tmp = new_tmp();
6952 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6953 } else if (user) {
b0109805
PB
6954 tmp = new_tmp();
6955 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6956 } else {
b0109805 6957 tmp = load_reg(s, i);
9ee6e8bb 6958 }
b0109805 6959 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6960 }
6961 j++;
6962 /* no need to add after the last transfer */
6963 if (j != n)
b0109805 6964 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6965 }
6966 }
6967 if (insn & (1 << 21)) {
6968 /* write back */
6969 if (insn & (1 << 23)) {
6970 if (insn & (1 << 24)) {
6971 /* pre increment */
6972 } else {
6973 /* post increment */
b0109805 6974 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6975 }
6976 } else {
6977 if (insn & (1 << 24)) {
6978 /* pre decrement */
6979 if (n != 1)
b0109805 6980 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6981 } else {
6982 /* post decrement */
b0109805 6983 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6984 }
6985 }
b0109805
PB
6986 store_reg(s, rn, addr);
6987 } else {
6988 dead_tmp(addr);
9ee6e8bb
PB
6989 }
6990 if (loaded_base) {
b0109805 6991 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6992 }
6993 if ((insn & (1 << 22)) && !user) {
6994 /* Restore CPSR from SPSR. */
d9ba4830
PB
6995 tmp = load_cpu_field(spsr);
6996 gen_set_cpsr(tmp, 0xffffffff);
6997 dead_tmp(tmp);
9ee6e8bb
PB
6998 s->is_jmp = DISAS_UPDATE;
6999 }
7000 }
7001 break;
7002 case 0xa:
7003 case 0xb:
7004 {
7005 int32_t offset;
7006
7007 /* branch (and link) */
7008 val = (int32_t)s->pc;
7009 if (insn & (1 << 24)) {
5e3f878a
PB
7010 tmp = new_tmp();
7011 tcg_gen_movi_i32(tmp, val);
7012 store_reg(s, 14, tmp);
9ee6e8bb
PB
7013 }
7014 offset = (((int32_t)insn << 8) >> 8);
7015 val += (offset << 2) + 4;
7016 gen_jmp(s, val);
7017 }
7018 break;
7019 case 0xc:
7020 case 0xd:
7021 case 0xe:
7022 /* Coprocessor. */
7023 if (disas_coproc_insn(env, s, insn))
7024 goto illegal_op;
7025 break;
7026 case 0xf:
7027 /* swi */
5e3f878a 7028 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7029 s->is_jmp = DISAS_SWI;
7030 break;
7031 default:
7032 illegal_op:
7033 gen_set_condexec(s);
5e3f878a 7034 gen_set_pc_im(s->pc - 4);
d9ba4830 7035 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7036 s->is_jmp = DISAS_JUMP;
7037 break;
7038 }
7039 }
7040}
7041
/* Return nonzero (true) when OP encodes a Thumb-2 logical data-processing
   operation.  Opcodes 0..7 (and, bic, orr, orn, eor, ...) are the logical
   group; 8 and above are the arithmetic group.  Used by the translator to
   decide whether flag updates go through gen_logic_CC.  */
static int
thumb2_logic_op(int op)
{
    if (op < 8) {
        return 1;
    }
    return 0;
}
7048
7049/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7050 then set condition code flags based on the result of the operation.
7051 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7052 to the high bit of T1.
7053 Returns zero if the opcode is valid. */
7054
7055static int
396e467c 7056gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7057{
7058 int logic_cc;
7059
7060 logic_cc = 0;
7061 switch (op) {
7062 case 0: /* and */
396e467c 7063 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7064 logic_cc = conds;
7065 break;
7066 case 1: /* bic */
396e467c 7067 tcg_gen_bic_i32(t0, t0, t1);
9ee6e8bb
PB
7068 logic_cc = conds;
7069 break;
7070 case 2: /* orr */
396e467c 7071 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7072 logic_cc = conds;
7073 break;
7074 case 3: /* orn */
396e467c
FN
7075 tcg_gen_not_i32(t1, t1);
7076 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7077 logic_cc = conds;
7078 break;
7079 case 4: /* eor */
396e467c 7080 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7081 logic_cc = conds;
7082 break;
7083 case 8: /* add */
7084 if (conds)
396e467c 7085 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7086 else
396e467c 7087 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7088 break;
7089 case 10: /* adc */
7090 if (conds)
396e467c 7091 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7092 else
396e467c 7093 gen_adc(t0, t1);
9ee6e8bb
PB
7094 break;
7095 case 11: /* sbc */
7096 if (conds)
396e467c 7097 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7098 else
396e467c 7099 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7100 break;
7101 case 13: /* sub */
7102 if (conds)
396e467c 7103 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7104 else
396e467c 7105 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7106 break;
7107 case 14: /* rsb */
7108 if (conds)
396e467c 7109 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7110 else
396e467c 7111 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7112 break;
7113 default: /* 5, 6, 7, 9, 12, 15. */
7114 return 1;
7115 }
7116 if (logic_cc) {
396e467c 7117 gen_logic_CC(t0);
9ee6e8bb 7118 if (shifter_out)
396e467c 7119 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7120 }
7121 return 0;
7122}
7123
7124/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7125 is not legal. */
7126static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7127{
b0109805 7128 uint32_t insn, imm, shift, offset;
9ee6e8bb 7129 uint32_t rd, rn, rm, rs;
b26eefb6 7130 TCGv tmp;
6ddbc6e4
PB
7131 TCGv tmp2;
7132 TCGv tmp3;
b0109805 7133 TCGv addr;
a7812ae4 7134 TCGv_i64 tmp64;
9ee6e8bb
PB
7135 int op;
7136 int shiftop;
7137 int conds;
7138 int logic_cc;
7139
7140 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7141 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7142 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7143 16-bit instructions to get correct prefetch abort behavior. */
7144 insn = insn_hw1;
7145 if ((insn & (1 << 12)) == 0) {
7146 /* Second half of blx. */
7147 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7148 tmp = load_reg(s, 14);
7149 tcg_gen_addi_i32(tmp, tmp, offset);
7150 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7151
d9ba4830 7152 tmp2 = new_tmp();
b0109805 7153 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7154 store_reg(s, 14, tmp2);
7155 gen_bx(s, tmp);
9ee6e8bb
PB
7156 return 0;
7157 }
7158 if (insn & (1 << 11)) {
7159 /* Second half of bl. */
7160 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7161 tmp = load_reg(s, 14);
6a0d8a1d 7162 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7163
d9ba4830 7164 tmp2 = new_tmp();
b0109805 7165 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7166 store_reg(s, 14, tmp2);
7167 gen_bx(s, tmp);
9ee6e8bb
PB
7168 return 0;
7169 }
7170 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7171 /* Instruction spans a page boundary. Implement it as two
7172 16-bit instructions in case the second half causes an
7173 prefetch abort. */
7174 offset = ((int32_t)insn << 21) >> 9;
396e467c 7175 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7176 return 0;
7177 }
7178 /* Fall through to 32-bit decode. */
7179 }
7180
7181 insn = lduw_code(s->pc);
7182 s->pc += 2;
7183 insn |= (uint32_t)insn_hw1 << 16;
7184
7185 if ((insn & 0xf800e800) != 0xf000e800) {
7186 ARCH(6T2);
7187 }
7188
7189 rn = (insn >> 16) & 0xf;
7190 rs = (insn >> 12) & 0xf;
7191 rd = (insn >> 8) & 0xf;
7192 rm = insn & 0xf;
7193 switch ((insn >> 25) & 0xf) {
7194 case 0: case 1: case 2: case 3:
7195 /* 16-bit instructions. Should never happen. */
7196 abort();
7197 case 4:
7198 if (insn & (1 << 22)) {
7199 /* Other load/store, table branch. */
7200 if (insn & 0x01200000) {
7201 /* Load/store doubleword. */
7202 if (rn == 15) {
b0109805
PB
7203 addr = new_tmp();
7204 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7205 } else {
b0109805 7206 addr = load_reg(s, rn);
9ee6e8bb
PB
7207 }
7208 offset = (insn & 0xff) * 4;
7209 if ((insn & (1 << 23)) == 0)
7210 offset = -offset;
7211 if (insn & (1 << 24)) {
b0109805 7212 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7213 offset = 0;
7214 }
7215 if (insn & (1 << 20)) {
7216 /* ldrd */
b0109805
PB
7217 tmp = gen_ld32(addr, IS_USER(s));
7218 store_reg(s, rs, tmp);
7219 tcg_gen_addi_i32(addr, addr, 4);
7220 tmp = gen_ld32(addr, IS_USER(s));
7221 store_reg(s, rd, tmp);
9ee6e8bb
PB
7222 } else {
7223 /* strd */
b0109805
PB
7224 tmp = load_reg(s, rs);
7225 gen_st32(tmp, addr, IS_USER(s));
7226 tcg_gen_addi_i32(addr, addr, 4);
7227 tmp = load_reg(s, rd);
7228 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7229 }
7230 if (insn & (1 << 21)) {
7231 /* Base writeback. */
7232 if (rn == 15)
7233 goto illegal_op;
b0109805
PB
7234 tcg_gen_addi_i32(addr, addr, offset - 4);
7235 store_reg(s, rn, addr);
7236 } else {
7237 dead_tmp(addr);
9ee6e8bb
PB
7238 }
7239 } else if ((insn & (1 << 23)) == 0) {
7240 /* Load/store exclusive word. */
2c0262af 7241 gen_movl_T1_reg(s, rn);
72f1c62f 7242 addr = cpu_T[1];
2c0262af 7243 if (insn & (1 << 20)) {
8f8e3aa4
PB
7244 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7245 tmp = gen_ld32(addr, IS_USER(s));
7246 store_reg(s, rd, tmp);
9ee6e8bb 7247 } else {
8f8e3aa4
PB
7248 int label = gen_new_label();
7249 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
7250 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7251 0, label);
8f8e3aa4
PB
7252 tmp = load_reg(s, rs);
7253 gen_st32(tmp, cpu_T[1], IS_USER(s));
7254 gen_set_label(label);
7255 gen_movl_reg_T0(s, rd);
9ee6e8bb 7256 }
9ee6e8bb
PB
7257 } else if ((insn & (1 << 6)) == 0) {
7258 /* Table Branch. */
7259 if (rn == 15) {
b0109805
PB
7260 addr = new_tmp();
7261 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7262 } else {
b0109805 7263 addr = load_reg(s, rn);
9ee6e8bb 7264 }
b26eefb6 7265 tmp = load_reg(s, rm);
b0109805 7266 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7267 if (insn & (1 << 4)) {
7268 /* tbh */
b0109805 7269 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7270 dead_tmp(tmp);
b0109805 7271 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7272 } else { /* tbb */
b26eefb6 7273 dead_tmp(tmp);
b0109805 7274 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7275 }
b0109805
PB
7276 dead_tmp(addr);
7277 tcg_gen_shli_i32(tmp, tmp, 1);
7278 tcg_gen_addi_i32(tmp, tmp, s->pc);
7279 store_reg(s, 15, tmp);
9ee6e8bb
PB
7280 } else {
7281 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7282 /* ??? These are not really atomic. However we know
7283 we never have multiple CPUs running in parallel,
7284 so it is good enough. */
9ee6e8bb 7285 op = (insn >> 4) & 0x3;
8f8e3aa4
PB
7286 /* Must use a global reg for the address because we have
7287 a conditional branch in the store instruction. */
9ee6e8bb 7288 gen_movl_T1_reg(s, rn);
8f8e3aa4 7289 addr = cpu_T[1];
9ee6e8bb 7290 if (insn & (1 << 20)) {
8f8e3aa4 7291 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7292 switch (op) {
7293 case 0:
8f8e3aa4 7294 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7295 break;
2c0262af 7296 case 1:
8f8e3aa4 7297 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7298 break;
9ee6e8bb 7299 case 3:
8f8e3aa4
PB
7300 tmp = gen_ld32(addr, IS_USER(s));
7301 tcg_gen_addi_i32(addr, addr, 4);
7302 tmp2 = gen_ld32(addr, IS_USER(s));
7303 store_reg(s, rd, tmp2);
2c0262af
FB
7304 break;
7305 default:
9ee6e8bb
PB
7306 goto illegal_op;
7307 }
8f8e3aa4 7308 store_reg(s, rs, tmp);
9ee6e8bb 7309 } else {
8f8e3aa4
PB
7310 int label = gen_new_label();
7311 /* Must use a global that is not killed by the branch. */
7312 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a 7313 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
8f8e3aa4 7314 tmp = load_reg(s, rs);
9ee6e8bb
PB
7315 switch (op) {
7316 case 0:
8f8e3aa4 7317 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7318 break;
7319 case 1:
8f8e3aa4 7320 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7321 break;
2c0262af 7322 case 3:
8f8e3aa4
PB
7323 gen_st32(tmp, addr, IS_USER(s));
7324 tcg_gen_addi_i32(addr, addr, 4);
7325 tmp = load_reg(s, rd);
7326 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7327 break;
9ee6e8bb
PB
7328 default:
7329 goto illegal_op;
2c0262af 7330 }
8f8e3aa4 7331 gen_set_label(label);
9ee6e8bb
PB
7332 gen_movl_reg_T0(s, rm);
7333 }
7334 }
7335 } else {
7336 /* Load/store multiple, RFE, SRS. */
7337 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7338 /* Not available in user mode. */
b0109805 7339 if (IS_USER(s))
9ee6e8bb
PB
7340 goto illegal_op;
7341 if (insn & (1 << 20)) {
7342 /* rfe */
b0109805
PB
7343 addr = load_reg(s, rn);
7344 if ((insn & (1 << 24)) == 0)
7345 tcg_gen_addi_i32(addr, addr, -8);
7346 /* Load PC into tmp and CPSR into tmp2. */
7347 tmp = gen_ld32(addr, 0);
7348 tcg_gen_addi_i32(addr, addr, 4);
7349 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7350 if (insn & (1 << 21)) {
7351 /* Base writeback. */
b0109805
PB
7352 if (insn & (1 << 24)) {
7353 tcg_gen_addi_i32(addr, addr, 4);
7354 } else {
7355 tcg_gen_addi_i32(addr, addr, -4);
7356 }
7357 store_reg(s, rn, addr);
7358 } else {
7359 dead_tmp(addr);
9ee6e8bb 7360 }
b0109805 7361 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7362 } else {
7363 /* srs */
7364 op = (insn & 0x1f);
7365 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7366 addr = load_reg(s, 13);
9ee6e8bb 7367 } else {
b0109805
PB
7368 addr = new_tmp();
7369 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7370 }
7371 if ((insn & (1 << 24)) == 0) {
b0109805 7372 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7373 }
b0109805
PB
7374 tmp = load_reg(s, 14);
7375 gen_st32(tmp, addr, 0);
7376 tcg_gen_addi_i32(addr, addr, 4);
7377 tmp = new_tmp();
7378 gen_helper_cpsr_read(tmp);
7379 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7380 if (insn & (1 << 21)) {
7381 if ((insn & (1 << 24)) == 0) {
b0109805 7382 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7383 } else {
b0109805 7384 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7385 }
7386 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7387 store_reg(s, 13, addr);
9ee6e8bb 7388 } else {
b0109805
PB
7389 gen_helper_set_r13_banked(cpu_env,
7390 tcg_const_i32(op), addr);
9ee6e8bb 7391 }
b0109805
PB
7392 } else {
7393 dead_tmp(addr);
9ee6e8bb
PB
7394 }
7395 }
7396 } else {
7397 int i;
7398 /* Load/store multiple. */
b0109805 7399 addr = load_reg(s, rn);
9ee6e8bb
PB
7400 offset = 0;
7401 for (i = 0; i < 16; i++) {
7402 if (insn & (1 << i))
7403 offset += 4;
7404 }
7405 if (insn & (1 << 24)) {
b0109805 7406 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7407 }
7408
7409 for (i = 0; i < 16; i++) {
7410 if ((insn & (1 << i)) == 0)
7411 continue;
7412 if (insn & (1 << 20)) {
7413 /* Load. */
b0109805 7414 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7415 if (i == 15) {
b0109805 7416 gen_bx(s, tmp);
9ee6e8bb 7417 } else {
b0109805 7418 store_reg(s, i, tmp);
9ee6e8bb
PB
7419 }
7420 } else {
7421 /* Store. */
b0109805
PB
7422 tmp = load_reg(s, i);
7423 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7424 }
b0109805 7425 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7426 }
7427 if (insn & (1 << 21)) {
7428 /* Base register writeback. */
7429 if (insn & (1 << 24)) {
b0109805 7430 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7431 }
7432 /* Fault if writeback register is in register list. */
7433 if (insn & (1 << rn))
7434 goto illegal_op;
b0109805
PB
7435 store_reg(s, rn, addr);
7436 } else {
7437 dead_tmp(addr);
9ee6e8bb
PB
7438 }
7439 }
7440 }
7441 break;
7442 case 5: /* Data processing register constant shift. */
7443 if (rn == 15)
7444 gen_op_movl_T0_im(0);
7445 else
7446 gen_movl_T0_reg(s, rn);
7447 gen_movl_T1_reg(s, rm);
7448 op = (insn >> 21) & 0xf;
7449 shiftop = (insn >> 4) & 3;
7450 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7451 conds = (insn & (1 << 20)) != 0;
7452 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 7453 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
396e467c 7454 if (gen_thumb2_data_op(s, op, conds, 0, cpu_T[0], cpu_T[1]))
9ee6e8bb
PB
7455 goto illegal_op;
7456 if (rd != 15)
7457 gen_movl_reg_T0(s, rd);
7458 break;
7459 case 13: /* Misc data processing. */
7460 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7461 if (op < 4 && (insn & 0xf000) != 0xf000)
7462 goto illegal_op;
7463 switch (op) {
7464 case 0: /* Register controlled shift. */
8984bd2e
PB
7465 tmp = load_reg(s, rn);
7466 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7467 if ((insn & 0x70) != 0)
7468 goto illegal_op;
7469 op = (insn >> 21) & 3;
8984bd2e
PB
7470 logic_cc = (insn & (1 << 20)) != 0;
7471 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7472 if (logic_cc)
7473 gen_logic_CC(tmp);
21aeb343 7474 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7475 break;
7476 case 1: /* Sign/zero extend. */
5e3f878a 7477 tmp = load_reg(s, rm);
9ee6e8bb
PB
7478 shift = (insn >> 4) & 3;
7479 /* ??? In many cases it's not necessary to do a
7480 rotate, a shift is sufficient. */
7481 if (shift != 0)
5e3f878a 7482 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7483 op = (insn >> 20) & 7;
7484 switch (op) {
5e3f878a
PB
7485 case 0: gen_sxth(tmp); break;
7486 case 1: gen_uxth(tmp); break;
7487 case 2: gen_sxtb16(tmp); break;
7488 case 3: gen_uxtb16(tmp); break;
7489 case 4: gen_sxtb(tmp); break;
7490 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7491 default: goto illegal_op;
7492 }
7493 if (rn != 15) {
5e3f878a 7494 tmp2 = load_reg(s, rn);
9ee6e8bb 7495 if ((op >> 1) == 1) {
5e3f878a 7496 gen_add16(tmp, tmp2);
9ee6e8bb 7497 } else {
5e3f878a
PB
7498 tcg_gen_add_i32(tmp, tmp, tmp2);
7499 dead_tmp(tmp2);
9ee6e8bb
PB
7500 }
7501 }
5e3f878a 7502 store_reg(s, rd, tmp);
9ee6e8bb
PB
7503 break;
7504 case 2: /* SIMD add/subtract. */
7505 op = (insn >> 20) & 7;
7506 shift = (insn >> 4) & 7;
7507 if ((op & 3) == 3 || (shift & 3) == 3)
7508 goto illegal_op;
6ddbc6e4
PB
7509 tmp = load_reg(s, rn);
7510 tmp2 = load_reg(s, rm);
7511 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7512 dead_tmp(tmp2);
7513 store_reg(s, rd, tmp);
9ee6e8bb
PB
7514 break;
7515 case 3: /* Other data processing. */
7516 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7517 if (op < 4) {
7518 /* Saturating add/subtract. */
d9ba4830
PB
7519 tmp = load_reg(s, rn);
7520 tmp2 = load_reg(s, rm);
9ee6e8bb 7521 if (op & 2)
d9ba4830 7522 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7523 if (op & 1)
d9ba4830 7524 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7525 else
d9ba4830
PB
7526 gen_helper_add_saturate(tmp, tmp, tmp2);
7527 dead_tmp(tmp2);
9ee6e8bb 7528 } else {
d9ba4830 7529 tmp = load_reg(s, rn);
9ee6e8bb
PB
7530 switch (op) {
7531 case 0x0a: /* rbit */
d9ba4830 7532 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7533 break;
7534 case 0x08: /* rev */
66896cb8 7535 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7536 break;
7537 case 0x09: /* rev16 */
d9ba4830 7538 gen_rev16(tmp);
9ee6e8bb
PB
7539 break;
7540 case 0x0b: /* revsh */
d9ba4830 7541 gen_revsh(tmp);
9ee6e8bb
PB
7542 break;
7543 case 0x10: /* sel */
d9ba4830 7544 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7545 tmp3 = new_tmp();
7546 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7547 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7548 dead_tmp(tmp3);
d9ba4830 7549 dead_tmp(tmp2);
9ee6e8bb
PB
7550 break;
7551 case 0x18: /* clz */
d9ba4830 7552 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7553 break;
7554 default:
7555 goto illegal_op;
7556 }
7557 }
d9ba4830 7558 store_reg(s, rd, tmp);
9ee6e8bb
PB
7559 break;
7560 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7561 op = (insn >> 4) & 0xf;
d9ba4830
PB
7562 tmp = load_reg(s, rn);
7563 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7564 switch ((insn >> 20) & 7) {
7565 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7566 tcg_gen_mul_i32(tmp, tmp, tmp2);
7567 dead_tmp(tmp2);
9ee6e8bb 7568 if (rs != 15) {
d9ba4830 7569 tmp2 = load_reg(s, rs);
9ee6e8bb 7570 if (op)
d9ba4830 7571 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7572 else
d9ba4830
PB
7573 tcg_gen_add_i32(tmp, tmp, tmp2);
7574 dead_tmp(tmp2);
9ee6e8bb 7575 }
9ee6e8bb
PB
7576 break;
7577 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7578 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7579 dead_tmp(tmp2);
9ee6e8bb 7580 if (rs != 15) {
d9ba4830
PB
7581 tmp2 = load_reg(s, rs);
7582 gen_helper_add_setq(tmp, tmp, tmp2);
7583 dead_tmp(tmp2);
9ee6e8bb 7584 }
9ee6e8bb
PB
7585 break;
7586 case 2: /* Dual multiply add. */
7587 case 4: /* Dual multiply subtract. */
7588 if (op)
d9ba4830
PB
7589 gen_swap_half(tmp2);
7590 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7591 /* This addition cannot overflow. */
7592 if (insn & (1 << 22)) {
d9ba4830 7593 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7594 } else {
d9ba4830 7595 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7596 }
d9ba4830 7597 dead_tmp(tmp2);
9ee6e8bb
PB
7598 if (rs != 15)
7599 {
d9ba4830
PB
7600 tmp2 = load_reg(s, rs);
7601 gen_helper_add_setq(tmp, tmp, tmp2);
7602 dead_tmp(tmp2);
9ee6e8bb 7603 }
9ee6e8bb
PB
7604 break;
7605 case 3: /* 32 * 16 -> 32msb */
7606 if (op)
d9ba4830 7607 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7608 else
d9ba4830 7609 gen_sxth(tmp2);
a7812ae4
PB
7610 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7611 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7612 tmp = new_tmp();
a7812ae4 7613 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb
PB
7614 if (rs != 15)
7615 {
d9ba4830
PB
7616 tmp2 = load_reg(s, rs);
7617 gen_helper_add_setq(tmp, tmp, tmp2);
7618 dead_tmp(tmp2);
9ee6e8bb 7619 }
9ee6e8bb
PB
7620 break;
7621 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7622 gen_imull(tmp, tmp2);
7623 if (insn & (1 << 5)) {
7624 gen_roundqd(tmp, tmp2);
7625 dead_tmp(tmp2);
7626 } else {
7627 dead_tmp(tmp);
7628 tmp = tmp2;
7629 }
9ee6e8bb 7630 if (rs != 15) {
d9ba4830 7631 tmp2 = load_reg(s, rs);
9ee6e8bb 7632 if (insn & (1 << 21)) {
d9ba4830 7633 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7634 } else {
d9ba4830 7635 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7636 }
d9ba4830 7637 dead_tmp(tmp2);
2c0262af 7638 }
9ee6e8bb
PB
7639 break;
7640 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7641 gen_helper_usad8(tmp, tmp, tmp2);
7642 dead_tmp(tmp2);
9ee6e8bb 7643 if (rs != 15) {
d9ba4830
PB
7644 tmp2 = load_reg(s, rs);
7645 tcg_gen_add_i32(tmp, tmp, tmp2);
7646 dead_tmp(tmp2);
5fd46862 7647 }
9ee6e8bb 7648 break;
2c0262af 7649 }
d9ba4830 7650 store_reg(s, rd, tmp);
2c0262af 7651 break;
9ee6e8bb
PB
7652 case 6: case 7: /* 64-bit multiply, Divide. */
7653 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7654 tmp = load_reg(s, rn);
7655 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7656 if ((op & 0x50) == 0x10) {
7657 /* sdiv, udiv */
7658 if (!arm_feature(env, ARM_FEATURE_DIV))
7659 goto illegal_op;
7660 if (op & 0x20)
5e3f878a 7661 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7662 else
5e3f878a
PB
7663 gen_helper_sdiv(tmp, tmp, tmp2);
7664 dead_tmp(tmp2);
7665 store_reg(s, rd, tmp);
9ee6e8bb
PB
7666 } else if ((op & 0xe) == 0xc) {
7667 /* Dual multiply accumulate long. */
7668 if (op & 1)
5e3f878a
PB
7669 gen_swap_half(tmp2);
7670 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7671 if (op & 0x10) {
5e3f878a 7672 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7673 } else {
5e3f878a 7674 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7675 }
5e3f878a 7676 dead_tmp(tmp2);
a7812ae4
PB
7677 /* BUGFIX */
7678 tmp64 = tcg_temp_new_i64();
7679 tcg_gen_ext_i32_i64(tmp64, tmp);
7680 dead_tmp(tmp);
7681 gen_addq(s, tmp64, rs, rd);
7682 gen_storeq_reg(s, rs, rd, tmp64);
2c0262af 7683 } else {
9ee6e8bb
PB
7684 if (op & 0x20) {
7685 /* Unsigned 64-bit multiply */
a7812ae4 7686 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7687 } else {
9ee6e8bb
PB
7688 if (op & 8) {
7689 /* smlalxy */
5e3f878a
PB
7690 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7691 dead_tmp(tmp2);
a7812ae4
PB
7692 tmp64 = tcg_temp_new_i64();
7693 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7694 dead_tmp(tmp);
9ee6e8bb
PB
7695 } else {
7696 /* Signed 64-bit multiply */
a7812ae4 7697 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7698 }
b5ff1b31 7699 }
9ee6e8bb
PB
7700 if (op & 4) {
7701 /* umaal */
a7812ae4
PB
7702 gen_addq_lo(s, tmp64, rs);
7703 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7704 } else if (op & 0x40) {
7705 /* 64-bit accumulate. */
a7812ae4 7706 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7707 }
a7812ae4 7708 gen_storeq_reg(s, rs, rd, tmp64);
5fd46862 7709 }
2c0262af 7710 break;
9ee6e8bb
PB
7711 }
7712 break;
7713 case 6: case 7: case 14: case 15:
7714 /* Coprocessor. */
7715 if (((insn >> 24) & 3) == 3) {
7716 /* Translate into the equivalent ARM encoding. */
7717 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7718 if (disas_neon_data_insn(env, s, insn))
7719 goto illegal_op;
7720 } else {
7721 if (insn & (1 << 28))
7722 goto illegal_op;
7723 if (disas_coproc_insn (env, s, insn))
7724 goto illegal_op;
7725 }
7726 break;
7727 case 8: case 9: case 10: case 11:
7728 if (insn & (1 << 15)) {
7729 /* Branches, misc control. */
7730 if (insn & 0x5000) {
7731 /* Unconditional branch. */
7732 /* signextend(hw1[10:0]) -> offset[:12]. */
7733 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7734 /* hw1[10:0] -> offset[11:1]. */
7735 offset |= (insn & 0x7ff) << 1;
7736 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7737 offset[24:22] already have the same value because of the
7738 sign extension above. */
7739 offset ^= ((~insn) & (1 << 13)) << 10;
7740 offset ^= ((~insn) & (1 << 11)) << 11;
7741
9ee6e8bb
PB
7742 if (insn & (1 << 14)) {
7743 /* Branch and link. */
b0109805 7744 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7745 gen_movl_reg_T1(s, 14);
b5ff1b31 7746 }
3b46e624 7747
b0109805 7748 offset += s->pc;
9ee6e8bb
PB
7749 if (insn & (1 << 12)) {
7750 /* b/bl */
b0109805 7751 gen_jmp(s, offset);
9ee6e8bb
PB
7752 } else {
7753 /* blx */
b0109805
PB
7754 offset &= ~(uint32_t)2;
7755 gen_bx_im(s, offset);
2c0262af 7756 }
9ee6e8bb
PB
7757 } else if (((insn >> 23) & 7) == 7) {
7758 /* Misc control */
7759 if (insn & (1 << 13))
7760 goto illegal_op;
7761
7762 if (insn & (1 << 26)) {
7763 /* Secure monitor call (v6Z) */
7764 goto illegal_op; /* not implemented. */
2c0262af 7765 } else {
9ee6e8bb
PB
7766 op = (insn >> 20) & 7;
7767 switch (op) {
7768 case 0: /* msr cpsr. */
7769 if (IS_M(env)) {
8984bd2e
PB
7770 tmp = load_reg(s, rn);
7771 addr = tcg_const_i32(insn & 0xff);
7772 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7773 gen_lookup_tb(s);
7774 break;
7775 }
7776 /* fall through */
7777 case 1: /* msr spsr. */
7778 if (IS_M(env))
7779 goto illegal_op;
2fbac54b
FN
7780 tmp = load_reg(s, rn);
7781 if (gen_set_psr(s,
9ee6e8bb 7782 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 7783 op == 1, tmp))
9ee6e8bb
PB
7784 goto illegal_op;
7785 break;
7786 case 2: /* cps, nop-hint. */
7787 if (((insn >> 8) & 7) == 0) {
7788 gen_nop_hint(s, insn & 0xff);
7789 }
7790 /* Implemented as NOP in user mode. */
7791 if (IS_USER(s))
7792 break;
7793 offset = 0;
7794 imm = 0;
7795 if (insn & (1 << 10)) {
7796 if (insn & (1 << 7))
7797 offset |= CPSR_A;
7798 if (insn & (1 << 6))
7799 offset |= CPSR_I;
7800 if (insn & (1 << 5))
7801 offset |= CPSR_F;
7802 if (insn & (1 << 9))
7803 imm = CPSR_A | CPSR_I | CPSR_F;
7804 }
7805 if (insn & (1 << 8)) {
7806 offset |= 0x1f;
7807 imm |= (insn & 0x1f);
7808 }
7809 if (offset) {
2fbac54b 7810 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
7811 }
7812 break;
7813 case 3: /* Special control operations. */
7814 op = (insn >> 4) & 0xf;
7815 switch (op) {
7816 case 2: /* clrex */
8f8e3aa4 7817 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7818 break;
7819 case 4: /* dsb */
7820 case 5: /* dmb */
7821 case 6: /* isb */
7822 /* These execute as NOPs. */
7823 ARCH(7);
7824 break;
7825 default:
7826 goto illegal_op;
7827 }
7828 break;
7829 case 4: /* bxj */
7830 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7831 tmp = load_reg(s, rn);
7832 gen_bx(s, tmp);
9ee6e8bb
PB
7833 break;
7834 case 5: /* Exception return. */
7835 /* Unpredictable in user mode. */
7836 goto illegal_op;
7837 case 6: /* mrs cpsr. */
8984bd2e 7838 tmp = new_tmp();
9ee6e8bb 7839 if (IS_M(env)) {
8984bd2e
PB
7840 addr = tcg_const_i32(insn & 0xff);
7841 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7842 } else {
8984bd2e 7843 gen_helper_cpsr_read(tmp);
9ee6e8bb 7844 }
8984bd2e 7845 store_reg(s, rd, tmp);
9ee6e8bb
PB
7846 break;
7847 case 7: /* mrs spsr. */
7848 /* Not accessible in user mode. */
7849 if (IS_USER(s) || IS_M(env))
7850 goto illegal_op;
d9ba4830
PB
7851 tmp = load_cpu_field(spsr);
7852 store_reg(s, rd, tmp);
9ee6e8bb 7853 break;
2c0262af
FB
7854 }
7855 }
9ee6e8bb
PB
7856 } else {
7857 /* Conditional branch. */
7858 op = (insn >> 22) & 0xf;
7859 /* Generate a conditional jump to next instruction. */
7860 s->condlabel = gen_new_label();
d9ba4830 7861 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7862 s->condjmp = 1;
7863
7864 /* offset[11:1] = insn[10:0] */
7865 offset = (insn & 0x7ff) << 1;
7866 /* offset[17:12] = insn[21:16]. */
7867 offset |= (insn & 0x003f0000) >> 4;
7868 /* offset[31:20] = insn[26]. */
7869 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7870 /* offset[18] = insn[13]. */
7871 offset |= (insn & (1 << 13)) << 5;
7872 /* offset[19] = insn[11]. */
7873 offset |= (insn & (1 << 11)) << 8;
7874
7875 /* jump to the offset */
b0109805 7876 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7877 }
7878 } else {
7879 /* Data processing immediate. */
7880 if (insn & (1 << 25)) {
7881 if (insn & (1 << 24)) {
7882 if (insn & (1 << 20))
7883 goto illegal_op;
7884 /* Bitfield/Saturate. */
7885 op = (insn >> 21) & 7;
7886 imm = insn & 0x1f;
7887 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7888 if (rn == 15) {
7889 tmp = new_tmp();
7890 tcg_gen_movi_i32(tmp, 0);
7891 } else {
7892 tmp = load_reg(s, rn);
7893 }
9ee6e8bb
PB
7894 switch (op) {
7895 case 2: /* Signed bitfield extract. */
7896 imm++;
7897 if (shift + imm > 32)
7898 goto illegal_op;
7899 if (imm < 32)
6ddbc6e4 7900 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7901 break;
7902 case 6: /* Unsigned bitfield extract. */
7903 imm++;
7904 if (shift + imm > 32)
7905 goto illegal_op;
7906 if (imm < 32)
6ddbc6e4 7907 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7908 break;
7909 case 3: /* Bitfield insert/clear. */
7910 if (imm < shift)
7911 goto illegal_op;
7912 imm = imm + 1 - shift;
7913 if (imm != 32) {
6ddbc6e4 7914 tmp2 = load_reg(s, rd);
8f8e3aa4 7915 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7916 dead_tmp(tmp2);
9ee6e8bb
PB
7917 }
7918 break;
7919 case 7:
7920 goto illegal_op;
7921 default: /* Saturate. */
9ee6e8bb
PB
7922 if (shift) {
7923 if (op & 1)
6ddbc6e4 7924 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7925 else
6ddbc6e4 7926 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7927 }
6ddbc6e4 7928 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7929 if (op & 4) {
7930 /* Unsigned. */
9ee6e8bb 7931 if ((op & 1) && shift == 0)
6ddbc6e4 7932 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7933 else
6ddbc6e4 7934 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7935 } else {
9ee6e8bb 7936 /* Signed. */
9ee6e8bb 7937 if ((op & 1) && shift == 0)
6ddbc6e4 7938 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7939 else
6ddbc6e4 7940 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7941 }
9ee6e8bb 7942 break;
2c0262af 7943 }
6ddbc6e4 7944 store_reg(s, rd, tmp);
9ee6e8bb
PB
7945 } else {
7946 imm = ((insn & 0x04000000) >> 15)
7947 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7948 if (insn & (1 << 22)) {
7949 /* 16-bit immediate. */
7950 imm |= (insn >> 4) & 0xf000;
7951 if (insn & (1 << 23)) {
7952 /* movt */
5e3f878a 7953 tmp = load_reg(s, rd);
86831435 7954 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7955 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7956 } else {
9ee6e8bb 7957 /* movw */
5e3f878a
PB
7958 tmp = new_tmp();
7959 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7960 }
7961 } else {
9ee6e8bb
PB
7962 /* Add/sub 12-bit immediate. */
7963 if (rn == 15) {
b0109805 7964 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7965 if (insn & (1 << 23))
b0109805 7966 offset -= imm;
9ee6e8bb 7967 else
b0109805 7968 offset += imm;
5e3f878a
PB
7969 tmp = new_tmp();
7970 tcg_gen_movi_i32(tmp, offset);
2c0262af 7971 } else {
5e3f878a 7972 tmp = load_reg(s, rn);
9ee6e8bb 7973 if (insn & (1 << 23))
5e3f878a 7974 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7975 else
5e3f878a 7976 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7977 }
9ee6e8bb 7978 }
5e3f878a 7979 store_reg(s, rd, tmp);
191abaa2 7980 }
9ee6e8bb
PB
7981 } else {
7982 int shifter_out = 0;
7983 /* modified 12-bit immediate. */
7984 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7985 imm = (insn & 0xff);
7986 switch (shift) {
7987 case 0: /* XY */
7988 /* Nothing to do. */
7989 break;
7990 case 1: /* 00XY00XY */
7991 imm |= imm << 16;
7992 break;
7993 case 2: /* XY00XY00 */
7994 imm |= imm << 16;
7995 imm <<= 8;
7996 break;
7997 case 3: /* XYXYXYXY */
7998 imm |= imm << 16;
7999 imm |= imm << 8;
8000 break;
8001 default: /* Rotated constant. */
8002 shift = (shift << 1) | (imm >> 7);
8003 imm |= 0x80;
8004 imm = imm << (32 - shift);
8005 shifter_out = 1;
8006 break;
b5ff1b31 8007 }
9ee6e8bb
PB
8008 gen_op_movl_T1_im(imm);
8009 rn = (insn >> 16) & 0xf;
8010 if (rn == 15)
8011 gen_op_movl_T0_im(0);
8012 else
8013 gen_movl_T0_reg(s, rn);
8014 op = (insn >> 21) & 0xf;
8015 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
396e467c 8016 shifter_out, cpu_T[0], cpu_T[1]))
9ee6e8bb
PB
8017 goto illegal_op;
8018 rd = (insn >> 8) & 0xf;
8019 if (rd != 15) {
8020 gen_movl_reg_T0(s, rd);
2c0262af 8021 }
2c0262af 8022 }
9ee6e8bb
PB
8023 }
8024 break;
8025 case 12: /* Load/store single data item. */
8026 {
8027 int postinc = 0;
8028 int writeback = 0;
b0109805 8029 int user;
9ee6e8bb
PB
8030 if ((insn & 0x01100000) == 0x01000000) {
8031 if (disas_neon_ls_insn(env, s, insn))
c1713132 8032 goto illegal_op;
9ee6e8bb
PB
8033 break;
8034 }
b0109805 8035 user = IS_USER(s);
9ee6e8bb 8036 if (rn == 15) {
b0109805 8037 addr = new_tmp();
9ee6e8bb
PB
8038 /* PC relative. */
8039 /* s->pc has already been incremented by 4. */
8040 imm = s->pc & 0xfffffffc;
8041 if (insn & (1 << 23))
8042 imm += insn & 0xfff;
8043 else
8044 imm -= insn & 0xfff;
b0109805 8045 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8046 } else {
b0109805 8047 addr = load_reg(s, rn);
9ee6e8bb
PB
8048 if (insn & (1 << 23)) {
8049 /* Positive offset. */
8050 imm = insn & 0xfff;
b0109805 8051 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8052 } else {
8053 op = (insn >> 8) & 7;
8054 imm = insn & 0xff;
8055 switch (op) {
8056 case 0: case 8: /* Shifted Register. */
8057 shift = (insn >> 4) & 0xf;
8058 if (shift > 3)
18c9b560 8059 goto illegal_op;
b26eefb6 8060 tmp = load_reg(s, rm);
9ee6e8bb 8061 if (shift)
b26eefb6 8062 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8063 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8064 dead_tmp(tmp);
9ee6e8bb
PB
8065 break;
8066 case 4: /* Negative offset. */
b0109805 8067 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
8068 break;
8069 case 6: /* User privilege. */
b0109805
PB
8070 tcg_gen_addi_i32(addr, addr, imm);
8071 user = 1;
9ee6e8bb
PB
8072 break;
8073 case 1: /* Post-decrement. */
8074 imm = -imm;
8075 /* Fall through. */
8076 case 3: /* Post-increment. */
9ee6e8bb
PB
8077 postinc = 1;
8078 writeback = 1;
8079 break;
8080 case 5: /* Pre-decrement. */
8081 imm = -imm;
8082 /* Fall through. */
8083 case 7: /* Pre-increment. */
b0109805 8084 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8085 writeback = 1;
8086 break;
8087 default:
b7bcbe95 8088 goto illegal_op;
9ee6e8bb
PB
8089 }
8090 }
8091 }
8092 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8093 if (insn & (1 << 20)) {
8094 /* Load. */
8095 if (rs == 15 && op != 2) {
8096 if (op & 2)
b5ff1b31 8097 goto illegal_op;
9ee6e8bb
PB
8098 /* Memory hint. Implemented as NOP. */
8099 } else {
8100 switch (op) {
b0109805
PB
8101 case 0: tmp = gen_ld8u(addr, user); break;
8102 case 4: tmp = gen_ld8s(addr, user); break;
8103 case 1: tmp = gen_ld16u(addr, user); break;
8104 case 5: tmp = gen_ld16s(addr, user); break;
8105 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8106 default: goto illegal_op;
8107 }
8108 if (rs == 15) {
b0109805 8109 gen_bx(s, tmp);
9ee6e8bb 8110 } else {
b0109805 8111 store_reg(s, rs, tmp);
9ee6e8bb
PB
8112 }
8113 }
8114 } else {
8115 /* Store. */
8116 if (rs == 15)
b7bcbe95 8117 goto illegal_op;
b0109805 8118 tmp = load_reg(s, rs);
9ee6e8bb 8119 switch (op) {
b0109805
PB
8120 case 0: gen_st8(tmp, addr, user); break;
8121 case 1: gen_st16(tmp, addr, user); break;
8122 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8123 default: goto illegal_op;
b7bcbe95 8124 }
2c0262af 8125 }
9ee6e8bb 8126 if (postinc)
b0109805
PB
8127 tcg_gen_addi_i32(addr, addr, imm);
8128 if (writeback) {
8129 store_reg(s, rn, addr);
8130 } else {
8131 dead_tmp(addr);
8132 }
9ee6e8bb
PB
8133 }
8134 break;
8135 default:
8136 goto illegal_op;
2c0262af 8137 }
9ee6e8bb
PB
8138 return 0;
8139illegal_op:
8140 return 1;
2c0262af
FB
8141}
8142
9ee6e8bb 8143static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8144{
8145 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8146 int32_t offset;
8147 int i;
b26eefb6 8148 TCGv tmp;
d9ba4830 8149 TCGv tmp2;
b0109805 8150 TCGv addr;
99c475ab 8151
9ee6e8bb
PB
8152 if (s->condexec_mask) {
8153 cond = s->condexec_cond;
8154 s->condlabel = gen_new_label();
d9ba4830 8155 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8156 s->condjmp = 1;
8157 }
8158
b5ff1b31 8159 insn = lduw_code(s->pc);
99c475ab 8160 s->pc += 2;
b5ff1b31 8161
99c475ab
FB
8162 switch (insn >> 12) {
8163 case 0: case 1:
396e467c 8164
99c475ab
FB
8165 rd = insn & 7;
8166 op = (insn >> 11) & 3;
8167 if (op == 3) {
8168 /* add/subtract */
8169 rn = (insn >> 3) & 7;
396e467c 8170 tmp = load_reg(s, rn);
99c475ab
FB
8171 if (insn & (1 << 10)) {
8172 /* immediate */
396e467c
FN
8173 tmp2 = new_tmp();
8174 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8175 } else {
8176 /* reg */
8177 rm = (insn >> 6) & 7;
396e467c 8178 tmp2 = load_reg(s, rm);
99c475ab 8179 }
9ee6e8bb
PB
8180 if (insn & (1 << 9)) {
8181 if (s->condexec_mask)
396e467c 8182 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8183 else
396e467c 8184 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8185 } else {
8186 if (s->condexec_mask)
396e467c 8187 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8188 else
396e467c 8189 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8190 }
396e467c
FN
8191 dead_tmp(tmp2);
8192 store_reg(s, rd, tmp);
99c475ab
FB
8193 } else {
8194 /* shift immediate */
8195 rm = (insn >> 3) & 7;
8196 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8197 tmp = load_reg(s, rm);
8198 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8199 if (!s->condexec_mask)
8200 gen_logic_CC(tmp);
8201 store_reg(s, rd, tmp);
99c475ab
FB
8202 }
8203 break;
8204 case 2: case 3:
8205 /* arithmetic large immediate */
8206 op = (insn >> 11) & 3;
8207 rd = (insn >> 8) & 0x7;
396e467c
FN
8208 if (op == 0) { /* mov */
8209 tmp = new_tmp();
8210 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8211 if (!s->condexec_mask)
396e467c
FN
8212 gen_logic_CC(tmp);
8213 store_reg(s, rd, tmp);
8214 } else {
8215 tmp = load_reg(s, rd);
8216 tmp2 = new_tmp();
8217 tcg_gen_movi_i32(tmp2, insn & 0xff);
8218 switch (op) {
8219 case 1: /* cmp */
8220 gen_helper_sub_cc(tmp, tmp, tmp2);
8221 dead_tmp(tmp);
8222 dead_tmp(tmp2);
8223 break;
8224 case 2: /* add */
8225 if (s->condexec_mask)
8226 tcg_gen_add_i32(tmp, tmp, tmp2);
8227 else
8228 gen_helper_add_cc(tmp, tmp, tmp2);
8229 dead_tmp(tmp2);
8230 store_reg(s, rd, tmp);
8231 break;
8232 case 3: /* sub */
8233 if (s->condexec_mask)
8234 tcg_gen_sub_i32(tmp, tmp, tmp2);
8235 else
8236 gen_helper_sub_cc(tmp, tmp, tmp2);
8237 dead_tmp(tmp2);
8238 store_reg(s, rd, tmp);
8239 break;
8240 }
99c475ab 8241 }
99c475ab
FB
8242 break;
8243 case 4:
8244 if (insn & (1 << 11)) {
8245 rd = (insn >> 8) & 7;
5899f386
FB
8246 /* load pc-relative. Bit 1 of PC is ignored. */
8247 val = s->pc + 2 + ((insn & 0xff) * 4);
8248 val &= ~(uint32_t)2;
b0109805
PB
8249 addr = new_tmp();
8250 tcg_gen_movi_i32(addr, val);
8251 tmp = gen_ld32(addr, IS_USER(s));
8252 dead_tmp(addr);
8253 store_reg(s, rd, tmp);
99c475ab
FB
8254 break;
8255 }
8256 if (insn & (1 << 10)) {
8257 /* data processing extended or blx */
8258 rd = (insn & 7) | ((insn >> 4) & 8);
8259 rm = (insn >> 3) & 0xf;
8260 op = (insn >> 8) & 3;
8261 switch (op) {
8262 case 0: /* add */
396e467c
FN
8263 tmp = load_reg(s, rd);
8264 tmp2 = load_reg(s, rm);
8265 tcg_gen_add_i32(tmp, tmp, tmp2);
8266 dead_tmp(tmp2);
8267 store_reg(s, rd, tmp);
99c475ab
FB
8268 break;
8269 case 1: /* cmp */
396e467c
FN
8270 tmp = load_reg(s, rd);
8271 tmp2 = load_reg(s, rm);
8272 gen_helper_sub_cc(tmp, tmp, tmp2);
8273 dead_tmp(tmp2);
8274 dead_tmp(tmp);
99c475ab
FB
8275 break;
8276 case 2: /* mov/cpy */
396e467c
FN
8277 tmp = load_reg(s, rm);
8278 store_reg(s, rd, tmp);
99c475ab
FB
8279 break;
8280 case 3:/* branch [and link] exchange thumb register */
b0109805 8281 tmp = load_reg(s, rm);
99c475ab
FB
8282 if (insn & (1 << 7)) {
8283 val = (uint32_t)s->pc | 1;
b0109805
PB
8284 tmp2 = new_tmp();
8285 tcg_gen_movi_i32(tmp2, val);
8286 store_reg(s, 14, tmp2);
99c475ab 8287 }
d9ba4830 8288 gen_bx(s, tmp);
99c475ab
FB
8289 break;
8290 }
8291 break;
8292 }
8293
8294 /* data processing register */
8295 rd = insn & 7;
8296 rm = (insn >> 3) & 7;
8297 op = (insn >> 6) & 0xf;
8298 if (op == 2 || op == 3 || op == 4 || op == 7) {
8299 /* the shift/rotate ops want the operands backwards */
8300 val = rm;
8301 rm = rd;
8302 rd = val;
8303 val = 1;
8304 } else {
8305 val = 0;
8306 }
8307
396e467c
FN
8308 if (op == 9) { /* neg */
8309 tmp = new_tmp();
8310 tcg_gen_movi_i32(tmp, 0);
8311 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8312 tmp = load_reg(s, rd);
8313 } else {
8314 TCGV_UNUSED(tmp);
8315 }
99c475ab 8316
396e467c 8317 tmp2 = load_reg(s, rm);
5899f386 8318 switch (op) {
99c475ab 8319 case 0x0: /* and */
396e467c 8320 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8321 if (!s->condexec_mask)
396e467c 8322 gen_logic_CC(tmp);
99c475ab
FB
8323 break;
8324 case 0x1: /* eor */
396e467c 8325 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8326 if (!s->condexec_mask)
396e467c 8327 gen_logic_CC(tmp);
99c475ab
FB
8328 break;
8329 case 0x2: /* lsl */
9ee6e8bb 8330 if (s->condexec_mask) {
396e467c 8331 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8332 } else {
396e467c
FN
8333 gen_helper_shl_cc(tmp2, tmp2, tmp);
8334 gen_logic_CC(tmp2);
9ee6e8bb 8335 }
99c475ab
FB
8336 break;
8337 case 0x3: /* lsr */
9ee6e8bb 8338 if (s->condexec_mask) {
396e467c 8339 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8340 } else {
396e467c
FN
8341 gen_helper_shr_cc(tmp2, tmp2, tmp);
8342 gen_logic_CC(tmp2);
9ee6e8bb 8343 }
99c475ab
FB
8344 break;
8345 case 0x4: /* asr */
9ee6e8bb 8346 if (s->condexec_mask) {
396e467c 8347 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8348 } else {
396e467c
FN
8349 gen_helper_sar_cc(tmp2, tmp2, tmp);
8350 gen_logic_CC(tmp2);
9ee6e8bb 8351 }
99c475ab
FB
8352 break;
8353 case 0x5: /* adc */
9ee6e8bb 8354 if (s->condexec_mask)
396e467c 8355 gen_adc(tmp, tmp2);
9ee6e8bb 8356 else
396e467c 8357 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8358 break;
8359 case 0x6: /* sbc */
9ee6e8bb 8360 if (s->condexec_mask)
396e467c 8361 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8362 else
396e467c 8363 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8364 break;
8365 case 0x7: /* ror */
9ee6e8bb 8366 if (s->condexec_mask) {
396e467c 8367 gen_helper_ror(tmp2, tmp2, tmp);
9ee6e8bb 8368 } else {
396e467c
FN
8369 gen_helper_ror_cc(tmp2, tmp2, tmp);
8370 gen_logic_CC(tmp2);
9ee6e8bb 8371 }
99c475ab
FB
8372 break;
8373 case 0x8: /* tst */
396e467c
FN
8374 tcg_gen_and_i32(tmp, tmp, tmp2);
8375 gen_logic_CC(tmp);
99c475ab 8376 rd = 16;
5899f386 8377 break;
99c475ab 8378 case 0x9: /* neg */
9ee6e8bb 8379 if (s->condexec_mask)
396e467c 8380 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8381 else
396e467c 8382 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8383 break;
8384 case 0xa: /* cmp */
396e467c 8385 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8386 rd = 16;
8387 break;
8388 case 0xb: /* cmn */
396e467c 8389 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8390 rd = 16;
8391 break;
8392 case 0xc: /* orr */
396e467c 8393 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8394 if (!s->condexec_mask)
396e467c 8395 gen_logic_CC(tmp);
99c475ab
FB
8396 break;
8397 case 0xd: /* mul */
396e467c 8398 gen_mull(tmp, tmp2);
9ee6e8bb 8399 if (!s->condexec_mask)
396e467c 8400 gen_logic_CC(tmp);
99c475ab
FB
8401 break;
8402 case 0xe: /* bic */
396e467c 8403 tcg_gen_bic_i32(tmp, tmp, tmp2);
9ee6e8bb 8404 if (!s->condexec_mask)
396e467c 8405 gen_logic_CC(tmp);
99c475ab
FB
8406 break;
8407 case 0xf: /* mvn */
396e467c 8408 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8409 if (!s->condexec_mask)
396e467c 8410 gen_logic_CC(tmp2);
99c475ab 8411 val = 1;
5899f386 8412 rm = rd;
99c475ab
FB
8413 break;
8414 }
8415 if (rd != 16) {
396e467c
FN
8416 if (val) {
8417 store_reg(s, rm, tmp2);
8418 if (op != 0xf)
8419 dead_tmp(tmp);
8420 } else {
8421 store_reg(s, rd, tmp);
8422 dead_tmp(tmp2);
8423 }
8424 } else {
8425 dead_tmp(tmp);
8426 dead_tmp(tmp2);
99c475ab
FB
8427 }
8428 break;
8429
8430 case 5:
8431 /* load/store register offset. */
8432 rd = insn & 7;
8433 rn = (insn >> 3) & 7;
8434 rm = (insn >> 6) & 7;
8435 op = (insn >> 9) & 7;
b0109805 8436 addr = load_reg(s, rn);
b26eefb6 8437 tmp = load_reg(s, rm);
b0109805 8438 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8439 dead_tmp(tmp);
99c475ab
FB
8440
8441 if (op < 3) /* store */
b0109805 8442 tmp = load_reg(s, rd);
99c475ab
FB
8443
8444 switch (op) {
8445 case 0: /* str */
b0109805 8446 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8447 break;
8448 case 1: /* strh */
b0109805 8449 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8450 break;
8451 case 2: /* strb */
b0109805 8452 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8453 break;
8454 case 3: /* ldrsb */
b0109805 8455 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8456 break;
8457 case 4: /* ldr */
b0109805 8458 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8459 break;
8460 case 5: /* ldrh */
b0109805 8461 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8462 break;
8463 case 6: /* ldrb */
b0109805 8464 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8465 break;
8466 case 7: /* ldrsh */
b0109805 8467 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8468 break;
8469 }
8470 if (op >= 3) /* load */
b0109805
PB
8471 store_reg(s, rd, tmp);
8472 dead_tmp(addr);
99c475ab
FB
8473 break;
8474
8475 case 6:
8476 /* load/store word immediate offset */
8477 rd = insn & 7;
8478 rn = (insn >> 3) & 7;
b0109805 8479 addr = load_reg(s, rn);
99c475ab 8480 val = (insn >> 4) & 0x7c;
b0109805 8481 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8482
8483 if (insn & (1 << 11)) {
8484 /* load */
b0109805
PB
8485 tmp = gen_ld32(addr, IS_USER(s));
8486 store_reg(s, rd, tmp);
99c475ab
FB
8487 } else {
8488 /* store */
b0109805
PB
8489 tmp = load_reg(s, rd);
8490 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8491 }
b0109805 8492 dead_tmp(addr);
99c475ab
FB
8493 break;
8494
8495 case 7:
8496 /* load/store byte immediate offset */
8497 rd = insn & 7;
8498 rn = (insn >> 3) & 7;
b0109805 8499 addr = load_reg(s, rn);
99c475ab 8500 val = (insn >> 6) & 0x1f;
b0109805 8501 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8502
8503 if (insn & (1 << 11)) {
8504 /* load */
b0109805
PB
8505 tmp = gen_ld8u(addr, IS_USER(s));
8506 store_reg(s, rd, tmp);
99c475ab
FB
8507 } else {
8508 /* store */
b0109805
PB
8509 tmp = load_reg(s, rd);
8510 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8511 }
b0109805 8512 dead_tmp(addr);
99c475ab
FB
8513 break;
8514
8515 case 8:
8516 /* load/store halfword immediate offset */
8517 rd = insn & 7;
8518 rn = (insn >> 3) & 7;
b0109805 8519 addr = load_reg(s, rn);
99c475ab 8520 val = (insn >> 5) & 0x3e;
b0109805 8521 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8522
8523 if (insn & (1 << 11)) {
8524 /* load */
b0109805
PB
8525 tmp = gen_ld16u(addr, IS_USER(s));
8526 store_reg(s, rd, tmp);
99c475ab
FB
8527 } else {
8528 /* store */
b0109805
PB
8529 tmp = load_reg(s, rd);
8530 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8531 }
b0109805 8532 dead_tmp(addr);
99c475ab
FB
8533 break;
8534
8535 case 9:
8536 /* load/store from stack */
8537 rd = (insn >> 8) & 7;
b0109805 8538 addr = load_reg(s, 13);
99c475ab 8539 val = (insn & 0xff) * 4;
b0109805 8540 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8541
8542 if (insn & (1 << 11)) {
8543 /* load */
b0109805
PB
8544 tmp = gen_ld32(addr, IS_USER(s));
8545 store_reg(s, rd, tmp);
99c475ab
FB
8546 } else {
8547 /* store */
b0109805
PB
8548 tmp = load_reg(s, rd);
8549 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8550 }
b0109805 8551 dead_tmp(addr);
99c475ab
FB
8552 break;
8553
8554 case 10:
8555 /* add to high reg */
8556 rd = (insn >> 8) & 7;
5899f386
FB
8557 if (insn & (1 << 11)) {
8558 /* SP */
5e3f878a 8559 tmp = load_reg(s, 13);
5899f386
FB
8560 } else {
8561 /* PC. bit 1 is ignored. */
5e3f878a
PB
8562 tmp = new_tmp();
8563 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8564 }
99c475ab 8565 val = (insn & 0xff) * 4;
5e3f878a
PB
8566 tcg_gen_addi_i32(tmp, tmp, val);
8567 store_reg(s, rd, tmp);
99c475ab
FB
8568 break;
8569
8570 case 11:
8571 /* misc */
8572 op = (insn >> 8) & 0xf;
8573 switch (op) {
8574 case 0:
8575 /* adjust stack pointer */
b26eefb6 8576 tmp = load_reg(s, 13);
99c475ab
FB
8577 val = (insn & 0x7f) * 4;
8578 if (insn & (1 << 7))
6a0d8a1d 8579 val = -(int32_t)val;
b26eefb6
PB
8580 tcg_gen_addi_i32(tmp, tmp, val);
8581 store_reg(s, 13, tmp);
99c475ab
FB
8582 break;
8583
9ee6e8bb
PB
8584 case 2: /* sign/zero extend. */
8585 ARCH(6);
8586 rd = insn & 7;
8587 rm = (insn >> 3) & 7;
b0109805 8588 tmp = load_reg(s, rm);
9ee6e8bb 8589 switch ((insn >> 6) & 3) {
b0109805
PB
8590 case 0: gen_sxth(tmp); break;
8591 case 1: gen_sxtb(tmp); break;
8592 case 2: gen_uxth(tmp); break;
8593 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8594 }
b0109805 8595 store_reg(s, rd, tmp);
9ee6e8bb 8596 break;
99c475ab
FB
8597 case 4: case 5: case 0xc: case 0xd:
8598 /* push/pop */
b0109805 8599 addr = load_reg(s, 13);
5899f386
FB
8600 if (insn & (1 << 8))
8601 offset = 4;
99c475ab 8602 else
5899f386
FB
8603 offset = 0;
8604 for (i = 0; i < 8; i++) {
8605 if (insn & (1 << i))
8606 offset += 4;
8607 }
8608 if ((insn & (1 << 11)) == 0) {
b0109805 8609 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8610 }
99c475ab
FB
8611 for (i = 0; i < 8; i++) {
8612 if (insn & (1 << i)) {
8613 if (insn & (1 << 11)) {
8614 /* pop */
b0109805
PB
8615 tmp = gen_ld32(addr, IS_USER(s));
8616 store_reg(s, i, tmp);
99c475ab
FB
8617 } else {
8618 /* push */
b0109805
PB
8619 tmp = load_reg(s, i);
8620 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8621 }
5899f386 8622 /* advance to the next address. */
b0109805 8623 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8624 }
8625 }
a50f5b91 8626 TCGV_UNUSED(tmp);
99c475ab
FB
8627 if (insn & (1 << 8)) {
8628 if (insn & (1 << 11)) {
8629 /* pop pc */
b0109805 8630 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8631 /* don't set the pc until the rest of the instruction
8632 has completed */
8633 } else {
8634 /* push lr */
b0109805
PB
8635 tmp = load_reg(s, 14);
8636 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8637 }
b0109805 8638 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8639 }
5899f386 8640 if ((insn & (1 << 11)) == 0) {
b0109805 8641 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8642 }
99c475ab 8643 /* write back the new stack pointer */
b0109805 8644 store_reg(s, 13, addr);
99c475ab
FB
8645 /* set the new PC value */
8646 if ((insn & 0x0900) == 0x0900)
b0109805 8647 gen_bx(s, tmp);
99c475ab
FB
8648 break;
8649
9ee6e8bb
PB
8650 case 1: case 3: case 9: case 11: /* czb */
8651 rm = insn & 7;
d9ba4830 8652 tmp = load_reg(s, rm);
9ee6e8bb
PB
8653 s->condlabel = gen_new_label();
8654 s->condjmp = 1;
8655 if (insn & (1 << 11))
cb63669a 8656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8657 else
cb63669a 8658 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8659 dead_tmp(tmp);
9ee6e8bb
PB
8660 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8661 val = (uint32_t)s->pc + 2;
8662 val += offset;
8663 gen_jmp(s, val);
8664 break;
8665
8666 case 15: /* IT, nop-hint. */
8667 if ((insn & 0xf) == 0) {
8668 gen_nop_hint(s, (insn >> 4) & 0xf);
8669 break;
8670 }
8671 /* If Then. */
8672 s->condexec_cond = (insn >> 4) & 0xe;
8673 s->condexec_mask = insn & 0x1f;
8674 /* No actual code generated for this insn, just setup state. */
8675 break;
8676
06c949e6 8677 case 0xe: /* bkpt */
9ee6e8bb 8678 gen_set_condexec(s);
5e3f878a 8679 gen_set_pc_im(s->pc - 2);
d9ba4830 8680 gen_exception(EXCP_BKPT);
06c949e6
PB
8681 s->is_jmp = DISAS_JUMP;
8682 break;
8683
9ee6e8bb
PB
8684 case 0xa: /* rev */
8685 ARCH(6);
8686 rn = (insn >> 3) & 0x7;
8687 rd = insn & 0x7;
b0109805 8688 tmp = load_reg(s, rn);
9ee6e8bb 8689 switch ((insn >> 6) & 3) {
66896cb8 8690 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8691 case 1: gen_rev16(tmp); break;
8692 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8693 default: goto illegal_op;
8694 }
b0109805 8695 store_reg(s, rd, tmp);
9ee6e8bb
PB
8696 break;
8697
8698 case 6: /* cps */
8699 ARCH(6);
8700 if (IS_USER(s))
8701 break;
8702 if (IS_M(env)) {
8984bd2e 8703 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8704 /* PRIMASK */
8984bd2e
PB
8705 if (insn & 1) {
8706 addr = tcg_const_i32(16);
8707 gen_helper_v7m_msr(cpu_env, addr, tmp);
8708 }
9ee6e8bb 8709 /* FAULTMASK */
8984bd2e
PB
8710 if (insn & 2) {
8711 addr = tcg_const_i32(17);
8712 gen_helper_v7m_msr(cpu_env, addr, tmp);
8713 }
9ee6e8bb
PB
8714 gen_lookup_tb(s);
8715 } else {
8716 if (insn & (1 << 4))
8717 shift = CPSR_A | CPSR_I | CPSR_F;
8718 else
8719 shift = 0;
2fbac54b 8720 gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
9ee6e8bb
PB
8721 }
8722 break;
8723
99c475ab
FB
8724 default:
8725 goto undef;
8726 }
8727 break;
8728
8729 case 12:
8730 /* load/store multiple */
8731 rn = (insn >> 8) & 0x7;
b0109805 8732 addr = load_reg(s, rn);
99c475ab
FB
8733 for (i = 0; i < 8; i++) {
8734 if (insn & (1 << i)) {
99c475ab
FB
8735 if (insn & (1 << 11)) {
8736 /* load */
b0109805
PB
8737 tmp = gen_ld32(addr, IS_USER(s));
8738 store_reg(s, i, tmp);
99c475ab
FB
8739 } else {
8740 /* store */
b0109805
PB
8741 tmp = load_reg(s, i);
8742 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8743 }
5899f386 8744 /* advance to the next address */
b0109805 8745 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8746 }
8747 }
5899f386 8748 /* Base register writeback. */
b0109805
PB
8749 if ((insn & (1 << rn)) == 0) {
8750 store_reg(s, rn, addr);
8751 } else {
8752 dead_tmp(addr);
8753 }
99c475ab
FB
8754 break;
8755
8756 case 13:
8757 /* conditional branch or swi */
8758 cond = (insn >> 8) & 0xf;
8759 if (cond == 0xe)
8760 goto undef;
8761
8762 if (cond == 0xf) {
8763 /* swi */
9ee6e8bb 8764 gen_set_condexec(s);
422ebf69 8765 gen_set_pc_im(s->pc);
9ee6e8bb 8766 s->is_jmp = DISAS_SWI;
99c475ab
FB
8767 break;
8768 }
8769 /* generate a conditional jump to next instruction */
e50e6a20 8770 s->condlabel = gen_new_label();
d9ba4830 8771 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8772 s->condjmp = 1;
99c475ab
FB
8773
8774 /* jump to the offset */
5899f386 8775 val = (uint32_t)s->pc + 2;
99c475ab 8776 offset = ((int32_t)insn << 24) >> 24;
5899f386 8777 val += offset << 1;
8aaca4c0 8778 gen_jmp(s, val);
99c475ab
FB
8779 break;
8780
8781 case 14:
358bf29e 8782 if (insn & (1 << 11)) {
9ee6e8bb
PB
8783 if (disas_thumb2_insn(env, s, insn))
8784 goto undef32;
358bf29e
PB
8785 break;
8786 }
9ee6e8bb 8787 /* unconditional branch */
99c475ab
FB
8788 val = (uint32_t)s->pc;
8789 offset = ((int32_t)insn << 21) >> 21;
8790 val += (offset << 1) + 2;
8aaca4c0 8791 gen_jmp(s, val);
99c475ab
FB
8792 break;
8793
8794 case 15:
9ee6e8bb 8795 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8796 goto undef32;
9ee6e8bb 8797 break;
99c475ab
FB
8798 }
8799 return;
9ee6e8bb
PB
8800undef32:
8801 gen_set_condexec(s);
5e3f878a 8802 gen_set_pc_im(s->pc - 4);
d9ba4830 8803 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8804 s->is_jmp = DISAS_JUMP;
8805 return;
8806illegal_op:
99c475ab 8807undef:
9ee6e8bb 8808 gen_set_condexec(s);
5e3f878a 8809 gen_set_pc_im(s->pc - 2);
d9ba4830 8810 gen_exception(EXCP_UDEF);
99c475ab
FB
8811 s->is_jmp = DISAS_JUMP;
8812}
8813
2c0262af
FB
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction: gen_opc_pc[],
   gen_opc_instr_start[] and gen_opc_icount[] are filled in per guest
   instruction so a TCG op index can be mapped back to a guest PC.  */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    /* Thumb IT-block state, split into the shift mask and condition.  */
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    /* Shared TCG temporaries used by the VFP/Neon/iwMMXt decoders.  */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (env->condexec_bits)
      {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    /* Main translation loop: one guest instruction per iteration.  */
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_condexec(dc);
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                    break; /* not reached */
                }
            }
        }
        if (search_pc) {
            /* Record the guest PC and icount for this op index, zero
               filling any op slots emitted since the last instruction.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (env->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                /* Advance the IT-block state: shift the next condition
                   bit into the low bit of the condition.  */
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        /* Sanity check: every new_tmp() must be matched by dead_tmp().  */
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns ++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            /* Emit the not-taken path of a trailing conditional insn.  */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        /* Zero-fill op slots after the last recorded instruction.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
9055
/* Translate the guest code of 'tb' into TCG ops, without recording
   per-instruction PC information (search_pc = 0).  */
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
9060
/* As gen_intermediate_code, but additionally fill gen_opc_pc[] and
   friends for each instruction (search_pc = 1).  */
void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
9065
/* Printable names for the processor mode encoded in the low four bits
   of the PSR (indexed with psr & 0xf in cpu_dump_state); "???" marks
   encodings with no name.  The table is never written, so both the
   strings and the pointer array itself are const.  */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 9070
5fafdf24 9071void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
9072 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
9073 int flags)
2c0262af
FB
9074{
9075 int i;
06e80fc9 9076#if 0
bc380d17 9077 union {
b7bcbe95
FB
9078 uint32_t i;
9079 float s;
9080 } s0, s1;
9081 CPU_DoubleU d;
a94a6abf
PB
9082 /* ??? This assumes float64 and double have the same layout.
9083 Oh well, it's only debug dumps. */
9084 union {
9085 float64 f64;
9086 double d;
9087 } d0;
06e80fc9 9088#endif
b5ff1b31 9089 uint32_t psr;
2c0262af
FB
9090
9091 for(i=0;i<16;i++) {
7fe48483 9092 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9093 if ((i % 4) == 3)
7fe48483 9094 cpu_fprintf(f, "\n");
2c0262af 9095 else
7fe48483 9096 cpu_fprintf(f, " ");
2c0262af 9097 }
b5ff1b31 9098 psr = cpsr_read(env);
687fa640
TS
9099 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9100 psr,
b5ff1b31
FB
9101 psr & (1 << 31) ? 'N' : '-',
9102 psr & (1 << 30) ? 'Z' : '-',
9103 psr & (1 << 29) ? 'C' : '-',
9104 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9105 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9106 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9107
5e3f878a 9108#if 0
b7bcbe95 9109 for (i = 0; i < 16; i++) {
8e96005d
FB
9110 d.d = env->vfp.regs[i];
9111 s0.i = d.l.lower;
9112 s1.i = d.l.upper;
a94a6abf
PB
9113 d0.f64 = d.d;
9114 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9115 i * 2, (int)s0.i, s0.s,
a94a6abf 9116 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9117 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9118 d0.d);
b7bcbe95 9119 }
40f137e1 9120 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9121#endif
2c0262af 9122}
a6b025d3 9123
/* Set r15 (the ARM PC) to the guest PC recorded at opcode index
   'pc_pos' by gen_intermediate_code_pc.  'tb', 'searched_pc' and 'puc'
   are unused here.  */
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}