]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
target-arm: remove unused gen_movl_T2_reg function
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
28#include "exec-all.h"
29#include "disas.h"
57fec1fe 30#include "tcg-op.h"
79383c9c 31#include "qemu-log.h"
1497c961 32
a7812ae4 33#include "helpers.h"
1497c961 34#define GEN_HELPER 1
b26eefb6 35#include "helpers.h"
2c0262af 36
9ee6e8bb
PB
37#define ENABLE_ARCH_5J 0
38#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 42
86753403 43#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 44
2c0262af
FB
45/* internal defines */
46typedef struct DisasContext {
0fa85d43 47 target_ulong pc;
2c0262af 48 int is_jmp;
e50e6a20
FB
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
9ee6e8bb
PB
53 /* Thumb-2 condtional execution bits. */
54 int condexec_mask;
55 int condexec_cond;
2c0262af 56 struct TranslationBlock *tb;
8aaca4c0 57 int singlestep_enabled;
5899f386 58 int thumb;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
2c0262af
FB
62} DisasContext;
63
b5ff1b31
FB
64#if defined(CONFIG_USER_ONLY)
65#define IS_USER(s) 1
66#else
67#define IS_USER(s) (s->user)
68#endif
69
9ee6e8bb
PB
70/* These instructions trap after executing, so defer them until after the
71 conditional executions state has been updated. */
72#define DISAS_WFI 4
73#define DISAS_SWI 5
2c0262af 74
a7812ae4 75static TCGv_ptr cpu_env;
ad69471c 76/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 77static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 78static TCGv_i32 cpu_R[16];
ad69471c 79
b26eefb6 80/* FIXME: These should be removed. */
8f8e3aa4 81static TCGv cpu_T[2];
a7812ae4
PB
82static TCGv cpu_F0s, cpu_F1s;
83static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 84
2e70f6ef
PB
85#define ICOUNT_TEMP cpu_T[0]
86#include "gen-icount.h"
87
155c3eac
FN
88static const char *regnames[] =
89 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
90 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
91
b26eefb6
PB
92/* initialize TCG globals. */
93void arm_translate_init(void)
94{
155c3eac
FN
95 int i;
96
a7812ae4
PB
97 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
98
99 cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
100 cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
b26eefb6 101
155c3eac
FN
102 for (i = 0; i < 16; i++) {
103 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
104 offsetof(CPUState, regs[i]),
105 regnames[i]);
106 }
107
a7812ae4
PB
108#define GEN_HELPER 2
109#include "helpers.h"
b26eefb6
PB
110}
111
b26eefb6 112static int num_temps;
b26eefb6
PB
113
114/* Allocate a temporary variable. */
a7812ae4 115static TCGv_i32 new_tmp(void)
b26eefb6 116{
12edd4f2
FN
117 num_temps++;
118 return tcg_temp_new_i32();
b26eefb6
PB
119}
120
121/* Release a temporary variable. */
122static void dead_tmp(TCGv tmp)
123{
12edd4f2 124 tcg_temp_free(tmp);
b26eefb6 125 num_temps--;
b26eefb6
PB
126}
127
d9ba4830
PB
128static inline TCGv load_cpu_offset(int offset)
129{
130 TCGv tmp = new_tmp();
131 tcg_gen_ld_i32(tmp, cpu_env, offset);
132 return tmp;
133}
134
135#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
136
137static inline void store_cpu_offset(TCGv var, int offset)
138{
139 tcg_gen_st_i32(var, cpu_env, offset);
140 dead_tmp(var);
141}
142
143#define store_cpu_field(var, name) \
144 store_cpu_offset(var, offsetof(CPUState, name))
145
b26eefb6
PB
146/* Set a variable to the value of a CPU register. */
147static void load_reg_var(DisasContext *s, TCGv var, int reg)
148{
149 if (reg == 15) {
150 uint32_t addr;
151 /* normaly, since we updated PC, we need only to add one insn */
152 if (s->thumb)
153 addr = (long)s->pc + 2;
154 else
155 addr = (long)s->pc + 4;
156 tcg_gen_movi_i32(var, addr);
157 } else {
155c3eac 158 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
159 }
160}
161
162/* Create a new temporary and set it to the value of a CPU register. */
163static inline TCGv load_reg(DisasContext *s, int reg)
164{
165 TCGv tmp = new_tmp();
166 load_reg_var(s, tmp, reg);
167 return tmp;
168}
169
170/* Set a CPU register. The source must be a temporary and will be
171 marked as dead. */
172static void store_reg(DisasContext *s, int reg, TCGv var)
173{
174 if (reg == 15) {
175 tcg_gen_andi_i32(var, var, ~1);
176 s->is_jmp = DISAS_JUMP;
177 }
155c3eac 178 tcg_gen_mov_i32(cpu_R[reg], var);
b26eefb6
PB
179 dead_tmp(var);
180}
181
182
183/* Basic operations. */
184#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
b26eefb6
PB
185#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
186#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
b26eefb6
PB
187
188#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
189#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
190#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
191#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
192
8984bd2e
PB
193#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
194#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
195#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
196#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
197#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
8984bd2e 198
b26eefb6
PB
199#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
200#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
201#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
202#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
203#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
204#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
205#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
206
b26eefb6
PB
207#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
208#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
b26eefb6
PB
209
210/* Value extensions. */
86831435
PB
211#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
212#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
213#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
214#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
215
1497c961
PB
216#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
217#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e
PB
218
219#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 220
d9ba4830
PB
221#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
222/* Set NZCV flags from the high 4 bits of var. */
223#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
224
225static void gen_exception(int excp)
226{
227 TCGv tmp = new_tmp();
228 tcg_gen_movi_i32(tmp, excp);
229 gen_helper_exception(tmp);
230 dead_tmp(tmp);
231}
232
3670669c
PB
233static void gen_smul_dual(TCGv a, TCGv b)
234{
235 TCGv tmp1 = new_tmp();
236 TCGv tmp2 = new_tmp();
22478e79
AZ
237 tcg_gen_ext16s_i32(tmp1, a);
238 tcg_gen_ext16s_i32(tmp2, b);
3670669c
PB
239 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
240 dead_tmp(tmp2);
241 tcg_gen_sari_i32(a, a, 16);
242 tcg_gen_sari_i32(b, b, 16);
243 tcg_gen_mul_i32(b, b, a);
244 tcg_gen_mov_i32(a, tmp1);
245 dead_tmp(tmp1);
246}
247
248/* Byteswap each halfword. */
249static void gen_rev16(TCGv var)
250{
251 TCGv tmp = new_tmp();
252 tcg_gen_shri_i32(tmp, var, 8);
253 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
254 tcg_gen_shli_i32(var, var, 8);
255 tcg_gen_andi_i32(var, var, 0xff00ff00);
256 tcg_gen_or_i32(var, var, tmp);
257 dead_tmp(tmp);
258}
259
260/* Byteswap low halfword and sign extend. */
261static void gen_revsh(TCGv var)
262{
263 TCGv tmp = new_tmp();
264 tcg_gen_shri_i32(tmp, var, 8);
265 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
266 tcg_gen_shli_i32(var, var, 8);
267 tcg_gen_ext8s_i32(var, var);
268 tcg_gen_or_i32(var, var, tmp);
269 dead_tmp(tmp);
270}
271
272/* Unsigned bitfield extract. */
273static void gen_ubfx(TCGv var, int shift, uint32_t mask)
274{
275 if (shift)
276 tcg_gen_shri_i32(var, var, shift);
277 tcg_gen_andi_i32(var, var, mask);
278}
279
280/* Signed bitfield extract. */
281static void gen_sbfx(TCGv var, int shift, int width)
282{
283 uint32_t signbit;
284
285 if (shift)
286 tcg_gen_sari_i32(var, var, shift);
287 if (shift + width < 32) {
288 signbit = 1u << (width - 1);
289 tcg_gen_andi_i32(var, var, (1u << width) - 1);
290 tcg_gen_xori_i32(var, var, signbit);
291 tcg_gen_subi_i32(var, var, signbit);
292 }
293}
294
295/* Bitfield insertion. Insert val into base. Clobbers base and val. */
296static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
297{
3670669c 298 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
299 tcg_gen_shli_i32(val, val, shift);
300 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
301 tcg_gen_or_i32(dest, base, val);
302}
303
d9ba4830
PB
304/* Round the top 32 bits of a 64-bit value. */
305static void gen_roundqd(TCGv a, TCGv b)
3670669c 306{
d9ba4830
PB
307 tcg_gen_shri_i32(a, a, 31);
308 tcg_gen_add_i32(a, a, b);
3670669c
PB
309}
310
8f01245e
PB
311/* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
5e3f878a 313/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 314static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 315{
a7812ae4
PB
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
318
319 tcg_gen_extu_i32_i64(tmp1, a);
320 dead_tmp(a);
321 tcg_gen_extu_i32_i64(tmp2, b);
322 dead_tmp(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
324 return tmp1;
325}
326
a7812ae4 327static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 328{
a7812ae4
PB
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
331
332 tcg_gen_ext_i32_i64(tmp1, a);
333 dead_tmp(a);
334 tcg_gen_ext_i32_i64(tmp2, b);
335 dead_tmp(b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
337 return tmp1;
338}
339
8f01245e
PB
340/* Unsigned 32x32->64 multiply. */
341static void gen_op_mull_T0_T1(void)
342{
a7812ae4
PB
343 TCGv_i64 tmp1 = tcg_temp_new_i64();
344 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e
PB
345
346 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
347 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
348 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
349 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
350 tcg_gen_shri_i64(tmp1, tmp1, 32);
351 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
352}
353
354/* Signed 32x32->64 multiply. */
d9ba4830 355static void gen_imull(TCGv a, TCGv b)
8f01245e 356{
a7812ae4
PB
357 TCGv_i64 tmp1 = tcg_temp_new_i64();
358 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e 359
d9ba4830
PB
360 tcg_gen_ext_i32_i64(tmp1, a);
361 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 362 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 363 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 364 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
365 tcg_gen_trunc_i64_i32(b, tmp1);
366}
d9ba4830 367
8f01245e
PB
368/* Swap low and high halfwords. */
369static void gen_swap_half(TCGv var)
370{
371 TCGv tmp = new_tmp();
372 tcg_gen_shri_i32(tmp, var, 16);
373 tcg_gen_shli_i32(var, var, 16);
374 tcg_gen_or_i32(var, var, tmp);
3670669c 375 dead_tmp(tmp);
8f01245e
PB
376}
377
b26eefb6
PB
378/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
379 tmp = (t0 ^ t1) & 0x8000;
380 t0 &= ~0x8000;
381 t1 &= ~0x8000;
382 t0 = (t0 + t1) ^ tmp;
383 */
384
385static void gen_add16(TCGv t0, TCGv t1)
386{
387 TCGv tmp = new_tmp();
388 tcg_gen_xor_i32(tmp, t0, t1);
389 tcg_gen_andi_i32(tmp, tmp, 0x8000);
390 tcg_gen_andi_i32(t0, t0, ~0x8000);
391 tcg_gen_andi_i32(t1, t1, ~0x8000);
392 tcg_gen_add_i32(t0, t0, t1);
393 tcg_gen_xor_i32(t0, t0, tmp);
394 dead_tmp(tmp);
395 dead_tmp(t1);
396}
397
9a119ff6
PB
398#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
399
b26eefb6
PB
400/* Set CF to the top bit of var. */
401static void gen_set_CF_bit31(TCGv var)
402{
403 TCGv tmp = new_tmp();
404 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 405 gen_set_CF(tmp);
b26eefb6
PB
406 dead_tmp(tmp);
407}
408
409/* Set N and Z flags from var. */
410static inline void gen_logic_CC(TCGv var)
411{
6fbe23d5
PB
412 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
413 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
414}
415
416/* T0 += T1 + CF. */
417static void gen_adc_T0_T1(void)
418{
d9ba4830 419 TCGv tmp;
b26eefb6 420 gen_op_addl_T0_T1();
d9ba4830 421 tmp = load_cpu_field(CF);
b26eefb6
PB
422 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
423 dead_tmp(tmp);
424}
425
e9bb4aa9
JR
426/* dest = T0 + T1 + CF. */
427static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
428{
429 TCGv tmp;
430 tcg_gen_add_i32(dest, t0, t1);
431 tmp = load_cpu_field(CF);
432 tcg_gen_add_i32(dest, dest, tmp);
433 dead_tmp(tmp);
434}
435
3670669c
PB
436/* dest = T0 - T1 + CF - 1. */
437static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
438{
d9ba4830 439 TCGv tmp;
3670669c 440 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 441 tmp = load_cpu_field(CF);
3670669c
PB
442 tcg_gen_add_i32(dest, dest, tmp);
443 tcg_gen_subi_i32(dest, dest, 1);
444 dead_tmp(tmp);
445}
446
447#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
448#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
449
b26eefb6
PB
450/* T0 &= ~T1. Clobbers T1. */
451/* FIXME: Implement bic natively. */
8f8e3aa4
PB
452static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
453{
454 TCGv tmp = new_tmp();
455 tcg_gen_not_i32(tmp, t1);
456 tcg_gen_and_i32(dest, t0, tmp);
457 dead_tmp(tmp);
458}
b26eefb6
PB
459static inline void gen_op_bicl_T0_T1(void)
460{
461 gen_op_notl_T1();
462 gen_op_andl_T0_T1();
463}
464
ad69471c
PB
465/* FIXME: Implement this natively. */
466#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
467
b26eefb6
PB
468/* FIXME: Implement this natively. */
469static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
470{
471 TCGv tmp;
472
473 if (i == 0)
474 return;
475
476 tmp = new_tmp();
477 tcg_gen_shri_i32(tmp, t1, i);
478 tcg_gen_shli_i32(t1, t1, 32 - i);
479 tcg_gen_or_i32(t0, t1, tmp);
480 dead_tmp(tmp);
481}
482
9a119ff6 483static void shifter_out_im(TCGv var, int shift)
b26eefb6 484{
9a119ff6
PB
485 TCGv tmp = new_tmp();
486 if (shift == 0) {
487 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 488 } else {
9a119ff6 489 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 490 if (shift != 31)
9a119ff6
PB
491 tcg_gen_andi_i32(tmp, tmp, 1);
492 }
493 gen_set_CF(tmp);
494 dead_tmp(tmp);
495}
b26eefb6 496
9a119ff6
PB
497/* Shift by immediate. Includes special handling for shift == 0. */
498static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
499{
500 switch (shiftop) {
501 case 0: /* LSL */
502 if (shift != 0) {
503 if (flags)
504 shifter_out_im(var, 32 - shift);
505 tcg_gen_shli_i32(var, var, shift);
506 }
507 break;
508 case 1: /* LSR */
509 if (shift == 0) {
510 if (flags) {
511 tcg_gen_shri_i32(var, var, 31);
512 gen_set_CF(var);
513 }
514 tcg_gen_movi_i32(var, 0);
515 } else {
516 if (flags)
517 shifter_out_im(var, shift - 1);
518 tcg_gen_shri_i32(var, var, shift);
519 }
520 break;
521 case 2: /* ASR */
522 if (shift == 0)
523 shift = 32;
524 if (flags)
525 shifter_out_im(var, shift - 1);
526 if (shift == 32)
527 shift = 31;
528 tcg_gen_sari_i32(var, var, shift);
529 break;
530 case 3: /* ROR/RRX */
531 if (shift != 0) {
532 if (flags)
533 shifter_out_im(var, shift - 1);
534 tcg_gen_rori_i32(var, var, shift); break;
535 } else {
d9ba4830 536 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
537 if (flags)
538 shifter_out_im(var, 0);
539 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
540 tcg_gen_shli_i32(tmp, tmp, 31);
541 tcg_gen_or_i32(var, var, tmp);
542 dead_tmp(tmp);
b26eefb6
PB
543 }
544 }
545};
546
8984bd2e
PB
547static inline void gen_arm_shift_reg(TCGv var, int shiftop,
548 TCGv shift, int flags)
549{
550 if (flags) {
551 switch (shiftop) {
552 case 0: gen_helper_shl_cc(var, var, shift); break;
553 case 1: gen_helper_shr_cc(var, var, shift); break;
554 case 2: gen_helper_sar_cc(var, var, shift); break;
555 case 3: gen_helper_ror_cc(var, var, shift); break;
556 }
557 } else {
558 switch (shiftop) {
559 case 0: gen_helper_shl(var, var, shift); break;
560 case 1: gen_helper_shr(var, var, shift); break;
561 case 2: gen_helper_sar(var, var, shift); break;
562 case 3: gen_helper_ror(var, var, shift); break;
563 }
564 }
565 dead_tmp(shift);
566}
567
6ddbc6e4
PB
568#define PAS_OP(pfx) \
569 switch (op2) { \
570 case 0: gen_pas_helper(glue(pfx,add16)); break; \
571 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
572 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
573 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
574 case 4: gen_pas_helper(glue(pfx,add8)); break; \
575 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
576 }
d9ba4830 577static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 578{
a7812ae4 579 TCGv_ptr tmp;
6ddbc6e4
PB
580
581 switch (op1) {
582#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
583 case 1:
a7812ae4 584 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
585 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
586 PAS_OP(s)
587 break;
588 case 5:
a7812ae4 589 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
590 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
591 PAS_OP(u)
592 break;
593#undef gen_pas_helper
594#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
595 case 2:
596 PAS_OP(q);
597 break;
598 case 3:
599 PAS_OP(sh);
600 break;
601 case 6:
602 PAS_OP(uq);
603 break;
604 case 7:
605 PAS_OP(uh);
606 break;
607#undef gen_pas_helper
608 }
609}
9ee6e8bb
PB
610#undef PAS_OP
611
6ddbc6e4
PB
612/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
613#define PAS_OP(pfx) \
614 switch (op2) { \
615 case 0: gen_pas_helper(glue(pfx,add8)); break; \
616 case 1: gen_pas_helper(glue(pfx,add16)); break; \
617 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
618 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
619 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
620 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
621 }
d9ba4830 622static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 623{
a7812ae4 624 TCGv_ptr tmp;
6ddbc6e4
PB
625
626 switch (op1) {
627#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
628 case 0:
a7812ae4 629 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
630 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
631 PAS_OP(s)
632 break;
633 case 4:
a7812ae4 634 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
635 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
636 PAS_OP(u)
637 break;
638#undef gen_pas_helper
639#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
640 case 1:
641 PAS_OP(q);
642 break;
643 case 2:
644 PAS_OP(sh);
645 break;
646 case 5:
647 PAS_OP(uq);
648 break;
649 case 6:
650 PAS_OP(uh);
651 break;
652#undef gen_pas_helper
653 }
654}
9ee6e8bb
PB
655#undef PAS_OP
656
d9ba4830
PB
657static void gen_test_cc(int cc, int label)
658{
659 TCGv tmp;
660 TCGv tmp2;
d9ba4830
PB
661 int inv;
662
d9ba4830
PB
663 switch (cc) {
664 case 0: /* eq: Z */
6fbe23d5 665 tmp = load_cpu_field(ZF);
cb63669a 666 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
667 break;
668 case 1: /* ne: !Z */
6fbe23d5 669 tmp = load_cpu_field(ZF);
cb63669a 670 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
671 break;
672 case 2: /* cs: C */
673 tmp = load_cpu_field(CF);
cb63669a 674 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
675 break;
676 case 3: /* cc: !C */
677 tmp = load_cpu_field(CF);
cb63669a 678 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
679 break;
680 case 4: /* mi: N */
6fbe23d5 681 tmp = load_cpu_field(NF);
cb63669a 682 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
683 break;
684 case 5: /* pl: !N */
6fbe23d5 685 tmp = load_cpu_field(NF);
cb63669a 686 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
687 break;
688 case 6: /* vs: V */
689 tmp = load_cpu_field(VF);
cb63669a 690 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
691 break;
692 case 7: /* vc: !V */
693 tmp = load_cpu_field(VF);
cb63669a 694 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
695 break;
696 case 8: /* hi: C && !Z */
697 inv = gen_new_label();
698 tmp = load_cpu_field(CF);
cb63669a 699 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 700 dead_tmp(tmp);
6fbe23d5 701 tmp = load_cpu_field(ZF);
cb63669a 702 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
703 gen_set_label(inv);
704 break;
705 case 9: /* ls: !C || Z */
706 tmp = load_cpu_field(CF);
cb63669a 707 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 708 dead_tmp(tmp);
6fbe23d5 709 tmp = load_cpu_field(ZF);
cb63669a 710 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
711 break;
712 case 10: /* ge: N == V -> N ^ V == 0 */
713 tmp = load_cpu_field(VF);
6fbe23d5 714 tmp2 = load_cpu_field(NF);
d9ba4830
PB
715 tcg_gen_xor_i32(tmp, tmp, tmp2);
716 dead_tmp(tmp2);
cb63669a 717 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
718 break;
719 case 11: /* lt: N != V -> N ^ V != 0 */
720 tmp = load_cpu_field(VF);
6fbe23d5 721 tmp2 = load_cpu_field(NF);
d9ba4830
PB
722 tcg_gen_xor_i32(tmp, tmp, tmp2);
723 dead_tmp(tmp2);
cb63669a 724 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
725 break;
726 case 12: /* gt: !Z && N == V */
727 inv = gen_new_label();
6fbe23d5 728 tmp = load_cpu_field(ZF);
cb63669a 729 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
730 dead_tmp(tmp);
731 tmp = load_cpu_field(VF);
6fbe23d5 732 tmp2 = load_cpu_field(NF);
d9ba4830
PB
733 tcg_gen_xor_i32(tmp, tmp, tmp2);
734 dead_tmp(tmp2);
cb63669a 735 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
736 gen_set_label(inv);
737 break;
738 case 13: /* le: Z || N != V */
6fbe23d5 739 tmp = load_cpu_field(ZF);
cb63669a 740 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
741 dead_tmp(tmp);
742 tmp = load_cpu_field(VF);
6fbe23d5 743 tmp2 = load_cpu_field(NF);
d9ba4830
PB
744 tcg_gen_xor_i32(tmp, tmp, tmp2);
745 dead_tmp(tmp2);
cb63669a 746 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
747 break;
748 default:
749 fprintf(stderr, "Bad condition code 0x%x\n", cc);
750 abort();
751 }
752 dead_tmp(tmp);
753}
2c0262af 754
/* For each data-processing opcode (insn bits 24:21), nonzero when the
   op is logical and sets NZ via gen_logic_CC rather than an
   arithmetic flag helper. */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 773
d9ba4830
PB
774/* Set PC and Thumb state from an immediate address. */
775static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 776{
b26eefb6 777 TCGv tmp;
99c475ab 778
b26eefb6 779 s->is_jmp = DISAS_UPDATE;
d9ba4830 780 if (s->thumb != (addr & 1)) {
155c3eac 781 tmp = new_tmp();
d9ba4830
PB
782 tcg_gen_movi_i32(tmp, addr & 1);
783 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
155c3eac 784 dead_tmp(tmp);
d9ba4830 785 }
155c3eac 786 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
787}
788
789/* Set PC and Thumb state from var. var is marked as dead. */
790static inline void gen_bx(DisasContext *s, TCGv var)
791{
d9ba4830 792 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
793 tcg_gen_andi_i32(cpu_R[15], var, ~1);
794 tcg_gen_andi_i32(var, var, 1);
795 store_cpu_field(var, thumb);
d9ba4830
PB
796}
797
21aeb343
JR
798/* Variant of store_reg which uses branch&exchange logic when storing
799 to r15 in ARM architecture v7 and above. The source must be a temporary
800 and will be marked as dead. */
801static inline void store_reg_bx(CPUState *env, DisasContext *s,
802 int reg, TCGv var)
803{
804 if (reg == 15 && ENABLE_ARCH_7) {
805 gen_bx(s, var);
806 } else {
807 store_reg(s, reg, var);
808 }
809}
810
b0109805
PB
811static inline TCGv gen_ld8s(TCGv addr, int index)
812{
813 TCGv tmp = new_tmp();
814 tcg_gen_qemu_ld8s(tmp, addr, index);
815 return tmp;
816}
817static inline TCGv gen_ld8u(TCGv addr, int index)
818{
819 TCGv tmp = new_tmp();
820 tcg_gen_qemu_ld8u(tmp, addr, index);
821 return tmp;
822}
823static inline TCGv gen_ld16s(TCGv addr, int index)
824{
825 TCGv tmp = new_tmp();
826 tcg_gen_qemu_ld16s(tmp, addr, index);
827 return tmp;
828}
829static inline TCGv gen_ld16u(TCGv addr, int index)
830{
831 TCGv tmp = new_tmp();
832 tcg_gen_qemu_ld16u(tmp, addr, index);
833 return tmp;
834}
835static inline TCGv gen_ld32(TCGv addr, int index)
836{
837 TCGv tmp = new_tmp();
838 tcg_gen_qemu_ld32u(tmp, addr, index);
839 return tmp;
840}
841static inline void gen_st8(TCGv val, TCGv addr, int index)
842{
843 tcg_gen_qemu_st8(val, addr, index);
844 dead_tmp(val);
845}
846static inline void gen_st16(TCGv val, TCGv addr, int index)
847{
848 tcg_gen_qemu_st16(val, addr, index);
849 dead_tmp(val);
850}
851static inline void gen_st32(TCGv val, TCGv addr, int index)
852{
853 tcg_gen_qemu_st32(val, addr, index);
854 dead_tmp(val);
855}
b5ff1b31 856
2c0262af
FB
857static inline void gen_movl_T0_reg(DisasContext *s, int reg)
858{
b26eefb6 859 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
860}
861
862static inline void gen_movl_T1_reg(DisasContext *s, int reg)
863{
b26eefb6 864 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
865}
866
5e3f878a
PB
867static inline void gen_set_pc_im(uint32_t val)
868{
155c3eac 869 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
870}
871
2c0262af
FB
872static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
873{
b26eefb6
PB
874 TCGv tmp;
875 if (reg == 15) {
876 tmp = new_tmp();
877 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
878 } else {
879 tmp = cpu_T[t];
880 }
155c3eac 881 tcg_gen_mov_i32(cpu_R[reg], tmp);
2c0262af 882 if (reg == 15) {
b26eefb6 883 dead_tmp(tmp);
2c0262af
FB
884 s->is_jmp = DISAS_JUMP;
885 }
886}
887
888static inline void gen_movl_reg_T0(DisasContext *s, int reg)
889{
890 gen_movl_reg_TN(s, reg, 0);
891}
892
893static inline void gen_movl_reg_T1(DisasContext *s, int reg)
894{
895 gen_movl_reg_TN(s, reg, 1);
896}
897
b5ff1b31
FB
898/* Force a TB lookup after an instruction that changes the CPU state. */
899static inline void gen_lookup_tb(DisasContext *s)
900{
901 gen_op_movl_T0_im(s->pc);
902 gen_movl_reg_T0(s, 15);
903 s->is_jmp = DISAS_UPDATE;
904}
905
b0109805
PB
906static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
907 TCGv var)
2c0262af 908{
1e8d4eec 909 int val, rm, shift, shiftop;
b26eefb6 910 TCGv offset;
2c0262af
FB
911
912 if (!(insn & (1 << 25))) {
913 /* immediate */
914 val = insn & 0xfff;
915 if (!(insn & (1 << 23)))
916 val = -val;
537730b9 917 if (val != 0)
b0109805 918 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
919 } else {
920 /* shift/register */
921 rm = (insn) & 0xf;
922 shift = (insn >> 7) & 0x1f;
1e8d4eec 923 shiftop = (insn >> 5) & 3;
b26eefb6 924 offset = load_reg(s, rm);
9a119ff6 925 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 926 if (!(insn & (1 << 23)))
b0109805 927 tcg_gen_sub_i32(var, var, offset);
2c0262af 928 else
b0109805 929 tcg_gen_add_i32(var, var, offset);
b26eefb6 930 dead_tmp(offset);
2c0262af
FB
931 }
932}
933
191f9a93 934static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 935 int extra, TCGv var)
2c0262af
FB
936{
937 int val, rm;
b26eefb6 938 TCGv offset;
3b46e624 939
2c0262af
FB
940 if (insn & (1 << 22)) {
941 /* immediate */
942 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
943 if (!(insn & (1 << 23)))
944 val = -val;
18acad92 945 val += extra;
537730b9 946 if (val != 0)
b0109805 947 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
948 } else {
949 /* register */
191f9a93 950 if (extra)
b0109805 951 tcg_gen_addi_i32(var, var, extra);
2c0262af 952 rm = (insn) & 0xf;
b26eefb6 953 offset = load_reg(s, rm);
2c0262af 954 if (!(insn & (1 << 23)))
b0109805 955 tcg_gen_sub_i32(var, var, offset);
2c0262af 956 else
b0109805 957 tcg_gen_add_i32(var, var, offset);
b26eefb6 958 dead_tmp(offset);
2c0262af
FB
959 }
960}
961
4373f3ce
PB
962#define VFP_OP2(name) \
963static inline void gen_vfp_##name(int dp) \
964{ \
965 if (dp) \
966 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
967 else \
968 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
969}
970
4373f3ce
PB
971VFP_OP2(add)
972VFP_OP2(sub)
973VFP_OP2(mul)
974VFP_OP2(div)
975
976#undef VFP_OP2
977
978static inline void gen_vfp_abs(int dp)
979{
980 if (dp)
981 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
982 else
983 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
984}
985
986static inline void gen_vfp_neg(int dp)
987{
988 if (dp)
989 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
990 else
991 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
992}
993
994static inline void gen_vfp_sqrt(int dp)
995{
996 if (dp)
997 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
998 else
999 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1000}
1001
1002static inline void gen_vfp_cmp(int dp)
1003{
1004 if (dp)
1005 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1006 else
1007 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1008}
1009
1010static inline void gen_vfp_cmpe(int dp)
1011{
1012 if (dp)
1013 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1014 else
1015 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1016}
1017
1018static inline void gen_vfp_F1_ld0(int dp)
1019{
1020 if (dp)
5b340b51 1021 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1022 else
5b340b51 1023 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1024}
1025
1026static inline void gen_vfp_uito(int dp)
1027{
1028 if (dp)
1029 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1030 else
1031 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1032}
1033
1034static inline void gen_vfp_sito(int dp)
1035{
1036 if (dp)
66230e0d 1037 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 1038 else
66230e0d 1039 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
1040}
1041
1042static inline void gen_vfp_toui(int dp)
1043{
1044 if (dp)
1045 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1046 else
1047 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1048}
1049
1050static inline void gen_vfp_touiz(int dp)
1051{
1052 if (dp)
1053 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1054 else
1055 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1056}
1057
1058static inline void gen_vfp_tosi(int dp)
1059{
1060 if (dp)
1061 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1062 else
1063 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1064}
1065
1066static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1067{
1068 if (dp)
4373f3ce 1069 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1070 else
4373f3ce
PB
1071 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1072}
1073
1074#define VFP_GEN_FIX(name) \
1075static inline void gen_vfp_##name(int dp, int shift) \
1076{ \
1077 if (dp) \
1078 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1079 else \
1080 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
9ee6e8bb 1081}
4373f3ce
PB
1082VFP_GEN_FIX(tosh)
1083VFP_GEN_FIX(tosl)
1084VFP_GEN_FIX(touh)
1085VFP_GEN_FIX(toul)
1086VFP_GEN_FIX(shto)
1087VFP_GEN_FIX(slto)
1088VFP_GEN_FIX(uhto)
1089VFP_GEN_FIX(ulto)
1090#undef VFP_GEN_FIX
9ee6e8bb 1091
b5ff1b31
FB
1092static inline void gen_vfp_ld(DisasContext *s, int dp)
1093{
1094 if (dp)
4373f3ce 1095 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1096 else
4373f3ce 1097 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1098}
1099
1100static inline void gen_vfp_st(DisasContext *s, int dp)
1101{
1102 if (dp)
4373f3ce 1103 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1104 else
4373f3ce 1105 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1106}
1107
8e96005d
FB
1108static inline long
1109vfp_reg_offset (int dp, int reg)
1110{
1111 if (dp)
1112 return offsetof(CPUARMState, vfp.regs[reg]);
1113 else if (reg & 1) {
1114 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1115 + offsetof(CPU_DoubleU, l.upper);
1116 } else {
1117 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1118 + offsetof(CPU_DoubleU, l.lower);
1119 }
1120}
9ee6e8bb
PB
1121
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each NEON D register is two 32-bit single-precision slots.  */
    return vfp_reg_offset(0, reg * 2 + n);
}
1131
ad69471c
PB
/* FIXME: Remove these.  Legacy accessors that alias the NEON scratch
   values onto the generic cpu_T scratch registers.  */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
9ee6e8bb 1139
8f8e3aa4
PB
1140static TCGv neon_load_reg(int reg, int pass)
1141{
1142 TCGv tmp = new_tmp();
1143 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1144 return tmp;
1145}
1146
1147static void neon_store_reg(int reg, int pass, TCGv var)
1148{
1149 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1150 dead_tmp(var);
1151}
1152
a7812ae4 1153static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1154{
1155 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1156}
1157
a7812ae4 1158static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1159{
1160 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1161}
1162
4373f3ce
PB
/* Float loads/stores are plain integer moves of the same width.  */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1167
b7bcbe95
FB
1168static inline void gen_mov_F0_vreg(int dp, int reg)
1169{
1170 if (dp)
4373f3ce 1171 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1172 else
4373f3ce 1173 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1174}
1175
1176static inline void gen_mov_F1_vreg(int dp, int reg)
1177{
1178 if (dp)
4373f3ce 1179 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1180 else
4373f3ce 1181 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1182}
1183
1184static inline void gen_mov_vreg_F0(int dp, int reg)
1185{
1186 if (dp)
4373f3ce 1187 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1188 else
4373f3ce 1189 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1190}
1191
18c9b560
AZ
1192#define ARM_CP_RW_BIT (1 << 20)
1193
a7812ae4 1194static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1195{
1196 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1197}
1198
a7812ae4 1199static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1200{
1201 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1202}
1203
1204static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
1205{
1206 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1207}
1208
1209static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
1210{
1211 tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1212}
1213
1214static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
1215{
1216 tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1217}
1218
1219static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1220{
1221 iwmmxt_store_reg(cpu_M0, rn);
1222}
1223
1224static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1225{
1226 iwmmxt_load_reg(cpu_M0, rn);
1227}
1228
1229static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1230{
1231 iwmmxt_load_reg(cpu_V1, rn);
1232 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1233}
1234
1235static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1236{
1237 iwmmxt_load_reg(cpu_V1, rn);
1238 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1239}
1240
1241static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1242{
1243 iwmmxt_load_reg(cpu_V1, rn);
1244 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1245}
1246
/* Emit an iwMMXt helper call computing M0 = op(M0, wRn).  */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (it touches CPU
   state, e.g. saturation/condition flags).  */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long variants of an env helper.  */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary env helper operating on M0 in place.  */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1271
1272IWMMXT_OP(maddsq)
1273IWMMXT_OP(madduq)
1274IWMMXT_OP(sadb)
1275IWMMXT_OP(sadw)
1276IWMMXT_OP(mulslw)
1277IWMMXT_OP(mulshw)
1278IWMMXT_OP(mululw)
1279IWMMXT_OP(muluhw)
1280IWMMXT_OP(macsw)
1281IWMMXT_OP(macuw)
1282
1283IWMMXT_OP_ENV_SIZE(unpackl)
1284IWMMXT_OP_ENV_SIZE(unpackh)
1285
1286IWMMXT_OP_ENV1(unpacklub)
1287IWMMXT_OP_ENV1(unpackluw)
1288IWMMXT_OP_ENV1(unpacklul)
1289IWMMXT_OP_ENV1(unpackhub)
1290IWMMXT_OP_ENV1(unpackhuw)
1291IWMMXT_OP_ENV1(unpackhul)
1292IWMMXT_OP_ENV1(unpacklsb)
1293IWMMXT_OP_ENV1(unpacklsw)
1294IWMMXT_OP_ENV1(unpacklsl)
1295IWMMXT_OP_ENV1(unpackhsb)
1296IWMMXT_OP_ENV1(unpackhsw)
1297IWMMXT_OP_ENV1(unpackhsl)
1298
1299IWMMXT_OP_ENV_SIZE(cmpeq)
1300IWMMXT_OP_ENV_SIZE(cmpgtu)
1301IWMMXT_OP_ENV_SIZE(cmpgts)
1302
1303IWMMXT_OP_ENV_SIZE(mins)
1304IWMMXT_OP_ENV_SIZE(minu)
1305IWMMXT_OP_ENV_SIZE(maxs)
1306IWMMXT_OP_ENV_SIZE(maxu)
1307
1308IWMMXT_OP_ENV_SIZE(subn)
1309IWMMXT_OP_ENV_SIZE(addn)
1310IWMMXT_OP_ENV_SIZE(subu)
1311IWMMXT_OP_ENV_SIZE(addu)
1312IWMMXT_OP_ENV_SIZE(subs)
1313IWMMXT_OP_ENV_SIZE(adds)
1314
1315IWMMXT_OP_ENV(avgb0)
1316IWMMXT_OP_ENV(avgb1)
1317IWMMXT_OP_ENV(avgw0)
1318IWMMXT_OP_ENV(avgw1)
1319
1320IWMMXT_OP(msadb)
1321
1322IWMMXT_OP_ENV(packuw)
1323IWMMXT_OP_ENV(packul)
1324IWMMXT_OP_ENV(packuq)
1325IWMMXT_OP_ENV(packsw)
1326IWMMXT_OP_ENV(packsl)
1327IWMMXT_OP_ENV(packsq)
1328
1329static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1330{
1331 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1332}
1333
1334static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1335{
1336 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1337}
1338
1339static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1340{
1341 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1342}
1343
1344static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
1345{
1346 iwmmxt_load_reg(cpu_V1, rn);
1347 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
1348}
1349
1350static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1351{
1352 TCGv tmp = tcg_const_i32(shift);
1353 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1354}
1355
1356static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
1357{
1358 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1359 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1360 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
1361}
1362
1363static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
1364{
1365 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1366 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1367 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
1368}
1369
1370static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
1371{
1372 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1373 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1374 if (mask != ~0u)
1375 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
1376}
1377
1378static void gen_op_iwmmxt_set_mup(void)
1379{
1380 TCGv tmp;
1381 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1382 tcg_gen_ori_i32(tmp, tmp, 2);
1383 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1384}
1385
1386static void gen_op_iwmmxt_set_cup(void)
1387{
1388 TCGv tmp;
1389 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1390 tcg_gen_ori_i32(tmp, tmp, 1);
1391 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1392}
1393
1394static void gen_op_iwmmxt_setpsr_nz(void)
1395{
1396 TCGv tmp = new_tmp();
1397 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1398 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1399}
1400
1401static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1402{
1403 iwmmxt_load_reg(cpu_V1, rn);
86831435 1404 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1405 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1406}
1407
1408
1409static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1410{
1411 iwmmxt_load_reg(cpu_V0, rn);
1412 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1413 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1414 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1415}
1416
1417static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1418{
36aa55dc 1419 tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
e677137d
PB
1420 iwmmxt_store_reg(cpu_V0, rn);
1421}
1422
18c9b560
AZ
1423static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1424{
1425 int rd;
1426 uint32_t offset;
1427
1428 rd = (insn >> 16) & 0xf;
1429 gen_movl_T1_reg(s, rd);
1430
1431 offset = (insn & 0xff) << ((insn >> 7) & 2);
1432 if (insn & (1 << 24)) {
1433 /* Pre indexed */
1434 if (insn & (1 << 23))
1435 gen_op_addl_T1_im(offset);
1436 else
1437 gen_op_addl_T1_im(-offset);
1438
1439 if (insn & (1 << 21))
1440 gen_movl_reg_T1(s, rd);
1441 } else if (insn & (1 << 21)) {
1442 /* Post indexed */
1443 if (insn & (1 << 23))
1444 gen_op_movl_T0_im(offset);
1445 else
1446 gen_op_movl_T0_im(- offset);
1447 gen_op_addl_T0_T1();
1448 gen_movl_reg_T0(s, rd);
1449 } else if (!(insn & (1 << 23)))
1450 return 1;
1451 return 0;
1452}
1453
1454static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1455{
1456 int rd = (insn >> 0) & 0xf;
1457
1458 if (insn & (1 << 8))
1459 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1460 return 1;
1461 else
1462 gen_op_iwmmxt_movl_T0_wCx(rd);
1463 else
e677137d 1464 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1465
1466 gen_op_movl_T1_im(mask);
1467 gen_op_andl_T0_T1();
1468 return 0;
1469}
1470
1471/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1472 (ie. an undefined instruction). */
1473static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1474{
1475 int rd, wrd;
1476 int rdhi, rdlo, rd0, rd1, i;
b0109805 1477 TCGv tmp;
18c9b560
AZ
1478
1479 if ((insn & 0x0e000e00) == 0x0c000000) {
1480 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1481 wrd = insn & 0xf;
1482 rdlo = (insn >> 12) & 0xf;
1483 rdhi = (insn >> 16) & 0xf;
1484 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1485 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1486 gen_movl_reg_T0(s, rdlo);
1487 gen_movl_reg_T1(s, rdhi);
1488 } else { /* TMCRR */
1489 gen_movl_T0_reg(s, rdlo);
1490 gen_movl_T1_reg(s, rdhi);
e677137d 1491 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1492 gen_op_iwmmxt_set_mup();
1493 }
1494 return 0;
1495 }
1496
1497 wrd = (insn >> 12) & 0xf;
1498 if (gen_iwmmxt_address(s, insn))
1499 return 1;
1500 if (insn & ARM_CP_RW_BIT) {
1501 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1502 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1503 tcg_gen_mov_i32(cpu_T[0], tmp);
1504 dead_tmp(tmp);
18c9b560
AZ
1505 gen_op_iwmmxt_movl_wCx_T0(wrd);
1506 } else {
e677137d
PB
1507 i = 1;
1508 if (insn & (1 << 8)) {
1509 if (insn & (1 << 22)) { /* WLDRD */
1510 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1511 i = 0;
1512 } else { /* WLDRW wRd */
1513 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1514 }
1515 } else {
1516 if (insn & (1 << 22)) { /* WLDRH */
1517 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1518 } else { /* WLDRB */
1519 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1520 }
1521 }
1522 if (i) {
1523 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1524 dead_tmp(tmp);
1525 }
18c9b560
AZ
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 }
1528 } else {
1529 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1530 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1531 tmp = new_tmp();
1532 tcg_gen_mov_i32(tmp, cpu_T[0]);
1533 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1534 } else {
1535 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1536 tmp = new_tmp();
1537 if (insn & (1 << 8)) {
1538 if (insn & (1 << 22)) { /* WSTRD */
1539 dead_tmp(tmp);
1540 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1541 } else { /* WSTRW wRd */
1542 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1543 gen_st32(tmp, cpu_T[1], IS_USER(s));
1544 }
1545 } else {
1546 if (insn & (1 << 22)) { /* WSTRH */
1547 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1548 gen_st16(tmp, cpu_T[1], IS_USER(s));
1549 } else { /* WSTRB */
1550 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1551 gen_st8(tmp, cpu_T[1], IS_USER(s));
1552 }
1553 }
18c9b560
AZ
1554 }
1555 }
1556 return 0;
1557 }
1558
1559 if ((insn & 0x0f000000) != 0x0e000000)
1560 return 1;
1561
1562 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1563 case 0x000: /* WOR */
1564 wrd = (insn >> 12) & 0xf;
1565 rd0 = (insn >> 0) & 0xf;
1566 rd1 = (insn >> 16) & 0xf;
1567 gen_op_iwmmxt_movq_M0_wRn(rd0);
1568 gen_op_iwmmxt_orq_M0_wRn(rd1);
1569 gen_op_iwmmxt_setpsr_nz();
1570 gen_op_iwmmxt_movq_wRn_M0(wrd);
1571 gen_op_iwmmxt_set_mup();
1572 gen_op_iwmmxt_set_cup();
1573 break;
1574 case 0x011: /* TMCR */
1575 if (insn & 0xf)
1576 return 1;
1577 rd = (insn >> 12) & 0xf;
1578 wrd = (insn >> 16) & 0xf;
1579 switch (wrd) {
1580 case ARM_IWMMXT_wCID:
1581 case ARM_IWMMXT_wCASF:
1582 break;
1583 case ARM_IWMMXT_wCon:
1584 gen_op_iwmmxt_set_cup();
1585 /* Fall through. */
1586 case ARM_IWMMXT_wCSSF:
1587 gen_op_iwmmxt_movl_T0_wCx(wrd);
1588 gen_movl_T1_reg(s, rd);
1589 gen_op_bicl_T0_T1();
1590 gen_op_iwmmxt_movl_wCx_T0(wrd);
1591 break;
1592 case ARM_IWMMXT_wCGR0:
1593 case ARM_IWMMXT_wCGR1:
1594 case ARM_IWMMXT_wCGR2:
1595 case ARM_IWMMXT_wCGR3:
1596 gen_op_iwmmxt_set_cup();
1597 gen_movl_reg_T0(s, rd);
1598 gen_op_iwmmxt_movl_wCx_T0(wrd);
1599 break;
1600 default:
1601 return 1;
1602 }
1603 break;
1604 case 0x100: /* WXOR */
1605 wrd = (insn >> 12) & 0xf;
1606 rd0 = (insn >> 0) & 0xf;
1607 rd1 = (insn >> 16) & 0xf;
1608 gen_op_iwmmxt_movq_M0_wRn(rd0);
1609 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1610 gen_op_iwmmxt_setpsr_nz();
1611 gen_op_iwmmxt_movq_wRn_M0(wrd);
1612 gen_op_iwmmxt_set_mup();
1613 gen_op_iwmmxt_set_cup();
1614 break;
1615 case 0x111: /* TMRC */
1616 if (insn & 0xf)
1617 return 1;
1618 rd = (insn >> 12) & 0xf;
1619 wrd = (insn >> 16) & 0xf;
1620 gen_op_iwmmxt_movl_T0_wCx(wrd);
1621 gen_movl_reg_T0(s, rd);
1622 break;
1623 case 0x300: /* WANDN */
1624 wrd = (insn >> 12) & 0xf;
1625 rd0 = (insn >> 0) & 0xf;
1626 rd1 = (insn >> 16) & 0xf;
1627 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1628 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1629 gen_op_iwmmxt_andq_M0_wRn(rd1);
1630 gen_op_iwmmxt_setpsr_nz();
1631 gen_op_iwmmxt_movq_wRn_M0(wrd);
1632 gen_op_iwmmxt_set_mup();
1633 gen_op_iwmmxt_set_cup();
1634 break;
1635 case 0x200: /* WAND */
1636 wrd = (insn >> 12) & 0xf;
1637 rd0 = (insn >> 0) & 0xf;
1638 rd1 = (insn >> 16) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0);
1640 gen_op_iwmmxt_andq_M0_wRn(rd1);
1641 gen_op_iwmmxt_setpsr_nz();
1642 gen_op_iwmmxt_movq_wRn_M0(wrd);
1643 gen_op_iwmmxt_set_mup();
1644 gen_op_iwmmxt_set_cup();
1645 break;
1646 case 0x810: case 0xa10: /* WMADD */
1647 wrd = (insn >> 12) & 0xf;
1648 rd0 = (insn >> 0) & 0xf;
1649 rd1 = (insn >> 16) & 0xf;
1650 gen_op_iwmmxt_movq_M0_wRn(rd0);
1651 if (insn & (1 << 21))
1652 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1653 else
1654 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 break;
1658 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1659 wrd = (insn >> 12) & 0xf;
1660 rd0 = (insn >> 16) & 0xf;
1661 rd1 = (insn >> 0) & 0xf;
1662 gen_op_iwmmxt_movq_M0_wRn(rd0);
1663 switch ((insn >> 22) & 3) {
1664 case 0:
1665 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1666 break;
1667 case 1:
1668 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1669 break;
1670 case 2:
1671 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1672 break;
1673 case 3:
1674 return 1;
1675 }
1676 gen_op_iwmmxt_movq_wRn_M0(wrd);
1677 gen_op_iwmmxt_set_mup();
1678 gen_op_iwmmxt_set_cup();
1679 break;
1680 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1681 wrd = (insn >> 12) & 0xf;
1682 rd0 = (insn >> 16) & 0xf;
1683 rd1 = (insn >> 0) & 0xf;
1684 gen_op_iwmmxt_movq_M0_wRn(rd0);
1685 switch ((insn >> 22) & 3) {
1686 case 0:
1687 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1688 break;
1689 case 1:
1690 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1691 break;
1692 case 2:
1693 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1694 break;
1695 case 3:
1696 return 1;
1697 }
1698 gen_op_iwmmxt_movq_wRn_M0(wrd);
1699 gen_op_iwmmxt_set_mup();
1700 gen_op_iwmmxt_set_cup();
1701 break;
1702 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1703 wrd = (insn >> 12) & 0xf;
1704 rd0 = (insn >> 16) & 0xf;
1705 rd1 = (insn >> 0) & 0xf;
1706 gen_op_iwmmxt_movq_M0_wRn(rd0);
1707 if (insn & (1 << 22))
1708 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1709 else
1710 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1711 if (!(insn & (1 << 20)))
1712 gen_op_iwmmxt_addl_M0_wRn(wrd);
1713 gen_op_iwmmxt_movq_wRn_M0(wrd);
1714 gen_op_iwmmxt_set_mup();
1715 break;
1716 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1717 wrd = (insn >> 12) & 0xf;
1718 rd0 = (insn >> 16) & 0xf;
1719 rd1 = (insn >> 0) & 0xf;
1720 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1721 if (insn & (1 << 21)) {
1722 if (insn & (1 << 20))
1723 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1724 else
1725 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1726 } else {
1727 if (insn & (1 << 20))
1728 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1729 else
1730 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1731 }
18c9b560
AZ
1732 gen_op_iwmmxt_movq_wRn_M0(wrd);
1733 gen_op_iwmmxt_set_mup();
1734 break;
1735 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1736 wrd = (insn >> 12) & 0xf;
1737 rd0 = (insn >> 16) & 0xf;
1738 rd1 = (insn >> 0) & 0xf;
1739 gen_op_iwmmxt_movq_M0_wRn(rd0);
1740 if (insn & (1 << 21))
1741 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1742 else
1743 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1744 if (!(insn & (1 << 20))) {
e677137d
PB
1745 iwmmxt_load_reg(cpu_V1, wrd);
1746 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1747 }
1748 gen_op_iwmmxt_movq_wRn_M0(wrd);
1749 gen_op_iwmmxt_set_mup();
1750 break;
1751 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1752 wrd = (insn >> 12) & 0xf;
1753 rd0 = (insn >> 16) & 0xf;
1754 rd1 = (insn >> 0) & 0xf;
1755 gen_op_iwmmxt_movq_M0_wRn(rd0);
1756 switch ((insn >> 22) & 3) {
1757 case 0:
1758 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1759 break;
1760 case 1:
1761 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1762 break;
1763 case 2:
1764 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1765 break;
1766 case 3:
1767 return 1;
1768 }
1769 gen_op_iwmmxt_movq_wRn_M0(wrd);
1770 gen_op_iwmmxt_set_mup();
1771 gen_op_iwmmxt_set_cup();
1772 break;
1773 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1774 wrd = (insn >> 12) & 0xf;
1775 rd0 = (insn >> 16) & 0xf;
1776 rd1 = (insn >> 0) & 0xf;
1777 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1778 if (insn & (1 << 22)) {
1779 if (insn & (1 << 20))
1780 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1781 else
1782 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1783 } else {
1784 if (insn & (1 << 20))
1785 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1786 else
1787 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1788 }
18c9b560
AZ
1789 gen_op_iwmmxt_movq_wRn_M0(wrd);
1790 gen_op_iwmmxt_set_mup();
1791 gen_op_iwmmxt_set_cup();
1792 break;
1793 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1794 wrd = (insn >> 12) & 0xf;
1795 rd0 = (insn >> 16) & 0xf;
1796 rd1 = (insn >> 0) & 0xf;
1797 gen_op_iwmmxt_movq_M0_wRn(rd0);
1798 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1799 gen_op_movl_T1_im(7);
1800 gen_op_andl_T0_T1();
1801 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1802 gen_op_iwmmxt_movq_wRn_M0(wrd);
1803 gen_op_iwmmxt_set_mup();
1804 break;
1805 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1806 rd = (insn >> 12) & 0xf;
1807 wrd = (insn >> 16) & 0xf;
1808 gen_movl_T0_reg(s, rd);
1809 gen_op_iwmmxt_movq_M0_wRn(wrd);
1810 switch ((insn >> 6) & 3) {
1811 case 0:
1812 gen_op_movl_T1_im(0xff);
1813 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1814 break;
1815 case 1:
1816 gen_op_movl_T1_im(0xffff);
1817 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1818 break;
1819 case 2:
1820 gen_op_movl_T1_im(0xffffffff);
1821 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1822 break;
1823 case 3:
1824 return 1;
1825 }
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 break;
1829 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1830 rd = (insn >> 12) & 0xf;
1831 wrd = (insn >> 16) & 0xf;
1832 if (rd == 15)
1833 return 1;
1834 gen_op_iwmmxt_movq_M0_wRn(wrd);
1835 switch ((insn >> 22) & 3) {
1836 case 0:
1837 if (insn & 8)
1838 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1839 else {
e677137d 1840 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1841 }
1842 break;
1843 case 1:
1844 if (insn & 8)
1845 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1846 else {
e677137d 1847 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1848 }
1849 break;
1850 case 2:
e677137d 1851 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1852 break;
1853 case 3:
1854 return 1;
1855 }
b26eefb6 1856 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1857 break;
1858 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1859 if ((insn & 0x000ff008) != 0x0003f000)
1860 return 1;
1861 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1862 switch ((insn >> 22) & 3) {
1863 case 0:
1864 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1865 break;
1866 case 1:
1867 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1868 break;
1869 case 2:
1870 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1871 break;
1872 case 3:
1873 return 1;
1874 }
1875 gen_op_shll_T1_im(28);
d9ba4830 1876 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1877 break;
1878 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1879 rd = (insn >> 12) & 0xf;
1880 wrd = (insn >> 16) & 0xf;
1881 gen_movl_T0_reg(s, rd);
1882 switch ((insn >> 6) & 3) {
1883 case 0:
e677137d 1884 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1885 break;
1886 case 1:
e677137d 1887 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1888 break;
1889 case 2:
e677137d 1890 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1891 break;
1892 case 3:
1893 return 1;
1894 }
1895 gen_op_iwmmxt_movq_wRn_M0(wrd);
1896 gen_op_iwmmxt_set_mup();
1897 break;
1898 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1899 if ((insn & 0x000ff00f) != 0x0003f000)
1900 return 1;
1901 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1902 switch ((insn >> 22) & 3) {
1903 case 0:
1904 for (i = 0; i < 7; i ++) {
1905 gen_op_shll_T1_im(4);
1906 gen_op_andl_T0_T1();
1907 }
1908 break;
1909 case 1:
1910 for (i = 0; i < 3; i ++) {
1911 gen_op_shll_T1_im(8);
1912 gen_op_andl_T0_T1();
1913 }
1914 break;
1915 case 2:
1916 gen_op_shll_T1_im(16);
1917 gen_op_andl_T0_T1();
1918 break;
1919 case 3:
1920 return 1;
1921 }
d9ba4830 1922 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1923 break;
1924 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1925 wrd = (insn >> 12) & 0xf;
1926 rd0 = (insn >> 16) & 0xf;
1927 gen_op_iwmmxt_movq_M0_wRn(rd0);
1928 switch ((insn >> 22) & 3) {
1929 case 0:
e677137d 1930 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1931 break;
1932 case 1:
e677137d 1933 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1934 break;
1935 case 2:
e677137d 1936 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1937 break;
1938 case 3:
1939 return 1;
1940 }
1941 gen_op_iwmmxt_movq_wRn_M0(wrd);
1942 gen_op_iwmmxt_set_mup();
1943 break;
1944 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1945 if ((insn & 0x000ff00f) != 0x0003f000)
1946 return 1;
1947 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1948 switch ((insn >> 22) & 3) {
1949 case 0:
1950 for (i = 0; i < 7; i ++) {
1951 gen_op_shll_T1_im(4);
1952 gen_op_orl_T0_T1();
1953 }
1954 break;
1955 case 1:
1956 for (i = 0; i < 3; i ++) {
1957 gen_op_shll_T1_im(8);
1958 gen_op_orl_T0_T1();
1959 }
1960 break;
1961 case 2:
1962 gen_op_shll_T1_im(16);
1963 gen_op_orl_T0_T1();
1964 break;
1965 case 3:
1966 return 1;
1967 }
d9ba4830 1968 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1969 break;
1970 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1971 rd = (insn >> 12) & 0xf;
1972 rd0 = (insn >> 16) & 0xf;
1973 if ((insn & 0xf) != 0)
1974 return 1;
1975 gen_op_iwmmxt_movq_M0_wRn(rd0);
1976 switch ((insn >> 22) & 3) {
1977 case 0:
e677137d 1978 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
1979 break;
1980 case 1:
e677137d 1981 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
1982 break;
1983 case 2:
e677137d 1984 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
18c9b560
AZ
1985 break;
1986 case 3:
1987 return 1;
1988 }
1989 gen_movl_reg_T0(s, rd);
1990 break;
1991 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1992 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1993 wrd = (insn >> 12) & 0xf;
1994 rd0 = (insn >> 16) & 0xf;
1995 rd1 = (insn >> 0) & 0xf;
1996 gen_op_iwmmxt_movq_M0_wRn(rd0);
1997 switch ((insn >> 22) & 3) {
1998 case 0:
1999 if (insn & (1 << 21))
2000 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2001 else
2002 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2003 break;
2004 case 1:
2005 if (insn & (1 << 21))
2006 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2007 else
2008 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2009 break;
2010 case 2:
2011 if (insn & (1 << 21))
2012 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2013 else
2014 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2015 break;
2016 case 3:
2017 return 1;
2018 }
2019 gen_op_iwmmxt_movq_wRn_M0(wrd);
2020 gen_op_iwmmxt_set_mup();
2021 gen_op_iwmmxt_set_cup();
2022 break;
2023 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2024 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2025 wrd = (insn >> 12) & 0xf;
2026 rd0 = (insn >> 16) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0);
2028 switch ((insn >> 22) & 3) {
2029 case 0:
2030 if (insn & (1 << 21))
2031 gen_op_iwmmxt_unpacklsb_M0();
2032 else
2033 gen_op_iwmmxt_unpacklub_M0();
2034 break;
2035 case 1:
2036 if (insn & (1 << 21))
2037 gen_op_iwmmxt_unpacklsw_M0();
2038 else
2039 gen_op_iwmmxt_unpackluw_M0();
2040 break;
2041 case 2:
2042 if (insn & (1 << 21))
2043 gen_op_iwmmxt_unpacklsl_M0();
2044 else
2045 gen_op_iwmmxt_unpacklul_M0();
2046 break;
2047 case 3:
2048 return 1;
2049 }
2050 gen_op_iwmmxt_movq_wRn_M0(wrd);
2051 gen_op_iwmmxt_set_mup();
2052 gen_op_iwmmxt_set_cup();
2053 break;
2054 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2055 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2056 wrd = (insn >> 12) & 0xf;
2057 rd0 = (insn >> 16) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0);
2059 switch ((insn >> 22) & 3) {
2060 case 0:
2061 if (insn & (1 << 21))
2062 gen_op_iwmmxt_unpackhsb_M0();
2063 else
2064 gen_op_iwmmxt_unpackhub_M0();
2065 break;
2066 case 1:
2067 if (insn & (1 << 21))
2068 gen_op_iwmmxt_unpackhsw_M0();
2069 else
2070 gen_op_iwmmxt_unpackhuw_M0();
2071 break;
2072 case 2:
2073 if (insn & (1 << 21))
2074 gen_op_iwmmxt_unpackhsl_M0();
2075 else
2076 gen_op_iwmmxt_unpackhul_M0();
2077 break;
2078 case 3:
2079 return 1;
2080 }
2081 gen_op_iwmmxt_movq_wRn_M0(wrd);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2084 break;
2085 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2086 case 0x214: case 0x614: case 0xa14: case 0xe14:
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
2090 if (gen_iwmmxt_shift(insn, 0xff))
2091 return 1;
2092 switch ((insn >> 22) & 3) {
2093 case 0:
2094 return 1;
2095 case 1:
e677137d 2096 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2097 break;
2098 case 2:
e677137d 2099 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2100 break;
2101 case 3:
e677137d 2102 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2103 break;
2104 }
2105 gen_op_iwmmxt_movq_wRn_M0(wrd);
2106 gen_op_iwmmxt_set_mup();
2107 gen_op_iwmmxt_set_cup();
2108 break;
2109 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2110 case 0x014: case 0x414: case 0x814: case 0xc14:
2111 wrd = (insn >> 12) & 0xf;
2112 rd0 = (insn >> 16) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
2114 if (gen_iwmmxt_shift(insn, 0xff))
2115 return 1;
2116 switch ((insn >> 22) & 3) {
2117 case 0:
2118 return 1;
2119 case 1:
e677137d 2120 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2121 break;
2122 case 2:
e677137d 2123 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2124 break;
2125 case 3:
e677137d 2126 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2127 break;
2128 }
2129 gen_op_iwmmxt_movq_wRn_M0(wrd);
2130 gen_op_iwmmxt_set_mup();
2131 gen_op_iwmmxt_set_cup();
2132 break;
2133 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2134 case 0x114: case 0x514: case 0x914: case 0xd14:
2135 wrd = (insn >> 12) & 0xf;
2136 rd0 = (insn >> 16) & 0xf;
2137 gen_op_iwmmxt_movq_M0_wRn(rd0);
2138 if (gen_iwmmxt_shift(insn, 0xff))
2139 return 1;
2140 switch ((insn >> 22) & 3) {
2141 case 0:
2142 return 1;
2143 case 1:
e677137d 2144 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2145 break;
2146 case 2:
e677137d 2147 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2148 break;
2149 case 3:
e677137d 2150 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2151 break;
2152 }
2153 gen_op_iwmmxt_movq_wRn_M0(wrd);
2154 gen_op_iwmmxt_set_mup();
2155 gen_op_iwmmxt_set_cup();
2156 break;
2157 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2158 case 0x314: case 0x714: case 0xb14: case 0xf14:
2159 wrd = (insn >> 12) & 0xf;
2160 rd0 = (insn >> 16) & 0xf;
2161 gen_op_iwmmxt_movq_M0_wRn(rd0);
2162 switch ((insn >> 22) & 3) {
2163 case 0:
2164 return 1;
2165 case 1:
2166 if (gen_iwmmxt_shift(insn, 0xf))
2167 return 1;
e677137d 2168 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2169 break;
2170 case 2:
2171 if (gen_iwmmxt_shift(insn, 0x1f))
2172 return 1;
e677137d 2173 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2174 break;
2175 case 3:
2176 if (gen_iwmmxt_shift(insn, 0x3f))
2177 return 1;
e677137d 2178 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2179 break;
2180 }
2181 gen_op_iwmmxt_movq_wRn_M0(wrd);
2182 gen_op_iwmmxt_set_mup();
2183 gen_op_iwmmxt_set_cup();
2184 break;
2185 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2186 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2187 wrd = (insn >> 12) & 0xf;
2188 rd0 = (insn >> 16) & 0xf;
2189 rd1 = (insn >> 0) & 0xf;
2190 gen_op_iwmmxt_movq_M0_wRn(rd0);
2191 switch ((insn >> 22) & 3) {
2192 case 0:
2193 if (insn & (1 << 21))
2194 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2195 else
2196 gen_op_iwmmxt_minub_M0_wRn(rd1);
2197 break;
2198 case 1:
2199 if (insn & (1 << 21))
2200 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2201 else
2202 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2203 break;
2204 case 2:
2205 if (insn & (1 << 21))
2206 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2207 else
2208 gen_op_iwmmxt_minul_M0_wRn(rd1);
2209 break;
2210 case 3:
2211 return 1;
2212 }
2213 gen_op_iwmmxt_movq_wRn_M0(wrd);
2214 gen_op_iwmmxt_set_mup();
2215 break;
2216 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2217 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2218 wrd = (insn >> 12) & 0xf;
2219 rd0 = (insn >> 16) & 0xf;
2220 rd1 = (insn >> 0) & 0xf;
2221 gen_op_iwmmxt_movq_M0_wRn(rd0);
2222 switch ((insn >> 22) & 3) {
2223 case 0:
2224 if (insn & (1 << 21))
2225 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2226 else
2227 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2228 break;
2229 case 1:
2230 if (insn & (1 << 21))
2231 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2232 else
2233 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2234 break;
2235 case 2:
2236 if (insn & (1 << 21))
2237 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2238 else
2239 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2240 break;
2241 case 3:
2242 return 1;
2243 }
2244 gen_op_iwmmxt_movq_wRn_M0(wrd);
2245 gen_op_iwmmxt_set_mup();
2246 break;
2247 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2248 case 0x402: case 0x502: case 0x602: case 0x702:
2249 wrd = (insn >> 12) & 0xf;
2250 rd0 = (insn >> 16) & 0xf;
2251 rd1 = (insn >> 0) & 0xf;
2252 gen_op_iwmmxt_movq_M0_wRn(rd0);
2253 gen_op_movl_T0_im((insn >> 20) & 3);
2254 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2255 gen_op_iwmmxt_movq_wRn_M0(wrd);
2256 gen_op_iwmmxt_set_mup();
2257 break;
2258 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2259 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2260 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2261 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2262 wrd = (insn >> 12) & 0xf;
2263 rd0 = (insn >> 16) & 0xf;
2264 rd1 = (insn >> 0) & 0xf;
2265 gen_op_iwmmxt_movq_M0_wRn(rd0);
2266 switch ((insn >> 20) & 0xf) {
2267 case 0x0:
2268 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2269 break;
2270 case 0x1:
2271 gen_op_iwmmxt_subub_M0_wRn(rd1);
2272 break;
2273 case 0x3:
2274 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2275 break;
2276 case 0x4:
2277 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2278 break;
2279 case 0x5:
2280 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2281 break;
2282 case 0x7:
2283 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2284 break;
2285 case 0x8:
2286 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2287 break;
2288 case 0x9:
2289 gen_op_iwmmxt_subul_M0_wRn(rd1);
2290 break;
2291 case 0xb:
2292 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2293 break;
2294 default:
2295 return 1;
2296 }
2297 gen_op_iwmmxt_movq_wRn_M0(wrd);
2298 gen_op_iwmmxt_set_mup();
2299 gen_op_iwmmxt_set_cup();
2300 break;
2301 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2302 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2303 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2304 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2305 wrd = (insn >> 12) & 0xf;
2306 rd0 = (insn >> 16) & 0xf;
2307 gen_op_iwmmxt_movq_M0_wRn(rd0);
2308 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2309 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2310 gen_op_iwmmxt_movq_wRn_M0(wrd);
2311 gen_op_iwmmxt_set_mup();
2312 gen_op_iwmmxt_set_cup();
2313 break;
2314 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2315 case 0x418: case 0x518: case 0x618: case 0x718:
2316 case 0x818: case 0x918: case 0xa18: case 0xb18:
2317 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2318 wrd = (insn >> 12) & 0xf;
2319 rd0 = (insn >> 16) & 0xf;
2320 rd1 = (insn >> 0) & 0xf;
2321 gen_op_iwmmxt_movq_M0_wRn(rd0);
2322 switch ((insn >> 20) & 0xf) {
2323 case 0x0:
2324 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2325 break;
2326 case 0x1:
2327 gen_op_iwmmxt_addub_M0_wRn(rd1);
2328 break;
2329 case 0x3:
2330 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2331 break;
2332 case 0x4:
2333 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2334 break;
2335 case 0x5:
2336 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2337 break;
2338 case 0x7:
2339 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2340 break;
2341 case 0x8:
2342 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2343 break;
2344 case 0x9:
2345 gen_op_iwmmxt_addul_M0_wRn(rd1);
2346 break;
2347 case 0xb:
2348 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2349 break;
2350 default:
2351 return 1;
2352 }
2353 gen_op_iwmmxt_movq_wRn_M0(wrd);
2354 gen_op_iwmmxt_set_mup();
2355 gen_op_iwmmxt_set_cup();
2356 break;
2357 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2358 case 0x408: case 0x508: case 0x608: case 0x708:
2359 case 0x808: case 0x908: case 0xa08: case 0xb08:
2360 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2361 wrd = (insn >> 12) & 0xf;
2362 rd0 = (insn >> 16) & 0xf;
2363 rd1 = (insn >> 0) & 0xf;
2364 gen_op_iwmmxt_movq_M0_wRn(rd0);
2365 if (!(insn & (1 << 20)))
2366 return 1;
2367 switch ((insn >> 22) & 3) {
2368 case 0:
2369 return 1;
2370 case 1:
2371 if (insn & (1 << 21))
2372 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2373 else
2374 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2375 break;
2376 case 2:
2377 if (insn & (1 << 21))
2378 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2379 else
2380 gen_op_iwmmxt_packul_M0_wRn(rd1);
2381 break;
2382 case 3:
2383 if (insn & (1 << 21))
2384 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2385 else
2386 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2387 break;
2388 }
2389 gen_op_iwmmxt_movq_wRn_M0(wrd);
2390 gen_op_iwmmxt_set_mup();
2391 gen_op_iwmmxt_set_cup();
2392 break;
2393 case 0x201: case 0x203: case 0x205: case 0x207:
2394 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2395 case 0x211: case 0x213: case 0x215: case 0x217:
2396 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2397 wrd = (insn >> 5) & 0xf;
2398 rd0 = (insn >> 12) & 0xf;
2399 rd1 = (insn >> 0) & 0xf;
2400 if (rd0 == 0xf || rd1 == 0xf)
2401 return 1;
2402 gen_op_iwmmxt_movq_M0_wRn(wrd);
2403 switch ((insn >> 16) & 0xf) {
2404 case 0x0: /* TMIA */
b26eefb6
PB
2405 gen_movl_T0_reg(s, rd0);
2406 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2407 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2408 break;
2409 case 0x8: /* TMIAPH */
b26eefb6
PB
2410 gen_movl_T0_reg(s, rd0);
2411 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2412 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2413 break;
2414 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2415 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2416 if (insn & (1 << 16))
2417 gen_op_shrl_T1_im(16);
2418 gen_op_movl_T0_T1();
b26eefb6 2419 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2420 if (insn & (1 << 17))
2421 gen_op_shrl_T1_im(16);
2422 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2423 break;
2424 default:
2425 return 1;
2426 }
2427 gen_op_iwmmxt_movq_wRn_M0(wrd);
2428 gen_op_iwmmxt_set_mup();
2429 break;
2430 default:
2431 return 1;
2432 }
2433
2434 return 0;
2435}
2436
2437/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2438 (ie. an undefined instruction). */
2439static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2440{
2441 int acc, rd0, rd1, rdhi, rdlo;
2442
2443 if ((insn & 0x0ff00f10) == 0x0e200010) {
2444 /* Multiply with Internal Accumulate Format */
2445 rd0 = (insn >> 12) & 0xf;
2446 rd1 = insn & 0xf;
2447 acc = (insn >> 5) & 7;
2448
2449 if (acc != 0)
2450 return 1;
2451
2452 switch ((insn >> 16) & 0xf) {
2453 case 0x0: /* MIA */
b26eefb6
PB
2454 gen_movl_T0_reg(s, rd0);
2455 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2456 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2457 break;
2458 case 0x8: /* MIAPH */
b26eefb6
PB
2459 gen_movl_T0_reg(s, rd0);
2460 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2461 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2462 break;
2463 case 0xc: /* MIABB */
2464 case 0xd: /* MIABT */
2465 case 0xe: /* MIATB */
2466 case 0xf: /* MIATT */
b26eefb6 2467 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2468 if (insn & (1 << 16))
2469 gen_op_shrl_T1_im(16);
2470 gen_op_movl_T0_T1();
b26eefb6 2471 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2472 if (insn & (1 << 17))
2473 gen_op_shrl_T1_im(16);
2474 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2475 break;
2476 default:
2477 return 1;
2478 }
2479
2480 gen_op_iwmmxt_movq_wRn_M0(acc);
2481 return 0;
2482 }
2483
2484 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2485 /* Internal Accumulator Access Format */
2486 rdhi = (insn >> 16) & 0xf;
2487 rdlo = (insn >> 12) & 0xf;
2488 acc = insn & 7;
2489
2490 if (acc != 0)
2491 return 1;
2492
2493 if (insn & ARM_CP_RW_BIT) { /* MRA */
e677137d 2494 gen_iwmmxt_movl_T0_T1_wRn(acc);
b26eefb6 2495 gen_movl_reg_T0(s, rdlo);
18c9b560
AZ
2496 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2497 gen_op_andl_T0_T1();
b26eefb6 2498 gen_movl_reg_T0(s, rdhi);
18c9b560 2499 } else { /* MAR */
b26eefb6
PB
2500 gen_movl_T0_reg(s, rdlo);
2501 gen_movl_T1_reg(s, rdhi);
e677137d 2502 gen_iwmmxt_movl_wRn_T0_T1(acc);
18c9b560
AZ
2503 }
2504 return 0;
2505 }
2506
2507 return 1;
2508}
2509
c1713132
AZ
2510/* Disassemble system coprocessor instruction. Return nonzero if
2511 instruction is not defined. */
2512static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2513{
8984bd2e 2514 TCGv tmp;
c1713132
AZ
2515 uint32_t rd = (insn >> 12) & 0xf;
2516 uint32_t cp = (insn >> 8) & 0xf;
2517 if (IS_USER(s)) {
2518 return 1;
2519 }
2520
18c9b560 2521 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2522 if (!env->cp[cp].cp_read)
2523 return 1;
8984bd2e
PB
2524 gen_set_pc_im(s->pc);
2525 tmp = new_tmp();
2526 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2527 store_reg(s, rd, tmp);
c1713132
AZ
2528 } else {
2529 if (!env->cp[cp].cp_write)
2530 return 1;
8984bd2e
PB
2531 gen_set_pc_im(s->pc);
2532 tmp = load_reg(s, rd);
2533 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
a60de947 2534 dead_tmp(tmp);
c1713132
AZ
2535 }
2536 return 0;
2537}
2538
9ee6e8bb
PB
2539static int cp15_user_ok(uint32_t insn)
2540{
2541 int cpn = (insn >> 16) & 0xf;
2542 int cpm = insn & 0xf;
2543 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2544
2545 if (cpn == 13 && cpm == 0) {
2546 /* TLS register. */
2547 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2548 return 1;
2549 }
2550 if (cpn == 7) {
2551 /* ISB, DSB, DMB. */
2552 if ((cpm == 5 && op == 4)
2553 || (cpm == 10 && (op == 4 || op == 5)))
2554 return 1;
2555 }
2556 return 0;
2557}
2558
b5ff1b31
FB
2559/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2560 instruction is not defined. */
a90b7318 2561static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2562{
2563 uint32_t rd;
8984bd2e 2564 TCGv tmp;
b5ff1b31 2565
9ee6e8bb
PB
2566 /* M profile cores use memory mapped registers instead of cp15. */
2567 if (arm_feature(env, ARM_FEATURE_M))
2568 return 1;
2569
2570 if ((insn & (1 << 25)) == 0) {
2571 if (insn & (1 << 20)) {
2572 /* mrrc */
2573 return 1;
2574 }
2575 /* mcrr. Used for block cache operations, so implement as no-op. */
2576 return 0;
2577 }
2578 if ((insn & (1 << 4)) == 0) {
2579 /* cdp */
2580 return 1;
2581 }
2582 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2583 return 1;
2584 }
9332f9da
FB
2585 if ((insn & 0x0fff0fff) == 0x0e070f90
2586 || (insn & 0x0fff0fff) == 0x0e070f58) {
2587 /* Wait for interrupt. */
8984bd2e 2588 gen_set_pc_im(s->pc);
9ee6e8bb 2589 s->is_jmp = DISAS_WFI;
9332f9da
FB
2590 return 0;
2591 }
b5ff1b31 2592 rd = (insn >> 12) & 0xf;
18c9b560 2593 if (insn & ARM_CP_RW_BIT) {
8984bd2e
PB
2594 tmp = new_tmp();
2595 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
b5ff1b31
FB
2596 /* If the destination register is r15 then sets condition codes. */
2597 if (rd != 15)
8984bd2e
PB
2598 store_reg(s, rd, tmp);
2599 else
2600 dead_tmp(tmp);
b5ff1b31 2601 } else {
8984bd2e
PB
2602 tmp = load_reg(s, rd);
2603 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2604 dead_tmp(tmp);
a90b7318
AZ
2605 /* Normally we would always end the TB here, but Linux
2606 * arch/arm/mach-pxa/sleep.S expects two instructions following
2607 * an MMU enable to execute from cache. Imitate this behaviour. */
2608 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2609 (insn & 0x0fff0fff) != 0x0e010f10)
2610 gen_lookup_tb(s);
b5ff1b31 2611 }
b5ff1b31
FB
2612 return 0;
2613}
2614
9ee6e8bb
PB
2615#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2616#define VFP_SREG(insn, bigbit, smallbit) \
2617 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2618#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2619 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2620 reg = (((insn) >> (bigbit)) & 0x0f) \
2621 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2622 } else { \
2623 if (insn & (1 << (smallbit))) \
2624 return 1; \
2625 reg = ((insn) >> (bigbit)) & 0x0f; \
2626 }} while (0)
2627
2628#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2629#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2630#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2631#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2632#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2633#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2634
4373f3ce
PB
2635/* Move between integer and VFP cores. */
2636static TCGv gen_vfp_mrs(void)
2637{
2638 TCGv tmp = new_tmp();
2639 tcg_gen_mov_i32(tmp, cpu_F0s);
2640 return tmp;
2641}
2642
2643static void gen_vfp_msr(TCGv tmp)
2644{
2645 tcg_gen_mov_i32(cpu_F0s, tmp);
2646 dead_tmp(tmp);
2647}
2648
9ee6e8bb
PB
2649static inline int
2650vfp_enabled(CPUState * env)
2651{
2652 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2653}
2654
ad69471c
PB
2655static void gen_neon_dup_u8(TCGv var, int shift)
2656{
2657 TCGv tmp = new_tmp();
2658 if (shift)
2659 tcg_gen_shri_i32(var, var, shift);
86831435 2660 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2661 tcg_gen_shli_i32(tmp, var, 8);
2662 tcg_gen_or_i32(var, var, tmp);
2663 tcg_gen_shli_i32(tmp, var, 16);
2664 tcg_gen_or_i32(var, var, tmp);
2665 dead_tmp(tmp);
2666}
2667
2668static void gen_neon_dup_low16(TCGv var)
2669{
2670 TCGv tmp = new_tmp();
86831435 2671 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2672 tcg_gen_shli_i32(tmp, var, 16);
2673 tcg_gen_or_i32(var, var, tmp);
2674 dead_tmp(tmp);
2675}
2676
2677static void gen_neon_dup_high16(TCGv var)
2678{
2679 TCGv tmp = new_tmp();
2680 tcg_gen_andi_i32(var, var, 0xffff0000);
2681 tcg_gen_shri_i32(tmp, var, 16);
2682 tcg_gen_or_i32(var, var, tmp);
2683 dead_tmp(tmp);
2684}
2685
b7bcbe95
FB
2686/* Disassemble a VFP instruction. Returns nonzero if an error occured
2687 (ie. an undefined instruction). */
2688static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2689{
2690 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2691 int dp, veclen;
4373f3ce 2692 TCGv tmp;
ad69471c 2693 TCGv tmp2;
b7bcbe95 2694
40f137e1
PB
2695 if (!arm_feature(env, ARM_FEATURE_VFP))
2696 return 1;
2697
9ee6e8bb
PB
2698 if (!vfp_enabled(env)) {
2699 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2700 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2701 return 1;
2702 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2703 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2704 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2705 return 1;
2706 }
b7bcbe95
FB
2707 dp = ((insn & 0xf00) == 0xb00);
2708 switch ((insn >> 24) & 0xf) {
2709 case 0xe:
2710 if (insn & (1 << 4)) {
2711 /* single register transfer */
b7bcbe95
FB
2712 rd = (insn >> 12) & 0xf;
2713 if (dp) {
9ee6e8bb
PB
2714 int size;
2715 int pass;
2716
2717 VFP_DREG_N(rn, insn);
2718 if (insn & 0xf)
b7bcbe95 2719 return 1;
9ee6e8bb
PB
2720 if (insn & 0x00c00060
2721 && !arm_feature(env, ARM_FEATURE_NEON))
2722 return 1;
2723
2724 pass = (insn >> 21) & 1;
2725 if (insn & (1 << 22)) {
2726 size = 0;
2727 offset = ((insn >> 5) & 3) * 8;
2728 } else if (insn & (1 << 5)) {
2729 size = 1;
2730 offset = (insn & (1 << 6)) ? 16 : 0;
2731 } else {
2732 size = 2;
2733 offset = 0;
2734 }
18c9b560 2735 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2736 /* vfp->arm */
ad69471c 2737 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2738 switch (size) {
2739 case 0:
9ee6e8bb 2740 if (offset)
ad69471c 2741 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2742 if (insn & (1 << 23))
ad69471c 2743 gen_uxtb(tmp);
9ee6e8bb 2744 else
ad69471c 2745 gen_sxtb(tmp);
9ee6e8bb
PB
2746 break;
2747 case 1:
9ee6e8bb
PB
2748 if (insn & (1 << 23)) {
2749 if (offset) {
ad69471c 2750 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2751 } else {
ad69471c 2752 gen_uxth(tmp);
9ee6e8bb
PB
2753 }
2754 } else {
2755 if (offset) {
ad69471c 2756 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2757 } else {
ad69471c 2758 gen_sxth(tmp);
9ee6e8bb
PB
2759 }
2760 }
2761 break;
2762 case 2:
9ee6e8bb
PB
2763 break;
2764 }
ad69471c 2765 store_reg(s, rd, tmp);
b7bcbe95
FB
2766 } else {
2767 /* arm->vfp */
ad69471c 2768 tmp = load_reg(s, rd);
9ee6e8bb
PB
2769 if (insn & (1 << 23)) {
2770 /* VDUP */
2771 if (size == 0) {
ad69471c 2772 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2773 } else if (size == 1) {
ad69471c 2774 gen_neon_dup_low16(tmp);
9ee6e8bb 2775 }
cbbccffc
PB
2776 for (n = 0; n <= pass * 2; n++) {
2777 tmp2 = new_tmp();
2778 tcg_gen_mov_i32(tmp2, tmp);
2779 neon_store_reg(rn, n, tmp2);
2780 }
2781 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2782 } else {
2783 /* VMOV */
2784 switch (size) {
2785 case 0:
ad69471c
PB
2786 tmp2 = neon_load_reg(rn, pass);
2787 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2788 dead_tmp(tmp2);
9ee6e8bb
PB
2789 break;
2790 case 1:
ad69471c
PB
2791 tmp2 = neon_load_reg(rn, pass);
2792 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2793 dead_tmp(tmp2);
9ee6e8bb
PB
2794 break;
2795 case 2:
9ee6e8bb
PB
2796 break;
2797 }
ad69471c 2798 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2799 }
b7bcbe95 2800 }
9ee6e8bb
PB
2801 } else { /* !dp */
2802 if ((insn & 0x6f) != 0x00)
2803 return 1;
2804 rn = VFP_SREG_N(insn);
18c9b560 2805 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2806 /* vfp->arm */
2807 if (insn & (1 << 21)) {
2808 /* system register */
40f137e1 2809 rn >>= 1;
9ee6e8bb 2810
b7bcbe95 2811 switch (rn) {
40f137e1 2812 case ARM_VFP_FPSID:
4373f3ce 2813 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2814 VFP3 restricts all id registers to privileged
2815 accesses. */
2816 if (IS_USER(s)
2817 && arm_feature(env, ARM_FEATURE_VFP3))
2818 return 1;
4373f3ce 2819 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2820 break;
40f137e1 2821 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2822 if (IS_USER(s))
2823 return 1;
4373f3ce 2824 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2825 break;
40f137e1
PB
2826 case ARM_VFP_FPINST:
2827 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2828 /* Not present in VFP3. */
2829 if (IS_USER(s)
2830 || arm_feature(env, ARM_FEATURE_VFP3))
2831 return 1;
4373f3ce 2832 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2833 break;
40f137e1 2834 case ARM_VFP_FPSCR:
601d70b9 2835 if (rd == 15) {
4373f3ce
PB
2836 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2837 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2838 } else {
2839 tmp = new_tmp();
2840 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2841 }
b7bcbe95 2842 break;
9ee6e8bb
PB
2843 case ARM_VFP_MVFR0:
2844 case ARM_VFP_MVFR1:
2845 if (IS_USER(s)
2846 || !arm_feature(env, ARM_FEATURE_VFP3))
2847 return 1;
4373f3ce 2848 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2849 break;
b7bcbe95
FB
2850 default:
2851 return 1;
2852 }
2853 } else {
2854 gen_mov_F0_vreg(0, rn);
4373f3ce 2855 tmp = gen_vfp_mrs();
b7bcbe95
FB
2856 }
2857 if (rd == 15) {
b5ff1b31 2858 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2859 gen_set_nzcv(tmp);
2860 dead_tmp(tmp);
2861 } else {
2862 store_reg(s, rd, tmp);
2863 }
b7bcbe95
FB
2864 } else {
2865 /* arm->vfp */
4373f3ce 2866 tmp = load_reg(s, rd);
b7bcbe95 2867 if (insn & (1 << 21)) {
40f137e1 2868 rn >>= 1;
b7bcbe95
FB
2869 /* system register */
2870 switch (rn) {
40f137e1 2871 case ARM_VFP_FPSID:
9ee6e8bb
PB
2872 case ARM_VFP_MVFR0:
2873 case ARM_VFP_MVFR1:
b7bcbe95
FB
2874 /* Writes are ignored. */
2875 break;
40f137e1 2876 case ARM_VFP_FPSCR:
4373f3ce
PB
2877 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2878 dead_tmp(tmp);
b5ff1b31 2879 gen_lookup_tb(s);
b7bcbe95 2880 break;
40f137e1 2881 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2882 if (IS_USER(s))
2883 return 1;
4373f3ce 2884 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2885 gen_lookup_tb(s);
2886 break;
2887 case ARM_VFP_FPINST:
2888 case ARM_VFP_FPINST2:
4373f3ce 2889 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2890 break;
b7bcbe95
FB
2891 default:
2892 return 1;
2893 }
2894 } else {
4373f3ce 2895 gen_vfp_msr(tmp);
b7bcbe95
FB
2896 gen_mov_vreg_F0(0, rn);
2897 }
2898 }
2899 }
2900 } else {
2901 /* data processing */
2902 /* The opcode is in bits 23, 21, 20 and 6. */
2903 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2904 if (dp) {
2905 if (op == 15) {
2906 /* rn is opcode */
2907 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2908 } else {
2909 /* rn is register number */
9ee6e8bb 2910 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2911 }
2912
2913 if (op == 15 && (rn == 15 || rn > 17)) {
2914 /* Integer or single precision destination. */
9ee6e8bb 2915 rd = VFP_SREG_D(insn);
b7bcbe95 2916 } else {
9ee6e8bb 2917 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2918 }
2919
2920 if (op == 15 && (rn == 16 || rn == 17)) {
2921 /* Integer source. */
2922 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2923 } else {
9ee6e8bb 2924 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2925 }
2926 } else {
9ee6e8bb 2927 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2928 if (op == 15 && rn == 15) {
2929 /* Double precision destination. */
9ee6e8bb
PB
2930 VFP_DREG_D(rd, insn);
2931 } else {
2932 rd = VFP_SREG_D(insn);
2933 }
2934 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2935 }
2936
2937 veclen = env->vfp.vec_len;
2938 if (op == 15 && rn > 3)
2939 veclen = 0;
2940
2941 /* Shut up compiler warnings. */
2942 delta_m = 0;
2943 delta_d = 0;
2944 bank_mask = 0;
3b46e624 2945
b7bcbe95
FB
2946 if (veclen > 0) {
2947 if (dp)
2948 bank_mask = 0xc;
2949 else
2950 bank_mask = 0x18;
2951
2952 /* Figure out what type of vector operation this is. */
2953 if ((rd & bank_mask) == 0) {
2954 /* scalar */
2955 veclen = 0;
2956 } else {
2957 if (dp)
2958 delta_d = (env->vfp.vec_stride >> 1) + 1;
2959 else
2960 delta_d = env->vfp.vec_stride + 1;
2961
2962 if ((rm & bank_mask) == 0) {
2963 /* mixed scalar/vector */
2964 delta_m = 0;
2965 } else {
2966 /* vector */
2967 delta_m = delta_d;
2968 }
2969 }
2970 }
2971
2972 /* Load the initial operands. */
2973 if (op == 15) {
2974 switch (rn) {
2975 case 16:
2976 case 17:
2977 /* Integer source */
2978 gen_mov_F0_vreg(0, rm);
2979 break;
2980 case 8:
2981 case 9:
2982 /* Compare */
2983 gen_mov_F0_vreg(dp, rd);
2984 gen_mov_F1_vreg(dp, rm);
2985 break;
2986 case 10:
2987 case 11:
2988 /* Compare with zero */
2989 gen_mov_F0_vreg(dp, rd);
2990 gen_vfp_F1_ld0(dp);
2991 break;
9ee6e8bb
PB
2992 case 20:
2993 case 21:
2994 case 22:
2995 case 23:
644ad806
PB
2996 case 28:
2997 case 29:
2998 case 30:
2999 case 31:
9ee6e8bb
PB
3000 /* Source and destination the same. */
3001 gen_mov_F0_vreg(dp, rd);
3002 break;
b7bcbe95
FB
3003 default:
3004 /* One source operand. */
3005 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3006 break;
b7bcbe95
FB
3007 }
3008 } else {
3009 /* Two source operands. */
3010 gen_mov_F0_vreg(dp, rn);
3011 gen_mov_F1_vreg(dp, rm);
3012 }
3013
3014 for (;;) {
3015 /* Perform the calculation. */
3016 switch (op) {
3017 case 0: /* mac: fd + (fn * fm) */
3018 gen_vfp_mul(dp);
3019 gen_mov_F1_vreg(dp, rd);
3020 gen_vfp_add(dp);
3021 break;
3022 case 1: /* nmac: fd - (fn * fm) */
3023 gen_vfp_mul(dp);
3024 gen_vfp_neg(dp);
3025 gen_mov_F1_vreg(dp, rd);
3026 gen_vfp_add(dp);
3027 break;
3028 case 2: /* msc: -fd + (fn * fm) */
3029 gen_vfp_mul(dp);
3030 gen_mov_F1_vreg(dp, rd);
3031 gen_vfp_sub(dp);
3032 break;
3033 case 3: /* nmsc: -fd - (fn * fm) */
3034 gen_vfp_mul(dp);
b7bcbe95 3035 gen_vfp_neg(dp);
c9fb531a
PB
3036 gen_mov_F1_vreg(dp, rd);
3037 gen_vfp_sub(dp);
b7bcbe95
FB
3038 break;
3039 case 4: /* mul: fn * fm */
3040 gen_vfp_mul(dp);
3041 break;
3042 case 5: /* nmul: -(fn * fm) */
3043 gen_vfp_mul(dp);
3044 gen_vfp_neg(dp);
3045 break;
3046 case 6: /* add: fn + fm */
3047 gen_vfp_add(dp);
3048 break;
3049 case 7: /* sub: fn - fm */
3050 gen_vfp_sub(dp);
3051 break;
3052 case 8: /* div: fn / fm */
3053 gen_vfp_div(dp);
3054 break;
9ee6e8bb
PB
3055 case 14: /* fconst */
3056 if (!arm_feature(env, ARM_FEATURE_VFP3))
3057 return 1;
3058
3059 n = (insn << 12) & 0x80000000;
3060 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3061 if (dp) {
3062 if (i & 0x40)
3063 i |= 0x3f80;
3064 else
3065 i |= 0x4000;
3066 n |= i << 16;
4373f3ce 3067 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3068 } else {
3069 if (i & 0x40)
3070 i |= 0x780;
3071 else
3072 i |= 0x800;
3073 n |= i << 19;
5b340b51 3074 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3075 }
9ee6e8bb 3076 break;
b7bcbe95
FB
3077 case 15: /* extension space */
3078 switch (rn) {
3079 case 0: /* cpy */
3080 /* no-op */
3081 break;
3082 case 1: /* abs */
3083 gen_vfp_abs(dp);
3084 break;
3085 case 2: /* neg */
3086 gen_vfp_neg(dp);
3087 break;
3088 case 3: /* sqrt */
3089 gen_vfp_sqrt(dp);
3090 break;
3091 case 8: /* cmp */
3092 gen_vfp_cmp(dp);
3093 break;
3094 case 9: /* cmpe */
3095 gen_vfp_cmpe(dp);
3096 break;
3097 case 10: /* cmpz */
3098 gen_vfp_cmp(dp);
3099 break;
3100 case 11: /* cmpez */
3101 gen_vfp_F1_ld0(dp);
3102 gen_vfp_cmpe(dp);
3103 break;
3104 case 15: /* single<->double conversion */
3105 if (dp)
4373f3ce 3106 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3107 else
4373f3ce 3108 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3109 break;
3110 case 16: /* fuito */
3111 gen_vfp_uito(dp);
3112 break;
3113 case 17: /* fsito */
3114 gen_vfp_sito(dp);
3115 break;
9ee6e8bb
PB
3116 case 20: /* fshto */
3117 if (!arm_feature(env, ARM_FEATURE_VFP3))
3118 return 1;
644ad806 3119 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3120 break;
3121 case 21: /* fslto */
3122 if (!arm_feature(env, ARM_FEATURE_VFP3))
3123 return 1;
644ad806 3124 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3125 break;
3126 case 22: /* fuhto */
3127 if (!arm_feature(env, ARM_FEATURE_VFP3))
3128 return 1;
644ad806 3129 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3130 break;
3131 case 23: /* fulto */
3132 if (!arm_feature(env, ARM_FEATURE_VFP3))
3133 return 1;
644ad806 3134 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3135 break;
b7bcbe95
FB
3136 case 24: /* ftoui */
3137 gen_vfp_toui(dp);
3138 break;
3139 case 25: /* ftouiz */
3140 gen_vfp_touiz(dp);
3141 break;
3142 case 26: /* ftosi */
3143 gen_vfp_tosi(dp);
3144 break;
3145 case 27: /* ftosiz */
3146 gen_vfp_tosiz(dp);
3147 break;
9ee6e8bb
PB
3148 case 28: /* ftosh */
3149 if (!arm_feature(env, ARM_FEATURE_VFP3))
3150 return 1;
644ad806 3151 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3152 break;
3153 case 29: /* ftosl */
3154 if (!arm_feature(env, ARM_FEATURE_VFP3))
3155 return 1;
644ad806 3156 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3157 break;
3158 case 30: /* ftouh */
3159 if (!arm_feature(env, ARM_FEATURE_VFP3))
3160 return 1;
644ad806 3161 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3162 break;
3163 case 31: /* ftoul */
3164 if (!arm_feature(env, ARM_FEATURE_VFP3))
3165 return 1;
644ad806 3166 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3167 break;
b7bcbe95
FB
3168 default: /* undefined */
3169 printf ("rn:%d\n", rn);
3170 return 1;
3171 }
3172 break;
3173 default: /* undefined */
3174 printf ("op:%d\n", op);
3175 return 1;
3176 }
3177
3178 /* Write back the result. */
3179 if (op == 15 && (rn >= 8 && rn <= 11))
3180 ; /* Comparison, do nothing. */
3181 else if (op == 15 && rn > 17)
3182 /* Integer result. */
3183 gen_mov_vreg_F0(0, rd);
3184 else if (op == 15 && rn == 15)
3185 /* conversion */
3186 gen_mov_vreg_F0(!dp, rd);
3187 else
3188 gen_mov_vreg_F0(dp, rd);
3189
3190 /* break out of the loop if we have finished */
3191 if (veclen == 0)
3192 break;
3193
3194 if (op == 15 && delta_m == 0) {
3195 /* single source one-many */
3196 while (veclen--) {
3197 rd = ((rd + delta_d) & (bank_mask - 1))
3198 | (rd & bank_mask);
3199 gen_mov_vreg_F0(dp, rd);
3200 }
3201 break;
3202 }
3203 /* Setup the next operands. */
3204 veclen--;
3205 rd = ((rd + delta_d) & (bank_mask - 1))
3206 | (rd & bank_mask);
3207
3208 if (op == 15) {
3209 /* One source operand. */
3210 rm = ((rm + delta_m) & (bank_mask - 1))
3211 | (rm & bank_mask);
3212 gen_mov_F0_vreg(dp, rm);
3213 } else {
3214 /* Two source operands. */
3215 rn = ((rn + delta_d) & (bank_mask - 1))
3216 | (rn & bank_mask);
3217 gen_mov_F0_vreg(dp, rn);
3218 if (delta_m) {
3219 rm = ((rm + delta_m) & (bank_mask - 1))
3220 | (rm & bank_mask);
3221 gen_mov_F1_vreg(dp, rm);
3222 }
3223 }
3224 }
3225 }
3226 break;
3227 case 0xc:
3228 case 0xd:
9ee6e8bb 3229 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3230 /* two-register transfer */
3231 rn = (insn >> 16) & 0xf;
3232 rd = (insn >> 12) & 0xf;
3233 if (dp) {
9ee6e8bb
PB
3234 VFP_DREG_M(rm, insn);
3235 } else {
3236 rm = VFP_SREG_M(insn);
3237 }
b7bcbe95 3238
18c9b560 3239 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3240 /* vfp->arm */
3241 if (dp) {
4373f3ce
PB
3242 gen_mov_F0_vreg(0, rm * 2);
3243 tmp = gen_vfp_mrs();
3244 store_reg(s, rd, tmp);
3245 gen_mov_F0_vreg(0, rm * 2 + 1);
3246 tmp = gen_vfp_mrs();
3247 store_reg(s, rn, tmp);
b7bcbe95
FB
3248 } else {
3249 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3250 tmp = gen_vfp_mrs();
3251 store_reg(s, rn, tmp);
b7bcbe95 3252 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3253 tmp = gen_vfp_mrs();
3254 store_reg(s, rd, tmp);
b7bcbe95
FB
3255 }
3256 } else {
3257 /* arm->vfp */
3258 if (dp) {
4373f3ce
PB
3259 tmp = load_reg(s, rd);
3260 gen_vfp_msr(tmp);
3261 gen_mov_vreg_F0(0, rm * 2);
3262 tmp = load_reg(s, rn);
3263 gen_vfp_msr(tmp);
3264 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3265 } else {
4373f3ce
PB
3266 tmp = load_reg(s, rn);
3267 gen_vfp_msr(tmp);
b7bcbe95 3268 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3269 tmp = load_reg(s, rd);
3270 gen_vfp_msr(tmp);
b7bcbe95
FB
3271 gen_mov_vreg_F0(0, rm + 1);
3272 }
3273 }
3274 } else {
3275 /* Load/store */
3276 rn = (insn >> 16) & 0xf;
3277 if (dp)
9ee6e8bb 3278 VFP_DREG_D(rd, insn);
b7bcbe95 3279 else
9ee6e8bb
PB
3280 rd = VFP_SREG_D(insn);
3281 if (s->thumb && rn == 15) {
3282 gen_op_movl_T1_im(s->pc & ~2);
3283 } else {
3284 gen_movl_T1_reg(s, rn);
3285 }
b7bcbe95
FB
3286 if ((insn & 0x01200000) == 0x01000000) {
3287 /* Single load/store */
3288 offset = (insn & 0xff) << 2;
3289 if ((insn & (1 << 23)) == 0)
3290 offset = -offset;
3291 gen_op_addl_T1_im(offset);
3292 if (insn & (1 << 20)) {
b5ff1b31 3293 gen_vfp_ld(s, dp);
b7bcbe95
FB
3294 gen_mov_vreg_F0(dp, rd);
3295 } else {
3296 gen_mov_F0_vreg(dp, rd);
b5ff1b31 3297 gen_vfp_st(s, dp);
b7bcbe95
FB
3298 }
3299 } else {
3300 /* load/store multiple */
3301 if (dp)
3302 n = (insn >> 1) & 0x7f;
3303 else
3304 n = insn & 0xff;
3305
3306 if (insn & (1 << 24)) /* pre-decrement */
3307 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3308
3309 if (dp)
3310 offset = 8;
3311 else
3312 offset = 4;
3313 for (i = 0; i < n; i++) {
18c9b560 3314 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3315 /* load */
b5ff1b31 3316 gen_vfp_ld(s, dp);
b7bcbe95
FB
3317 gen_mov_vreg_F0(dp, rd + i);
3318 } else {
3319 /* store */
3320 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 3321 gen_vfp_st(s, dp);
b7bcbe95
FB
3322 }
3323 gen_op_addl_T1_im(offset);
3324 }
3325 if (insn & (1 << 21)) {
3326 /* writeback */
3327 if (insn & (1 << 24))
3328 offset = -offset * n;
3329 else if (dp && (insn & 1))
3330 offset = 4;
3331 else
3332 offset = 0;
3333
3334 if (offset != 0)
3335 gen_op_addl_T1_im(offset);
3336 gen_movl_reg_T1(s, rn);
3337 }
3338 }
3339 }
3340 break;
3341 default:
3342 /* Should never happen. */
3343 return 1;
3344 }
3345 return 0;
3346}
3347
/* Emit a jump to DEST.  If the destination is on the same guest page as
   this TB, use goto_tb/exit_tb(tb + n) so the engine can chain the two
   TBs directly; otherwise just set the PC and exit unchained.  */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        /* Same page: direct TB chaining is safe.  */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        /* Cross-page: exit with 0 so no chaining is attempted.  */
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
3362
/* Emit an unconditional jump to DEST, honouring single-step mode.  */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;  /* keep the Thumb bit set in the branch target */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
3375
/* 16x16->32 signed multiply (SMULxy family).  X and Y select the top (1)
   or bottom (0) halfword of t0 and t1 respectively; the product is left
   in t0 and t1 is clobbered.  */
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);   /* top half, sign-extended */
    else
        gen_sxth(t0);                   /* bottom half, sign-extended */
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
3388
3389/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3390static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3391 uint32_t mask;
3392
3393 mask = 0;
3394 if (flags & (1 << 0))
3395 mask |= 0xff;
3396 if (flags & (1 << 1))
3397 mask |= 0xff00;
3398 if (flags & (1 << 2))
3399 mask |= 0xff0000;
3400 if (flags & (1 << 3))
3401 mask |= 0xff000000;
9ee6e8bb 3402
2ae23e75 3403 /* Mask out undefined bits. */
9ee6e8bb
PB
3404 mask &= ~CPSR_RESERVED;
3405 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3406 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3407 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3408 mask &= ~CPSR_IT;
9ee6e8bb 3409 /* Mask out execution state bits. */
2ae23e75 3410 if (!spsr)
e160c51c 3411 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3412 /* Mask out privileged bits. */
3413 if (IS_USER(s))
9ee6e8bb 3414 mask &= CPSR_USER;
b5ff1b31
FB
3415 return mask;
3416}
3417
/* Write T0 (restricted to MASK) into the CPSR or current-mode SPSR.
   Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep SPSR bits outside MASK unchanged.  */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
        tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(cpu_T[0], mask);
    }
    /* A PSR write may change mode/flags, so force a TB lookup.  */
    gen_lookup_tb(s);
    return 0;
}
3438
/* Generate an old-style exception return: restore CPSR from the SPSR and
   branch to PC.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);  /* restore all CPSR bits */
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}
3449
/* Generate a v6 exception return (RFE): write CPSR then branch to PC.
   Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);  /* restore all CPSR bits */
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3458
/* Flush the current Thumb-2 IT (if-then) state out to
   env->condexec_bits, packing cond into bits [7:4] and the advanced
   mask into bits [3:0].  No-op when not inside an IT block.  */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
3b46e624 3469
/* Handle the NOP-compatible hint instructions (NOP/WFI/WFE/SEV).
   Only WFI has an effect: it ends the TB in the halted state.  */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
99c475ab 3484
/* These macros help make the code more readable when migrating from the
   old dyngen helpers.  They should probably be removed when
   T0/T1 are removed.  */
#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]

/* dest, src, src argument triple for the 64-bit (cpu_V0/cpu_V1) ops.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb
PB
3492
/* T0 += T1 per NEON element size (0=8-bit, 1=16-bit, 2=32-bit).
   Returns nonzero for an invalid size.  */
static inline int gen_neon_add(int size)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(CPU_T001); break;
    case 1: gen_helper_neon_add_u16(CPU_T001); break;
    case 2: gen_op_addl_T0_T1(); break;
    default: return 1;
    }
    return 0;
}
3503
ad69471c
PB
/* T0 = T1 - T0 (reverse subtract) per NEON element size.
   Silently ignores invalid sizes.  */
static inline void gen_neon_rsb(int size)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
    case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
    case 2: gen_op_rsbl_T0_T1(); break;
    default: return;
    }
}
3513
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* FIXME: This is wrong. They set the wrong overflow bit. */
/* The extra `e` (cpu_env) argument is discarded by these aliases.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3525
/* Call the NEON integer helper selected by the surrounding `size` and
   `u` variables (element size 8/16/32, signed/unsigned).  This _ENV
   variant passes cpu_env; used below for the saturating ops
   (qadd/qsub/qshl/qrshl).  Expands to `return 1` on an invalid size.  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3548
/* Same as GEN_NEON_INTEGER_OP_ENV but for helpers that do not take
   cpu_env.  Selected by the surrounding `size` and `u` variables;
   expands to `return 1` on an invalid size.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
3571
3572static inline void
3573gen_neon_movl_scratch_T0(int scratch)
3574{
3575 uint32_t offset;
3576
3577 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3578 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3579}
3580
3581static inline void
3582gen_neon_movl_scratch_T1(int scratch)
3583{
3584 uint32_t offset;
3585
3586 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3587 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3588}
3589
3590static inline void
3591gen_neon_movl_T0_scratch(int scratch)
3592{
3593 uint32_t offset;
3594
3595 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3596 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3597}
3598
3599static inline void
3600gen_neon_movl_T1_scratch(int scratch)
3601{
3602 uint32_t offset;
3603
3604 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3605 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3606}
3607
/* Load the scalar identified by REG into T0.  For size == 1 the whole
   32-bit word is the scalar; otherwise the word is loaded and the
   16-bit half selected by REG's low bit is duplicated into both halves
   of T0.  NOTE(review): low/high selection looks inverted (reg & 1 ->
   dup_low16) -- confirm against the scalar index encoding.  */
static inline void gen_neon_get_scalar(int size, int reg)
{
    if (size == 1) {
        NEON_GET_REG(T0, reg >> 1, reg & 1);
    } else {
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
        if (reg & 1)
            gen_neon_dup_low16(cpu_T[0]);
        else
            gen_neon_dup_high16(cpu_T[0]);
    }
}
3620
3621static void gen_neon_unzip(int reg, int q, int tmp, int size)
3622{
3623 int n;
3624
3625 for (n = 0; n < q + 1; n += 2) {
3626 NEON_GET_REG(T0, reg, n);
3627 NEON_GET_REG(T0, reg, n + n);
3628 switch (size) {
ad69471c
PB
3629 case 0: gen_helper_neon_unzip_u8(); break;
3630 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3631 case 2: /* no-op */; break;
3632 default: abort();
3633 }
3634 gen_neon_movl_scratch_T0(tmp + n);
3635 gen_neon_movl_scratch_T1(tmp + n + 1);
3636 }
3637}
3638
/* Per-op layout for NEON "load/store multiple structures", indexed by
   the instruction's op field (0..10).  */
static struct {
    int nregs;       /* number of registers transferred */
    int interleave;  /* element interleave factor */
    int spacing;     /* register number step between structure members */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3656
3657/* Translate a NEON load/store element instruction. Return nonzero if the
3658 instruction is invalid. */
3659static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3660{
3661 int rd, rn, rm;
3662 int op;
3663 int nregs;
3664 int interleave;
3665 int stride;
3666 int size;
3667 int reg;
3668 int pass;
3669 int load;
3670 int shift;
9ee6e8bb 3671 int n;
b0109805 3672 TCGv tmp;
8f8e3aa4 3673 TCGv tmp2;
9ee6e8bb
PB
3674
3675 if (!vfp_enabled(env))
3676 return 1;
3677 VFP_DREG_D(rd, insn);
3678 rn = (insn >> 16) & 0xf;
3679 rm = insn & 0xf;
3680 load = (insn & (1 << 21)) != 0;
3681 if ((insn & (1 << 23)) == 0) {
3682 /* Load store all elements. */
3683 op = (insn >> 8) & 0xf;
3684 size = (insn >> 6) & 3;
3685 if (op > 10 || size == 3)
3686 return 1;
3687 nregs = neon_ls_element_type[op].nregs;
3688 interleave = neon_ls_element_type[op].interleave;
3689 gen_movl_T1_reg(s, rn);
3690 stride = (1 << size) * interleave;
3691 for (reg = 0; reg < nregs; reg++) {
3692 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3693 gen_movl_T1_reg(s, rn);
3694 gen_op_addl_T1_im((1 << size) * reg);
3695 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3696 gen_movl_T1_reg(s, rn);
3697 gen_op_addl_T1_im(1 << size);
3698 }
3699 for (pass = 0; pass < 2; pass++) {
3700 if (size == 2) {
3701 if (load) {
b0109805 3702 tmp = gen_ld32(cpu_T[1], IS_USER(s));
ad69471c 3703 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3704 } else {
ad69471c 3705 tmp = neon_load_reg(rd, pass);
b0109805 3706 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3707 }
3708 gen_op_addl_T1_im(stride);
3709 } else if (size == 1) {
3710 if (load) {
b0109805 3711 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3712 gen_op_addl_T1_im(stride);
8f8e3aa4 3713 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3714 gen_op_addl_T1_im(stride);
8f8e3aa4
PB
3715 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3716 dead_tmp(tmp2);
3717 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3718 } else {
8f8e3aa4
PB
3719 tmp = neon_load_reg(rd, pass);
3720 tmp2 = new_tmp();
3721 tcg_gen_shri_i32(tmp2, tmp, 16);
b0109805 3722 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3723 gen_op_addl_T1_im(stride);
8f8e3aa4 3724 gen_st16(tmp2, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3725 gen_op_addl_T1_im(stride);
3726 }
3727 } else /* size == 0 */ {
3728 if (load) {
a50f5b91 3729 TCGV_UNUSED(tmp2);
9ee6e8bb 3730 for (n = 0; n < 4; n++) {
b0109805 3731 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3732 gen_op_addl_T1_im(stride);
3733 if (n == 0) {
8f8e3aa4 3734 tmp2 = tmp;
9ee6e8bb 3735 } else {
8f8e3aa4
PB
3736 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3737 dead_tmp(tmp);
9ee6e8bb 3738 }
9ee6e8bb 3739 }
8f8e3aa4 3740 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3741 } else {
8f8e3aa4 3742 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3743 for (n = 0; n < 4; n++) {
8f8e3aa4 3744 tmp = new_tmp();
9ee6e8bb 3745 if (n == 0) {
8f8e3aa4 3746 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3747 } else {
8f8e3aa4 3748 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3749 }
b0109805 3750 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3751 gen_op_addl_T1_im(stride);
9ee6e8bb 3752 }
8f8e3aa4 3753 dead_tmp(tmp2);
9ee6e8bb
PB
3754 }
3755 }
3756 }
3757 rd += neon_ls_element_type[op].spacing;
3758 }
3759 stride = nregs * 8;
3760 } else {
3761 size = (insn >> 10) & 3;
3762 if (size == 3) {
3763 /* Load single element to all lanes. */
3764 if (!load)
3765 return 1;
3766 size = (insn >> 6) & 3;
3767 nregs = ((insn >> 8) & 3) + 1;
3768 stride = (insn & (1 << 5)) ? 2 : 1;
ff8263a9 3769 gen_movl_T1_reg(s, rn);
9ee6e8bb
PB
3770 for (reg = 0; reg < nregs; reg++) {
3771 switch (size) {
3772 case 0:
b0109805 3773 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
ad69471c 3774 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3775 break;
3776 case 1:
b0109805 3777 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
ad69471c 3778 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3779 break;
3780 case 2:
b0109805 3781 tmp = gen_ld32(cpu_T[0], IS_USER(s));
9ee6e8bb
PB
3782 break;
3783 case 3:
3784 return 1;
a50f5b91
PB
3785 default: /* Avoid compiler warnings. */
3786 abort();
99c475ab 3787 }
9ee6e8bb 3788 gen_op_addl_T1_im(1 << size);
ad69471c
PB
3789 tmp2 = new_tmp();
3790 tcg_gen_mov_i32(tmp2, tmp);
3791 neon_store_reg(rd, 0, tmp2);
3018f259 3792 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3793 rd += stride;
3794 }
3795 stride = (1 << size) * nregs;
3796 } else {
3797 /* Single element. */
3798 pass = (insn >> 7) & 1;
3799 switch (size) {
3800 case 0:
3801 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3802 stride = 1;
3803 break;
3804 case 1:
3805 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3806 stride = (insn & (1 << 5)) ? 2 : 1;
3807 break;
3808 case 2:
3809 shift = 0;
9ee6e8bb
PB
3810 stride = (insn & (1 << 6)) ? 2 : 1;
3811 break;
3812 default:
3813 abort();
3814 }
3815 nregs = ((insn >> 8) & 3) + 1;
3816 gen_movl_T1_reg(s, rn);
3817 for (reg = 0; reg < nregs; reg++) {
3818 if (load) {
9ee6e8bb
PB
3819 switch (size) {
3820 case 0:
b0109805 3821 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3822 break;
3823 case 1:
b0109805 3824 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3825 break;
3826 case 2:
b0109805 3827 tmp = gen_ld32(cpu_T[1], IS_USER(s));
9ee6e8bb 3828 break;
a50f5b91
PB
3829 default: /* Avoid compiler warnings. */
3830 abort();
9ee6e8bb
PB
3831 }
3832 if (size != 2) {
8f8e3aa4
PB
3833 tmp2 = neon_load_reg(rd, pass);
3834 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3835 dead_tmp(tmp2);
9ee6e8bb 3836 }
8f8e3aa4 3837 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3838 } else { /* Store */
8f8e3aa4
PB
3839 tmp = neon_load_reg(rd, pass);
3840 if (shift)
3841 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3842 switch (size) {
3843 case 0:
b0109805 3844 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3845 break;
3846 case 1:
b0109805 3847 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3848 break;
3849 case 2:
b0109805 3850 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3851 break;
99c475ab 3852 }
99c475ab 3853 }
9ee6e8bb
PB
3854 rd += stride;
3855 gen_op_addl_T1_im(1 << size);
99c475ab 3856 }
9ee6e8bb 3857 stride = nregs * (1 << size);
99c475ab 3858 }
9ee6e8bb
PB
3859 }
3860 if (rm != 15) {
b26eefb6
PB
3861 TCGv base;
3862
3863 base = load_reg(s, rn);
9ee6e8bb 3864 if (rm == 13) {
b26eefb6 3865 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3866 } else {
b26eefb6
PB
3867 TCGv index;
3868 index = load_reg(s, rm);
3869 tcg_gen_add_i32(base, base, index);
3870 dead_tmp(index);
9ee6e8bb 3871 }
b26eefb6 3872 store_reg(s, rn, base);
9ee6e8bb
PB
3873 }
3874 return 0;
3875}
3b46e624 3876
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);    /* bits of t where c is set */
    tcg_gen_bic_i32(f, f, c);    /* bits of f where c is clear */
    tcg_gen_or_i32(dest, t, f);
}
3884
a7812ae4 3885static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3886{
3887 switch (size) {
3888 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3889 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3890 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3891 default: abort();
3892 }
3893}
3894
/* Signed saturating narrow of a 64-bit source to 32 bits (per-element
   size 0/1/2).  These helpers take cpu_env so they can set saturation
   state.  */
static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
3904
/* Unsigned saturating narrow of a 64-bit source to 32 bits (per-element
   size 0/1/2).  These helpers take cpu_env so they can set saturation
   state.  */
static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
3914
3915static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3916 int q, int u)
3917{
3918 if (q) {
3919 if (u) {
3920 switch (size) {
3921 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3922 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3923 default: abort();
3924 }
3925 } else {
3926 switch (size) {
3927 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3928 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3929 default: abort();
3930 }
3931 }
3932 } else {
3933 if (u) {
3934 switch (size) {
3935 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3936 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3937 default: abort();
3938 }
3939 } else {
3940 switch (size) {
3941 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3942 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3943 default: abort();
3944 }
3945 }
3946 }
3947}
3948
/* Widen a 32-bit value to 64 bits: size 0 = bytes, 1 = halfwords,
   2 = single word; U selects zero- vs sign-extension.  Frees SRC.  */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    dead_tmp(src);
}
3968
/* 64-bit vector add on cpu_V0/cpu_V1: size 0 = 16-bit lanes,
   1 = 32-bit lanes, 2 = whole 64-bit add.  */
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}
3978
/* 64-bit vector subtract on cpu_V0/cpu_V1: size 0 = 16-bit lanes,
   1 = 32-bit lanes, 2 = whole 64-bit subtract.  */
static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
3988
/* Negate VAR in place, per-lane for sizes 0/1, whole 64 bits for 2.  */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}
3998
/* op0 = saturate(op0 + op1) for 32-bit (size 1) or 64-bit (size 2)
   lanes; helpers take cpu_env to record saturation.  */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
4007
/* Widening multiply: dest(64) = a * b, with element size 0/1/2 and
   U selecting unsigned.  For 8/16-bit elements the helper leaves the
   32-bit operands live, so they are freed here; the 32-bit paths go
   through gen_mul[su]_i64_i32.  */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    default: abort();
    }
    if (size < 2) {
        dead_tmp(b);
        dead_tmp(a);
    }
}
4032
9ee6e8bb
PB
4033/* Translate a NEON data processing instruction. Return nonzero if the
4034 instruction is invalid.
ad69471c
PB
4035 We process data in a mixture of 32-bit and 64-bit chunks.
4036 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4037
9ee6e8bb
PB
4038static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4039{
4040 int op;
4041 int q;
4042 int rd, rn, rm;
4043 int size;
4044 int shift;
4045 int pass;
4046 int count;
4047 int pairwise;
4048 int u;
4049 int n;
4050 uint32_t imm;
8f8e3aa4
PB
4051 TCGv tmp;
4052 TCGv tmp2;
4053 TCGv tmp3;
a7812ae4 4054 TCGv_i64 tmp64;
9ee6e8bb
PB
4055
4056 if (!vfp_enabled(env))
4057 return 1;
4058 q = (insn & (1 << 6)) != 0;
4059 u = (insn >> 24) & 1;
4060 VFP_DREG_D(rd, insn);
4061 VFP_DREG_N(rn, insn);
4062 VFP_DREG_M(rm, insn);
4063 size = (insn >> 20) & 3;
4064 if ((insn & (1 << 23)) == 0) {
4065 /* Three register same length. */
4066 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4067 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4068 || op == 10 || op == 11 || op == 16)) {
4069 /* 64-bit element instructions. */
9ee6e8bb 4070 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4071 neon_load_reg64(cpu_V0, rn + pass);
4072 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4073 switch (op) {
4074 case 1: /* VQADD */
4075 if (u) {
ad69471c 4076 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4077 } else {
ad69471c 4078 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4079 }
9ee6e8bb
PB
4080 break;
4081 case 5: /* VQSUB */
4082 if (u) {
ad69471c
PB
4083 gen_helper_neon_sub_saturate_u64(CPU_V001);
4084 } else {
4085 gen_helper_neon_sub_saturate_s64(CPU_V001);
4086 }
4087 break;
4088 case 8: /* VSHL */
4089 if (u) {
4090 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4091 } else {
4092 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4093 }
4094 break;
4095 case 9: /* VQSHL */
4096 if (u) {
4097 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4098 cpu_V0, cpu_V0);
4099 } else {
4100 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4101 cpu_V1, cpu_V0);
4102 }
4103 break;
4104 case 10: /* VRSHL */
4105 if (u) {
4106 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4107 } else {
ad69471c
PB
4108 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4109 }
4110 break;
4111 case 11: /* VQRSHL */
4112 if (u) {
4113 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4114 cpu_V1, cpu_V0);
4115 } else {
4116 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4117 cpu_V1, cpu_V0);
1e8d4eec 4118 }
9ee6e8bb
PB
4119 break;
4120 case 16:
4121 if (u) {
ad69471c 4122 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4123 } else {
ad69471c 4124 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4125 }
4126 break;
4127 default:
4128 abort();
2c0262af 4129 }
ad69471c 4130 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4131 }
9ee6e8bb 4132 return 0;
2c0262af 4133 }
9ee6e8bb
PB
4134 switch (op) {
4135 case 8: /* VSHL */
4136 case 9: /* VQSHL */
4137 case 10: /* VRSHL */
ad69471c 4138 case 11: /* VQRSHL */
9ee6e8bb 4139 {
ad69471c
PB
4140 int rtmp;
4141 /* Shift instruction operands are reversed. */
4142 rtmp = rn;
9ee6e8bb 4143 rn = rm;
ad69471c 4144 rm = rtmp;
9ee6e8bb
PB
4145 pairwise = 0;
4146 }
2c0262af 4147 break;
9ee6e8bb
PB
4148 case 20: /* VPMAX */
4149 case 21: /* VPMIN */
4150 case 23: /* VPADD */
4151 pairwise = 1;
2c0262af 4152 break;
9ee6e8bb
PB
4153 case 26: /* VPADD (float) */
4154 pairwise = (u && size < 2);
2c0262af 4155 break;
9ee6e8bb
PB
4156 case 30: /* VPMIN/VPMAX (float) */
4157 pairwise = u;
2c0262af 4158 break;
9ee6e8bb
PB
4159 default:
4160 pairwise = 0;
2c0262af 4161 break;
9ee6e8bb
PB
4162 }
4163 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4164
4165 if (pairwise) {
4166 /* Pairwise. */
4167 if (q)
4168 n = (pass & 1) * 2;
2c0262af 4169 else
9ee6e8bb
PB
4170 n = 0;
4171 if (pass < q + 1) {
4172 NEON_GET_REG(T0, rn, n);
4173 NEON_GET_REG(T1, rn, n + 1);
4174 } else {
4175 NEON_GET_REG(T0, rm, n);
4176 NEON_GET_REG(T1, rm, n + 1);
4177 }
4178 } else {
4179 /* Elementwise. */
4180 NEON_GET_REG(T0, rn, pass);
4181 NEON_GET_REG(T1, rm, pass);
4182 }
4183 switch (op) {
4184 case 0: /* VHADD */
4185 GEN_NEON_INTEGER_OP(hadd);
4186 break;
4187 case 1: /* VQADD */
ad69471c 4188 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4189 break;
9ee6e8bb
PB
4190 case 2: /* VRHADD */
4191 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4192 break;
9ee6e8bb
PB
4193 case 3: /* Logic ops. */
4194 switch ((u << 2) | size) {
4195 case 0: /* VAND */
2c0262af 4196 gen_op_andl_T0_T1();
9ee6e8bb
PB
4197 break;
4198 case 1: /* BIC */
4199 gen_op_bicl_T0_T1();
4200 break;
4201 case 2: /* VORR */
4202 gen_op_orl_T0_T1();
4203 break;
4204 case 3: /* VORN */
4205 gen_op_notl_T1();
4206 gen_op_orl_T0_T1();
4207 break;
4208 case 4: /* VEOR */
4209 gen_op_xorl_T0_T1();
4210 break;
4211 case 5: /* VBSL */
8f8e3aa4
PB
4212 tmp = neon_load_reg(rd, pass);
4213 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4214 dead_tmp(tmp);
9ee6e8bb
PB
4215 break;
4216 case 6: /* VBIT */
8f8e3aa4
PB
4217 tmp = neon_load_reg(rd, pass);
4218 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4219 dead_tmp(tmp);
9ee6e8bb
PB
4220 break;
4221 case 7: /* VBIF */
8f8e3aa4
PB
4222 tmp = neon_load_reg(rd, pass);
4223 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4224 dead_tmp(tmp);
9ee6e8bb 4225 break;
2c0262af
FB
4226 }
4227 break;
9ee6e8bb
PB
4228 case 4: /* VHSUB */
4229 GEN_NEON_INTEGER_OP(hsub);
4230 break;
4231 case 5: /* VQSUB */
ad69471c 4232 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4233 break;
9ee6e8bb
PB
4234 case 6: /* VCGT */
4235 GEN_NEON_INTEGER_OP(cgt);
4236 break;
4237 case 7: /* VCGE */
4238 GEN_NEON_INTEGER_OP(cge);
4239 break;
4240 case 8: /* VSHL */
ad69471c 4241 GEN_NEON_INTEGER_OP(shl);
2c0262af 4242 break;
9ee6e8bb 4243 case 9: /* VQSHL */
ad69471c 4244 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4245 break;
9ee6e8bb 4246 case 10: /* VRSHL */
ad69471c 4247 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4248 break;
9ee6e8bb 4249 case 11: /* VQRSHL */
ad69471c 4250 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4251 break;
4252 case 12: /* VMAX */
4253 GEN_NEON_INTEGER_OP(max);
4254 break;
4255 case 13: /* VMIN */
4256 GEN_NEON_INTEGER_OP(min);
4257 break;
4258 case 14: /* VABD */
4259 GEN_NEON_INTEGER_OP(abd);
4260 break;
4261 case 15: /* VABA */
4262 GEN_NEON_INTEGER_OP(abd);
4263 NEON_GET_REG(T1, rd, pass);
4264 gen_neon_add(size);
4265 break;
4266 case 16:
4267 if (!u) { /* VADD */
4268 if (gen_neon_add(size))
4269 return 1;
4270 } else { /* VSUB */
4271 switch (size) {
ad69471c
PB
4272 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4273 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
9ee6e8bb
PB
4274 case 2: gen_op_subl_T0_T1(); break;
4275 default: return 1;
4276 }
4277 }
4278 break;
4279 case 17:
4280 if (!u) { /* VTST */
4281 switch (size) {
ad69471c
PB
4282 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4283 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4284 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
9ee6e8bb
PB
4285 default: return 1;
4286 }
4287 } else { /* VCEQ */
4288 switch (size) {
ad69471c
PB
4289 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4290 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4291 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
4292 default: return 1;
4293 }
4294 }
4295 break;
4296 case 18: /* Multiply. */
4297 switch (size) {
ad69471c
PB
4298 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4299 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4300 case 2: gen_op_mul_T0_T1(); break;
4301 default: return 1;
4302 }
4303 NEON_GET_REG(T1, rd, pass);
4304 if (u) { /* VMLS */
ad69471c 4305 gen_neon_rsb(size);
9ee6e8bb
PB
4306 } else { /* VMLA */
4307 gen_neon_add(size);
4308 }
4309 break;
4310 case 19: /* VMUL */
4311 if (u) { /* polynomial */
ad69471c 4312 gen_helper_neon_mul_p8(CPU_T001);
9ee6e8bb
PB
4313 } else { /* Integer */
4314 switch (size) {
ad69471c
PB
4315 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4316 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4317 case 2: gen_op_mul_T0_T1(); break;
4318 default: return 1;
4319 }
4320 }
4321 break;
4322 case 20: /* VPMAX */
4323 GEN_NEON_INTEGER_OP(pmax);
4324 break;
4325 case 21: /* VPMIN */
4326 GEN_NEON_INTEGER_OP(pmin);
4327 break;
4328 case 22: /* Hultiply high. */
4329 if (!u) { /* VQDMULH */
4330 switch (size) {
ad69471c
PB
4331 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4332 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4333 default: return 1;
4334 }
4335 } else { /* VQRDHMUL */
4336 switch (size) {
ad69471c
PB
4337 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4338 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4339 default: return 1;
4340 }
4341 }
4342 break;
4343 case 23: /* VPADD */
4344 if (u)
4345 return 1;
4346 switch (size) {
ad69471c
PB
4347 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4348 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
9ee6e8bb
PB
4349 case 2: gen_op_addl_T0_T1(); break;
4350 default: return 1;
4351 }
4352 break;
4353 case 26: /* Floating point arithnetic. */
4354 switch ((u << 2) | size) {
4355 case 0: /* VADD */
ad69471c 4356 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4357 break;
4358 case 2: /* VSUB */
ad69471c 4359 gen_helper_neon_sub_f32(CPU_T001);
9ee6e8bb
PB
4360 break;
4361 case 4: /* VPADD */
ad69471c 4362 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4363 break;
4364 case 6: /* VABD */
ad69471c 4365 gen_helper_neon_abd_f32(CPU_T001);
9ee6e8bb
PB
4366 break;
4367 default:
4368 return 1;
4369 }
4370 break;
4371 case 27: /* Float multiply. */
ad69471c 4372 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
4373 if (!u) {
4374 NEON_GET_REG(T1, rd, pass);
4375 if (size == 0) {
ad69471c 4376 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb 4377 } else {
ad69471c 4378 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
4379 }
4380 }
4381 break;
4382 case 28: /* Float compare. */
4383 if (!u) {
ad69471c 4384 gen_helper_neon_ceq_f32(CPU_T001);
b5ff1b31 4385 } else {
9ee6e8bb 4386 if (size == 0)
ad69471c 4387 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb 4388 else
ad69471c 4389 gen_helper_neon_cgt_f32(CPU_T001);
b5ff1b31 4390 }
2c0262af 4391 break;
9ee6e8bb
PB
4392 case 29: /* Float compare absolute. */
4393 if (!u)
4394 return 1;
4395 if (size == 0)
ad69471c 4396 gen_helper_neon_acge_f32(CPU_T001);
9ee6e8bb 4397 else
ad69471c 4398 gen_helper_neon_acgt_f32(CPU_T001);
2c0262af 4399 break;
9ee6e8bb
PB
4400 case 30: /* Float min/max. */
4401 if (size == 0)
ad69471c 4402 gen_helper_neon_max_f32(CPU_T001);
9ee6e8bb 4403 else
ad69471c 4404 gen_helper_neon_min_f32(CPU_T001);
9ee6e8bb
PB
4405 break;
4406 case 31:
4407 if (size == 0)
4373f3ce 4408 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4409 else
4373f3ce 4410 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4411 break;
9ee6e8bb
PB
4412 default:
4413 abort();
2c0262af 4414 }
9ee6e8bb
PB
4415 /* Save the result. For elementwise operations we can put it
4416 straight into the destination register. For pairwise operations
4417 we have to be careful to avoid clobbering the source operands. */
4418 if (pairwise && rd == rm) {
4419 gen_neon_movl_scratch_T0(pass);
4420 } else {
4421 NEON_SET_REG(T0, rd, pass);
4422 }
4423
4424 } /* for pass */
4425 if (pairwise && rd == rm) {
4426 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4427 gen_neon_movl_T0_scratch(pass);
4428 NEON_SET_REG(T0, rd, pass);
4429 }
4430 }
ad69471c 4431 /* End of 3 register same size operations. */
9ee6e8bb
PB
4432 } else if (insn & (1 << 4)) {
4433 if ((insn & 0x00380080) != 0) {
4434 /* Two registers and shift. */
4435 op = (insn >> 8) & 0xf;
4436 if (insn & (1 << 7)) {
4437 /* 64-bit shift. */
4438 size = 3;
4439 } else {
4440 size = 2;
4441 while ((insn & (1 << (size + 19))) == 0)
4442 size--;
4443 }
4444 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4445 /* To avoid excessive duplication of ops we implement shift
4446 by immediate using the variable shift operations. */
4447 if (op < 8) {
4448 /* Shift by immediate:
4449 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4450 /* Right shifts are encoded as N - shift, where N is the
4451 element size in bits. */
4452 if (op <= 4)
4453 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4454 if (size == 3) {
4455 count = q + 1;
4456 } else {
4457 count = q ? 4: 2;
4458 }
4459 switch (size) {
4460 case 0:
4461 imm = (uint8_t) shift;
4462 imm |= imm << 8;
4463 imm |= imm << 16;
4464 break;
4465 case 1:
4466 imm = (uint16_t) shift;
4467 imm |= imm << 16;
4468 break;
4469 case 2:
4470 case 3:
4471 imm = shift;
4472 break;
4473 default:
4474 abort();
4475 }
4476
4477 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4478 if (size == 3) {
4479 neon_load_reg64(cpu_V0, rm + pass);
4480 tcg_gen_movi_i64(cpu_V1, imm);
4481 switch (op) {
4482 case 0: /* VSHR */
4483 case 1: /* VSRA */
4484 if (u)
4485 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4486 else
ad69471c 4487 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4488 break;
ad69471c
PB
4489 case 2: /* VRSHR */
4490 case 3: /* VRSRA */
4491 if (u)
4492 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4493 else
ad69471c 4494 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4495 break;
ad69471c
PB
4496 case 4: /* VSRI */
4497 if (!u)
4498 return 1;
4499 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4500 break;
4501 case 5: /* VSHL, VSLI */
4502 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4503 break;
4504 case 6: /* VQSHL */
4505 if (u)
4506 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4507 else
ad69471c
PB
4508 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4509 break;
4510 case 7: /* VQSHLU */
4511 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4512 break;
9ee6e8bb 4513 }
ad69471c
PB
4514 if (op == 1 || op == 3) {
4515 /* Accumulate. */
4516 neon_load_reg64(cpu_V0, rd + pass);
4517 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4518 } else if (op == 4 || (op == 5 && u)) {
4519 /* Insert */
4520 cpu_abort(env, "VS[LR]I.64 not implemented");
4521 }
4522 neon_store_reg64(cpu_V0, rd + pass);
4523 } else { /* size < 3 */
4524 /* Operands in T0 and T1. */
4525 gen_op_movl_T1_im(imm);
4526 NEON_GET_REG(T0, rm, pass);
4527 switch (op) {
4528 case 0: /* VSHR */
4529 case 1: /* VSRA */
4530 GEN_NEON_INTEGER_OP(shl);
4531 break;
4532 case 2: /* VRSHR */
4533 case 3: /* VRSRA */
4534 GEN_NEON_INTEGER_OP(rshl);
4535 break;
4536 case 4: /* VSRI */
4537 if (!u)
4538 return 1;
4539 GEN_NEON_INTEGER_OP(shl);
4540 break;
4541 case 5: /* VSHL, VSLI */
4542 switch (size) {
4543 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4544 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4545 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4546 default: return 1;
4547 }
4548 break;
4549 case 6: /* VQSHL */
4550 GEN_NEON_INTEGER_OP_ENV(qshl);
4551 break;
4552 case 7: /* VQSHLU */
4553 switch (size) {
4554 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4555 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4556 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4557 default: return 1;
4558 }
4559 break;
4560 }
4561
4562 if (op == 1 || op == 3) {
4563 /* Accumulate. */
4564 NEON_GET_REG(T1, rd, pass);
4565 gen_neon_add(size);
4566 } else if (op == 4 || (op == 5 && u)) {
4567 /* Insert */
4568 switch (size) {
4569 case 0:
4570 if (op == 4)
4571 imm = 0xff >> -shift;
4572 else
4573 imm = (uint8_t)(0xff << shift);
4574 imm |= imm << 8;
4575 imm |= imm << 16;
4576 break;
4577 case 1:
4578 if (op == 4)
4579 imm = 0xffff >> -shift;
4580 else
4581 imm = (uint16_t)(0xffff << shift);
4582 imm |= imm << 16;
4583 break;
4584 case 2:
4585 if (op == 4)
4586 imm = 0xffffffffu >> -shift;
4587 else
4588 imm = 0xffffffffu << shift;
4589 break;
4590 default:
4591 abort();
4592 }
4593 tmp = neon_load_reg(rd, pass);
4594 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4595 tcg_gen_andi_i32(tmp, tmp, ~imm);
4596 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4597 }
9ee6e8bb
PB
4598 NEON_SET_REG(T0, rd, pass);
4599 }
4600 } /* for pass */
4601 } else if (op < 10) {
ad69471c 4602 /* Shift by immediate and narrow:
9ee6e8bb
PB
4603 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4604 shift = shift - (1 << (size + 3));
4605 size++;
9ee6e8bb
PB
4606 switch (size) {
4607 case 1:
ad69471c 4608 imm = (uint16_t)shift;
9ee6e8bb 4609 imm |= imm << 16;
ad69471c 4610 tmp2 = tcg_const_i32(imm);
a7812ae4 4611 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4612 break;
4613 case 2:
ad69471c
PB
4614 imm = (uint32_t)shift;
4615 tmp2 = tcg_const_i32(imm);
a7812ae4 4616 TCGV_UNUSED_I64(tmp64);
4cc633c3 4617 break;
9ee6e8bb 4618 case 3:
a7812ae4
PB
4619 tmp64 = tcg_const_i64(shift);
4620 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4621 break;
4622 default:
4623 abort();
4624 }
4625
ad69471c
PB
4626 for (pass = 0; pass < 2; pass++) {
4627 if (size == 3) {
4628 neon_load_reg64(cpu_V0, rm + pass);
4629 if (q) {
4630 if (u)
a7812ae4 4631 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4632 else
a7812ae4 4633 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4634 } else {
4635 if (u)
a7812ae4 4636 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4637 else
a7812ae4 4638 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4639 }
2c0262af 4640 } else {
ad69471c
PB
4641 tmp = neon_load_reg(rm + pass, 0);
4642 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4643 tmp3 = neon_load_reg(rm + pass, 1);
4644 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4645 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4646 dead_tmp(tmp);
36aa55dc 4647 dead_tmp(tmp3);
9ee6e8bb 4648 }
ad69471c
PB
4649 tmp = new_tmp();
4650 if (op == 8 && !u) {
4651 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4652 } else {
ad69471c
PB
4653 if (op == 8)
4654 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4655 else
ad69471c
PB
4656 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4657 }
4658 if (pass == 0) {
4659 tmp2 = tmp;
4660 } else {
4661 neon_store_reg(rd, 0, tmp2);
4662 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4663 }
4664 } /* for pass */
4665 } else if (op == 10) {
4666 /* VSHLL */
ad69471c 4667 if (q || size == 3)
9ee6e8bb 4668 return 1;
ad69471c
PB
4669 tmp = neon_load_reg(rm, 0);
4670 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4671 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4672 if (pass == 1)
4673 tmp = tmp2;
4674
4675 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4676
9ee6e8bb
PB
4677 if (shift != 0) {
4678 /* The shift is less than the width of the source
ad69471c
PB
4679 type, so we can just shift the whole register. */
4680 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4681 if (size < 2 || !u) {
4682 uint64_t imm64;
4683 if (size == 0) {
4684 imm = (0xffu >> (8 - shift));
4685 imm |= imm << 16;
4686 } else {
4687 imm = 0xffff >> (16 - shift);
9ee6e8bb 4688 }
ad69471c
PB
4689 imm64 = imm | (((uint64_t)imm) << 32);
4690 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4691 }
4692 }
ad69471c 4693 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4694 }
4695 } else if (op == 15 || op == 16) {
4696 /* VCVT fixed-point. */
4697 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4698 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4699 if (op & 1) {
4700 if (u)
4373f3ce 4701 gen_vfp_ulto(0, shift);
9ee6e8bb 4702 else
4373f3ce 4703 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4704 } else {
4705 if (u)
4373f3ce 4706 gen_vfp_toul(0, shift);
9ee6e8bb 4707 else
4373f3ce 4708 gen_vfp_tosl(0, shift);
2c0262af 4709 }
4373f3ce 4710 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4711 }
4712 } else {
9ee6e8bb
PB
4713 return 1;
4714 }
4715 } else { /* (insn & 0x00380080) == 0 */
4716 int invert;
4717
4718 op = (insn >> 8) & 0xf;
4719 /* One register and immediate. */
4720 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4721 invert = (insn & (1 << 5)) != 0;
4722 switch (op) {
4723 case 0: case 1:
4724 /* no-op */
4725 break;
4726 case 2: case 3:
4727 imm <<= 8;
4728 break;
4729 case 4: case 5:
4730 imm <<= 16;
4731 break;
4732 case 6: case 7:
4733 imm <<= 24;
4734 break;
4735 case 8: case 9:
4736 imm |= imm << 16;
4737 break;
4738 case 10: case 11:
4739 imm = (imm << 8) | (imm << 24);
4740 break;
4741 case 12:
4742 imm = (imm < 8) | 0xff;
4743 break;
4744 case 13:
4745 imm = (imm << 16) | 0xffff;
4746 break;
4747 case 14:
4748 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4749 if (invert)
4750 imm = ~imm;
4751 break;
4752 case 15:
4753 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4754 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4755 break;
4756 }
4757 if (invert)
4758 imm = ~imm;
4759
4760 if (op != 14 || !invert)
4761 gen_op_movl_T1_im(imm);
4762
4763 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4764 if (op & 1 && op < 12) {
ad69471c 4765 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4766 if (invert) {
4767 /* The immediate value has already been inverted, so
4768 BIC becomes AND. */
ad69471c 4769 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4770 } else {
ad69471c 4771 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4772 }
9ee6e8bb 4773 } else {
ad69471c
PB
4774 /* VMOV, VMVN. */
4775 tmp = new_tmp();
9ee6e8bb 4776 if (op == 14 && invert) {
ad69471c
PB
4777 uint32_t val;
4778 val = 0;
9ee6e8bb
PB
4779 for (n = 0; n < 4; n++) {
4780 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4781 val |= 0xff << (n * 8);
9ee6e8bb 4782 }
ad69471c
PB
4783 tcg_gen_movi_i32(tmp, val);
4784 } else {
4785 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4786 }
9ee6e8bb 4787 }
ad69471c 4788 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4789 }
4790 }
e4b3861d 4791 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4792 if (size != 3) {
4793 op = (insn >> 8) & 0xf;
4794 if ((insn & (1 << 6)) == 0) {
4795 /* Three registers of different lengths. */
4796 int src1_wide;
4797 int src2_wide;
4798 int prewiden;
4799 /* prewiden, src1_wide, src2_wide */
4800 static const int neon_3reg_wide[16][3] = {
4801 {1, 0, 0}, /* VADDL */
4802 {1, 1, 0}, /* VADDW */
4803 {1, 0, 0}, /* VSUBL */
4804 {1, 1, 0}, /* VSUBW */
4805 {0, 1, 1}, /* VADDHN */
4806 {0, 0, 0}, /* VABAL */
4807 {0, 1, 1}, /* VSUBHN */
4808 {0, 0, 0}, /* VABDL */
4809 {0, 0, 0}, /* VMLAL */
4810 {0, 0, 0}, /* VQDMLAL */
4811 {0, 0, 0}, /* VMLSL */
4812 {0, 0, 0}, /* VQDMLSL */
4813 {0, 0, 0}, /* Integer VMULL */
4814 {0, 0, 0}, /* VQDMULL */
4815 {0, 0, 0} /* Polynomial VMULL */
4816 };
4817
4818 prewiden = neon_3reg_wide[op][0];
4819 src1_wide = neon_3reg_wide[op][1];
4820 src2_wide = neon_3reg_wide[op][2];
4821
ad69471c
PB
4822 if (size == 0 && (op == 9 || op == 11 || op == 13))
4823 return 1;
4824
9ee6e8bb
PB
4825 /* Avoid overlapping operands. Wide source operands are
4826 always aligned so will never overlap with wide
4827 destinations in problematic ways. */
8f8e3aa4
PB
4828 if (rd == rm && !src2_wide) {
4829 NEON_GET_REG(T0, rm, 1);
4830 gen_neon_movl_scratch_T0(2);
4831 } else if (rd == rn && !src1_wide) {
4832 NEON_GET_REG(T0, rn, 1);
4833 gen_neon_movl_scratch_T0(2);
9ee6e8bb 4834 }
a50f5b91 4835 TCGV_UNUSED(tmp3);
9ee6e8bb 4836 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4837 if (src1_wide) {
4838 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4839 TCGV_UNUSED(tmp);
9ee6e8bb 4840 } else {
ad69471c
PB
4841 if (pass == 1 && rd == rn) {
4842 gen_neon_movl_T0_scratch(2);
4843 tmp = new_tmp();
4844 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb 4845 } else {
ad69471c
PB
4846 tmp = neon_load_reg(rn, pass);
4847 }
4848 if (prewiden) {
4849 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4850 }
4851 }
ad69471c
PB
4852 if (src2_wide) {
4853 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4854 TCGV_UNUSED(tmp2);
9ee6e8bb 4855 } else {
ad69471c 4856 if (pass == 1 && rd == rm) {
8f8e3aa4 4857 gen_neon_movl_T0_scratch(2);
ad69471c
PB
4858 tmp2 = new_tmp();
4859 tcg_gen_mov_i32(tmp2, cpu_T[0]);
9ee6e8bb 4860 } else {
ad69471c
PB
4861 tmp2 = neon_load_reg(rm, pass);
4862 }
4863 if (prewiden) {
4864 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4865 }
9ee6e8bb
PB
4866 }
4867 switch (op) {
4868 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4869 gen_neon_addl(size);
9ee6e8bb
PB
4870 break;
4871 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
ad69471c 4872 gen_neon_subl(size);
9ee6e8bb
PB
4873 break;
4874 case 5: case 7: /* VABAL, VABDL */
4875 switch ((size << 1) | u) {
ad69471c
PB
4876 case 0:
4877 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4878 break;
4879 case 1:
4880 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4881 break;
4882 case 2:
4883 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4884 break;
4885 case 3:
4886 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4887 break;
4888 case 4:
4889 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4890 break;
4891 case 5:
4892 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4893 break;
9ee6e8bb
PB
4894 default: abort();
4895 }
ad69471c
PB
4896 dead_tmp(tmp2);
4897 dead_tmp(tmp);
9ee6e8bb
PB
4898 break;
4899 case 8: case 9: case 10: case 11: case 12: case 13:
4900 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4901 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
4902 break;
4903 case 14: /* Polynomial VMULL */
4904 cpu_abort(env, "Polynomial VMULL not implemented");
4905
4906 default: /* 15 is RESERVED. */
4907 return 1;
4908 }
4909 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4910 /* Accumulate. */
4911 if (op == 10 || op == 11) {
ad69471c 4912 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4913 }
4914
9ee6e8bb 4915 if (op != 13) {
ad69471c 4916 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4917 }
4918
4919 switch (op) {
4920 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4921 gen_neon_addl(size);
9ee6e8bb
PB
4922 break;
4923 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4924 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4925 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4926 break;
9ee6e8bb
PB
4927 /* Fall through. */
4928 case 13: /* VQDMULL */
ad69471c 4929 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4930 break;
4931 default:
4932 abort();
4933 }
ad69471c 4934 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4935 } else if (op == 4 || op == 6) {
4936 /* Narrowing operation. */
ad69471c 4937 tmp = new_tmp();
9ee6e8bb
PB
4938 if (u) {
4939 switch (size) {
ad69471c
PB
4940 case 0:
4941 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4942 break;
4943 case 1:
4944 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4945 break;
4946 case 2:
4947 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4948 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4949 break;
9ee6e8bb
PB
4950 default: abort();
4951 }
4952 } else {
4953 switch (size) {
ad69471c
PB
4954 case 0:
4955 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4956 break;
4957 case 1:
4958 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4959 break;
4960 case 2:
4961 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4962 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4963 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4964 break;
9ee6e8bb
PB
4965 default: abort();
4966 }
4967 }
ad69471c
PB
4968 if (pass == 0) {
4969 tmp3 = tmp;
4970 } else {
4971 neon_store_reg(rd, 0, tmp3);
4972 neon_store_reg(rd, 1, tmp);
4973 }
9ee6e8bb
PB
4974 } else {
4975 /* Write back the result. */
ad69471c 4976 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4977 }
4978 }
4979 } else {
4980 /* Two registers and a scalar. */
4981 switch (op) {
4982 case 0: /* Integer VMLA scalar */
4983 case 1: /* Float VMLA scalar */
4984 case 4: /* Integer VMLS scalar */
4985 case 5: /* Floating point VMLS scalar */
4986 case 8: /* Integer VMUL scalar */
4987 case 9: /* Floating point VMUL scalar */
4988 case 12: /* VQDMULH scalar */
4989 case 13: /* VQRDMULH scalar */
4990 gen_neon_get_scalar(size, rm);
8f8e3aa4 4991 gen_neon_movl_scratch_T0(0);
9ee6e8bb
PB
4992 for (pass = 0; pass < (u ? 4 : 2); pass++) {
4993 if (pass != 0)
8f8e3aa4 4994 gen_neon_movl_T0_scratch(0);
9ee6e8bb
PB
4995 NEON_GET_REG(T1, rn, pass);
4996 if (op == 12) {
4997 if (size == 1) {
ad69471c 4998 gen_helper_neon_qdmulh_s16(CPU_T0E01);
9ee6e8bb 4999 } else {
ad69471c 5000 gen_helper_neon_qdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5001 }
5002 } else if (op == 13) {
5003 if (size == 1) {
ad69471c 5004 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
9ee6e8bb 5005 } else {
ad69471c 5006 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5007 }
5008 } else if (op & 1) {
ad69471c 5009 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
5010 } else {
5011 switch (size) {
ad69471c
PB
5012 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5013 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
5014 case 2: gen_op_mul_T0_T1(); break;
5015 default: return 1;
5016 }
5017 }
5018 if (op < 8) {
5019 /* Accumulate. */
5020 NEON_GET_REG(T1, rd, pass);
5021 switch (op) {
5022 case 0:
5023 gen_neon_add(size);
5024 break;
5025 case 1:
ad69471c 5026 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
5027 break;
5028 case 4:
ad69471c 5029 gen_neon_rsb(size);
9ee6e8bb
PB
5030 break;
5031 case 5:
ad69471c 5032 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
5033 break;
5034 default:
5035 abort();
5036 }
5037 }
5038 NEON_SET_REG(T0, rd, pass);
5039 }
5040 break;
5041 case 2: /* VMLAL scalar */
5042 case 3: /* VQDMLAL scalar */
5043 case 6: /* VMLSL scalar */
5044 case 7: /* VQDMLSL scalar */
5045 case 10: /* VMULL scalar */
5046 case 11: /* VQDMULL scalar */
ad69471c
PB
5047 if (size == 0 && (op == 3 || op == 7 || op == 11))
5048 return 1;
5049
9ee6e8bb 5050 gen_neon_get_scalar(size, rm);
ad69471c
PB
5051 NEON_GET_REG(T1, rn, 1);
5052
9ee6e8bb 5053 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5054 if (pass == 0) {
5055 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5056 } else {
ad69471c
PB
5057 tmp = new_tmp();
5058 tcg_gen_mov_i32(tmp, cpu_T[1]);
9ee6e8bb 5059 }
ad69471c
PB
5060 tmp2 = new_tmp();
5061 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5062 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb 5063 if (op == 6 || op == 7) {
ad69471c
PB
5064 gen_neon_negl(cpu_V0, size);
5065 }
5066 if (op != 11) {
5067 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5068 }
9ee6e8bb
PB
5069 switch (op) {
5070 case 2: case 6:
ad69471c 5071 gen_neon_addl(size);
9ee6e8bb
PB
5072 break;
5073 case 3: case 7:
ad69471c
PB
5074 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5075 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5076 break;
5077 case 10:
5078 /* no-op */
5079 break;
5080 case 11:
ad69471c 5081 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5082 break;
5083 default:
5084 abort();
5085 }
ad69471c 5086 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5087 }
5088 break;
5089 default: /* 14 and 15 are RESERVED */
5090 return 1;
5091 }
5092 }
5093 } else { /* size == 3 */
5094 if (!u) {
5095 /* Extract. */
9ee6e8bb 5096 imm = (insn >> 8) & 0xf;
ad69471c
PB
5097 count = q + 1;
5098
5099 if (imm > 7 && !q)
5100 return 1;
5101
5102 if (imm == 0) {
5103 neon_load_reg64(cpu_V0, rn);
5104 if (q) {
5105 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5106 }
ad69471c
PB
5107 } else if (imm == 8) {
5108 neon_load_reg64(cpu_V0, rn + 1);
5109 if (q) {
5110 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5111 }
ad69471c 5112 } else if (q) {
a7812ae4 5113 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5114 if (imm < 8) {
5115 neon_load_reg64(cpu_V0, rn);
a7812ae4 5116 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5117 } else {
5118 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5119 neon_load_reg64(tmp64, rm);
ad69471c
PB
5120 }
5121 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5122 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5123 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5124 if (imm < 8) {
5125 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5126 } else {
ad69471c
PB
5127 neon_load_reg64(cpu_V1, rm + 1);
5128 imm -= 8;
9ee6e8bb 5129 }
ad69471c 5130 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5131 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5132 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
ad69471c 5133 } else {
a7812ae4 5134 /* BUGFIX */
ad69471c 5135 neon_load_reg64(cpu_V0, rn);
a7812ae4 5136 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5137 neon_load_reg64(cpu_V1, rm);
a7812ae4 5138 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5139 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5140 }
5141 neon_store_reg64(cpu_V0, rd);
5142 if (q) {
5143 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5144 }
5145 } else if ((insn & (1 << 11)) == 0) {
5146 /* Two register misc. */
5147 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5148 size = (insn >> 18) & 3;
5149 switch (op) {
5150 case 0: /* VREV64 */
5151 if (size == 3)
5152 return 1;
5153 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5154 NEON_GET_REG(T0, rm, pass * 2);
5155 NEON_GET_REG(T1, rm, pass * 2 + 1);
5156 switch (size) {
66896cb8 5157 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5158 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5159 case 2: /* no-op */ break;
5160 default: abort();
5161 }
5162 NEON_SET_REG(T0, rd, pass * 2 + 1);
5163 if (size == 2) {
5164 NEON_SET_REG(T1, rd, pass * 2);
5165 } else {
5166 gen_op_movl_T0_T1();
5167 switch (size) {
66896cb8 5168 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5169 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5170 default: abort();
5171 }
5172 NEON_SET_REG(T0, rd, pass * 2);
5173 }
5174 }
5175 break;
5176 case 4: case 5: /* VPADDL */
5177 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5178 if (size == 3)
5179 return 1;
ad69471c
PB
5180 for (pass = 0; pass < q + 1; pass++) {
5181 tmp = neon_load_reg(rm, pass * 2);
5182 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5183 tmp = neon_load_reg(rm, pass * 2 + 1);
5184 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5185 switch (size) {
5186 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5187 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5188 case 2: tcg_gen_add_i64(CPU_V001); break;
5189 default: abort();
5190 }
9ee6e8bb
PB
5191 if (op >= 12) {
5192 /* Accumulate. */
ad69471c
PB
5193 neon_load_reg64(cpu_V1, rd + pass);
5194 gen_neon_addl(size);
9ee6e8bb 5195 }
ad69471c 5196 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5197 }
5198 break;
5199 case 33: /* VTRN */
5200 if (size == 2) {
5201 for (n = 0; n < (q ? 4 : 2); n += 2) {
5202 NEON_GET_REG(T0, rm, n);
5203 NEON_GET_REG(T1, rd, n + 1);
5204 NEON_SET_REG(T1, rm, n);
5205 NEON_SET_REG(T0, rd, n + 1);
5206 }
5207 } else {
5208 goto elementwise;
5209 }
5210 break;
5211 case 34: /* VUZP */
5212 /* Reg Before After
5213 Rd A3 A2 A1 A0 B2 B0 A2 A0
5214 Rm B3 B2 B1 B0 B3 B1 A3 A1
5215 */
5216 if (size == 3)
5217 return 1;
5218 gen_neon_unzip(rd, q, 0, size);
5219 gen_neon_unzip(rm, q, 4, size);
5220 if (q) {
5221 static int unzip_order_q[8] =
5222 {0, 2, 4, 6, 1, 3, 5, 7};
5223 for (n = 0; n < 8; n++) {
5224 int reg = (n < 4) ? rd : rm;
5225 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5226 NEON_SET_REG(T0, reg, n % 4);
5227 }
5228 } else {
5229 static int unzip_order[4] =
5230 {0, 4, 1, 5};
5231 for (n = 0; n < 4; n++) {
5232 int reg = (n < 2) ? rd : rm;
5233 gen_neon_movl_T0_scratch(unzip_order[n]);
5234 NEON_SET_REG(T0, reg, n % 2);
5235 }
5236 }
5237 break;
5238 case 35: /* VZIP */
5239 /* Reg Before After
5240 Rd A3 A2 A1 A0 B1 A1 B0 A0
5241 Rm B3 B2 B1 B0 B3 A3 B2 A2
5242 */
5243 if (size == 3)
5244 return 1;
5245 count = (q ? 4 : 2);
5246 for (n = 0; n < count; n++) {
5247 NEON_GET_REG(T0, rd, n);
5248 NEON_GET_REG(T1, rd, n);
5249 switch (size) {
ad69471c
PB
5250 case 0: gen_helper_neon_zip_u8(); break;
5251 case 1: gen_helper_neon_zip_u16(); break;
9ee6e8bb
PB
5252 case 2: /* no-op */; break;
5253 default: abort();
5254 }
5255 gen_neon_movl_scratch_T0(n * 2);
5256 gen_neon_movl_scratch_T1(n * 2 + 1);
5257 }
5258 for (n = 0; n < count * 2; n++) {
5259 int reg = (n < count) ? rd : rm;
5260 gen_neon_movl_T0_scratch(n);
5261 NEON_SET_REG(T0, reg, n % count);
5262 }
5263 break;
5264 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5265 if (size == 3)
5266 return 1;
a50f5b91 5267 TCGV_UNUSED(tmp2);
9ee6e8bb 5268 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5269 neon_load_reg64(cpu_V0, rm + pass);
5270 tmp = new_tmp();
9ee6e8bb 5271 if (op == 36 && q == 0) {
ad69471c 5272 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5273 } else if (q) {
ad69471c 5274 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5275 } else {
ad69471c
PB
5276 gen_neon_narrow_sats(size, tmp, cpu_V0);
5277 }
5278 if (pass == 0) {
5279 tmp2 = tmp;
5280 } else {
5281 neon_store_reg(rd, 0, tmp2);
5282 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5283 }
9ee6e8bb
PB
5284 }
5285 break;
5286 case 38: /* VSHLL */
ad69471c 5287 if (q || size == 3)
9ee6e8bb 5288 return 1;
ad69471c
PB
5289 tmp = neon_load_reg(rm, 0);
5290 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5291 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5292 if (pass == 1)
5293 tmp = tmp2;
5294 gen_neon_widen(cpu_V0, tmp, size, 1);
5295 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5296 }
5297 break;
5298 default:
5299 elementwise:
5300 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5301 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5302 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5303 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5304 } else {
5305 NEON_GET_REG(T0, rm, pass);
5306 }
5307 switch (op) {
5308 case 1: /* VREV32 */
5309 switch (size) {
66896cb8 5310 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5311 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5312 default: return 1;
5313 }
5314 break;
5315 case 2: /* VREV16 */
5316 if (size != 0)
5317 return 1;
3670669c 5318 gen_rev16(cpu_T[0]);
9ee6e8bb 5319 break;
9ee6e8bb
PB
5320 case 8: /* CLS */
5321 switch (size) {
ad69471c
PB
5322 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5323 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5324 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5325 default: return 1;
5326 }
5327 break;
5328 case 9: /* CLZ */
5329 switch (size) {
ad69471c
PB
5330 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5331 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
1497c961 5332 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5333 default: return 1;
5334 }
5335 break;
5336 case 10: /* CNT */
5337 if (size != 0)
5338 return 1;
ad69471c 5339 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5340 break;
5341 case 11: /* VNOT */
5342 if (size != 0)
5343 return 1;
5344 gen_op_notl_T0();
5345 break;
5346 case 14: /* VQABS */
5347 switch (size) {
ad69471c
PB
5348 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5349 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5350 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5351 default: return 1;
5352 }
5353 break;
5354 case 15: /* VQNEG */
5355 switch (size) {
ad69471c
PB
5356 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5357 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5358 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5359 default: return 1;
5360 }
5361 break;
5362 case 16: case 19: /* VCGT #0, VCLE #0 */
5363 gen_op_movl_T1_im(0);
5364 switch(size) {
ad69471c
PB
5365 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5366 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5367 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
9ee6e8bb
PB
5368 default: return 1;
5369 }
5370 if (op == 19)
5371 gen_op_notl_T0();
5372 break;
5373 case 17: case 20: /* VCGE #0, VCLT #0 */
5374 gen_op_movl_T1_im(0);
5375 switch(size) {
ad69471c
PB
5376 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5377 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5378 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
9ee6e8bb
PB
5379 default: return 1;
5380 }
5381 if (op == 20)
5382 gen_op_notl_T0();
5383 break;
5384 case 18: /* VCEQ #0 */
5385 gen_op_movl_T1_im(0);
5386 switch(size) {
ad69471c
PB
5387 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5388 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5389 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
5390 default: return 1;
5391 }
5392 break;
5393 case 22: /* VABS */
5394 switch(size) {
ad69471c
PB
5395 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5396 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5397 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5398 default: return 1;
5399 }
5400 break;
5401 case 23: /* VNEG */
5402 gen_op_movl_T1_im(0);
ad69471c
PB
5403 if (size == 3)
5404 return 1;
5405 gen_neon_rsb(size);
9ee6e8bb
PB
5406 break;
5407 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5408 gen_op_movl_T1_im(0);
ad69471c 5409 gen_helper_neon_cgt_f32(CPU_T001);
9ee6e8bb
PB
5410 if (op == 27)
5411 gen_op_notl_T0();
5412 break;
5413 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5414 gen_op_movl_T1_im(0);
ad69471c 5415 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb
PB
5416 if (op == 28)
5417 gen_op_notl_T0();
5418 break;
5419 case 26: /* Float VCEQ #0 */
5420 gen_op_movl_T1_im(0);
ad69471c 5421 gen_helper_neon_ceq_f32(CPU_T001);
9ee6e8bb
PB
5422 break;
5423 case 30: /* Float VABS */
4373f3ce 5424 gen_vfp_abs(0);
9ee6e8bb
PB
5425 break;
5426 case 31: /* Float VNEG */
4373f3ce 5427 gen_vfp_neg(0);
9ee6e8bb
PB
5428 break;
5429 case 32: /* VSWP */
5430 NEON_GET_REG(T1, rd, pass);
5431 NEON_SET_REG(T1, rm, pass);
5432 break;
5433 case 33: /* VTRN */
5434 NEON_GET_REG(T1, rd, pass);
5435 switch (size) {
ad69471c
PB
5436 case 0: gen_helper_neon_trn_u8(); break;
5437 case 1: gen_helper_neon_trn_u16(); break;
9ee6e8bb
PB
5438 case 2: abort();
5439 default: return 1;
5440 }
5441 NEON_SET_REG(T1, rm, pass);
5442 break;
5443 case 56: /* Integer VRECPE */
4373f3ce 5444 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5445 break;
5446 case 57: /* Integer VRSQRTE */
4373f3ce 5447 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5448 break;
5449 case 58: /* Float VRECPE */
4373f3ce 5450 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5451 break;
5452 case 59: /* Float VRSQRTE */
4373f3ce 5453 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5454 break;
5455 case 60: /* VCVT.F32.S32 */
4373f3ce 5456 gen_vfp_tosiz(0);
9ee6e8bb
PB
5457 break;
5458 case 61: /* VCVT.F32.U32 */
4373f3ce 5459 gen_vfp_touiz(0);
9ee6e8bb
PB
5460 break;
5461 case 62: /* VCVT.S32.F32 */
4373f3ce 5462 gen_vfp_sito(0);
9ee6e8bb
PB
5463 break;
5464 case 63: /* VCVT.U32.F32 */
4373f3ce 5465 gen_vfp_uito(0);
9ee6e8bb
PB
5466 break;
5467 default:
5468 /* Reserved: 21, 29, 39-56 */
5469 return 1;
5470 }
5471 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5472 tcg_gen_st_f32(cpu_F0s, cpu_env,
5473 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5474 } else {
5475 NEON_SET_REG(T0, rd, pass);
5476 }
5477 }
5478 break;
5479 }
5480 } else if ((insn & (1 << 10)) == 0) {
5481 /* VTBL, VTBX. */
3018f259 5482 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5483 if (insn & (1 << 6)) {
8f8e3aa4 5484 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5485 } else {
8f8e3aa4
PB
5486 tmp = new_tmp();
5487 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5488 }
8f8e3aa4
PB
5489 tmp2 = neon_load_reg(rm, 0);
5490 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5491 tcg_const_i32(n));
3018f259 5492 dead_tmp(tmp);
9ee6e8bb 5493 if (insn & (1 << 6)) {
8f8e3aa4 5494 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5495 } else {
8f8e3aa4
PB
5496 tmp = new_tmp();
5497 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5498 }
8f8e3aa4
PB
5499 tmp3 = neon_load_reg(rm, 1);
5500 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5501 tcg_const_i32(n));
5502 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5503 neon_store_reg(rd, 1, tmp3);
5504 dead_tmp(tmp);
9ee6e8bb
PB
5505 } else if ((insn & 0x380) == 0) {
5506 /* VDUP */
5507 if (insn & (1 << 19)) {
5508 NEON_SET_REG(T0, rm, 1);
5509 } else {
5510 NEON_SET_REG(T0, rm, 0);
5511 }
5512 if (insn & (1 << 16)) {
ad69471c 5513 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5514 } else if (insn & (1 << 17)) {
5515 if ((insn >> 18) & 1)
ad69471c 5516 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb 5517 else
ad69471c 5518 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb
PB
5519 }
5520 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5521 NEON_SET_REG(T0, rd, pass);
5522 }
5523 } else {
5524 return 1;
5525 }
5526 }
5527 }
5528 return 0;
5529}
5530
fe1479c3
PB
5531static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5532{
5533 int crn = (insn >> 16) & 0xf;
5534 int crm = insn & 0xf;
5535 int op1 = (insn >> 21) & 7;
5536 int op2 = (insn >> 5) & 7;
5537 int rt = (insn >> 12) & 0xf;
5538 TCGv tmp;
5539
5540 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5541 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5542 /* TEECR */
5543 if (IS_USER(s))
5544 return 1;
5545 tmp = load_cpu_field(teecr);
5546 store_reg(s, rt, tmp);
5547 return 0;
5548 }
5549 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5550 /* TEEHBR */
5551 if (IS_USER(s) && (env->teecr & 1))
5552 return 1;
5553 tmp = load_cpu_field(teehbr);
5554 store_reg(s, rt, tmp);
5555 return 0;
5556 }
5557 }
5558 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5559 op1, crn, crm, op2);
5560 return 1;
5561}
5562
5563static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5564{
5565 int crn = (insn >> 16) & 0xf;
5566 int crm = insn & 0xf;
5567 int op1 = (insn >> 21) & 7;
5568 int op2 = (insn >> 5) & 7;
5569 int rt = (insn >> 12) & 0xf;
5570 TCGv tmp;
5571
5572 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5573 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5574 /* TEECR */
5575 if (IS_USER(s))
5576 return 1;
5577 tmp = load_reg(s, rt);
5578 gen_helper_set_teecr(cpu_env, tmp);
5579 dead_tmp(tmp);
5580 return 0;
5581 }
5582 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5583 /* TEEHBR */
5584 if (IS_USER(s) && (env->teecr & 1))
5585 return 1;
5586 tmp = load_reg(s, rt);
5587 store_cpu_field(tmp, teehbr);
5588 return 0;
5589 }
5590 }
5591 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5592 op1, crn, crm, op2);
5593 return 1;
5594}
5595
9ee6e8bb
PB
5596static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5597{
5598 int cpnum;
5599
5600 cpnum = (insn >> 8) & 0xf;
5601 if (arm_feature(env, ARM_FEATURE_XSCALE)
5602 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5603 return 1;
5604
5605 switch (cpnum) {
5606 case 0:
5607 case 1:
5608 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5609 return disas_iwmmxt_insn(env, s, insn);
5610 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5611 return disas_dsp_insn(env, s, insn);
5612 }
5613 return 1;
5614 case 10:
5615 case 11:
5616 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5617 case 14:
5618 /* Coprocessors 7-15 are architecturally reserved by ARM.
5619 Unfortunately Intel decided to ignore this. */
5620 if (arm_feature(env, ARM_FEATURE_XSCALE))
5621 goto board;
5622 if (insn & (1 << 20))
5623 return disas_cp14_read(env, s, insn);
5624 else
5625 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5626 case 15:
5627 return disas_cp15_insn (env, s, insn);
5628 default:
fe1479c3 5629 board:
9ee6e8bb
PB
5630 /* Unknown coprocessor. See if the board has hooked it. */
5631 return disas_cp_insn (env, s, insn);
5632 }
5633}
5634
5e3f878a
PB
5635
5636/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5637static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5638{
5639 TCGv tmp;
5640 tmp = new_tmp();
5641 tcg_gen_trunc_i64_i32(tmp, val);
5642 store_reg(s, rlow, tmp);
5643 tmp = new_tmp();
5644 tcg_gen_shri_i64(val, val, 32);
5645 tcg_gen_trunc_i64_i32(tmp, val);
5646 store_reg(s, rhigh, tmp);
5647}
5648
5649/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5650static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5651{
a7812ae4 5652 TCGv_i64 tmp;
5e3f878a
PB
5653 TCGv tmp2;
5654
36aa55dc 5655 /* Load value and extend to 64 bits. */
a7812ae4 5656 tmp = tcg_temp_new_i64();
5e3f878a
PB
5657 tmp2 = load_reg(s, rlow);
5658 tcg_gen_extu_i32_i64(tmp, tmp2);
5659 dead_tmp(tmp2);
5660 tcg_gen_add_i64(val, val, tmp);
5661}
5662
5663/* load and add a 64-bit value from a register pair. */
a7812ae4 5664static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5665{
a7812ae4 5666 TCGv_i64 tmp;
36aa55dc
PB
5667 TCGv tmpl;
5668 TCGv tmph;
5e3f878a
PB
5669
5670 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5671 tmpl = load_reg(s, rlow);
5672 tmph = load_reg(s, rhigh);
a7812ae4 5673 tmp = tcg_temp_new_i64();
36aa55dc
PB
5674 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5675 dead_tmp(tmpl);
5676 dead_tmp(tmph);
5e3f878a
PB
5677 tcg_gen_add_i64(val, val, tmp);
5678}
5679
5680/* Set N and Z flags from a 64-bit value. */
a7812ae4 5681static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5682{
5683 TCGv tmp = new_tmp();
5684 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5685 gen_logic_CC(tmp);
5686 dead_tmp(tmp);
5e3f878a
PB
5687}
5688
9ee6e8bb
PB
5689static void disas_arm_insn(CPUState * env, DisasContext *s)
5690{
5691 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5692 TCGv tmp;
3670669c 5693 TCGv tmp2;
6ddbc6e4 5694 TCGv tmp3;
b0109805 5695 TCGv addr;
a7812ae4 5696 TCGv_i64 tmp64;
9ee6e8bb
PB
5697
5698 insn = ldl_code(s->pc);
5699 s->pc += 4;
5700
5701 /* M variants do not implement ARM mode. */
5702 if (IS_M(env))
5703 goto illegal_op;
5704 cond = insn >> 28;
5705 if (cond == 0xf){
5706 /* Unconditional instructions. */
5707 if (((insn >> 25) & 7) == 1) {
5708 /* NEON Data processing. */
5709 if (!arm_feature(env, ARM_FEATURE_NEON))
5710 goto illegal_op;
5711
5712 if (disas_neon_data_insn(env, s, insn))
5713 goto illegal_op;
5714 return;
5715 }
5716 if ((insn & 0x0f100000) == 0x04000000) {
5717 /* NEON load/store. */
5718 if (!arm_feature(env, ARM_FEATURE_NEON))
5719 goto illegal_op;
5720
5721 if (disas_neon_ls_insn(env, s, insn))
5722 goto illegal_op;
5723 return;
5724 }
5725 if ((insn & 0x0d70f000) == 0x0550f000)
5726 return; /* PLD */
5727 else if ((insn & 0x0ffffdff) == 0x01010000) {
5728 ARCH(6);
5729 /* setend */
5730 if (insn & (1 << 9)) {
5731 /* BE8 mode not implemented. */
5732 goto illegal_op;
5733 }
5734 return;
5735 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5736 switch ((insn >> 4) & 0xf) {
5737 case 1: /* clrex */
5738 ARCH(6K);
8f8e3aa4 5739 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5740 return;
5741 case 4: /* dsb */
5742 case 5: /* dmb */
5743 case 6: /* isb */
5744 ARCH(7);
5745 /* We don't emulate caches so these are a no-op. */
5746 return;
5747 default:
5748 goto illegal_op;
5749 }
5750 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5751 /* srs */
5752 uint32_t offset;
5753 if (IS_USER(s))
5754 goto illegal_op;
5755 ARCH(6);
5756 op1 = (insn & 0x1f);
5757 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5758 addr = load_reg(s, 13);
9ee6e8bb 5759 } else {
b0109805
PB
5760 addr = new_tmp();
5761 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5762 }
5763 i = (insn >> 23) & 3;
5764 switch (i) {
5765 case 0: offset = -4; break; /* DA */
5766 case 1: offset = -8; break; /* DB */
5767 case 2: offset = 0; break; /* IA */
5768 case 3: offset = 4; break; /* IB */
5769 default: abort();
5770 }
5771 if (offset)
b0109805
PB
5772 tcg_gen_addi_i32(addr, addr, offset);
5773 tmp = load_reg(s, 14);
5774 gen_st32(tmp, addr, 0);
5775 tmp = new_tmp();
5776 gen_helper_cpsr_read(tmp);
5777 tcg_gen_addi_i32(addr, addr, 4);
5778 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5779 if (insn & (1 << 21)) {
5780 /* Base writeback. */
5781 switch (i) {
5782 case 0: offset = -8; break;
5783 case 1: offset = -4; break;
5784 case 2: offset = 4; break;
5785 case 3: offset = 0; break;
5786 default: abort();
5787 }
5788 if (offset)
b0109805 5789 tcg_gen_addi_i32(addr, tmp, offset);
9ee6e8bb
PB
5790 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5791 gen_movl_reg_T1(s, 13);
5792 } else {
b0109805 5793 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
9ee6e8bb 5794 }
b0109805
PB
5795 } else {
5796 dead_tmp(addr);
9ee6e8bb
PB
5797 }
5798 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5799 /* rfe */
5800 uint32_t offset;
5801 if (IS_USER(s))
5802 goto illegal_op;
5803 ARCH(6);
5804 rn = (insn >> 16) & 0xf;
b0109805 5805 addr = load_reg(s, rn);
9ee6e8bb
PB
5806 i = (insn >> 23) & 3;
5807 switch (i) {
b0109805
PB
5808 case 0: offset = -4; break; /* DA */
5809 case 1: offset = -8; break; /* DB */
5810 case 2: offset = 0; break; /* IA */
5811 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5812 default: abort();
5813 }
5814 if (offset)
b0109805
PB
5815 tcg_gen_addi_i32(addr, addr, offset);
5816 /* Load PC into tmp and CPSR into tmp2. */
5817 tmp = gen_ld32(addr, 0);
5818 tcg_gen_addi_i32(addr, addr, 4);
5819 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5820 if (insn & (1 << 21)) {
5821 /* Base writeback. */
5822 switch (i) {
b0109805
PB
5823 case 0: offset = -8; break;
5824 case 1: offset = -4; break;
5825 case 2: offset = 4; break;
5826 case 3: offset = 0; break;
9ee6e8bb
PB
5827 default: abort();
5828 }
5829 if (offset)
b0109805
PB
5830 tcg_gen_addi_i32(addr, addr, offset);
5831 store_reg(s, rn, addr);
5832 } else {
5833 dead_tmp(addr);
9ee6e8bb 5834 }
b0109805 5835 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
5836 } else if ((insn & 0x0e000000) == 0x0a000000) {
5837 /* branch link and change to thumb (blx <offset>) */
5838 int32_t offset;
5839
5840 val = (uint32_t)s->pc;
d9ba4830
PB
5841 tmp = new_tmp();
5842 tcg_gen_movi_i32(tmp, val);
5843 store_reg(s, 14, tmp);
9ee6e8bb
PB
5844 /* Sign-extend the 24-bit offset */
5845 offset = (((int32_t)insn) << 8) >> 8;
5846 /* offset * 4 + bit24 * 2 + (thumb bit) */
5847 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5848 /* pipeline offset */
5849 val += 4;
d9ba4830 5850 gen_bx_im(s, val);
9ee6e8bb
PB
5851 return;
5852 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5853 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5854 /* iWMMXt register transfer. */
5855 if (env->cp15.c15_cpar & (1 << 1))
5856 if (!disas_iwmmxt_insn(env, s, insn))
5857 return;
5858 }
5859 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5860 /* Coprocessor double register transfer. */
5861 } else if ((insn & 0x0f000010) == 0x0e000010) {
5862 /* Additional coprocessor register transfer. */
7997d92f 5863 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5864 uint32_t mask;
5865 uint32_t val;
5866 /* cps (privileged) */
5867 if (IS_USER(s))
5868 return;
5869 mask = val = 0;
5870 if (insn & (1 << 19)) {
5871 if (insn & (1 << 8))
5872 mask |= CPSR_A;
5873 if (insn & (1 << 7))
5874 mask |= CPSR_I;
5875 if (insn & (1 << 6))
5876 mask |= CPSR_F;
5877 if (insn & (1 << 18))
5878 val |= mask;
5879 }
7997d92f 5880 if (insn & (1 << 17)) {
9ee6e8bb
PB
5881 mask |= CPSR_M;
5882 val |= (insn & 0x1f);
5883 }
5884 if (mask) {
5885 gen_op_movl_T0_im(val);
5886 gen_set_psr_T0(s, mask, 0);
5887 }
5888 return;
5889 }
5890 goto illegal_op;
5891 }
5892 if (cond != 0xe) {
5893 /* if not always execute, we generate a conditional jump to
5894 next instruction */
5895 s->condlabel = gen_new_label();
d9ba4830 5896 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5897 s->condjmp = 1;
5898 }
5899 if ((insn & 0x0f900000) == 0x03000000) {
5900 if ((insn & (1 << 21)) == 0) {
5901 ARCH(6T2);
5902 rd = (insn >> 12) & 0xf;
5903 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5904 if ((insn & (1 << 22)) == 0) {
5905 /* MOVW */
5e3f878a
PB
5906 tmp = new_tmp();
5907 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5908 } else {
5909 /* MOVT */
5e3f878a 5910 tmp = load_reg(s, rd);
86831435 5911 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5912 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5913 }
5e3f878a 5914 store_reg(s, rd, tmp);
9ee6e8bb
PB
5915 } else {
5916 if (((insn >> 12) & 0xf) != 0xf)
5917 goto illegal_op;
5918 if (((insn >> 16) & 0xf) == 0) {
5919 gen_nop_hint(s, insn & 0xff);
5920 } else {
5921 /* CPSR = immediate */
5922 val = insn & 0xff;
5923 shift = ((insn >> 8) & 0xf) * 2;
5924 if (shift)
5925 val = (val >> shift) | (val << (32 - shift));
5926 gen_op_movl_T0_im(val);
5927 i = ((insn & (1 << 22)) != 0);
5928 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5929 goto illegal_op;
5930 }
5931 }
5932 } else if ((insn & 0x0f900000) == 0x01000000
5933 && (insn & 0x00000090) != 0x00000090) {
5934 /* miscellaneous instructions */
5935 op1 = (insn >> 21) & 3;
5936 sh = (insn >> 4) & 0xf;
5937 rm = insn & 0xf;
5938 switch (sh) {
5939 case 0x0: /* move program status register */
5940 if (op1 & 1) {
5941 /* PSR = reg */
5942 gen_movl_T0_reg(s, rm);
5943 i = ((op1 & 2) != 0);
5944 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5945 goto illegal_op;
5946 } else {
5947 /* reg = PSR */
5948 rd = (insn >> 12) & 0xf;
5949 if (op1 & 2) {
5950 if (IS_USER(s))
5951 goto illegal_op;
d9ba4830 5952 tmp = load_cpu_field(spsr);
9ee6e8bb 5953 } else {
d9ba4830
PB
5954 tmp = new_tmp();
5955 gen_helper_cpsr_read(tmp);
9ee6e8bb 5956 }
d9ba4830 5957 store_reg(s, rd, tmp);
9ee6e8bb
PB
5958 }
5959 break;
5960 case 0x1:
5961 if (op1 == 1) {
5962 /* branch/exchange thumb (bx). */
d9ba4830
PB
5963 tmp = load_reg(s, rm);
5964 gen_bx(s, tmp);
9ee6e8bb
PB
5965 } else if (op1 == 3) {
5966 /* clz */
5967 rd = (insn >> 12) & 0xf;
1497c961
PB
5968 tmp = load_reg(s, rm);
5969 gen_helper_clz(tmp, tmp);
5970 store_reg(s, rd, tmp);
9ee6e8bb
PB
5971 } else {
5972 goto illegal_op;
5973 }
5974 break;
5975 case 0x2:
5976 if (op1 == 1) {
5977 ARCH(5J); /* bxj */
5978 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
5979 tmp = load_reg(s, rm);
5980 gen_bx(s, tmp);
9ee6e8bb
PB
5981 } else {
5982 goto illegal_op;
5983 }
5984 break;
5985 case 0x3:
5986 if (op1 != 1)
5987 goto illegal_op;
5988
5989 /* branch link/exchange thumb (blx) */
d9ba4830
PB
5990 tmp = load_reg(s, rm);
5991 tmp2 = new_tmp();
5992 tcg_gen_movi_i32(tmp2, s->pc);
5993 store_reg(s, 14, tmp2);
5994 gen_bx(s, tmp);
9ee6e8bb
PB
5995 break;
5996 case 0x5: /* saturating add/subtract */
5997 rd = (insn >> 12) & 0xf;
5998 rn = (insn >> 16) & 0xf;
b40d0353 5999 tmp = load_reg(s, rm);
5e3f878a 6000 tmp2 = load_reg(s, rn);
9ee6e8bb 6001 if (op1 & 2)
5e3f878a 6002 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6003 if (op1 & 1)
5e3f878a 6004 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6005 else
5e3f878a
PB
6006 gen_helper_add_saturate(tmp, tmp, tmp2);
6007 dead_tmp(tmp2);
6008 store_reg(s, rd, tmp);
9ee6e8bb
PB
6009 break;
6010 case 7: /* bkpt */
6011 gen_set_condexec(s);
5e3f878a 6012 gen_set_pc_im(s->pc - 4);
d9ba4830 6013 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6014 s->is_jmp = DISAS_JUMP;
6015 break;
6016 case 0x8: /* signed multiply */
6017 case 0xa:
6018 case 0xc:
6019 case 0xe:
6020 rs = (insn >> 8) & 0xf;
6021 rn = (insn >> 12) & 0xf;
6022 rd = (insn >> 16) & 0xf;
6023 if (op1 == 1) {
6024 /* (32 * 16) >> 16 */
5e3f878a
PB
6025 tmp = load_reg(s, rm);
6026 tmp2 = load_reg(s, rs);
9ee6e8bb 6027 if (sh & 4)
5e3f878a 6028 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6029 else
5e3f878a 6030 gen_sxth(tmp2);
a7812ae4
PB
6031 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6032 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6033 tmp = new_tmp();
a7812ae4 6034 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 6035 if ((sh & 2) == 0) {
5e3f878a
PB
6036 tmp2 = load_reg(s, rn);
6037 gen_helper_add_setq(tmp, tmp, tmp2);
6038 dead_tmp(tmp2);
9ee6e8bb 6039 }
5e3f878a 6040 store_reg(s, rd, tmp);
9ee6e8bb
PB
6041 } else {
6042 /* 16 * 16 */
5e3f878a
PB
6043 tmp = load_reg(s, rm);
6044 tmp2 = load_reg(s, rs);
6045 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6046 dead_tmp(tmp2);
9ee6e8bb 6047 if (op1 == 2) {
a7812ae4
PB
6048 tmp64 = tcg_temp_new_i64();
6049 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6050 dead_tmp(tmp);
a7812ae4
PB
6051 gen_addq(s, tmp64, rn, rd);
6052 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6053 } else {
6054 if (op1 == 0) {
5e3f878a
PB
6055 tmp2 = load_reg(s, rn);
6056 gen_helper_add_setq(tmp, tmp, tmp2);
6057 dead_tmp(tmp2);
9ee6e8bb 6058 }
5e3f878a 6059 store_reg(s, rd, tmp);
9ee6e8bb
PB
6060 }
6061 }
6062 break;
6063 default:
6064 goto illegal_op;
6065 }
6066 } else if (((insn & 0x0e000000) == 0 &&
6067 (insn & 0x00000090) != 0x90) ||
6068 ((insn & 0x0e000000) == (1 << 25))) {
6069 int set_cc, logic_cc, shiftop;
6070
6071 op1 = (insn >> 21) & 0xf;
6072 set_cc = (insn >> 20) & 1;
6073 logic_cc = table_logic_cc[op1] & set_cc;
6074
6075 /* data processing instruction */
6076 if (insn & (1 << 25)) {
6077 /* immediate operand */
6078 val = insn & 0xff;
6079 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6080 if (shift) {
9ee6e8bb 6081 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6082 }
6083 tmp2 = new_tmp();
6084 tcg_gen_movi_i32(tmp2, val);
6085 if (logic_cc && shift) {
6086 gen_set_CF_bit31(tmp2);
6087 }
9ee6e8bb
PB
6088 } else {
6089 /* register */
6090 rm = (insn) & 0xf;
e9bb4aa9 6091 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6092 shiftop = (insn >> 5) & 3;
6093 if (!(insn & (1 << 4))) {
6094 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6095 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6096 } else {
6097 rs = (insn >> 8) & 0xf;
8984bd2e 6098 tmp = load_reg(s, rs);
e9bb4aa9 6099 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6100 }
6101 }
6102 if (op1 != 0x0f && op1 != 0x0d) {
6103 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6104 tmp = load_reg(s, rn);
6105 } else {
6106 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6107 }
6108 rd = (insn >> 12) & 0xf;
6109 switch(op1) {
6110 case 0x00:
e9bb4aa9
JR
6111 tcg_gen_and_i32(tmp, tmp, tmp2);
6112 if (logic_cc) {
6113 gen_logic_CC(tmp);
6114 }
21aeb343 6115 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6116 break;
6117 case 0x01:
e9bb4aa9
JR
6118 tcg_gen_xor_i32(tmp, tmp, tmp2);
6119 if (logic_cc) {
6120 gen_logic_CC(tmp);
6121 }
21aeb343 6122 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6123 break;
6124 case 0x02:
6125 if (set_cc && rd == 15) {
6126 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6127 if (IS_USER(s)) {
9ee6e8bb 6128 goto illegal_op;
e9bb4aa9
JR
6129 }
6130 gen_helper_sub_cc(tmp, tmp, tmp2);
6131 gen_exception_return(s, tmp);
9ee6e8bb 6132 } else {
e9bb4aa9
JR
6133 if (set_cc) {
6134 gen_helper_sub_cc(tmp, tmp, tmp2);
6135 } else {
6136 tcg_gen_sub_i32(tmp, tmp, tmp2);
6137 }
21aeb343 6138 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6139 }
6140 break;
6141 case 0x03:
e9bb4aa9
JR
6142 if (set_cc) {
6143 gen_helper_sub_cc(tmp, tmp2, tmp);
6144 } else {
6145 tcg_gen_sub_i32(tmp, tmp2, tmp);
6146 }
21aeb343 6147 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6148 break;
6149 case 0x04:
e9bb4aa9
JR
6150 if (set_cc) {
6151 gen_helper_add_cc(tmp, tmp, tmp2);
6152 } else {
6153 tcg_gen_add_i32(tmp, tmp, tmp2);
6154 }
21aeb343 6155 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6156 break;
6157 case 0x05:
e9bb4aa9
JR
6158 if (set_cc) {
6159 gen_helper_adc_cc(tmp, tmp, tmp2);
6160 } else {
6161 gen_add_carry(tmp, tmp, tmp2);
6162 }
21aeb343 6163 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6164 break;
6165 case 0x06:
e9bb4aa9
JR
6166 if (set_cc) {
6167 gen_helper_sbc_cc(tmp, tmp, tmp2);
6168 } else {
6169 gen_sub_carry(tmp, tmp, tmp2);
6170 }
21aeb343 6171 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6172 break;
6173 case 0x07:
e9bb4aa9
JR
6174 if (set_cc) {
6175 gen_helper_sbc_cc(tmp, tmp2, tmp);
6176 } else {
6177 gen_sub_carry(tmp, tmp2, tmp);
6178 }
21aeb343 6179 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6180 break;
6181 case 0x08:
6182 if (set_cc) {
e9bb4aa9
JR
6183 tcg_gen_and_i32(tmp, tmp, tmp2);
6184 gen_logic_CC(tmp);
9ee6e8bb 6185 }
e9bb4aa9 6186 dead_tmp(tmp);
9ee6e8bb
PB
6187 break;
6188 case 0x09:
6189 if (set_cc) {
e9bb4aa9
JR
6190 tcg_gen_xor_i32(tmp, tmp, tmp2);
6191 gen_logic_CC(tmp);
9ee6e8bb 6192 }
e9bb4aa9 6193 dead_tmp(tmp);
9ee6e8bb
PB
6194 break;
6195 case 0x0a:
6196 if (set_cc) {
e9bb4aa9 6197 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6198 }
e9bb4aa9 6199 dead_tmp(tmp);
9ee6e8bb
PB
6200 break;
6201 case 0x0b:
6202 if (set_cc) {
e9bb4aa9 6203 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6204 }
e9bb4aa9 6205 dead_tmp(tmp);
9ee6e8bb
PB
6206 break;
6207 case 0x0c:
e9bb4aa9
JR
6208 tcg_gen_or_i32(tmp, tmp, tmp2);
6209 if (logic_cc) {
6210 gen_logic_CC(tmp);
6211 }
21aeb343 6212 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6213 break;
6214 case 0x0d:
6215 if (logic_cc && rd == 15) {
6216 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6217 if (IS_USER(s)) {
9ee6e8bb 6218 goto illegal_op;
e9bb4aa9
JR
6219 }
6220 gen_exception_return(s, tmp2);
9ee6e8bb 6221 } else {
e9bb4aa9
JR
6222 if (logic_cc) {
6223 gen_logic_CC(tmp2);
6224 }
21aeb343 6225 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6226 }
6227 break;
6228 case 0x0e:
e9bb4aa9
JR
6229 tcg_gen_bic_i32(tmp, tmp, tmp2);
6230 if (logic_cc) {
6231 gen_logic_CC(tmp);
6232 }
21aeb343 6233 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6234 break;
6235 default:
6236 case 0x0f:
e9bb4aa9
JR
6237 tcg_gen_not_i32(tmp2, tmp2);
6238 if (logic_cc) {
6239 gen_logic_CC(tmp2);
6240 }
21aeb343 6241 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6242 break;
6243 }
e9bb4aa9
JR
6244 if (op1 != 0x0f && op1 != 0x0d) {
6245 dead_tmp(tmp2);
6246 }
9ee6e8bb
PB
6247 } else {
6248 /* other instructions */
6249 op1 = (insn >> 24) & 0xf;
6250 switch(op1) {
6251 case 0x0:
6252 case 0x1:
6253 /* multiplies, extra load/stores */
6254 sh = (insn >> 5) & 3;
6255 if (sh == 0) {
6256 if (op1 == 0x0) {
6257 rd = (insn >> 16) & 0xf;
6258 rn = (insn >> 12) & 0xf;
6259 rs = (insn >> 8) & 0xf;
6260 rm = (insn) & 0xf;
6261 op1 = (insn >> 20) & 0xf;
6262 switch (op1) {
6263 case 0: case 1: case 2: case 3: case 6:
6264 /* 32 bit mul */
5e3f878a
PB
6265 tmp = load_reg(s, rs);
6266 tmp2 = load_reg(s, rm);
6267 tcg_gen_mul_i32(tmp, tmp, tmp2);
6268 dead_tmp(tmp2);
9ee6e8bb
PB
6269 if (insn & (1 << 22)) {
6270 /* Subtract (mls) */
6271 ARCH(6T2);
5e3f878a
PB
6272 tmp2 = load_reg(s, rn);
6273 tcg_gen_sub_i32(tmp, tmp2, tmp);
6274 dead_tmp(tmp2);
9ee6e8bb
PB
6275 } else if (insn & (1 << 21)) {
6276 /* Add */
5e3f878a
PB
6277 tmp2 = load_reg(s, rn);
6278 tcg_gen_add_i32(tmp, tmp, tmp2);
6279 dead_tmp(tmp2);
9ee6e8bb
PB
6280 }
6281 if (insn & (1 << 20))
5e3f878a
PB
6282 gen_logic_CC(tmp);
6283 store_reg(s, rd, tmp);
9ee6e8bb
PB
6284 break;
6285 default:
6286 /* 64 bit mul */
5e3f878a
PB
6287 tmp = load_reg(s, rs);
6288 tmp2 = load_reg(s, rm);
9ee6e8bb 6289 if (insn & (1 << 22))
a7812ae4 6290 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6291 else
a7812ae4 6292 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6293 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6294 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6295 if (!(insn & (1 << 23))) { /* double accumulate */
6296 ARCH(6);
a7812ae4
PB
6297 gen_addq_lo(s, tmp64, rn);
6298 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6299 }
6300 if (insn & (1 << 20))
a7812ae4
PB
6301 gen_logicq_cc(tmp64);
6302 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6303 break;
6304 }
6305 } else {
6306 rn = (insn >> 16) & 0xf;
6307 rd = (insn >> 12) & 0xf;
6308 if (insn & (1 << 23)) {
6309 /* load/store exclusive */
86753403
PB
6310 op1 = (insn >> 21) & 0x3;
6311 if (op1)
a47f43d2 6312 ARCH(6K);
86753403
PB
6313 else
6314 ARCH(6);
9ee6e8bb 6315 gen_movl_T1_reg(s, rn);
72f1c62f 6316 addr = cpu_T[1];
9ee6e8bb 6317 if (insn & (1 << 20)) {
8f8e3aa4 6318 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
86753403
PB
6319 switch (op1) {
6320 case 0: /* ldrex */
6321 tmp = gen_ld32(addr, IS_USER(s));
6322 break;
6323 case 1: /* ldrexd */
6324 tmp = gen_ld32(addr, IS_USER(s));
6325 store_reg(s, rd, tmp);
6326 tcg_gen_addi_i32(addr, addr, 4);
6327 tmp = gen_ld32(addr, IS_USER(s));
6328 rd++;
6329 break;
6330 case 2: /* ldrexb */
6331 tmp = gen_ld8u(addr, IS_USER(s));
6332 break;
6333 case 3: /* ldrexh */
6334 tmp = gen_ld16u(addr, IS_USER(s));
6335 break;
6336 default:
6337 abort();
6338 }
8f8e3aa4 6339 store_reg(s, rd, tmp);
9ee6e8bb 6340 } else {
8f8e3aa4 6341 int label = gen_new_label();
9ee6e8bb 6342 rm = insn & 0xf;
8f8e3aa4 6343 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
6344 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6345 0, label);
8f8e3aa4 6346 tmp = load_reg(s,rm);
86753403
PB
6347 switch (op1) {
6348 case 0: /* strex */
6349 gen_st32(tmp, addr, IS_USER(s));
6350 break;
6351 case 1: /* strexd */
6352 gen_st32(tmp, addr, IS_USER(s));
6353 tcg_gen_addi_i32(addr, addr, 4);
6354 tmp = load_reg(s, rm + 1);
6355 gen_st32(tmp, addr, IS_USER(s));
6356 break;
6357 case 2: /* strexb */
6358 gen_st8(tmp, addr, IS_USER(s));
6359 break;
6360 case 3: /* strexh */
6361 gen_st16(tmp, addr, IS_USER(s));
6362 break;
6363 default:
6364 abort();
6365 }
2637a3be 6366 gen_set_label(label);
8f8e3aa4 6367 gen_movl_reg_T0(s, rd);
9ee6e8bb 6368 }
9ee6e8bb
PB
6369 } else {
6370 /* SWP instruction */
6371 rm = (insn) & 0xf;
6372
8984bd2e
PB
6373 /* ??? This is not really atomic. However we know
6374 we never have multiple CPUs running in parallel,
6375 so it is good enough. */
6376 addr = load_reg(s, rn);
6377 tmp = load_reg(s, rm);
9ee6e8bb 6378 if (insn & (1 << 22)) {
8984bd2e
PB
6379 tmp2 = gen_ld8u(addr, IS_USER(s));
6380 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6381 } else {
8984bd2e
PB
6382 tmp2 = gen_ld32(addr, IS_USER(s));
6383 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6384 }
8984bd2e
PB
6385 dead_tmp(addr);
6386 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6387 }
6388 }
6389 } else {
6390 int address_offset;
6391 int load;
6392 /* Misc load/store */
6393 rn = (insn >> 16) & 0xf;
6394 rd = (insn >> 12) & 0xf;
b0109805 6395 addr = load_reg(s, rn);
9ee6e8bb 6396 if (insn & (1 << 24))
b0109805 6397 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6398 address_offset = 0;
6399 if (insn & (1 << 20)) {
6400 /* load */
6401 switch(sh) {
6402 case 1:
b0109805 6403 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6404 break;
6405 case 2:
b0109805 6406 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6407 break;
6408 default:
6409 case 3:
b0109805 6410 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6411 break;
6412 }
6413 load = 1;
6414 } else if (sh & 2) {
6415 /* doubleword */
6416 if (sh & 1) {
6417 /* store */
b0109805
PB
6418 tmp = load_reg(s, rd);
6419 gen_st32(tmp, addr, IS_USER(s));
6420 tcg_gen_addi_i32(addr, addr, 4);
6421 tmp = load_reg(s, rd + 1);
6422 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6423 load = 0;
6424 } else {
6425 /* load */
b0109805
PB
6426 tmp = gen_ld32(addr, IS_USER(s));
6427 store_reg(s, rd, tmp);
6428 tcg_gen_addi_i32(addr, addr, 4);
6429 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6430 rd++;
6431 load = 1;
6432 }
6433 address_offset = -4;
6434 } else {
6435 /* store */
b0109805
PB
6436 tmp = load_reg(s, rd);
6437 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6438 load = 0;
6439 }
6440 /* Perform base writeback before the loaded value to
6441 ensure correct behavior with overlapping index registers.
6442 ldrd with base writeback is is undefined if the
6443 destination and index registers overlap. */
6444 if (!(insn & (1 << 24))) {
b0109805
PB
6445 gen_add_datah_offset(s, insn, address_offset, addr);
6446 store_reg(s, rn, addr);
9ee6e8bb
PB
6447 } else if (insn & (1 << 21)) {
6448 if (address_offset)
b0109805
PB
6449 tcg_gen_addi_i32(addr, addr, address_offset);
6450 store_reg(s, rn, addr);
6451 } else {
6452 dead_tmp(addr);
9ee6e8bb
PB
6453 }
6454 if (load) {
6455 /* Complete the load. */
b0109805 6456 store_reg(s, rd, tmp);
9ee6e8bb
PB
6457 }
6458 }
6459 break;
6460 case 0x4:
6461 case 0x5:
6462 goto do_ldst;
6463 case 0x6:
6464 case 0x7:
6465 if (insn & (1 << 4)) {
6466 ARCH(6);
6467 /* Armv6 Media instructions. */
6468 rm = insn & 0xf;
6469 rn = (insn >> 16) & 0xf;
2c0262af 6470 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6471 rs = (insn >> 8) & 0xf;
6472 switch ((insn >> 23) & 3) {
6473 case 0: /* Parallel add/subtract. */
6474 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6475 tmp = load_reg(s, rn);
6476 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6477 sh = (insn >> 5) & 7;
6478 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6479 goto illegal_op;
6ddbc6e4
PB
6480 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6481 dead_tmp(tmp2);
6482 store_reg(s, rd, tmp);
9ee6e8bb
PB
6483 break;
6484 case 1:
6485 if ((insn & 0x00700020) == 0) {
6c95676b 6486 /* Halfword pack. */
3670669c
PB
6487 tmp = load_reg(s, rn);
6488 tmp2 = load_reg(s, rm);
9ee6e8bb 6489 shift = (insn >> 7) & 0x1f;
3670669c
PB
6490 if (insn & (1 << 6)) {
6491 /* pkhtb */
22478e79
AZ
6492 if (shift == 0)
6493 shift = 31;
6494 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6495 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6496 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6497 } else {
6498 /* pkhbt */
22478e79
AZ
6499 if (shift)
6500 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6501 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6502 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6503 }
6504 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6505 dead_tmp(tmp2);
3670669c 6506 store_reg(s, rd, tmp);
9ee6e8bb
PB
6507 } else if ((insn & 0x00200020) == 0x00200000) {
6508 /* [us]sat */
6ddbc6e4 6509 tmp = load_reg(s, rm);
9ee6e8bb
PB
6510 shift = (insn >> 7) & 0x1f;
6511 if (insn & (1 << 6)) {
6512 if (shift == 0)
6513 shift = 31;
6ddbc6e4 6514 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6515 } else {
6ddbc6e4 6516 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6517 }
6518 sh = (insn >> 16) & 0x1f;
6519 if (sh != 0) {
6520 if (insn & (1 << 22))
6ddbc6e4 6521 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6522 else
6ddbc6e4 6523 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6524 }
6ddbc6e4 6525 store_reg(s, rd, tmp);
9ee6e8bb
PB
6526 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6527 /* [us]sat16 */
6ddbc6e4 6528 tmp = load_reg(s, rm);
9ee6e8bb
PB
6529 sh = (insn >> 16) & 0x1f;
6530 if (sh != 0) {
6531 if (insn & (1 << 22))
6ddbc6e4 6532 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6533 else
6ddbc6e4 6534 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6535 }
6ddbc6e4 6536 store_reg(s, rd, tmp);
9ee6e8bb
PB
6537 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6538 /* Select bytes. */
6ddbc6e4
PB
6539 tmp = load_reg(s, rn);
6540 tmp2 = load_reg(s, rm);
6541 tmp3 = new_tmp();
6542 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6543 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6544 dead_tmp(tmp3);
6545 dead_tmp(tmp2);
6546 store_reg(s, rd, tmp);
9ee6e8bb 6547 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6548 tmp = load_reg(s, rm);
9ee6e8bb
PB
6549 shift = (insn >> 10) & 3;
6550 /* ??? In many cases it's not neccessary to do a
6551 rotate, a shift is sufficient. */
6552 if (shift != 0)
5e3f878a 6553 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6554 op1 = (insn >> 20) & 7;
6555 switch (op1) {
5e3f878a
PB
6556 case 0: gen_sxtb16(tmp); break;
6557 case 2: gen_sxtb(tmp); break;
6558 case 3: gen_sxth(tmp); break;
6559 case 4: gen_uxtb16(tmp); break;
6560 case 6: gen_uxtb(tmp); break;
6561 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6562 default: goto illegal_op;
6563 }
6564 if (rn != 15) {
5e3f878a 6565 tmp2 = load_reg(s, rn);
9ee6e8bb 6566 if ((op1 & 3) == 0) {
5e3f878a 6567 gen_add16(tmp, tmp2);
9ee6e8bb 6568 } else {
5e3f878a
PB
6569 tcg_gen_add_i32(tmp, tmp, tmp2);
6570 dead_tmp(tmp2);
9ee6e8bb
PB
6571 }
6572 }
6c95676b 6573 store_reg(s, rd, tmp);
9ee6e8bb
PB
6574 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6575 /* rev */
b0109805 6576 tmp = load_reg(s, rm);
9ee6e8bb
PB
6577 if (insn & (1 << 22)) {
6578 if (insn & (1 << 7)) {
b0109805 6579 gen_revsh(tmp);
9ee6e8bb
PB
6580 } else {
6581 ARCH(6T2);
b0109805 6582 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6583 }
6584 } else {
6585 if (insn & (1 << 7))
b0109805 6586 gen_rev16(tmp);
9ee6e8bb 6587 else
66896cb8 6588 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6589 }
b0109805 6590 store_reg(s, rd, tmp);
9ee6e8bb
PB
6591 } else {
6592 goto illegal_op;
6593 }
6594 break;
6595 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6596 tmp = load_reg(s, rm);
6597 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6598 if (insn & (1 << 20)) {
6599 /* Signed multiply most significant [accumulate]. */
a7812ae4 6600 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6601 if (insn & (1 << 5))
a7812ae4
PB
6602 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6603 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6604 tmp = new_tmp();
a7812ae4 6605 tcg_gen_trunc_i64_i32(tmp, tmp64);
955a7dd5
AZ
6606 if (rd != 15) {
6607 tmp2 = load_reg(s, rd);
9ee6e8bb 6608 if (insn & (1 << 6)) {
5e3f878a 6609 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6610 } else {
5e3f878a 6611 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6612 }
5e3f878a 6613 dead_tmp(tmp2);
9ee6e8bb 6614 }
955a7dd5 6615 store_reg(s, rn, tmp);
9ee6e8bb
PB
6616 } else {
6617 if (insn & (1 << 5))
5e3f878a
PB
6618 gen_swap_half(tmp2);
6619 gen_smul_dual(tmp, tmp2);
6620 /* This addition cannot overflow. */
6621 if (insn & (1 << 6)) {
6622 tcg_gen_sub_i32(tmp, tmp, tmp2);
6623 } else {
6624 tcg_gen_add_i32(tmp, tmp, tmp2);
6625 }
6626 dead_tmp(tmp2);
9ee6e8bb 6627 if (insn & (1 << 22)) {
5e3f878a 6628 /* smlald, smlsld */
a7812ae4
PB
6629 tmp64 = tcg_temp_new_i64();
6630 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6631 dead_tmp(tmp);
a7812ae4
PB
6632 gen_addq(s, tmp64, rd, rn);
6633 gen_storeq_reg(s, rd, rn, tmp64);
9ee6e8bb 6634 } else {
5e3f878a 6635 /* smuad, smusd, smlad, smlsd */
22478e79 6636 if (rd != 15)
9ee6e8bb 6637 {
22478e79 6638 tmp2 = load_reg(s, rd);
5e3f878a
PB
6639 gen_helper_add_setq(tmp, tmp, tmp2);
6640 dead_tmp(tmp2);
9ee6e8bb 6641 }
22478e79 6642 store_reg(s, rn, tmp);
9ee6e8bb
PB
6643 }
6644 }
6645 break;
6646 case 3:
6647 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6648 switch (op1) {
6649 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6650 ARCH(6);
6651 tmp = load_reg(s, rm);
6652 tmp2 = load_reg(s, rs);
6653 gen_helper_usad8(tmp, tmp, tmp2);
6654 dead_tmp(tmp2);
ded9d295
AZ
6655 if (rd != 15) {
6656 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6657 tcg_gen_add_i32(tmp, tmp, tmp2);
6658 dead_tmp(tmp2);
9ee6e8bb 6659 }
ded9d295 6660 store_reg(s, rn, tmp);
9ee6e8bb
PB
6661 break;
6662 case 0x20: case 0x24: case 0x28: case 0x2c:
6663 /* Bitfield insert/clear. */
6664 ARCH(6T2);
6665 shift = (insn >> 7) & 0x1f;
6666 i = (insn >> 16) & 0x1f;
6667 i = i + 1 - shift;
6668 if (rm == 15) {
5e3f878a
PB
6669 tmp = new_tmp();
6670 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6671 } else {
5e3f878a 6672 tmp = load_reg(s, rm);
9ee6e8bb
PB
6673 }
6674 if (i != 32) {
5e3f878a 6675 tmp2 = load_reg(s, rd);
8f8e3aa4 6676 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6677 dead_tmp(tmp2);
9ee6e8bb 6678 }
5e3f878a 6679 store_reg(s, rd, tmp);
9ee6e8bb
PB
6680 break;
6681 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6682 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6683 ARCH(6T2);
5e3f878a 6684 tmp = load_reg(s, rm);
9ee6e8bb
PB
6685 shift = (insn >> 7) & 0x1f;
6686 i = ((insn >> 16) & 0x1f) + 1;
6687 if (shift + i > 32)
6688 goto illegal_op;
6689 if (i < 32) {
6690 if (op1 & 0x20) {
5e3f878a 6691 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6692 } else {
5e3f878a 6693 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6694 }
6695 }
5e3f878a 6696 store_reg(s, rd, tmp);
9ee6e8bb
PB
6697 break;
6698 default:
6699 goto illegal_op;
6700 }
6701 break;
6702 }
6703 break;
6704 }
6705 do_ldst:
6706 /* Check for undefined extension instructions
6707 * per the ARM Bible IE:
6708 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6709 */
6710 sh = (0xf << 20) | (0xf << 4);
6711 if (op1 == 0x7 && ((insn & sh) == sh))
6712 {
6713 goto illegal_op;
6714 }
6715 /* load/store byte/word */
6716 rn = (insn >> 16) & 0xf;
6717 rd = (insn >> 12) & 0xf;
b0109805 6718 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6719 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6720 if (insn & (1 << 24))
b0109805 6721 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6722 if (insn & (1 << 20)) {
6723 /* load */
9ee6e8bb 6724 if (insn & (1 << 22)) {
b0109805 6725 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6726 } else {
b0109805 6727 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6728 }
9ee6e8bb
PB
6729 } else {
6730 /* store */
b0109805 6731 tmp = load_reg(s, rd);
9ee6e8bb 6732 if (insn & (1 << 22))
b0109805 6733 gen_st8(tmp, tmp2, i);
9ee6e8bb 6734 else
b0109805 6735 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6736 }
6737 if (!(insn & (1 << 24))) {
b0109805
PB
6738 gen_add_data_offset(s, insn, tmp2);
6739 store_reg(s, rn, tmp2);
6740 } else if (insn & (1 << 21)) {
6741 store_reg(s, rn, tmp2);
6742 } else {
6743 dead_tmp(tmp2);
9ee6e8bb
PB
6744 }
6745 if (insn & (1 << 20)) {
6746 /* Complete the load. */
6747 if (rd == 15)
b0109805 6748 gen_bx(s, tmp);
9ee6e8bb 6749 else
b0109805 6750 store_reg(s, rd, tmp);
9ee6e8bb
PB
6751 }
6752 break;
6753 case 0x08:
6754 case 0x09:
6755 {
6756 int j, n, user, loaded_base;
b0109805 6757 TCGv loaded_var;
9ee6e8bb
PB
6758 /* load/store multiple words */
6759 /* XXX: store correct base if write back */
6760 user = 0;
6761 if (insn & (1 << 22)) {
6762 if (IS_USER(s))
6763 goto illegal_op; /* only usable in supervisor mode */
6764
6765 if ((insn & (1 << 15)) == 0)
6766 user = 1;
6767 }
6768 rn = (insn >> 16) & 0xf;
b0109805 6769 addr = load_reg(s, rn);
9ee6e8bb
PB
6770
6771 /* compute total size */
6772 loaded_base = 0;
a50f5b91 6773 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6774 n = 0;
6775 for(i=0;i<16;i++) {
6776 if (insn & (1 << i))
6777 n++;
6778 }
6779 /* XXX: test invalid n == 0 case ? */
6780 if (insn & (1 << 23)) {
6781 if (insn & (1 << 24)) {
6782 /* pre increment */
b0109805 6783 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6784 } else {
6785 /* post increment */
6786 }
6787 } else {
6788 if (insn & (1 << 24)) {
6789 /* pre decrement */
b0109805 6790 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6791 } else {
6792 /* post decrement */
6793 if (n != 1)
b0109805 6794 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6795 }
6796 }
6797 j = 0;
6798 for(i=0;i<16;i++) {
6799 if (insn & (1 << i)) {
6800 if (insn & (1 << 20)) {
6801 /* load */
b0109805 6802 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6803 if (i == 15) {
b0109805 6804 gen_bx(s, tmp);
9ee6e8bb 6805 } else if (user) {
b0109805
PB
6806 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6807 dead_tmp(tmp);
9ee6e8bb 6808 } else if (i == rn) {
b0109805 6809 loaded_var = tmp;
9ee6e8bb
PB
6810 loaded_base = 1;
6811 } else {
b0109805 6812 store_reg(s, i, tmp);
9ee6e8bb
PB
6813 }
6814 } else {
6815 /* store */
6816 if (i == 15) {
6817 /* special case: r15 = PC + 8 */
6818 val = (long)s->pc + 4;
b0109805
PB
6819 tmp = new_tmp();
6820 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6821 } else if (user) {
b0109805
PB
6822 tmp = new_tmp();
6823 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6824 } else {
b0109805 6825 tmp = load_reg(s, i);
9ee6e8bb 6826 }
b0109805 6827 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6828 }
6829 j++;
6830 /* no need to add after the last transfer */
6831 if (j != n)
b0109805 6832 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6833 }
6834 }
6835 if (insn & (1 << 21)) {
6836 /* write back */
6837 if (insn & (1 << 23)) {
6838 if (insn & (1 << 24)) {
6839 /* pre increment */
6840 } else {
6841 /* post increment */
b0109805 6842 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6843 }
6844 } else {
6845 if (insn & (1 << 24)) {
6846 /* pre decrement */
6847 if (n != 1)
b0109805 6848 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6849 } else {
6850 /* post decrement */
b0109805 6851 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6852 }
6853 }
b0109805
PB
6854 store_reg(s, rn, addr);
6855 } else {
6856 dead_tmp(addr);
9ee6e8bb
PB
6857 }
6858 if (loaded_base) {
b0109805 6859 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6860 }
6861 if ((insn & (1 << 22)) && !user) {
6862 /* Restore CPSR from SPSR. */
d9ba4830
PB
6863 tmp = load_cpu_field(spsr);
6864 gen_set_cpsr(tmp, 0xffffffff);
6865 dead_tmp(tmp);
9ee6e8bb
PB
6866 s->is_jmp = DISAS_UPDATE;
6867 }
6868 }
6869 break;
6870 case 0xa:
6871 case 0xb:
6872 {
6873 int32_t offset;
6874
6875 /* branch (and link) */
6876 val = (int32_t)s->pc;
6877 if (insn & (1 << 24)) {
5e3f878a
PB
6878 tmp = new_tmp();
6879 tcg_gen_movi_i32(tmp, val);
6880 store_reg(s, 14, tmp);
9ee6e8bb
PB
6881 }
6882 offset = (((int32_t)insn << 8) >> 8);
6883 val += (offset << 2) + 4;
6884 gen_jmp(s, val);
6885 }
6886 break;
6887 case 0xc:
6888 case 0xd:
6889 case 0xe:
6890 /* Coprocessor. */
6891 if (disas_coproc_insn(env, s, insn))
6892 goto illegal_op;
6893 break;
6894 case 0xf:
6895 /* swi */
5e3f878a 6896 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6897 s->is_jmp = DISAS_SWI;
6898 break;
6899 default:
6900 illegal_op:
6901 gen_set_condexec(s);
5e3f878a 6902 gen_set_pc_im(s->pc - 4);
d9ba4830 6903 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6904 s->is_jmp = DISAS_JUMP;
6905 break;
6906 }
6907 }
6908}
6909
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    /* Opcodes 0..7 (and, bic, orr, orn, eor, ...) are the logical group;
       everything at 8 and above is arithmetic. */
    if (op < 8) {
        return 1;
    }
    return 0;
}
6916
6917/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6918 then set condition code flags based on the result of the operation.
6919 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6920 to the high bit of T1.
6921 Returns zero if the opcode is valid. */
6922
6923static int
6924gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6925{
6926 int logic_cc;
6927
6928 logic_cc = 0;
6929 switch (op) {
6930 case 0: /* and */
6931 gen_op_andl_T0_T1();
6932 logic_cc = conds;
6933 break;
6934 case 1: /* bic */
6935 gen_op_bicl_T0_T1();
6936 logic_cc = conds;
6937 break;
6938 case 2: /* orr */
6939 gen_op_orl_T0_T1();
6940 logic_cc = conds;
6941 break;
6942 case 3: /* orn */
6943 gen_op_notl_T1();
6944 gen_op_orl_T0_T1();
6945 logic_cc = conds;
6946 break;
6947 case 4: /* eor */
6948 gen_op_xorl_T0_T1();
6949 logic_cc = conds;
6950 break;
6951 case 8: /* add */
6952 if (conds)
6953 gen_op_addl_T0_T1_cc();
6954 else
6955 gen_op_addl_T0_T1();
6956 break;
6957 case 10: /* adc */
6958 if (conds)
6959 gen_op_adcl_T0_T1_cc();
6960 else
b26eefb6 6961 gen_adc_T0_T1();
9ee6e8bb
PB
6962 break;
6963 case 11: /* sbc */
6964 if (conds)
6965 gen_op_sbcl_T0_T1_cc();
6966 else
3670669c 6967 gen_sbc_T0_T1();
9ee6e8bb
PB
6968 break;
6969 case 13: /* sub */
6970 if (conds)
6971 gen_op_subl_T0_T1_cc();
6972 else
6973 gen_op_subl_T0_T1();
6974 break;
6975 case 14: /* rsb */
6976 if (conds)
6977 gen_op_rsbl_T0_T1_cc();
6978 else
6979 gen_op_rsbl_T0_T1();
6980 break;
6981 default: /* 5, 6, 7, 9, 12, 15. */
6982 return 1;
6983 }
6984 if (logic_cc) {
6985 gen_op_logic_T0_cc();
6986 if (shifter_out)
b26eefb6 6987 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6988 }
6989 return 0;
6990}
6991
6992/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6993 is not legal. */
6994static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6995{
b0109805 6996 uint32_t insn, imm, shift, offset;
9ee6e8bb 6997 uint32_t rd, rn, rm, rs;
b26eefb6 6998 TCGv tmp;
6ddbc6e4
PB
6999 TCGv tmp2;
7000 TCGv tmp3;
b0109805 7001 TCGv addr;
a7812ae4 7002 TCGv_i64 tmp64;
9ee6e8bb
PB
7003 int op;
7004 int shiftop;
7005 int conds;
7006 int logic_cc;
7007
7008 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7009 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7010 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7011 16-bit instructions to get correct prefetch abort behavior. */
7012 insn = insn_hw1;
7013 if ((insn & (1 << 12)) == 0) {
7014 /* Second half of blx. */
7015 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7016 tmp = load_reg(s, 14);
7017 tcg_gen_addi_i32(tmp, tmp, offset);
7018 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7019
d9ba4830 7020 tmp2 = new_tmp();
b0109805 7021 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7022 store_reg(s, 14, tmp2);
7023 gen_bx(s, tmp);
9ee6e8bb
PB
7024 return 0;
7025 }
7026 if (insn & (1 << 11)) {
7027 /* Second half of bl. */
7028 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7029 tmp = load_reg(s, 14);
6a0d8a1d 7030 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7031
d9ba4830 7032 tmp2 = new_tmp();
b0109805 7033 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7034 store_reg(s, 14, tmp2);
7035 gen_bx(s, tmp);
9ee6e8bb
PB
7036 return 0;
7037 }
7038 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7039 /* Instruction spans a page boundary. Implement it as two
7040 16-bit instructions in case the second half causes an
7041 prefetch abort. */
7042 offset = ((int32_t)insn << 21) >> 9;
b0109805 7043 gen_op_movl_T0_im(s->pc + 2 + offset);
9ee6e8bb
PB
7044 gen_movl_reg_T0(s, 14);
7045 return 0;
7046 }
7047 /* Fall through to 32-bit decode. */
7048 }
7049
7050 insn = lduw_code(s->pc);
7051 s->pc += 2;
7052 insn |= (uint32_t)insn_hw1 << 16;
7053
7054 if ((insn & 0xf800e800) != 0xf000e800) {
7055 ARCH(6T2);
7056 }
7057
7058 rn = (insn >> 16) & 0xf;
7059 rs = (insn >> 12) & 0xf;
7060 rd = (insn >> 8) & 0xf;
7061 rm = insn & 0xf;
7062 switch ((insn >> 25) & 0xf) {
7063 case 0: case 1: case 2: case 3:
7064 /* 16-bit instructions. Should never happen. */
7065 abort();
7066 case 4:
7067 if (insn & (1 << 22)) {
7068 /* Other load/store, table branch. */
7069 if (insn & 0x01200000) {
7070 /* Load/store doubleword. */
7071 if (rn == 15) {
b0109805
PB
7072 addr = new_tmp();
7073 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7074 } else {
b0109805 7075 addr = load_reg(s, rn);
9ee6e8bb
PB
7076 }
7077 offset = (insn & 0xff) * 4;
7078 if ((insn & (1 << 23)) == 0)
7079 offset = -offset;
7080 if (insn & (1 << 24)) {
b0109805 7081 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7082 offset = 0;
7083 }
7084 if (insn & (1 << 20)) {
7085 /* ldrd */
b0109805
PB
7086 tmp = gen_ld32(addr, IS_USER(s));
7087 store_reg(s, rs, tmp);
7088 tcg_gen_addi_i32(addr, addr, 4);
7089 tmp = gen_ld32(addr, IS_USER(s));
7090 store_reg(s, rd, tmp);
9ee6e8bb
PB
7091 } else {
7092 /* strd */
b0109805
PB
7093 tmp = load_reg(s, rs);
7094 gen_st32(tmp, addr, IS_USER(s));
7095 tcg_gen_addi_i32(addr, addr, 4);
7096 tmp = load_reg(s, rd);
7097 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7098 }
7099 if (insn & (1 << 21)) {
7100 /* Base writeback. */
7101 if (rn == 15)
7102 goto illegal_op;
b0109805
PB
7103 tcg_gen_addi_i32(addr, addr, offset - 4);
7104 store_reg(s, rn, addr);
7105 } else {
7106 dead_tmp(addr);
9ee6e8bb
PB
7107 }
7108 } else if ((insn & (1 << 23)) == 0) {
7109 /* Load/store exclusive word. */
2c0262af 7110 gen_movl_T1_reg(s, rn);
72f1c62f 7111 addr = cpu_T[1];
2c0262af 7112 if (insn & (1 << 20)) {
8f8e3aa4
PB
7113 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7114 tmp = gen_ld32(addr, IS_USER(s));
7115 store_reg(s, rd, tmp);
9ee6e8bb 7116 } else {
8f8e3aa4
PB
7117 int label = gen_new_label();
7118 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
7119 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7120 0, label);
8f8e3aa4
PB
7121 tmp = load_reg(s, rs);
7122 gen_st32(tmp, cpu_T[1], IS_USER(s));
7123 gen_set_label(label);
7124 gen_movl_reg_T0(s, rd);
9ee6e8bb 7125 }
9ee6e8bb
PB
7126 } else if ((insn & (1 << 6)) == 0) {
7127 /* Table Branch. */
7128 if (rn == 15) {
b0109805
PB
7129 addr = new_tmp();
7130 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7131 } else {
b0109805 7132 addr = load_reg(s, rn);
9ee6e8bb 7133 }
b26eefb6 7134 tmp = load_reg(s, rm);
b0109805 7135 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7136 if (insn & (1 << 4)) {
7137 /* tbh */
b0109805 7138 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7139 dead_tmp(tmp);
b0109805 7140 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7141 } else { /* tbb */
b26eefb6 7142 dead_tmp(tmp);
b0109805 7143 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7144 }
b0109805
PB
7145 dead_tmp(addr);
7146 tcg_gen_shli_i32(tmp, tmp, 1);
7147 tcg_gen_addi_i32(tmp, tmp, s->pc);
7148 store_reg(s, 15, tmp);
9ee6e8bb
PB
7149 } else {
7150 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7151 /* ??? These are not really atomic. However we know
7152 we never have multiple CPUs running in parallel,
7153 so it is good enough. */
9ee6e8bb 7154 op = (insn >> 4) & 0x3;
8f8e3aa4
PB
7155 /* Must use a global reg for the address because we have
7156 a conditional branch in the store instruction. */
9ee6e8bb 7157 gen_movl_T1_reg(s, rn);
8f8e3aa4 7158 addr = cpu_T[1];
9ee6e8bb 7159 if (insn & (1 << 20)) {
8f8e3aa4 7160 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7161 switch (op) {
7162 case 0:
8f8e3aa4 7163 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7164 break;
2c0262af 7165 case 1:
8f8e3aa4 7166 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7167 break;
9ee6e8bb 7168 case 3:
8f8e3aa4
PB
7169 tmp = gen_ld32(addr, IS_USER(s));
7170 tcg_gen_addi_i32(addr, addr, 4);
7171 tmp2 = gen_ld32(addr, IS_USER(s));
7172 store_reg(s, rd, tmp2);
2c0262af
FB
7173 break;
7174 default:
9ee6e8bb
PB
7175 goto illegal_op;
7176 }
8f8e3aa4 7177 store_reg(s, rs, tmp);
9ee6e8bb 7178 } else {
8f8e3aa4
PB
7179 int label = gen_new_label();
7180 /* Must use a global that is not killed by the branch. */
7181 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a 7182 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
8f8e3aa4 7183 tmp = load_reg(s, rs);
9ee6e8bb
PB
7184 switch (op) {
7185 case 0:
8f8e3aa4 7186 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7187 break;
7188 case 1:
8f8e3aa4 7189 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7190 break;
2c0262af 7191 case 3:
8f8e3aa4
PB
7192 gen_st32(tmp, addr, IS_USER(s));
7193 tcg_gen_addi_i32(addr, addr, 4);
7194 tmp = load_reg(s, rd);
7195 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7196 break;
9ee6e8bb
PB
7197 default:
7198 goto illegal_op;
2c0262af 7199 }
8f8e3aa4 7200 gen_set_label(label);
9ee6e8bb
PB
7201 gen_movl_reg_T0(s, rm);
7202 }
7203 }
7204 } else {
7205 /* Load/store multiple, RFE, SRS. */
7206 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7207 /* Not available in user mode. */
b0109805 7208 if (IS_USER(s))
9ee6e8bb
PB
7209 goto illegal_op;
7210 if (insn & (1 << 20)) {
7211 /* rfe */
b0109805
PB
7212 addr = load_reg(s, rn);
7213 if ((insn & (1 << 24)) == 0)
7214 tcg_gen_addi_i32(addr, addr, -8);
7215 /* Load PC into tmp and CPSR into tmp2. */
7216 tmp = gen_ld32(addr, 0);
7217 tcg_gen_addi_i32(addr, addr, 4);
7218 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7219 if (insn & (1 << 21)) {
7220 /* Base writeback. */
b0109805
PB
7221 if (insn & (1 << 24)) {
7222 tcg_gen_addi_i32(addr, addr, 4);
7223 } else {
7224 tcg_gen_addi_i32(addr, addr, -4);
7225 }
7226 store_reg(s, rn, addr);
7227 } else {
7228 dead_tmp(addr);
9ee6e8bb 7229 }
b0109805 7230 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7231 } else {
7232 /* srs */
7233 op = (insn & 0x1f);
7234 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7235 addr = load_reg(s, 13);
9ee6e8bb 7236 } else {
b0109805
PB
7237 addr = new_tmp();
7238 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7239 }
7240 if ((insn & (1 << 24)) == 0) {
b0109805 7241 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7242 }
b0109805
PB
7243 tmp = load_reg(s, 14);
7244 gen_st32(tmp, addr, 0);
7245 tcg_gen_addi_i32(addr, addr, 4);
7246 tmp = new_tmp();
7247 gen_helper_cpsr_read(tmp);
7248 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7249 if (insn & (1 << 21)) {
7250 if ((insn & (1 << 24)) == 0) {
b0109805 7251 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7252 } else {
b0109805 7253 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7254 }
7255 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7256 store_reg(s, 13, addr);
9ee6e8bb 7257 } else {
b0109805
PB
7258 gen_helper_set_r13_banked(cpu_env,
7259 tcg_const_i32(op), addr);
9ee6e8bb 7260 }
b0109805
PB
7261 } else {
7262 dead_tmp(addr);
9ee6e8bb
PB
7263 }
7264 }
7265 } else {
7266 int i;
7267 /* Load/store multiple. */
b0109805 7268 addr = load_reg(s, rn);
9ee6e8bb
PB
7269 offset = 0;
7270 for (i = 0; i < 16; i++) {
7271 if (insn & (1 << i))
7272 offset += 4;
7273 }
7274 if (insn & (1 << 24)) {
b0109805 7275 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7276 }
7277
7278 for (i = 0; i < 16; i++) {
7279 if ((insn & (1 << i)) == 0)
7280 continue;
7281 if (insn & (1 << 20)) {
7282 /* Load. */
b0109805 7283 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7284 if (i == 15) {
b0109805 7285 gen_bx(s, tmp);
9ee6e8bb 7286 } else {
b0109805 7287 store_reg(s, i, tmp);
9ee6e8bb
PB
7288 }
7289 } else {
7290 /* Store. */
b0109805
PB
7291 tmp = load_reg(s, i);
7292 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7293 }
b0109805 7294 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7295 }
7296 if (insn & (1 << 21)) {
7297 /* Base register writeback. */
7298 if (insn & (1 << 24)) {
b0109805 7299 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7300 }
7301 /* Fault if writeback register is in register list. */
7302 if (insn & (1 << rn))
7303 goto illegal_op;
b0109805
PB
7304 store_reg(s, rn, addr);
7305 } else {
7306 dead_tmp(addr);
9ee6e8bb
PB
7307 }
7308 }
7309 }
7310 break;
7311 case 5: /* Data processing register constant shift. */
7312 if (rn == 15)
7313 gen_op_movl_T0_im(0);
7314 else
7315 gen_movl_T0_reg(s, rn);
7316 gen_movl_T1_reg(s, rm);
7317 op = (insn >> 21) & 0xf;
7318 shiftop = (insn >> 4) & 3;
7319 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7320 conds = (insn & (1 << 20)) != 0;
7321 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 7322 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
7323 if (gen_thumb2_data_op(s, op, conds, 0))
7324 goto illegal_op;
7325 if (rd != 15)
7326 gen_movl_reg_T0(s, rd);
7327 break;
7328 case 13: /* Misc data processing. */
7329 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7330 if (op < 4 && (insn & 0xf000) != 0xf000)
7331 goto illegal_op;
7332 switch (op) {
7333 case 0: /* Register controlled shift. */
8984bd2e
PB
7334 tmp = load_reg(s, rn);
7335 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7336 if ((insn & 0x70) != 0)
7337 goto illegal_op;
7338 op = (insn >> 21) & 3;
8984bd2e
PB
7339 logic_cc = (insn & (1 << 20)) != 0;
7340 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7341 if (logic_cc)
7342 gen_logic_CC(tmp);
21aeb343 7343 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7344 break;
7345 case 1: /* Sign/zero extend. */
5e3f878a 7346 tmp = load_reg(s, rm);
9ee6e8bb
PB
7347 shift = (insn >> 4) & 3;
7348 /* ??? In many cases it's not neccessary to do a
7349 rotate, a shift is sufficient. */
7350 if (shift != 0)
5e3f878a 7351 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7352 op = (insn >> 20) & 7;
7353 switch (op) {
5e3f878a
PB
7354 case 0: gen_sxth(tmp); break;
7355 case 1: gen_uxth(tmp); break;
7356 case 2: gen_sxtb16(tmp); break;
7357 case 3: gen_uxtb16(tmp); break;
7358 case 4: gen_sxtb(tmp); break;
7359 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7360 default: goto illegal_op;
7361 }
7362 if (rn != 15) {
5e3f878a 7363 tmp2 = load_reg(s, rn);
9ee6e8bb 7364 if ((op >> 1) == 1) {
5e3f878a 7365 gen_add16(tmp, tmp2);
9ee6e8bb 7366 } else {
5e3f878a
PB
7367 tcg_gen_add_i32(tmp, tmp, tmp2);
7368 dead_tmp(tmp2);
9ee6e8bb
PB
7369 }
7370 }
5e3f878a 7371 store_reg(s, rd, tmp);
9ee6e8bb
PB
7372 break;
7373 case 2: /* SIMD add/subtract. */
7374 op = (insn >> 20) & 7;
7375 shift = (insn >> 4) & 7;
7376 if ((op & 3) == 3 || (shift & 3) == 3)
7377 goto illegal_op;
6ddbc6e4
PB
7378 tmp = load_reg(s, rn);
7379 tmp2 = load_reg(s, rm);
7380 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7381 dead_tmp(tmp2);
7382 store_reg(s, rd, tmp);
9ee6e8bb
PB
7383 break;
7384 case 3: /* Other data processing. */
7385 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7386 if (op < 4) {
7387 /* Saturating add/subtract. */
d9ba4830
PB
7388 tmp = load_reg(s, rn);
7389 tmp2 = load_reg(s, rm);
9ee6e8bb 7390 if (op & 2)
d9ba4830 7391 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7392 if (op & 1)
d9ba4830 7393 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7394 else
d9ba4830
PB
7395 gen_helper_add_saturate(tmp, tmp, tmp2);
7396 dead_tmp(tmp2);
9ee6e8bb 7397 } else {
d9ba4830 7398 tmp = load_reg(s, rn);
9ee6e8bb
PB
7399 switch (op) {
7400 case 0x0a: /* rbit */
d9ba4830 7401 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7402 break;
7403 case 0x08: /* rev */
66896cb8 7404 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7405 break;
7406 case 0x09: /* rev16 */
d9ba4830 7407 gen_rev16(tmp);
9ee6e8bb
PB
7408 break;
7409 case 0x0b: /* revsh */
d9ba4830 7410 gen_revsh(tmp);
9ee6e8bb
PB
7411 break;
7412 case 0x10: /* sel */
d9ba4830 7413 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7414 tmp3 = new_tmp();
7415 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7416 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7417 dead_tmp(tmp3);
d9ba4830 7418 dead_tmp(tmp2);
9ee6e8bb
PB
7419 break;
7420 case 0x18: /* clz */
d9ba4830 7421 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7422 break;
7423 default:
7424 goto illegal_op;
7425 }
7426 }
d9ba4830 7427 store_reg(s, rd, tmp);
9ee6e8bb
PB
7428 break;
7429 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7430 op = (insn >> 4) & 0xf;
d9ba4830
PB
7431 tmp = load_reg(s, rn);
7432 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7433 switch ((insn >> 20) & 7) {
7434 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7435 tcg_gen_mul_i32(tmp, tmp, tmp2);
7436 dead_tmp(tmp2);
9ee6e8bb 7437 if (rs != 15) {
d9ba4830 7438 tmp2 = load_reg(s, rs);
9ee6e8bb 7439 if (op)
d9ba4830 7440 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7441 else
d9ba4830
PB
7442 tcg_gen_add_i32(tmp, tmp, tmp2);
7443 dead_tmp(tmp2);
9ee6e8bb 7444 }
9ee6e8bb
PB
7445 break;
7446 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7447 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7448 dead_tmp(tmp2);
9ee6e8bb 7449 if (rs != 15) {
d9ba4830
PB
7450 tmp2 = load_reg(s, rs);
7451 gen_helper_add_setq(tmp, tmp, tmp2);
7452 dead_tmp(tmp2);
9ee6e8bb 7453 }
9ee6e8bb
PB
7454 break;
7455 case 2: /* Dual multiply add. */
7456 case 4: /* Dual multiply subtract. */
7457 if (op)
d9ba4830
PB
7458 gen_swap_half(tmp2);
7459 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7460 /* This addition cannot overflow. */
7461 if (insn & (1 << 22)) {
d9ba4830 7462 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7463 } else {
d9ba4830 7464 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7465 }
d9ba4830 7466 dead_tmp(tmp2);
9ee6e8bb
PB
7467 if (rs != 15)
7468 {
d9ba4830
PB
7469 tmp2 = load_reg(s, rs);
7470 gen_helper_add_setq(tmp, tmp, tmp2);
7471 dead_tmp(tmp2);
9ee6e8bb 7472 }
9ee6e8bb
PB
7473 break;
7474 case 3: /* 32 * 16 -> 32msb */
7475 if (op)
d9ba4830 7476 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7477 else
d9ba4830 7478 gen_sxth(tmp2);
a7812ae4
PB
7479 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7480 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7481 tmp = new_tmp();
a7812ae4 7482 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb
PB
7483 if (rs != 15)
7484 {
d9ba4830
PB
7485 tmp2 = load_reg(s, rs);
7486 gen_helper_add_setq(tmp, tmp, tmp2);
7487 dead_tmp(tmp2);
9ee6e8bb 7488 }
9ee6e8bb
PB
7489 break;
7490 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7491 gen_imull(tmp, tmp2);
7492 if (insn & (1 << 5)) {
7493 gen_roundqd(tmp, tmp2);
7494 dead_tmp(tmp2);
7495 } else {
7496 dead_tmp(tmp);
7497 tmp = tmp2;
7498 }
9ee6e8bb 7499 if (rs != 15) {
d9ba4830 7500 tmp2 = load_reg(s, rs);
9ee6e8bb 7501 if (insn & (1 << 21)) {
d9ba4830 7502 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7503 } else {
d9ba4830 7504 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7505 }
d9ba4830 7506 dead_tmp(tmp2);
2c0262af 7507 }
9ee6e8bb
PB
7508 break;
7509 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7510 gen_helper_usad8(tmp, tmp, tmp2);
7511 dead_tmp(tmp2);
9ee6e8bb 7512 if (rs != 15) {
d9ba4830
PB
7513 tmp2 = load_reg(s, rs);
7514 tcg_gen_add_i32(tmp, tmp, tmp2);
7515 dead_tmp(tmp2);
5fd46862 7516 }
9ee6e8bb 7517 break;
2c0262af 7518 }
d9ba4830 7519 store_reg(s, rd, tmp);
2c0262af 7520 break;
9ee6e8bb
PB
7521 case 6: case 7: /* 64-bit multiply, Divide. */
7522 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7523 tmp = load_reg(s, rn);
7524 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7525 if ((op & 0x50) == 0x10) {
7526 /* sdiv, udiv */
7527 if (!arm_feature(env, ARM_FEATURE_DIV))
7528 goto illegal_op;
7529 if (op & 0x20)
5e3f878a 7530 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7531 else
5e3f878a
PB
7532 gen_helper_sdiv(tmp, tmp, tmp2);
7533 dead_tmp(tmp2);
7534 store_reg(s, rd, tmp);
9ee6e8bb
PB
7535 } else if ((op & 0xe) == 0xc) {
7536 /* Dual multiply accumulate long. */
7537 if (op & 1)
5e3f878a
PB
7538 gen_swap_half(tmp2);
7539 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7540 if (op & 0x10) {
5e3f878a 7541 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7542 } else {
5e3f878a 7543 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7544 }
5e3f878a 7545 dead_tmp(tmp2);
a7812ae4
PB
7546 /* BUGFIX */
7547 tmp64 = tcg_temp_new_i64();
7548 tcg_gen_ext_i32_i64(tmp64, tmp);
7549 dead_tmp(tmp);
7550 gen_addq(s, tmp64, rs, rd);
7551 gen_storeq_reg(s, rs, rd, tmp64);
2c0262af 7552 } else {
9ee6e8bb
PB
7553 if (op & 0x20) {
7554 /* Unsigned 64-bit multiply */
a7812ae4 7555 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7556 } else {
9ee6e8bb
PB
7557 if (op & 8) {
7558 /* smlalxy */
5e3f878a
PB
7559 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7560 dead_tmp(tmp2);
a7812ae4
PB
7561 tmp64 = tcg_temp_new_i64();
7562 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7563 dead_tmp(tmp);
9ee6e8bb
PB
7564 } else {
7565 /* Signed 64-bit multiply */
a7812ae4 7566 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7567 }
b5ff1b31 7568 }
9ee6e8bb
PB
7569 if (op & 4) {
7570 /* umaal */
a7812ae4
PB
7571 gen_addq_lo(s, tmp64, rs);
7572 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7573 } else if (op & 0x40) {
7574 /* 64-bit accumulate. */
a7812ae4 7575 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7576 }
a7812ae4 7577 gen_storeq_reg(s, rs, rd, tmp64);
5fd46862 7578 }
2c0262af 7579 break;
9ee6e8bb
PB
7580 }
7581 break;
7582 case 6: case 7: case 14: case 15:
7583 /* Coprocessor. */
7584 if (((insn >> 24) & 3) == 3) {
7585 /* Translate into the equivalent ARM encoding. */
7586 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7587 if (disas_neon_data_insn(env, s, insn))
7588 goto illegal_op;
7589 } else {
7590 if (insn & (1 << 28))
7591 goto illegal_op;
7592 if (disas_coproc_insn (env, s, insn))
7593 goto illegal_op;
7594 }
7595 break;
7596 case 8: case 9: case 10: case 11:
7597 if (insn & (1 << 15)) {
7598 /* Branches, misc control. */
7599 if (insn & 0x5000) {
7600 /* Unconditional branch. */
7601 /* signextend(hw1[10:0]) -> offset[:12]. */
7602 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7603 /* hw1[10:0] -> offset[11:1]. */
7604 offset |= (insn & 0x7ff) << 1;
7605 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7606 offset[24:22] already have the same value because of the
7607 sign extension above. */
7608 offset ^= ((~insn) & (1 << 13)) << 10;
7609 offset ^= ((~insn) & (1 << 11)) << 11;
7610
9ee6e8bb
PB
7611 if (insn & (1 << 14)) {
7612 /* Branch and link. */
b0109805 7613 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7614 gen_movl_reg_T1(s, 14);
b5ff1b31 7615 }
3b46e624 7616
b0109805 7617 offset += s->pc;
9ee6e8bb
PB
7618 if (insn & (1 << 12)) {
7619 /* b/bl */
b0109805 7620 gen_jmp(s, offset);
9ee6e8bb
PB
7621 } else {
7622 /* blx */
b0109805
PB
7623 offset &= ~(uint32_t)2;
7624 gen_bx_im(s, offset);
2c0262af 7625 }
9ee6e8bb
PB
7626 } else if (((insn >> 23) & 7) == 7) {
7627 /* Misc control */
7628 if (insn & (1 << 13))
7629 goto illegal_op;
7630
7631 if (insn & (1 << 26)) {
7632 /* Secure monitor call (v6Z) */
7633 goto illegal_op; /* not implemented. */
2c0262af 7634 } else {
9ee6e8bb
PB
7635 op = (insn >> 20) & 7;
7636 switch (op) {
7637 case 0: /* msr cpsr. */
7638 if (IS_M(env)) {
8984bd2e
PB
7639 tmp = load_reg(s, rn);
7640 addr = tcg_const_i32(insn & 0xff);
7641 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7642 gen_lookup_tb(s);
7643 break;
7644 }
7645 /* fall through */
7646 case 1: /* msr spsr. */
7647 if (IS_M(env))
7648 goto illegal_op;
7649 gen_movl_T0_reg(s, rn);
7650 if (gen_set_psr_T0(s,
7651 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7652 op == 1))
7653 goto illegal_op;
7654 break;
7655 case 2: /* cps, nop-hint. */
7656 if (((insn >> 8) & 7) == 0) {
7657 gen_nop_hint(s, insn & 0xff);
7658 }
7659 /* Implemented as NOP in user mode. */
7660 if (IS_USER(s))
7661 break;
7662 offset = 0;
7663 imm = 0;
7664 if (insn & (1 << 10)) {
7665 if (insn & (1 << 7))
7666 offset |= CPSR_A;
7667 if (insn & (1 << 6))
7668 offset |= CPSR_I;
7669 if (insn & (1 << 5))
7670 offset |= CPSR_F;
7671 if (insn & (1 << 9))
7672 imm = CPSR_A | CPSR_I | CPSR_F;
7673 }
7674 if (insn & (1 << 8)) {
7675 offset |= 0x1f;
7676 imm |= (insn & 0x1f);
7677 }
7678 if (offset) {
7679 gen_op_movl_T0_im(imm);
7680 gen_set_psr_T0(s, offset, 0);
7681 }
7682 break;
7683 case 3: /* Special control operations. */
7684 op = (insn >> 4) & 0xf;
7685 switch (op) {
7686 case 2: /* clrex */
8f8e3aa4 7687 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7688 break;
7689 case 4: /* dsb */
7690 case 5: /* dmb */
7691 case 6: /* isb */
7692 /* These execute as NOPs. */
7693 ARCH(7);
7694 break;
7695 default:
7696 goto illegal_op;
7697 }
7698 break;
7699 case 4: /* bxj */
7700 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7701 tmp = load_reg(s, rn);
7702 gen_bx(s, tmp);
9ee6e8bb
PB
7703 break;
7704 case 5: /* Exception return. */
7705 /* Unpredictable in user mode. */
7706 goto illegal_op;
7707 case 6: /* mrs cpsr. */
8984bd2e 7708 tmp = new_tmp();
9ee6e8bb 7709 if (IS_M(env)) {
8984bd2e
PB
7710 addr = tcg_const_i32(insn & 0xff);
7711 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7712 } else {
8984bd2e 7713 gen_helper_cpsr_read(tmp);
9ee6e8bb 7714 }
8984bd2e 7715 store_reg(s, rd, tmp);
9ee6e8bb
PB
7716 break;
7717 case 7: /* mrs spsr. */
7718 /* Not accessible in user mode. */
7719 if (IS_USER(s) || IS_M(env))
7720 goto illegal_op;
d9ba4830
PB
7721 tmp = load_cpu_field(spsr);
7722 store_reg(s, rd, tmp);
9ee6e8bb 7723 break;
2c0262af
FB
7724 }
7725 }
9ee6e8bb
PB
7726 } else {
7727 /* Conditional branch. */
7728 op = (insn >> 22) & 0xf;
7729 /* Generate a conditional jump to next instruction. */
7730 s->condlabel = gen_new_label();
d9ba4830 7731 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7732 s->condjmp = 1;
7733
7734 /* offset[11:1] = insn[10:0] */
7735 offset = (insn & 0x7ff) << 1;
7736 /* offset[17:12] = insn[21:16]. */
7737 offset |= (insn & 0x003f0000) >> 4;
7738 /* offset[31:20] = insn[26]. */
7739 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7740 /* offset[18] = insn[13]. */
7741 offset |= (insn & (1 << 13)) << 5;
7742 /* offset[19] = insn[11]. */
7743 offset |= (insn & (1 << 11)) << 8;
7744
7745 /* jump to the offset */
b0109805 7746 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7747 }
7748 } else {
7749 /* Data processing immediate. */
7750 if (insn & (1 << 25)) {
7751 if (insn & (1 << 24)) {
7752 if (insn & (1 << 20))
7753 goto illegal_op;
7754 /* Bitfield/Saturate. */
7755 op = (insn >> 21) & 7;
7756 imm = insn & 0x1f;
7757 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7758 if (rn == 15) {
7759 tmp = new_tmp();
7760 tcg_gen_movi_i32(tmp, 0);
7761 } else {
7762 tmp = load_reg(s, rn);
7763 }
9ee6e8bb
PB
7764 switch (op) {
7765 case 2: /* Signed bitfield extract. */
7766 imm++;
7767 if (shift + imm > 32)
7768 goto illegal_op;
7769 if (imm < 32)
6ddbc6e4 7770 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7771 break;
7772 case 6: /* Unsigned bitfield extract. */
7773 imm++;
7774 if (shift + imm > 32)
7775 goto illegal_op;
7776 if (imm < 32)
6ddbc6e4 7777 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7778 break;
7779 case 3: /* Bitfield insert/clear. */
7780 if (imm < shift)
7781 goto illegal_op;
7782 imm = imm + 1 - shift;
7783 if (imm != 32) {
6ddbc6e4 7784 tmp2 = load_reg(s, rd);
8f8e3aa4 7785 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7786 dead_tmp(tmp2);
9ee6e8bb
PB
7787 }
7788 break;
7789 case 7:
7790 goto illegal_op;
7791 default: /* Saturate. */
9ee6e8bb
PB
7792 if (shift) {
7793 if (op & 1)
6ddbc6e4 7794 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7795 else
6ddbc6e4 7796 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7797 }
6ddbc6e4 7798 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7799 if (op & 4) {
7800 /* Unsigned. */
9ee6e8bb 7801 if ((op & 1) && shift == 0)
6ddbc6e4 7802 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7803 else
6ddbc6e4 7804 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7805 } else {
9ee6e8bb 7806 /* Signed. */
9ee6e8bb 7807 if ((op & 1) && shift == 0)
6ddbc6e4 7808 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7809 else
6ddbc6e4 7810 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7811 }
9ee6e8bb 7812 break;
2c0262af 7813 }
6ddbc6e4 7814 store_reg(s, rd, tmp);
9ee6e8bb
PB
7815 } else {
7816 imm = ((insn & 0x04000000) >> 15)
7817 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7818 if (insn & (1 << 22)) {
7819 /* 16-bit immediate. */
7820 imm |= (insn >> 4) & 0xf000;
7821 if (insn & (1 << 23)) {
7822 /* movt */
5e3f878a 7823 tmp = load_reg(s, rd);
86831435 7824 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7825 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7826 } else {
9ee6e8bb 7827 /* movw */
5e3f878a
PB
7828 tmp = new_tmp();
7829 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7830 }
7831 } else {
9ee6e8bb
PB
7832 /* Add/sub 12-bit immediate. */
7833 if (rn == 15) {
b0109805 7834 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7835 if (insn & (1 << 23))
b0109805 7836 offset -= imm;
9ee6e8bb 7837 else
b0109805 7838 offset += imm;
5e3f878a
PB
7839 tmp = new_tmp();
7840 tcg_gen_movi_i32(tmp, offset);
2c0262af 7841 } else {
5e3f878a 7842 tmp = load_reg(s, rn);
9ee6e8bb 7843 if (insn & (1 << 23))
5e3f878a 7844 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7845 else
5e3f878a 7846 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7847 }
9ee6e8bb 7848 }
5e3f878a 7849 store_reg(s, rd, tmp);
191abaa2 7850 }
9ee6e8bb
PB
7851 } else {
7852 int shifter_out = 0;
7853 /* modified 12-bit immediate. */
7854 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7855 imm = (insn & 0xff);
7856 switch (shift) {
7857 case 0: /* XY */
7858 /* Nothing to do. */
7859 break;
7860 case 1: /* 00XY00XY */
7861 imm |= imm << 16;
7862 break;
7863 case 2: /* XY00XY00 */
7864 imm |= imm << 16;
7865 imm <<= 8;
7866 break;
7867 case 3: /* XYXYXYXY */
7868 imm |= imm << 16;
7869 imm |= imm << 8;
7870 break;
7871 default: /* Rotated constant. */
7872 shift = (shift << 1) | (imm >> 7);
7873 imm |= 0x80;
7874 imm = imm << (32 - shift);
7875 shifter_out = 1;
7876 break;
b5ff1b31 7877 }
9ee6e8bb
PB
7878 gen_op_movl_T1_im(imm);
7879 rn = (insn >> 16) & 0xf;
7880 if (rn == 15)
7881 gen_op_movl_T0_im(0);
7882 else
7883 gen_movl_T0_reg(s, rn);
7884 op = (insn >> 21) & 0xf;
7885 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7886 shifter_out))
7887 goto illegal_op;
7888 rd = (insn >> 8) & 0xf;
7889 if (rd != 15) {
7890 gen_movl_reg_T0(s, rd);
2c0262af 7891 }
2c0262af 7892 }
9ee6e8bb
PB
7893 }
7894 break;
7895 case 12: /* Load/store single data item. */
7896 {
7897 int postinc = 0;
7898 int writeback = 0;
b0109805 7899 int user;
9ee6e8bb
PB
7900 if ((insn & 0x01100000) == 0x01000000) {
7901 if (disas_neon_ls_insn(env, s, insn))
c1713132 7902 goto illegal_op;
9ee6e8bb
PB
7903 break;
7904 }
b0109805 7905 user = IS_USER(s);
9ee6e8bb 7906 if (rn == 15) {
b0109805 7907 addr = new_tmp();
9ee6e8bb
PB
7908 /* PC relative. */
7909 /* s->pc has already been incremented by 4. */
7910 imm = s->pc & 0xfffffffc;
7911 if (insn & (1 << 23))
7912 imm += insn & 0xfff;
7913 else
7914 imm -= insn & 0xfff;
b0109805 7915 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7916 } else {
b0109805 7917 addr = load_reg(s, rn);
9ee6e8bb
PB
7918 if (insn & (1 << 23)) {
7919 /* Positive offset. */
7920 imm = insn & 0xfff;
b0109805 7921 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7922 } else {
7923 op = (insn >> 8) & 7;
7924 imm = insn & 0xff;
7925 switch (op) {
7926 case 0: case 8: /* Shifted Register. */
7927 shift = (insn >> 4) & 0xf;
7928 if (shift > 3)
18c9b560 7929 goto illegal_op;
b26eefb6 7930 tmp = load_reg(s, rm);
9ee6e8bb 7931 if (shift)
b26eefb6 7932 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7933 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7934 dead_tmp(tmp);
9ee6e8bb
PB
7935 break;
7936 case 4: /* Negative offset. */
b0109805 7937 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7938 break;
7939 case 6: /* User privilege. */
b0109805
PB
7940 tcg_gen_addi_i32(addr, addr, imm);
7941 user = 1;
9ee6e8bb
PB
7942 break;
7943 case 1: /* Post-decrement. */
7944 imm = -imm;
7945 /* Fall through. */
7946 case 3: /* Post-increment. */
9ee6e8bb
PB
7947 postinc = 1;
7948 writeback = 1;
7949 break;
7950 case 5: /* Pre-decrement. */
7951 imm = -imm;
7952 /* Fall through. */
7953 case 7: /* Pre-increment. */
b0109805 7954 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7955 writeback = 1;
7956 break;
7957 default:
b7bcbe95 7958 goto illegal_op;
9ee6e8bb
PB
7959 }
7960 }
7961 }
7962 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7963 if (insn & (1 << 20)) {
7964 /* Load. */
7965 if (rs == 15 && op != 2) {
7966 if (op & 2)
b5ff1b31 7967 goto illegal_op;
9ee6e8bb
PB
7968 /* Memory hint. Implemented as NOP. */
7969 } else {
7970 switch (op) {
b0109805
PB
7971 case 0: tmp = gen_ld8u(addr, user); break;
7972 case 4: tmp = gen_ld8s(addr, user); break;
7973 case 1: tmp = gen_ld16u(addr, user); break;
7974 case 5: tmp = gen_ld16s(addr, user); break;
7975 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
7976 default: goto illegal_op;
7977 }
7978 if (rs == 15) {
b0109805 7979 gen_bx(s, tmp);
9ee6e8bb 7980 } else {
b0109805 7981 store_reg(s, rs, tmp);
9ee6e8bb
PB
7982 }
7983 }
7984 } else {
7985 /* Store. */
7986 if (rs == 15)
b7bcbe95 7987 goto illegal_op;
b0109805 7988 tmp = load_reg(s, rs);
9ee6e8bb 7989 switch (op) {
b0109805
PB
7990 case 0: gen_st8(tmp, addr, user); break;
7991 case 1: gen_st16(tmp, addr, user); break;
7992 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 7993 default: goto illegal_op;
b7bcbe95 7994 }
2c0262af 7995 }
9ee6e8bb 7996 if (postinc)
b0109805
PB
7997 tcg_gen_addi_i32(addr, addr, imm);
7998 if (writeback) {
7999 store_reg(s, rn, addr);
8000 } else {
8001 dead_tmp(addr);
8002 }
9ee6e8bb
PB
8003 }
8004 break;
8005 default:
8006 goto illegal_op;
2c0262af 8007 }
9ee6e8bb
PB
8008 return 0;
8009illegal_op:
8010 return 1;
2c0262af
FB
8011}
8012
9ee6e8bb 8013static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8014{
8015 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8016 int32_t offset;
8017 int i;
b26eefb6 8018 TCGv tmp;
d9ba4830 8019 TCGv tmp2;
b0109805 8020 TCGv addr;
99c475ab 8021
9ee6e8bb
PB
8022 if (s->condexec_mask) {
8023 cond = s->condexec_cond;
8024 s->condlabel = gen_new_label();
d9ba4830 8025 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8026 s->condjmp = 1;
8027 }
8028
b5ff1b31 8029 insn = lduw_code(s->pc);
99c475ab 8030 s->pc += 2;
b5ff1b31 8031
99c475ab
FB
8032 switch (insn >> 12) {
8033 case 0: case 1:
8034 rd = insn & 7;
8035 op = (insn >> 11) & 3;
8036 if (op == 3) {
8037 /* add/subtract */
8038 rn = (insn >> 3) & 7;
8039 gen_movl_T0_reg(s, rn);
8040 if (insn & (1 << 10)) {
8041 /* immediate */
8042 gen_op_movl_T1_im((insn >> 6) & 7);
8043 } else {
8044 /* reg */
8045 rm = (insn >> 6) & 7;
8046 gen_movl_T1_reg(s, rm);
8047 }
9ee6e8bb
PB
8048 if (insn & (1 << 9)) {
8049 if (s->condexec_mask)
8050 gen_op_subl_T0_T1();
8051 else
8052 gen_op_subl_T0_T1_cc();
8053 } else {
8054 if (s->condexec_mask)
8055 gen_op_addl_T0_T1();
8056 else
8057 gen_op_addl_T0_T1_cc();
8058 }
99c475ab
FB
8059 gen_movl_reg_T0(s, rd);
8060 } else {
8061 /* shift immediate */
8062 rm = (insn >> 3) & 7;
8063 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8064 tmp = load_reg(s, rm);
8065 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8066 if (!s->condexec_mask)
8067 gen_logic_CC(tmp);
8068 store_reg(s, rd, tmp);
99c475ab
FB
8069 }
8070 break;
8071 case 2: case 3:
8072 /* arithmetic large immediate */
8073 op = (insn >> 11) & 3;
8074 rd = (insn >> 8) & 0x7;
8075 if (op == 0) {
8076 gen_op_movl_T0_im(insn & 0xff);
8077 } else {
8078 gen_movl_T0_reg(s, rd);
8079 gen_op_movl_T1_im(insn & 0xff);
8080 }
8081 switch (op) {
8082 case 0: /* mov */
9ee6e8bb
PB
8083 if (!s->condexec_mask)
8084 gen_op_logic_T0_cc();
99c475ab
FB
8085 break;
8086 case 1: /* cmp */
8087 gen_op_subl_T0_T1_cc();
8088 break;
8089 case 2: /* add */
9ee6e8bb
PB
8090 if (s->condexec_mask)
8091 gen_op_addl_T0_T1();
8092 else
8093 gen_op_addl_T0_T1_cc();
99c475ab
FB
8094 break;
8095 case 3: /* sub */
9ee6e8bb
PB
8096 if (s->condexec_mask)
8097 gen_op_subl_T0_T1();
8098 else
8099 gen_op_subl_T0_T1_cc();
99c475ab
FB
8100 break;
8101 }
8102 if (op != 1)
8103 gen_movl_reg_T0(s, rd);
8104 break;
8105 case 4:
8106 if (insn & (1 << 11)) {
8107 rd = (insn >> 8) & 7;
5899f386
FB
8108 /* load pc-relative. Bit 1 of PC is ignored. */
8109 val = s->pc + 2 + ((insn & 0xff) * 4);
8110 val &= ~(uint32_t)2;
b0109805
PB
8111 addr = new_tmp();
8112 tcg_gen_movi_i32(addr, val);
8113 tmp = gen_ld32(addr, IS_USER(s));
8114 dead_tmp(addr);
8115 store_reg(s, rd, tmp);
99c475ab
FB
8116 break;
8117 }
8118 if (insn & (1 << 10)) {
8119 /* data processing extended or blx */
8120 rd = (insn & 7) | ((insn >> 4) & 8);
8121 rm = (insn >> 3) & 0xf;
8122 op = (insn >> 8) & 3;
8123 switch (op) {
8124 case 0: /* add */
8125 gen_movl_T0_reg(s, rd);
8126 gen_movl_T1_reg(s, rm);
8127 gen_op_addl_T0_T1();
8128 gen_movl_reg_T0(s, rd);
8129 break;
8130 case 1: /* cmp */
8131 gen_movl_T0_reg(s, rd);
8132 gen_movl_T1_reg(s, rm);
8133 gen_op_subl_T0_T1_cc();
8134 break;
8135 case 2: /* mov/cpy */
8136 gen_movl_T0_reg(s, rm);
8137 gen_movl_reg_T0(s, rd);
8138 break;
8139 case 3:/* branch [and link] exchange thumb register */
b0109805 8140 tmp = load_reg(s, rm);
99c475ab
FB
8141 if (insn & (1 << 7)) {
8142 val = (uint32_t)s->pc | 1;
b0109805
PB
8143 tmp2 = new_tmp();
8144 tcg_gen_movi_i32(tmp2, val);
8145 store_reg(s, 14, tmp2);
99c475ab 8146 }
d9ba4830 8147 gen_bx(s, tmp);
99c475ab
FB
8148 break;
8149 }
8150 break;
8151 }
8152
8153 /* data processing register */
8154 rd = insn & 7;
8155 rm = (insn >> 3) & 7;
8156 op = (insn >> 6) & 0xf;
8157 if (op == 2 || op == 3 || op == 4 || op == 7) {
8158 /* the shift/rotate ops want the operands backwards */
8159 val = rm;
8160 rm = rd;
8161 rd = val;
8162 val = 1;
8163 } else {
8164 val = 0;
8165 }
8166
8167 if (op == 9) /* neg */
8168 gen_op_movl_T0_im(0);
8169 else if (op != 0xf) /* mvn doesn't read its first operand */
8170 gen_movl_T0_reg(s, rd);
8171
8172 gen_movl_T1_reg(s, rm);
5899f386 8173 switch (op) {
99c475ab
FB
8174 case 0x0: /* and */
8175 gen_op_andl_T0_T1();
9ee6e8bb
PB
8176 if (!s->condexec_mask)
8177 gen_op_logic_T0_cc();
99c475ab
FB
8178 break;
8179 case 0x1: /* eor */
8180 gen_op_xorl_T0_T1();
9ee6e8bb
PB
8181 if (!s->condexec_mask)
8182 gen_op_logic_T0_cc();
99c475ab
FB
8183 break;
8184 case 0x2: /* lsl */
9ee6e8bb 8185 if (s->condexec_mask) {
8984bd2e 8186 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8187 } else {
8984bd2e 8188 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8189 gen_op_logic_T1_cc();
8190 }
99c475ab
FB
8191 break;
8192 case 0x3: /* lsr */
9ee6e8bb 8193 if (s->condexec_mask) {
8984bd2e 8194 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8195 } else {
8984bd2e 8196 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8197 gen_op_logic_T1_cc();
8198 }
99c475ab
FB
8199 break;
8200 case 0x4: /* asr */
9ee6e8bb 8201 if (s->condexec_mask) {
8984bd2e 8202 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8203 } else {
8984bd2e 8204 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8205 gen_op_logic_T1_cc();
8206 }
99c475ab
FB
8207 break;
8208 case 0x5: /* adc */
9ee6e8bb 8209 if (s->condexec_mask)
b26eefb6 8210 gen_adc_T0_T1();
9ee6e8bb
PB
8211 else
8212 gen_op_adcl_T0_T1_cc();
99c475ab
FB
8213 break;
8214 case 0x6: /* sbc */
9ee6e8bb 8215 if (s->condexec_mask)
3670669c 8216 gen_sbc_T0_T1();
9ee6e8bb
PB
8217 else
8218 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
8219 break;
8220 case 0x7: /* ror */
9ee6e8bb 8221 if (s->condexec_mask) {
8984bd2e 8222 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8223 } else {
8984bd2e 8224 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8225 gen_op_logic_T1_cc();
8226 }
99c475ab
FB
8227 break;
8228 case 0x8: /* tst */
8229 gen_op_andl_T0_T1();
8230 gen_op_logic_T0_cc();
8231 rd = 16;
5899f386 8232 break;
99c475ab 8233 case 0x9: /* neg */
9ee6e8bb 8234 if (s->condexec_mask)
390efc54 8235 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
8236 else
8237 gen_op_subl_T0_T1_cc();
99c475ab
FB
8238 break;
8239 case 0xa: /* cmp */
8240 gen_op_subl_T0_T1_cc();
8241 rd = 16;
8242 break;
8243 case 0xb: /* cmn */
8244 gen_op_addl_T0_T1_cc();
8245 rd = 16;
8246 break;
8247 case 0xc: /* orr */
8248 gen_op_orl_T0_T1();
9ee6e8bb
PB
8249 if (!s->condexec_mask)
8250 gen_op_logic_T0_cc();
99c475ab
FB
8251 break;
8252 case 0xd: /* mul */
8253 gen_op_mull_T0_T1();
9ee6e8bb
PB
8254 if (!s->condexec_mask)
8255 gen_op_logic_T0_cc();
99c475ab
FB
8256 break;
8257 case 0xe: /* bic */
8258 gen_op_bicl_T0_T1();
9ee6e8bb
PB
8259 if (!s->condexec_mask)
8260 gen_op_logic_T0_cc();
99c475ab
FB
8261 break;
8262 case 0xf: /* mvn */
8263 gen_op_notl_T1();
9ee6e8bb
PB
8264 if (!s->condexec_mask)
8265 gen_op_logic_T1_cc();
99c475ab 8266 val = 1;
5899f386 8267 rm = rd;
99c475ab
FB
8268 break;
8269 }
8270 if (rd != 16) {
8271 if (val)
5899f386 8272 gen_movl_reg_T1(s, rm);
99c475ab
FB
8273 else
8274 gen_movl_reg_T0(s, rd);
8275 }
8276 break;
8277
8278 case 5:
8279 /* load/store register offset. */
8280 rd = insn & 7;
8281 rn = (insn >> 3) & 7;
8282 rm = (insn >> 6) & 7;
8283 op = (insn >> 9) & 7;
b0109805 8284 addr = load_reg(s, rn);
b26eefb6 8285 tmp = load_reg(s, rm);
b0109805 8286 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8287 dead_tmp(tmp);
99c475ab
FB
8288
8289 if (op < 3) /* store */
b0109805 8290 tmp = load_reg(s, rd);
99c475ab
FB
8291
8292 switch (op) {
8293 case 0: /* str */
b0109805 8294 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8295 break;
8296 case 1: /* strh */
b0109805 8297 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8298 break;
8299 case 2: /* strb */
b0109805 8300 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8301 break;
8302 case 3: /* ldrsb */
b0109805 8303 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8304 break;
8305 case 4: /* ldr */
b0109805 8306 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8307 break;
8308 case 5: /* ldrh */
b0109805 8309 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8310 break;
8311 case 6: /* ldrb */
b0109805 8312 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8313 break;
8314 case 7: /* ldrsh */
b0109805 8315 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8316 break;
8317 }
8318 if (op >= 3) /* load */
b0109805
PB
8319 store_reg(s, rd, tmp);
8320 dead_tmp(addr);
99c475ab
FB
8321 break;
8322
8323 case 6:
8324 /* load/store word immediate offset */
8325 rd = insn & 7;
8326 rn = (insn >> 3) & 7;
b0109805 8327 addr = load_reg(s, rn);
99c475ab 8328 val = (insn >> 4) & 0x7c;
b0109805 8329 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8330
8331 if (insn & (1 << 11)) {
8332 /* load */
b0109805
PB
8333 tmp = gen_ld32(addr, IS_USER(s));
8334 store_reg(s, rd, tmp);
99c475ab
FB
8335 } else {
8336 /* store */
b0109805
PB
8337 tmp = load_reg(s, rd);
8338 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8339 }
b0109805 8340 dead_tmp(addr);
99c475ab
FB
8341 break;
8342
8343 case 7:
8344 /* load/store byte immediate offset */
8345 rd = insn & 7;
8346 rn = (insn >> 3) & 7;
b0109805 8347 addr = load_reg(s, rn);
99c475ab 8348 val = (insn >> 6) & 0x1f;
b0109805 8349 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8350
8351 if (insn & (1 << 11)) {
8352 /* load */
b0109805
PB
8353 tmp = gen_ld8u(addr, IS_USER(s));
8354 store_reg(s, rd, tmp);
99c475ab
FB
8355 } else {
8356 /* store */
b0109805
PB
8357 tmp = load_reg(s, rd);
8358 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8359 }
b0109805 8360 dead_tmp(addr);
99c475ab
FB
8361 break;
8362
8363 case 8:
8364 /* load/store halfword immediate offset */
8365 rd = insn & 7;
8366 rn = (insn >> 3) & 7;
b0109805 8367 addr = load_reg(s, rn);
99c475ab 8368 val = (insn >> 5) & 0x3e;
b0109805 8369 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8370
8371 if (insn & (1 << 11)) {
8372 /* load */
b0109805
PB
8373 tmp = gen_ld16u(addr, IS_USER(s));
8374 store_reg(s, rd, tmp);
99c475ab
FB
8375 } else {
8376 /* store */
b0109805
PB
8377 tmp = load_reg(s, rd);
8378 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8379 }
b0109805 8380 dead_tmp(addr);
99c475ab
FB
8381 break;
8382
8383 case 9:
8384 /* load/store from stack */
8385 rd = (insn >> 8) & 7;
b0109805 8386 addr = load_reg(s, 13);
99c475ab 8387 val = (insn & 0xff) * 4;
b0109805 8388 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8389
8390 if (insn & (1 << 11)) {
8391 /* load */
b0109805
PB
8392 tmp = gen_ld32(addr, IS_USER(s));
8393 store_reg(s, rd, tmp);
99c475ab
FB
8394 } else {
8395 /* store */
b0109805
PB
8396 tmp = load_reg(s, rd);
8397 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8398 }
b0109805 8399 dead_tmp(addr);
99c475ab
FB
8400 break;
8401
8402 case 10:
8403 /* add to high reg */
8404 rd = (insn >> 8) & 7;
5899f386
FB
8405 if (insn & (1 << 11)) {
8406 /* SP */
5e3f878a 8407 tmp = load_reg(s, 13);
5899f386
FB
8408 } else {
8409 /* PC. bit 1 is ignored. */
5e3f878a
PB
8410 tmp = new_tmp();
8411 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8412 }
99c475ab 8413 val = (insn & 0xff) * 4;
5e3f878a
PB
8414 tcg_gen_addi_i32(tmp, tmp, val);
8415 store_reg(s, rd, tmp);
99c475ab
FB
8416 break;
8417
8418 case 11:
8419 /* misc */
8420 op = (insn >> 8) & 0xf;
8421 switch (op) {
8422 case 0:
8423 /* adjust stack pointer */
b26eefb6 8424 tmp = load_reg(s, 13);
99c475ab
FB
8425 val = (insn & 0x7f) * 4;
8426 if (insn & (1 << 7))
6a0d8a1d 8427 val = -(int32_t)val;
b26eefb6
PB
8428 tcg_gen_addi_i32(tmp, tmp, val);
8429 store_reg(s, 13, tmp);
99c475ab
FB
8430 break;
8431
9ee6e8bb
PB
8432 case 2: /* sign/zero extend. */
8433 ARCH(6);
8434 rd = insn & 7;
8435 rm = (insn >> 3) & 7;
b0109805 8436 tmp = load_reg(s, rm);
9ee6e8bb 8437 switch ((insn >> 6) & 3) {
b0109805
PB
8438 case 0: gen_sxth(tmp); break;
8439 case 1: gen_sxtb(tmp); break;
8440 case 2: gen_uxth(tmp); break;
8441 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8442 }
b0109805 8443 store_reg(s, rd, tmp);
9ee6e8bb 8444 break;
99c475ab
FB
8445 case 4: case 5: case 0xc: case 0xd:
8446 /* push/pop */
b0109805 8447 addr = load_reg(s, 13);
5899f386
FB
8448 if (insn & (1 << 8))
8449 offset = 4;
99c475ab 8450 else
5899f386
FB
8451 offset = 0;
8452 for (i = 0; i < 8; i++) {
8453 if (insn & (1 << i))
8454 offset += 4;
8455 }
8456 if ((insn & (1 << 11)) == 0) {
b0109805 8457 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8458 }
99c475ab
FB
8459 for (i = 0; i < 8; i++) {
8460 if (insn & (1 << i)) {
8461 if (insn & (1 << 11)) {
8462 /* pop */
b0109805
PB
8463 tmp = gen_ld32(addr, IS_USER(s));
8464 store_reg(s, i, tmp);
99c475ab
FB
8465 } else {
8466 /* push */
b0109805
PB
8467 tmp = load_reg(s, i);
8468 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8469 }
5899f386 8470 /* advance to the next address. */
b0109805 8471 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8472 }
8473 }
a50f5b91 8474 TCGV_UNUSED(tmp);
99c475ab
FB
8475 if (insn & (1 << 8)) {
8476 if (insn & (1 << 11)) {
8477 /* pop pc */
b0109805 8478 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8479 /* don't set the pc until the rest of the instruction
8480 has completed */
8481 } else {
8482 /* push lr */
b0109805
PB
8483 tmp = load_reg(s, 14);
8484 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8485 }
b0109805 8486 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8487 }
5899f386 8488 if ((insn & (1 << 11)) == 0) {
b0109805 8489 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8490 }
99c475ab 8491 /* write back the new stack pointer */
b0109805 8492 store_reg(s, 13, addr);
99c475ab
FB
8493 /* set the new PC value */
8494 if ((insn & 0x0900) == 0x0900)
b0109805 8495 gen_bx(s, tmp);
99c475ab
FB
8496 break;
8497
9ee6e8bb
PB
8498 case 1: case 3: case 9: case 11: /* czb */
8499 rm = insn & 7;
d9ba4830 8500 tmp = load_reg(s, rm);
9ee6e8bb
PB
8501 s->condlabel = gen_new_label();
8502 s->condjmp = 1;
8503 if (insn & (1 << 11))
cb63669a 8504 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8505 else
cb63669a 8506 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8507 dead_tmp(tmp);
9ee6e8bb
PB
8508 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8509 val = (uint32_t)s->pc + 2;
8510 val += offset;
8511 gen_jmp(s, val);
8512 break;
8513
8514 case 15: /* IT, nop-hint. */
8515 if ((insn & 0xf) == 0) {
8516 gen_nop_hint(s, (insn >> 4) & 0xf);
8517 break;
8518 }
8519 /* If Then. */
8520 s->condexec_cond = (insn >> 4) & 0xe;
8521 s->condexec_mask = insn & 0x1f;
8522 /* No actual code generated for this insn, just setup state. */
8523 break;
8524
06c949e6 8525 case 0xe: /* bkpt */
9ee6e8bb 8526 gen_set_condexec(s);
5e3f878a 8527 gen_set_pc_im(s->pc - 2);
d9ba4830 8528 gen_exception(EXCP_BKPT);
06c949e6
PB
8529 s->is_jmp = DISAS_JUMP;
8530 break;
8531
9ee6e8bb
PB
8532 case 0xa: /* rev */
8533 ARCH(6);
8534 rn = (insn >> 3) & 0x7;
8535 rd = insn & 0x7;
b0109805 8536 tmp = load_reg(s, rn);
9ee6e8bb 8537 switch ((insn >> 6) & 3) {
66896cb8 8538 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8539 case 1: gen_rev16(tmp); break;
8540 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8541 default: goto illegal_op;
8542 }
b0109805 8543 store_reg(s, rd, tmp);
9ee6e8bb
PB
8544 break;
8545
8546 case 6: /* cps */
8547 ARCH(6);
8548 if (IS_USER(s))
8549 break;
8550 if (IS_M(env)) {
8984bd2e 8551 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8552 /* PRIMASK */
8984bd2e
PB
8553 if (insn & 1) {
8554 addr = tcg_const_i32(16);
8555 gen_helper_v7m_msr(cpu_env, addr, tmp);
8556 }
9ee6e8bb 8557 /* FAULTMASK */
8984bd2e
PB
8558 if (insn & 2) {
8559 addr = tcg_const_i32(17);
8560 gen_helper_v7m_msr(cpu_env, addr, tmp);
8561 }
9ee6e8bb
PB
8562 gen_lookup_tb(s);
8563 } else {
8564 if (insn & (1 << 4))
8565 shift = CPSR_A | CPSR_I | CPSR_F;
8566 else
8567 shift = 0;
8568
8569 val = ((insn & 7) << 6) & shift;
8570 gen_op_movl_T0_im(val);
8571 gen_set_psr_T0(s, shift, 0);
8572 }
8573 break;
8574
99c475ab
FB
8575 default:
8576 goto undef;
8577 }
8578 break;
8579
8580 case 12:
8581 /* load/store multiple */
8582 rn = (insn >> 8) & 0x7;
b0109805 8583 addr = load_reg(s, rn);
99c475ab
FB
8584 for (i = 0; i < 8; i++) {
8585 if (insn & (1 << i)) {
99c475ab
FB
8586 if (insn & (1 << 11)) {
8587 /* load */
b0109805
PB
8588 tmp = gen_ld32(addr, IS_USER(s));
8589 store_reg(s, i, tmp);
99c475ab
FB
8590 } else {
8591 /* store */
b0109805
PB
8592 tmp = load_reg(s, i);
8593 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8594 }
5899f386 8595 /* advance to the next address */
b0109805 8596 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8597 }
8598 }
5899f386 8599 /* Base register writeback. */
b0109805
PB
8600 if ((insn & (1 << rn)) == 0) {
8601 store_reg(s, rn, addr);
8602 } else {
8603 dead_tmp(addr);
8604 }
99c475ab
FB
8605 break;
8606
8607 case 13:
8608 /* conditional branch or swi */
8609 cond = (insn >> 8) & 0xf;
8610 if (cond == 0xe)
8611 goto undef;
8612
8613 if (cond == 0xf) {
8614 /* swi */
9ee6e8bb 8615 gen_set_condexec(s);
422ebf69 8616 gen_set_pc_im(s->pc);
9ee6e8bb 8617 s->is_jmp = DISAS_SWI;
99c475ab
FB
8618 break;
8619 }
8620 /* generate a conditional jump to next instruction */
e50e6a20 8621 s->condlabel = gen_new_label();
d9ba4830 8622 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8623 s->condjmp = 1;
99c475ab
FB
8624
8625 /* jump to the offset */
5899f386 8626 val = (uint32_t)s->pc + 2;
99c475ab 8627 offset = ((int32_t)insn << 24) >> 24;
5899f386 8628 val += offset << 1;
8aaca4c0 8629 gen_jmp(s, val);
99c475ab
FB
8630 break;
8631
8632 case 14:
358bf29e 8633 if (insn & (1 << 11)) {
9ee6e8bb
PB
8634 if (disas_thumb2_insn(env, s, insn))
8635 goto undef32;
358bf29e
PB
8636 break;
8637 }
9ee6e8bb 8638 /* unconditional branch */
99c475ab
FB
8639 val = (uint32_t)s->pc;
8640 offset = ((int32_t)insn << 21) >> 21;
8641 val += (offset << 1) + 2;
8aaca4c0 8642 gen_jmp(s, val);
99c475ab
FB
8643 break;
8644
8645 case 15:
9ee6e8bb 8646 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8647 goto undef32;
9ee6e8bb 8648 break;
99c475ab
FB
8649 }
8650 return;
9ee6e8bb
PB
8651undef32:
8652 gen_set_condexec(s);
5e3f878a 8653 gen_set_pc_im(s->pc - 4);
d9ba4830 8654 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8655 s->is_jmp = DISAS_JUMP;
8656 return;
8657illegal_op:
99c475ab 8658undef:
9ee6e8bb 8659 gen_set_condexec(s);
5e3f878a 8660 gen_set_pc_im(s->pc - 2);
d9ba4830 8661 gen_exception(EXCP_UDEF);
99c475ab
FB
8662 s->is_jmp = DISAS_JUMP;
8663}
8664
2c0262af
FB
8665/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8666 basic block 'tb'. If search_pc is TRUE, also generate PC
8667 information for each intermediate instruction. */
2cfc5f17
TS
8668static inline void gen_intermediate_code_internal(CPUState *env,
8669 TranslationBlock *tb,
8670 int search_pc)
2c0262af
FB
8671{
8672 DisasContext dc1, *dc = &dc1;
a1d1bb31 8673 CPUBreakpoint *bp;
2c0262af
FB
8674 uint16_t *gen_opc_end;
8675 int j, lj;
0fa85d43 8676 target_ulong pc_start;
b5ff1b31 8677 uint32_t next_page_start;
2e70f6ef
PB
8678 int num_insns;
8679 int max_insns;
3b46e624 8680
2c0262af 8681 /* generate intermediate code */
b26eefb6 8682 num_temps = 0;
b26eefb6 8683
0fa85d43 8684 pc_start = tb->pc;
3b46e624 8685
2c0262af
FB
8686 dc->tb = tb;
8687
2c0262af 8688 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8689
8690 dc->is_jmp = DISAS_NEXT;
8691 dc->pc = pc_start;
8aaca4c0 8692 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8693 dc->condjmp = 0;
5899f386 8694 dc->thumb = env->thumb;
9ee6e8bb
PB
8695 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8696 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 8697#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8698 if (IS_M(env)) {
8699 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8700 } else {
8701 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8702 }
b5ff1b31 8703#endif
a7812ae4
PB
8704 cpu_F0s = tcg_temp_new_i32();
8705 cpu_F1s = tcg_temp_new_i32();
8706 cpu_F0d = tcg_temp_new_i64();
8707 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8708 cpu_V0 = cpu_F0d;
8709 cpu_V1 = cpu_F1d;
e677137d 8710 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8711 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8712 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8713 lj = -1;
2e70f6ef
PB
8714 num_insns = 0;
8715 max_insns = tb->cflags & CF_COUNT_MASK;
8716 if (max_insns == 0)
8717 max_insns = CF_COUNT_MASK;
8718
8719 gen_icount_start();
9ee6e8bb
PB
8720 /* Reset the conditional execution bits immediately. This avoids
8721 complications trying to do it at the end of the block. */
8722 if (env->condexec_bits)
8f01245e
PB
8723 {
8724 TCGv tmp = new_tmp();
8725 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8726 store_cpu_field(tmp, condexec_bits);
8f01245e 8727 }
2c0262af 8728 do {
fbb4a2e3
PB
8729#ifdef CONFIG_USER_ONLY
8730 /* Intercept jump to the magic kernel page. */
8731 if (dc->pc >= 0xffff0000) {
8732 /* We always get here via a jump, so know we are not in a
8733 conditional execution block. */
8734 gen_exception(EXCP_KERNEL_TRAP);
8735 dc->is_jmp = DISAS_UPDATE;
8736 break;
8737 }
8738#else
9ee6e8bb
PB
8739 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8740 /* We always get here via a jump, so know we are not in a
8741 conditional execution block. */
d9ba4830 8742 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8743 dc->is_jmp = DISAS_UPDATE;
8744 break;
9ee6e8bb
PB
8745 }
8746#endif
8747
72cf2d4f
BS
8748 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8749 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8750 if (bp->pc == dc->pc) {
9ee6e8bb 8751 gen_set_condexec(dc);
5e3f878a 8752 gen_set_pc_im(dc->pc);
d9ba4830 8753 gen_exception(EXCP_DEBUG);
1fddef4b 8754 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8755 /* Advance PC so that clearing the breakpoint will
8756 invalidate this TB. */
8757 dc->pc += 2;
8758 goto done_generating;
1fddef4b
FB
8759 break;
8760 }
8761 }
8762 }
2c0262af
FB
8763 if (search_pc) {
8764 j = gen_opc_ptr - gen_opc_buf;
8765 if (lj < j) {
8766 lj++;
8767 while (lj < j)
8768 gen_opc_instr_start[lj++] = 0;
8769 }
0fa85d43 8770 gen_opc_pc[lj] = dc->pc;
2c0262af 8771 gen_opc_instr_start[lj] = 1;
2e70f6ef 8772 gen_opc_icount[lj] = num_insns;
2c0262af 8773 }
e50e6a20 8774
2e70f6ef
PB
8775 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8776 gen_io_start();
8777
9ee6e8bb
PB
8778 if (env->thumb) {
8779 disas_thumb_insn(env, dc);
8780 if (dc->condexec_mask) {
8781 dc->condexec_cond = (dc->condexec_cond & 0xe)
8782 | ((dc->condexec_mask >> 4) & 1);
8783 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8784 if (dc->condexec_mask == 0) {
8785 dc->condexec_cond = 0;
8786 }
8787 }
8788 } else {
8789 disas_arm_insn(env, dc);
8790 }
b26eefb6
PB
8791 if (num_temps) {
8792 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8793 num_temps = 0;
8794 }
e50e6a20
FB
8795
8796 if (dc->condjmp && !dc->is_jmp) {
8797 gen_set_label(dc->condlabel);
8798 dc->condjmp = 0;
8799 }
aaf2d97d 8800 /* Translation stops when a conditional branch is encountered.
e50e6a20 8801 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8802 * Also stop translation when a page boundary is reached. This
bf20dc07 8803 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8804 num_insns ++;
1fddef4b
FB
8805 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8806 !env->singlestep_enabled &&
1b530a6d 8807 !singlestep &&
2e70f6ef
PB
8808 dc->pc < next_page_start &&
8809 num_insns < max_insns);
8810
8811 if (tb->cflags & CF_LAST_IO) {
8812 if (dc->condjmp) {
8813 /* FIXME: This can theoretically happen with self-modifying
8814 code. */
8815 cpu_abort(env, "IO on conditional branch instruction");
8816 }
8817 gen_io_end();
8818 }
9ee6e8bb 8819
b5ff1b31 8820 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8821 instruction was a conditional branch or trap, and the PC has
8822 already been written. */
551bd27f 8823 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8824 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8825 if (dc->condjmp) {
9ee6e8bb
PB
8826 gen_set_condexec(dc);
8827 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8828 gen_exception(EXCP_SWI);
9ee6e8bb 8829 } else {
d9ba4830 8830 gen_exception(EXCP_DEBUG);
9ee6e8bb 8831 }
e50e6a20
FB
8832 gen_set_label(dc->condlabel);
8833 }
8834 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8835 gen_set_pc_im(dc->pc);
e50e6a20 8836 dc->condjmp = 0;
8aaca4c0 8837 }
9ee6e8bb
PB
8838 gen_set_condexec(dc);
8839 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8840 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8841 } else {
8842 /* FIXME: Single stepping a WFI insn will not halt
8843 the CPU. */
d9ba4830 8844 gen_exception(EXCP_DEBUG);
9ee6e8bb 8845 }
8aaca4c0 8846 } else {
9ee6e8bb
PB
8847 /* While branches must always occur at the end of an IT block,
8848 there are a few other things that can cause us to terminate
8849 the TB in the middel of an IT block:
8850 - Exception generating instructions (bkpt, swi, undefined).
8851 - Page boundaries.
8852 - Hardware watchpoints.
8853 Hardware breakpoints have already been handled and skip this code.
8854 */
8855 gen_set_condexec(dc);
8aaca4c0 8856 switch(dc->is_jmp) {
8aaca4c0 8857 case DISAS_NEXT:
6e256c93 8858 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8859 break;
8860 default:
8861 case DISAS_JUMP:
8862 case DISAS_UPDATE:
8863 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8864 tcg_gen_exit_tb(0);
8aaca4c0
FB
8865 break;
8866 case DISAS_TB_JUMP:
8867 /* nothing more to generate */
8868 break;
9ee6e8bb 8869 case DISAS_WFI:
d9ba4830 8870 gen_helper_wfi();
9ee6e8bb
PB
8871 break;
8872 case DISAS_SWI:
d9ba4830 8873 gen_exception(EXCP_SWI);
9ee6e8bb 8874 break;
8aaca4c0 8875 }
e50e6a20
FB
8876 if (dc->condjmp) {
8877 gen_set_label(dc->condlabel);
9ee6e8bb 8878 gen_set_condexec(dc);
6e256c93 8879 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8880 dc->condjmp = 0;
8881 }
2c0262af 8882 }
2e70f6ef 8883
9ee6e8bb 8884done_generating:
2e70f6ef 8885 gen_icount_end(tb, num_insns);
2c0262af
FB
8886 *gen_opc_ptr = INDEX_op_end;
8887
8888#ifdef DEBUG_DISAS
8fec2b8c 8889 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
8890 qemu_log("----------------\n");
8891 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8892 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
8893 qemu_log("\n");
2c0262af
FB
8894 }
8895#endif
b5ff1b31
FB
8896 if (search_pc) {
8897 j = gen_opc_ptr - gen_opc_buf;
8898 lj++;
8899 while (lj <= j)
8900 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8901 } else {
2c0262af 8902 tb->size = dc->pc - pc_start;
2e70f6ef 8903 tb->icount = num_insns;
b5ff1b31 8904 }
2c0262af
FB
8905}
8906
/* Generate TCG intermediate code for TB (normal translation: the
   guest-PC index table is not filled in; search_pc = 0). */
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
8911
/* Generate intermediate code for TB while also recording, per opcode,
   the guest PC and instruction count (search_pc = 1).  Used when the
   guest PC must be recovered from a host PC after a fault. */
void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
8916
b5ff1b31
FB
/* Printable names for the ARM CPU modes, indexed by the low 4 bits of
   the PSR mode field; "???" marks reserved encodings.  The element
   pointers are const as well, so the whole table lives in read-only
   storage and accidental writes through it fail to compile. */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 8921
5fafdf24 8922void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8923 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8924 int flags)
2c0262af
FB
8925{
8926 int i;
06e80fc9 8927#if 0
bc380d17 8928 union {
b7bcbe95
FB
8929 uint32_t i;
8930 float s;
8931 } s0, s1;
8932 CPU_DoubleU d;
a94a6abf
PB
8933 /* ??? This assumes float64 and double have the same layout.
8934 Oh well, it's only debug dumps. */
8935 union {
8936 float64 f64;
8937 double d;
8938 } d0;
06e80fc9 8939#endif
b5ff1b31 8940 uint32_t psr;
2c0262af
FB
8941
8942 for(i=0;i<16;i++) {
7fe48483 8943 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 8944 if ((i % 4) == 3)
7fe48483 8945 cpu_fprintf(f, "\n");
2c0262af 8946 else
7fe48483 8947 cpu_fprintf(f, " ");
2c0262af 8948 }
b5ff1b31 8949 psr = cpsr_read(env);
687fa640
TS
8950 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8951 psr,
b5ff1b31
FB
8952 psr & (1 << 31) ? 'N' : '-',
8953 psr & (1 << 30) ? 'Z' : '-',
8954 psr & (1 << 29) ? 'C' : '-',
8955 psr & (1 << 28) ? 'V' : '-',
5fafdf24 8956 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 8957 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 8958
5e3f878a 8959#if 0
b7bcbe95 8960 for (i = 0; i < 16; i++) {
8e96005d
FB
8961 d.d = env->vfp.regs[i];
8962 s0.i = d.l.lower;
8963 s1.i = d.l.upper;
a94a6abf
PB
8964 d0.f64 = d.d;
8965 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 8966 i * 2, (int)s0.i, s0.s,
a94a6abf 8967 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 8968 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 8969 d0.d);
b7bcbe95 8970 }
40f137e1 8971 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 8972#endif
2c0262af 8973}
a6b025d3 8974
d2856f1a
AJ
/* Restore the guest PC (r15) after a fault in translated code: PC_POS
   indexes the gen_opc_pc table filled in when the TB was translated
   with search_pc set.  SEARCHED_PC and PUC are unused here. */
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}