]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
cpus: Change qemu_kvm_init_cpu_signals() argument to CPUState
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
1de7afc9 30#include "qemu/log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
36#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38/* currently all emulated v5 cores are also v5TE, so don't bother */
39#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
40#define ENABLE_ARCH_5J 0
41#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 45
86753403 46#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 47
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
b90372ad 56 /* Thumb-2 conditional execution bits. */
9ee6e8bb
PB
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
d8fd2954 62 int bswap_code;
b5ff1b31
FB
63#if !defined(CONFIG_USER_ONLY)
64 int user;
65#endif
5df8bac1 66 int vfp_enabled;
69d1fc22
PM
67 int vec_len;
68 int vec_stride;
2c0262af
FB
69} DisasContext;
70
e12ce78d
PM
71static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
b5ff1b31
FB
73#if defined(CONFIG_USER_ONLY)
74#define IS_USER(s) 1
75#else
76#define IS_USER(s) (s->user)
77#endif
78
9ee6e8bb 79/* These instructions trap after executing, so defer them until after the
b90372ad 80 conditional execution state has been updated. */
9ee6e8bb
PB
81#define DISAS_WFI 4
82#define DISAS_SWI 5
2c0262af 83
a7812ae4 84static TCGv_ptr cpu_env;
ad69471c 85/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 86static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 87static TCGv_i32 cpu_R[16];
66c374de 88static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
426f5abc
PB
89static TCGv_i32 cpu_exclusive_addr;
90static TCGv_i32 cpu_exclusive_val;
91static TCGv_i32 cpu_exclusive_high;
92#ifdef CONFIG_USER_ONLY
93static TCGv_i32 cpu_exclusive_test;
94static TCGv_i32 cpu_exclusive_info;
95#endif
ad69471c 96
b26eefb6 97/* FIXME: These should be removed. */
39d5492a 98static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 99static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 100
022c62cb 101#include "exec/gen-icount.h"
2e70f6ef 102
155c3eac
FN
103static const char *regnames[] =
104 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
105 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106
b26eefb6
PB
107/* initialize TCG globals. */
108void arm_translate_init(void)
109{
155c3eac
FN
110 int i;
111
a7812ae4
PB
112 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113
155c3eac
FN
114 for (i = 0; i < 16; i++) {
115 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 116 offsetof(CPUARMState, regs[i]),
155c3eac
FN
117 regnames[i]);
118 }
66c374de
AJ
119 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
120 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
121 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
122 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
123
426f5abc 124 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 125 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 126 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 127 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 128 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 129 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
130#ifdef CONFIG_USER_ONLY
131 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 132 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 133 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 134 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 135#endif
155c3eac 136
a7812ae4 137#define GEN_HELPER 2
7b59220e 138#include "helper.h"
b26eefb6
PB
139}
140
39d5492a 141static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 142{
39d5492a 143 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
144 tcg_gen_ld_i32(tmp, cpu_env, offset);
145 return tmp;
146}
147
0ecb72a5 148#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 149
39d5492a 150static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
151{
152 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 153 tcg_temp_free_i32(var);
d9ba4830
PB
154}
155
156#define store_cpu_field(var, name) \
0ecb72a5 157 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 158
b26eefb6 159/* Set a variable to the value of a CPU register. */
39d5492a 160static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
161{
162 if (reg == 15) {
163 uint32_t addr;
b90372ad 164 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
165 if (s->thumb)
166 addr = (long)s->pc + 2;
167 else
168 addr = (long)s->pc + 4;
169 tcg_gen_movi_i32(var, addr);
170 } else {
155c3eac 171 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
172 }
173}
174
175/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 176static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 177{
39d5492a 178 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
179 load_reg_var(s, tmp, reg);
180 return tmp;
181}
182
183/* Set a CPU register. The source must be a temporary and will be
184 marked as dead. */
39d5492a 185static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
186{
187 if (reg == 15) {
188 tcg_gen_andi_i32(var, var, ~1);
189 s->is_jmp = DISAS_JUMP;
190 }
155c3eac 191 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 192 tcg_temp_free_i32(var);
b26eefb6
PB
193}
194
/* Value extensions: in-place zero/sign extension of the low byte or
 * halfword of a 32-bit temporary. The *16 variants extend both
 * halfwords in parallel via helpers. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
b26eefb6 204
39d5492a 205static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 206{
39d5492a 207 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 208 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
209 tcg_temp_free_i32(tmp_mask);
210}
d9ba4830
PB
211/* Set NZCV flags from the high 4 bits of var. */
212#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
213
214static void gen_exception(int excp)
215{
39d5492a 216 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830 217 tcg_gen_movi_i32(tmp, excp);
1ce94f81 218 gen_helper_exception(cpu_env, tmp);
7d1b0095 219 tcg_temp_free_i32(tmp);
d9ba4830
PB
220}
221
39d5492a 222static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 223{
39d5492a
PM
224 TCGv_i32 tmp1 = tcg_temp_new_i32();
225 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
226 tcg_gen_ext16s_i32(tmp1, a);
227 tcg_gen_ext16s_i32(tmp2, b);
3670669c 228 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 229 tcg_temp_free_i32(tmp2);
3670669c
PB
230 tcg_gen_sari_i32(a, a, 16);
231 tcg_gen_sari_i32(b, b, 16);
232 tcg_gen_mul_i32(b, b, a);
233 tcg_gen_mov_i32(a, tmp1);
7d1b0095 234 tcg_temp_free_i32(tmp1);
3670669c
PB
235}
236
237/* Byteswap each halfword. */
39d5492a 238static void gen_rev16(TCGv_i32 var)
3670669c 239{
39d5492a 240 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
241 tcg_gen_shri_i32(tmp, var, 8);
242 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
243 tcg_gen_shli_i32(var, var, 8);
244 tcg_gen_andi_i32(var, var, 0xff00ff00);
245 tcg_gen_or_i32(var, var, tmp);
7d1b0095 246 tcg_temp_free_i32(tmp);
3670669c
PB
247}
248
249/* Byteswap low halfword and sign extend. */
39d5492a 250static void gen_revsh(TCGv_i32 var)
3670669c 251{
1a855029
AJ
252 tcg_gen_ext16u_i32(var, var);
253 tcg_gen_bswap16_i32(var, var);
254 tcg_gen_ext16s_i32(var, var);
3670669c
PB
255}
256
257/* Unsigned bitfield extract. */
39d5492a 258static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
259{
260 if (shift)
261 tcg_gen_shri_i32(var, var, shift);
262 tcg_gen_andi_i32(var, var, mask);
263}
264
265/* Signed bitfield extract. */
39d5492a 266static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
267{
268 uint32_t signbit;
269
270 if (shift)
271 tcg_gen_sari_i32(var, var, shift);
272 if (shift + width < 32) {
273 signbit = 1u << (width - 1);
274 tcg_gen_andi_i32(var, var, (1u << width) - 1);
275 tcg_gen_xori_i32(var, var, signbit);
276 tcg_gen_subi_i32(var, var, signbit);
277 }
278}
279
838fa72d 280/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 281static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 282{
838fa72d
AJ
283 TCGv_i64 tmp64 = tcg_temp_new_i64();
284
285 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 286 tcg_temp_free_i32(b);
838fa72d
AJ
287 tcg_gen_shli_i64(tmp64, tmp64, 32);
288 tcg_gen_add_i64(a, tmp64, a);
289
290 tcg_temp_free_i64(tmp64);
291 return a;
292}
293
294/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 295static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
296{
297 TCGv_i64 tmp64 = tcg_temp_new_i64();
298
299 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 300 tcg_temp_free_i32(b);
838fa72d
AJ
301 tcg_gen_shli_i64(tmp64, tmp64, 32);
302 tcg_gen_sub_i64(a, tmp64, a);
303
304 tcg_temp_free_i64(tmp64);
305 return a;
3670669c
PB
306}
307
5e3f878a 308/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 309static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 310{
39d5492a
PM
311 TCGv_i32 lo = tcg_temp_new_i32();
312 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 313 TCGv_i64 ret;
5e3f878a 314
831d7fe8 315 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 316 tcg_temp_free_i32(a);
7d1b0095 317 tcg_temp_free_i32(b);
831d7fe8
RH
318
319 ret = tcg_temp_new_i64();
320 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
321 tcg_temp_free_i32(lo);
322 tcg_temp_free_i32(hi);
831d7fe8
RH
323
324 return ret;
5e3f878a
PB
325}
326
39d5492a 327static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 328{
39d5492a
PM
329 TCGv_i32 lo = tcg_temp_new_i32();
330 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 331 TCGv_i64 ret;
5e3f878a 332
831d7fe8 333 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 334 tcg_temp_free_i32(a);
7d1b0095 335 tcg_temp_free_i32(b);
831d7fe8
RH
336
337 ret = tcg_temp_new_i64();
338 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
339 tcg_temp_free_i32(lo);
340 tcg_temp_free_i32(hi);
831d7fe8
RH
341
342 return ret;
5e3f878a
PB
343}
344
8f01245e 345/* Swap low and high halfwords. */
39d5492a 346static void gen_swap_half(TCGv_i32 var)
8f01245e 347{
39d5492a 348 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
349 tcg_gen_shri_i32(tmp, var, 16);
350 tcg_gen_shli_i32(var, var, 16);
351 tcg_gen_or_i32(var, var, tmp);
7d1b0095 352 tcg_temp_free_i32(tmp);
8f01245e
PB
353}
354
b26eefb6
PB
355/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
356 tmp = (t0 ^ t1) & 0x8000;
357 t0 &= ~0x8000;
358 t1 &= ~0x8000;
359 t0 = (t0 + t1) ^ tmp;
360 */
361
39d5492a 362static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 363{
39d5492a 364 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
365 tcg_gen_xor_i32(tmp, t0, t1);
366 tcg_gen_andi_i32(tmp, tmp, 0x8000);
367 tcg_gen_andi_i32(t0, t0, ~0x8000);
368 tcg_gen_andi_i32(t1, t1, ~0x8000);
369 tcg_gen_add_i32(t0, t0, t1);
370 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
371 tcg_temp_free_i32(tmp);
372 tcg_temp_free_i32(t1);
b26eefb6
PB
373}
374
375/* Set CF to the top bit of var. */
39d5492a 376static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 377{
66c374de 378 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
379}
380
381/* Set N and Z flags from var. */
39d5492a 382static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 383{
66c374de
AJ
384 tcg_gen_mov_i32(cpu_NF, var);
385 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
386}
387
388/* T0 += T1 + CF. */
39d5492a 389static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 390{
396e467c 391 tcg_gen_add_i32(t0, t0, t1);
66c374de 392 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
393}
394
e9bb4aa9 395/* dest = T0 + T1 + CF. */
39d5492a 396static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 397{
e9bb4aa9 398 tcg_gen_add_i32(dest, t0, t1);
66c374de 399 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
400}
401
3670669c 402/* dest = T0 - T1 + CF - 1. */
39d5492a 403static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 404{
3670669c 405 tcg_gen_sub_i32(dest, t0, t1);
66c374de 406 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 407 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
408}
409
72485ec4 410/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 411static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 412{
39d5492a 413 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
414 tcg_gen_movi_i32(tmp, 0);
415 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 416 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 417 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
418 tcg_gen_xor_i32(tmp, t0, t1);
419 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
420 tcg_temp_free_i32(tmp);
421 tcg_gen_mov_i32(dest, cpu_NF);
422}
423
49b4c31e 424/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 425static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 426{
39d5492a 427 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
428 if (TCG_TARGET_HAS_add2_i32) {
429 tcg_gen_movi_i32(tmp, 0);
430 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 431 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
432 } else {
433 TCGv_i64 q0 = tcg_temp_new_i64();
434 TCGv_i64 q1 = tcg_temp_new_i64();
435 tcg_gen_extu_i32_i64(q0, t0);
436 tcg_gen_extu_i32_i64(q1, t1);
437 tcg_gen_add_i64(q0, q0, q1);
438 tcg_gen_extu_i32_i64(q1, cpu_CF);
439 tcg_gen_add_i64(q0, q0, q1);
440 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
441 tcg_temp_free_i64(q0);
442 tcg_temp_free_i64(q1);
443 }
444 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
445 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
446 tcg_gen_xor_i32(tmp, t0, t1);
447 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
448 tcg_temp_free_i32(tmp);
449 tcg_gen_mov_i32(dest, cpu_NF);
450}
451
72485ec4 452/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 453static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 454{
39d5492a 455 TCGv_i32 tmp;
72485ec4
AJ
456 tcg_gen_sub_i32(cpu_NF, t0, t1);
457 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
458 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
459 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
460 tmp = tcg_temp_new_i32();
461 tcg_gen_xor_i32(tmp, t0, t1);
462 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
463 tcg_temp_free_i32(tmp);
464 tcg_gen_mov_i32(dest, cpu_NF);
465}
466
e77f0832 467/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 468static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 469{
39d5492a 470 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
471 tcg_gen_not_i32(tmp, t1);
472 gen_adc_CC(dest, t0, tmp);
39d5492a 473 tcg_temp_free_i32(tmp);
2de68a49
RH
474}
475
365af80e 476#define GEN_SHIFT(name) \
39d5492a 477static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 478{ \
39d5492a 479 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
480 tmp1 = tcg_temp_new_i32(); \
481 tcg_gen_andi_i32(tmp1, t1, 0xff); \
482 tmp2 = tcg_const_i32(0); \
483 tmp3 = tcg_const_i32(0x1f); \
484 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
485 tcg_temp_free_i32(tmp3); \
486 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
487 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
488 tcg_temp_free_i32(tmp2); \
489 tcg_temp_free_i32(tmp1); \
490}
491GEN_SHIFT(shl)
492GEN_SHIFT(shr)
493#undef GEN_SHIFT
494
39d5492a 495static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 496{
39d5492a 497 TCGv_i32 tmp1, tmp2;
365af80e
AJ
498 tmp1 = tcg_temp_new_i32();
499 tcg_gen_andi_i32(tmp1, t1, 0xff);
500 tmp2 = tcg_const_i32(0x1f);
501 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
502 tcg_temp_free_i32(tmp2);
503 tcg_gen_sar_i32(dest, t0, tmp1);
504 tcg_temp_free_i32(tmp1);
505}
506
39d5492a 507static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 508{
39d5492a
PM
509 TCGv_i32 c0 = tcg_const_i32(0);
510 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
511 tcg_gen_neg_i32(tmp, src);
512 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
513 tcg_temp_free_i32(c0);
514 tcg_temp_free_i32(tmp);
515}
ad69471c 516
39d5492a 517static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 518{
9a119ff6 519 if (shift == 0) {
66c374de 520 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 521 } else {
66c374de
AJ
522 tcg_gen_shri_i32(cpu_CF, var, shift);
523 if (shift != 31) {
524 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
525 }
9a119ff6 526 }
9a119ff6 527}
b26eefb6 528
9a119ff6 529/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
530static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
531 int shift, int flags)
9a119ff6
PB
532{
533 switch (shiftop) {
534 case 0: /* LSL */
535 if (shift != 0) {
536 if (flags)
537 shifter_out_im(var, 32 - shift);
538 tcg_gen_shli_i32(var, var, shift);
539 }
540 break;
541 case 1: /* LSR */
542 if (shift == 0) {
543 if (flags) {
66c374de 544 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
545 }
546 tcg_gen_movi_i32(var, 0);
547 } else {
548 if (flags)
549 shifter_out_im(var, shift - 1);
550 tcg_gen_shri_i32(var, var, shift);
551 }
552 break;
553 case 2: /* ASR */
554 if (shift == 0)
555 shift = 32;
556 if (flags)
557 shifter_out_im(var, shift - 1);
558 if (shift == 32)
559 shift = 31;
560 tcg_gen_sari_i32(var, var, shift);
561 break;
562 case 3: /* ROR/RRX */
563 if (shift != 0) {
564 if (flags)
565 shifter_out_im(var, shift - 1);
f669df27 566 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 567 } else {
39d5492a 568 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 569 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
570 if (flags)
571 shifter_out_im(var, 0);
572 tcg_gen_shri_i32(var, var, 1);
b26eefb6 573 tcg_gen_or_i32(var, var, tmp);
7d1b0095 574 tcg_temp_free_i32(tmp);
b26eefb6
PB
575 }
576 }
577};
578
39d5492a
PM
579static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
580 TCGv_i32 shift, int flags)
8984bd2e
PB
581{
582 if (flags) {
583 switch (shiftop) {
9ef39277
BS
584 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
585 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
586 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
587 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
588 }
589 } else {
590 switch (shiftop) {
365af80e
AJ
591 case 0:
592 gen_shl(var, var, shift);
593 break;
594 case 1:
595 gen_shr(var, var, shift);
596 break;
597 case 2:
598 gen_sar(var, var, shift);
599 break;
f669df27
AJ
600 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
601 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
602 }
603 }
7d1b0095 604 tcg_temp_free_i32(shift);
8984bd2e
PB
605}
606
6ddbc6e4
PB
607#define PAS_OP(pfx) \
608 switch (op2) { \
609 case 0: gen_pas_helper(glue(pfx,add16)); break; \
610 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
611 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
612 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
613 case 4: gen_pas_helper(glue(pfx,add8)); break; \
614 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
615 }
39d5492a 616static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 617{
a7812ae4 618 TCGv_ptr tmp;
6ddbc6e4
PB
619
620 switch (op1) {
621#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
622 case 1:
a7812ae4 623 tmp = tcg_temp_new_ptr();
0ecb72a5 624 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 625 PAS_OP(s)
b75263d6 626 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
627 break;
628 case 5:
a7812ae4 629 tmp = tcg_temp_new_ptr();
0ecb72a5 630 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 631 PAS_OP(u)
b75263d6 632 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
633 break;
634#undef gen_pas_helper
635#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
636 case 2:
637 PAS_OP(q);
638 break;
639 case 3:
640 PAS_OP(sh);
641 break;
642 case 6:
643 PAS_OP(uq);
644 break;
645 case 7:
646 PAS_OP(uh);
647 break;
648#undef gen_pas_helper
649 }
650}
9ee6e8bb
PB
651#undef PAS_OP
652
6ddbc6e4
PB
653/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
654#define PAS_OP(pfx) \
ed89a2f1 655 switch (op1) { \
6ddbc6e4
PB
656 case 0: gen_pas_helper(glue(pfx,add8)); break; \
657 case 1: gen_pas_helper(glue(pfx,add16)); break; \
658 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
659 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
660 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
661 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
662 }
39d5492a 663static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 664{
a7812ae4 665 TCGv_ptr tmp;
6ddbc6e4 666
ed89a2f1 667 switch (op2) {
6ddbc6e4
PB
668#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
669 case 0:
a7812ae4 670 tmp = tcg_temp_new_ptr();
0ecb72a5 671 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 672 PAS_OP(s)
b75263d6 673 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
674 break;
675 case 4:
a7812ae4 676 tmp = tcg_temp_new_ptr();
0ecb72a5 677 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 678 PAS_OP(u)
b75263d6 679 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
680 break;
681#undef gen_pas_helper
682#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
683 case 1:
684 PAS_OP(q);
685 break;
686 case 2:
687 PAS_OP(sh);
688 break;
689 case 5:
690 PAS_OP(uq);
691 break;
692 case 6:
693 PAS_OP(uh);
694 break;
695#undef gen_pas_helper
696 }
697}
9ee6e8bb
PB
698#undef PAS_OP
699
d9ba4830
PB
700static void gen_test_cc(int cc, int label)
701{
39d5492a 702 TCGv_i32 tmp;
d9ba4830
PB
703 int inv;
704
d9ba4830
PB
705 switch (cc) {
706 case 0: /* eq: Z */
66c374de 707 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
708 break;
709 case 1: /* ne: !Z */
66c374de 710 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
711 break;
712 case 2: /* cs: C */
66c374de 713 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
714 break;
715 case 3: /* cc: !C */
66c374de 716 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
717 break;
718 case 4: /* mi: N */
66c374de 719 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
720 break;
721 case 5: /* pl: !N */
66c374de 722 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
723 break;
724 case 6: /* vs: V */
66c374de 725 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
726 break;
727 case 7: /* vc: !V */
66c374de 728 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
729 break;
730 case 8: /* hi: C && !Z */
731 inv = gen_new_label();
66c374de
AJ
732 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
733 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
734 gen_set_label(inv);
735 break;
736 case 9: /* ls: !C || Z */
66c374de
AJ
737 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
738 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
739 break;
740 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
741 tmp = tcg_temp_new_i32();
742 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 743 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 744 tcg_temp_free_i32(tmp);
d9ba4830
PB
745 break;
746 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
747 tmp = tcg_temp_new_i32();
748 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 749 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 750 tcg_temp_free_i32(tmp);
d9ba4830
PB
751 break;
752 case 12: /* gt: !Z && N == V */
753 inv = gen_new_label();
66c374de
AJ
754 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
755 tmp = tcg_temp_new_i32();
756 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 757 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 758 tcg_temp_free_i32(tmp);
d9ba4830
PB
759 gen_set_label(inv);
760 break;
761 case 13: /* le: Z || N != V */
66c374de
AJ
762 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
763 tmp = tcg_temp_new_i32();
764 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 765 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 766 tcg_temp_free_i32(tmp);
d9ba4830
PB
767 break;
768 default:
769 fprintf(stderr, "Bad condition code 0x%x\n", cc);
770 abort();
771 }
d9ba4830 772}
2c0262af 773
/* For each of the 16 data-processing opcodes: 1 if it is a logical op
 * whose S-bit form sets only N/Z (and shifter C), 0 if it is an
 * arithmetic op that computes full NZCV. */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 792
d9ba4830
PB
793/* Set PC and Thumb state from an immediate address. */
794static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 795{
39d5492a 796 TCGv_i32 tmp;
99c475ab 797
b26eefb6 798 s->is_jmp = DISAS_UPDATE;
d9ba4830 799 if (s->thumb != (addr & 1)) {
7d1b0095 800 tmp = tcg_temp_new_i32();
d9ba4830 801 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 802 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 803 tcg_temp_free_i32(tmp);
d9ba4830 804 }
155c3eac 805 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
806}
807
808/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 809static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 810{
d9ba4830 811 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
812 tcg_gen_andi_i32(cpu_R[15], var, ~1);
813 tcg_gen_andi_i32(var, var, 1);
814 store_cpu_field(var, thumb);
d9ba4830
PB
815}
816
21aeb343
JR
817/* Variant of store_reg which uses branch&exchange logic when storing
818 to r15 in ARM architecture v7 and above. The source must be a temporary
819 and will be marked as dead. */
0ecb72a5 820static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 821 int reg, TCGv_i32 var)
21aeb343
JR
822{
823 if (reg == 15 && ENABLE_ARCH_7) {
824 gen_bx(s, var);
825 } else {
826 store_reg(s, reg, var);
827 }
828}
829
be5e7a76
DES
830/* Variant of store_reg which uses branch&exchange logic when storing
831 * to r15 in ARM architecture v5T and above. This is used for storing
832 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
833 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 834static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 835 int reg, TCGv_i32 var)
be5e7a76
DES
836{
837 if (reg == 15 && ENABLE_ARCH_5) {
838 gen_bx(s, var);
839 } else {
840 store_reg(s, reg, var);
841 }
842}
843
5e3f878a
PB
844static inline void gen_set_pc_im(uint32_t val)
845{
155c3eac 846 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
847}
848
b5ff1b31
FB
849/* Force a TB lookup after an instruction that changes the CPU state. */
850static inline void gen_lookup_tb(DisasContext *s)
851{
a6445c52 852 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
853 s->is_jmp = DISAS_UPDATE;
854}
855
b0109805 856static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 857 TCGv_i32 var)
2c0262af 858{
1e8d4eec 859 int val, rm, shift, shiftop;
39d5492a 860 TCGv_i32 offset;
2c0262af
FB
861
862 if (!(insn & (1 << 25))) {
863 /* immediate */
864 val = insn & 0xfff;
865 if (!(insn & (1 << 23)))
866 val = -val;
537730b9 867 if (val != 0)
b0109805 868 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
869 } else {
870 /* shift/register */
871 rm = (insn) & 0xf;
872 shift = (insn >> 7) & 0x1f;
1e8d4eec 873 shiftop = (insn >> 5) & 3;
b26eefb6 874 offset = load_reg(s, rm);
9a119ff6 875 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 876 if (!(insn & (1 << 23)))
b0109805 877 tcg_gen_sub_i32(var, var, offset);
2c0262af 878 else
b0109805 879 tcg_gen_add_i32(var, var, offset);
7d1b0095 880 tcg_temp_free_i32(offset);
2c0262af
FB
881 }
882}
883
191f9a93 884static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 885 int extra, TCGv_i32 var)
2c0262af
FB
886{
887 int val, rm;
39d5492a 888 TCGv_i32 offset;
3b46e624 889
2c0262af
FB
890 if (insn & (1 << 22)) {
891 /* immediate */
892 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
893 if (!(insn & (1 << 23)))
894 val = -val;
18acad92 895 val += extra;
537730b9 896 if (val != 0)
b0109805 897 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
898 } else {
899 /* register */
191f9a93 900 if (extra)
b0109805 901 tcg_gen_addi_i32(var, var, extra);
2c0262af 902 rm = (insn) & 0xf;
b26eefb6 903 offset = load_reg(s, rm);
2c0262af 904 if (!(insn & (1 << 23)))
b0109805 905 tcg_gen_sub_i32(var, var, offset);
2c0262af 906 else
b0109805 907 tcg_gen_add_i32(var, var, offset);
7d1b0095 908 tcg_temp_free_i32(offset);
2c0262af
FB
909 }
910}
911
5aaebd13
PM
912static TCGv_ptr get_fpstatus_ptr(int neon)
913{
914 TCGv_ptr statusptr = tcg_temp_new_ptr();
915 int offset;
916 if (neon) {
0ecb72a5 917 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 918 } else {
0ecb72a5 919 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
920 }
921 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
922 return statusptr;
923}
924
4373f3ce
PB
925#define VFP_OP2(name) \
926static inline void gen_vfp_##name(int dp) \
927{ \
ae1857ec
PM
928 TCGv_ptr fpst = get_fpstatus_ptr(0); \
929 if (dp) { \
930 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
931 } else { \
932 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
933 } \
934 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
935}
936
4373f3ce
PB
937VFP_OP2(add)
938VFP_OP2(sub)
939VFP_OP2(mul)
940VFP_OP2(div)
941
942#undef VFP_OP2
943
605a6aed
PM
944static inline void gen_vfp_F1_mul(int dp)
945{
946 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 947 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 948 if (dp) {
ae1857ec 949 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 950 } else {
ae1857ec 951 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 952 }
ae1857ec 953 tcg_temp_free_ptr(fpst);
605a6aed
PM
954}
955
956static inline void gen_vfp_F1_neg(int dp)
957{
958 /* Like gen_vfp_neg() but put result in F1 */
959 if (dp) {
960 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
961 } else {
962 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
963 }
964}
965
4373f3ce
PB
966static inline void gen_vfp_abs(int dp)
967{
968 if (dp)
969 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
970 else
971 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
972}
973
974static inline void gen_vfp_neg(int dp)
975{
976 if (dp)
977 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
978 else
979 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
980}
981
982static inline void gen_vfp_sqrt(int dp)
983{
984 if (dp)
985 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
986 else
987 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
988}
989
990static inline void gen_vfp_cmp(int dp)
991{
992 if (dp)
993 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
994 else
995 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
996}
997
998static inline void gen_vfp_cmpe(int dp)
999{
1000 if (dp)
1001 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1002 else
1003 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1004}
1005
1006static inline void gen_vfp_F1_ld0(int dp)
1007{
1008 if (dp)
5b340b51 1009 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1010 else
5b340b51 1011 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1012}
1013
5500b06c
PM
/* Emit an integer-to-float conversion on F0.  'neon' selects which
 * float_status governs the conversion (Neon standard vs VFP).
 * Note the 'd' variant's integer source is still the 32-bit cpu_F0s. */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)  /* unsigned int to float */
VFP_GEN_ITOF(sito)  /* signed int to float */
#undef VFP_GEN_ITOF
4373f3ce 1029
5500b06c
PM
/* Emit a float-to-integer conversion on F0.  The integer result always
 * lands in the 32-bit cpu_F0s, whatever the source precision. */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

/* The 'z' variants are the round-toward-zero forms. */
VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce
PB
1047
/* Emit a fixed-point <-> float conversion on F0.  'shift' is the
 * fixed-point fraction width, passed to the helper as an i32 constant;
 * 'neon' selects the float_status as usual. */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
/* to{s,u}{h,l}: float to signed/unsigned 16/32-bit fixed point;
 * {s,u}{h,l}to: the reverse direction. */
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1070
39d5492a 1071static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31
FB
1072{
1073 if (dp)
312eea9f 1074 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1075 else
312eea9f 1076 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1077}
1078
39d5492a 1079static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31
FB
1080{
1081 if (dp)
312eea9f 1082 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1083 else
312eea9f 1084 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1085}
1086
8e96005d
FB
1087static inline long
1088vfp_reg_offset (int dp, int reg)
1089{
1090 if (dp)
1091 return offsetof(CPUARMState, vfp.regs[reg]);
1092 else if (reg & 1) {
1093 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1094 + offsetof(CPU_DoubleU, l.upper);
1095 } else {
1096 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1097 + offsetof(CPU_DoubleU, l.lower);
1098 }
1099}
9ee6e8bb
PB
1100
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each NEON D register is two single-precision slots. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1110
39d5492a 1111static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1112{
39d5492a 1113 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1114 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1115 return tmp;
1116}
1117
/* Store 'var' into one 32-bit pass of NEON register 'reg'.
 * Consumes (frees) 'var'. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1123
/* Load the whole 64-bit NEON/VFP D register 'reg' into 'var'. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1128
/* Store 'var' into the whole 64-bit NEON/VFP D register 'reg'.
 * Does not free 'var'. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1133
4373f3ce
PB
/* VFP values live in the integer TCG register types, so the f32/f64
 * load/store ops are plain aliases of the i32/i64 ones. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1138
b7bcbe95
FB
1139static inline void gen_mov_F0_vreg(int dp, int reg)
1140{
1141 if (dp)
4373f3ce 1142 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1143 else
4373f3ce 1144 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1145}
1146
1147static inline void gen_mov_F1_vreg(int dp, int reg)
1148{
1149 if (dp)
4373f3ce 1150 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1151 else
4373f3ce 1152 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1153}
1154
1155static inline void gen_mov_vreg_F0(int dp, int reg)
1156{
1157 if (dp)
4373f3ce 1158 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1159 else
4373f3ce 1160 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1161}
1162
18c9b560
AZ
/* Bit 20 of a coprocessor insn: set when data moves from the
 * coprocessor to the ARM core (the load/read direction). */
#define ARM_CP_RW_BIT (1 << 20)
1164
/* Copy iwMMXt data register wRn into the 64-bit TCG value 'var'. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1169
/* Copy the 64-bit TCG value 'var' into iwMMXt data register wRn. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1174
39d5492a 1175static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1176{
39d5492a 1177 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1178 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1179 return var;
e677137d
PB
1180}
1181
/* Store 'var' into iwMMXt control register wCx.  Consumes (frees) 'var'. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1187
/* Write back the M0 working value to iwMMXt register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1192
/* Load iwMMXt register wRn into the M0 working value. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1197
/* M0 |= wRn (64-bit bitwise OR via the V1 scratch register). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1203
/* M0 &= wRn (64-bit bitwise AND via the V1 scratch register). */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1209
/* M0 ^= wRn (64-bit bitwise XOR via the V1 scratch register). */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1215
/* Emit a two-operand iwMMXt op: M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also receives cpu_env (it needs access
 * to CPU state). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long variants of an env op in one go. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand env op: M0 = helper(M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1240
/* Multiply/accumulate and sum-of-absolute-difference ops. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Interleave (unpack) ops, in byte/word/long element widths. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Unpack-with-extend ops ('u' and 's' presumably zero-/sign-extend;
 * see the helper implementations). */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise compares. */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max, signed and unsigned. */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Element-wise add/subtract (n/u/s suffix variants; see helpers). */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Packed averages (0/1 variants). */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Narrowing pack ops, unsigned- and signed-saturating. */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1297
e677137d
PB
1298static void gen_op_iwmmxt_set_mup(void)
1299{
39d5492a 1300 TCGv_i32 tmp;
e677137d
PB
1301 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1302 tcg_gen_ori_i32(tmp, tmp, 2);
1303 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1304}
1305
1306static void gen_op_iwmmxt_set_cup(void)
1307{
39d5492a 1308 TCGv_i32 tmp;
e677137d
PB
1309 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1310 tcg_gen_ori_i32(tmp, tmp, 1);
1311 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1312}
1313
1314static void gen_op_iwmmxt_setpsr_nz(void)
1315{
39d5492a 1316 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1317 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1318 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1319}
1320
/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1327
39d5492a
PM
1328static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1329 TCGv_i32 dest)
18c9b560
AZ
1330{
1331 int rd;
1332 uint32_t offset;
39d5492a 1333 TCGv_i32 tmp;
18c9b560
AZ
1334
1335 rd = (insn >> 16) & 0xf;
da6b5335 1336 tmp = load_reg(s, rd);
18c9b560
AZ
1337
1338 offset = (insn & 0xff) << ((insn >> 7) & 2);
1339 if (insn & (1 << 24)) {
1340 /* Pre indexed */
1341 if (insn & (1 << 23))
da6b5335 1342 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1343 else
da6b5335
FN
1344 tcg_gen_addi_i32(tmp, tmp, -offset);
1345 tcg_gen_mov_i32(dest, tmp);
18c9b560 1346 if (insn & (1 << 21))
da6b5335
FN
1347 store_reg(s, rd, tmp);
1348 else
7d1b0095 1349 tcg_temp_free_i32(tmp);
18c9b560
AZ
1350 } else if (insn & (1 << 21)) {
1351 /* Post indexed */
da6b5335 1352 tcg_gen_mov_i32(dest, tmp);
18c9b560 1353 if (insn & (1 << 23))
da6b5335 1354 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1355 else
da6b5335
FN
1356 tcg_gen_addi_i32(tmp, tmp, -offset);
1357 store_reg(s, rd, tmp);
18c9b560
AZ
1358 } else if (!(insn & (1 << 23)))
1359 return 1;
1360 return 0;
1361}
1362
39d5492a 1363static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1364{
1365 int rd = (insn >> 0) & 0xf;
39d5492a 1366 TCGv_i32 tmp;
18c9b560 1367
da6b5335
FN
1368 if (insn & (1 << 8)) {
1369 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1370 return 1;
da6b5335
FN
1371 } else {
1372 tmp = iwmmxt_load_creg(rd);
1373 }
1374 } else {
7d1b0095 1375 tmp = tcg_temp_new_i32();
da6b5335
FN
1376 iwmmxt_load_reg(cpu_V0, rd);
1377 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1378 }
1379 tcg_gen_andi_i32(tmp, tmp, mask);
1380 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1381 tcg_temp_free_i32(tmp);
18c9b560
AZ
1382 return 0;
1383}
1384
a1c7273b 1385/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1386 (ie. an undefined instruction). */
0ecb72a5 1387static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1388{
1389 int rd, wrd;
1390 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1391 TCGv_i32 addr;
1392 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1393
1394 if ((insn & 0x0e000e00) == 0x0c000000) {
1395 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1396 wrd = insn & 0xf;
1397 rdlo = (insn >> 12) & 0xf;
1398 rdhi = (insn >> 16) & 0xf;
1399 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1400 iwmmxt_load_reg(cpu_V0, wrd);
1401 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1402 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1403 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1404 } else { /* TMCRR */
da6b5335
FN
1405 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1406 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1407 gen_op_iwmmxt_set_mup();
1408 }
1409 return 0;
1410 }
1411
1412 wrd = (insn >> 12) & 0xf;
7d1b0095 1413 addr = tcg_temp_new_i32();
da6b5335 1414 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1415 tcg_temp_free_i32(addr);
18c9b560 1416 return 1;
da6b5335 1417 }
18c9b560
AZ
1418 if (insn & ARM_CP_RW_BIT) {
1419 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1420 tmp = tcg_temp_new_i32();
da6b5335
FN
1421 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1422 iwmmxt_store_creg(wrd, tmp);
18c9b560 1423 } else {
e677137d
PB
1424 i = 1;
1425 if (insn & (1 << 8)) {
1426 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1427 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1428 i = 0;
1429 } else { /* WLDRW wRd */
29531141
PM
1430 tmp = tcg_temp_new_i32();
1431 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
e677137d
PB
1432 }
1433 } else {
29531141 1434 tmp = tcg_temp_new_i32();
e677137d 1435 if (insn & (1 << 22)) { /* WLDRH */
29531141 1436 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
e677137d 1437 } else { /* WLDRB */
29531141 1438 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
e677137d
PB
1439 }
1440 }
1441 if (i) {
1442 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1443 tcg_temp_free_i32(tmp);
e677137d 1444 }
18c9b560
AZ
1445 gen_op_iwmmxt_movq_wRn_M0(wrd);
1446 }
1447 } else {
1448 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1449 tmp = iwmmxt_load_creg(wrd);
29531141 1450 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1451 } else {
1452 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1453 tmp = tcg_temp_new_i32();
e677137d
PB
1454 if (insn & (1 << 8)) {
1455 if (insn & (1 << 22)) { /* WSTRD */
da6b5335 1456 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1457 } else { /* WSTRW wRd */
1458 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
29531141 1459 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
e677137d
PB
1460 }
1461 } else {
1462 if (insn & (1 << 22)) { /* WSTRH */
1463 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
29531141 1464 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
e677137d
PB
1465 } else { /* WSTRB */
1466 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
29531141 1467 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
e677137d
PB
1468 }
1469 }
18c9b560 1470 }
29531141 1471 tcg_temp_free_i32(tmp);
18c9b560 1472 }
7d1b0095 1473 tcg_temp_free_i32(addr);
18c9b560
AZ
1474 return 0;
1475 }
1476
1477 if ((insn & 0x0f000000) != 0x0e000000)
1478 return 1;
1479
1480 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1481 case 0x000: /* WOR */
1482 wrd = (insn >> 12) & 0xf;
1483 rd0 = (insn >> 0) & 0xf;
1484 rd1 = (insn >> 16) & 0xf;
1485 gen_op_iwmmxt_movq_M0_wRn(rd0);
1486 gen_op_iwmmxt_orq_M0_wRn(rd1);
1487 gen_op_iwmmxt_setpsr_nz();
1488 gen_op_iwmmxt_movq_wRn_M0(wrd);
1489 gen_op_iwmmxt_set_mup();
1490 gen_op_iwmmxt_set_cup();
1491 break;
1492 case 0x011: /* TMCR */
1493 if (insn & 0xf)
1494 return 1;
1495 rd = (insn >> 12) & 0xf;
1496 wrd = (insn >> 16) & 0xf;
1497 switch (wrd) {
1498 case ARM_IWMMXT_wCID:
1499 case ARM_IWMMXT_wCASF:
1500 break;
1501 case ARM_IWMMXT_wCon:
1502 gen_op_iwmmxt_set_cup();
1503 /* Fall through. */
1504 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1505 tmp = iwmmxt_load_creg(wrd);
1506 tmp2 = load_reg(s, rd);
f669df27 1507 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1508 tcg_temp_free_i32(tmp2);
da6b5335 1509 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1510 break;
1511 case ARM_IWMMXT_wCGR0:
1512 case ARM_IWMMXT_wCGR1:
1513 case ARM_IWMMXT_wCGR2:
1514 case ARM_IWMMXT_wCGR3:
1515 gen_op_iwmmxt_set_cup();
da6b5335
FN
1516 tmp = load_reg(s, rd);
1517 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1518 break;
1519 default:
1520 return 1;
1521 }
1522 break;
1523 case 0x100: /* WXOR */
1524 wrd = (insn >> 12) & 0xf;
1525 rd0 = (insn >> 0) & 0xf;
1526 rd1 = (insn >> 16) & 0xf;
1527 gen_op_iwmmxt_movq_M0_wRn(rd0);
1528 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1529 gen_op_iwmmxt_setpsr_nz();
1530 gen_op_iwmmxt_movq_wRn_M0(wrd);
1531 gen_op_iwmmxt_set_mup();
1532 gen_op_iwmmxt_set_cup();
1533 break;
1534 case 0x111: /* TMRC */
1535 if (insn & 0xf)
1536 return 1;
1537 rd = (insn >> 12) & 0xf;
1538 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1539 tmp = iwmmxt_load_creg(wrd);
1540 store_reg(s, rd, tmp);
18c9b560
AZ
1541 break;
1542 case 0x300: /* WANDN */
1543 wrd = (insn >> 12) & 0xf;
1544 rd0 = (insn >> 0) & 0xf;
1545 rd1 = (insn >> 16) & 0xf;
1546 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1547 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1548 gen_op_iwmmxt_andq_M0_wRn(rd1);
1549 gen_op_iwmmxt_setpsr_nz();
1550 gen_op_iwmmxt_movq_wRn_M0(wrd);
1551 gen_op_iwmmxt_set_mup();
1552 gen_op_iwmmxt_set_cup();
1553 break;
1554 case 0x200: /* WAND */
1555 wrd = (insn >> 12) & 0xf;
1556 rd0 = (insn >> 0) & 0xf;
1557 rd1 = (insn >> 16) & 0xf;
1558 gen_op_iwmmxt_movq_M0_wRn(rd0);
1559 gen_op_iwmmxt_andq_M0_wRn(rd1);
1560 gen_op_iwmmxt_setpsr_nz();
1561 gen_op_iwmmxt_movq_wRn_M0(wrd);
1562 gen_op_iwmmxt_set_mup();
1563 gen_op_iwmmxt_set_cup();
1564 break;
1565 case 0x810: case 0xa10: /* WMADD */
1566 wrd = (insn >> 12) & 0xf;
1567 rd0 = (insn >> 0) & 0xf;
1568 rd1 = (insn >> 16) & 0xf;
1569 gen_op_iwmmxt_movq_M0_wRn(rd0);
1570 if (insn & (1 << 21))
1571 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1572 else
1573 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1574 gen_op_iwmmxt_movq_wRn_M0(wrd);
1575 gen_op_iwmmxt_set_mup();
1576 break;
1577 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1578 wrd = (insn >> 12) & 0xf;
1579 rd0 = (insn >> 16) & 0xf;
1580 rd1 = (insn >> 0) & 0xf;
1581 gen_op_iwmmxt_movq_M0_wRn(rd0);
1582 switch ((insn >> 22) & 3) {
1583 case 0:
1584 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1585 break;
1586 case 1:
1587 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1588 break;
1589 case 2:
1590 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1591 break;
1592 case 3:
1593 return 1;
1594 }
1595 gen_op_iwmmxt_movq_wRn_M0(wrd);
1596 gen_op_iwmmxt_set_mup();
1597 gen_op_iwmmxt_set_cup();
1598 break;
1599 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1600 wrd = (insn >> 12) & 0xf;
1601 rd0 = (insn >> 16) & 0xf;
1602 rd1 = (insn >> 0) & 0xf;
1603 gen_op_iwmmxt_movq_M0_wRn(rd0);
1604 switch ((insn >> 22) & 3) {
1605 case 0:
1606 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1607 break;
1608 case 1:
1609 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1610 break;
1611 case 2:
1612 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1613 break;
1614 case 3:
1615 return 1;
1616 }
1617 gen_op_iwmmxt_movq_wRn_M0(wrd);
1618 gen_op_iwmmxt_set_mup();
1619 gen_op_iwmmxt_set_cup();
1620 break;
1621 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1622 wrd = (insn >> 12) & 0xf;
1623 rd0 = (insn >> 16) & 0xf;
1624 rd1 = (insn >> 0) & 0xf;
1625 gen_op_iwmmxt_movq_M0_wRn(rd0);
1626 if (insn & (1 << 22))
1627 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1628 else
1629 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1630 if (!(insn & (1 << 20)))
1631 gen_op_iwmmxt_addl_M0_wRn(wrd);
1632 gen_op_iwmmxt_movq_wRn_M0(wrd);
1633 gen_op_iwmmxt_set_mup();
1634 break;
1635 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1636 wrd = (insn >> 12) & 0xf;
1637 rd0 = (insn >> 16) & 0xf;
1638 rd1 = (insn >> 0) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1640 if (insn & (1 << 21)) {
1641 if (insn & (1 << 20))
1642 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1643 else
1644 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1645 } else {
1646 if (insn & (1 << 20))
1647 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1648 else
1649 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1650 }
18c9b560
AZ
1651 gen_op_iwmmxt_movq_wRn_M0(wrd);
1652 gen_op_iwmmxt_set_mup();
1653 break;
1654 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1655 wrd = (insn >> 12) & 0xf;
1656 rd0 = (insn >> 16) & 0xf;
1657 rd1 = (insn >> 0) & 0xf;
1658 gen_op_iwmmxt_movq_M0_wRn(rd0);
1659 if (insn & (1 << 21))
1660 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1661 else
1662 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1663 if (!(insn & (1 << 20))) {
e677137d
PB
1664 iwmmxt_load_reg(cpu_V1, wrd);
1665 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1666 }
1667 gen_op_iwmmxt_movq_wRn_M0(wrd);
1668 gen_op_iwmmxt_set_mup();
1669 break;
1670 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1671 wrd = (insn >> 12) & 0xf;
1672 rd0 = (insn >> 16) & 0xf;
1673 rd1 = (insn >> 0) & 0xf;
1674 gen_op_iwmmxt_movq_M0_wRn(rd0);
1675 switch ((insn >> 22) & 3) {
1676 case 0:
1677 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1678 break;
1679 case 1:
1680 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1681 break;
1682 case 2:
1683 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1684 break;
1685 case 3:
1686 return 1;
1687 }
1688 gen_op_iwmmxt_movq_wRn_M0(wrd);
1689 gen_op_iwmmxt_set_mup();
1690 gen_op_iwmmxt_set_cup();
1691 break;
1692 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1697 if (insn & (1 << 22)) {
1698 if (insn & (1 << 20))
1699 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1700 else
1701 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1702 } else {
1703 if (insn & (1 << 20))
1704 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1705 else
1706 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1707 }
18c9b560
AZ
1708 gen_op_iwmmxt_movq_wRn_M0(wrd);
1709 gen_op_iwmmxt_set_mup();
1710 gen_op_iwmmxt_set_cup();
1711 break;
1712 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1713 wrd = (insn >> 12) & 0xf;
1714 rd0 = (insn >> 16) & 0xf;
1715 rd1 = (insn >> 0) & 0xf;
1716 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1717 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1718 tcg_gen_andi_i32(tmp, tmp, 7);
1719 iwmmxt_load_reg(cpu_V1, rd1);
1720 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1721 tcg_temp_free_i32(tmp);
18c9b560
AZ
1722 gen_op_iwmmxt_movq_wRn_M0(wrd);
1723 gen_op_iwmmxt_set_mup();
1724 break;
1725 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1726 if (((insn >> 6) & 3) == 3)
1727 return 1;
18c9b560
AZ
1728 rd = (insn >> 12) & 0xf;
1729 wrd = (insn >> 16) & 0xf;
da6b5335 1730 tmp = load_reg(s, rd);
18c9b560
AZ
1731 gen_op_iwmmxt_movq_M0_wRn(wrd);
1732 switch ((insn >> 6) & 3) {
1733 case 0:
da6b5335
FN
1734 tmp2 = tcg_const_i32(0xff);
1735 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1736 break;
1737 case 1:
da6b5335
FN
1738 tmp2 = tcg_const_i32(0xffff);
1739 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1740 break;
1741 case 2:
da6b5335
FN
1742 tmp2 = tcg_const_i32(0xffffffff);
1743 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1744 break;
da6b5335 1745 default:
39d5492a
PM
1746 TCGV_UNUSED_I32(tmp2);
1747 TCGV_UNUSED_I32(tmp3);
18c9b560 1748 }
da6b5335 1749 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1750 tcg_temp_free_i32(tmp3);
1751 tcg_temp_free_i32(tmp2);
7d1b0095 1752 tcg_temp_free_i32(tmp);
18c9b560
AZ
1753 gen_op_iwmmxt_movq_wRn_M0(wrd);
1754 gen_op_iwmmxt_set_mup();
1755 break;
1756 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1757 rd = (insn >> 12) & 0xf;
1758 wrd = (insn >> 16) & 0xf;
da6b5335 1759 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1760 return 1;
1761 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1762 tmp = tcg_temp_new_i32();
18c9b560
AZ
1763 switch ((insn >> 22) & 3) {
1764 case 0:
da6b5335
FN
1765 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1766 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1767 if (insn & 8) {
1768 tcg_gen_ext8s_i32(tmp, tmp);
1769 } else {
1770 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1771 }
1772 break;
1773 case 1:
da6b5335
FN
1774 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1775 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1776 if (insn & 8) {
1777 tcg_gen_ext16s_i32(tmp, tmp);
1778 } else {
1779 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1780 }
1781 break;
1782 case 2:
da6b5335
FN
1783 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1784 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1785 break;
18c9b560 1786 }
da6b5335 1787 store_reg(s, rd, tmp);
18c9b560
AZ
1788 break;
1789 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1790 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1791 return 1;
da6b5335 1792 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1793 switch ((insn >> 22) & 3) {
1794 case 0:
da6b5335 1795 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1796 break;
1797 case 1:
da6b5335 1798 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1799 break;
1800 case 2:
da6b5335 1801 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1802 break;
18c9b560 1803 }
da6b5335
FN
1804 tcg_gen_shli_i32(tmp, tmp, 28);
1805 gen_set_nzcv(tmp);
7d1b0095 1806 tcg_temp_free_i32(tmp);
18c9b560
AZ
1807 break;
1808 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1809 if (((insn >> 6) & 3) == 3)
1810 return 1;
18c9b560
AZ
1811 rd = (insn >> 12) & 0xf;
1812 wrd = (insn >> 16) & 0xf;
da6b5335 1813 tmp = load_reg(s, rd);
18c9b560
AZ
1814 switch ((insn >> 6) & 3) {
1815 case 0:
da6b5335 1816 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1817 break;
1818 case 1:
da6b5335 1819 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1820 break;
1821 case 2:
da6b5335 1822 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1823 break;
18c9b560 1824 }
7d1b0095 1825 tcg_temp_free_i32(tmp);
18c9b560
AZ
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 break;
1829 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1830 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1831 return 1;
da6b5335 1832 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1833 tmp2 = tcg_temp_new_i32();
da6b5335 1834 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1835 switch ((insn >> 22) & 3) {
1836 case 0:
1837 for (i = 0; i < 7; i ++) {
da6b5335
FN
1838 tcg_gen_shli_i32(tmp2, tmp2, 4);
1839 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1840 }
1841 break;
1842 case 1:
1843 for (i = 0; i < 3; i ++) {
da6b5335
FN
1844 tcg_gen_shli_i32(tmp2, tmp2, 8);
1845 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1846 }
1847 break;
1848 case 2:
da6b5335
FN
1849 tcg_gen_shli_i32(tmp2, tmp2, 16);
1850 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1851 break;
18c9b560 1852 }
da6b5335 1853 gen_set_nzcv(tmp);
7d1b0095
PM
1854 tcg_temp_free_i32(tmp2);
1855 tcg_temp_free_i32(tmp);
18c9b560
AZ
1856 break;
1857 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1858 wrd = (insn >> 12) & 0xf;
1859 rd0 = (insn >> 16) & 0xf;
1860 gen_op_iwmmxt_movq_M0_wRn(rd0);
1861 switch ((insn >> 22) & 3) {
1862 case 0:
e677137d 1863 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1864 break;
1865 case 1:
e677137d 1866 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1867 break;
1868 case 2:
e677137d 1869 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1870 break;
1871 case 3:
1872 return 1;
1873 }
1874 gen_op_iwmmxt_movq_wRn_M0(wrd);
1875 gen_op_iwmmxt_set_mup();
1876 break;
1877 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1878 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1879 return 1;
da6b5335 1880 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1881 tmp2 = tcg_temp_new_i32();
da6b5335 1882 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1883 switch ((insn >> 22) & 3) {
1884 case 0:
1885 for (i = 0; i < 7; i ++) {
da6b5335
FN
1886 tcg_gen_shli_i32(tmp2, tmp2, 4);
1887 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1888 }
1889 break;
1890 case 1:
1891 for (i = 0; i < 3; i ++) {
da6b5335
FN
1892 tcg_gen_shli_i32(tmp2, tmp2, 8);
1893 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1894 }
1895 break;
1896 case 2:
da6b5335
FN
1897 tcg_gen_shli_i32(tmp2, tmp2, 16);
1898 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1899 break;
18c9b560 1900 }
da6b5335 1901 gen_set_nzcv(tmp);
7d1b0095
PM
1902 tcg_temp_free_i32(tmp2);
1903 tcg_temp_free_i32(tmp);
18c9b560
AZ
1904 break;
1905 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1906 rd = (insn >> 12) & 0xf;
1907 rd0 = (insn >> 16) & 0xf;
da6b5335 1908 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1909 return 1;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1911 tmp = tcg_temp_new_i32();
18c9b560
AZ
1912 switch ((insn >> 22) & 3) {
1913 case 0:
da6b5335 1914 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1915 break;
1916 case 1:
da6b5335 1917 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1918 break;
1919 case 2:
da6b5335 1920 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1921 break;
18c9b560 1922 }
da6b5335 1923 store_reg(s, rd, tmp);
18c9b560
AZ
1924 break;
1925 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1926 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1927 wrd = (insn >> 12) & 0xf;
1928 rd0 = (insn >> 16) & 0xf;
1929 rd1 = (insn >> 0) & 0xf;
1930 gen_op_iwmmxt_movq_M0_wRn(rd0);
1931 switch ((insn >> 22) & 3) {
1932 case 0:
1933 if (insn & (1 << 21))
1934 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1935 else
1936 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1937 break;
1938 case 1:
1939 if (insn & (1 << 21))
1940 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1941 else
1942 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1943 break;
1944 case 2:
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1947 else
1948 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1949 break;
1950 case 3:
1951 return 1;
1952 }
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 gen_op_iwmmxt_set_cup();
1956 break;
1957 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1958 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1959 wrd = (insn >> 12) & 0xf;
1960 rd0 = (insn >> 16) & 0xf;
1961 gen_op_iwmmxt_movq_M0_wRn(rd0);
1962 switch ((insn >> 22) & 3) {
1963 case 0:
1964 if (insn & (1 << 21))
1965 gen_op_iwmmxt_unpacklsb_M0();
1966 else
1967 gen_op_iwmmxt_unpacklub_M0();
1968 break;
1969 case 1:
1970 if (insn & (1 << 21))
1971 gen_op_iwmmxt_unpacklsw_M0();
1972 else
1973 gen_op_iwmmxt_unpackluw_M0();
1974 break;
1975 case 2:
1976 if (insn & (1 << 21))
1977 gen_op_iwmmxt_unpacklsl_M0();
1978 else
1979 gen_op_iwmmxt_unpacklul_M0();
1980 break;
1981 case 3:
1982 return 1;
1983 }
1984 gen_op_iwmmxt_movq_wRn_M0(wrd);
1985 gen_op_iwmmxt_set_mup();
1986 gen_op_iwmmxt_set_cup();
1987 break;
1988 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1989 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1990 wrd = (insn >> 12) & 0xf;
1991 rd0 = (insn >> 16) & 0xf;
1992 gen_op_iwmmxt_movq_M0_wRn(rd0);
1993 switch ((insn >> 22) & 3) {
1994 case 0:
1995 if (insn & (1 << 21))
1996 gen_op_iwmmxt_unpackhsb_M0();
1997 else
1998 gen_op_iwmmxt_unpackhub_M0();
1999 break;
2000 case 1:
2001 if (insn & (1 << 21))
2002 gen_op_iwmmxt_unpackhsw_M0();
2003 else
2004 gen_op_iwmmxt_unpackhuw_M0();
2005 break;
2006 case 2:
2007 if (insn & (1 << 21))
2008 gen_op_iwmmxt_unpackhsl_M0();
2009 else
2010 gen_op_iwmmxt_unpackhul_M0();
2011 break;
2012 case 3:
2013 return 1;
2014 }
2015 gen_op_iwmmxt_movq_wRn_M0(wrd);
2016 gen_op_iwmmxt_set_mup();
2017 gen_op_iwmmxt_set_cup();
2018 break;
2019 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2020 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2021 if (((insn >> 22) & 3) == 0)
2022 return 1;
18c9b560
AZ
2023 wrd = (insn >> 12) & 0xf;
2024 rd0 = (insn >> 16) & 0xf;
2025 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2026 tmp = tcg_temp_new_i32();
da6b5335 2027 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2028 tcg_temp_free_i32(tmp);
18c9b560 2029 return 1;
da6b5335 2030 }
18c9b560 2031 switch ((insn >> 22) & 3) {
18c9b560 2032 case 1:
477955bd 2033 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2034 break;
2035 case 2:
477955bd 2036 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2037 break;
2038 case 3:
477955bd 2039 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2040 break;
2041 }
7d1b0095 2042 tcg_temp_free_i32(tmp);
18c9b560
AZ
2043 gen_op_iwmmxt_movq_wRn_M0(wrd);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2046 break;
2047 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2048 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2049 if (((insn >> 22) & 3) == 0)
2050 return 1;
18c9b560
AZ
2051 wrd = (insn >> 12) & 0xf;
2052 rd0 = (insn >> 16) & 0xf;
2053 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2054 tmp = tcg_temp_new_i32();
da6b5335 2055 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2056 tcg_temp_free_i32(tmp);
18c9b560 2057 return 1;
da6b5335 2058 }
18c9b560 2059 switch ((insn >> 22) & 3) {
18c9b560 2060 case 1:
477955bd 2061 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2062 break;
2063 case 2:
477955bd 2064 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2065 break;
2066 case 3:
477955bd 2067 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2068 break;
2069 }
7d1b0095 2070 tcg_temp_free_i32(tmp);
18c9b560
AZ
2071 gen_op_iwmmxt_movq_wRn_M0(wrd);
2072 gen_op_iwmmxt_set_mup();
2073 gen_op_iwmmxt_set_cup();
2074 break;
2075 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2076 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2077 if (((insn >> 22) & 3) == 0)
2078 return 1;
18c9b560
AZ
2079 wrd = (insn >> 12) & 0xf;
2080 rd0 = (insn >> 16) & 0xf;
2081 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2082 tmp = tcg_temp_new_i32();
da6b5335 2083 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2084 tcg_temp_free_i32(tmp);
18c9b560 2085 return 1;
da6b5335 2086 }
18c9b560 2087 switch ((insn >> 22) & 3) {
18c9b560 2088 case 1:
477955bd 2089 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2090 break;
2091 case 2:
477955bd 2092 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2093 break;
2094 case 3:
477955bd 2095 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2096 break;
2097 }
7d1b0095 2098 tcg_temp_free_i32(tmp);
18c9b560
AZ
2099 gen_op_iwmmxt_movq_wRn_M0(wrd);
2100 gen_op_iwmmxt_set_mup();
2101 gen_op_iwmmxt_set_cup();
2102 break;
2103 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2104 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2105 if (((insn >> 22) & 3) == 0)
2106 return 1;
18c9b560
AZ
2107 wrd = (insn >> 12) & 0xf;
2108 rd0 = (insn >> 16) & 0xf;
2109 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2110 tmp = tcg_temp_new_i32();
18c9b560 2111 switch ((insn >> 22) & 3) {
18c9b560 2112 case 1:
da6b5335 2113 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2114 tcg_temp_free_i32(tmp);
18c9b560 2115 return 1;
da6b5335 2116 }
477955bd 2117 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2118 break;
2119 case 2:
da6b5335 2120 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2121 tcg_temp_free_i32(tmp);
18c9b560 2122 return 1;
da6b5335 2123 }
477955bd 2124 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2125 break;
2126 case 3:
da6b5335 2127 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2128 tcg_temp_free_i32(tmp);
18c9b560 2129 return 1;
da6b5335 2130 }
477955bd 2131 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2132 break;
2133 }
7d1b0095 2134 tcg_temp_free_i32(tmp);
18c9b560
AZ
2135 gen_op_iwmmxt_movq_wRn_M0(wrd);
2136 gen_op_iwmmxt_set_mup();
2137 gen_op_iwmmxt_set_cup();
2138 break;
2139 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2140 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2141 wrd = (insn >> 12) & 0xf;
2142 rd0 = (insn >> 16) & 0xf;
2143 rd1 = (insn >> 0) & 0xf;
2144 gen_op_iwmmxt_movq_M0_wRn(rd0);
2145 switch ((insn >> 22) & 3) {
2146 case 0:
2147 if (insn & (1 << 21))
2148 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2149 else
2150 gen_op_iwmmxt_minub_M0_wRn(rd1);
2151 break;
2152 case 1:
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2155 else
2156 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2157 break;
2158 case 2:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2161 else
2162 gen_op_iwmmxt_minul_M0_wRn(rd1);
2163 break;
2164 case 3:
2165 return 1;
2166 }
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 break;
2170 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2171 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2172 wrd = (insn >> 12) & 0xf;
2173 rd0 = (insn >> 16) & 0xf;
2174 rd1 = (insn >> 0) & 0xf;
2175 gen_op_iwmmxt_movq_M0_wRn(rd0);
2176 switch ((insn >> 22) & 3) {
2177 case 0:
2178 if (insn & (1 << 21))
2179 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2180 else
2181 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2182 break;
2183 case 1:
2184 if (insn & (1 << 21))
2185 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2186 else
2187 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2188 break;
2189 case 2:
2190 if (insn & (1 << 21))
2191 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2192 else
2193 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2194 break;
2195 case 3:
2196 return 1;
2197 }
2198 gen_op_iwmmxt_movq_wRn_M0(wrd);
2199 gen_op_iwmmxt_set_mup();
2200 break;
2201 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2202 case 0x402: case 0x502: case 0x602: case 0x702:
2203 wrd = (insn >> 12) & 0xf;
2204 rd0 = (insn >> 16) & 0xf;
2205 rd1 = (insn >> 0) & 0xf;
2206 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2207 tmp = tcg_const_i32((insn >> 20) & 3);
2208 iwmmxt_load_reg(cpu_V1, rd1);
2209 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2210 tcg_temp_free_i32(tmp);
18c9b560
AZ
2211 gen_op_iwmmxt_movq_wRn_M0(wrd);
2212 gen_op_iwmmxt_set_mup();
2213 break;
2214 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2215 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2216 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2217 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2218 wrd = (insn >> 12) & 0xf;
2219 rd0 = (insn >> 16) & 0xf;
2220 rd1 = (insn >> 0) & 0xf;
2221 gen_op_iwmmxt_movq_M0_wRn(rd0);
2222 switch ((insn >> 20) & 0xf) {
2223 case 0x0:
2224 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2225 break;
2226 case 0x1:
2227 gen_op_iwmmxt_subub_M0_wRn(rd1);
2228 break;
2229 case 0x3:
2230 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2231 break;
2232 case 0x4:
2233 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2234 break;
2235 case 0x5:
2236 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2237 break;
2238 case 0x7:
2239 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2240 break;
2241 case 0x8:
2242 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2243 break;
2244 case 0x9:
2245 gen_op_iwmmxt_subul_M0_wRn(rd1);
2246 break;
2247 case 0xb:
2248 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2249 break;
2250 default:
2251 return 1;
2252 }
2253 gen_op_iwmmxt_movq_wRn_M0(wrd);
2254 gen_op_iwmmxt_set_mup();
2255 gen_op_iwmmxt_set_cup();
2256 break;
2257 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2258 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2259 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2260 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2261 wrd = (insn >> 12) & 0xf;
2262 rd0 = (insn >> 16) & 0xf;
2263 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2264 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2265 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2266 tcg_temp_free_i32(tmp);
18c9b560
AZ
2267 gen_op_iwmmxt_movq_wRn_M0(wrd);
2268 gen_op_iwmmxt_set_mup();
2269 gen_op_iwmmxt_set_cup();
2270 break;
2271 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2272 case 0x418: case 0x518: case 0x618: case 0x718:
2273 case 0x818: case 0x918: case 0xa18: case 0xb18:
2274 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2275 wrd = (insn >> 12) & 0xf;
2276 rd0 = (insn >> 16) & 0xf;
2277 rd1 = (insn >> 0) & 0xf;
2278 gen_op_iwmmxt_movq_M0_wRn(rd0);
2279 switch ((insn >> 20) & 0xf) {
2280 case 0x0:
2281 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2282 break;
2283 case 0x1:
2284 gen_op_iwmmxt_addub_M0_wRn(rd1);
2285 break;
2286 case 0x3:
2287 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2288 break;
2289 case 0x4:
2290 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2291 break;
2292 case 0x5:
2293 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2294 break;
2295 case 0x7:
2296 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2297 break;
2298 case 0x8:
2299 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2300 break;
2301 case 0x9:
2302 gen_op_iwmmxt_addul_M0_wRn(rd1);
2303 break;
2304 case 0xb:
2305 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2306 break;
2307 default:
2308 return 1;
2309 }
2310 gen_op_iwmmxt_movq_wRn_M0(wrd);
2311 gen_op_iwmmxt_set_mup();
2312 gen_op_iwmmxt_set_cup();
2313 break;
2314 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2315 case 0x408: case 0x508: case 0x608: case 0x708:
2316 case 0x808: case 0x908: case 0xa08: case 0xb08:
2317 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2318 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2319 return 1;
18c9b560
AZ
2320 wrd = (insn >> 12) & 0xf;
2321 rd0 = (insn >> 16) & 0xf;
2322 rd1 = (insn >> 0) & 0xf;
2323 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2324 switch ((insn >> 22) & 3) {
18c9b560
AZ
2325 case 1:
2326 if (insn & (1 << 21))
2327 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2328 else
2329 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2330 break;
2331 case 2:
2332 if (insn & (1 << 21))
2333 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2334 else
2335 gen_op_iwmmxt_packul_M0_wRn(rd1);
2336 break;
2337 case 3:
2338 if (insn & (1 << 21))
2339 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2340 else
2341 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2342 break;
2343 }
2344 gen_op_iwmmxt_movq_wRn_M0(wrd);
2345 gen_op_iwmmxt_set_mup();
2346 gen_op_iwmmxt_set_cup();
2347 break;
2348 case 0x201: case 0x203: case 0x205: case 0x207:
2349 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2350 case 0x211: case 0x213: case 0x215: case 0x217:
2351 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2352 wrd = (insn >> 5) & 0xf;
2353 rd0 = (insn >> 12) & 0xf;
2354 rd1 = (insn >> 0) & 0xf;
2355 if (rd0 == 0xf || rd1 == 0xf)
2356 return 1;
2357 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2358 tmp = load_reg(s, rd0);
2359 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2360 switch ((insn >> 16) & 0xf) {
2361 case 0x0: /* TMIA */
da6b5335 2362 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2363 break;
2364 case 0x8: /* TMIAPH */
da6b5335 2365 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2366 break;
2367 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2368 if (insn & (1 << 16))
da6b5335 2369 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2370 if (insn & (1 << 17))
da6b5335
FN
2371 tcg_gen_shri_i32(tmp2, tmp2, 16);
2372 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2373 break;
2374 default:
7d1b0095
PM
2375 tcg_temp_free_i32(tmp2);
2376 tcg_temp_free_i32(tmp);
18c9b560
AZ
2377 return 1;
2378 }
7d1b0095
PM
2379 tcg_temp_free_i32(tmp2);
2380 tcg_temp_free_i32(tmp);
18c9b560
AZ
2381 gen_op_iwmmxt_movq_wRn_M0(wrd);
2382 gen_op_iwmmxt_set_mup();
2383 break;
2384 default:
2385 return 1;
2386 }
2387
2388 return 0;
2389}
2390
a1c7273b 2391/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2392 (ie. an undefined instruction). */
0ecb72a5 2393static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2394{
2395 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2396 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2397
2398 if ((insn & 0x0ff00f10) == 0x0e200010) {
2399 /* Multiply with Internal Accumulate Format */
2400 rd0 = (insn >> 12) & 0xf;
2401 rd1 = insn & 0xf;
2402 acc = (insn >> 5) & 7;
2403
2404 if (acc != 0)
2405 return 1;
2406
3a554c0f
FN
2407 tmp = load_reg(s, rd0);
2408 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2409 switch ((insn >> 16) & 0xf) {
2410 case 0x0: /* MIA */
3a554c0f 2411 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2412 break;
2413 case 0x8: /* MIAPH */
3a554c0f 2414 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2415 break;
2416 case 0xc: /* MIABB */
2417 case 0xd: /* MIABT */
2418 case 0xe: /* MIATB */
2419 case 0xf: /* MIATT */
18c9b560 2420 if (insn & (1 << 16))
3a554c0f 2421 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2422 if (insn & (1 << 17))
3a554c0f
FN
2423 tcg_gen_shri_i32(tmp2, tmp2, 16);
2424 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2425 break;
2426 default:
2427 return 1;
2428 }
7d1b0095
PM
2429 tcg_temp_free_i32(tmp2);
2430 tcg_temp_free_i32(tmp);
18c9b560
AZ
2431
2432 gen_op_iwmmxt_movq_wRn_M0(acc);
2433 return 0;
2434 }
2435
2436 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2437 /* Internal Accumulator Access Format */
2438 rdhi = (insn >> 16) & 0xf;
2439 rdlo = (insn >> 12) & 0xf;
2440 acc = insn & 7;
2441
2442 if (acc != 0)
2443 return 1;
2444
2445 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2446 iwmmxt_load_reg(cpu_V0, acc);
2447 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2448 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2449 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2450 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2451 } else { /* MAR */
3a554c0f
FN
2452 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2453 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2454 }
2455 return 0;
2456 }
2457
2458 return 1;
2459}
2460
9ee6e8bb
PB
/* Shift x right by n bits when n is positive, left by -n when negative;
   lets the field-extraction macros below handle bit positions on either
   side of the target position with one expression.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision (S) register number: the 4-bit field at
   'bigbit' supplies the high bits, the single bit at 'smallbit' is the
   low bit of the register number.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision (D) register number into 'reg'.  With VFP3
   the bit at 'smallbit' is the high (5th) bit of the register number;
   pre-VFP3 cores only have 16 D registers, so that bit must be zero or
   the instruction is treated as UNDEF (macro returns 1 from the
   enclosing function).  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Convenience wrappers for the standard Vd/Vn/Vm field positions.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2480
4373f3ce 2481/* Move between integer and VFP cores. */
39d5492a 2482static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2483{
39d5492a 2484 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2485 tcg_gen_mov_i32(tmp, cpu_F0s);
2486 return tmp;
2487}
2488
39d5492a 2489static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2490{
2491 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2492 tcg_temp_free_i32(tmp);
4373f3ce
PB
2493}
2494
39d5492a 2495static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2496{
39d5492a 2497 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2498 if (shift)
2499 tcg_gen_shri_i32(var, var, shift);
86831435 2500 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2501 tcg_gen_shli_i32(tmp, var, 8);
2502 tcg_gen_or_i32(var, var, tmp);
2503 tcg_gen_shli_i32(tmp, var, 16);
2504 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2505 tcg_temp_free_i32(tmp);
ad69471c
PB
2506}
2507
39d5492a 2508static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2509{
39d5492a 2510 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2511 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2512 tcg_gen_shli_i32(tmp, var, 16);
2513 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2514 tcg_temp_free_i32(tmp);
ad69471c
PB
2515}
2516
39d5492a 2517static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2518{
39d5492a 2519 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2520 tcg_gen_andi_i32(var, var, 0xffff0000);
2521 tcg_gen_shri_i32(tmp, var, 16);
2522 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2523 tcg_temp_free_i32(tmp);
ad69471c
PB
2524}
2525
39d5492a 2526static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2527{
2528 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2529 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2530 switch (size) {
2531 case 0:
58ab8e96 2532 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
8e18cde3
PM
2533 gen_neon_dup_u8(tmp, 0);
2534 break;
2535 case 1:
58ab8e96 2536 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
8e18cde3
PM
2537 gen_neon_dup_low16(tmp);
2538 break;
2539 case 2:
58ab8e96 2540 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
8e18cde3
PM
2541 break;
2542 default: /* Avoid compiler warnings. */
2543 abort();
2544 }
2545 return tmp;
2546}
2547
a1c7273b 2548/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2549 (ie. an undefined instruction). */
0ecb72a5 2550static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2551{
2552 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2553 int dp, veclen;
39d5492a
PM
2554 TCGv_i32 addr;
2555 TCGv_i32 tmp;
2556 TCGv_i32 tmp2;
b7bcbe95 2557
40f137e1
PB
2558 if (!arm_feature(env, ARM_FEATURE_VFP))
2559 return 1;
2560
5df8bac1 2561 if (!s->vfp_enabled) {
9ee6e8bb 2562 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2563 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2564 return 1;
2565 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2566 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2567 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2568 return 1;
2569 }
b7bcbe95
FB
2570 dp = ((insn & 0xf00) == 0xb00);
2571 switch ((insn >> 24) & 0xf) {
2572 case 0xe:
2573 if (insn & (1 << 4)) {
2574 /* single register transfer */
b7bcbe95
FB
2575 rd = (insn >> 12) & 0xf;
2576 if (dp) {
9ee6e8bb
PB
2577 int size;
2578 int pass;
2579
2580 VFP_DREG_N(rn, insn);
2581 if (insn & 0xf)
b7bcbe95 2582 return 1;
9ee6e8bb
PB
2583 if (insn & 0x00c00060
2584 && !arm_feature(env, ARM_FEATURE_NEON))
2585 return 1;
2586
2587 pass = (insn >> 21) & 1;
2588 if (insn & (1 << 22)) {
2589 size = 0;
2590 offset = ((insn >> 5) & 3) * 8;
2591 } else if (insn & (1 << 5)) {
2592 size = 1;
2593 offset = (insn & (1 << 6)) ? 16 : 0;
2594 } else {
2595 size = 2;
2596 offset = 0;
2597 }
18c9b560 2598 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2599 /* vfp->arm */
ad69471c 2600 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2601 switch (size) {
2602 case 0:
9ee6e8bb 2603 if (offset)
ad69471c 2604 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2605 if (insn & (1 << 23))
ad69471c 2606 gen_uxtb(tmp);
9ee6e8bb 2607 else
ad69471c 2608 gen_sxtb(tmp);
9ee6e8bb
PB
2609 break;
2610 case 1:
9ee6e8bb
PB
2611 if (insn & (1 << 23)) {
2612 if (offset) {
ad69471c 2613 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2614 } else {
ad69471c 2615 gen_uxth(tmp);
9ee6e8bb
PB
2616 }
2617 } else {
2618 if (offset) {
ad69471c 2619 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2620 } else {
ad69471c 2621 gen_sxth(tmp);
9ee6e8bb
PB
2622 }
2623 }
2624 break;
2625 case 2:
9ee6e8bb
PB
2626 break;
2627 }
ad69471c 2628 store_reg(s, rd, tmp);
b7bcbe95
FB
2629 } else {
2630 /* arm->vfp */
ad69471c 2631 tmp = load_reg(s, rd);
9ee6e8bb
PB
2632 if (insn & (1 << 23)) {
2633 /* VDUP */
2634 if (size == 0) {
ad69471c 2635 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2636 } else if (size == 1) {
ad69471c 2637 gen_neon_dup_low16(tmp);
9ee6e8bb 2638 }
cbbccffc 2639 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2640 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2641 tcg_gen_mov_i32(tmp2, tmp);
2642 neon_store_reg(rn, n, tmp2);
2643 }
2644 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2645 } else {
2646 /* VMOV */
2647 switch (size) {
2648 case 0:
ad69471c 2649 tmp2 = neon_load_reg(rn, pass);
d593c48e 2650 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2651 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2652 break;
2653 case 1:
ad69471c 2654 tmp2 = neon_load_reg(rn, pass);
d593c48e 2655 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2656 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2657 break;
2658 case 2:
9ee6e8bb
PB
2659 break;
2660 }
ad69471c 2661 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2662 }
b7bcbe95 2663 }
9ee6e8bb
PB
2664 } else { /* !dp */
2665 if ((insn & 0x6f) != 0x00)
2666 return 1;
2667 rn = VFP_SREG_N(insn);
18c9b560 2668 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2669 /* vfp->arm */
2670 if (insn & (1 << 21)) {
2671 /* system register */
40f137e1 2672 rn >>= 1;
9ee6e8bb 2673
b7bcbe95 2674 switch (rn) {
40f137e1 2675 case ARM_VFP_FPSID:
4373f3ce 2676 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2677 VFP3 restricts all id registers to privileged
2678 accesses. */
2679 if (IS_USER(s)
2680 && arm_feature(env, ARM_FEATURE_VFP3))
2681 return 1;
4373f3ce 2682 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2683 break;
40f137e1 2684 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2685 if (IS_USER(s))
2686 return 1;
4373f3ce 2687 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2688 break;
40f137e1
PB
2689 case ARM_VFP_FPINST:
2690 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2691 /* Not present in VFP3. */
2692 if (IS_USER(s)
2693 || arm_feature(env, ARM_FEATURE_VFP3))
2694 return 1;
4373f3ce 2695 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2696 break;
40f137e1 2697 case ARM_VFP_FPSCR:
601d70b9 2698 if (rd == 15) {
4373f3ce
PB
2699 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2700 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2701 } else {
7d1b0095 2702 tmp = tcg_temp_new_i32();
4373f3ce
PB
2703 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2704 }
b7bcbe95 2705 break;
9ee6e8bb
PB
2706 case ARM_VFP_MVFR0:
2707 case ARM_VFP_MVFR1:
2708 if (IS_USER(s)
06ed5d66 2709 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2710 return 1;
4373f3ce 2711 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2712 break;
b7bcbe95
FB
2713 default:
2714 return 1;
2715 }
2716 } else {
2717 gen_mov_F0_vreg(0, rn);
4373f3ce 2718 tmp = gen_vfp_mrs();
b7bcbe95
FB
2719 }
2720 if (rd == 15) {
b5ff1b31 2721 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2722 gen_set_nzcv(tmp);
7d1b0095 2723 tcg_temp_free_i32(tmp);
4373f3ce
PB
2724 } else {
2725 store_reg(s, rd, tmp);
2726 }
b7bcbe95
FB
2727 } else {
2728 /* arm->vfp */
b7bcbe95 2729 if (insn & (1 << 21)) {
40f137e1 2730 rn >>= 1;
b7bcbe95
FB
2731 /* system register */
2732 switch (rn) {
40f137e1 2733 case ARM_VFP_FPSID:
9ee6e8bb
PB
2734 case ARM_VFP_MVFR0:
2735 case ARM_VFP_MVFR1:
b7bcbe95
FB
2736 /* Writes are ignored. */
2737 break;
40f137e1 2738 case ARM_VFP_FPSCR:
e4c1cfa5 2739 tmp = load_reg(s, rd);
4373f3ce 2740 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2741 tcg_temp_free_i32(tmp);
b5ff1b31 2742 gen_lookup_tb(s);
b7bcbe95 2743 break;
40f137e1 2744 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2745 if (IS_USER(s))
2746 return 1;
71b3c3de
JR
2747 /* TODO: VFP subarchitecture support.
2748 * For now, keep the EN bit only */
e4c1cfa5 2749 tmp = load_reg(s, rd);
71b3c3de 2750 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2751 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2752 gen_lookup_tb(s);
2753 break;
2754 case ARM_VFP_FPINST:
2755 case ARM_VFP_FPINST2:
e4c1cfa5 2756 tmp = load_reg(s, rd);
4373f3ce 2757 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2758 break;
b7bcbe95
FB
2759 default:
2760 return 1;
2761 }
2762 } else {
e4c1cfa5 2763 tmp = load_reg(s, rd);
4373f3ce 2764 gen_vfp_msr(tmp);
b7bcbe95
FB
2765 gen_mov_vreg_F0(0, rn);
2766 }
2767 }
2768 }
2769 } else {
2770 /* data processing */
2771 /* The opcode is in bits 23, 21, 20 and 6. */
2772 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2773 if (dp) {
2774 if (op == 15) {
2775 /* rn is opcode */
2776 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2777 } else {
2778 /* rn is register number */
9ee6e8bb 2779 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2780 }
2781
04595bf6 2782 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2783 /* Integer or single precision destination. */
9ee6e8bb 2784 rd = VFP_SREG_D(insn);
b7bcbe95 2785 } else {
9ee6e8bb 2786 VFP_DREG_D(rd, insn);
b7bcbe95 2787 }
04595bf6
PM
2788 if (op == 15 &&
2789 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2790 /* VCVT from int is always from S reg regardless of dp bit.
2791 * VCVT with immediate frac_bits has same format as SREG_M
2792 */
2793 rm = VFP_SREG_M(insn);
b7bcbe95 2794 } else {
9ee6e8bb 2795 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2796 }
2797 } else {
9ee6e8bb 2798 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2799 if (op == 15 && rn == 15) {
2800 /* Double precision destination. */
9ee6e8bb
PB
2801 VFP_DREG_D(rd, insn);
2802 } else {
2803 rd = VFP_SREG_D(insn);
2804 }
04595bf6
PM
2805 /* NB that we implicitly rely on the encoding for the frac_bits
2806 * in VCVT of fixed to float being the same as that of an SREG_M
2807 */
9ee6e8bb 2808 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2809 }
2810
69d1fc22 2811 veclen = s->vec_len;
b7bcbe95
FB
2812 if (op == 15 && rn > 3)
2813 veclen = 0;
2814
2815 /* Shut up compiler warnings. */
2816 delta_m = 0;
2817 delta_d = 0;
2818 bank_mask = 0;
3b46e624 2819
b7bcbe95
FB
2820 if (veclen > 0) {
2821 if (dp)
2822 bank_mask = 0xc;
2823 else
2824 bank_mask = 0x18;
2825
2826 /* Figure out what type of vector operation this is. */
2827 if ((rd & bank_mask) == 0) {
2828 /* scalar */
2829 veclen = 0;
2830 } else {
2831 if (dp)
69d1fc22 2832 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2833 else
69d1fc22 2834 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2835
2836 if ((rm & bank_mask) == 0) {
2837 /* mixed scalar/vector */
2838 delta_m = 0;
2839 } else {
2840 /* vector */
2841 delta_m = delta_d;
2842 }
2843 }
2844 }
2845
2846 /* Load the initial operands. */
2847 if (op == 15) {
2848 switch (rn) {
2849 case 16:
2850 case 17:
2851 /* Integer source */
2852 gen_mov_F0_vreg(0, rm);
2853 break;
2854 case 8:
2855 case 9:
2856 /* Compare */
2857 gen_mov_F0_vreg(dp, rd);
2858 gen_mov_F1_vreg(dp, rm);
2859 break;
2860 case 10:
2861 case 11:
2862 /* Compare with zero */
2863 gen_mov_F0_vreg(dp, rd);
2864 gen_vfp_F1_ld0(dp);
2865 break;
9ee6e8bb
PB
2866 case 20:
2867 case 21:
2868 case 22:
2869 case 23:
644ad806
PB
2870 case 28:
2871 case 29:
2872 case 30:
2873 case 31:
9ee6e8bb
PB
2874 /* Source and destination the same. */
2875 gen_mov_F0_vreg(dp, rd);
2876 break;
6e0c0ed1
PM
2877 case 4:
2878 case 5:
2879 case 6:
2880 case 7:
2881 /* VCVTB, VCVTT: only present with the halfprec extension,
2882 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2883 */
2884 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2885 return 1;
2886 }
2887 /* Otherwise fall through */
b7bcbe95
FB
2888 default:
2889 /* One source operand. */
2890 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2891 break;
b7bcbe95
FB
2892 }
2893 } else {
2894 /* Two source operands. */
2895 gen_mov_F0_vreg(dp, rn);
2896 gen_mov_F1_vreg(dp, rm);
2897 }
2898
2899 for (;;) {
2900 /* Perform the calculation. */
2901 switch (op) {
605a6aed
PM
2902 case 0: /* VMLA: fd + (fn * fm) */
2903 /* Note that order of inputs to the add matters for NaNs */
2904 gen_vfp_F1_mul(dp);
2905 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2906 gen_vfp_add(dp);
2907 break;
605a6aed 2908 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2909 gen_vfp_mul(dp);
605a6aed
PM
2910 gen_vfp_F1_neg(dp);
2911 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2912 gen_vfp_add(dp);
2913 break;
605a6aed
PM
2914 case 2: /* VNMLS: -fd + (fn * fm) */
2915 /* Note that it isn't valid to replace (-A + B) with (B - A)
2916 * or similar plausible looking simplifications
2917 * because this will give wrong results for NaNs.
2918 */
2919 gen_vfp_F1_mul(dp);
2920 gen_mov_F0_vreg(dp, rd);
2921 gen_vfp_neg(dp);
2922 gen_vfp_add(dp);
b7bcbe95 2923 break;
605a6aed 2924 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2925 gen_vfp_mul(dp);
605a6aed
PM
2926 gen_vfp_F1_neg(dp);
2927 gen_mov_F0_vreg(dp, rd);
b7bcbe95 2928 gen_vfp_neg(dp);
605a6aed 2929 gen_vfp_add(dp);
b7bcbe95
FB
2930 break;
2931 case 4: /* mul: fn * fm */
2932 gen_vfp_mul(dp);
2933 break;
2934 case 5: /* nmul: -(fn * fm) */
2935 gen_vfp_mul(dp);
2936 gen_vfp_neg(dp);
2937 break;
2938 case 6: /* add: fn + fm */
2939 gen_vfp_add(dp);
2940 break;
2941 case 7: /* sub: fn - fm */
2942 gen_vfp_sub(dp);
2943 break;
2944 case 8: /* div: fn / fm */
2945 gen_vfp_div(dp);
2946 break;
da97f52c
PM
2947 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2948 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2949 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2950 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2951 /* These are fused multiply-add, and must be done as one
2952 * floating point operation with no rounding between the
2953 * multiplication and addition steps.
2954 * NB that doing the negations here as separate steps is
2955 * correct : an input NaN should come out with its sign bit
2956 * flipped if it is a negated-input.
2957 */
2958 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
2959 return 1;
2960 }
2961 if (dp) {
2962 TCGv_ptr fpst;
2963 TCGv_i64 frd;
2964 if (op & 1) {
2965 /* VFNMS, VFMS */
2966 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
2967 }
2968 frd = tcg_temp_new_i64();
2969 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
2970 if (op & 2) {
2971 /* VFNMA, VFNMS */
2972 gen_helper_vfp_negd(frd, frd);
2973 }
2974 fpst = get_fpstatus_ptr(0);
2975 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
2976 cpu_F1d, frd, fpst);
2977 tcg_temp_free_ptr(fpst);
2978 tcg_temp_free_i64(frd);
2979 } else {
2980 TCGv_ptr fpst;
2981 TCGv_i32 frd;
2982 if (op & 1) {
2983 /* VFNMS, VFMS */
2984 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
2985 }
2986 frd = tcg_temp_new_i32();
2987 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
2988 if (op & 2) {
2989 gen_helper_vfp_negs(frd, frd);
2990 }
2991 fpst = get_fpstatus_ptr(0);
2992 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
2993 cpu_F1s, frd, fpst);
2994 tcg_temp_free_ptr(fpst);
2995 tcg_temp_free_i32(frd);
2996 }
2997 break;
9ee6e8bb
PB
2998 case 14: /* fconst */
2999 if (!arm_feature(env, ARM_FEATURE_VFP3))
3000 return 1;
3001
3002 n = (insn << 12) & 0x80000000;
3003 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3004 if (dp) {
3005 if (i & 0x40)
3006 i |= 0x3f80;
3007 else
3008 i |= 0x4000;
3009 n |= i << 16;
4373f3ce 3010 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3011 } else {
3012 if (i & 0x40)
3013 i |= 0x780;
3014 else
3015 i |= 0x800;
3016 n |= i << 19;
5b340b51 3017 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3018 }
9ee6e8bb 3019 break;
b7bcbe95
FB
3020 case 15: /* extension space */
3021 switch (rn) {
3022 case 0: /* cpy */
3023 /* no-op */
3024 break;
3025 case 1: /* abs */
3026 gen_vfp_abs(dp);
3027 break;
3028 case 2: /* neg */
3029 gen_vfp_neg(dp);
3030 break;
3031 case 3: /* sqrt */
3032 gen_vfp_sqrt(dp);
3033 break;
60011498 3034 case 4: /* vcvtb.f32.f16 */
60011498
PB
3035 tmp = gen_vfp_mrs();
3036 tcg_gen_ext16u_i32(tmp, tmp);
3037 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3038 tcg_temp_free_i32(tmp);
60011498
PB
3039 break;
3040 case 5: /* vcvtt.f32.f16 */
60011498
PB
3041 tmp = gen_vfp_mrs();
3042 tcg_gen_shri_i32(tmp, tmp, 16);
3043 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3044 tcg_temp_free_i32(tmp);
60011498
PB
3045 break;
3046 case 6: /* vcvtb.f16.f32 */
7d1b0095 3047 tmp = tcg_temp_new_i32();
60011498
PB
3048 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3049 gen_mov_F0_vreg(0, rd);
3050 tmp2 = gen_vfp_mrs();
3051 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3052 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3053 tcg_temp_free_i32(tmp2);
60011498
PB
3054 gen_vfp_msr(tmp);
3055 break;
3056 case 7: /* vcvtt.f16.f32 */
7d1b0095 3057 tmp = tcg_temp_new_i32();
60011498
PB
3058 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3059 tcg_gen_shli_i32(tmp, tmp, 16);
3060 gen_mov_F0_vreg(0, rd);
3061 tmp2 = gen_vfp_mrs();
3062 tcg_gen_ext16u_i32(tmp2, tmp2);
3063 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3064 tcg_temp_free_i32(tmp2);
60011498
PB
3065 gen_vfp_msr(tmp);
3066 break;
b7bcbe95
FB
3067 case 8: /* cmp */
3068 gen_vfp_cmp(dp);
3069 break;
3070 case 9: /* cmpe */
3071 gen_vfp_cmpe(dp);
3072 break;
3073 case 10: /* cmpz */
3074 gen_vfp_cmp(dp);
3075 break;
3076 case 11: /* cmpez */
3077 gen_vfp_F1_ld0(dp);
3078 gen_vfp_cmpe(dp);
3079 break;
3080 case 15: /* single<->double conversion */
3081 if (dp)
4373f3ce 3082 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3083 else
4373f3ce 3084 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3085 break;
3086 case 16: /* fuito */
5500b06c 3087 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3088 break;
3089 case 17: /* fsito */
5500b06c 3090 gen_vfp_sito(dp, 0);
b7bcbe95 3091 break;
9ee6e8bb
PB
3092 case 20: /* fshto */
3093 if (!arm_feature(env, ARM_FEATURE_VFP3))
3094 return 1;
5500b06c 3095 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3096 break;
3097 case 21: /* fslto */
3098 if (!arm_feature(env, ARM_FEATURE_VFP3))
3099 return 1;
5500b06c 3100 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3101 break;
3102 case 22: /* fuhto */
3103 if (!arm_feature(env, ARM_FEATURE_VFP3))
3104 return 1;
5500b06c 3105 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3106 break;
3107 case 23: /* fulto */
3108 if (!arm_feature(env, ARM_FEATURE_VFP3))
3109 return 1;
5500b06c 3110 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3111 break;
b7bcbe95 3112 case 24: /* ftoui */
5500b06c 3113 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3114 break;
3115 case 25: /* ftouiz */
5500b06c 3116 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3117 break;
3118 case 26: /* ftosi */
5500b06c 3119 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3120 break;
3121 case 27: /* ftosiz */
5500b06c 3122 gen_vfp_tosiz(dp, 0);
b7bcbe95 3123 break;
9ee6e8bb
PB
3124 case 28: /* ftosh */
3125 if (!arm_feature(env, ARM_FEATURE_VFP3))
3126 return 1;
5500b06c 3127 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3128 break;
3129 case 29: /* ftosl */
3130 if (!arm_feature(env, ARM_FEATURE_VFP3))
3131 return 1;
5500b06c 3132 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3133 break;
3134 case 30: /* ftouh */
3135 if (!arm_feature(env, ARM_FEATURE_VFP3))
3136 return 1;
5500b06c 3137 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3138 break;
3139 case 31: /* ftoul */
3140 if (!arm_feature(env, ARM_FEATURE_VFP3))
3141 return 1;
5500b06c 3142 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3143 break;
b7bcbe95 3144 default: /* undefined */
b7bcbe95
FB
3145 return 1;
3146 }
3147 break;
3148 default: /* undefined */
b7bcbe95
FB
3149 return 1;
3150 }
3151
3152 /* Write back the result. */
3153 if (op == 15 && (rn >= 8 && rn <= 11))
3154 ; /* Comparison, do nothing. */
04595bf6
PM
3155 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3156 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3157 gen_mov_vreg_F0(0, rd);
3158 else if (op == 15 && rn == 15)
3159 /* conversion */
3160 gen_mov_vreg_F0(!dp, rd);
3161 else
3162 gen_mov_vreg_F0(dp, rd);
3163
3164 /* break out of the loop if we have finished */
3165 if (veclen == 0)
3166 break;
3167
3168 if (op == 15 && delta_m == 0) {
3169 /* single source one-many */
3170 while (veclen--) {
3171 rd = ((rd + delta_d) & (bank_mask - 1))
3172 | (rd & bank_mask);
3173 gen_mov_vreg_F0(dp, rd);
3174 }
3175 break;
3176 }
3177 /* Setup the next operands. */
3178 veclen--;
3179 rd = ((rd + delta_d) & (bank_mask - 1))
3180 | (rd & bank_mask);
3181
3182 if (op == 15) {
3183 /* One source operand. */
3184 rm = ((rm + delta_m) & (bank_mask - 1))
3185 | (rm & bank_mask);
3186 gen_mov_F0_vreg(dp, rm);
3187 } else {
3188 /* Two source operands. */
3189 rn = ((rn + delta_d) & (bank_mask - 1))
3190 | (rn & bank_mask);
3191 gen_mov_F0_vreg(dp, rn);
3192 if (delta_m) {
3193 rm = ((rm + delta_m) & (bank_mask - 1))
3194 | (rm & bank_mask);
3195 gen_mov_F1_vreg(dp, rm);
3196 }
3197 }
3198 }
3199 }
3200 break;
3201 case 0xc:
3202 case 0xd:
8387da81 3203 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3204 /* two-register transfer */
3205 rn = (insn >> 16) & 0xf;
3206 rd = (insn >> 12) & 0xf;
3207 if (dp) {
9ee6e8bb
PB
3208 VFP_DREG_M(rm, insn);
3209 } else {
3210 rm = VFP_SREG_M(insn);
3211 }
b7bcbe95 3212
18c9b560 3213 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3214 /* vfp->arm */
3215 if (dp) {
4373f3ce
PB
3216 gen_mov_F0_vreg(0, rm * 2);
3217 tmp = gen_vfp_mrs();
3218 store_reg(s, rd, tmp);
3219 gen_mov_F0_vreg(0, rm * 2 + 1);
3220 tmp = gen_vfp_mrs();
3221 store_reg(s, rn, tmp);
b7bcbe95
FB
3222 } else {
3223 gen_mov_F0_vreg(0, rm);
4373f3ce 3224 tmp = gen_vfp_mrs();
8387da81 3225 store_reg(s, rd, tmp);
b7bcbe95 3226 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3227 tmp = gen_vfp_mrs();
8387da81 3228 store_reg(s, rn, tmp);
b7bcbe95
FB
3229 }
3230 } else {
3231 /* arm->vfp */
3232 if (dp) {
4373f3ce
PB
3233 tmp = load_reg(s, rd);
3234 gen_vfp_msr(tmp);
3235 gen_mov_vreg_F0(0, rm * 2);
3236 tmp = load_reg(s, rn);
3237 gen_vfp_msr(tmp);
3238 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3239 } else {
8387da81 3240 tmp = load_reg(s, rd);
4373f3ce 3241 gen_vfp_msr(tmp);
b7bcbe95 3242 gen_mov_vreg_F0(0, rm);
8387da81 3243 tmp = load_reg(s, rn);
4373f3ce 3244 gen_vfp_msr(tmp);
b7bcbe95
FB
3245 gen_mov_vreg_F0(0, rm + 1);
3246 }
3247 }
3248 } else {
3249 /* Load/store */
3250 rn = (insn >> 16) & 0xf;
3251 if (dp)
9ee6e8bb 3252 VFP_DREG_D(rd, insn);
b7bcbe95 3253 else
9ee6e8bb 3254 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3255 if ((insn & 0x01200000) == 0x01000000) {
3256 /* Single load/store */
3257 offset = (insn & 0xff) << 2;
3258 if ((insn & (1 << 23)) == 0)
3259 offset = -offset;
934814f1
PM
3260 if (s->thumb && rn == 15) {
3261 /* This is actually UNPREDICTABLE */
3262 addr = tcg_temp_new_i32();
3263 tcg_gen_movi_i32(addr, s->pc & ~2);
3264 } else {
3265 addr = load_reg(s, rn);
3266 }
312eea9f 3267 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3268 if (insn & (1 << 20)) {
312eea9f 3269 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3270 gen_mov_vreg_F0(dp, rd);
3271 } else {
3272 gen_mov_F0_vreg(dp, rd);
312eea9f 3273 gen_vfp_st(s, dp, addr);
b7bcbe95 3274 }
7d1b0095 3275 tcg_temp_free_i32(addr);
b7bcbe95
FB
3276 } else {
3277 /* load/store multiple */
934814f1 3278 int w = insn & (1 << 21);
b7bcbe95
FB
3279 if (dp)
3280 n = (insn >> 1) & 0x7f;
3281 else
3282 n = insn & 0xff;
3283
934814f1
PM
3284 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3285 /* P == U , W == 1 => UNDEF */
3286 return 1;
3287 }
3288 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3289 /* UNPREDICTABLE cases for bad immediates: we choose to
3290 * UNDEF to avoid generating huge numbers of TCG ops
3291 */
3292 return 1;
3293 }
3294 if (rn == 15 && w) {
3295 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3296 return 1;
3297 }
3298
3299 if (s->thumb && rn == 15) {
3300 /* This is actually UNPREDICTABLE */
3301 addr = tcg_temp_new_i32();
3302 tcg_gen_movi_i32(addr, s->pc & ~2);
3303 } else {
3304 addr = load_reg(s, rn);
3305 }
b7bcbe95 3306 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3307 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3308
3309 if (dp)
3310 offset = 8;
3311 else
3312 offset = 4;
3313 for (i = 0; i < n; i++) {
18c9b560 3314 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3315 /* load */
312eea9f 3316 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3317 gen_mov_vreg_F0(dp, rd + i);
3318 } else {
3319 /* store */
3320 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3321 gen_vfp_st(s, dp, addr);
b7bcbe95 3322 }
312eea9f 3323 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3324 }
934814f1 3325 if (w) {
b7bcbe95
FB
3326 /* writeback */
3327 if (insn & (1 << 24))
3328 offset = -offset * n;
3329 else if (dp && (insn & 1))
3330 offset = 4;
3331 else
3332 offset = 0;
3333
3334 if (offset != 0)
312eea9f
FN
3335 tcg_gen_addi_i32(addr, addr, offset);
3336 store_reg(s, rn, addr);
3337 } else {
7d1b0095 3338 tcg_temp_free_i32(addr);
b7bcbe95
FB
3339 }
3340 }
3341 }
3342 break;
3343 default:
3344 /* Should never happen. */
3345 return 1;
3346 }
3347 return 0;
3348}
3349
6e256c93 3350static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3351{
6e256c93
FB
3352 TranslationBlock *tb;
3353
3354 tb = s->tb;
3355 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3356 tcg_gen_goto_tb(n);
8984bd2e 3357 gen_set_pc_im(dest);
4b4a72e5 3358 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3359 } else {
8984bd2e 3360 gen_set_pc_im(dest);
57fec1fe 3361 tcg_gen_exit_tb(0);
6e256c93 3362 }
c53be334
FB
3363}
3364
8aaca4c0
FB
3365static inline void gen_jmp (DisasContext *s, uint32_t dest)
3366{
551bd27f 3367 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3368 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3369 if (s->thumb)
d9ba4830
PB
3370 dest |= 1;
3371 gen_bx_im(s, dest);
8aaca4c0 3372 } else {
6e256c93 3373 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3374 s->is_jmp = DISAS_TB_JUMP;
3375 }
3376}
3377
39d5492a 3378static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3379{
ee097184 3380 if (x)
d9ba4830 3381 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3382 else
d9ba4830 3383 gen_sxth(t0);
ee097184 3384 if (y)
d9ba4830 3385 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3386 else
d9ba4830
PB
3387 gen_sxth(t1);
3388 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3389}
3390
3391/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3392static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3393 uint32_t mask;
3394
3395 mask = 0;
3396 if (flags & (1 << 0))
3397 mask |= 0xff;
3398 if (flags & (1 << 1))
3399 mask |= 0xff00;
3400 if (flags & (1 << 2))
3401 mask |= 0xff0000;
3402 if (flags & (1 << 3))
3403 mask |= 0xff000000;
9ee6e8bb 3404
2ae23e75 3405 /* Mask out undefined bits. */
9ee6e8bb 3406 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3407 if (!arm_feature(env, ARM_FEATURE_V4T))
3408 mask &= ~CPSR_T;
3409 if (!arm_feature(env, ARM_FEATURE_V5))
3410 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3411 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3412 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3413 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3414 mask &= ~CPSR_IT;
9ee6e8bb 3415 /* Mask out execution state bits. */
2ae23e75 3416 if (!spsr)
e160c51c 3417 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3418 /* Mask out privileged bits. */
3419 if (IS_USER(s))
9ee6e8bb 3420 mask &= CPSR_USER;
b5ff1b31
FB
3421 return mask;
3422}
3423
2fbac54b 3424/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3425static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3426{
39d5492a 3427 TCGv_i32 tmp;
b5ff1b31
FB
3428 if (spsr) {
3429 /* ??? This is also undefined in system mode. */
3430 if (IS_USER(s))
3431 return 1;
d9ba4830
PB
3432
3433 tmp = load_cpu_field(spsr);
3434 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3435 tcg_gen_andi_i32(t0, t0, mask);
3436 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3437 store_cpu_field(tmp, spsr);
b5ff1b31 3438 } else {
2fbac54b 3439 gen_set_cpsr(t0, mask);
b5ff1b31 3440 }
7d1b0095 3441 tcg_temp_free_i32(t0);
b5ff1b31
FB
3442 gen_lookup_tb(s);
3443 return 0;
3444}
3445
2fbac54b
FN
3446/* Returns nonzero if access to the PSR is not permitted. */
3447static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3448{
39d5492a 3449 TCGv_i32 tmp;
7d1b0095 3450 tmp = tcg_temp_new_i32();
2fbac54b
FN
3451 tcg_gen_movi_i32(tmp, val);
3452 return gen_set_psr(s, mask, spsr, tmp);
3453}
3454
e9bb4aa9 3455/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3456static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3457{
39d5492a 3458 TCGv_i32 tmp;
e9bb4aa9 3459 store_reg(s, 15, pc);
d9ba4830
PB
3460 tmp = load_cpu_field(spsr);
3461 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3462 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3463 s->is_jmp = DISAS_UPDATE;
3464}
3465
/* Generate a v6 exception return.  Marks both values as dead.
 * CPSR is restored first, then the return address is written to r15.
 */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    /* CPU state changed under us: end the TB and resync. */
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3474
9ee6e8bb
PB
3475static inline void
3476gen_set_condexec (DisasContext *s)
3477{
3478 if (s->condexec_mask) {
8f01245e 3479 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
39d5492a 3480 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 3481 tcg_gen_movi_i32(tmp, val);
d9ba4830 3482 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3483 }
3484}
3b46e624 3485
/* Raise exception EXCP with the guest PC rewound by OFFSET bytes so it
 * points at the faulting instruction; IT-block state is synced first.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
3493
9ee6e8bb
PB
3494static void gen_nop_hint(DisasContext *s, int val)
3495{
3496 switch (val) {
3497 case 3: /* wfi */
8984bd2e 3498 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3499 s->is_jmp = DISAS_WFI;
3500 break;
3501 case 2: /* wfe */
3502 case 4: /* sev */
3503 /* TODO: Implement SEV and WFE. May help SMP performance. */
3504 default: /* nop */
3505 break;
3506 }
3507}
99c475ab 3508
ad69471c 3509#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3510
39d5492a 3511static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3512{
3513 switch (size) {
dd8fbd78
FN
3514 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3515 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3516 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3517 default: abort();
9ee6e8bb 3518 }
9ee6e8bb
PB
3519}
3520
39d5492a 3521static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3522{
3523 switch (size) {
dd8fbd78
FN
3524 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3525 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3526 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3527 default: return;
3528 }
3529}
3530
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* Expand a two-operand Neon integer op into the helper call matching
 * the current element size and signedness.  Both macros expect `size'
 * (0=8-bit, 1=16-bit, 2=32-bit), `u' (unsigned flag) and the operand
 * temps `tmp'/`tmp2' to be in scope; the result lands in `tmp'.  The
 * _ENV variant additionally passes cpu_env for helpers that need CPU
 * state.  An unsupported size/u combination makes the enclosing
 * function return 1 (i.e. UNDEF the instruction).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3582
39d5492a 3583static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 3584{
39d5492a 3585 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
3586 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3587 return tmp;
9ee6e8bb
PB
3588}
3589
39d5492a 3590static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 3591{
dd8fbd78 3592 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3593 tcg_temp_free_i32(var);
9ee6e8bb
PB
3594}
3595
39d5492a 3596static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 3597{
39d5492a 3598 TCGv_i32 tmp;
9ee6e8bb 3599 if (size == 1) {
0fad6efc
PM
3600 tmp = neon_load_reg(reg & 7, reg >> 4);
3601 if (reg & 8) {
dd8fbd78 3602 gen_neon_dup_high16(tmp);
0fad6efc
PM
3603 } else {
3604 gen_neon_dup_low16(tmp);
dd8fbd78 3605 }
0fad6efc
PM
3606 } else {
3607 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3608 }
dd8fbd78 3609 return tmp;
9ee6e8bb
PB
3610}
3611
02acedf9 3612static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3613{
39d5492a 3614 TCGv_i32 tmp, tmp2;
600b828c 3615 if (!q && size == 2) {
02acedf9
PM
3616 return 1;
3617 }
3618 tmp = tcg_const_i32(rd);
3619 tmp2 = tcg_const_i32(rm);
3620 if (q) {
3621 switch (size) {
3622 case 0:
02da0b2d 3623 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3624 break;
3625 case 1:
02da0b2d 3626 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3627 break;
3628 case 2:
02da0b2d 3629 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3630 break;
3631 default:
3632 abort();
3633 }
3634 } else {
3635 switch (size) {
3636 case 0:
02da0b2d 3637 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3638 break;
3639 case 1:
02da0b2d 3640 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3641 break;
3642 default:
3643 abort();
3644 }
3645 }
3646 tcg_temp_free_i32(tmp);
3647 tcg_temp_free_i32(tmp2);
3648 return 0;
19457615
FN
3649}
3650
d68a6f3a 3651static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 3652{
39d5492a 3653 TCGv_i32 tmp, tmp2;
600b828c 3654 if (!q && size == 2) {
d68a6f3a
PM
3655 return 1;
3656 }
3657 tmp = tcg_const_i32(rd);
3658 tmp2 = tcg_const_i32(rm);
3659 if (q) {
3660 switch (size) {
3661 case 0:
02da0b2d 3662 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3663 break;
3664 case 1:
02da0b2d 3665 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3666 break;
3667 case 2:
02da0b2d 3668 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3669 break;
3670 default:
3671 abort();
3672 }
3673 } else {
3674 switch (size) {
3675 case 0:
02da0b2d 3676 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3677 break;
3678 case 1:
02da0b2d 3679 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3680 break;
3681 default:
3682 abort();
3683 }
3684 }
3685 tcg_temp_free_i32(tmp);
3686 tcg_temp_free_i32(tmp2);
3687 return 0;
19457615
FN
3688}
3689
39d5492a 3690static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 3691{
39d5492a 3692 TCGv_i32 rd, tmp;
19457615 3693
7d1b0095
PM
3694 rd = tcg_temp_new_i32();
3695 tmp = tcg_temp_new_i32();
19457615
FN
3696
3697 tcg_gen_shli_i32(rd, t0, 8);
3698 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3699 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3700 tcg_gen_or_i32(rd, rd, tmp);
3701
3702 tcg_gen_shri_i32(t1, t1, 8);
3703 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3704 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3705 tcg_gen_or_i32(t1, t1, tmp);
3706 tcg_gen_mov_i32(t0, rd);
3707
7d1b0095
PM
3708 tcg_temp_free_i32(tmp);
3709 tcg_temp_free_i32(rd);
19457615
FN
3710}
3711
39d5492a 3712static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 3713{
39d5492a 3714 TCGv_i32 rd, tmp;
19457615 3715
7d1b0095
PM
3716 rd = tcg_temp_new_i32();
3717 tmp = tcg_temp_new_i32();
19457615
FN
3718
3719 tcg_gen_shli_i32(rd, t0, 16);
3720 tcg_gen_andi_i32(tmp, t1, 0xffff);
3721 tcg_gen_or_i32(rd, rd, tmp);
3722 tcg_gen_shri_i32(t1, t1, 16);
3723 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3724 tcg_gen_or_i32(t1, t1, tmp);
3725 tcg_gen_mov_i32(t0, rd);
3726
7d1b0095
PM
3727 tcg_temp_free_i32(tmp);
3728 tcg_temp_free_i32(rd);
19457615
FN
3729}
3730
3731
9ee6e8bb
PB
/* Decode table for the Neon "load/store multiple structures" forms
 * (VLD1-VLD4/VST1-VST4), indexed by the insn "type" field: the number
 * of registers transferred, the element interleave factor, and the
 * D-register spacing between structure members.  Read-only, so declare
 * it const.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3749
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* Neon loads/stores UNDEF when the FPU/Neon is disabled. */
    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;      /* base address register */
    rm = insn & 0xf;              /* post-index register (13/15 special) */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved registers restart from the base with an
             * element offset rather than continuing linearly.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: whole D register per access. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    tcg_gen_qemu_ld64(tmp64, addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    tcg_gen_qemu_st64(tmp64, addr, IS_USER(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Each D register is handled as two 32-bit passes. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one word. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_qemu_ld16u(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_qemu_st16(tmp2, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses assembled into one word. */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into its lane,
                         * preserving the rest of the register word.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Post-index writeback: rm == 13 means immediate (transfer
         * size), any other register supplies the increment.
         */
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4068
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    /* dest = (t AND c) OR (f AND NOT c), computed in place in t and f. */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4076
39d5492a 4077static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4078{
4079 switch (size) {
4080 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4081 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4082 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4083 default: abort();
4084 }
4085}
4086
39d5492a 4087static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4088{
4089 switch (size) {
02da0b2d
PM
4090 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4091 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4092 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4093 default: abort();
4094 }
4095}
4096
39d5492a 4097static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4098{
4099 switch (size) {
02da0b2d
PM
4100 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4101 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4102 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4103 default: abort();
4104 }
4105}
4106
39d5492a 4107static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4108{
4109 switch (size) {
02da0b2d
PM
4110 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4111 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4112 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4113 default: abort();
4114 }
4115}
4116
39d5492a 4117static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4118 int q, int u)
4119{
4120 if (q) {
4121 if (u) {
4122 switch (size) {
4123 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4124 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4125 default: abort();
4126 }
4127 } else {
4128 switch (size) {
4129 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4130 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4131 default: abort();
4132 }
4133 }
4134 } else {
4135 if (u) {
4136 switch (size) {
b408a9b0
CL
4137 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4138 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4139 default: abort();
4140 }
4141 } else {
4142 switch (size) {
4143 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4144 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4145 default: abort();
4146 }
4147 }
4148 }
4149}
4150
39d5492a 4151static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4152{
4153 if (u) {
4154 switch (size) {
4155 case 0: gen_helper_neon_widen_u8(dest, src); break;
4156 case 1: gen_helper_neon_widen_u16(dest, src); break;
4157 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4158 default: abort();
4159 }
4160 } else {
4161 switch (size) {
4162 case 0: gen_helper_neon_widen_s8(dest, src); break;
4163 case 1: gen_helper_neon_widen_s16(dest, src); break;
4164 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4165 default: abort();
4166 }
4167 }
7d1b0095 4168 tcg_temp_free_i32(src);
ad69471c
PB
4169}
4170
4171static inline void gen_neon_addl(int size)
4172{
4173 switch (size) {
4174 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4175 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4176 case 2: tcg_gen_add_i64(CPU_V001); break;
4177 default: abort();
4178 }
4179}
4180
4181static inline void gen_neon_subl(int size)
4182{
4183 switch (size) {
4184 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4185 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4186 case 2: tcg_gen_sub_i64(CPU_V001); break;
4187 default: abort();
4188 }
4189}
4190
a7812ae4 4191static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4192{
4193 switch (size) {
4194 case 0: gen_helper_neon_negl_u16(var, var); break;
4195 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4196 case 2:
4197 tcg_gen_neg_i64(var, var);
4198 break;
ad69471c
PB
4199 default: abort();
4200 }
4201}
4202
a7812ae4 4203static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4204{
4205 switch (size) {
02da0b2d
PM
4206 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4207 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4208 default: abort();
4209 }
4210}
4211
39d5492a
PM
4212static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4213 int size, int u)
ad69471c 4214{
a7812ae4 4215 TCGv_i64 tmp;
ad69471c
PB
4216
4217 switch ((size << 1) | u) {
4218 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4219 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4220 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4221 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4222 case 4:
4223 tmp = gen_muls_i64_i32(a, b);
4224 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4225 tcg_temp_free_i64(tmp);
ad69471c
PB
4226 break;
4227 case 5:
4228 tmp = gen_mulu_i64_i32(a, b);
4229 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4230 tcg_temp_free_i64(tmp);
ad69471c
PB
4231 break;
4232 default: abort();
4233 }
c6067f04
CL
4234
4235 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4236 Don't forget to clean them now. */
4237 if (size < 2) {
7d1b0095
PM
4238 tcg_temp_free_i32(a);
4239 tcg_temp_free_i32(b);
c6067f04 4240 }
ad69471c
PB
4241}
4242
39d5492a
PM
4243static void gen_neon_narrow_op(int op, int u, int size,
4244 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4245{
4246 if (op) {
4247 if (u) {
4248 gen_neon_unarrow_sats(size, dest, src);
4249 } else {
4250 gen_neon_narrow(size, dest, src);
4251 }
4252 } else {
4253 if (u) {
4254 gen_neon_narrow_satu(size, dest, src);
4255 } else {
4256 gen_neon_narrow_sats(size, dest, src);
4257 }
4258 }
4259}
4260
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

/* For each 3-reg-same op, a mask of the size values that are valid:
 * bit n set means size n is allowed.  Unallocated op values have no
 * bits set and therefore always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4330
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4380static int neon_2rm_is_float_op(int op)
4381{
4382 /* Return true if this neon 2reg-misc op is float-to-float */
4383 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4384 op >= NEON_2RM_VRECPE_F);
4385}
4386
4387/* Each entry in this array has bit n set if the insn allows
4388 * size value n (otherwise it will UNDEF). Since unallocated
4389 * op values will have no bits set they always UNDEF.
4390 */
4391static const uint8_t neon_2rm_sizes[] = {
4392 [NEON_2RM_VREV64] = 0x7,
4393 [NEON_2RM_VREV32] = 0x3,
4394 [NEON_2RM_VREV16] = 0x1,
4395 [NEON_2RM_VPADDL] = 0x7,
4396 [NEON_2RM_VPADDL_U] = 0x7,
4397 [NEON_2RM_VCLS] = 0x7,
4398 [NEON_2RM_VCLZ] = 0x7,
4399 [NEON_2RM_VCNT] = 0x1,
4400 [NEON_2RM_VMVN] = 0x1,
4401 [NEON_2RM_VPADAL] = 0x7,
4402 [NEON_2RM_VPADAL_U] = 0x7,
4403 [NEON_2RM_VQABS] = 0x7,
4404 [NEON_2RM_VQNEG] = 0x7,
4405 [NEON_2RM_VCGT0] = 0x7,
4406 [NEON_2RM_VCGE0] = 0x7,
4407 [NEON_2RM_VCEQ0] = 0x7,
4408 [NEON_2RM_VCLE0] = 0x7,
4409 [NEON_2RM_VCLT0] = 0x7,
4410 [NEON_2RM_VABS] = 0x7,
4411 [NEON_2RM_VNEG] = 0x7,
4412 [NEON_2RM_VCGT0_F] = 0x4,
4413 [NEON_2RM_VCGE0_F] = 0x4,
4414 [NEON_2RM_VCEQ0_F] = 0x4,
4415 [NEON_2RM_VCLE0_F] = 0x4,
4416 [NEON_2RM_VCLT0_F] = 0x4,
4417 [NEON_2RM_VABS_F] = 0x4,
4418 [NEON_2RM_VNEG_F] = 0x4,
4419 [NEON_2RM_VSWP] = 0x1,
4420 [NEON_2RM_VTRN] = 0x7,
4421 [NEON_2RM_VUZP] = 0x7,
4422 [NEON_2RM_VZIP] = 0x7,
4423 [NEON_2RM_VMOVN] = 0x7,
4424 [NEON_2RM_VQMOVN] = 0x7,
4425 [NEON_2RM_VSHLL] = 0x7,
4426 [NEON_2RM_VCVT_F16_F32] = 0x2,
4427 [NEON_2RM_VCVT_F32_F16] = 0x2,
4428 [NEON_2RM_VRECPE] = 0x4,
4429 [NEON_2RM_VRSQRTE] = 0x4,
4430 [NEON_2RM_VRECPE_F] = 0x4,
4431 [NEON_2RM_VRSQRTE_F] = 0x4,
4432 [NEON_2RM_VCVT_FS] = 0x4,
4433 [NEON_2RM_VCVT_FU] = 0x4,
4434 [NEON_2RM_VCVT_SF] = 0x4,
4435 [NEON_2RM_VCVT_UF] = 0x4,
4436};
4437
9ee6e8bb
PB
4438/* Translate a NEON data processing instruction. Return nonzero if the
4439 instruction is invalid.
ad69471c
PB
4440 We process data in a mixture of 32-bit and 64-bit chunks.
4441 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4442
0ecb72a5 4443static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4444{
4445 int op;
4446 int q;
4447 int rd, rn, rm;
4448 int size;
4449 int shift;
4450 int pass;
4451 int count;
4452 int pairwise;
4453 int u;
ca9a32e4 4454 uint32_t imm, mask;
39d5492a 4455 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4456 TCGv_i64 tmp64;
9ee6e8bb 4457
5df8bac1 4458 if (!s->vfp_enabled)
9ee6e8bb
PB
4459 return 1;
4460 q = (insn & (1 << 6)) != 0;
4461 u = (insn >> 24) & 1;
4462 VFP_DREG_D(rd, insn);
4463 VFP_DREG_N(rn, insn);
4464 VFP_DREG_M(rm, insn);
4465 size = (insn >> 20) & 3;
4466 if ((insn & (1 << 23)) == 0) {
4467 /* Three register same length. */
4468 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4469 /* Catch invalid op and bad size combinations: UNDEF */
4470 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4471 return 1;
4472 }
25f84f79
PM
4473 /* All insns of this form UNDEF for either this condition or the
4474 * superset of cases "Q==1"; we catch the latter later.
4475 */
4476 if (q && ((rd | rn | rm) & 1)) {
4477 return 1;
4478 }
62698be3
PM
4479 if (size == 3 && op != NEON_3R_LOGIC) {
4480 /* 64-bit element instructions. */
9ee6e8bb 4481 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4482 neon_load_reg64(cpu_V0, rn + pass);
4483 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4484 switch (op) {
62698be3 4485 case NEON_3R_VQADD:
9ee6e8bb 4486 if (u) {
02da0b2d
PM
4487 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4488 cpu_V0, cpu_V1);
2c0262af 4489 } else {
02da0b2d
PM
4490 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4491 cpu_V0, cpu_V1);
2c0262af 4492 }
9ee6e8bb 4493 break;
62698be3 4494 case NEON_3R_VQSUB:
9ee6e8bb 4495 if (u) {
02da0b2d
PM
4496 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4497 cpu_V0, cpu_V1);
ad69471c 4498 } else {
02da0b2d
PM
4499 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4500 cpu_V0, cpu_V1);
ad69471c
PB
4501 }
4502 break;
62698be3 4503 case NEON_3R_VSHL:
ad69471c
PB
4504 if (u) {
4505 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4506 } else {
4507 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4508 }
4509 break;
62698be3 4510 case NEON_3R_VQSHL:
ad69471c 4511 if (u) {
02da0b2d
PM
4512 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4513 cpu_V1, cpu_V0);
ad69471c 4514 } else {
02da0b2d
PM
4515 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4516 cpu_V1, cpu_V0);
ad69471c
PB
4517 }
4518 break;
62698be3 4519 case NEON_3R_VRSHL:
ad69471c
PB
4520 if (u) {
4521 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4522 } else {
ad69471c
PB
4523 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4524 }
4525 break;
62698be3 4526 case NEON_3R_VQRSHL:
ad69471c 4527 if (u) {
02da0b2d
PM
4528 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4529 cpu_V1, cpu_V0);
ad69471c 4530 } else {
02da0b2d
PM
4531 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4532 cpu_V1, cpu_V0);
1e8d4eec 4533 }
9ee6e8bb 4534 break;
62698be3 4535 case NEON_3R_VADD_VSUB:
9ee6e8bb 4536 if (u) {
ad69471c 4537 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4538 } else {
ad69471c 4539 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4540 }
4541 break;
4542 default:
4543 abort();
2c0262af 4544 }
ad69471c 4545 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4546 }
9ee6e8bb 4547 return 0;
2c0262af 4548 }
25f84f79 4549 pairwise = 0;
9ee6e8bb 4550 switch (op) {
62698be3
PM
4551 case NEON_3R_VSHL:
4552 case NEON_3R_VQSHL:
4553 case NEON_3R_VRSHL:
4554 case NEON_3R_VQRSHL:
9ee6e8bb 4555 {
ad69471c
PB
4556 int rtmp;
4557 /* Shift instruction operands are reversed. */
4558 rtmp = rn;
9ee6e8bb 4559 rn = rm;
ad69471c 4560 rm = rtmp;
9ee6e8bb 4561 }
2c0262af 4562 break;
25f84f79
PM
4563 case NEON_3R_VPADD:
4564 if (u) {
4565 return 1;
4566 }
4567 /* Fall through */
62698be3
PM
4568 case NEON_3R_VPMAX:
4569 case NEON_3R_VPMIN:
9ee6e8bb 4570 pairwise = 1;
2c0262af 4571 break;
25f84f79
PM
4572 case NEON_3R_FLOAT_ARITH:
4573 pairwise = (u && size < 2); /* if VPADD (float) */
4574 break;
4575 case NEON_3R_FLOAT_MINMAX:
4576 pairwise = u; /* if VPMIN/VPMAX (float) */
4577 break;
4578 case NEON_3R_FLOAT_CMP:
4579 if (!u && size) {
4580 /* no encoding for U=0 C=1x */
4581 return 1;
4582 }
4583 break;
4584 case NEON_3R_FLOAT_ACMP:
4585 if (!u) {
4586 return 1;
4587 }
4588 break;
4589 case NEON_3R_VRECPS_VRSQRTS:
4590 if (u) {
4591 return 1;
4592 }
2c0262af 4593 break;
25f84f79
PM
4594 case NEON_3R_VMUL:
4595 if (u && (size != 0)) {
4596 /* UNDEF on invalid size for polynomial subcase */
4597 return 1;
4598 }
2c0262af 4599 break;
da97f52c
PM
4600 case NEON_3R_VFM:
4601 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4602 return 1;
4603 }
4604 break;
9ee6e8bb 4605 default:
2c0262af 4606 break;
9ee6e8bb 4607 }
dd8fbd78 4608
25f84f79
PM
4609 if (pairwise && q) {
4610 /* All the pairwise insns UNDEF if Q is set */
4611 return 1;
4612 }
4613
9ee6e8bb
PB
4614 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4615
4616 if (pairwise) {
4617 /* Pairwise. */
a5a14945
JR
4618 if (pass < 1) {
4619 tmp = neon_load_reg(rn, 0);
4620 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4621 } else {
a5a14945
JR
4622 tmp = neon_load_reg(rm, 0);
4623 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4624 }
4625 } else {
4626 /* Elementwise. */
dd8fbd78
FN
4627 tmp = neon_load_reg(rn, pass);
4628 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4629 }
4630 switch (op) {
62698be3 4631 case NEON_3R_VHADD:
9ee6e8bb
PB
4632 GEN_NEON_INTEGER_OP(hadd);
4633 break;
62698be3 4634 case NEON_3R_VQADD:
02da0b2d 4635 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4636 break;
62698be3 4637 case NEON_3R_VRHADD:
9ee6e8bb 4638 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4639 break;
62698be3 4640 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4641 switch ((u << 2) | size) {
4642 case 0: /* VAND */
dd8fbd78 4643 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4644 break;
4645 case 1: /* BIC */
f669df27 4646 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4647 break;
4648 case 2: /* VORR */
dd8fbd78 4649 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4650 break;
4651 case 3: /* VORN */
f669df27 4652 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4653 break;
4654 case 4: /* VEOR */
dd8fbd78 4655 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4656 break;
4657 case 5: /* VBSL */
dd8fbd78
FN
4658 tmp3 = neon_load_reg(rd, pass);
4659 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4660 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4661 break;
4662 case 6: /* VBIT */
dd8fbd78
FN
4663 tmp3 = neon_load_reg(rd, pass);
4664 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4665 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4666 break;
4667 case 7: /* VBIF */
dd8fbd78
FN
4668 tmp3 = neon_load_reg(rd, pass);
4669 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4670 tcg_temp_free_i32(tmp3);
9ee6e8bb 4671 break;
2c0262af
FB
4672 }
4673 break;
62698be3 4674 case NEON_3R_VHSUB:
9ee6e8bb
PB
4675 GEN_NEON_INTEGER_OP(hsub);
4676 break;
62698be3 4677 case NEON_3R_VQSUB:
02da0b2d 4678 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4679 break;
62698be3 4680 case NEON_3R_VCGT:
9ee6e8bb
PB
4681 GEN_NEON_INTEGER_OP(cgt);
4682 break;
62698be3 4683 case NEON_3R_VCGE:
9ee6e8bb
PB
4684 GEN_NEON_INTEGER_OP(cge);
4685 break;
62698be3 4686 case NEON_3R_VSHL:
ad69471c 4687 GEN_NEON_INTEGER_OP(shl);
2c0262af 4688 break;
62698be3 4689 case NEON_3R_VQSHL:
02da0b2d 4690 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4691 break;
62698be3 4692 case NEON_3R_VRSHL:
ad69471c 4693 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4694 break;
62698be3 4695 case NEON_3R_VQRSHL:
02da0b2d 4696 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4697 break;
62698be3 4698 case NEON_3R_VMAX:
9ee6e8bb
PB
4699 GEN_NEON_INTEGER_OP(max);
4700 break;
62698be3 4701 case NEON_3R_VMIN:
9ee6e8bb
PB
4702 GEN_NEON_INTEGER_OP(min);
4703 break;
62698be3 4704 case NEON_3R_VABD:
9ee6e8bb
PB
4705 GEN_NEON_INTEGER_OP(abd);
4706 break;
62698be3 4707 case NEON_3R_VABA:
9ee6e8bb 4708 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4709 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4710 tmp2 = neon_load_reg(rd, pass);
4711 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4712 break;
62698be3 4713 case NEON_3R_VADD_VSUB:
9ee6e8bb 4714 if (!u) { /* VADD */
62698be3 4715 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4716 } else { /* VSUB */
4717 switch (size) {
dd8fbd78
FN
4718 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4719 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4720 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4721 default: abort();
9ee6e8bb
PB
4722 }
4723 }
4724 break;
62698be3 4725 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4726 if (!u) { /* VTST */
4727 switch (size) {
dd8fbd78
FN
4728 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4729 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4730 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4731 default: abort();
9ee6e8bb
PB
4732 }
4733 } else { /* VCEQ */
4734 switch (size) {
dd8fbd78
FN
4735 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4736 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4737 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4738 default: abort();
9ee6e8bb
PB
4739 }
4740 }
4741 break;
62698be3 4742 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4743 switch (size) {
dd8fbd78
FN
4744 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4745 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4746 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4747 default: abort();
9ee6e8bb 4748 }
7d1b0095 4749 tcg_temp_free_i32(tmp2);
dd8fbd78 4750 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4751 if (u) { /* VMLS */
dd8fbd78 4752 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4753 } else { /* VMLA */
dd8fbd78 4754 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4755 }
4756 break;
62698be3 4757 case NEON_3R_VMUL:
9ee6e8bb 4758 if (u) { /* polynomial */
dd8fbd78 4759 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4760 } else { /* Integer */
4761 switch (size) {
dd8fbd78
FN
4762 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4763 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4764 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4765 default: abort();
9ee6e8bb
PB
4766 }
4767 }
4768 break;
62698be3 4769 case NEON_3R_VPMAX:
9ee6e8bb
PB
4770 GEN_NEON_INTEGER_OP(pmax);
4771 break;
62698be3 4772 case NEON_3R_VPMIN:
9ee6e8bb
PB
4773 GEN_NEON_INTEGER_OP(pmin);
4774 break;
62698be3 4775 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4776 if (!u) { /* VQDMULH */
4777 switch (size) {
02da0b2d
PM
4778 case 1:
4779 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4780 break;
4781 case 2:
4782 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4783 break;
62698be3 4784 default: abort();
9ee6e8bb 4785 }
62698be3 4786 } else { /* VQRDMULH */
9ee6e8bb 4787 switch (size) {
02da0b2d
PM
4788 case 1:
4789 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4790 break;
4791 case 2:
4792 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4793 break;
62698be3 4794 default: abort();
9ee6e8bb
PB
4795 }
4796 }
4797 break;
62698be3 4798 case NEON_3R_VPADD:
9ee6e8bb 4799 switch (size) {
dd8fbd78
FN
4800 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4801 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4802 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4803 default: abort();
9ee6e8bb
PB
4804 }
4805 break;
62698be3 4806 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4807 {
4808 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4809 switch ((u << 2) | size) {
4810 case 0: /* VADD */
aa47cfdd
PM
4811 case 4: /* VPADD */
4812 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4813 break;
4814 case 2: /* VSUB */
aa47cfdd 4815 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4816 break;
4817 case 6: /* VABD */
aa47cfdd 4818 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4819 break;
4820 default:
62698be3 4821 abort();
9ee6e8bb 4822 }
aa47cfdd 4823 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4824 break;
aa47cfdd 4825 }
62698be3 4826 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4827 {
4828 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4829 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4830 if (!u) {
7d1b0095 4831 tcg_temp_free_i32(tmp2);
dd8fbd78 4832 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4833 if (size == 0) {
aa47cfdd 4834 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4835 } else {
aa47cfdd 4836 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4837 }
4838 }
aa47cfdd 4839 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4840 break;
aa47cfdd 4841 }
62698be3 4842 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4843 {
4844 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4845 if (!u) {
aa47cfdd 4846 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4847 } else {
aa47cfdd
PM
4848 if (size == 0) {
4849 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4850 } else {
4851 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4852 }
b5ff1b31 4853 }
aa47cfdd 4854 tcg_temp_free_ptr(fpstatus);
2c0262af 4855 break;
aa47cfdd 4856 }
62698be3 4857 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4858 {
4859 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4860 if (size == 0) {
4861 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4862 } else {
4863 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4864 }
4865 tcg_temp_free_ptr(fpstatus);
2c0262af 4866 break;
aa47cfdd 4867 }
62698be3 4868 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4869 {
4870 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4871 if (size == 0) {
4872 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4873 } else {
4874 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4875 }
4876 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4877 break;
aa47cfdd 4878 }
62698be3 4879 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4880 if (size == 0)
dd8fbd78 4881 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4882 else
dd8fbd78 4883 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4884 break;
da97f52c
PM
4885 case NEON_3R_VFM:
4886 {
4887 /* VFMA, VFMS: fused multiply-add */
4888 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4889 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4890 if (size) {
4891 /* VFMS */
4892 gen_helper_vfp_negs(tmp, tmp);
4893 }
4894 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4895 tcg_temp_free_i32(tmp3);
4896 tcg_temp_free_ptr(fpstatus);
4897 break;
4898 }
9ee6e8bb
PB
4899 default:
4900 abort();
2c0262af 4901 }
7d1b0095 4902 tcg_temp_free_i32(tmp2);
dd8fbd78 4903
9ee6e8bb
PB
4904 /* Save the result. For elementwise operations we can put it
4905 straight into the destination register. For pairwise operations
4906 we have to be careful to avoid clobbering the source operands. */
4907 if (pairwise && rd == rm) {
dd8fbd78 4908 neon_store_scratch(pass, tmp);
9ee6e8bb 4909 } else {
dd8fbd78 4910 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4911 }
4912
4913 } /* for pass */
4914 if (pairwise && rd == rm) {
4915 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4916 tmp = neon_load_scratch(pass);
4917 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4918 }
4919 }
ad69471c 4920 /* End of 3 register same size operations. */
9ee6e8bb
PB
4921 } else if (insn & (1 << 4)) {
4922 if ((insn & 0x00380080) != 0) {
4923 /* Two registers and shift. */
4924 op = (insn >> 8) & 0xf;
4925 if (insn & (1 << 7)) {
cc13115b
PM
4926 /* 64-bit shift. */
4927 if (op > 7) {
4928 return 1;
4929 }
9ee6e8bb
PB
4930 size = 3;
4931 } else {
4932 size = 2;
4933 while ((insn & (1 << (size + 19))) == 0)
4934 size--;
4935 }
4936 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 4937 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
4938 by immediate using the variable shift operations. */
4939 if (op < 8) {
4940 /* Shift by immediate:
4941 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4942 if (q && ((rd | rm) & 1)) {
4943 return 1;
4944 }
4945 if (!u && (op == 4 || op == 6)) {
4946 return 1;
4947 }
9ee6e8bb
PB
4948 /* Right shifts are encoded as N - shift, where N is the
4949 element size in bits. */
4950 if (op <= 4)
4951 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4952 if (size == 3) {
4953 count = q + 1;
4954 } else {
4955 count = q ? 4: 2;
4956 }
4957 switch (size) {
4958 case 0:
4959 imm = (uint8_t) shift;
4960 imm |= imm << 8;
4961 imm |= imm << 16;
4962 break;
4963 case 1:
4964 imm = (uint16_t) shift;
4965 imm |= imm << 16;
4966 break;
4967 case 2:
4968 case 3:
4969 imm = shift;
4970 break;
4971 default:
4972 abort();
4973 }
4974
4975 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4976 if (size == 3) {
4977 neon_load_reg64(cpu_V0, rm + pass);
4978 tcg_gen_movi_i64(cpu_V1, imm);
4979 switch (op) {
4980 case 0: /* VSHR */
4981 case 1: /* VSRA */
4982 if (u)
4983 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4984 else
ad69471c 4985 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4986 break;
ad69471c
PB
4987 case 2: /* VRSHR */
4988 case 3: /* VRSRA */
4989 if (u)
4990 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4991 else
ad69471c 4992 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4993 break;
ad69471c 4994 case 4: /* VSRI */
ad69471c
PB
4995 case 5: /* VSHL, VSLI */
4996 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4997 break;
0322b26e 4998 case 6: /* VQSHLU */
02da0b2d
PM
4999 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5000 cpu_V0, cpu_V1);
ad69471c 5001 break;
0322b26e
PM
5002 case 7: /* VQSHL */
5003 if (u) {
02da0b2d 5004 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5005 cpu_V0, cpu_V1);
5006 } else {
02da0b2d 5007 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5008 cpu_V0, cpu_V1);
5009 }
9ee6e8bb 5010 break;
9ee6e8bb 5011 }
ad69471c
PB
5012 if (op == 1 || op == 3) {
5013 /* Accumulate. */
5371cb81 5014 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5015 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5016 } else if (op == 4 || (op == 5 && u)) {
5017 /* Insert */
923e6509
CL
5018 neon_load_reg64(cpu_V1, rd + pass);
5019 uint64_t mask;
5020 if (shift < -63 || shift > 63) {
5021 mask = 0;
5022 } else {
5023 if (op == 4) {
5024 mask = 0xffffffffffffffffull >> -shift;
5025 } else {
5026 mask = 0xffffffffffffffffull << shift;
5027 }
5028 }
5029 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5030 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5031 }
5032 neon_store_reg64(cpu_V0, rd + pass);
5033 } else { /* size < 3 */
5034 /* Operands in T0 and T1. */
dd8fbd78 5035 tmp = neon_load_reg(rm, pass);
7d1b0095 5036 tmp2 = tcg_temp_new_i32();
dd8fbd78 5037 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5038 switch (op) {
5039 case 0: /* VSHR */
5040 case 1: /* VSRA */
5041 GEN_NEON_INTEGER_OP(shl);
5042 break;
5043 case 2: /* VRSHR */
5044 case 3: /* VRSRA */
5045 GEN_NEON_INTEGER_OP(rshl);
5046 break;
5047 case 4: /* VSRI */
ad69471c
PB
5048 case 5: /* VSHL, VSLI */
5049 switch (size) {
dd8fbd78
FN
5050 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5051 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5052 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5053 default: abort();
ad69471c
PB
5054 }
5055 break;
0322b26e 5056 case 6: /* VQSHLU */
ad69471c 5057 switch (size) {
0322b26e 5058 case 0:
02da0b2d
PM
5059 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5060 tmp, tmp2);
0322b26e
PM
5061 break;
5062 case 1:
02da0b2d
PM
5063 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5064 tmp, tmp2);
0322b26e
PM
5065 break;
5066 case 2:
02da0b2d
PM
5067 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5068 tmp, tmp2);
0322b26e
PM
5069 break;
5070 default:
cc13115b 5071 abort();
ad69471c
PB
5072 }
5073 break;
0322b26e 5074 case 7: /* VQSHL */
02da0b2d 5075 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5076 break;
ad69471c 5077 }
7d1b0095 5078 tcg_temp_free_i32(tmp2);
ad69471c
PB
5079
5080 if (op == 1 || op == 3) {
5081 /* Accumulate. */
dd8fbd78 5082 tmp2 = neon_load_reg(rd, pass);
5371cb81 5083 gen_neon_add(size, tmp, tmp2);
7d1b0095 5084 tcg_temp_free_i32(tmp2);
ad69471c
PB
5085 } else if (op == 4 || (op == 5 && u)) {
5086 /* Insert */
5087 switch (size) {
5088 case 0:
5089 if (op == 4)
ca9a32e4 5090 mask = 0xff >> -shift;
ad69471c 5091 else
ca9a32e4
JR
5092 mask = (uint8_t)(0xff << shift);
5093 mask |= mask << 8;
5094 mask |= mask << 16;
ad69471c
PB
5095 break;
5096 case 1:
5097 if (op == 4)
ca9a32e4 5098 mask = 0xffff >> -shift;
ad69471c 5099 else
ca9a32e4
JR
5100 mask = (uint16_t)(0xffff << shift);
5101 mask |= mask << 16;
ad69471c
PB
5102 break;
5103 case 2:
ca9a32e4
JR
5104 if (shift < -31 || shift > 31) {
5105 mask = 0;
5106 } else {
5107 if (op == 4)
5108 mask = 0xffffffffu >> -shift;
5109 else
5110 mask = 0xffffffffu << shift;
5111 }
ad69471c
PB
5112 break;
5113 default:
5114 abort();
5115 }
dd8fbd78 5116 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5117 tcg_gen_andi_i32(tmp, tmp, mask);
5118 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5119 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5120 tcg_temp_free_i32(tmp2);
ad69471c 5121 }
dd8fbd78 5122 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5123 }
5124 } /* for pass */
5125 } else if (op < 10) {
ad69471c 5126 /* Shift by immediate and narrow:
9ee6e8bb 5127 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5128 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5129 if (rm & 1) {
5130 return 1;
5131 }
9ee6e8bb
PB
5132 shift = shift - (1 << (size + 3));
5133 size++;
92cdfaeb 5134 if (size == 3) {
a7812ae4 5135 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5136 neon_load_reg64(cpu_V0, rm);
5137 neon_load_reg64(cpu_V1, rm + 1);
5138 for (pass = 0; pass < 2; pass++) {
5139 TCGv_i64 in;
5140 if (pass == 0) {
5141 in = cpu_V0;
5142 } else {
5143 in = cpu_V1;
5144 }
ad69471c 5145 if (q) {
0b36f4cd 5146 if (input_unsigned) {
92cdfaeb 5147 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5148 } else {
92cdfaeb 5149 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5150 }
ad69471c 5151 } else {
0b36f4cd 5152 if (input_unsigned) {
92cdfaeb 5153 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5154 } else {
92cdfaeb 5155 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5156 }
ad69471c 5157 }
7d1b0095 5158 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5159 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5160 neon_store_reg(rd, pass, tmp);
5161 } /* for pass */
5162 tcg_temp_free_i64(tmp64);
5163 } else {
5164 if (size == 1) {
5165 imm = (uint16_t)shift;
5166 imm |= imm << 16;
2c0262af 5167 } else {
92cdfaeb
PM
5168 /* size == 2 */
5169 imm = (uint32_t)shift;
5170 }
5171 tmp2 = tcg_const_i32(imm);
5172 tmp4 = neon_load_reg(rm + 1, 0);
5173 tmp5 = neon_load_reg(rm + 1, 1);
5174 for (pass = 0; pass < 2; pass++) {
5175 if (pass == 0) {
5176 tmp = neon_load_reg(rm, 0);
5177 } else {
5178 tmp = tmp4;
5179 }
0b36f4cd
CL
5180 gen_neon_shift_narrow(size, tmp, tmp2, q,
5181 input_unsigned);
92cdfaeb
PM
5182 if (pass == 0) {
5183 tmp3 = neon_load_reg(rm, 1);
5184 } else {
5185 tmp3 = tmp5;
5186 }
0b36f4cd
CL
5187 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5188 input_unsigned);
36aa55dc 5189 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5190 tcg_temp_free_i32(tmp);
5191 tcg_temp_free_i32(tmp3);
5192 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5193 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5194 neon_store_reg(rd, pass, tmp);
5195 } /* for pass */
c6067f04 5196 tcg_temp_free_i32(tmp2);
b75263d6 5197 }
9ee6e8bb 5198 } else if (op == 10) {
cc13115b
PM
5199 /* VSHLL, VMOVL */
5200 if (q || (rd & 1)) {
9ee6e8bb 5201 return 1;
cc13115b 5202 }
ad69471c
PB
5203 tmp = neon_load_reg(rm, 0);
5204 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5205 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5206 if (pass == 1)
5207 tmp = tmp2;
5208
5209 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5210
9ee6e8bb
PB
5211 if (shift != 0) {
5212 /* The shift is less than the width of the source
ad69471c
PB
5213 type, so we can just shift the whole register. */
5214 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5215 /* Widen the result of shift: we need to clear
5216 * the potential overflow bits resulting from
5217 * left bits of the narrow input appearing as
5218 * right bits of left the neighbour narrow
5219 * input. */
ad69471c
PB
5220 if (size < 2 || !u) {
5221 uint64_t imm64;
5222 if (size == 0) {
5223 imm = (0xffu >> (8 - shift));
5224 imm |= imm << 16;
acdf01ef 5225 } else if (size == 1) {
ad69471c 5226 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5227 } else {
5228 /* size == 2 */
5229 imm = 0xffffffff >> (32 - shift);
5230 }
5231 if (size < 2) {
5232 imm64 = imm | (((uint64_t)imm) << 32);
5233 } else {
5234 imm64 = imm;
9ee6e8bb 5235 }
acdf01ef 5236 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5237 }
5238 }
ad69471c 5239 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5240 }
f73534a5 5241 } else if (op >= 14) {
9ee6e8bb 5242 /* VCVT fixed-point. */
cc13115b
PM
5243 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5244 return 1;
5245 }
f73534a5
PM
5246 /* We have already masked out the must-be-1 top bit of imm6,
5247 * hence this 32-shift where the ARM ARM has 64-imm6.
5248 */
5249 shift = 32 - shift;
9ee6e8bb 5250 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5251 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5252 if (!(op & 1)) {
9ee6e8bb 5253 if (u)
5500b06c 5254 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5255 else
5500b06c 5256 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5257 } else {
5258 if (u)
5500b06c 5259 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5260 else
5500b06c 5261 gen_vfp_tosl(0, shift, 1);
2c0262af 5262 }
4373f3ce 5263 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5264 }
5265 } else {
9ee6e8bb
PB
5266 return 1;
5267 }
5268 } else { /* (insn & 0x00380080) == 0 */
5269 int invert;
7d80fee5
PM
5270 if (q && (rd & 1)) {
5271 return 1;
5272 }
9ee6e8bb
PB
5273
5274 op = (insn >> 8) & 0xf;
5275 /* One register and immediate. */
5276 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5277 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5278 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5279 * We choose to not special-case this and will behave as if a
5280 * valid constant encoding of 0 had been given.
5281 */
9ee6e8bb
PB
5282 switch (op) {
5283 case 0: case 1:
5284 /* no-op */
5285 break;
5286 case 2: case 3:
5287 imm <<= 8;
5288 break;
5289 case 4: case 5:
5290 imm <<= 16;
5291 break;
5292 case 6: case 7:
5293 imm <<= 24;
5294 break;
5295 case 8: case 9:
5296 imm |= imm << 16;
5297 break;
5298 case 10: case 11:
5299 imm = (imm << 8) | (imm << 24);
5300 break;
5301 case 12:
8e31209e 5302 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5303 break;
5304 case 13:
5305 imm = (imm << 16) | 0xffff;
5306 break;
5307 case 14:
5308 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5309 if (invert)
5310 imm = ~imm;
5311 break;
5312 case 15:
7d80fee5
PM
5313 if (invert) {
5314 return 1;
5315 }
9ee6e8bb
PB
5316 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5317 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5318 break;
5319 }
5320 if (invert)
5321 imm = ~imm;
5322
9ee6e8bb
PB
5323 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5324 if (op & 1 && op < 12) {
ad69471c 5325 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5326 if (invert) {
5327 /* The immediate value has already been inverted, so
5328 BIC becomes AND. */
ad69471c 5329 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5330 } else {
ad69471c 5331 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5332 }
9ee6e8bb 5333 } else {
ad69471c 5334 /* VMOV, VMVN. */
7d1b0095 5335 tmp = tcg_temp_new_i32();
9ee6e8bb 5336 if (op == 14 && invert) {
a5a14945 5337 int n;
ad69471c
PB
5338 uint32_t val;
5339 val = 0;
9ee6e8bb
PB
5340 for (n = 0; n < 4; n++) {
5341 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5342 val |= 0xff << (n * 8);
9ee6e8bb 5343 }
ad69471c
PB
5344 tcg_gen_movi_i32(tmp, val);
5345 } else {
5346 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5347 }
9ee6e8bb 5348 }
ad69471c 5349 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5350 }
5351 }
e4b3861d 5352 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5353 if (size != 3) {
5354 op = (insn >> 8) & 0xf;
5355 if ((insn & (1 << 6)) == 0) {
5356 /* Three registers of different lengths. */
5357 int src1_wide;
5358 int src2_wide;
5359 int prewiden;
695272dc
PM
5360 /* undefreq: bit 0 : UNDEF if size != 0
5361 * bit 1 : UNDEF if size == 0
5362 * bit 2 : UNDEF if U == 1
5363 * Note that [1:0] set implies 'always UNDEF'
5364 */
5365 int undefreq;
5366 /* prewiden, src1_wide, src2_wide, undefreq */
5367 static const int neon_3reg_wide[16][4] = {
5368 {1, 0, 0, 0}, /* VADDL */
5369 {1, 1, 0, 0}, /* VADDW */
5370 {1, 0, 0, 0}, /* VSUBL */
5371 {1, 1, 0, 0}, /* VSUBW */
5372 {0, 1, 1, 0}, /* VADDHN */
5373 {0, 0, 0, 0}, /* VABAL */
5374 {0, 1, 1, 0}, /* VSUBHN */
5375 {0, 0, 0, 0}, /* VABDL */
5376 {0, 0, 0, 0}, /* VMLAL */
5377 {0, 0, 0, 6}, /* VQDMLAL */
5378 {0, 0, 0, 0}, /* VMLSL */
5379 {0, 0, 0, 6}, /* VQDMLSL */
5380 {0, 0, 0, 0}, /* Integer VMULL */
5381 {0, 0, 0, 2}, /* VQDMULL */
5382 {0, 0, 0, 5}, /* Polynomial VMULL */
5383 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5384 };
5385
5386 prewiden = neon_3reg_wide[op][0];
5387 src1_wide = neon_3reg_wide[op][1];
5388 src2_wide = neon_3reg_wide[op][2];
695272dc 5389 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5390
695272dc
PM
5391 if (((undefreq & 1) && (size != 0)) ||
5392 ((undefreq & 2) && (size == 0)) ||
5393 ((undefreq & 4) && u)) {
5394 return 1;
5395 }
5396 if ((src1_wide && (rn & 1)) ||
5397 (src2_wide && (rm & 1)) ||
5398 (!src2_wide && (rd & 1))) {
ad69471c 5399 return 1;
695272dc 5400 }
ad69471c 5401
9ee6e8bb
PB
5402 /* Avoid overlapping operands. Wide source operands are
5403 always aligned so will never overlap with wide
5404 destinations in problematic ways. */
8f8e3aa4 5405 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5406 tmp = neon_load_reg(rm, 1);
5407 neon_store_scratch(2, tmp);
8f8e3aa4 5408 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5409 tmp = neon_load_reg(rn, 1);
5410 neon_store_scratch(2, tmp);
9ee6e8bb 5411 }
39d5492a 5412 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5413 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5414 if (src1_wide) {
5415 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5416 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5417 } else {
ad69471c 5418 if (pass == 1 && rd == rn) {
dd8fbd78 5419 tmp = neon_load_scratch(2);
9ee6e8bb 5420 } else {
ad69471c
PB
5421 tmp = neon_load_reg(rn, pass);
5422 }
5423 if (prewiden) {
5424 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5425 }
5426 }
ad69471c
PB
5427 if (src2_wide) {
5428 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5429 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5430 } else {
ad69471c 5431 if (pass == 1 && rd == rm) {
dd8fbd78 5432 tmp2 = neon_load_scratch(2);
9ee6e8bb 5433 } else {
ad69471c
PB
5434 tmp2 = neon_load_reg(rm, pass);
5435 }
5436 if (prewiden) {
5437 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5438 }
9ee6e8bb
PB
5439 }
5440 switch (op) {
5441 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5442 gen_neon_addl(size);
9ee6e8bb 5443 break;
79b0e534 5444 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5445 gen_neon_subl(size);
9ee6e8bb
PB
5446 break;
5447 case 5: case 7: /* VABAL, VABDL */
5448 switch ((size << 1) | u) {
ad69471c
PB
5449 case 0:
5450 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5451 break;
5452 case 1:
5453 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5454 break;
5455 case 2:
5456 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5457 break;
5458 case 3:
5459 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5460 break;
5461 case 4:
5462 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5463 break;
5464 case 5:
5465 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5466 break;
9ee6e8bb
PB
5467 default: abort();
5468 }
7d1b0095
PM
5469 tcg_temp_free_i32(tmp2);
5470 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5471 break;
5472 case 8: case 9: case 10: case 11: case 12: case 13:
5473 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5474 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5475 break;
5476 case 14: /* Polynomial VMULL */
e5ca24cb 5477 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5478 tcg_temp_free_i32(tmp2);
5479 tcg_temp_free_i32(tmp);
e5ca24cb 5480 break;
695272dc
PM
5481 default: /* 15 is RESERVED: caught earlier */
5482 abort();
9ee6e8bb 5483 }
ebcd88ce
PM
5484 if (op == 13) {
5485 /* VQDMULL */
5486 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5487 neon_store_reg64(cpu_V0, rd + pass);
5488 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5489 /* Accumulate. */
ebcd88ce 5490 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5491 switch (op) {
4dc064e6
PM
5492 case 10: /* VMLSL */
5493 gen_neon_negl(cpu_V0, size);
5494 /* Fall through */
5495 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5496 gen_neon_addl(size);
9ee6e8bb
PB
5497 break;
5498 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5499 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5500 if (op == 11) {
5501 gen_neon_negl(cpu_V0, size);
5502 }
ad69471c
PB
5503 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5504 break;
9ee6e8bb
PB
5505 default:
5506 abort();
5507 }
ad69471c 5508 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5509 } else if (op == 4 || op == 6) {
5510 /* Narrowing operation. */
7d1b0095 5511 tmp = tcg_temp_new_i32();
79b0e534 5512 if (!u) {
9ee6e8bb 5513 switch (size) {
ad69471c
PB
5514 case 0:
5515 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5516 break;
5517 case 1:
5518 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5519 break;
5520 case 2:
5521 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5522 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5523 break;
9ee6e8bb
PB
5524 default: abort();
5525 }
5526 } else {
5527 switch (size) {
ad69471c
PB
5528 case 0:
5529 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5530 break;
5531 case 1:
5532 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5533 break;
5534 case 2:
5535 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5536 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5537 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5538 break;
9ee6e8bb
PB
5539 default: abort();
5540 }
5541 }
ad69471c
PB
5542 if (pass == 0) {
5543 tmp3 = tmp;
5544 } else {
5545 neon_store_reg(rd, 0, tmp3);
5546 neon_store_reg(rd, 1, tmp);
5547 }
9ee6e8bb
PB
5548 } else {
5549 /* Write back the result. */
ad69471c 5550 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5551 }
5552 }
5553 } else {
3e3326df
PM
5554 /* Two registers and a scalar. NB that for ops of this form
5555 * the ARM ARM labels bit 24 as Q, but it is in our variable
5556 * 'u', not 'q'.
5557 */
5558 if (size == 0) {
5559 return 1;
5560 }
9ee6e8bb 5561 switch (op) {
9ee6e8bb 5562 case 1: /* Float VMLA scalar */
9ee6e8bb 5563 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5564 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5565 if (size == 1) {
5566 return 1;
5567 }
5568 /* fall through */
5569 case 0: /* Integer VMLA scalar */
5570 case 4: /* Integer VMLS scalar */
5571 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5572 case 12: /* VQDMULH scalar */
5573 case 13: /* VQRDMULH scalar */
3e3326df
PM
5574 if (u && ((rd | rn) & 1)) {
5575 return 1;
5576 }
dd8fbd78
FN
5577 tmp = neon_get_scalar(size, rm);
5578 neon_store_scratch(0, tmp);
9ee6e8bb 5579 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5580 tmp = neon_load_scratch(0);
5581 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5582 if (op == 12) {
5583 if (size == 1) {
02da0b2d 5584 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5585 } else {
02da0b2d 5586 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5587 }
5588 } else if (op == 13) {
5589 if (size == 1) {
02da0b2d 5590 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5591 } else {
02da0b2d 5592 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5593 }
5594 } else if (op & 1) {
aa47cfdd
PM
5595 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5596 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5597 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5598 } else {
5599 switch (size) {
dd8fbd78
FN
5600 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5601 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5602 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5603 default: abort();
9ee6e8bb
PB
5604 }
5605 }
7d1b0095 5606 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5607 if (op < 8) {
5608 /* Accumulate. */
dd8fbd78 5609 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5610 switch (op) {
5611 case 0:
dd8fbd78 5612 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5613 break;
5614 case 1:
aa47cfdd
PM
5615 {
5616 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5617 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5618 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5619 break;
aa47cfdd 5620 }
9ee6e8bb 5621 case 4:
dd8fbd78 5622 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5623 break;
5624 case 5:
aa47cfdd
PM
5625 {
5626 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5627 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5628 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5629 break;
aa47cfdd 5630 }
9ee6e8bb
PB
5631 default:
5632 abort();
5633 }
7d1b0095 5634 tcg_temp_free_i32(tmp2);
9ee6e8bb 5635 }
dd8fbd78 5636 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5637 }
5638 break;
9ee6e8bb 5639 case 3: /* VQDMLAL scalar */
9ee6e8bb 5640 case 7: /* VQDMLSL scalar */
9ee6e8bb 5641 case 11: /* VQDMULL scalar */
3e3326df 5642 if (u == 1) {
ad69471c 5643 return 1;
3e3326df
PM
5644 }
5645 /* fall through */
5646 case 2: /* VMLAL sclar */
5647 case 6: /* VMLSL scalar */
5648 case 10: /* VMULL scalar */
5649 if (rd & 1) {
5650 return 1;
5651 }
dd8fbd78 5652 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5653 /* We need a copy of tmp2 because gen_neon_mull
5654 * deletes it during pass 0. */
7d1b0095 5655 tmp4 = tcg_temp_new_i32();
c6067f04 5656 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5657 tmp3 = neon_load_reg(rn, 1);
ad69471c 5658
9ee6e8bb 5659 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5660 if (pass == 0) {
5661 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5662 } else {
dd8fbd78 5663 tmp = tmp3;
c6067f04 5664 tmp2 = tmp4;
9ee6e8bb 5665 }
ad69471c 5666 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5667 if (op != 11) {
5668 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5669 }
9ee6e8bb 5670 switch (op) {
4dc064e6
PM
5671 case 6:
5672 gen_neon_negl(cpu_V0, size);
5673 /* Fall through */
5674 case 2:
ad69471c 5675 gen_neon_addl(size);
9ee6e8bb
PB
5676 break;
5677 case 3: case 7:
ad69471c 5678 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5679 if (op == 7) {
5680 gen_neon_negl(cpu_V0, size);
5681 }
ad69471c 5682 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5683 break;
5684 case 10:
5685 /* no-op */
5686 break;
5687 case 11:
ad69471c 5688 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5689 break;
5690 default:
5691 abort();
5692 }
ad69471c 5693 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5694 }
dd8fbd78 5695
dd8fbd78 5696
9ee6e8bb
PB
5697 break;
5698 default: /* 14 and 15 are RESERVED */
5699 return 1;
5700 }
5701 }
5702 } else { /* size == 3 */
5703 if (!u) {
5704 /* Extract. */
9ee6e8bb 5705 imm = (insn >> 8) & 0xf;
ad69471c
PB
5706
5707 if (imm > 7 && !q)
5708 return 1;
5709
52579ea1
PM
5710 if (q && ((rd | rn | rm) & 1)) {
5711 return 1;
5712 }
5713
ad69471c
PB
5714 if (imm == 0) {
5715 neon_load_reg64(cpu_V0, rn);
5716 if (q) {
5717 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5718 }
ad69471c
PB
5719 } else if (imm == 8) {
5720 neon_load_reg64(cpu_V0, rn + 1);
5721 if (q) {
5722 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5723 }
ad69471c 5724 } else if (q) {
a7812ae4 5725 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5726 if (imm < 8) {
5727 neon_load_reg64(cpu_V0, rn);
a7812ae4 5728 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5729 } else {
5730 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5731 neon_load_reg64(tmp64, rm);
ad69471c
PB
5732 }
5733 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5734 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5735 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5736 if (imm < 8) {
5737 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5738 } else {
ad69471c
PB
5739 neon_load_reg64(cpu_V1, rm + 1);
5740 imm -= 8;
9ee6e8bb 5741 }
ad69471c 5742 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5743 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5744 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5745 tcg_temp_free_i64(tmp64);
ad69471c 5746 } else {
a7812ae4 5747 /* BUGFIX */
ad69471c 5748 neon_load_reg64(cpu_V0, rn);
a7812ae4 5749 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5750 neon_load_reg64(cpu_V1, rm);
a7812ae4 5751 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5752 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5753 }
5754 neon_store_reg64(cpu_V0, rd);
5755 if (q) {
5756 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5757 }
5758 } else if ((insn & (1 << 11)) == 0) {
5759 /* Two register misc. */
5760 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5761 size = (insn >> 18) & 3;
600b828c
PM
5762 /* UNDEF for unknown op values and bad op-size combinations */
5763 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5764 return 1;
5765 }
fc2a9b37
PM
5766 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5767 q && ((rm | rd) & 1)) {
5768 return 1;
5769 }
9ee6e8bb 5770 switch (op) {
600b828c 5771 case NEON_2RM_VREV64:
9ee6e8bb 5772 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5773 tmp = neon_load_reg(rm, pass * 2);
5774 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5775 switch (size) {
dd8fbd78
FN
5776 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5777 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5778 case 2: /* no-op */ break;
5779 default: abort();
5780 }
dd8fbd78 5781 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5782 if (size == 2) {
dd8fbd78 5783 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5784 } else {
9ee6e8bb 5785 switch (size) {
dd8fbd78
FN
5786 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5787 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5788 default: abort();
5789 }
dd8fbd78 5790 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5791 }
5792 }
5793 break;
600b828c
PM
5794 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5795 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5796 for (pass = 0; pass < q + 1; pass++) {
5797 tmp = neon_load_reg(rm, pass * 2);
5798 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5799 tmp = neon_load_reg(rm, pass * 2 + 1);
5800 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5801 switch (size) {
5802 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5803 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5804 case 2: tcg_gen_add_i64(CPU_V001); break;
5805 default: abort();
5806 }
600b828c 5807 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5808 /* Accumulate. */
ad69471c
PB
5809 neon_load_reg64(cpu_V1, rd + pass);
5810 gen_neon_addl(size);
9ee6e8bb 5811 }
ad69471c 5812 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5813 }
5814 break;
600b828c 5815 case NEON_2RM_VTRN:
9ee6e8bb 5816 if (size == 2) {
a5a14945 5817 int n;
9ee6e8bb 5818 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5819 tmp = neon_load_reg(rm, n);
5820 tmp2 = neon_load_reg(rd, n + 1);
5821 neon_store_reg(rm, n, tmp2);
5822 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5823 }
5824 } else {
5825 goto elementwise;
5826 }
5827 break;
600b828c 5828 case NEON_2RM_VUZP:
02acedf9 5829 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5830 return 1;
9ee6e8bb
PB
5831 }
5832 break;
600b828c 5833 case NEON_2RM_VZIP:
d68a6f3a 5834 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5835 return 1;
9ee6e8bb
PB
5836 }
5837 break;
600b828c
PM
5838 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5839 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5840 if (rm & 1) {
5841 return 1;
5842 }
39d5492a 5843 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5844 for (pass = 0; pass < 2; pass++) {
ad69471c 5845 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5846 tmp = tcg_temp_new_i32();
600b828c
PM
5847 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5848 tmp, cpu_V0);
ad69471c
PB
5849 if (pass == 0) {
5850 tmp2 = tmp;
5851 } else {
5852 neon_store_reg(rd, 0, tmp2);
5853 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5854 }
9ee6e8bb
PB
5855 }
5856 break;
600b828c 5857 case NEON_2RM_VSHLL:
fc2a9b37 5858 if (q || (rd & 1)) {
9ee6e8bb 5859 return 1;
600b828c 5860 }
ad69471c
PB
5861 tmp = neon_load_reg(rm, 0);
5862 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5863 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5864 if (pass == 1)
5865 tmp = tmp2;
5866 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5867 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5868 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5869 }
5870 break;
600b828c 5871 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5872 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5873 q || (rm & 1)) {
5874 return 1;
5875 }
7d1b0095
PM
5876 tmp = tcg_temp_new_i32();
5877 tmp2 = tcg_temp_new_i32();
60011498 5878 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5879 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5880 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5881 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5882 tcg_gen_shli_i32(tmp2, tmp2, 16);
5883 tcg_gen_or_i32(tmp2, tmp2, tmp);
5884 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5885 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5886 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5887 neon_store_reg(rd, 0, tmp2);
7d1b0095 5888 tmp2 = tcg_temp_new_i32();
2d981da7 5889 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5890 tcg_gen_shli_i32(tmp2, tmp2, 16);
5891 tcg_gen_or_i32(tmp2, tmp2, tmp);
5892 neon_store_reg(rd, 1, tmp2);
7d1b0095 5893 tcg_temp_free_i32(tmp);
60011498 5894 break;
600b828c 5895 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5896 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5897 q || (rd & 1)) {
5898 return 1;
5899 }
7d1b0095 5900 tmp3 = tcg_temp_new_i32();
60011498
PB
5901 tmp = neon_load_reg(rm, 0);
5902 tmp2 = neon_load_reg(rm, 1);
5903 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5904 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5905 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5906 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5907 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5908 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5909 tcg_temp_free_i32(tmp);
60011498 5910 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5911 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5912 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5913 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5914 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5915 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5916 tcg_temp_free_i32(tmp2);
5917 tcg_temp_free_i32(tmp3);
60011498 5918 break;
9ee6e8bb
PB
5919 default:
5920 elementwise:
5921 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5922 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5923 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5924 neon_reg_offset(rm, pass));
39d5492a 5925 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5926 } else {
dd8fbd78 5927 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5928 }
5929 switch (op) {
600b828c 5930 case NEON_2RM_VREV32:
9ee6e8bb 5931 switch (size) {
dd8fbd78
FN
5932 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5933 case 1: gen_swap_half(tmp); break;
600b828c 5934 default: abort();
9ee6e8bb
PB
5935 }
5936 break;
600b828c 5937 case NEON_2RM_VREV16:
dd8fbd78 5938 gen_rev16(tmp);
9ee6e8bb 5939 break;
600b828c 5940 case NEON_2RM_VCLS:
9ee6e8bb 5941 switch (size) {
dd8fbd78
FN
5942 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5943 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5944 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5945 default: abort();
9ee6e8bb
PB
5946 }
5947 break;
600b828c 5948 case NEON_2RM_VCLZ:
9ee6e8bb 5949 switch (size) {
dd8fbd78
FN
5950 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5951 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5952 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5953 default: abort();
9ee6e8bb
PB
5954 }
5955 break;
600b828c 5956 case NEON_2RM_VCNT:
dd8fbd78 5957 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5958 break;
600b828c 5959 case NEON_2RM_VMVN:
dd8fbd78 5960 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 5961 break;
600b828c 5962 case NEON_2RM_VQABS:
9ee6e8bb 5963 switch (size) {
02da0b2d
PM
5964 case 0:
5965 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
5966 break;
5967 case 1:
5968 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
5969 break;
5970 case 2:
5971 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
5972 break;
600b828c 5973 default: abort();
9ee6e8bb
PB
5974 }
5975 break;
600b828c 5976 case NEON_2RM_VQNEG:
9ee6e8bb 5977 switch (size) {
02da0b2d
PM
5978 case 0:
5979 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
5980 break;
5981 case 1:
5982 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
5983 break;
5984 case 2:
5985 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
5986 break;
600b828c 5987 default: abort();
9ee6e8bb
PB
5988 }
5989 break;
600b828c 5990 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 5991 tmp2 = tcg_const_i32(0);
9ee6e8bb 5992 switch(size) {
dd8fbd78
FN
5993 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5994 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5995 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 5996 default: abort();
9ee6e8bb 5997 }
39d5492a 5998 tcg_temp_free_i32(tmp2);
600b828c 5999 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6000 tcg_gen_not_i32(tmp, tmp);
600b828c 6001 }
9ee6e8bb 6002 break;
600b828c 6003 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6004 tmp2 = tcg_const_i32(0);
9ee6e8bb 6005 switch(size) {
dd8fbd78
FN
6006 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6007 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6008 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6009 default: abort();
9ee6e8bb 6010 }
39d5492a 6011 tcg_temp_free_i32(tmp2);
600b828c 6012 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6013 tcg_gen_not_i32(tmp, tmp);
600b828c 6014 }
9ee6e8bb 6015 break;
600b828c 6016 case NEON_2RM_VCEQ0:
dd8fbd78 6017 tmp2 = tcg_const_i32(0);
9ee6e8bb 6018 switch(size) {
dd8fbd78
FN
6019 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6020 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6021 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6022 default: abort();
9ee6e8bb 6023 }
39d5492a 6024 tcg_temp_free_i32(tmp2);
9ee6e8bb 6025 break;
600b828c 6026 case NEON_2RM_VABS:
9ee6e8bb 6027 switch(size) {
dd8fbd78
FN
6028 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6029 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6030 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6031 default: abort();
9ee6e8bb
PB
6032 }
6033 break;
600b828c 6034 case NEON_2RM_VNEG:
dd8fbd78
FN
6035 tmp2 = tcg_const_i32(0);
6036 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6037 tcg_temp_free_i32(tmp2);
9ee6e8bb 6038 break;
600b828c 6039 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6040 {
6041 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6042 tmp2 = tcg_const_i32(0);
aa47cfdd 6043 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6044 tcg_temp_free_i32(tmp2);
aa47cfdd 6045 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6046 break;
aa47cfdd 6047 }
600b828c 6048 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6049 {
6050 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6051 tmp2 = tcg_const_i32(0);
aa47cfdd 6052 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6053 tcg_temp_free_i32(tmp2);
aa47cfdd 6054 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6055 break;
aa47cfdd 6056 }
600b828c 6057 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6058 {
6059 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6060 tmp2 = tcg_const_i32(0);
aa47cfdd 6061 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6062 tcg_temp_free_i32(tmp2);
aa47cfdd 6063 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6064 break;
aa47cfdd 6065 }
600b828c 6066 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6067 {
6068 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6069 tmp2 = tcg_const_i32(0);
aa47cfdd 6070 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6071 tcg_temp_free_i32(tmp2);
aa47cfdd 6072 tcg_temp_free_ptr(fpstatus);
0e326109 6073 break;
aa47cfdd 6074 }
600b828c 6075 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6076 {
6077 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6078 tmp2 = tcg_const_i32(0);
aa47cfdd 6079 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6080 tcg_temp_free_i32(tmp2);
aa47cfdd 6081 tcg_temp_free_ptr(fpstatus);
0e326109 6082 break;
aa47cfdd 6083 }
600b828c 6084 case NEON_2RM_VABS_F:
4373f3ce 6085 gen_vfp_abs(0);
9ee6e8bb 6086 break;
600b828c 6087 case NEON_2RM_VNEG_F:
4373f3ce 6088 gen_vfp_neg(0);
9ee6e8bb 6089 break;
600b828c 6090 case NEON_2RM_VSWP:
dd8fbd78
FN
6091 tmp2 = neon_load_reg(rd, pass);
6092 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6093 break;
600b828c 6094 case NEON_2RM_VTRN:
dd8fbd78 6095 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6096 switch (size) {
dd8fbd78
FN
6097 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6098 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6099 default: abort();
9ee6e8bb 6100 }
dd8fbd78 6101 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6102 break;
600b828c 6103 case NEON_2RM_VRECPE:
dd8fbd78 6104 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6105 break;
600b828c 6106 case NEON_2RM_VRSQRTE:
dd8fbd78 6107 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6108 break;
600b828c 6109 case NEON_2RM_VRECPE_F:
4373f3ce 6110 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6111 break;
600b828c 6112 case NEON_2RM_VRSQRTE_F:
4373f3ce 6113 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6114 break;
600b828c 6115 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6116 gen_vfp_sito(0, 1);
9ee6e8bb 6117 break;
600b828c 6118 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6119 gen_vfp_uito(0, 1);
9ee6e8bb 6120 break;
600b828c 6121 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6122 gen_vfp_tosiz(0, 1);
9ee6e8bb 6123 break;
600b828c 6124 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6125 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6126 break;
6127 default:
600b828c
PM
6128 /* Reserved op values were caught by the
6129 * neon_2rm_sizes[] check earlier.
6130 */
6131 abort();
9ee6e8bb 6132 }
600b828c 6133 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6134 tcg_gen_st_f32(cpu_F0s, cpu_env,
6135 neon_reg_offset(rd, pass));
9ee6e8bb 6136 } else {
dd8fbd78 6137 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6138 }
6139 }
6140 break;
6141 }
6142 } else if ((insn & (1 << 10)) == 0) {
6143 /* VTBL, VTBX. */
56907d77
PM
6144 int n = ((insn >> 8) & 3) + 1;
6145 if ((rn + n) > 32) {
6146 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6147 * helper function running off the end of the register file.
6148 */
6149 return 1;
6150 }
6151 n <<= 3;
9ee6e8bb 6152 if (insn & (1 << 6)) {
8f8e3aa4 6153 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6154 } else {
7d1b0095 6155 tmp = tcg_temp_new_i32();
8f8e3aa4 6156 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6157 }
8f8e3aa4 6158 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6159 tmp4 = tcg_const_i32(rn);
6160 tmp5 = tcg_const_i32(n);
9ef39277 6161 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6162 tcg_temp_free_i32(tmp);
9ee6e8bb 6163 if (insn & (1 << 6)) {
8f8e3aa4 6164 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6165 } else {
7d1b0095 6166 tmp = tcg_temp_new_i32();
8f8e3aa4 6167 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6168 }
8f8e3aa4 6169 tmp3 = neon_load_reg(rm, 1);
9ef39277 6170 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6171 tcg_temp_free_i32(tmp5);
6172 tcg_temp_free_i32(tmp4);
8f8e3aa4 6173 neon_store_reg(rd, 0, tmp2);
3018f259 6174 neon_store_reg(rd, 1, tmp3);
7d1b0095 6175 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6176 } else if ((insn & 0x380) == 0) {
6177 /* VDUP */
133da6aa
JR
6178 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6179 return 1;
6180 }
9ee6e8bb 6181 if (insn & (1 << 19)) {
dd8fbd78 6182 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6183 } else {
dd8fbd78 6184 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6185 }
6186 if (insn & (1 << 16)) {
dd8fbd78 6187 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6188 } else if (insn & (1 << 17)) {
6189 if ((insn >> 18) & 1)
dd8fbd78 6190 gen_neon_dup_high16(tmp);
9ee6e8bb 6191 else
dd8fbd78 6192 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6193 }
6194 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6195 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6196 tcg_gen_mov_i32(tmp2, tmp);
6197 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6198 }
7d1b0095 6199 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6200 } else {
6201 return 1;
6202 }
6203 }
6204 }
6205 return 0;
6206}
6207
0ecb72a5 6208static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6209{
4b6a83fb
PM
6210 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6211 const ARMCPRegInfo *ri;
6212 ARMCPU *cpu = arm_env_get_cpu(env);
9ee6e8bb
PB
6213
6214 cpnum = (insn >> 8) & 0xf;
6215 if (arm_feature(env, ARM_FEATURE_XSCALE)
6216 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6217 return 1;
6218
4b6a83fb 6219 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6220 switch (cpnum) {
6221 case 0:
6222 case 1:
6223 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6224 return disas_iwmmxt_insn(env, s, insn);
6225 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6226 return disas_dsp_insn(env, s, insn);
6227 }
6228 return 1;
6229 case 10:
6230 case 11:
6231 return disas_vfp_insn (env, s, insn);
4b6a83fb
PM
6232 default:
6233 break;
6234 }
6235
6236 /* Otherwise treat as a generic register access */
6237 is64 = (insn & (1 << 25)) == 0;
6238 if (!is64 && ((insn & (1 << 4)) == 0)) {
6239 /* cdp */
6240 return 1;
6241 }
6242
6243 crm = insn & 0xf;
6244 if (is64) {
6245 crn = 0;
6246 opc1 = (insn >> 4) & 0xf;
6247 opc2 = 0;
6248 rt2 = (insn >> 16) & 0xf;
6249 } else {
6250 crn = (insn >> 16) & 0xf;
6251 opc1 = (insn >> 21) & 7;
6252 opc2 = (insn >> 5) & 7;
6253 rt2 = 0;
6254 }
6255 isread = (insn >> 20) & 1;
6256 rt = (insn >> 12) & 0xf;
6257
6258 ri = get_arm_cp_reginfo(cpu,
6259 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6260 if (ri) {
6261 /* Check access permissions */
6262 if (!cp_access_ok(env, ri, isread)) {
6263 return 1;
6264 }
6265
6266 /* Handle special cases first */
6267 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6268 case ARM_CP_NOP:
6269 return 0;
6270 case ARM_CP_WFI:
6271 if (isread) {
6272 return 1;
6273 }
6274 gen_set_pc_im(s->pc);
6275 s->is_jmp = DISAS_WFI;
2bee5105 6276 return 0;
4b6a83fb
PM
6277 default:
6278 break;
6279 }
6280
6281 if (isread) {
6282 /* Read */
6283 if (is64) {
6284 TCGv_i64 tmp64;
6285 TCGv_i32 tmp;
6286 if (ri->type & ARM_CP_CONST) {
6287 tmp64 = tcg_const_i64(ri->resetvalue);
6288 } else if (ri->readfn) {
6289 TCGv_ptr tmpptr;
6290 gen_set_pc_im(s->pc);
6291 tmp64 = tcg_temp_new_i64();
6292 tmpptr = tcg_const_ptr(ri);
6293 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6294 tcg_temp_free_ptr(tmpptr);
6295 } else {
6296 tmp64 = tcg_temp_new_i64();
6297 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6298 }
6299 tmp = tcg_temp_new_i32();
6300 tcg_gen_trunc_i64_i32(tmp, tmp64);
6301 store_reg(s, rt, tmp);
6302 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6303 tmp = tcg_temp_new_i32();
4b6a83fb 6304 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6305 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6306 store_reg(s, rt2, tmp);
6307 } else {
39d5492a 6308 TCGv_i32 tmp;
4b6a83fb
PM
6309 if (ri->type & ARM_CP_CONST) {
6310 tmp = tcg_const_i32(ri->resetvalue);
6311 } else if (ri->readfn) {
6312 TCGv_ptr tmpptr;
6313 gen_set_pc_im(s->pc);
6314 tmp = tcg_temp_new_i32();
6315 tmpptr = tcg_const_ptr(ri);
6316 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6317 tcg_temp_free_ptr(tmpptr);
6318 } else {
6319 tmp = load_cpu_offset(ri->fieldoffset);
6320 }
6321 if (rt == 15) {
6322 /* Destination register of r15 for 32 bit loads sets
6323 * the condition codes from the high 4 bits of the value
6324 */
6325 gen_set_nzcv(tmp);
6326 tcg_temp_free_i32(tmp);
6327 } else {
6328 store_reg(s, rt, tmp);
6329 }
6330 }
6331 } else {
6332 /* Write */
6333 if (ri->type & ARM_CP_CONST) {
6334 /* If not forbidden by access permissions, treat as WI */
6335 return 0;
6336 }
6337
6338 if (is64) {
39d5492a 6339 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
6340 TCGv_i64 tmp64 = tcg_temp_new_i64();
6341 tmplo = load_reg(s, rt);
6342 tmphi = load_reg(s, rt2);
6343 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6344 tcg_temp_free_i32(tmplo);
6345 tcg_temp_free_i32(tmphi);
6346 if (ri->writefn) {
6347 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6348 gen_set_pc_im(s->pc);
6349 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6350 tcg_temp_free_ptr(tmpptr);
6351 } else {
6352 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6353 }
6354 tcg_temp_free_i64(tmp64);
6355 } else {
6356 if (ri->writefn) {
39d5492a 6357 TCGv_i32 tmp;
4b6a83fb
PM
6358 TCGv_ptr tmpptr;
6359 gen_set_pc_im(s->pc);
6360 tmp = load_reg(s, rt);
6361 tmpptr = tcg_const_ptr(ri);
6362 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6363 tcg_temp_free_ptr(tmpptr);
6364 tcg_temp_free_i32(tmp);
6365 } else {
39d5492a 6366 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
6367 store_cpu_offset(tmp, ri->fieldoffset);
6368 }
6369 }
6370 /* We default to ending the TB on a coprocessor register write,
6371 * but allow this to be suppressed by the register definition
6372 * (usually only necessary to work around guest bugs).
6373 */
6374 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6375 gen_lookup_tb(s);
6376 }
6377 }
6378 return 0;
6379 }
6380
4a9a539f 6381 return 1;
9ee6e8bb
PB
6382}
6383
5e3f878a
PB
6384
6385/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6386static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 6387{
39d5492a 6388 TCGv_i32 tmp;
7d1b0095 6389 tmp = tcg_temp_new_i32();
5e3f878a
PB
6390 tcg_gen_trunc_i64_i32(tmp, val);
6391 store_reg(s, rlow, tmp);
7d1b0095 6392 tmp = tcg_temp_new_i32();
5e3f878a
PB
6393 tcg_gen_shri_i64(val, val, 32);
6394 tcg_gen_trunc_i64_i32(tmp, val);
6395 store_reg(s, rhigh, tmp);
6396}
6397
6398/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6399static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6400{
a7812ae4 6401 TCGv_i64 tmp;
39d5492a 6402 TCGv_i32 tmp2;
5e3f878a 6403
36aa55dc 6404 /* Load value and extend to 64 bits. */
a7812ae4 6405 tmp = tcg_temp_new_i64();
5e3f878a
PB
6406 tmp2 = load_reg(s, rlow);
6407 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6408 tcg_temp_free_i32(tmp2);
5e3f878a 6409 tcg_gen_add_i64(val, val, tmp);
b75263d6 6410 tcg_temp_free_i64(tmp);
5e3f878a
PB
6411}
6412
6413/* load and add a 64-bit value from a register pair. */
a7812ae4 6414static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6415{
a7812ae4 6416 TCGv_i64 tmp;
39d5492a
PM
6417 TCGv_i32 tmpl;
6418 TCGv_i32 tmph;
5e3f878a
PB
6419
6420 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6421 tmpl = load_reg(s, rlow);
6422 tmph = load_reg(s, rhigh);
a7812ae4 6423 tmp = tcg_temp_new_i64();
36aa55dc 6424 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6425 tcg_temp_free_i32(tmpl);
6426 tcg_temp_free_i32(tmph);
5e3f878a 6427 tcg_gen_add_i64(val, val, tmp);
b75263d6 6428 tcg_temp_free_i64(tmp);
5e3f878a
PB
6429}
6430
c9f10124 6431/* Set N and Z flags from hi|lo. */
39d5492a 6432static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 6433{
c9f10124
RH
6434 tcg_gen_mov_i32(cpu_NF, hi);
6435 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
6436}
6437
426f5abc
PB
6438/* Load/Store exclusive instructions are implemented by remembering
6439 the value/address loaded, and seeing if these are the same
b90372ad 6440 when the store is performed. This should be sufficient to implement
426f5abc
PB
6441 the architecturally mandated semantics, and avoids having to monitor
6442 regular stores.
6443
6444 In system emulation mode only one CPU will be running at once, so
6445 this sequence is effectively atomic. In user emulation mode we
6446 throw an exception and handle the atomic operation elsewhere. */
6447static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 6448 TCGv_i32 addr, int size)
426f5abc 6449{
94ee24e7 6450 TCGv_i32 tmp = tcg_temp_new_i32();
426f5abc
PB
6451
6452 switch (size) {
6453 case 0:
94ee24e7 6454 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6455 break;
6456 case 1:
94ee24e7 6457 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6458 break;
6459 case 2:
6460 case 3:
94ee24e7 6461 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6462 break;
6463 default:
6464 abort();
6465 }
6466 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6467 store_reg(s, rt, tmp);
6468 if (size == 3) {
39d5492a 6469 TCGv_i32 tmp2 = tcg_temp_new_i32();
2c9adbda 6470 tcg_gen_addi_i32(tmp2, addr, 4);
94ee24e7
PM
6471 tmp = tcg_temp_new_i32();
6472 tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
7d1b0095 6473 tcg_temp_free_i32(tmp2);
426f5abc
PB
6474 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6475 store_reg(s, rt2, tmp);
6476 }
6477 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6478}
6479
6480static void gen_clrex(DisasContext *s)
6481{
6482 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6483}
6484
6485#ifdef CONFIG_USER_ONLY
6486static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6487 TCGv_i32 addr, int size)
426f5abc
PB
6488{
6489 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6490 tcg_gen_movi_i32(cpu_exclusive_info,
6491 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6492 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6493}
6494#else
/* System-mode STREX/STREXD: re-load the memory at @addr, compare it with
 * the value recorded by the matching LDREX, and only perform the store if
 * both the address and the value still match.  rd receives 0 on success,
 * 1 on failure.  The exclusive monitor is cleared in either case.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Fail immediately if the monitor is armed for a different address. */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    /* Re-load the current memory contents at the recorded width... */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* ...and fail if it no longer matches the value LDREX saw. */
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* STREXD: also check the second word against exclusive_high. */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    /* All checks passed: commit the store of rt (and rt2 for STREXD). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* NOTE: this clobbers addr (advances it by 4). */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Success or failure, the exclusive monitor is now disarmed. */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6566#endif
6567
81465888
PM
6568/* gen_srs:
6569 * @env: CPUARMState
6570 * @s: DisasContext
6571 * @mode: mode field from insn (which stack to store to)
6572 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
6573 * @writeback: true if writeback bit set
6574 *
6575 * Generate code for the SRS (Store Return State) insn.
6576 */
6577static void gen_srs(DisasContext *s,
6578 uint32_t mode, uint32_t amode, bool writeback)
6579{
6580 int32_t offset;
6581 TCGv_i32 addr = tcg_temp_new_i32();
6582 TCGv_i32 tmp = tcg_const_i32(mode);
6583 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6584 tcg_temp_free_i32(tmp);
6585 switch (amode) {
6586 case 0: /* DA */
6587 offset = -4;
6588 break;
6589 case 1: /* IA */
6590 offset = 0;
6591 break;
6592 case 2: /* DB */
6593 offset = -8;
6594 break;
6595 case 3: /* IB */
6596 offset = 4;
6597 break;
6598 default:
6599 abort();
6600 }
6601 tcg_gen_addi_i32(addr, addr, offset);
6602 tmp = load_reg(s, 14);
5a839c0d
PM
6603 tcg_gen_qemu_st32(tmp, addr, 0);
6604 tcg_temp_free_i32(tmp);
81465888
PM
6605 tmp = load_cpu_field(spsr);
6606 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d
PM
6607 tcg_gen_qemu_st32(tmp, addr, 0);
6608 tcg_temp_free_i32(tmp);
81465888
PM
6609 if (writeback) {
6610 switch (amode) {
6611 case 0:
6612 offset = -8;
6613 break;
6614 case 1:
6615 offset = 4;
6616 break;
6617 case 2:
6618 offset = -4;
6619 break;
6620 case 3:
6621 offset = 0;
6622 break;
6623 default:
6624 abort();
6625 }
6626 tcg_gen_addi_i32(addr, addr, offset);
6627 tmp = tcg_const_i32(mode);
6628 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6629 tcg_temp_free_i32(tmp);
6630 }
6631 tcg_temp_free_i32(addr);
6632}
6633
0ecb72a5 6634static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6635{
6636 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
6637 TCGv_i32 tmp;
6638 TCGv_i32 tmp2;
6639 TCGv_i32 tmp3;
6640 TCGv_i32 addr;
a7812ae4 6641 TCGv_i64 tmp64;
9ee6e8bb 6642
d31dd73e 6643 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6644 s->pc += 4;
6645
6646 /* M variants do not implement ARM mode. */
6647 if (IS_M(env))
6648 goto illegal_op;
6649 cond = insn >> 28;
6650 if (cond == 0xf){
be5e7a76
DES
6651 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6652 * choose to UNDEF. In ARMv5 and above the space is used
6653 * for miscellaneous unconditional instructions.
6654 */
6655 ARCH(5);
6656
9ee6e8bb
PB
6657 /* Unconditional instructions. */
6658 if (((insn >> 25) & 7) == 1) {
6659 /* NEON Data processing. */
6660 if (!arm_feature(env, ARM_FEATURE_NEON))
6661 goto illegal_op;
6662
6663 if (disas_neon_data_insn(env, s, insn))
6664 goto illegal_op;
6665 return;
6666 }
6667 if ((insn & 0x0f100000) == 0x04000000) {
6668 /* NEON load/store. */
6669 if (!arm_feature(env, ARM_FEATURE_NEON))
6670 goto illegal_op;
6671
6672 if (disas_neon_ls_insn(env, s, insn))
6673 goto illegal_op;
6674 return;
6675 }
3d185e5d
PM
6676 if (((insn & 0x0f30f000) == 0x0510f000) ||
6677 ((insn & 0x0f30f010) == 0x0710f000)) {
6678 if ((insn & (1 << 22)) == 0) {
6679 /* PLDW; v7MP */
6680 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6681 goto illegal_op;
6682 }
6683 }
6684 /* Otherwise PLD; v5TE+ */
be5e7a76 6685 ARCH(5TE);
3d185e5d
PM
6686 return;
6687 }
6688 if (((insn & 0x0f70f000) == 0x0450f000) ||
6689 ((insn & 0x0f70f010) == 0x0650f000)) {
6690 ARCH(7);
6691 return; /* PLI; V7 */
6692 }
6693 if (((insn & 0x0f700000) == 0x04100000) ||
6694 ((insn & 0x0f700010) == 0x06100000)) {
6695 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6696 goto illegal_op;
6697 }
6698 return; /* v7MP: Unallocated memory hint: must NOP */
6699 }
6700
6701 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6702 ARCH(6);
6703 /* setend */
10962fd5
PM
6704 if (((insn >> 9) & 1) != s->bswap_code) {
6705 /* Dynamic endianness switching not implemented. */
9ee6e8bb
PB
6706 goto illegal_op;
6707 }
6708 return;
6709 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6710 switch ((insn >> 4) & 0xf) {
6711 case 1: /* clrex */
6712 ARCH(6K);
426f5abc 6713 gen_clrex(s);
9ee6e8bb
PB
6714 return;
6715 case 4: /* dsb */
6716 case 5: /* dmb */
6717 case 6: /* isb */
6718 ARCH(7);
6719 /* We don't emulate caches so these are a no-op. */
6720 return;
6721 default:
6722 goto illegal_op;
6723 }
6724 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6725 /* srs */
81465888 6726 if (IS_USER(s)) {
9ee6e8bb 6727 goto illegal_op;
9ee6e8bb 6728 }
81465888
PM
6729 ARCH(6);
6730 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 6731 return;
ea825eee 6732 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6733 /* rfe */
c67b6b71 6734 int32_t offset;
9ee6e8bb
PB
6735 if (IS_USER(s))
6736 goto illegal_op;
6737 ARCH(6);
6738 rn = (insn >> 16) & 0xf;
b0109805 6739 addr = load_reg(s, rn);
9ee6e8bb
PB
6740 i = (insn >> 23) & 3;
6741 switch (i) {
b0109805 6742 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6743 case 1: offset = 0; break; /* IA */
6744 case 2: offset = -8; break; /* DB */
b0109805 6745 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6746 default: abort();
6747 }
6748 if (offset)
b0109805
PB
6749 tcg_gen_addi_i32(addr, addr, offset);
6750 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d
PM
6751 tmp = tcg_temp_new_i32();
6752 tcg_gen_qemu_ld32u(tmp, addr, 0);
b0109805 6753 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 6754 tmp2 = tcg_temp_new_i32();
5866e078 6755 tcg_gen_qemu_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
6756 if (insn & (1 << 21)) {
6757 /* Base writeback. */
6758 switch (i) {
b0109805 6759 case 0: offset = -8; break;
c67b6b71
FN
6760 case 1: offset = 4; break;
6761 case 2: offset = -4; break;
b0109805 6762 case 3: offset = 0; break;
9ee6e8bb
PB
6763 default: abort();
6764 }
6765 if (offset)
b0109805
PB
6766 tcg_gen_addi_i32(addr, addr, offset);
6767 store_reg(s, rn, addr);
6768 } else {
7d1b0095 6769 tcg_temp_free_i32(addr);
9ee6e8bb 6770 }
b0109805 6771 gen_rfe(s, tmp, tmp2);
c67b6b71 6772 return;
9ee6e8bb
PB
6773 } else if ((insn & 0x0e000000) == 0x0a000000) {
6774 /* branch link and change to thumb (blx <offset>) */
6775 int32_t offset;
6776
6777 val = (uint32_t)s->pc;
7d1b0095 6778 tmp = tcg_temp_new_i32();
d9ba4830
PB
6779 tcg_gen_movi_i32(tmp, val);
6780 store_reg(s, 14, tmp);
9ee6e8bb
PB
6781 /* Sign-extend the 24-bit offset */
6782 offset = (((int32_t)insn) << 8) >> 8;
6783 /* offset * 4 + bit24 * 2 + (thumb bit) */
6784 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6785 /* pipeline offset */
6786 val += 4;
be5e7a76 6787 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6788 gen_bx_im(s, val);
9ee6e8bb
PB
6789 return;
6790 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6791 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6792 /* iWMMXt register transfer. */
6793 if (env->cp15.c15_cpar & (1 << 1))
6794 if (!disas_iwmmxt_insn(env, s, insn))
6795 return;
6796 }
6797 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6798 /* Coprocessor double register transfer. */
be5e7a76 6799 ARCH(5TE);
9ee6e8bb
PB
6800 } else if ((insn & 0x0f000010) == 0x0e000010) {
6801 /* Additional coprocessor register transfer. */
7997d92f 6802 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6803 uint32_t mask;
6804 uint32_t val;
6805 /* cps (privileged) */
6806 if (IS_USER(s))
6807 return;
6808 mask = val = 0;
6809 if (insn & (1 << 19)) {
6810 if (insn & (1 << 8))
6811 mask |= CPSR_A;
6812 if (insn & (1 << 7))
6813 mask |= CPSR_I;
6814 if (insn & (1 << 6))
6815 mask |= CPSR_F;
6816 if (insn & (1 << 18))
6817 val |= mask;
6818 }
7997d92f 6819 if (insn & (1 << 17)) {
9ee6e8bb
PB
6820 mask |= CPSR_M;
6821 val |= (insn & 0x1f);
6822 }
6823 if (mask) {
2fbac54b 6824 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6825 }
6826 return;
6827 }
6828 goto illegal_op;
6829 }
6830 if (cond != 0xe) {
6831 /* if not always execute, we generate a conditional jump to
6832 next instruction */
6833 s->condlabel = gen_new_label();
d9ba4830 6834 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6835 s->condjmp = 1;
6836 }
6837 if ((insn & 0x0f900000) == 0x03000000) {
6838 if ((insn & (1 << 21)) == 0) {
6839 ARCH(6T2);
6840 rd = (insn >> 12) & 0xf;
6841 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6842 if ((insn & (1 << 22)) == 0) {
6843 /* MOVW */
7d1b0095 6844 tmp = tcg_temp_new_i32();
5e3f878a 6845 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6846 } else {
6847 /* MOVT */
5e3f878a 6848 tmp = load_reg(s, rd);
86831435 6849 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6850 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6851 }
5e3f878a 6852 store_reg(s, rd, tmp);
9ee6e8bb
PB
6853 } else {
6854 if (((insn >> 12) & 0xf) != 0xf)
6855 goto illegal_op;
6856 if (((insn >> 16) & 0xf) == 0) {
6857 gen_nop_hint(s, insn & 0xff);
6858 } else {
6859 /* CPSR = immediate */
6860 val = insn & 0xff;
6861 shift = ((insn >> 8) & 0xf) * 2;
6862 if (shift)
6863 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6864 i = ((insn & (1 << 22)) != 0);
2fbac54b 6865 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6866 goto illegal_op;
6867 }
6868 }
6869 } else if ((insn & 0x0f900000) == 0x01000000
6870 && (insn & 0x00000090) != 0x00000090) {
6871 /* miscellaneous instructions */
6872 op1 = (insn >> 21) & 3;
6873 sh = (insn >> 4) & 0xf;
6874 rm = insn & 0xf;
6875 switch (sh) {
6876 case 0x0: /* move program status register */
6877 if (op1 & 1) {
6878 /* PSR = reg */
2fbac54b 6879 tmp = load_reg(s, rm);
9ee6e8bb 6880 i = ((op1 & 2) != 0);
2fbac54b 6881 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6882 goto illegal_op;
6883 } else {
6884 /* reg = PSR */
6885 rd = (insn >> 12) & 0xf;
6886 if (op1 & 2) {
6887 if (IS_USER(s))
6888 goto illegal_op;
d9ba4830 6889 tmp = load_cpu_field(spsr);
9ee6e8bb 6890 } else {
7d1b0095 6891 tmp = tcg_temp_new_i32();
9ef39277 6892 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6893 }
d9ba4830 6894 store_reg(s, rd, tmp);
9ee6e8bb
PB
6895 }
6896 break;
6897 case 0x1:
6898 if (op1 == 1) {
6899 /* branch/exchange thumb (bx). */
be5e7a76 6900 ARCH(4T);
d9ba4830
PB
6901 tmp = load_reg(s, rm);
6902 gen_bx(s, tmp);
9ee6e8bb
PB
6903 } else if (op1 == 3) {
6904 /* clz */
be5e7a76 6905 ARCH(5);
9ee6e8bb 6906 rd = (insn >> 12) & 0xf;
1497c961
PB
6907 tmp = load_reg(s, rm);
6908 gen_helper_clz(tmp, tmp);
6909 store_reg(s, rd, tmp);
9ee6e8bb
PB
6910 } else {
6911 goto illegal_op;
6912 }
6913 break;
6914 case 0x2:
6915 if (op1 == 1) {
6916 ARCH(5J); /* bxj */
6917 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6918 tmp = load_reg(s, rm);
6919 gen_bx(s, tmp);
9ee6e8bb
PB
6920 } else {
6921 goto illegal_op;
6922 }
6923 break;
6924 case 0x3:
6925 if (op1 != 1)
6926 goto illegal_op;
6927
be5e7a76 6928 ARCH(5);
9ee6e8bb 6929 /* branch link/exchange thumb (blx) */
d9ba4830 6930 tmp = load_reg(s, rm);
7d1b0095 6931 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6932 tcg_gen_movi_i32(tmp2, s->pc);
6933 store_reg(s, 14, tmp2);
6934 gen_bx(s, tmp);
9ee6e8bb
PB
6935 break;
6936 case 0x5: /* saturating add/subtract */
be5e7a76 6937 ARCH(5TE);
9ee6e8bb
PB
6938 rd = (insn >> 12) & 0xf;
6939 rn = (insn >> 16) & 0xf;
b40d0353 6940 tmp = load_reg(s, rm);
5e3f878a 6941 tmp2 = load_reg(s, rn);
9ee6e8bb 6942 if (op1 & 2)
9ef39277 6943 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 6944 if (op1 & 1)
9ef39277 6945 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6946 else
9ef39277 6947 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 6948 tcg_temp_free_i32(tmp2);
5e3f878a 6949 store_reg(s, rd, tmp);
9ee6e8bb 6950 break;
49e14940
AL
6951 case 7:
6952 /* SMC instruction (op1 == 3)
6953 and undefined instructions (op1 == 0 || op1 == 2)
6954 will trap */
6955 if (op1 != 1) {
6956 goto illegal_op;
6957 }
6958 /* bkpt */
be5e7a76 6959 ARCH(5);
bc4a0de0 6960 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6961 break;
6962 case 0x8: /* signed multiply */
6963 case 0xa:
6964 case 0xc:
6965 case 0xe:
be5e7a76 6966 ARCH(5TE);
9ee6e8bb
PB
6967 rs = (insn >> 8) & 0xf;
6968 rn = (insn >> 12) & 0xf;
6969 rd = (insn >> 16) & 0xf;
6970 if (op1 == 1) {
6971 /* (32 * 16) >> 16 */
5e3f878a
PB
6972 tmp = load_reg(s, rm);
6973 tmp2 = load_reg(s, rs);
9ee6e8bb 6974 if (sh & 4)
5e3f878a 6975 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6976 else
5e3f878a 6977 gen_sxth(tmp2);
a7812ae4
PB
6978 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6979 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6980 tmp = tcg_temp_new_i32();
a7812ae4 6981 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6982 tcg_temp_free_i64(tmp64);
9ee6e8bb 6983 if ((sh & 2) == 0) {
5e3f878a 6984 tmp2 = load_reg(s, rn);
9ef39277 6985 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6986 tcg_temp_free_i32(tmp2);
9ee6e8bb 6987 }
5e3f878a 6988 store_reg(s, rd, tmp);
9ee6e8bb
PB
6989 } else {
6990 /* 16 * 16 */
5e3f878a
PB
6991 tmp = load_reg(s, rm);
6992 tmp2 = load_reg(s, rs);
6993 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6994 tcg_temp_free_i32(tmp2);
9ee6e8bb 6995 if (op1 == 2) {
a7812ae4
PB
6996 tmp64 = tcg_temp_new_i64();
6997 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6998 tcg_temp_free_i32(tmp);
a7812ae4
PB
6999 gen_addq(s, tmp64, rn, rd);
7000 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7001 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7002 } else {
7003 if (op1 == 0) {
5e3f878a 7004 tmp2 = load_reg(s, rn);
9ef39277 7005 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7006 tcg_temp_free_i32(tmp2);
9ee6e8bb 7007 }
5e3f878a 7008 store_reg(s, rd, tmp);
9ee6e8bb
PB
7009 }
7010 }
7011 break;
7012 default:
7013 goto illegal_op;
7014 }
7015 } else if (((insn & 0x0e000000) == 0 &&
7016 (insn & 0x00000090) != 0x90) ||
7017 ((insn & 0x0e000000) == (1 << 25))) {
7018 int set_cc, logic_cc, shiftop;
7019
7020 op1 = (insn >> 21) & 0xf;
7021 set_cc = (insn >> 20) & 1;
7022 logic_cc = table_logic_cc[op1] & set_cc;
7023
7024 /* data processing instruction */
7025 if (insn & (1 << 25)) {
7026 /* immediate operand */
7027 val = insn & 0xff;
7028 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7029 if (shift) {
9ee6e8bb 7030 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7031 }
7d1b0095 7032 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7033 tcg_gen_movi_i32(tmp2, val);
7034 if (logic_cc && shift) {
7035 gen_set_CF_bit31(tmp2);
7036 }
9ee6e8bb
PB
7037 } else {
7038 /* register */
7039 rm = (insn) & 0xf;
e9bb4aa9 7040 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7041 shiftop = (insn >> 5) & 3;
7042 if (!(insn & (1 << 4))) {
7043 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7044 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7045 } else {
7046 rs = (insn >> 8) & 0xf;
8984bd2e 7047 tmp = load_reg(s, rs);
e9bb4aa9 7048 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7049 }
7050 }
7051 if (op1 != 0x0f && op1 != 0x0d) {
7052 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7053 tmp = load_reg(s, rn);
7054 } else {
39d5492a 7055 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7056 }
7057 rd = (insn >> 12) & 0xf;
7058 switch(op1) {
7059 case 0x00:
e9bb4aa9
JR
7060 tcg_gen_and_i32(tmp, tmp, tmp2);
7061 if (logic_cc) {
7062 gen_logic_CC(tmp);
7063 }
21aeb343 7064 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7065 break;
7066 case 0x01:
e9bb4aa9
JR
7067 tcg_gen_xor_i32(tmp, tmp, tmp2);
7068 if (logic_cc) {
7069 gen_logic_CC(tmp);
7070 }
21aeb343 7071 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7072 break;
7073 case 0x02:
7074 if (set_cc && rd == 15) {
7075 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7076 if (IS_USER(s)) {
9ee6e8bb 7077 goto illegal_op;
e9bb4aa9 7078 }
72485ec4 7079 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7080 gen_exception_return(s, tmp);
9ee6e8bb 7081 } else {
e9bb4aa9 7082 if (set_cc) {
72485ec4 7083 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7084 } else {
7085 tcg_gen_sub_i32(tmp, tmp, tmp2);
7086 }
21aeb343 7087 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7088 }
7089 break;
7090 case 0x03:
e9bb4aa9 7091 if (set_cc) {
72485ec4 7092 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7093 } else {
7094 tcg_gen_sub_i32(tmp, tmp2, tmp);
7095 }
21aeb343 7096 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7097 break;
7098 case 0x04:
e9bb4aa9 7099 if (set_cc) {
72485ec4 7100 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7101 } else {
7102 tcg_gen_add_i32(tmp, tmp, tmp2);
7103 }
21aeb343 7104 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7105 break;
7106 case 0x05:
e9bb4aa9 7107 if (set_cc) {
49b4c31e 7108 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7109 } else {
7110 gen_add_carry(tmp, tmp, tmp2);
7111 }
21aeb343 7112 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7113 break;
7114 case 0x06:
e9bb4aa9 7115 if (set_cc) {
2de68a49 7116 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7117 } else {
7118 gen_sub_carry(tmp, tmp, tmp2);
7119 }
21aeb343 7120 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7121 break;
7122 case 0x07:
e9bb4aa9 7123 if (set_cc) {
2de68a49 7124 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7125 } else {
7126 gen_sub_carry(tmp, tmp2, tmp);
7127 }
21aeb343 7128 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7129 break;
7130 case 0x08:
7131 if (set_cc) {
e9bb4aa9
JR
7132 tcg_gen_and_i32(tmp, tmp, tmp2);
7133 gen_logic_CC(tmp);
9ee6e8bb 7134 }
7d1b0095 7135 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7136 break;
7137 case 0x09:
7138 if (set_cc) {
e9bb4aa9
JR
7139 tcg_gen_xor_i32(tmp, tmp, tmp2);
7140 gen_logic_CC(tmp);
9ee6e8bb 7141 }
7d1b0095 7142 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7143 break;
7144 case 0x0a:
7145 if (set_cc) {
72485ec4 7146 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7147 }
7d1b0095 7148 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7149 break;
7150 case 0x0b:
7151 if (set_cc) {
72485ec4 7152 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7153 }
7d1b0095 7154 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7155 break;
7156 case 0x0c:
e9bb4aa9
JR
7157 tcg_gen_or_i32(tmp, tmp, tmp2);
7158 if (logic_cc) {
7159 gen_logic_CC(tmp);
7160 }
21aeb343 7161 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7162 break;
7163 case 0x0d:
7164 if (logic_cc && rd == 15) {
7165 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7166 if (IS_USER(s)) {
9ee6e8bb 7167 goto illegal_op;
e9bb4aa9
JR
7168 }
7169 gen_exception_return(s, tmp2);
9ee6e8bb 7170 } else {
e9bb4aa9
JR
7171 if (logic_cc) {
7172 gen_logic_CC(tmp2);
7173 }
21aeb343 7174 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7175 }
7176 break;
7177 case 0x0e:
f669df27 7178 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7179 if (logic_cc) {
7180 gen_logic_CC(tmp);
7181 }
21aeb343 7182 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7183 break;
7184 default:
7185 case 0x0f:
e9bb4aa9
JR
7186 tcg_gen_not_i32(tmp2, tmp2);
7187 if (logic_cc) {
7188 gen_logic_CC(tmp2);
7189 }
21aeb343 7190 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7191 break;
7192 }
e9bb4aa9 7193 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7194 tcg_temp_free_i32(tmp2);
e9bb4aa9 7195 }
9ee6e8bb
PB
7196 } else {
7197 /* other instructions */
7198 op1 = (insn >> 24) & 0xf;
7199 switch(op1) {
7200 case 0x0:
7201 case 0x1:
7202 /* multiplies, extra load/stores */
7203 sh = (insn >> 5) & 3;
7204 if (sh == 0) {
7205 if (op1 == 0x0) {
7206 rd = (insn >> 16) & 0xf;
7207 rn = (insn >> 12) & 0xf;
7208 rs = (insn >> 8) & 0xf;
7209 rm = (insn) & 0xf;
7210 op1 = (insn >> 20) & 0xf;
7211 switch (op1) {
7212 case 0: case 1: case 2: case 3: case 6:
7213 /* 32 bit mul */
5e3f878a
PB
7214 tmp = load_reg(s, rs);
7215 tmp2 = load_reg(s, rm);
7216 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7217 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7218 if (insn & (1 << 22)) {
7219 /* Subtract (mls) */
7220 ARCH(6T2);
5e3f878a
PB
7221 tmp2 = load_reg(s, rn);
7222 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7223 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7224 } else if (insn & (1 << 21)) {
7225 /* Add */
5e3f878a
PB
7226 tmp2 = load_reg(s, rn);
7227 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7228 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7229 }
7230 if (insn & (1 << 20))
5e3f878a
PB
7231 gen_logic_CC(tmp);
7232 store_reg(s, rd, tmp);
9ee6e8bb 7233 break;
8aac08b1
AJ
7234 case 4:
7235 /* 64 bit mul double accumulate (UMAAL) */
7236 ARCH(6);
7237 tmp = load_reg(s, rs);
7238 tmp2 = load_reg(s, rm);
7239 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7240 gen_addq_lo(s, tmp64, rn);
7241 gen_addq_lo(s, tmp64, rd);
7242 gen_storeq_reg(s, rn, rd, tmp64);
7243 tcg_temp_free_i64(tmp64);
7244 break;
7245 case 8: case 9: case 10: case 11:
7246 case 12: case 13: case 14: case 15:
7247 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7248 tmp = load_reg(s, rs);
7249 tmp2 = load_reg(s, rm);
8aac08b1 7250 if (insn & (1 << 22)) {
c9f10124 7251 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7252 } else {
c9f10124 7253 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7254 }
7255 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7256 TCGv_i32 al = load_reg(s, rn);
7257 TCGv_i32 ah = load_reg(s, rd);
c9f10124 7258 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
7259 tcg_temp_free_i32(al);
7260 tcg_temp_free_i32(ah);
9ee6e8bb 7261 }
8aac08b1 7262 if (insn & (1 << 20)) {
c9f10124 7263 gen_logicq_cc(tmp, tmp2);
8aac08b1 7264 }
c9f10124
RH
7265 store_reg(s, rn, tmp);
7266 store_reg(s, rd, tmp2);
9ee6e8bb 7267 break;
8aac08b1
AJ
7268 default:
7269 goto illegal_op;
9ee6e8bb
PB
7270 }
7271 } else {
7272 rn = (insn >> 16) & 0xf;
7273 rd = (insn >> 12) & 0xf;
7274 if (insn & (1 << 23)) {
7275 /* load/store exclusive */
86753403
PB
7276 op1 = (insn >> 21) & 0x3;
7277 if (op1)
a47f43d2 7278 ARCH(6K);
86753403
PB
7279 else
7280 ARCH(6);
3174f8e9 7281 addr = tcg_temp_local_new_i32();
98a46317 7282 load_reg_var(s, addr, rn);
9ee6e8bb 7283 if (insn & (1 << 20)) {
86753403
PB
7284 switch (op1) {
7285 case 0: /* ldrex */
426f5abc 7286 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7287 break;
7288 case 1: /* ldrexd */
426f5abc 7289 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7290 break;
7291 case 2: /* ldrexb */
426f5abc 7292 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7293 break;
7294 case 3: /* ldrexh */
426f5abc 7295 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7296 break;
7297 default:
7298 abort();
7299 }
9ee6e8bb
PB
7300 } else {
7301 rm = insn & 0xf;
86753403
PB
7302 switch (op1) {
7303 case 0: /* strex */
426f5abc 7304 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7305 break;
7306 case 1: /* strexd */
502e64fe 7307 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7308 break;
7309 case 2: /* strexb */
426f5abc 7310 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7311 break;
7312 case 3: /* strexh */
426f5abc 7313 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7314 break;
7315 default:
7316 abort();
7317 }
9ee6e8bb 7318 }
39d5492a 7319 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7320 } else {
7321 /* SWP instruction */
7322 rm = (insn) & 0xf;
7323
8984bd2e
PB
7324 /* ??? This is not really atomic. However we know
7325 we never have multiple CPUs running in parallel,
7326 so it is good enough. */
7327 addr = load_reg(s, rn);
7328 tmp = load_reg(s, rm);
5a839c0d 7329 tmp2 = tcg_temp_new_i32();
9ee6e8bb 7330 if (insn & (1 << 22)) {
5a839c0d
PM
7331 tcg_gen_qemu_ld8u(tmp2, addr, IS_USER(s));
7332 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7333 } else {
5a839c0d
PM
7334 tcg_gen_qemu_ld32u(tmp2, addr, IS_USER(s));
7335 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7336 }
5a839c0d 7337 tcg_temp_free_i32(tmp);
7d1b0095 7338 tcg_temp_free_i32(addr);
8984bd2e 7339 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7340 }
7341 }
7342 } else {
7343 int address_offset;
7344 int load;
7345 /* Misc load/store */
7346 rn = (insn >> 16) & 0xf;
7347 rd = (insn >> 12) & 0xf;
b0109805 7348 addr = load_reg(s, rn);
9ee6e8bb 7349 if (insn & (1 << 24))
b0109805 7350 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7351 address_offset = 0;
7352 if (insn & (1 << 20)) {
7353 /* load */
5a839c0d 7354 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
7355 switch(sh) {
7356 case 1:
5a839c0d 7357 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7358 break;
7359 case 2:
5a839c0d 7360 tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7361 break;
7362 default:
7363 case 3:
5a839c0d 7364 tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7365 break;
7366 }
7367 load = 1;
7368 } else if (sh & 2) {
be5e7a76 7369 ARCH(5TE);
9ee6e8bb
PB
7370 /* doubleword */
7371 if (sh & 1) {
7372 /* store */
b0109805 7373 tmp = load_reg(s, rd);
5a839c0d
PM
7374 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7375 tcg_temp_free_i32(tmp);
b0109805
PB
7376 tcg_gen_addi_i32(addr, addr, 4);
7377 tmp = load_reg(s, rd + 1);
5a839c0d
PM
7378 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7379 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7380 load = 0;
7381 } else {
7382 /* load */
5a839c0d
PM
7383 tmp = tcg_temp_new_i32();
7384 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
7385 store_reg(s, rd, tmp);
7386 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d
PM
7387 tmp = tcg_temp_new_i32();
7388 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7389 rd++;
7390 load = 1;
7391 }
7392 address_offset = -4;
7393 } else {
7394 /* store */
b0109805 7395 tmp = load_reg(s, rd);
5a839c0d
PM
7396 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
7397 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7398 load = 0;
7399 }
7400 /* Perform base writeback before the loaded value to
7401 ensure correct behavior with overlapping index registers.
7402 ldrd with base writeback is is undefined if the
7403 destination and index registers overlap. */
7404 if (!(insn & (1 << 24))) {
b0109805
PB
7405 gen_add_datah_offset(s, insn, address_offset, addr);
7406 store_reg(s, rn, addr);
9ee6e8bb
PB
7407 } else if (insn & (1 << 21)) {
7408 if (address_offset)
b0109805
PB
7409 tcg_gen_addi_i32(addr, addr, address_offset);
7410 store_reg(s, rn, addr);
7411 } else {
7d1b0095 7412 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7413 }
7414 if (load) {
7415 /* Complete the load. */
b0109805 7416 store_reg(s, rd, tmp);
9ee6e8bb
PB
7417 }
7418 }
7419 break;
7420 case 0x4:
7421 case 0x5:
7422 goto do_ldst;
7423 case 0x6:
7424 case 0x7:
7425 if (insn & (1 << 4)) {
7426 ARCH(6);
7427 /* Armv6 Media instructions. */
7428 rm = insn & 0xf;
7429 rn = (insn >> 16) & 0xf;
2c0262af 7430 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7431 rs = (insn >> 8) & 0xf;
7432 switch ((insn >> 23) & 3) {
7433 case 0: /* Parallel add/subtract. */
7434 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7435 tmp = load_reg(s, rn);
7436 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7437 sh = (insn >> 5) & 7;
7438 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7439 goto illegal_op;
6ddbc6e4 7440 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7441 tcg_temp_free_i32(tmp2);
6ddbc6e4 7442 store_reg(s, rd, tmp);
9ee6e8bb
PB
7443 break;
7444 case 1:
7445 if ((insn & 0x00700020) == 0) {
6c95676b 7446 /* Halfword pack. */
3670669c
PB
7447 tmp = load_reg(s, rn);
7448 tmp2 = load_reg(s, rm);
9ee6e8bb 7449 shift = (insn >> 7) & 0x1f;
3670669c
PB
7450 if (insn & (1 << 6)) {
7451 /* pkhtb */
22478e79
AZ
7452 if (shift == 0)
7453 shift = 31;
7454 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7455 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7456 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7457 } else {
7458 /* pkhbt */
22478e79
AZ
7459 if (shift)
7460 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7461 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7462 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7463 }
7464 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7465 tcg_temp_free_i32(tmp2);
3670669c 7466 store_reg(s, rd, tmp);
9ee6e8bb
PB
7467 } else if ((insn & 0x00200020) == 0x00200000) {
7468 /* [us]sat */
6ddbc6e4 7469 tmp = load_reg(s, rm);
9ee6e8bb
PB
7470 shift = (insn >> 7) & 0x1f;
7471 if (insn & (1 << 6)) {
7472 if (shift == 0)
7473 shift = 31;
6ddbc6e4 7474 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7475 } else {
6ddbc6e4 7476 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7477 }
7478 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7479 tmp2 = tcg_const_i32(sh);
7480 if (insn & (1 << 22))
9ef39277 7481 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7482 else
9ef39277 7483 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7484 tcg_temp_free_i32(tmp2);
6ddbc6e4 7485 store_reg(s, rd, tmp);
9ee6e8bb
PB
7486 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7487 /* [us]sat16 */
6ddbc6e4 7488 tmp = load_reg(s, rm);
9ee6e8bb 7489 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7490 tmp2 = tcg_const_i32(sh);
7491 if (insn & (1 << 22))
9ef39277 7492 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7493 else
9ef39277 7494 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7495 tcg_temp_free_i32(tmp2);
6ddbc6e4 7496 store_reg(s, rd, tmp);
9ee6e8bb
PB
7497 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7498 /* Select bytes. */
6ddbc6e4
PB
7499 tmp = load_reg(s, rn);
7500 tmp2 = load_reg(s, rm);
7d1b0095 7501 tmp3 = tcg_temp_new_i32();
0ecb72a5 7502 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7503 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7504 tcg_temp_free_i32(tmp3);
7505 tcg_temp_free_i32(tmp2);
6ddbc6e4 7506 store_reg(s, rd, tmp);
9ee6e8bb 7507 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7508 tmp = load_reg(s, rm);
9ee6e8bb 7509 shift = (insn >> 10) & 3;
1301f322 7510 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7511 rotate, a shift is sufficient. */
7512 if (shift != 0)
f669df27 7513 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7514 op1 = (insn >> 20) & 7;
7515 switch (op1) {
5e3f878a
PB
7516 case 0: gen_sxtb16(tmp); break;
7517 case 2: gen_sxtb(tmp); break;
7518 case 3: gen_sxth(tmp); break;
7519 case 4: gen_uxtb16(tmp); break;
7520 case 6: gen_uxtb(tmp); break;
7521 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7522 default: goto illegal_op;
7523 }
7524 if (rn != 15) {
5e3f878a 7525 tmp2 = load_reg(s, rn);
9ee6e8bb 7526 if ((op1 & 3) == 0) {
5e3f878a 7527 gen_add16(tmp, tmp2);
9ee6e8bb 7528 } else {
5e3f878a 7529 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7530 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7531 }
7532 }
6c95676b 7533 store_reg(s, rd, tmp);
9ee6e8bb
PB
7534 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7535 /* rev */
b0109805 7536 tmp = load_reg(s, rm);
9ee6e8bb
PB
7537 if (insn & (1 << 22)) {
7538 if (insn & (1 << 7)) {
b0109805 7539 gen_revsh(tmp);
9ee6e8bb
PB
7540 } else {
7541 ARCH(6T2);
b0109805 7542 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7543 }
7544 } else {
7545 if (insn & (1 << 7))
b0109805 7546 gen_rev16(tmp);
9ee6e8bb 7547 else
66896cb8 7548 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7549 }
b0109805 7550 store_reg(s, rd, tmp);
9ee6e8bb
PB
7551 } else {
7552 goto illegal_op;
7553 }
7554 break;
7555 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7556 switch ((insn >> 20) & 0x7) {
7557 case 5:
7558 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7559 /* op2 not 00x or 11x : UNDEF */
7560 goto illegal_op;
7561 }
838fa72d
AJ
7562 /* Signed multiply most significant [accumulate].
7563 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7564 tmp = load_reg(s, rm);
7565 tmp2 = load_reg(s, rs);
a7812ae4 7566 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7567
955a7dd5 7568 if (rd != 15) {
838fa72d 7569 tmp = load_reg(s, rd);
9ee6e8bb 7570 if (insn & (1 << 6)) {
838fa72d 7571 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7572 } else {
838fa72d 7573 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7574 }
7575 }
838fa72d
AJ
7576 if (insn & (1 << 5)) {
7577 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7578 }
7579 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7580 tmp = tcg_temp_new_i32();
838fa72d
AJ
7581 tcg_gen_trunc_i64_i32(tmp, tmp64);
7582 tcg_temp_free_i64(tmp64);
955a7dd5 7583 store_reg(s, rn, tmp);
41e9564d
PM
7584 break;
7585 case 0:
7586 case 4:
7587 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7588 if (insn & (1 << 7)) {
7589 goto illegal_op;
7590 }
7591 tmp = load_reg(s, rm);
7592 tmp2 = load_reg(s, rs);
9ee6e8bb 7593 if (insn & (1 << 5))
5e3f878a
PB
7594 gen_swap_half(tmp2);
7595 gen_smul_dual(tmp, tmp2);
5e3f878a 7596 if (insn & (1 << 6)) {
e1d177b9 7597 /* This subtraction cannot overflow. */
5e3f878a
PB
7598 tcg_gen_sub_i32(tmp, tmp, tmp2);
7599 } else {
e1d177b9
PM
7600 /* This addition cannot overflow 32 bits;
7601 * however it may overflow considered as a signed
7602 * operation, in which case we must set the Q flag.
7603 */
9ef39277 7604 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7605 }
7d1b0095 7606 tcg_temp_free_i32(tmp2);
9ee6e8bb 7607 if (insn & (1 << 22)) {
5e3f878a 7608 /* smlald, smlsld */
a7812ae4
PB
7609 tmp64 = tcg_temp_new_i64();
7610 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7611 tcg_temp_free_i32(tmp);
a7812ae4
PB
7612 gen_addq(s, tmp64, rd, rn);
7613 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7614 tcg_temp_free_i64(tmp64);
9ee6e8bb 7615 } else {
5e3f878a 7616 /* smuad, smusd, smlad, smlsd */
22478e79 7617 if (rd != 15)
9ee6e8bb 7618 {
22478e79 7619 tmp2 = load_reg(s, rd);
9ef39277 7620 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7621 tcg_temp_free_i32(tmp2);
9ee6e8bb 7622 }
22478e79 7623 store_reg(s, rn, tmp);
9ee6e8bb 7624 }
41e9564d 7625 break;
b8b8ea05
PM
7626 case 1:
7627 case 3:
7628 /* SDIV, UDIV */
7629 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7630 goto illegal_op;
7631 }
7632 if (((insn >> 5) & 7) || (rd != 15)) {
7633 goto illegal_op;
7634 }
7635 tmp = load_reg(s, rm);
7636 tmp2 = load_reg(s, rs);
7637 if (insn & (1 << 21)) {
7638 gen_helper_udiv(tmp, tmp, tmp2);
7639 } else {
7640 gen_helper_sdiv(tmp, tmp, tmp2);
7641 }
7642 tcg_temp_free_i32(tmp2);
7643 store_reg(s, rn, tmp);
7644 break;
41e9564d
PM
7645 default:
7646 goto illegal_op;
9ee6e8bb
PB
7647 }
7648 break;
7649 case 3:
7650 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7651 switch (op1) {
7652 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7653 ARCH(6);
7654 tmp = load_reg(s, rm);
7655 tmp2 = load_reg(s, rs);
7656 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7657 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7658 if (rd != 15) {
7659 tmp2 = load_reg(s, rd);
6ddbc6e4 7660 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7661 tcg_temp_free_i32(tmp2);
9ee6e8bb 7662 }
ded9d295 7663 store_reg(s, rn, tmp);
9ee6e8bb
PB
7664 break;
7665 case 0x20: case 0x24: case 0x28: case 0x2c:
7666 /* Bitfield insert/clear. */
7667 ARCH(6T2);
7668 shift = (insn >> 7) & 0x1f;
7669 i = (insn >> 16) & 0x1f;
7670 i = i + 1 - shift;
7671 if (rm == 15) {
7d1b0095 7672 tmp = tcg_temp_new_i32();
5e3f878a 7673 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7674 } else {
5e3f878a 7675 tmp = load_reg(s, rm);
9ee6e8bb
PB
7676 }
7677 if (i != 32) {
5e3f878a 7678 tmp2 = load_reg(s, rd);
d593c48e 7679 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7680 tcg_temp_free_i32(tmp2);
9ee6e8bb 7681 }
5e3f878a 7682 store_reg(s, rd, tmp);
9ee6e8bb
PB
7683 break;
7684 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7685 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7686 ARCH(6T2);
5e3f878a 7687 tmp = load_reg(s, rm);
9ee6e8bb
PB
7688 shift = (insn >> 7) & 0x1f;
7689 i = ((insn >> 16) & 0x1f) + 1;
7690 if (shift + i > 32)
7691 goto illegal_op;
7692 if (i < 32) {
7693 if (op1 & 0x20) {
5e3f878a 7694 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7695 } else {
5e3f878a 7696 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7697 }
7698 }
5e3f878a 7699 store_reg(s, rd, tmp);
9ee6e8bb
PB
7700 break;
7701 default:
7702 goto illegal_op;
7703 }
7704 break;
7705 }
7706 break;
7707 }
7708 do_ldst:
7709 /* Check for undefined extension instructions
7710 * per the ARM Bible IE:
7711 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7712 */
7713 sh = (0xf << 20) | (0xf << 4);
7714 if (op1 == 0x7 && ((insn & sh) == sh))
7715 {
7716 goto illegal_op;
7717 }
7718 /* load/store byte/word */
7719 rn = (insn >> 16) & 0xf;
7720 rd = (insn >> 12) & 0xf;
b0109805 7721 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7722 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7723 if (insn & (1 << 24))
b0109805 7724 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7725 if (insn & (1 << 20)) {
7726 /* load */
5a839c0d 7727 tmp = tcg_temp_new_i32();
9ee6e8bb 7728 if (insn & (1 << 22)) {
5a839c0d 7729 tcg_gen_qemu_ld8u(tmp, tmp2, i);
9ee6e8bb 7730 } else {
5a839c0d 7731 tcg_gen_qemu_ld32u(tmp, tmp2, i);
9ee6e8bb 7732 }
9ee6e8bb
PB
7733 } else {
7734 /* store */
b0109805 7735 tmp = load_reg(s, rd);
5a839c0d
PM
7736 if (insn & (1 << 22)) {
7737 tcg_gen_qemu_st8(tmp, tmp2, i);
7738 } else {
7739 tcg_gen_qemu_st32(tmp, tmp2, i);
7740 }
7741 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7742 }
7743 if (!(insn & (1 << 24))) {
b0109805
PB
7744 gen_add_data_offset(s, insn, tmp2);
7745 store_reg(s, rn, tmp2);
7746 } else if (insn & (1 << 21)) {
7747 store_reg(s, rn, tmp2);
7748 } else {
7d1b0095 7749 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7750 }
7751 if (insn & (1 << 20)) {
7752 /* Complete the load. */
be5e7a76 7753 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7754 }
7755 break;
7756 case 0x08:
7757 case 0x09:
7758 {
7759 int j, n, user, loaded_base;
39d5492a 7760 TCGv_i32 loaded_var;
9ee6e8bb
PB
7761 /* load/store multiple words */
7762 /* XXX: store correct base if write back */
7763 user = 0;
7764 if (insn & (1 << 22)) {
7765 if (IS_USER(s))
7766 goto illegal_op; /* only usable in supervisor mode */
7767
7768 if ((insn & (1 << 15)) == 0)
7769 user = 1;
7770 }
7771 rn = (insn >> 16) & 0xf;
b0109805 7772 addr = load_reg(s, rn);
9ee6e8bb
PB
7773
7774 /* compute total size */
7775 loaded_base = 0;
39d5492a 7776 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
7777 n = 0;
7778 for(i=0;i<16;i++) {
7779 if (insn & (1 << i))
7780 n++;
7781 }
7782 /* XXX: test invalid n == 0 case ? */
7783 if (insn & (1 << 23)) {
7784 if (insn & (1 << 24)) {
7785 /* pre increment */
b0109805 7786 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7787 } else {
7788 /* post increment */
7789 }
7790 } else {
7791 if (insn & (1 << 24)) {
7792 /* pre decrement */
b0109805 7793 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7794 } else {
7795 /* post decrement */
7796 if (n != 1)
b0109805 7797 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7798 }
7799 }
7800 j = 0;
7801 for(i=0;i<16;i++) {
7802 if (insn & (1 << i)) {
7803 if (insn & (1 << 20)) {
7804 /* load */
5a839c0d
PM
7805 tmp = tcg_temp_new_i32();
7806 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
be5e7a76 7807 if (user) {
b75263d6 7808 tmp2 = tcg_const_i32(i);
1ce94f81 7809 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7810 tcg_temp_free_i32(tmp2);
7d1b0095 7811 tcg_temp_free_i32(tmp);
9ee6e8bb 7812 } else if (i == rn) {
b0109805 7813 loaded_var = tmp;
9ee6e8bb
PB
7814 loaded_base = 1;
7815 } else {
be5e7a76 7816 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7817 }
7818 } else {
7819 /* store */
7820 if (i == 15) {
7821 /* special case: r15 = PC + 8 */
7822 val = (long)s->pc + 4;
7d1b0095 7823 tmp = tcg_temp_new_i32();
b0109805 7824 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7825 } else if (user) {
7d1b0095 7826 tmp = tcg_temp_new_i32();
b75263d6 7827 tmp2 = tcg_const_i32(i);
9ef39277 7828 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7829 tcg_temp_free_i32(tmp2);
9ee6e8bb 7830 } else {
b0109805 7831 tmp = load_reg(s, i);
9ee6e8bb 7832 }
5a839c0d
PM
7833 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7834 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7835 }
7836 j++;
7837 /* no need to add after the last transfer */
7838 if (j != n)
b0109805 7839 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7840 }
7841 }
7842 if (insn & (1 << 21)) {
7843 /* write back */
7844 if (insn & (1 << 23)) {
7845 if (insn & (1 << 24)) {
7846 /* pre increment */
7847 } else {
7848 /* post increment */
b0109805 7849 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7850 }
7851 } else {
7852 if (insn & (1 << 24)) {
7853 /* pre decrement */
7854 if (n != 1)
b0109805 7855 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7856 } else {
7857 /* post decrement */
b0109805 7858 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7859 }
7860 }
b0109805
PB
7861 store_reg(s, rn, addr);
7862 } else {
7d1b0095 7863 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7864 }
7865 if (loaded_base) {
b0109805 7866 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7867 }
7868 if ((insn & (1 << 22)) && !user) {
7869 /* Restore CPSR from SPSR. */
d9ba4830
PB
7870 tmp = load_cpu_field(spsr);
7871 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7872 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7873 s->is_jmp = DISAS_UPDATE;
7874 }
7875 }
7876 break;
7877 case 0xa:
7878 case 0xb:
7879 {
7880 int32_t offset;
7881
7882 /* branch (and link) */
7883 val = (int32_t)s->pc;
7884 if (insn & (1 << 24)) {
7d1b0095 7885 tmp = tcg_temp_new_i32();
5e3f878a
PB
7886 tcg_gen_movi_i32(tmp, val);
7887 store_reg(s, 14, tmp);
9ee6e8bb
PB
7888 }
7889 offset = (((int32_t)insn << 8) >> 8);
7890 val += (offset << 2) + 4;
7891 gen_jmp(s, val);
7892 }
7893 break;
7894 case 0xc:
7895 case 0xd:
7896 case 0xe:
7897 /* Coprocessor. */
7898 if (disas_coproc_insn(env, s, insn))
7899 goto illegal_op;
7900 break;
7901 case 0xf:
7902 /* swi */
5e3f878a 7903 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7904 s->is_jmp = DISAS_SWI;
7905 break;
7906 default:
7907 illegal_op:
bc4a0de0 7908 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7909 break;
7910 }
7911 }
7912}
7913
/* Return true (nonzero) if OP encodes a Thumb-2 logical operation.
 * Opcodes 0..7 are the logical group (AND, BIC, ORR, ORN, EOR, ...);
 * the arithmetic operations occupy opcode 8 and above. */
static int
thumb2_logic_op(int op)
{
    return (op <= 7) ? 1 : 0;
}
7920
7921/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7922 then set condition code flags based on the result of the operation.
7923 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7924 to the high bit of T1.
7925 Returns zero if the opcode is valid. */
7926
7927static int
39d5492a
PM
7928gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
7929 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
7930{
7931 int logic_cc;
7932
7933 logic_cc = 0;
7934 switch (op) {
7935 case 0: /* and */
396e467c 7936 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7937 logic_cc = conds;
7938 break;
7939 case 1: /* bic */
f669df27 7940 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7941 logic_cc = conds;
7942 break;
7943 case 2: /* orr */
396e467c 7944 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7945 logic_cc = conds;
7946 break;
7947 case 3: /* orn */
29501f1b 7948 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7949 logic_cc = conds;
7950 break;
7951 case 4: /* eor */
396e467c 7952 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7953 logic_cc = conds;
7954 break;
7955 case 8: /* add */
7956 if (conds)
72485ec4 7957 gen_add_CC(t0, t0, t1);
9ee6e8bb 7958 else
396e467c 7959 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7960 break;
7961 case 10: /* adc */
7962 if (conds)
49b4c31e 7963 gen_adc_CC(t0, t0, t1);
9ee6e8bb 7964 else
396e467c 7965 gen_adc(t0, t1);
9ee6e8bb
PB
7966 break;
7967 case 11: /* sbc */
2de68a49
RH
7968 if (conds) {
7969 gen_sbc_CC(t0, t0, t1);
7970 } else {
396e467c 7971 gen_sub_carry(t0, t0, t1);
2de68a49 7972 }
9ee6e8bb
PB
7973 break;
7974 case 13: /* sub */
7975 if (conds)
72485ec4 7976 gen_sub_CC(t0, t0, t1);
9ee6e8bb 7977 else
396e467c 7978 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7979 break;
7980 case 14: /* rsb */
7981 if (conds)
72485ec4 7982 gen_sub_CC(t0, t1, t0);
9ee6e8bb 7983 else
396e467c 7984 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7985 break;
7986 default: /* 5, 6, 7, 9, 12, 15. */
7987 return 1;
7988 }
7989 if (logic_cc) {
396e467c 7990 gen_logic_CC(t0);
9ee6e8bb 7991 if (shifter_out)
396e467c 7992 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7993 }
7994 return 0;
7995}
7996
7997/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7998 is not legal. */
0ecb72a5 7999static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8000{
b0109805 8001 uint32_t insn, imm, shift, offset;
9ee6e8bb 8002 uint32_t rd, rn, rm, rs;
39d5492a
PM
8003 TCGv_i32 tmp;
8004 TCGv_i32 tmp2;
8005 TCGv_i32 tmp3;
8006 TCGv_i32 addr;
a7812ae4 8007 TCGv_i64 tmp64;
9ee6e8bb
PB
8008 int op;
8009 int shiftop;
8010 int conds;
8011 int logic_cc;
8012
8013 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8014 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8015 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8016 16-bit instructions to get correct prefetch abort behavior. */
8017 insn = insn_hw1;
8018 if ((insn & (1 << 12)) == 0) {
be5e7a76 8019 ARCH(5);
9ee6e8bb
PB
8020 /* Second half of blx. */
8021 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8022 tmp = load_reg(s, 14);
8023 tcg_gen_addi_i32(tmp, tmp, offset);
8024 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8025
7d1b0095 8026 tmp2 = tcg_temp_new_i32();
b0109805 8027 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8028 store_reg(s, 14, tmp2);
8029 gen_bx(s, tmp);
9ee6e8bb
PB
8030 return 0;
8031 }
8032 if (insn & (1 << 11)) {
8033 /* Second half of bl. */
8034 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8035 tmp = load_reg(s, 14);
6a0d8a1d 8036 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8037
7d1b0095 8038 tmp2 = tcg_temp_new_i32();
b0109805 8039 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8040 store_reg(s, 14, tmp2);
8041 gen_bx(s, tmp);
9ee6e8bb
PB
8042 return 0;
8043 }
8044 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8045 /* Instruction spans a page boundary. Implement it as two
8046 16-bit instructions in case the second half causes an
8047 prefetch abort. */
8048 offset = ((int32_t)insn << 21) >> 9;
396e467c 8049 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8050 return 0;
8051 }
8052 /* Fall through to 32-bit decode. */
8053 }
8054
d31dd73e 8055 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8056 s->pc += 2;
8057 insn |= (uint32_t)insn_hw1 << 16;
8058
8059 if ((insn & 0xf800e800) != 0xf000e800) {
8060 ARCH(6T2);
8061 }
8062
8063 rn = (insn >> 16) & 0xf;
8064 rs = (insn >> 12) & 0xf;
8065 rd = (insn >> 8) & 0xf;
8066 rm = insn & 0xf;
8067 switch ((insn >> 25) & 0xf) {
8068 case 0: case 1: case 2: case 3:
8069 /* 16-bit instructions. Should never happen. */
8070 abort();
8071 case 4:
8072 if (insn & (1 << 22)) {
8073 /* Other load/store, table branch. */
8074 if (insn & 0x01200000) {
8075 /* Load/store doubleword. */
8076 if (rn == 15) {
7d1b0095 8077 addr = tcg_temp_new_i32();
b0109805 8078 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8079 } else {
b0109805 8080 addr = load_reg(s, rn);
9ee6e8bb
PB
8081 }
8082 offset = (insn & 0xff) * 4;
8083 if ((insn & (1 << 23)) == 0)
8084 offset = -offset;
8085 if (insn & (1 << 24)) {
b0109805 8086 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8087 offset = 0;
8088 }
8089 if (insn & (1 << 20)) {
8090 /* ldrd */
e2592fad
PM
8091 tmp = tcg_temp_new_i32();
8092 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8093 store_reg(s, rs, tmp);
8094 tcg_gen_addi_i32(addr, addr, 4);
e2592fad
PM
8095 tmp = tcg_temp_new_i32();
8096 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 8097 store_reg(s, rd, tmp);
9ee6e8bb
PB
8098 } else {
8099 /* strd */
b0109805 8100 tmp = load_reg(s, rs);
e2592fad
PM
8101 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8102 tcg_temp_free_i32(tmp);
b0109805
PB
8103 tcg_gen_addi_i32(addr, addr, 4);
8104 tmp = load_reg(s, rd);
e2592fad
PM
8105 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8106 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8107 }
8108 if (insn & (1 << 21)) {
8109 /* Base writeback. */
8110 if (rn == 15)
8111 goto illegal_op;
b0109805
PB
8112 tcg_gen_addi_i32(addr, addr, offset - 4);
8113 store_reg(s, rn, addr);
8114 } else {
7d1b0095 8115 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8116 }
8117 } else if ((insn & (1 << 23)) == 0) {
8118 /* Load/store exclusive word. */
39d5492a 8119 addr = tcg_temp_local_new_i32();
98a46317 8120 load_reg_var(s, addr, rn);
426f5abc 8121 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8122 if (insn & (1 << 20)) {
426f5abc 8123 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8124 } else {
426f5abc 8125 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8126 }
39d5492a 8127 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8128 } else if ((insn & (1 << 6)) == 0) {
8129 /* Table Branch. */
8130 if (rn == 15) {
7d1b0095 8131 addr = tcg_temp_new_i32();
b0109805 8132 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8133 } else {
b0109805 8134 addr = load_reg(s, rn);
9ee6e8bb 8135 }
b26eefb6 8136 tmp = load_reg(s, rm);
b0109805 8137 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8138 if (insn & (1 << 4)) {
8139 /* tbh */
b0109805 8140 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8141 tcg_temp_free_i32(tmp);
e2592fad
PM
8142 tmp = tcg_temp_new_i32();
8143 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb 8144 } else { /* tbb */
7d1b0095 8145 tcg_temp_free_i32(tmp);
e2592fad
PM
8146 tmp = tcg_temp_new_i32();
8147 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb 8148 }
7d1b0095 8149 tcg_temp_free_i32(addr);
b0109805
PB
8150 tcg_gen_shli_i32(tmp, tmp, 1);
8151 tcg_gen_addi_i32(tmp, tmp, s->pc);
8152 store_reg(s, 15, tmp);
9ee6e8bb
PB
8153 } else {
8154 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8155 ARCH(7);
9ee6e8bb 8156 op = (insn >> 4) & 0x3;
426f5abc
PB
8157 if (op == 2) {
8158 goto illegal_op;
8159 }
39d5492a 8160 addr = tcg_temp_local_new_i32();
98a46317 8161 load_reg_var(s, addr, rn);
9ee6e8bb 8162 if (insn & (1 << 20)) {
426f5abc 8163 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8164 } else {
426f5abc 8165 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8166 }
39d5492a 8167 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8168 }
8169 } else {
8170 /* Load/store multiple, RFE, SRS. */
8171 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
8172 /* RFE, SRS: not available in user mode or on M profile */
8173 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 8174 goto illegal_op;
00115976 8175 }
9ee6e8bb
PB
8176 if (insn & (1 << 20)) {
8177 /* rfe */
b0109805
PB
8178 addr = load_reg(s, rn);
8179 if ((insn & (1 << 24)) == 0)
8180 tcg_gen_addi_i32(addr, addr, -8);
8181 /* Load PC into tmp and CPSR into tmp2. */
e2592fad
PM
8182 tmp = tcg_temp_new_i32();
8183 tcg_gen_qemu_ld32u(tmp, addr, 0);
b0109805 8184 tcg_gen_addi_i32(addr, addr, 4);
e2592fad
PM
8185 tmp2 = tcg_temp_new_i32();
8186 tcg_gen_qemu_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
8187 if (insn & (1 << 21)) {
8188 /* Base writeback. */
b0109805
PB
8189 if (insn & (1 << 24)) {
8190 tcg_gen_addi_i32(addr, addr, 4);
8191 } else {
8192 tcg_gen_addi_i32(addr, addr, -4);
8193 }
8194 store_reg(s, rn, addr);
8195 } else {
7d1b0095 8196 tcg_temp_free_i32(addr);
9ee6e8bb 8197 }
b0109805 8198 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8199 } else {
8200 /* srs */
81465888
PM
8201 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8202 insn & (1 << 21));
9ee6e8bb
PB
8203 }
8204 } else {
5856d44e 8205 int i, loaded_base = 0;
39d5492a 8206 TCGv_i32 loaded_var;
9ee6e8bb 8207 /* Load/store multiple. */
b0109805 8208 addr = load_reg(s, rn);
9ee6e8bb
PB
8209 offset = 0;
8210 for (i = 0; i < 16; i++) {
8211 if (insn & (1 << i))
8212 offset += 4;
8213 }
8214 if (insn & (1 << 24)) {
b0109805 8215 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8216 }
8217
39d5492a 8218 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8219 for (i = 0; i < 16; i++) {
8220 if ((insn & (1 << i)) == 0)
8221 continue;
8222 if (insn & (1 << 20)) {
8223 /* Load. */
e2592fad
PM
8224 tmp = tcg_temp_new_i32();
8225 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 8226 if (i == 15) {
b0109805 8227 gen_bx(s, tmp);
5856d44e
YO
8228 } else if (i == rn) {
8229 loaded_var = tmp;
8230 loaded_base = 1;
9ee6e8bb 8231 } else {
b0109805 8232 store_reg(s, i, tmp);
9ee6e8bb
PB
8233 }
8234 } else {
8235 /* Store. */
b0109805 8236 tmp = load_reg(s, i);
e2592fad
PM
8237 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8238 tcg_temp_free_i32(tmp);
9ee6e8bb 8239 }
b0109805 8240 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8241 }
5856d44e
YO
8242 if (loaded_base) {
8243 store_reg(s, rn, loaded_var);
8244 }
9ee6e8bb
PB
8245 if (insn & (1 << 21)) {
8246 /* Base register writeback. */
8247 if (insn & (1 << 24)) {
b0109805 8248 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8249 }
8250 /* Fault if writeback register is in register list. */
8251 if (insn & (1 << rn))
8252 goto illegal_op;
b0109805
PB
8253 store_reg(s, rn, addr);
8254 } else {
7d1b0095 8255 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8256 }
8257 }
8258 }
8259 break;
2af9ab77
JB
8260 case 5:
8261
9ee6e8bb 8262 op = (insn >> 21) & 0xf;
2af9ab77
JB
8263 if (op == 6) {
8264 /* Halfword pack. */
8265 tmp = load_reg(s, rn);
8266 tmp2 = load_reg(s, rm);
8267 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8268 if (insn & (1 << 5)) {
8269 /* pkhtb */
8270 if (shift == 0)
8271 shift = 31;
8272 tcg_gen_sari_i32(tmp2, tmp2, shift);
8273 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8274 tcg_gen_ext16u_i32(tmp2, tmp2);
8275 } else {
8276 /* pkhbt */
8277 if (shift)
8278 tcg_gen_shli_i32(tmp2, tmp2, shift);
8279 tcg_gen_ext16u_i32(tmp, tmp);
8280 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8281 }
8282 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8283 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8284 store_reg(s, rd, tmp);
8285 } else {
2af9ab77
JB
8286 /* Data processing register constant shift. */
8287 if (rn == 15) {
7d1b0095 8288 tmp = tcg_temp_new_i32();
2af9ab77
JB
8289 tcg_gen_movi_i32(tmp, 0);
8290 } else {
8291 tmp = load_reg(s, rn);
8292 }
8293 tmp2 = load_reg(s, rm);
8294
8295 shiftop = (insn >> 4) & 3;
8296 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8297 conds = (insn & (1 << 20)) != 0;
8298 logic_cc = (conds && thumb2_logic_op(op));
8299 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8300 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8301 goto illegal_op;
7d1b0095 8302 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8303 if (rd != 15) {
8304 store_reg(s, rd, tmp);
8305 } else {
7d1b0095 8306 tcg_temp_free_i32(tmp);
2af9ab77 8307 }
3174f8e9 8308 }
9ee6e8bb
PB
8309 break;
8310 case 13: /* Misc data processing. */
8311 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8312 if (op < 4 && (insn & 0xf000) != 0xf000)
8313 goto illegal_op;
8314 switch (op) {
8315 case 0: /* Register controlled shift. */
8984bd2e
PB
8316 tmp = load_reg(s, rn);
8317 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8318 if ((insn & 0x70) != 0)
8319 goto illegal_op;
8320 op = (insn >> 21) & 3;
8984bd2e
PB
8321 logic_cc = (insn & (1 << 20)) != 0;
8322 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8323 if (logic_cc)
8324 gen_logic_CC(tmp);
21aeb343 8325 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8326 break;
8327 case 1: /* Sign/zero extend. */
5e3f878a 8328 tmp = load_reg(s, rm);
9ee6e8bb 8329 shift = (insn >> 4) & 3;
1301f322 8330 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8331 rotate, a shift is sufficient. */
8332 if (shift != 0)
f669df27 8333 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8334 op = (insn >> 20) & 7;
8335 switch (op) {
5e3f878a
PB
8336 case 0: gen_sxth(tmp); break;
8337 case 1: gen_uxth(tmp); break;
8338 case 2: gen_sxtb16(tmp); break;
8339 case 3: gen_uxtb16(tmp); break;
8340 case 4: gen_sxtb(tmp); break;
8341 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8342 default: goto illegal_op;
8343 }
8344 if (rn != 15) {
5e3f878a 8345 tmp2 = load_reg(s, rn);
9ee6e8bb 8346 if ((op >> 1) == 1) {
5e3f878a 8347 gen_add16(tmp, tmp2);
9ee6e8bb 8348 } else {
5e3f878a 8349 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8350 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8351 }
8352 }
5e3f878a 8353 store_reg(s, rd, tmp);
9ee6e8bb
PB
8354 break;
8355 case 2: /* SIMD add/subtract. */
8356 op = (insn >> 20) & 7;
8357 shift = (insn >> 4) & 7;
8358 if ((op & 3) == 3 || (shift & 3) == 3)
8359 goto illegal_op;
6ddbc6e4
PB
8360 tmp = load_reg(s, rn);
8361 tmp2 = load_reg(s, rm);
8362 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8363 tcg_temp_free_i32(tmp2);
6ddbc6e4 8364 store_reg(s, rd, tmp);
9ee6e8bb
PB
8365 break;
8366 case 3: /* Other data processing. */
8367 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8368 if (op < 4) {
8369 /* Saturating add/subtract. */
d9ba4830
PB
8370 tmp = load_reg(s, rn);
8371 tmp2 = load_reg(s, rm);
9ee6e8bb 8372 if (op & 1)
9ef39277 8373 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8374 if (op & 2)
9ef39277 8375 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8376 else
9ef39277 8377 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8378 tcg_temp_free_i32(tmp2);
9ee6e8bb 8379 } else {
d9ba4830 8380 tmp = load_reg(s, rn);
9ee6e8bb
PB
8381 switch (op) {
8382 case 0x0a: /* rbit */
d9ba4830 8383 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8384 break;
8385 case 0x08: /* rev */
66896cb8 8386 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8387 break;
8388 case 0x09: /* rev16 */
d9ba4830 8389 gen_rev16(tmp);
9ee6e8bb
PB
8390 break;
8391 case 0x0b: /* revsh */
d9ba4830 8392 gen_revsh(tmp);
9ee6e8bb
PB
8393 break;
8394 case 0x10: /* sel */
d9ba4830 8395 tmp2 = load_reg(s, rm);
7d1b0095 8396 tmp3 = tcg_temp_new_i32();
0ecb72a5 8397 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8398 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8399 tcg_temp_free_i32(tmp3);
8400 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8401 break;
8402 case 0x18: /* clz */
d9ba4830 8403 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8404 break;
8405 default:
8406 goto illegal_op;
8407 }
8408 }
d9ba4830 8409 store_reg(s, rd, tmp);
9ee6e8bb
PB
8410 break;
8411 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8412 op = (insn >> 4) & 0xf;
d9ba4830
PB
8413 tmp = load_reg(s, rn);
8414 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8415 switch ((insn >> 20) & 7) {
8416 case 0: /* 32 x 32 -> 32 */
d9ba4830 8417 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8418 tcg_temp_free_i32(tmp2);
9ee6e8bb 8419 if (rs != 15) {
d9ba4830 8420 tmp2 = load_reg(s, rs);
9ee6e8bb 8421 if (op)
d9ba4830 8422 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8423 else
d9ba4830 8424 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8425 tcg_temp_free_i32(tmp2);
9ee6e8bb 8426 }
9ee6e8bb
PB
8427 break;
8428 case 1: /* 16 x 16 -> 32 */
d9ba4830 8429 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8430 tcg_temp_free_i32(tmp2);
9ee6e8bb 8431 if (rs != 15) {
d9ba4830 8432 tmp2 = load_reg(s, rs);
9ef39277 8433 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8434 tcg_temp_free_i32(tmp2);
9ee6e8bb 8435 }
9ee6e8bb
PB
8436 break;
8437 case 2: /* Dual multiply add. */
8438 case 4: /* Dual multiply subtract. */
8439 if (op)
d9ba4830
PB
8440 gen_swap_half(tmp2);
8441 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8442 if (insn & (1 << 22)) {
e1d177b9 8443 /* This subtraction cannot overflow. */
d9ba4830 8444 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8445 } else {
e1d177b9
PM
8446 /* This addition cannot overflow 32 bits;
8447 * however it may overflow considered as a signed
8448 * operation, in which case we must set the Q flag.
8449 */
9ef39277 8450 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8451 }
7d1b0095 8452 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8453 if (rs != 15)
8454 {
d9ba4830 8455 tmp2 = load_reg(s, rs);
9ef39277 8456 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8457 tcg_temp_free_i32(tmp2);
9ee6e8bb 8458 }
9ee6e8bb
PB
8459 break;
8460 case 3: /* 32 * 16 -> 32msb */
8461 if (op)
d9ba4830 8462 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8463 else
d9ba4830 8464 gen_sxth(tmp2);
a7812ae4
PB
8465 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8466 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8467 tmp = tcg_temp_new_i32();
a7812ae4 8468 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8469 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8470 if (rs != 15)
8471 {
d9ba4830 8472 tmp2 = load_reg(s, rs);
9ef39277 8473 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8474 tcg_temp_free_i32(tmp2);
9ee6e8bb 8475 }
9ee6e8bb 8476 break;
838fa72d
AJ
8477 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8478 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8479 if (rs != 15) {
838fa72d
AJ
8480 tmp = load_reg(s, rs);
8481 if (insn & (1 << 20)) {
8482 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8483 } else {
838fa72d 8484 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8485 }
2c0262af 8486 }
838fa72d
AJ
8487 if (insn & (1 << 4)) {
8488 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8489 }
8490 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8491 tmp = tcg_temp_new_i32();
838fa72d
AJ
8492 tcg_gen_trunc_i64_i32(tmp, tmp64);
8493 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8494 break;
8495 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8496 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8497 tcg_temp_free_i32(tmp2);
9ee6e8bb 8498 if (rs != 15) {
d9ba4830
PB
8499 tmp2 = load_reg(s, rs);
8500 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8501 tcg_temp_free_i32(tmp2);
5fd46862 8502 }
9ee6e8bb 8503 break;
2c0262af 8504 }
d9ba4830 8505 store_reg(s, rd, tmp);
2c0262af 8506 break;
9ee6e8bb
PB
8507 case 6: case 7: /* 64-bit multiply, Divide. */
8508 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8509 tmp = load_reg(s, rn);
8510 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8511 if ((op & 0x50) == 0x10) {
8512 /* sdiv, udiv */
47789990 8513 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8514 goto illegal_op;
47789990 8515 }
9ee6e8bb 8516 if (op & 0x20)
5e3f878a 8517 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8518 else
5e3f878a 8519 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8520 tcg_temp_free_i32(tmp2);
5e3f878a 8521 store_reg(s, rd, tmp);
9ee6e8bb
PB
8522 } else if ((op & 0xe) == 0xc) {
8523 /* Dual multiply accumulate long. */
8524 if (op & 1)
5e3f878a
PB
8525 gen_swap_half(tmp2);
8526 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8527 if (op & 0x10) {
5e3f878a 8528 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8529 } else {
5e3f878a 8530 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8531 }
7d1b0095 8532 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8533 /* BUGFIX */
8534 tmp64 = tcg_temp_new_i64();
8535 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8536 tcg_temp_free_i32(tmp);
a7812ae4
PB
8537 gen_addq(s, tmp64, rs, rd);
8538 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8539 tcg_temp_free_i64(tmp64);
2c0262af 8540 } else {
9ee6e8bb
PB
8541 if (op & 0x20) {
8542 /* Unsigned 64-bit multiply */
a7812ae4 8543 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8544 } else {
9ee6e8bb
PB
8545 if (op & 8) {
8546 /* smlalxy */
5e3f878a 8547 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8548 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8549 tmp64 = tcg_temp_new_i64();
8550 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8551 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8552 } else {
8553 /* Signed 64-bit multiply */
a7812ae4 8554 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8555 }
b5ff1b31 8556 }
9ee6e8bb
PB
8557 if (op & 4) {
8558 /* umaal */
a7812ae4
PB
8559 gen_addq_lo(s, tmp64, rs);
8560 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8561 } else if (op & 0x40) {
8562 /* 64-bit accumulate. */
a7812ae4 8563 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8564 }
a7812ae4 8565 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8566 tcg_temp_free_i64(tmp64);
5fd46862 8567 }
2c0262af 8568 break;
9ee6e8bb
PB
8569 }
8570 break;
8571 case 6: case 7: case 14: case 15:
8572 /* Coprocessor. */
8573 if (((insn >> 24) & 3) == 3) {
8574 /* Translate into the equivalent ARM encoding. */
f06053e3 8575 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8576 if (disas_neon_data_insn(env, s, insn))
8577 goto illegal_op;
8578 } else {
8579 if (insn & (1 << 28))
8580 goto illegal_op;
8581 if (disas_coproc_insn (env, s, insn))
8582 goto illegal_op;
8583 }
8584 break;
8585 case 8: case 9: case 10: case 11:
8586 if (insn & (1 << 15)) {
8587 /* Branches, misc control. */
8588 if (insn & 0x5000) {
8589 /* Unconditional branch. */
8590 /* signextend(hw1[10:0]) -> offset[:12]. */
8591 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8592 /* hw1[10:0] -> offset[11:1]. */
8593 offset |= (insn & 0x7ff) << 1;
8594 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8595 offset[24:22] already have the same value because of the
8596 sign extension above. */
8597 offset ^= ((~insn) & (1 << 13)) << 10;
8598 offset ^= ((~insn) & (1 << 11)) << 11;
8599
9ee6e8bb
PB
8600 if (insn & (1 << 14)) {
8601 /* Branch and link. */
3174f8e9 8602 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8603 }
3b46e624 8604
b0109805 8605 offset += s->pc;
9ee6e8bb
PB
8606 if (insn & (1 << 12)) {
8607 /* b/bl */
b0109805 8608 gen_jmp(s, offset);
9ee6e8bb
PB
8609 } else {
8610 /* blx */
b0109805 8611 offset &= ~(uint32_t)2;
be5e7a76 8612 /* thumb2 bx, no need to check */
b0109805 8613 gen_bx_im(s, offset);
2c0262af 8614 }
9ee6e8bb
PB
8615 } else if (((insn >> 23) & 7) == 7) {
8616 /* Misc control */
8617 if (insn & (1 << 13))
8618 goto illegal_op;
8619
8620 if (insn & (1 << 26)) {
8621 /* Secure monitor call (v6Z) */
8622 goto illegal_op; /* not implemented. */
2c0262af 8623 } else {
9ee6e8bb
PB
8624 op = (insn >> 20) & 7;
8625 switch (op) {
8626 case 0: /* msr cpsr. */
8627 if (IS_M(env)) {
8984bd2e
PB
8628 tmp = load_reg(s, rn);
8629 addr = tcg_const_i32(insn & 0xff);
8630 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8631 tcg_temp_free_i32(addr);
7d1b0095 8632 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8633 gen_lookup_tb(s);
8634 break;
8635 }
8636 /* fall through */
8637 case 1: /* msr spsr. */
8638 if (IS_M(env))
8639 goto illegal_op;
2fbac54b
FN
8640 tmp = load_reg(s, rn);
8641 if (gen_set_psr(s,
9ee6e8bb 8642 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8643 op == 1, tmp))
9ee6e8bb
PB
8644 goto illegal_op;
8645 break;
8646 case 2: /* cps, nop-hint. */
8647 if (((insn >> 8) & 7) == 0) {
8648 gen_nop_hint(s, insn & 0xff);
8649 }
8650 /* Implemented as NOP in user mode. */
8651 if (IS_USER(s))
8652 break;
8653 offset = 0;
8654 imm = 0;
8655 if (insn & (1 << 10)) {
8656 if (insn & (1 << 7))
8657 offset |= CPSR_A;
8658 if (insn & (1 << 6))
8659 offset |= CPSR_I;
8660 if (insn & (1 << 5))
8661 offset |= CPSR_F;
8662 if (insn & (1 << 9))
8663 imm = CPSR_A | CPSR_I | CPSR_F;
8664 }
8665 if (insn & (1 << 8)) {
8666 offset |= 0x1f;
8667 imm |= (insn & 0x1f);
8668 }
8669 if (offset) {
2fbac54b 8670 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8671 }
8672 break;
8673 case 3: /* Special control operations. */
426f5abc 8674 ARCH(7);
9ee6e8bb
PB
8675 op = (insn >> 4) & 0xf;
8676 switch (op) {
8677 case 2: /* clrex */
426f5abc 8678 gen_clrex(s);
9ee6e8bb
PB
8679 break;
8680 case 4: /* dsb */
8681 case 5: /* dmb */
8682 case 6: /* isb */
8683 /* These execute as NOPs. */
9ee6e8bb
PB
8684 break;
8685 default:
8686 goto illegal_op;
8687 }
8688 break;
8689 case 4: /* bxj */
8690 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8691 tmp = load_reg(s, rn);
8692 gen_bx(s, tmp);
9ee6e8bb
PB
8693 break;
8694 case 5: /* Exception return. */
b8b45b68
RV
8695 if (IS_USER(s)) {
8696 goto illegal_op;
8697 }
8698 if (rn != 14 || rd != 15) {
8699 goto illegal_op;
8700 }
8701 tmp = load_reg(s, rn);
8702 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8703 gen_exception_return(s, tmp);
8704 break;
9ee6e8bb 8705 case 6: /* mrs cpsr. */
7d1b0095 8706 tmp = tcg_temp_new_i32();
9ee6e8bb 8707 if (IS_M(env)) {
8984bd2e
PB
8708 addr = tcg_const_i32(insn & 0xff);
8709 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8710 tcg_temp_free_i32(addr);
9ee6e8bb 8711 } else {
9ef39277 8712 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8713 }
8984bd2e 8714 store_reg(s, rd, tmp);
9ee6e8bb
PB
8715 break;
8716 case 7: /* mrs spsr. */
8717 /* Not accessible in user mode. */
8718 if (IS_USER(s) || IS_M(env))
8719 goto illegal_op;
d9ba4830
PB
8720 tmp = load_cpu_field(spsr);
8721 store_reg(s, rd, tmp);
9ee6e8bb 8722 break;
2c0262af
FB
8723 }
8724 }
9ee6e8bb
PB
8725 } else {
8726 /* Conditional branch. */
8727 op = (insn >> 22) & 0xf;
8728 /* Generate a conditional jump to next instruction. */
8729 s->condlabel = gen_new_label();
d9ba4830 8730 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8731 s->condjmp = 1;
8732
8733 /* offset[11:1] = insn[10:0] */
8734 offset = (insn & 0x7ff) << 1;
8735 /* offset[17:12] = insn[21:16]. */
8736 offset |= (insn & 0x003f0000) >> 4;
8737 /* offset[31:20] = insn[26]. */
8738 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8739 /* offset[18] = insn[13]. */
8740 offset |= (insn & (1 << 13)) << 5;
8741 /* offset[19] = insn[11]. */
8742 offset |= (insn & (1 << 11)) << 8;
8743
8744 /* jump to the offset */
b0109805 8745 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8746 }
8747 } else {
8748 /* Data processing immediate. */
8749 if (insn & (1 << 25)) {
8750 if (insn & (1 << 24)) {
8751 if (insn & (1 << 20))
8752 goto illegal_op;
8753 /* Bitfield/Saturate. */
8754 op = (insn >> 21) & 7;
8755 imm = insn & 0x1f;
8756 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8757 if (rn == 15) {
7d1b0095 8758 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8759 tcg_gen_movi_i32(tmp, 0);
8760 } else {
8761 tmp = load_reg(s, rn);
8762 }
9ee6e8bb
PB
8763 switch (op) {
8764 case 2: /* Signed bitfield extract. */
8765 imm++;
8766 if (shift + imm > 32)
8767 goto illegal_op;
8768 if (imm < 32)
6ddbc6e4 8769 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8770 break;
8771 case 6: /* Unsigned bitfield extract. */
8772 imm++;
8773 if (shift + imm > 32)
8774 goto illegal_op;
8775 if (imm < 32)
6ddbc6e4 8776 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8777 break;
8778 case 3: /* Bitfield insert/clear. */
8779 if (imm < shift)
8780 goto illegal_op;
8781 imm = imm + 1 - shift;
8782 if (imm != 32) {
6ddbc6e4 8783 tmp2 = load_reg(s, rd);
d593c48e 8784 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8785 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8786 }
8787 break;
8788 case 7:
8789 goto illegal_op;
8790 default: /* Saturate. */
9ee6e8bb
PB
8791 if (shift) {
8792 if (op & 1)
6ddbc6e4 8793 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8794 else
6ddbc6e4 8795 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8796 }
6ddbc6e4 8797 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8798 if (op & 4) {
8799 /* Unsigned. */
9ee6e8bb 8800 if ((op & 1) && shift == 0)
9ef39277 8801 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8802 else
9ef39277 8803 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8804 } else {
9ee6e8bb 8805 /* Signed. */
9ee6e8bb 8806 if ((op & 1) && shift == 0)
9ef39277 8807 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8808 else
9ef39277 8809 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 8810 }
b75263d6 8811 tcg_temp_free_i32(tmp2);
9ee6e8bb 8812 break;
2c0262af 8813 }
6ddbc6e4 8814 store_reg(s, rd, tmp);
9ee6e8bb
PB
8815 } else {
8816 imm = ((insn & 0x04000000) >> 15)
8817 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8818 if (insn & (1 << 22)) {
8819 /* 16-bit immediate. */
8820 imm |= (insn >> 4) & 0xf000;
8821 if (insn & (1 << 23)) {
8822 /* movt */
5e3f878a 8823 tmp = load_reg(s, rd);
86831435 8824 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8825 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8826 } else {
9ee6e8bb 8827 /* movw */
7d1b0095 8828 tmp = tcg_temp_new_i32();
5e3f878a 8829 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8830 }
8831 } else {
9ee6e8bb
PB
8832 /* Add/sub 12-bit immediate. */
8833 if (rn == 15) {
b0109805 8834 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8835 if (insn & (1 << 23))
b0109805 8836 offset -= imm;
9ee6e8bb 8837 else
b0109805 8838 offset += imm;
7d1b0095 8839 tmp = tcg_temp_new_i32();
5e3f878a 8840 tcg_gen_movi_i32(tmp, offset);
2c0262af 8841 } else {
5e3f878a 8842 tmp = load_reg(s, rn);
9ee6e8bb 8843 if (insn & (1 << 23))
5e3f878a 8844 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8845 else
5e3f878a 8846 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8847 }
9ee6e8bb 8848 }
5e3f878a 8849 store_reg(s, rd, tmp);
191abaa2 8850 }
9ee6e8bb
PB
8851 } else {
8852 int shifter_out = 0;
8853 /* modified 12-bit immediate. */
8854 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8855 imm = (insn & 0xff);
8856 switch (shift) {
8857 case 0: /* XY */
8858 /* Nothing to do. */
8859 break;
8860 case 1: /* 00XY00XY */
8861 imm |= imm << 16;
8862 break;
8863 case 2: /* XY00XY00 */
8864 imm |= imm << 16;
8865 imm <<= 8;
8866 break;
8867 case 3: /* XYXYXYXY */
8868 imm |= imm << 16;
8869 imm |= imm << 8;
8870 break;
8871 default: /* Rotated constant. */
8872 shift = (shift << 1) | (imm >> 7);
8873 imm |= 0x80;
8874 imm = imm << (32 - shift);
8875 shifter_out = 1;
8876 break;
b5ff1b31 8877 }
7d1b0095 8878 tmp2 = tcg_temp_new_i32();
3174f8e9 8879 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8880 rn = (insn >> 16) & 0xf;
3174f8e9 8881 if (rn == 15) {
7d1b0095 8882 tmp = tcg_temp_new_i32();
3174f8e9
FN
8883 tcg_gen_movi_i32(tmp, 0);
8884 } else {
8885 tmp = load_reg(s, rn);
8886 }
9ee6e8bb
PB
8887 op = (insn >> 21) & 0xf;
8888 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8889 shifter_out, tmp, tmp2))
9ee6e8bb 8890 goto illegal_op;
7d1b0095 8891 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8892 rd = (insn >> 8) & 0xf;
8893 if (rd != 15) {
3174f8e9
FN
8894 store_reg(s, rd, tmp);
8895 } else {
7d1b0095 8896 tcg_temp_free_i32(tmp);
2c0262af 8897 }
2c0262af 8898 }
9ee6e8bb
PB
8899 }
8900 break;
8901 case 12: /* Load/store single data item. */
8902 {
8903 int postinc = 0;
8904 int writeback = 0;
b0109805 8905 int user;
9ee6e8bb
PB
8906 if ((insn & 0x01100000) == 0x01000000) {
8907 if (disas_neon_ls_insn(env, s, insn))
c1713132 8908 goto illegal_op;
9ee6e8bb
PB
8909 break;
8910 }
a2fdc890
PM
8911 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8912 if (rs == 15) {
8913 if (!(insn & (1 << 20))) {
8914 goto illegal_op;
8915 }
8916 if (op != 2) {
8917 /* Byte or halfword load space with dest == r15 : memory hints.
8918 * Catch them early so we don't emit pointless addressing code.
8919 * This space is a mix of:
8920 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8921 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8922 * cores)
8923 * unallocated hints, which must be treated as NOPs
8924 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8925 * which is easiest for the decoding logic
8926 * Some space which must UNDEF
8927 */
8928 int op1 = (insn >> 23) & 3;
8929 int op2 = (insn >> 6) & 0x3f;
8930 if (op & 2) {
8931 goto illegal_op;
8932 }
8933 if (rn == 15) {
02afbf64
PM
8934 /* UNPREDICTABLE, unallocated hint or
8935 * PLD/PLDW/PLI (literal)
8936 */
a2fdc890
PM
8937 return 0;
8938 }
8939 if (op1 & 1) {
02afbf64 8940 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8941 }
8942 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 8943 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8944 }
8945 /* UNDEF space, or an UNPREDICTABLE */
8946 return 1;
8947 }
8948 }
b0109805 8949 user = IS_USER(s);
9ee6e8bb 8950 if (rn == 15) {
7d1b0095 8951 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8952 /* PC relative. */
8953 /* s->pc has already been incremented by 4. */
8954 imm = s->pc & 0xfffffffc;
8955 if (insn & (1 << 23))
8956 imm += insn & 0xfff;
8957 else
8958 imm -= insn & 0xfff;
b0109805 8959 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8960 } else {
b0109805 8961 addr = load_reg(s, rn);
9ee6e8bb
PB
8962 if (insn & (1 << 23)) {
8963 /* Positive offset. */
8964 imm = insn & 0xfff;
b0109805 8965 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8966 } else {
9ee6e8bb 8967 imm = insn & 0xff;
2a0308c5
PM
8968 switch ((insn >> 8) & 0xf) {
8969 case 0x0: /* Shifted Register. */
9ee6e8bb 8970 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8971 if (shift > 3) {
8972 tcg_temp_free_i32(addr);
18c9b560 8973 goto illegal_op;
2a0308c5 8974 }
b26eefb6 8975 tmp = load_reg(s, rm);
9ee6e8bb 8976 if (shift)
b26eefb6 8977 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8978 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8979 tcg_temp_free_i32(tmp);
9ee6e8bb 8980 break;
2a0308c5 8981 case 0xc: /* Negative offset. */
b0109805 8982 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8983 break;
2a0308c5 8984 case 0xe: /* User privilege. */
b0109805
PB
8985 tcg_gen_addi_i32(addr, addr, imm);
8986 user = 1;
9ee6e8bb 8987 break;
2a0308c5 8988 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8989 imm = -imm;
8990 /* Fall through. */
2a0308c5 8991 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8992 postinc = 1;
8993 writeback = 1;
8994 break;
2a0308c5 8995 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8996 imm = -imm;
8997 /* Fall through. */
2a0308c5 8998 case 0xf: /* Pre-increment. */
b0109805 8999 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9000 writeback = 1;
9001 break;
9002 default:
2a0308c5 9003 tcg_temp_free_i32(addr);
b7bcbe95 9004 goto illegal_op;
9ee6e8bb
PB
9005 }
9006 }
9007 }
9ee6e8bb
PB
9008 if (insn & (1 << 20)) {
9009 /* Load. */
5a839c0d 9010 tmp = tcg_temp_new_i32();
a2fdc890 9011 switch (op) {
5a839c0d
PM
9012 case 0:
9013 tcg_gen_qemu_ld8u(tmp, addr, user);
9014 break;
9015 case 4:
9016 tcg_gen_qemu_ld8s(tmp, addr, user);
9017 break;
9018 case 1:
9019 tcg_gen_qemu_ld16u(tmp, addr, user);
9020 break;
9021 case 5:
9022 tcg_gen_qemu_ld16s(tmp, addr, user);
9023 break;
9024 case 2:
9025 tcg_gen_qemu_ld32u(tmp, addr, user);
9026 break;
2a0308c5 9027 default:
5a839c0d 9028 tcg_temp_free_i32(tmp);
2a0308c5
PM
9029 tcg_temp_free_i32(addr);
9030 goto illegal_op;
a2fdc890
PM
9031 }
9032 if (rs == 15) {
9033 gen_bx(s, tmp);
9ee6e8bb 9034 } else {
a2fdc890 9035 store_reg(s, rs, tmp);
9ee6e8bb
PB
9036 }
9037 } else {
9038 /* Store. */
b0109805 9039 tmp = load_reg(s, rs);
9ee6e8bb 9040 switch (op) {
5a839c0d
PM
9041 case 0:
9042 tcg_gen_qemu_st8(tmp, addr, user);
9043 break;
9044 case 1:
9045 tcg_gen_qemu_st16(tmp, addr, user);
9046 break;
9047 case 2:
9048 tcg_gen_qemu_st32(tmp, addr, user);
9049 break;
2a0308c5 9050 default:
5a839c0d 9051 tcg_temp_free_i32(tmp);
2a0308c5
PM
9052 tcg_temp_free_i32(addr);
9053 goto illegal_op;
b7bcbe95 9054 }
5a839c0d 9055 tcg_temp_free_i32(tmp);
2c0262af 9056 }
9ee6e8bb 9057 if (postinc)
b0109805
PB
9058 tcg_gen_addi_i32(addr, addr, imm);
9059 if (writeback) {
9060 store_reg(s, rn, addr);
9061 } else {
7d1b0095 9062 tcg_temp_free_i32(addr);
b0109805 9063 }
9ee6e8bb
PB
9064 }
9065 break;
9066 default:
9067 goto illegal_op;
2c0262af 9068 }
9ee6e8bb
PB
9069 return 0;
9070illegal_op:
9071 return 1;
2c0262af
FB
9072}
9073
0ecb72a5 9074static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9075{
9076 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9077 int32_t offset;
9078 int i;
39d5492a
PM
9079 TCGv_i32 tmp;
9080 TCGv_i32 tmp2;
9081 TCGv_i32 addr;
99c475ab 9082
9ee6e8bb
PB
9083 if (s->condexec_mask) {
9084 cond = s->condexec_cond;
bedd2912
JB
9085 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9086 s->condlabel = gen_new_label();
9087 gen_test_cc(cond ^ 1, s->condlabel);
9088 s->condjmp = 1;
9089 }
9ee6e8bb
PB
9090 }
9091
d31dd73e 9092 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9093 s->pc += 2;
b5ff1b31 9094
99c475ab
FB
9095 switch (insn >> 12) {
9096 case 0: case 1:
396e467c 9097
99c475ab
FB
9098 rd = insn & 7;
9099 op = (insn >> 11) & 3;
9100 if (op == 3) {
9101 /* add/subtract */
9102 rn = (insn >> 3) & 7;
396e467c 9103 tmp = load_reg(s, rn);
99c475ab
FB
9104 if (insn & (1 << 10)) {
9105 /* immediate */
7d1b0095 9106 tmp2 = tcg_temp_new_i32();
396e467c 9107 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9108 } else {
9109 /* reg */
9110 rm = (insn >> 6) & 7;
396e467c 9111 tmp2 = load_reg(s, rm);
99c475ab 9112 }
9ee6e8bb
PB
9113 if (insn & (1 << 9)) {
9114 if (s->condexec_mask)
396e467c 9115 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9116 else
72485ec4 9117 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9118 } else {
9119 if (s->condexec_mask)
396e467c 9120 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9121 else
72485ec4 9122 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9123 }
7d1b0095 9124 tcg_temp_free_i32(tmp2);
396e467c 9125 store_reg(s, rd, tmp);
99c475ab
FB
9126 } else {
9127 /* shift immediate */
9128 rm = (insn >> 3) & 7;
9129 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9130 tmp = load_reg(s, rm);
9131 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9132 if (!s->condexec_mask)
9133 gen_logic_CC(tmp);
9134 store_reg(s, rd, tmp);
99c475ab
FB
9135 }
9136 break;
9137 case 2: case 3:
9138 /* arithmetic large immediate */
9139 op = (insn >> 11) & 3;
9140 rd = (insn >> 8) & 0x7;
396e467c 9141 if (op == 0) { /* mov */
7d1b0095 9142 tmp = tcg_temp_new_i32();
396e467c 9143 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9144 if (!s->condexec_mask)
396e467c
FN
9145 gen_logic_CC(tmp);
9146 store_reg(s, rd, tmp);
9147 } else {
9148 tmp = load_reg(s, rd);
7d1b0095 9149 tmp2 = tcg_temp_new_i32();
396e467c
FN
9150 tcg_gen_movi_i32(tmp2, insn & 0xff);
9151 switch (op) {
9152 case 1: /* cmp */
72485ec4 9153 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9154 tcg_temp_free_i32(tmp);
9155 tcg_temp_free_i32(tmp2);
396e467c
FN
9156 break;
9157 case 2: /* add */
9158 if (s->condexec_mask)
9159 tcg_gen_add_i32(tmp, tmp, tmp2);
9160 else
72485ec4 9161 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9162 tcg_temp_free_i32(tmp2);
396e467c
FN
9163 store_reg(s, rd, tmp);
9164 break;
9165 case 3: /* sub */
9166 if (s->condexec_mask)
9167 tcg_gen_sub_i32(tmp, tmp, tmp2);
9168 else
72485ec4 9169 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9170 tcg_temp_free_i32(tmp2);
396e467c
FN
9171 store_reg(s, rd, tmp);
9172 break;
9173 }
99c475ab 9174 }
99c475ab
FB
9175 break;
9176 case 4:
9177 if (insn & (1 << 11)) {
9178 rd = (insn >> 8) & 7;
5899f386
FB
9179 /* load pc-relative. Bit 1 of PC is ignored. */
9180 val = s->pc + 2 + ((insn & 0xff) * 4);
9181 val &= ~(uint32_t)2;
7d1b0095 9182 addr = tcg_temp_new_i32();
b0109805 9183 tcg_gen_movi_i32(addr, val);
c40c8556
PM
9184 tmp = tcg_temp_new_i32();
9185 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7d1b0095 9186 tcg_temp_free_i32(addr);
b0109805 9187 store_reg(s, rd, tmp);
99c475ab
FB
9188 break;
9189 }
9190 if (insn & (1 << 10)) {
9191 /* data processing extended or blx */
9192 rd = (insn & 7) | ((insn >> 4) & 8);
9193 rm = (insn >> 3) & 0xf;
9194 op = (insn >> 8) & 3;
9195 switch (op) {
9196 case 0: /* add */
396e467c
FN
9197 tmp = load_reg(s, rd);
9198 tmp2 = load_reg(s, rm);
9199 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9200 tcg_temp_free_i32(tmp2);
396e467c 9201 store_reg(s, rd, tmp);
99c475ab
FB
9202 break;
9203 case 1: /* cmp */
396e467c
FN
9204 tmp = load_reg(s, rd);
9205 tmp2 = load_reg(s, rm);
72485ec4 9206 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9207 tcg_temp_free_i32(tmp2);
9208 tcg_temp_free_i32(tmp);
99c475ab
FB
9209 break;
9210 case 2: /* mov/cpy */
396e467c
FN
9211 tmp = load_reg(s, rm);
9212 store_reg(s, rd, tmp);
99c475ab
FB
9213 break;
9214 case 3:/* branch [and link] exchange thumb register */
b0109805 9215 tmp = load_reg(s, rm);
99c475ab 9216 if (insn & (1 << 7)) {
be5e7a76 9217 ARCH(5);
99c475ab 9218 val = (uint32_t)s->pc | 1;
7d1b0095 9219 tmp2 = tcg_temp_new_i32();
b0109805
PB
9220 tcg_gen_movi_i32(tmp2, val);
9221 store_reg(s, 14, tmp2);
99c475ab 9222 }
be5e7a76 9223 /* already thumb, no need to check */
d9ba4830 9224 gen_bx(s, tmp);
99c475ab
FB
9225 break;
9226 }
9227 break;
9228 }
9229
9230 /* data processing register */
9231 rd = insn & 7;
9232 rm = (insn >> 3) & 7;
9233 op = (insn >> 6) & 0xf;
9234 if (op == 2 || op == 3 || op == 4 || op == 7) {
9235 /* the shift/rotate ops want the operands backwards */
9236 val = rm;
9237 rm = rd;
9238 rd = val;
9239 val = 1;
9240 } else {
9241 val = 0;
9242 }
9243
396e467c 9244 if (op == 9) { /* neg */
7d1b0095 9245 tmp = tcg_temp_new_i32();
396e467c
FN
9246 tcg_gen_movi_i32(tmp, 0);
9247 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9248 tmp = load_reg(s, rd);
9249 } else {
39d5492a 9250 TCGV_UNUSED_I32(tmp);
396e467c 9251 }
99c475ab 9252
396e467c 9253 tmp2 = load_reg(s, rm);
5899f386 9254 switch (op) {
99c475ab 9255 case 0x0: /* and */
396e467c 9256 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9257 if (!s->condexec_mask)
396e467c 9258 gen_logic_CC(tmp);
99c475ab
FB
9259 break;
9260 case 0x1: /* eor */
396e467c 9261 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9262 if (!s->condexec_mask)
396e467c 9263 gen_logic_CC(tmp);
99c475ab
FB
9264 break;
9265 case 0x2: /* lsl */
9ee6e8bb 9266 if (s->condexec_mask) {
365af80e 9267 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9268 } else {
9ef39277 9269 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9270 gen_logic_CC(tmp2);
9ee6e8bb 9271 }
99c475ab
FB
9272 break;
9273 case 0x3: /* lsr */
9ee6e8bb 9274 if (s->condexec_mask) {
365af80e 9275 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9276 } else {
9ef39277 9277 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9278 gen_logic_CC(tmp2);
9ee6e8bb 9279 }
99c475ab
FB
9280 break;
9281 case 0x4: /* asr */
9ee6e8bb 9282 if (s->condexec_mask) {
365af80e 9283 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9284 } else {
9ef39277 9285 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9286 gen_logic_CC(tmp2);
9ee6e8bb 9287 }
99c475ab
FB
9288 break;
9289 case 0x5: /* adc */
49b4c31e 9290 if (s->condexec_mask) {
396e467c 9291 gen_adc(tmp, tmp2);
49b4c31e
RH
9292 } else {
9293 gen_adc_CC(tmp, tmp, tmp2);
9294 }
99c475ab
FB
9295 break;
9296 case 0x6: /* sbc */
2de68a49 9297 if (s->condexec_mask) {
396e467c 9298 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
9299 } else {
9300 gen_sbc_CC(tmp, tmp, tmp2);
9301 }
99c475ab
FB
9302 break;
9303 case 0x7: /* ror */
9ee6e8bb 9304 if (s->condexec_mask) {
f669df27
AJ
9305 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9306 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9307 } else {
9ef39277 9308 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9309 gen_logic_CC(tmp2);
9ee6e8bb 9310 }
99c475ab
FB
9311 break;
9312 case 0x8: /* tst */
396e467c
FN
9313 tcg_gen_and_i32(tmp, tmp, tmp2);
9314 gen_logic_CC(tmp);
99c475ab 9315 rd = 16;
5899f386 9316 break;
99c475ab 9317 case 0x9: /* neg */
9ee6e8bb 9318 if (s->condexec_mask)
396e467c 9319 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9320 else
72485ec4 9321 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9322 break;
9323 case 0xa: /* cmp */
72485ec4 9324 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9325 rd = 16;
9326 break;
9327 case 0xb: /* cmn */
72485ec4 9328 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9329 rd = 16;
9330 break;
9331 case 0xc: /* orr */
396e467c 9332 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9333 if (!s->condexec_mask)
396e467c 9334 gen_logic_CC(tmp);
99c475ab
FB
9335 break;
9336 case 0xd: /* mul */
7b2919a0 9337 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9338 if (!s->condexec_mask)
396e467c 9339 gen_logic_CC(tmp);
99c475ab
FB
9340 break;
9341 case 0xe: /* bic */
f669df27 9342 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9343 if (!s->condexec_mask)
396e467c 9344 gen_logic_CC(tmp);
99c475ab
FB
9345 break;
9346 case 0xf: /* mvn */
396e467c 9347 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9348 if (!s->condexec_mask)
396e467c 9349 gen_logic_CC(tmp2);
99c475ab 9350 val = 1;
5899f386 9351 rm = rd;
99c475ab
FB
9352 break;
9353 }
9354 if (rd != 16) {
396e467c
FN
9355 if (val) {
9356 store_reg(s, rm, tmp2);
9357 if (op != 0xf)
7d1b0095 9358 tcg_temp_free_i32(tmp);
396e467c
FN
9359 } else {
9360 store_reg(s, rd, tmp);
7d1b0095 9361 tcg_temp_free_i32(tmp2);
396e467c
FN
9362 }
9363 } else {
7d1b0095
PM
9364 tcg_temp_free_i32(tmp);
9365 tcg_temp_free_i32(tmp2);
99c475ab
FB
9366 }
9367 break;
9368
9369 case 5:
9370 /* load/store register offset. */
9371 rd = insn & 7;
9372 rn = (insn >> 3) & 7;
9373 rm = (insn >> 6) & 7;
9374 op = (insn >> 9) & 7;
b0109805 9375 addr = load_reg(s, rn);
b26eefb6 9376 tmp = load_reg(s, rm);
b0109805 9377 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9378 tcg_temp_free_i32(tmp);
99c475ab 9379
c40c8556 9380 if (op < 3) { /* store */
b0109805 9381 tmp = load_reg(s, rd);
c40c8556
PM
9382 } else {
9383 tmp = tcg_temp_new_i32();
9384 }
99c475ab
FB
9385
9386 switch (op) {
9387 case 0: /* str */
c40c8556 9388 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9389 break;
9390 case 1: /* strh */
c40c8556 9391 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9392 break;
9393 case 2: /* strb */
c40c8556 9394 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9395 break;
9396 case 3: /* ldrsb */
c40c8556 9397 tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
99c475ab
FB
9398 break;
9399 case 4: /* ldr */
c40c8556 9400 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9401 break;
9402 case 5: /* ldrh */
c40c8556 9403 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
99c475ab
FB
9404 break;
9405 case 6: /* ldrb */
c40c8556 9406 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
99c475ab
FB
9407 break;
9408 case 7: /* ldrsh */
c40c8556 9409 tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
99c475ab
FB
9410 break;
9411 }
c40c8556 9412 if (op >= 3) { /* load */
b0109805 9413 store_reg(s, rd, tmp);
c40c8556
PM
9414 } else {
9415 tcg_temp_free_i32(tmp);
9416 }
7d1b0095 9417 tcg_temp_free_i32(addr);
99c475ab
FB
9418 break;
9419
9420 case 6:
9421 /* load/store word immediate offset */
9422 rd = insn & 7;
9423 rn = (insn >> 3) & 7;
b0109805 9424 addr = load_reg(s, rn);
99c475ab 9425 val = (insn >> 4) & 0x7c;
b0109805 9426 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9427
9428 if (insn & (1 << 11)) {
9429 /* load */
c40c8556
PM
9430 tmp = tcg_temp_new_i32();
9431 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 9432 store_reg(s, rd, tmp);
99c475ab
FB
9433 } else {
9434 /* store */
b0109805 9435 tmp = load_reg(s, rd);
c40c8556
PM
9436 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9437 tcg_temp_free_i32(tmp);
99c475ab 9438 }
7d1b0095 9439 tcg_temp_free_i32(addr);
99c475ab
FB
9440 break;
9441
9442 case 7:
9443 /* load/store byte immediate offset */
9444 rd = insn & 7;
9445 rn = (insn >> 3) & 7;
b0109805 9446 addr = load_reg(s, rn);
99c475ab 9447 val = (insn >> 6) & 0x1f;
b0109805 9448 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9449
9450 if (insn & (1 << 11)) {
9451 /* load */
c40c8556
PM
9452 tmp = tcg_temp_new_i32();
9453 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
b0109805 9454 store_reg(s, rd, tmp);
99c475ab
FB
9455 } else {
9456 /* store */
b0109805 9457 tmp = load_reg(s, rd);
c40c8556
PM
9458 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
9459 tcg_temp_free_i32(tmp);
99c475ab 9460 }
7d1b0095 9461 tcg_temp_free_i32(addr);
99c475ab
FB
9462 break;
9463
9464 case 8:
9465 /* load/store halfword immediate offset */
9466 rd = insn & 7;
9467 rn = (insn >> 3) & 7;
b0109805 9468 addr = load_reg(s, rn);
99c475ab 9469 val = (insn >> 5) & 0x3e;
b0109805 9470 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9471
9472 if (insn & (1 << 11)) {
9473 /* load */
c40c8556
PM
9474 tmp = tcg_temp_new_i32();
9475 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
b0109805 9476 store_reg(s, rd, tmp);
99c475ab
FB
9477 } else {
9478 /* store */
b0109805 9479 tmp = load_reg(s, rd);
c40c8556
PM
9480 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
9481 tcg_temp_free_i32(tmp);
99c475ab 9482 }
7d1b0095 9483 tcg_temp_free_i32(addr);
99c475ab
FB
9484 break;
9485
9486 case 9:
9487 /* load/store from stack */
9488 rd = (insn >> 8) & 7;
b0109805 9489 addr = load_reg(s, 13);
99c475ab 9490 val = (insn & 0xff) * 4;
b0109805 9491 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9492
9493 if (insn & (1 << 11)) {
9494 /* load */
c40c8556
PM
9495 tmp = tcg_temp_new_i32();
9496 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 9497 store_reg(s, rd, tmp);
99c475ab
FB
9498 } else {
9499 /* store */
b0109805 9500 tmp = load_reg(s, rd);
c40c8556
PM
9501 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9502 tcg_temp_free_i32(tmp);
99c475ab 9503 }
7d1b0095 9504 tcg_temp_free_i32(addr);
99c475ab
FB
9505 break;
9506
9507 case 10:
9508 /* add to high reg */
9509 rd = (insn >> 8) & 7;
5899f386
FB
9510 if (insn & (1 << 11)) {
9511 /* SP */
5e3f878a 9512 tmp = load_reg(s, 13);
5899f386
FB
9513 } else {
9514 /* PC. bit 1 is ignored. */
7d1b0095 9515 tmp = tcg_temp_new_i32();
5e3f878a 9516 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9517 }
99c475ab 9518 val = (insn & 0xff) * 4;
5e3f878a
PB
9519 tcg_gen_addi_i32(tmp, tmp, val);
9520 store_reg(s, rd, tmp);
99c475ab
FB
9521 break;
9522
9523 case 11:
9524 /* misc */
9525 op = (insn >> 8) & 0xf;
9526 switch (op) {
9527 case 0:
9528 /* adjust stack pointer */
b26eefb6 9529 tmp = load_reg(s, 13);
99c475ab
FB
9530 val = (insn & 0x7f) * 4;
9531 if (insn & (1 << 7))
6a0d8a1d 9532 val = -(int32_t)val;
b26eefb6
PB
9533 tcg_gen_addi_i32(tmp, tmp, val);
9534 store_reg(s, 13, tmp);
99c475ab
FB
9535 break;
9536
9ee6e8bb
PB
9537 case 2: /* sign/zero extend. */
9538 ARCH(6);
9539 rd = insn & 7;
9540 rm = (insn >> 3) & 7;
b0109805 9541 tmp = load_reg(s, rm);
9ee6e8bb 9542 switch ((insn >> 6) & 3) {
b0109805
PB
9543 case 0: gen_sxth(tmp); break;
9544 case 1: gen_sxtb(tmp); break;
9545 case 2: gen_uxth(tmp); break;
9546 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9547 }
b0109805 9548 store_reg(s, rd, tmp);
9ee6e8bb 9549 break;
99c475ab
FB
9550 case 4: case 5: case 0xc: case 0xd:
9551 /* push/pop */
b0109805 9552 addr = load_reg(s, 13);
5899f386
FB
9553 if (insn & (1 << 8))
9554 offset = 4;
99c475ab 9555 else
5899f386
FB
9556 offset = 0;
9557 for (i = 0; i < 8; i++) {
9558 if (insn & (1 << i))
9559 offset += 4;
9560 }
9561 if ((insn & (1 << 11)) == 0) {
b0109805 9562 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9563 }
99c475ab
FB
9564 for (i = 0; i < 8; i++) {
9565 if (insn & (1 << i)) {
9566 if (insn & (1 << 11)) {
9567 /* pop */
c40c8556
PM
9568 tmp = tcg_temp_new_i32();
9569 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 9570 store_reg(s, i, tmp);
99c475ab
FB
9571 } else {
9572 /* push */
b0109805 9573 tmp = load_reg(s, i);
c40c8556
PM
9574 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9575 tcg_temp_free_i32(tmp);
99c475ab 9576 }
5899f386 9577 /* advance to the next address. */
b0109805 9578 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9579 }
9580 }
39d5492a 9581 TCGV_UNUSED_I32(tmp);
99c475ab
FB
9582 if (insn & (1 << 8)) {
9583 if (insn & (1 << 11)) {
9584 /* pop pc */
c40c8556
PM
9585 tmp = tcg_temp_new_i32();
9586 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9587 /* don't set the pc until the rest of the instruction
9588 has completed */
9589 } else {
9590 /* push lr */
b0109805 9591 tmp = load_reg(s, 14);
c40c8556
PM
9592 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9593 tcg_temp_free_i32(tmp);
99c475ab 9594 }
b0109805 9595 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9596 }
5899f386 9597 if ((insn & (1 << 11)) == 0) {
b0109805 9598 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9599 }
99c475ab 9600 /* write back the new stack pointer */
b0109805 9601 store_reg(s, 13, addr);
99c475ab 9602 /* set the new PC value */
be5e7a76
DES
9603 if ((insn & 0x0900) == 0x0900) {
9604 store_reg_from_load(env, s, 15, tmp);
9605 }
99c475ab
FB
9606 break;
9607
9ee6e8bb
PB
9608 case 1: case 3: case 9: case 11: /* czb */
9609 rm = insn & 7;
d9ba4830 9610 tmp = load_reg(s, rm);
9ee6e8bb
PB
9611 s->condlabel = gen_new_label();
9612 s->condjmp = 1;
9613 if (insn & (1 << 11))
cb63669a 9614 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9615 else
cb63669a 9616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9617 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9618 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9619 val = (uint32_t)s->pc + 2;
9620 val += offset;
9621 gen_jmp(s, val);
9622 break;
9623
9624 case 15: /* IT, nop-hint. */
9625 if ((insn & 0xf) == 0) {
9626 gen_nop_hint(s, (insn >> 4) & 0xf);
9627 break;
9628 }
9629 /* If Then. */
9630 s->condexec_cond = (insn >> 4) & 0xe;
9631 s->condexec_mask = insn & 0x1f;
9632 /* No actual code generated for this insn, just setup state. */
9633 break;
9634
06c949e6 9635 case 0xe: /* bkpt */
be5e7a76 9636 ARCH(5);
bc4a0de0 9637 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9638 break;
9639
9ee6e8bb
PB
9640 case 0xa: /* rev */
9641 ARCH(6);
9642 rn = (insn >> 3) & 0x7;
9643 rd = insn & 0x7;
b0109805 9644 tmp = load_reg(s, rn);
9ee6e8bb 9645 switch ((insn >> 6) & 3) {
66896cb8 9646 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9647 case 1: gen_rev16(tmp); break;
9648 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9649 default: goto illegal_op;
9650 }
b0109805 9651 store_reg(s, rd, tmp);
9ee6e8bb
PB
9652 break;
9653
d9e028c1
PM
9654 case 6:
9655 switch ((insn >> 5) & 7) {
9656 case 2:
9657 /* setend */
9658 ARCH(6);
10962fd5
PM
9659 if (((insn >> 3) & 1) != s->bswap_code) {
9660 /* Dynamic endianness switching not implemented. */
d9e028c1
PM
9661 goto illegal_op;
9662 }
9ee6e8bb 9663 break;
d9e028c1
PM
9664 case 3:
9665 /* cps */
9666 ARCH(6);
9667 if (IS_USER(s)) {
9668 break;
8984bd2e 9669 }
d9e028c1
PM
9670 if (IS_M(env)) {
9671 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9672 /* FAULTMASK */
9673 if (insn & 1) {
9674 addr = tcg_const_i32(19);
9675 gen_helper_v7m_msr(cpu_env, addr, tmp);
9676 tcg_temp_free_i32(addr);
9677 }
9678 /* PRIMASK */
9679 if (insn & 2) {
9680 addr = tcg_const_i32(16);
9681 gen_helper_v7m_msr(cpu_env, addr, tmp);
9682 tcg_temp_free_i32(addr);
9683 }
9684 tcg_temp_free_i32(tmp);
9685 gen_lookup_tb(s);
9686 } else {
9687 if (insn & (1 << 4)) {
9688 shift = CPSR_A | CPSR_I | CPSR_F;
9689 } else {
9690 shift = 0;
9691 }
9692 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9693 }
d9e028c1
PM
9694 break;
9695 default:
9696 goto undef;
9ee6e8bb
PB
9697 }
9698 break;
9699
99c475ab
FB
9700 default:
9701 goto undef;
9702 }
9703 break;
9704
9705 case 12:
a7d3970d 9706 {
99c475ab 9707 /* load/store multiple */
39d5492a
PM
9708 TCGv_i32 loaded_var;
9709 TCGV_UNUSED_I32(loaded_var);
99c475ab 9710 rn = (insn >> 8) & 0x7;
b0109805 9711 addr = load_reg(s, rn);
99c475ab
FB
9712 for (i = 0; i < 8; i++) {
9713 if (insn & (1 << i)) {
99c475ab
FB
9714 if (insn & (1 << 11)) {
9715 /* load */
c40c8556
PM
9716 tmp = tcg_temp_new_i32();
9717 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
a7d3970d
PM
9718 if (i == rn) {
9719 loaded_var = tmp;
9720 } else {
9721 store_reg(s, i, tmp);
9722 }
99c475ab
FB
9723 } else {
9724 /* store */
b0109805 9725 tmp = load_reg(s, i);
c40c8556
PM
9726 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9727 tcg_temp_free_i32(tmp);
99c475ab 9728 }
5899f386 9729 /* advance to the next address */
b0109805 9730 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9731 }
9732 }
b0109805 9733 if ((insn & (1 << rn)) == 0) {
a7d3970d 9734 /* base reg not in list: base register writeback */
b0109805
PB
9735 store_reg(s, rn, addr);
9736 } else {
a7d3970d
PM
9737 /* base reg in list: if load, complete it now */
9738 if (insn & (1 << 11)) {
9739 store_reg(s, rn, loaded_var);
9740 }
7d1b0095 9741 tcg_temp_free_i32(addr);
b0109805 9742 }
99c475ab 9743 break;
a7d3970d 9744 }
99c475ab
FB
9745 case 13:
9746 /* conditional branch or swi */
9747 cond = (insn >> 8) & 0xf;
9748 if (cond == 0xe)
9749 goto undef;
9750
9751 if (cond == 0xf) {
9752 /* swi */
422ebf69 9753 gen_set_pc_im(s->pc);
9ee6e8bb 9754 s->is_jmp = DISAS_SWI;
99c475ab
FB
9755 break;
9756 }
9757 /* generate a conditional jump to next instruction */
e50e6a20 9758 s->condlabel = gen_new_label();
d9ba4830 9759 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9760 s->condjmp = 1;
99c475ab
FB
9761
9762 /* jump to the offset */
5899f386 9763 val = (uint32_t)s->pc + 2;
99c475ab 9764 offset = ((int32_t)insn << 24) >> 24;
5899f386 9765 val += offset << 1;
8aaca4c0 9766 gen_jmp(s, val);
99c475ab
FB
9767 break;
9768
9769 case 14:
358bf29e 9770 if (insn & (1 << 11)) {
9ee6e8bb
PB
9771 if (disas_thumb2_insn(env, s, insn))
9772 goto undef32;
358bf29e
PB
9773 break;
9774 }
9ee6e8bb 9775 /* unconditional branch */
99c475ab
FB
9776 val = (uint32_t)s->pc;
9777 offset = ((int32_t)insn << 21) >> 21;
9778 val += (offset << 1) + 2;
8aaca4c0 9779 gen_jmp(s, val);
99c475ab
FB
9780 break;
9781
9782 case 15:
9ee6e8bb 9783 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9784 goto undef32;
9ee6e8bb 9785 break;
99c475ab
FB
9786 }
9787 return;
9ee6e8bb 9788undef32:
bc4a0de0 9789 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9790 return;
9791illegal_op:
99c475ab 9792undef:
bc4a0de0 9793 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9794}
9795
2c0262af
FB
9796/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9797 basic block 'tb'. If search_pc is TRUE, also generate PC
9798 information for each intermediate instruction. */
0ecb72a5 9799static inline void gen_intermediate_code_internal(CPUARMState *env,
2cfc5f17
TS
9800 TranslationBlock *tb,
9801 int search_pc)
2c0262af
FB
9802{
9803 DisasContext dc1, *dc = &dc1;
a1d1bb31 9804 CPUBreakpoint *bp;
2c0262af
FB
9805 uint16_t *gen_opc_end;
9806 int j, lj;
0fa85d43 9807 target_ulong pc_start;
b5ff1b31 9808 uint32_t next_page_start;
2e70f6ef
PB
9809 int num_insns;
9810 int max_insns;
3b46e624 9811
2c0262af 9812 /* generate intermediate code */
0fa85d43 9813 pc_start = tb->pc;
3b46e624 9814
2c0262af
FB
9815 dc->tb = tb;
9816
92414b31 9817 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9818
9819 dc->is_jmp = DISAS_NEXT;
9820 dc->pc = pc_start;
8aaca4c0 9821 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9822 dc->condjmp = 0;
7204ab88 9823 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
d8fd2954 9824 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
98eac7ca
PM
9825 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9826 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9827#if !defined(CONFIG_USER_ONLY)
61f74d6a 9828 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9829#endif
5df8bac1 9830 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9831 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9832 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9833 cpu_F0s = tcg_temp_new_i32();
9834 cpu_F1s = tcg_temp_new_i32();
9835 cpu_F0d = tcg_temp_new_i64();
9836 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9837 cpu_V0 = cpu_F0d;
9838 cpu_V1 = cpu_F1d;
e677137d 9839 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9840 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9841 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9842 lj = -1;
2e70f6ef
PB
9843 num_insns = 0;
9844 max_insns = tb->cflags & CF_COUNT_MASK;
9845 if (max_insns == 0)
9846 max_insns = CF_COUNT_MASK;
9847
806f352d 9848 gen_tb_start();
e12ce78d 9849
3849902c
PM
9850 tcg_clear_temp_count();
9851
e12ce78d
PM
9852 /* A note on handling of the condexec (IT) bits:
9853 *
9854 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 9855 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 9856 * (1) if the condexec bits are not already zero then we write
0ecb72a5 9857 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
9858 * to do it at the end of the block. (For example if we don't do this
9859 * it's hard to identify whether we can safely skip writing condexec
9860 * at the end of the TB, which we definitely want to do for the case
9861 * where a TB doesn't do anything with the IT state at all.)
9862 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 9863 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
9864 * This is done both for leaving the TB at the end, and for leaving
9865 * it because of an exception we know will happen, which is done in
9866 * gen_exception_insn(). The latter is necessary because we need to
9867 * leave the TB with the PC/IT state just prior to execution of the
9868 * instruction which caused the exception.
9869 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 9870 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
9871 * This is handled in the same way as restoration of the
9872 * PC in these situations: we will be called again with search_pc=1
9873 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9874 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9875 * this to restore the condexec bits.
e12ce78d
PM
9876 *
9877 * Note that there are no instructions which can read the condexec
9878 * bits, and none which can write non-static values to them, so
0ecb72a5 9879 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
9880 * middle of a TB.
9881 */
9882
9ee6e8bb
PB
9883 /* Reset the conditional execution bits immediately. This avoids
9884 complications trying to do it at the end of the block. */
98eac7ca 9885 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9886 {
39d5492a 9887 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 9888 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9889 store_cpu_field(tmp, condexec_bits);
8f01245e 9890 }
2c0262af 9891 do {
fbb4a2e3
PB
9892#ifdef CONFIG_USER_ONLY
9893 /* Intercept jump to the magic kernel page. */
9894 if (dc->pc >= 0xffff0000) {
9895 /* We always get here via a jump, so know we are not in a
9896 conditional execution block. */
9897 gen_exception(EXCP_KERNEL_TRAP);
9898 dc->is_jmp = DISAS_UPDATE;
9899 break;
9900 }
9901#else
9ee6e8bb
PB
9902 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9903 /* We always get here via a jump, so know we are not in a
9904 conditional execution block. */
d9ba4830 9905 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9906 dc->is_jmp = DISAS_UPDATE;
9907 break;
9ee6e8bb
PB
9908 }
9909#endif
9910
72cf2d4f
BS
9911 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9912 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9913 if (bp->pc == dc->pc) {
bc4a0de0 9914 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9915 /* Advance PC so that clearing the breakpoint will
9916 invalidate this TB. */
9917 dc->pc += 2;
9918 goto done_generating;
1fddef4b
FB
9919 }
9920 }
9921 }
2c0262af 9922 if (search_pc) {
92414b31 9923 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
9924 if (lj < j) {
9925 lj++;
9926 while (lj < j)
ab1103de 9927 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 9928 }
25983cad 9929 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 9930 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 9931 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 9932 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 9933 }
e50e6a20 9934
2e70f6ef
PB
9935 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9936 gen_io_start();
9937
fdefe51c 9938 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
9939 tcg_gen_debug_insn_start(dc->pc);
9940 }
9941
7204ab88 9942 if (dc->thumb) {
9ee6e8bb
PB
9943 disas_thumb_insn(env, dc);
9944 if (dc->condexec_mask) {
9945 dc->condexec_cond = (dc->condexec_cond & 0xe)
9946 | ((dc->condexec_mask >> 4) & 1);
9947 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9948 if (dc->condexec_mask == 0) {
9949 dc->condexec_cond = 0;
9950 }
9951 }
9952 } else {
9953 disas_arm_insn(env, dc);
9954 }
e50e6a20
FB
9955
9956 if (dc->condjmp && !dc->is_jmp) {
9957 gen_set_label(dc->condlabel);
9958 dc->condjmp = 0;
9959 }
3849902c
PM
9960
9961 if (tcg_check_temp_count()) {
9962 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9963 }
9964
aaf2d97d 9965 /* Translation stops when a conditional branch is encountered.
e50e6a20 9966 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9967 * Also stop translation when a page boundary is reached. This
bf20dc07 9968 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9969 num_insns ++;
efd7f486 9970 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
1fddef4b 9971 !env->singlestep_enabled &&
1b530a6d 9972 !singlestep &&
2e70f6ef
PB
9973 dc->pc < next_page_start &&
9974 num_insns < max_insns);
9975
9976 if (tb->cflags & CF_LAST_IO) {
9977 if (dc->condjmp) {
9978 /* FIXME: This can theoretically happen with self-modifying
9979 code. */
9980 cpu_abort(env, "IO on conditional branch instruction");
9981 }
9982 gen_io_end();
9983 }
9ee6e8bb 9984
b5ff1b31 9985 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9986 instruction was a conditional branch or trap, and the PC has
9987 already been written. */
551bd27f 9988 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9989 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9990 if (dc->condjmp) {
9ee6e8bb
PB
9991 gen_set_condexec(dc);
9992 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9993 gen_exception(EXCP_SWI);
9ee6e8bb 9994 } else {
d9ba4830 9995 gen_exception(EXCP_DEBUG);
9ee6e8bb 9996 }
e50e6a20
FB
9997 gen_set_label(dc->condlabel);
9998 }
9999 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 10000 gen_set_pc_im(dc->pc);
e50e6a20 10001 dc->condjmp = 0;
8aaca4c0 10002 }
9ee6e8bb
PB
10003 gen_set_condexec(dc);
10004 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 10005 gen_exception(EXCP_SWI);
9ee6e8bb
PB
10006 } else {
10007 /* FIXME: Single stepping a WFI insn will not halt
10008 the CPU. */
d9ba4830 10009 gen_exception(EXCP_DEBUG);
9ee6e8bb 10010 }
8aaca4c0 10011 } else {
9ee6e8bb
PB
10012 /* While branches must always occur at the end of an IT block,
10013 there are a few other things that can cause us to terminate
65626741 10014 the TB in the middle of an IT block:
9ee6e8bb
PB
10015 - Exception generating instructions (bkpt, swi, undefined).
10016 - Page boundaries.
10017 - Hardware watchpoints.
10018 Hardware breakpoints have already been handled and skip this code.
10019 */
10020 gen_set_condexec(dc);
8aaca4c0 10021 switch(dc->is_jmp) {
8aaca4c0 10022 case DISAS_NEXT:
6e256c93 10023 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10024 break;
10025 default:
10026 case DISAS_JUMP:
10027 case DISAS_UPDATE:
10028 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10029 tcg_gen_exit_tb(0);
8aaca4c0
FB
10030 break;
10031 case DISAS_TB_JUMP:
10032 /* nothing more to generate */
10033 break;
9ee6e8bb 10034 case DISAS_WFI:
1ce94f81 10035 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
10036 break;
10037 case DISAS_SWI:
d9ba4830 10038 gen_exception(EXCP_SWI);
9ee6e8bb 10039 break;
8aaca4c0 10040 }
e50e6a20
FB
10041 if (dc->condjmp) {
10042 gen_set_label(dc->condlabel);
9ee6e8bb 10043 gen_set_condexec(dc);
6e256c93 10044 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10045 dc->condjmp = 0;
10046 }
2c0262af 10047 }
2e70f6ef 10048
9ee6e8bb 10049done_generating:
806f352d 10050 gen_tb_end(tb, num_insns);
efd7f486 10051 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
10052
10053#ifdef DEBUG_DISAS
8fec2b8c 10054 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10055 qemu_log("----------------\n");
10056 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 10057 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 10058 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10059 qemu_log("\n");
2c0262af
FB
10060 }
10061#endif
b5ff1b31 10062 if (search_pc) {
92414b31 10063 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
10064 lj++;
10065 while (lj <= j)
ab1103de 10066 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 10067 } else {
2c0262af 10068 tb->size = dc->pc - pc_start;
2e70f6ef 10069 tb->icount = num_insns;
b5ff1b31 10070 }
2c0262af
FB
10071}
10072
0ecb72a5 10073void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10074{
2cfc5f17 10075 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
10076}
10077
0ecb72a5 10078void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10079{
2cfc5f17 10080 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
10081}
10082
b5ff1b31
FB
/* Printable names for the CPSR mode field (CPSR[3:0] with the M[4] bit
 * stripped); unassigned encodings print as "???".
 */
static const char *cpu_mode_names[16] = {
    [0x0] = "usr", [0x1] = "fiq", [0x2] = "irq", [0x3] = "svc",
    [0x4] = "???", [0x5] = "???", [0x6] = "???", [0x7] = "abt",
    [0x8] = "???", [0x9] = "???", [0xa] = "???", [0xb] = "und",
    [0xc] = "???", [0xd] = "???", [0xe] = "???", [0xf] = "sys",
};
9ee6e8bb 10087
0ecb72a5 10088void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10089 int flags)
2c0262af
FB
10090{
10091 int i;
b5ff1b31 10092 uint32_t psr;
2c0262af
FB
10093
10094 for(i=0;i<16;i++) {
7fe48483 10095 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10096 if ((i % 4) == 3)
7fe48483 10097 cpu_fprintf(f, "\n");
2c0262af 10098 else
7fe48483 10099 cpu_fprintf(f, " ");
2c0262af 10100 }
b5ff1b31 10101 psr = cpsr_read(env);
687fa640
TS
10102 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10103 psr,
b5ff1b31
FB
10104 psr & (1 << 31) ? 'N' : '-',
10105 psr & (1 << 30) ? 'Z' : '-',
10106 psr & (1 << 29) ? 'C' : '-',
10107 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10108 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10109 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10110
f2617cfc
PM
10111 if (flags & CPU_DUMP_FPU) {
10112 int numvfpregs = 0;
10113 if (arm_feature(env, ARM_FEATURE_VFP)) {
10114 numvfpregs += 16;
10115 }
10116 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10117 numvfpregs += 16;
10118 }
10119 for (i = 0; i < numvfpregs; i++) {
10120 uint64_t v = float64_val(env->vfp.regs[i]);
10121 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10122 i * 2, (uint32_t)v,
10123 i * 2 + 1, (uint32_t)(v >> 32),
10124 i, v);
10125 }
10126 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10127 }
2c0262af 10128}
a6b025d3 10129
0ecb72a5 10130void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10131{
25983cad 10132 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
e12ce78d 10133 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10134}