]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
target-arm: Allow 3reg_wide undefreq to encode more bad size options
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
ccd38087 28#include "internals.h"
76cad711 29#include "disas/disas.h"
57fec1fe 30#include "tcg-op.h"
1de7afc9 31#include "qemu/log.h"
534df156 32#include "qemu/bitops.h"
1d854765 33#include "arm_ldst.h"
1497c961 34
2ef6175a
RH
35#include "exec/helper-proto.h"
36#include "exec/helper-gen.h"
2c0262af 37
be5e7a76
DES
38#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
39#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
40/* currently all emulated v5 cores are also v5TE, so don't bother */
41#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
42#define ENABLE_ARCH_5J 0
43#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
44#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
45#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
46#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
81e69fb0 47#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
b5ff1b31 48
86753403 49#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 50
f570c61e 51#include "translate.h"
e12ce78d
PM
52static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
53
b5ff1b31
FB
54#if defined(CONFIG_USER_ONLY)
55#define IS_USER(s) 1
56#else
57#define IS_USER(s) (s->user)
58#endif
59
3407ad0e 60TCGv_ptr cpu_env;
ad69471c 61/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 62static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 63static TCGv_i32 cpu_R[16];
66c374de 64static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
03d05e2d
PM
65static TCGv_i64 cpu_exclusive_addr;
66static TCGv_i64 cpu_exclusive_val;
426f5abc 67#ifdef CONFIG_USER_ONLY
03d05e2d 68static TCGv_i64 cpu_exclusive_test;
426f5abc
PB
69static TCGv_i32 cpu_exclusive_info;
70#endif
ad69471c 71
b26eefb6 72/* FIXME: These should be removed. */
39d5492a 73static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 74static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 75
022c62cb 76#include "exec/gen-icount.h"
2e70f6ef 77
155c3eac
FN
78static const char *regnames[] =
79 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
80 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
81
b26eefb6
PB
82/* initialize TCG globals. */
83void arm_translate_init(void)
84{
155c3eac
FN
85 int i;
86
a7812ae4
PB
87 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
88
155c3eac
FN
89 for (i = 0; i < 16; i++) {
90 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 91 offsetof(CPUARMState, regs[i]),
155c3eac
FN
92 regnames[i]);
93 }
66c374de
AJ
94 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
95 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
96 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
97 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
98
03d05e2d 99 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 100 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
03d05e2d 101 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 102 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 103#ifdef CONFIG_USER_ONLY
03d05e2d 104 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 105 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 106 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 107 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 108#endif
155c3eac 109
14ade10f 110 a64_translate_init();
b26eefb6
PB
111}
112
39d5492a 113static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 114{
39d5492a 115 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
116 tcg_gen_ld_i32(tmp, cpu_env, offset);
117 return tmp;
118}
119
0ecb72a5 120#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 121
39d5492a 122static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
123{
124 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 125 tcg_temp_free_i32(var);
d9ba4830
PB
126}
127
128#define store_cpu_field(var, name) \
0ecb72a5 129 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 130
b26eefb6 131/* Set a variable to the value of a CPU register. */
39d5492a 132static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
133{
134 if (reg == 15) {
135 uint32_t addr;
b90372ad 136 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
137 if (s->thumb)
138 addr = (long)s->pc + 2;
139 else
140 addr = (long)s->pc + 4;
141 tcg_gen_movi_i32(var, addr);
142 } else {
155c3eac 143 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
144 }
145}
146
147/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 148static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 149{
39d5492a 150 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
151 load_reg_var(s, tmp, reg);
152 return tmp;
153}
154
155/* Set a CPU register. The source must be a temporary and will be
156 marked as dead. */
39d5492a 157static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
158{
159 if (reg == 15) {
160 tcg_gen_andi_i32(var, var, ~1);
161 s->is_jmp = DISAS_JUMP;
162 }
155c3eac 163 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 164 tcg_temp_free_i32(var);
b26eefb6
PB
165}
166
b26eefb6 167/* Value extensions. */
86831435
PB
168#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
169#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
170#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
171#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
172
1497c961
PB
173#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
174#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 175
b26eefb6 176
39d5492a 177static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 178{
39d5492a 179 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 180 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
181 tcg_temp_free_i32(tmp_mask);
182}
d9ba4830
PB
183/* Set NZCV flags from the high 4 bits of var. */
184#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
185
d4a2dc67 186static void gen_exception_internal(int excp)
d9ba4830 187{
d4a2dc67
PM
188 TCGv_i32 tcg_excp = tcg_const_i32(excp);
189
190 assert(excp_is_internal(excp));
191 gen_helper_exception_internal(cpu_env, tcg_excp);
192 tcg_temp_free_i32(tcg_excp);
193}
194
195static void gen_exception(int excp, uint32_t syndrome)
196{
197 TCGv_i32 tcg_excp = tcg_const_i32(excp);
198 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
199
200 gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
201 tcg_temp_free_i32(tcg_syn);
202 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
203}
204
39d5492a 205static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 206{
39d5492a
PM
207 TCGv_i32 tmp1 = tcg_temp_new_i32();
208 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
209 tcg_gen_ext16s_i32(tmp1, a);
210 tcg_gen_ext16s_i32(tmp2, b);
3670669c 211 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 212 tcg_temp_free_i32(tmp2);
3670669c
PB
213 tcg_gen_sari_i32(a, a, 16);
214 tcg_gen_sari_i32(b, b, 16);
215 tcg_gen_mul_i32(b, b, a);
216 tcg_gen_mov_i32(a, tmp1);
7d1b0095 217 tcg_temp_free_i32(tmp1);
3670669c
PB
218}
219
220/* Byteswap each halfword. */
39d5492a 221static void gen_rev16(TCGv_i32 var)
3670669c 222{
39d5492a 223 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
224 tcg_gen_shri_i32(tmp, var, 8);
225 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
226 tcg_gen_shli_i32(var, var, 8);
227 tcg_gen_andi_i32(var, var, 0xff00ff00);
228 tcg_gen_or_i32(var, var, tmp);
7d1b0095 229 tcg_temp_free_i32(tmp);
3670669c
PB
230}
231
232/* Byteswap low halfword and sign extend. */
39d5492a 233static void gen_revsh(TCGv_i32 var)
3670669c 234{
1a855029
AJ
235 tcg_gen_ext16u_i32(var, var);
236 tcg_gen_bswap16_i32(var, var);
237 tcg_gen_ext16s_i32(var, var);
3670669c
PB
238}
239
240/* Unsigned bitfield extract. */
39d5492a 241static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
242{
243 if (shift)
244 tcg_gen_shri_i32(var, var, shift);
245 tcg_gen_andi_i32(var, var, mask);
246}
247
248/* Signed bitfield extract. */
39d5492a 249static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
250{
251 uint32_t signbit;
252
253 if (shift)
254 tcg_gen_sari_i32(var, var, shift);
255 if (shift + width < 32) {
256 signbit = 1u << (width - 1);
257 tcg_gen_andi_i32(var, var, (1u << width) - 1);
258 tcg_gen_xori_i32(var, var, signbit);
259 tcg_gen_subi_i32(var, var, signbit);
260 }
261}
262
838fa72d 263/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 264static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 265{
838fa72d
AJ
266 TCGv_i64 tmp64 = tcg_temp_new_i64();
267
268 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 269 tcg_temp_free_i32(b);
838fa72d
AJ
270 tcg_gen_shli_i64(tmp64, tmp64, 32);
271 tcg_gen_add_i64(a, tmp64, a);
272
273 tcg_temp_free_i64(tmp64);
274 return a;
275}
276
277/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 278static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
279{
280 TCGv_i64 tmp64 = tcg_temp_new_i64();
281
282 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 283 tcg_temp_free_i32(b);
838fa72d
AJ
284 tcg_gen_shli_i64(tmp64, tmp64, 32);
285 tcg_gen_sub_i64(a, tmp64, a);
286
287 tcg_temp_free_i64(tmp64);
288 return a;
3670669c
PB
289}
290
5e3f878a 291/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 292static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 293{
39d5492a
PM
294 TCGv_i32 lo = tcg_temp_new_i32();
295 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 296 TCGv_i64 ret;
5e3f878a 297
831d7fe8 298 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 299 tcg_temp_free_i32(a);
7d1b0095 300 tcg_temp_free_i32(b);
831d7fe8
RH
301
302 ret = tcg_temp_new_i64();
303 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
304 tcg_temp_free_i32(lo);
305 tcg_temp_free_i32(hi);
831d7fe8
RH
306
307 return ret;
5e3f878a
PB
308}
309
39d5492a 310static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 311{
39d5492a
PM
312 TCGv_i32 lo = tcg_temp_new_i32();
313 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 314 TCGv_i64 ret;
5e3f878a 315
831d7fe8 316 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 317 tcg_temp_free_i32(a);
7d1b0095 318 tcg_temp_free_i32(b);
831d7fe8
RH
319
320 ret = tcg_temp_new_i64();
321 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
322 tcg_temp_free_i32(lo);
323 tcg_temp_free_i32(hi);
831d7fe8
RH
324
325 return ret;
5e3f878a
PB
326}
327
8f01245e 328/* Swap low and high halfwords. */
39d5492a 329static void gen_swap_half(TCGv_i32 var)
8f01245e 330{
39d5492a 331 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
332 tcg_gen_shri_i32(tmp, var, 16);
333 tcg_gen_shli_i32(var, var, 16);
334 tcg_gen_or_i32(var, var, tmp);
7d1b0095 335 tcg_temp_free_i32(tmp);
8f01245e
PB
336}
337
b26eefb6
PB
338/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
339 tmp = (t0 ^ t1) & 0x8000;
340 t0 &= ~0x8000;
341 t1 &= ~0x8000;
342 t0 = (t0 + t1) ^ tmp;
343 */
344
39d5492a 345static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 346{
39d5492a 347 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
348 tcg_gen_xor_i32(tmp, t0, t1);
349 tcg_gen_andi_i32(tmp, tmp, 0x8000);
350 tcg_gen_andi_i32(t0, t0, ~0x8000);
351 tcg_gen_andi_i32(t1, t1, ~0x8000);
352 tcg_gen_add_i32(t0, t0, t1);
353 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
354 tcg_temp_free_i32(tmp);
355 tcg_temp_free_i32(t1);
b26eefb6
PB
356}
357
358/* Set CF to the top bit of var. */
39d5492a 359static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 360{
66c374de 361 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
362}
363
364/* Set N and Z flags from var. */
39d5492a 365static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 366{
66c374de
AJ
367 tcg_gen_mov_i32(cpu_NF, var);
368 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
369}
370
371/* T0 += T1 + CF. */
39d5492a 372static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 373{
396e467c 374 tcg_gen_add_i32(t0, t0, t1);
66c374de 375 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
376}
377
e9bb4aa9 378/* dest = T0 + T1 + CF. */
39d5492a 379static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 380{
e9bb4aa9 381 tcg_gen_add_i32(dest, t0, t1);
66c374de 382 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
383}
384
3670669c 385/* dest = T0 - T1 + CF - 1. */
39d5492a 386static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 387{
3670669c 388 tcg_gen_sub_i32(dest, t0, t1);
66c374de 389 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 390 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
391}
392
72485ec4 393/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 394static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 395{
39d5492a 396 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
397 tcg_gen_movi_i32(tmp, 0);
398 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 399 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 400 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
401 tcg_gen_xor_i32(tmp, t0, t1);
402 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
403 tcg_temp_free_i32(tmp);
404 tcg_gen_mov_i32(dest, cpu_NF);
405}
406
49b4c31e 407/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 408static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 409{
39d5492a 410 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
411 if (TCG_TARGET_HAS_add2_i32) {
412 tcg_gen_movi_i32(tmp, 0);
413 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 414 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
415 } else {
416 TCGv_i64 q0 = tcg_temp_new_i64();
417 TCGv_i64 q1 = tcg_temp_new_i64();
418 tcg_gen_extu_i32_i64(q0, t0);
419 tcg_gen_extu_i32_i64(q1, t1);
420 tcg_gen_add_i64(q0, q0, q1);
421 tcg_gen_extu_i32_i64(q1, cpu_CF);
422 tcg_gen_add_i64(q0, q0, q1);
423 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
424 tcg_temp_free_i64(q0);
425 tcg_temp_free_i64(q1);
426 }
427 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
428 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
429 tcg_gen_xor_i32(tmp, t0, t1);
430 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
431 tcg_temp_free_i32(tmp);
432 tcg_gen_mov_i32(dest, cpu_NF);
433}
434
72485ec4 435/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 436static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 437{
39d5492a 438 TCGv_i32 tmp;
72485ec4
AJ
439 tcg_gen_sub_i32(cpu_NF, t0, t1);
440 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
441 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
442 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
443 tmp = tcg_temp_new_i32();
444 tcg_gen_xor_i32(tmp, t0, t1);
445 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
446 tcg_temp_free_i32(tmp);
447 tcg_gen_mov_i32(dest, cpu_NF);
448}
449
e77f0832 450/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 451static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 452{
39d5492a 453 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
454 tcg_gen_not_i32(tmp, t1);
455 gen_adc_CC(dest, t0, tmp);
39d5492a 456 tcg_temp_free_i32(tmp);
2de68a49
RH
457}
458
365af80e 459#define GEN_SHIFT(name) \
39d5492a 460static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 461{ \
39d5492a 462 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
463 tmp1 = tcg_temp_new_i32(); \
464 tcg_gen_andi_i32(tmp1, t1, 0xff); \
465 tmp2 = tcg_const_i32(0); \
466 tmp3 = tcg_const_i32(0x1f); \
467 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
468 tcg_temp_free_i32(tmp3); \
469 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
470 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
471 tcg_temp_free_i32(tmp2); \
472 tcg_temp_free_i32(tmp1); \
473}
474GEN_SHIFT(shl)
475GEN_SHIFT(shr)
476#undef GEN_SHIFT
477
39d5492a 478static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 479{
39d5492a 480 TCGv_i32 tmp1, tmp2;
365af80e
AJ
481 tmp1 = tcg_temp_new_i32();
482 tcg_gen_andi_i32(tmp1, t1, 0xff);
483 tmp2 = tcg_const_i32(0x1f);
484 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
485 tcg_temp_free_i32(tmp2);
486 tcg_gen_sar_i32(dest, t0, tmp1);
487 tcg_temp_free_i32(tmp1);
488}
489
39d5492a 490static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 491{
39d5492a
PM
492 TCGv_i32 c0 = tcg_const_i32(0);
493 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
494 tcg_gen_neg_i32(tmp, src);
495 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
496 tcg_temp_free_i32(c0);
497 tcg_temp_free_i32(tmp);
498}
ad69471c 499
39d5492a 500static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 501{
9a119ff6 502 if (shift == 0) {
66c374de 503 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 504 } else {
66c374de
AJ
505 tcg_gen_shri_i32(cpu_CF, var, shift);
506 if (shift != 31) {
507 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
508 }
9a119ff6 509 }
9a119ff6 510}
b26eefb6 511
9a119ff6 512/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
513static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
514 int shift, int flags)
9a119ff6
PB
515{
516 switch (shiftop) {
517 case 0: /* LSL */
518 if (shift != 0) {
519 if (flags)
520 shifter_out_im(var, 32 - shift);
521 tcg_gen_shli_i32(var, var, shift);
522 }
523 break;
524 case 1: /* LSR */
525 if (shift == 0) {
526 if (flags) {
66c374de 527 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
528 }
529 tcg_gen_movi_i32(var, 0);
530 } else {
531 if (flags)
532 shifter_out_im(var, shift - 1);
533 tcg_gen_shri_i32(var, var, shift);
534 }
535 break;
536 case 2: /* ASR */
537 if (shift == 0)
538 shift = 32;
539 if (flags)
540 shifter_out_im(var, shift - 1);
541 if (shift == 32)
542 shift = 31;
543 tcg_gen_sari_i32(var, var, shift);
544 break;
545 case 3: /* ROR/RRX */
546 if (shift != 0) {
547 if (flags)
548 shifter_out_im(var, shift - 1);
f669df27 549 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 550 } else {
39d5492a 551 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 552 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
553 if (flags)
554 shifter_out_im(var, 0);
555 tcg_gen_shri_i32(var, var, 1);
b26eefb6 556 tcg_gen_or_i32(var, var, tmp);
7d1b0095 557 tcg_temp_free_i32(tmp);
b26eefb6
PB
558 }
559 }
560};
561
39d5492a
PM
562static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
563 TCGv_i32 shift, int flags)
8984bd2e
PB
564{
565 if (flags) {
566 switch (shiftop) {
9ef39277
BS
567 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
568 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
569 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
570 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
571 }
572 } else {
573 switch (shiftop) {
365af80e
AJ
574 case 0:
575 gen_shl(var, var, shift);
576 break;
577 case 1:
578 gen_shr(var, var, shift);
579 break;
580 case 2:
581 gen_sar(var, var, shift);
582 break;
f669df27
AJ
583 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
584 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
585 }
586 }
7d1b0095 587 tcg_temp_free_i32(shift);
8984bd2e
PB
588}
589
6ddbc6e4
PB
590#define PAS_OP(pfx) \
591 switch (op2) { \
592 case 0: gen_pas_helper(glue(pfx,add16)); break; \
593 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
594 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
595 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
596 case 4: gen_pas_helper(glue(pfx,add8)); break; \
597 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
598 }
39d5492a 599static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 600{
a7812ae4 601 TCGv_ptr tmp;
6ddbc6e4
PB
602
603 switch (op1) {
604#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
605 case 1:
a7812ae4 606 tmp = tcg_temp_new_ptr();
0ecb72a5 607 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 608 PAS_OP(s)
b75263d6 609 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
610 break;
611 case 5:
a7812ae4 612 tmp = tcg_temp_new_ptr();
0ecb72a5 613 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 614 PAS_OP(u)
b75263d6 615 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
616 break;
617#undef gen_pas_helper
618#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
619 case 2:
620 PAS_OP(q);
621 break;
622 case 3:
623 PAS_OP(sh);
624 break;
625 case 6:
626 PAS_OP(uq);
627 break;
628 case 7:
629 PAS_OP(uh);
630 break;
631#undef gen_pas_helper
632 }
633}
9ee6e8bb
PB
634#undef PAS_OP
635
6ddbc6e4
PB
636/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
637#define PAS_OP(pfx) \
ed89a2f1 638 switch (op1) { \
6ddbc6e4
PB
639 case 0: gen_pas_helper(glue(pfx,add8)); break; \
640 case 1: gen_pas_helper(glue(pfx,add16)); break; \
641 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
642 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
643 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
644 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
645 }
39d5492a 646static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 647{
a7812ae4 648 TCGv_ptr tmp;
6ddbc6e4 649
ed89a2f1 650 switch (op2) {
6ddbc6e4
PB
651#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
652 case 0:
a7812ae4 653 tmp = tcg_temp_new_ptr();
0ecb72a5 654 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 655 PAS_OP(s)
b75263d6 656 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
657 break;
658 case 4:
a7812ae4 659 tmp = tcg_temp_new_ptr();
0ecb72a5 660 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 661 PAS_OP(u)
b75263d6 662 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
663 break;
664#undef gen_pas_helper
665#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
666 case 1:
667 PAS_OP(q);
668 break;
669 case 2:
670 PAS_OP(sh);
671 break;
672 case 5:
673 PAS_OP(uq);
674 break;
675 case 6:
676 PAS_OP(uh);
677 break;
678#undef gen_pas_helper
679 }
680}
9ee6e8bb
PB
681#undef PAS_OP
682
39fb730a
AG
683/*
684 * generate a conditional branch based on ARM condition code cc.
685 * This is common between ARM and Aarch64 targets.
686 */
687void arm_gen_test_cc(int cc, int label)
d9ba4830 688{
39d5492a 689 TCGv_i32 tmp;
d9ba4830
PB
690 int inv;
691
d9ba4830
PB
692 switch (cc) {
693 case 0: /* eq: Z */
66c374de 694 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
695 break;
696 case 1: /* ne: !Z */
66c374de 697 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
698 break;
699 case 2: /* cs: C */
66c374de 700 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
701 break;
702 case 3: /* cc: !C */
66c374de 703 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
704 break;
705 case 4: /* mi: N */
66c374de 706 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
707 break;
708 case 5: /* pl: !N */
66c374de 709 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
710 break;
711 case 6: /* vs: V */
66c374de 712 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
713 break;
714 case 7: /* vc: !V */
66c374de 715 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
716 break;
717 case 8: /* hi: C && !Z */
718 inv = gen_new_label();
66c374de
AJ
719 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
720 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
721 gen_set_label(inv);
722 break;
723 case 9: /* ls: !C || Z */
66c374de
AJ
724 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
725 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
726 break;
727 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
728 tmp = tcg_temp_new_i32();
729 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 730 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 731 tcg_temp_free_i32(tmp);
d9ba4830
PB
732 break;
733 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
734 tmp = tcg_temp_new_i32();
735 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 736 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 737 tcg_temp_free_i32(tmp);
d9ba4830
PB
738 break;
739 case 12: /* gt: !Z && N == V */
740 inv = gen_new_label();
66c374de
AJ
741 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
742 tmp = tcg_temp_new_i32();
743 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 744 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 745 tcg_temp_free_i32(tmp);
d9ba4830
PB
746 gen_set_label(inv);
747 break;
748 case 13: /* le: Z || N != V */
66c374de
AJ
749 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
750 tmp = tcg_temp_new_i32();
751 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 752 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 753 tcg_temp_free_i32(tmp);
d9ba4830
PB
754 break;
755 default:
756 fprintf(stderr, "Bad condition code 0x%x\n", cc);
757 abort();
758 }
d9ba4830 759}
2c0262af 760
b1d8e52e 761static const uint8_t table_logic_cc[16] = {
2c0262af
FB
762 1, /* and */
763 1, /* xor */
764 0, /* sub */
765 0, /* rsb */
766 0, /* add */
767 0, /* adc */
768 0, /* sbc */
769 0, /* rsc */
770 1, /* andl */
771 1, /* xorl */
772 0, /* cmp */
773 0, /* cmn */
774 1, /* orr */
775 1, /* mov */
776 1, /* bic */
777 1, /* mvn */
778};
3b46e624 779
d9ba4830
PB
780/* Set PC and Thumb state from an immediate address. */
781static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 782{
39d5492a 783 TCGv_i32 tmp;
99c475ab 784
b26eefb6 785 s->is_jmp = DISAS_UPDATE;
d9ba4830 786 if (s->thumb != (addr & 1)) {
7d1b0095 787 tmp = tcg_temp_new_i32();
d9ba4830 788 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 789 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 790 tcg_temp_free_i32(tmp);
d9ba4830 791 }
155c3eac 792 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
793}
794
795/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 796static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 797{
d9ba4830 798 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
799 tcg_gen_andi_i32(cpu_R[15], var, ~1);
800 tcg_gen_andi_i32(var, var, 1);
801 store_cpu_field(var, thumb);
d9ba4830
PB
802}
803
21aeb343
JR
804/* Variant of store_reg which uses branch&exchange logic when storing
805 to r15 in ARM architecture v7 and above. The source must be a temporary
806 and will be marked as dead. */
0ecb72a5 807static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 808 int reg, TCGv_i32 var)
21aeb343
JR
809{
810 if (reg == 15 && ENABLE_ARCH_7) {
811 gen_bx(s, var);
812 } else {
813 store_reg(s, reg, var);
814 }
815}
816
be5e7a76
DES
817/* Variant of store_reg which uses branch&exchange logic when storing
818 * to r15 in ARM architecture v5T and above. This is used for storing
819 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
820 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 821static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 822 int reg, TCGv_i32 var)
be5e7a76
DES
823{
824 if (reg == 15 && ENABLE_ARCH_5) {
825 gen_bx(s, var);
826 } else {
827 store_reg(s, reg, var);
828 }
829}
830
08307563
PM
831/* Abstractions of "generate code to do a guest load/store for
832 * AArch32", where a vaddr is always 32 bits (and is zero
833 * extended if we're a 64 bit core) and data is also
834 * 32 bits unless specifically doing a 64 bit access.
835 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 836 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
837 */
838#if TARGET_LONG_BITS == 32
839
09f78135
RH
840#define DO_GEN_LD(SUFF, OPC) \
841static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 842{ \
09f78135 843 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
08307563
PM
844}
845
09f78135
RH
846#define DO_GEN_ST(SUFF, OPC) \
847static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 848{ \
09f78135 849 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
08307563
PM
850}
851
852static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
853{
09f78135 854 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
08307563
PM
855}
856
857static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
858{
09f78135 859 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
08307563
PM
860}
861
862#else
863
09f78135
RH
864#define DO_GEN_LD(SUFF, OPC) \
865static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
866{ \
867 TCGv addr64 = tcg_temp_new(); \
08307563 868 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 869 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
08307563 870 tcg_temp_free(addr64); \
08307563
PM
871}
872
09f78135
RH
873#define DO_GEN_ST(SUFF, OPC) \
874static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
875{ \
876 TCGv addr64 = tcg_temp_new(); \
08307563 877 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 878 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
08307563 879 tcg_temp_free(addr64); \
08307563
PM
880}
881
882static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
883{
884 TCGv addr64 = tcg_temp_new();
885 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 886 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
08307563
PM
887 tcg_temp_free(addr64);
888}
889
890static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
891{
892 TCGv addr64 = tcg_temp_new();
893 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 894 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
08307563
PM
895 tcg_temp_free(addr64);
896}
897
898#endif
899
09f78135
RH
900DO_GEN_LD(8s, MO_SB)
901DO_GEN_LD(8u, MO_UB)
902DO_GEN_LD(16s, MO_TESW)
903DO_GEN_LD(16u, MO_TEUW)
904DO_GEN_LD(32u, MO_TEUL)
905DO_GEN_ST(8, MO_UB)
906DO_GEN_ST(16, MO_TEUW)
907DO_GEN_ST(32, MO_TEUL)
08307563 908
eaed129d 909static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 910{
40f860cd 911 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
912}
913
d4a2dc67
PM
914static inline void
915gen_set_condexec (DisasContext *s)
916{
917 if (s->condexec_mask) {
918 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
919 TCGv_i32 tmp = tcg_temp_new_i32();
920 tcg_gen_movi_i32(tmp, val);
921 store_cpu_field(tmp, condexec_bits);
922 }
923}
924
925static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
926{
927 gen_set_condexec(s);
928 gen_set_pc_im(s, s->pc - offset);
929 gen_exception_internal(excp);
930 s->is_jmp = DISAS_JUMP;
931}
932
933static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
934{
935 gen_set_condexec(s);
936 gen_set_pc_im(s, s->pc - offset);
937 gen_exception(excp, syn);
938 s->is_jmp = DISAS_JUMP;
939}
940
b5ff1b31
FB
941/* Force a TB lookup after an instruction that changes the CPU state. */
942static inline void gen_lookup_tb(DisasContext *s)
943{
a6445c52 944 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
945 s->is_jmp = DISAS_UPDATE;
946}
947
b0109805 948static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 949 TCGv_i32 var)
2c0262af 950{
1e8d4eec 951 int val, rm, shift, shiftop;
39d5492a 952 TCGv_i32 offset;
2c0262af
FB
953
954 if (!(insn & (1 << 25))) {
955 /* immediate */
956 val = insn & 0xfff;
957 if (!(insn & (1 << 23)))
958 val = -val;
537730b9 959 if (val != 0)
b0109805 960 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
961 } else {
962 /* shift/register */
963 rm = (insn) & 0xf;
964 shift = (insn >> 7) & 0x1f;
1e8d4eec 965 shiftop = (insn >> 5) & 3;
b26eefb6 966 offset = load_reg(s, rm);
9a119ff6 967 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 968 if (!(insn & (1 << 23)))
b0109805 969 tcg_gen_sub_i32(var, var, offset);
2c0262af 970 else
b0109805 971 tcg_gen_add_i32(var, var, offset);
7d1b0095 972 tcg_temp_free_i32(offset);
2c0262af
FB
973 }
974}
975
191f9a93 976static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 977 int extra, TCGv_i32 var)
2c0262af
FB
978{
979 int val, rm;
39d5492a 980 TCGv_i32 offset;
3b46e624 981
2c0262af
FB
982 if (insn & (1 << 22)) {
983 /* immediate */
984 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
985 if (!(insn & (1 << 23)))
986 val = -val;
18acad92 987 val += extra;
537730b9 988 if (val != 0)
b0109805 989 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
990 } else {
991 /* register */
191f9a93 992 if (extra)
b0109805 993 tcg_gen_addi_i32(var, var, extra);
2c0262af 994 rm = (insn) & 0xf;
b26eefb6 995 offset = load_reg(s, rm);
2c0262af 996 if (!(insn & (1 << 23)))
b0109805 997 tcg_gen_sub_i32(var, var, offset);
2c0262af 998 else
b0109805 999 tcg_gen_add_i32(var, var, offset);
7d1b0095 1000 tcg_temp_free_i32(offset);
2c0262af
FB
1001 }
1002}
1003
5aaebd13
PM
1004static TCGv_ptr get_fpstatus_ptr(int neon)
1005{
1006 TCGv_ptr statusptr = tcg_temp_new_ptr();
1007 int offset;
1008 if (neon) {
0ecb72a5 1009 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1010 } else {
0ecb72a5 1011 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1012 }
1013 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1014 return statusptr;
1015}
1016
4373f3ce
PB
/* Emit a two-operand VFP arithmetic op: F0 = F0 op F1, in single or
 * double precision, using the VFP float_status (not Neon's). */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}
1028
4373f3ce
PB
/* Binary VFP arithmetic: gen_vfp_add(), gen_vfp_sub(), etc. */
VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1035
605a6aed
PM
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 (uses the VFP
     * float_status, not Neon's). */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
1047
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1057
4373f3ce
PB
1058static inline void gen_vfp_abs(int dp)
1059{
1060 if (dp)
1061 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1062 else
1063 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1064}
1065
1066static inline void gen_vfp_neg(int dp)
1067{
1068 if (dp)
1069 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1070 else
1071 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1072}
1073
1074static inline void gen_vfp_sqrt(int dp)
1075{
1076 if (dp)
1077 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1078 else
1079 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1080}
1081
1082static inline void gen_vfp_cmp(int dp)
1083{
1084 if (dp)
1085 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1086 else
1087 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1088}
1089
1090static inline void gen_vfp_cmpe(int dp)
1091{
1092 if (dp)
1093 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1094 else
1095 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1096}
1097
1098static inline void gen_vfp_F1_ld0(int dp)
1099{
1100 if (dp)
5b340b51 1101 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1102 else
5b340b51 1103 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1104}
1105
5500b06c
PM
/* Integer-to-float conversion.  The integer source is always the
 * 32-bit cpu_F0s; the result goes to F0 in the requested precision.
 * 'neon' selects the Neon standard float_status over the VFP one. */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}
1117
5500b06c
PM
/* uito: unsigned int to float; sito: signed int to float. */
VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1121
5500b06c
PM
/* Float-to-integer conversion.  The integer result always lands in
 * the 32-bit cpu_F0s, whatever the source precision. */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}
1133
5500b06c
PM
/* Float to unsigned/signed int; NOTE(review) the 'z' variants
 * presumably round toward zero -- see the matching helpers. */
VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1139
/* Fixed-point conversion with 'shift' fraction bits.  'round' is a
 * suffix spliced into the helper name to pick the _round_to_zero
 * variant (or the default rounding when empty). */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
16d5b3ca
WN
/* Float to fixed-point uses round-to-zero; fixed to float does not. */
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1164
/* Load F0 (64-bit when dp, else 32-bit) from guest memory at 'addr'. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
    }
}
1173
/* Store F0 (64-bit when dp, else 32-bit) to guest memory at 'addr'. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
    }
}
1182
8e96005d
FB
1183static inline long
1184vfp_reg_offset (int dp, int reg)
1185{
1186 if (dp)
1187 return offsetof(CPUARMState, vfp.regs[reg]);
1188 else if (reg & 1) {
1189 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1190 + offsetof(CPU_DoubleU, l.upper);
1191 } else {
1192 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1193 + offsetof(CPU_DoubleU, l.lower);
1194 }
1195}
9ee6e8bb
PB
1196
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each NEON register is two 32-bit VFP single slots. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1206
/* Return a new temp holding 32-bit piece 'pass' of NEON register
 * 'reg'; the caller owns (and must free) the temp. */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1213
/* Write 'var' into 32-bit piece 'pass' of NEON register 'reg';
 * consumes (frees) 'var'. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1219
/* Load a whole 64-bit NEON register into 'var'. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1224
/* Store 'var' into a whole 64-bit NEON register ('var' not freed). */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1229
4373f3ce
PB
/* Float loads/stores are just integer loads/stores of the same width. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1234
b7bcbe95
FB
1235static inline void gen_mov_F0_vreg(int dp, int reg)
1236{
1237 if (dp)
4373f3ce 1238 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1239 else
4373f3ce 1240 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1241}
1242
1243static inline void gen_mov_F1_vreg(int dp, int reg)
1244{
1245 if (dp)
4373f3ce 1246 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1247 else
4373f3ce 1248 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1249}
1250
1251static inline void gen_mov_vreg_F0(int dp, int reg)
1252{
1253 if (dp)
4373f3ce 1254 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1255 else
4373f3ce 1256 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1257}
1258
18c9b560
AZ
1259#define ARM_CP_RW_BIT (1 << 20)
1260
/* Copy iwMMXt 64-bit data register wRn into a TCG temp. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Copy a TCG temp into iwMMXt 64-bit data register wRn. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1270
/* Return a new temp holding iwMMXt control register wCx; caller frees. */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Write 'var' to iwMMXt control register wCx; consumes (frees) 'var'. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1283
/* Moves between the iwMMXt working register M0 and wRn, plus bitwise
 * M0 op= wRn helpers that stage wRn through cpu_V1. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1311
/* Emit M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (so it can update
 * CPU state such as flags or saturation). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long variants of an env-taking op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand env-taking op: M0 = helper(M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1336
/* Instantiate the gen_op_iwmmxt_xxx_M0_wRn() / _M0() helpers. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1393
e677137d
PB
/* Set bit 1 of wCon ("MUP" per the function name: an iwMMXt data
 * register was updated). */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set bit 0 of wCon ("CUP": an iwMMXt control register was updated). */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1409
/* Compute the N/Z status of M0 via helper and store it into wCASF. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1416
/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1423
39d5492a
PM
1424static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1425 TCGv_i32 dest)
18c9b560
AZ
1426{
1427 int rd;
1428 uint32_t offset;
39d5492a 1429 TCGv_i32 tmp;
18c9b560
AZ
1430
1431 rd = (insn >> 16) & 0xf;
da6b5335 1432 tmp = load_reg(s, rd);
18c9b560
AZ
1433
1434 offset = (insn & 0xff) << ((insn >> 7) & 2);
1435 if (insn & (1 << 24)) {
1436 /* Pre indexed */
1437 if (insn & (1 << 23))
da6b5335 1438 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1439 else
da6b5335
FN
1440 tcg_gen_addi_i32(tmp, tmp, -offset);
1441 tcg_gen_mov_i32(dest, tmp);
18c9b560 1442 if (insn & (1 << 21))
da6b5335
FN
1443 store_reg(s, rd, tmp);
1444 else
7d1b0095 1445 tcg_temp_free_i32(tmp);
18c9b560
AZ
1446 } else if (insn & (1 << 21)) {
1447 /* Post indexed */
da6b5335 1448 tcg_gen_mov_i32(dest, tmp);
18c9b560 1449 if (insn & (1 << 23))
da6b5335 1450 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1451 else
da6b5335
FN
1452 tcg_gen_addi_i32(tmp, tmp, -offset);
1453 store_reg(s, rd, tmp);
18c9b560
AZ
1454 } else if (!(insn & (1 << 23)))
1455 return 1;
1456 return 0;
1457}
1458
/* Fetch the shift amount for an iwMMXt shift insn into 'dest', ANDed
 * with 'mask'.  Bit 8 set: read from a wCGR control register (only
 * wCGR0..wCGR3 are legal); otherwise use the low 32 bits of data
 * register wRd.  Returns 1 for UNDEF, 0 on success. */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        /* Only the low 32 bits of the data register are used. */
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1480
a1c7273b 1481/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1482 (ie. an undefined instruction). */
0ecb72a5 1483static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1484{
1485 int rd, wrd;
1486 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1487 TCGv_i32 addr;
1488 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1489
1490 if ((insn & 0x0e000e00) == 0x0c000000) {
1491 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1492 wrd = insn & 0xf;
1493 rdlo = (insn >> 12) & 0xf;
1494 rdhi = (insn >> 16) & 0xf;
1495 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1496 iwmmxt_load_reg(cpu_V0, wrd);
1497 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1498 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1499 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1500 } else { /* TMCRR */
da6b5335
FN
1501 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1502 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1503 gen_op_iwmmxt_set_mup();
1504 }
1505 return 0;
1506 }
1507
1508 wrd = (insn >> 12) & 0xf;
7d1b0095 1509 addr = tcg_temp_new_i32();
da6b5335 1510 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1511 tcg_temp_free_i32(addr);
18c9b560 1512 return 1;
da6b5335 1513 }
18c9b560
AZ
1514 if (insn & ARM_CP_RW_BIT) {
1515 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1516 tmp = tcg_temp_new_i32();
6ce2faf4 1517 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
da6b5335 1518 iwmmxt_store_creg(wrd, tmp);
18c9b560 1519 } else {
e677137d
PB
1520 i = 1;
1521 if (insn & (1 << 8)) {
1522 if (insn & (1 << 22)) { /* WLDRD */
6ce2faf4 1523 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1524 i = 0;
1525 } else { /* WLDRW wRd */
29531141 1526 tmp = tcg_temp_new_i32();
6ce2faf4 1527 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
e677137d
PB
1528 }
1529 } else {
29531141 1530 tmp = tcg_temp_new_i32();
e677137d 1531 if (insn & (1 << 22)) { /* WLDRH */
6ce2faf4 1532 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
e677137d 1533 } else { /* WLDRB */
6ce2faf4 1534 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
e677137d
PB
1535 }
1536 }
1537 if (i) {
1538 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1539 tcg_temp_free_i32(tmp);
e677137d 1540 }
18c9b560
AZ
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 }
1543 } else {
1544 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1545 tmp = iwmmxt_load_creg(wrd);
6ce2faf4 1546 gen_aa32_st32(tmp, addr, get_mem_index(s));
18c9b560
AZ
1547 } else {
1548 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1549 tmp = tcg_temp_new_i32();
e677137d
PB
1550 if (insn & (1 << 8)) {
1551 if (insn & (1 << 22)) { /* WSTRD */
6ce2faf4 1552 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1553 } else { /* WSTRW wRd */
1554 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1555 gen_aa32_st32(tmp, addr, get_mem_index(s));
e677137d
PB
1556 }
1557 } else {
1558 if (insn & (1 << 22)) { /* WSTRH */
1559 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1560 gen_aa32_st16(tmp, addr, get_mem_index(s));
e677137d
PB
1561 } else { /* WSTRB */
1562 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1563 gen_aa32_st8(tmp, addr, get_mem_index(s));
e677137d
PB
1564 }
1565 }
18c9b560 1566 }
29531141 1567 tcg_temp_free_i32(tmp);
18c9b560 1568 }
7d1b0095 1569 tcg_temp_free_i32(addr);
18c9b560
AZ
1570 return 0;
1571 }
1572
1573 if ((insn & 0x0f000000) != 0x0e000000)
1574 return 1;
1575
1576 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1577 case 0x000: /* WOR */
1578 wrd = (insn >> 12) & 0xf;
1579 rd0 = (insn >> 0) & 0xf;
1580 rd1 = (insn >> 16) & 0xf;
1581 gen_op_iwmmxt_movq_M0_wRn(rd0);
1582 gen_op_iwmmxt_orq_M0_wRn(rd1);
1583 gen_op_iwmmxt_setpsr_nz();
1584 gen_op_iwmmxt_movq_wRn_M0(wrd);
1585 gen_op_iwmmxt_set_mup();
1586 gen_op_iwmmxt_set_cup();
1587 break;
1588 case 0x011: /* TMCR */
1589 if (insn & 0xf)
1590 return 1;
1591 rd = (insn >> 12) & 0xf;
1592 wrd = (insn >> 16) & 0xf;
1593 switch (wrd) {
1594 case ARM_IWMMXT_wCID:
1595 case ARM_IWMMXT_wCASF:
1596 break;
1597 case ARM_IWMMXT_wCon:
1598 gen_op_iwmmxt_set_cup();
1599 /* Fall through. */
1600 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1601 tmp = iwmmxt_load_creg(wrd);
1602 tmp2 = load_reg(s, rd);
f669df27 1603 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1604 tcg_temp_free_i32(tmp2);
da6b5335 1605 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1606 break;
1607 case ARM_IWMMXT_wCGR0:
1608 case ARM_IWMMXT_wCGR1:
1609 case ARM_IWMMXT_wCGR2:
1610 case ARM_IWMMXT_wCGR3:
1611 gen_op_iwmmxt_set_cup();
da6b5335
FN
1612 tmp = load_reg(s, rd);
1613 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1614 break;
1615 default:
1616 return 1;
1617 }
1618 break;
1619 case 0x100: /* WXOR */
1620 wrd = (insn >> 12) & 0xf;
1621 rd0 = (insn >> 0) & 0xf;
1622 rd1 = (insn >> 16) & 0xf;
1623 gen_op_iwmmxt_movq_M0_wRn(rd0);
1624 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1625 gen_op_iwmmxt_setpsr_nz();
1626 gen_op_iwmmxt_movq_wRn_M0(wrd);
1627 gen_op_iwmmxt_set_mup();
1628 gen_op_iwmmxt_set_cup();
1629 break;
1630 case 0x111: /* TMRC */
1631 if (insn & 0xf)
1632 return 1;
1633 rd = (insn >> 12) & 0xf;
1634 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1635 tmp = iwmmxt_load_creg(wrd);
1636 store_reg(s, rd, tmp);
18c9b560
AZ
1637 break;
1638 case 0x300: /* WANDN */
1639 wrd = (insn >> 12) & 0xf;
1640 rd0 = (insn >> 0) & 0xf;
1641 rd1 = (insn >> 16) & 0xf;
1642 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1643 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1644 gen_op_iwmmxt_andq_M0_wRn(rd1);
1645 gen_op_iwmmxt_setpsr_nz();
1646 gen_op_iwmmxt_movq_wRn_M0(wrd);
1647 gen_op_iwmmxt_set_mup();
1648 gen_op_iwmmxt_set_cup();
1649 break;
1650 case 0x200: /* WAND */
1651 wrd = (insn >> 12) & 0xf;
1652 rd0 = (insn >> 0) & 0xf;
1653 rd1 = (insn >> 16) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0);
1655 gen_op_iwmmxt_andq_M0_wRn(rd1);
1656 gen_op_iwmmxt_setpsr_nz();
1657 gen_op_iwmmxt_movq_wRn_M0(wrd);
1658 gen_op_iwmmxt_set_mup();
1659 gen_op_iwmmxt_set_cup();
1660 break;
1661 case 0x810: case 0xa10: /* WMADD */
1662 wrd = (insn >> 12) & 0xf;
1663 rd0 = (insn >> 0) & 0xf;
1664 rd1 = (insn >> 16) & 0xf;
1665 gen_op_iwmmxt_movq_M0_wRn(rd0);
1666 if (insn & (1 << 21))
1667 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1668 else
1669 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1670 gen_op_iwmmxt_movq_wRn_M0(wrd);
1671 gen_op_iwmmxt_set_mup();
1672 break;
1673 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1674 wrd = (insn >> 12) & 0xf;
1675 rd0 = (insn >> 16) & 0xf;
1676 rd1 = (insn >> 0) & 0xf;
1677 gen_op_iwmmxt_movq_M0_wRn(rd0);
1678 switch ((insn >> 22) & 3) {
1679 case 0:
1680 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1681 break;
1682 case 1:
1683 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1684 break;
1685 case 2:
1686 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1687 break;
1688 case 3:
1689 return 1;
1690 }
1691 gen_op_iwmmxt_movq_wRn_M0(wrd);
1692 gen_op_iwmmxt_set_mup();
1693 gen_op_iwmmxt_set_cup();
1694 break;
1695 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1696 wrd = (insn >> 12) & 0xf;
1697 rd0 = (insn >> 16) & 0xf;
1698 rd1 = (insn >> 0) & 0xf;
1699 gen_op_iwmmxt_movq_M0_wRn(rd0);
1700 switch ((insn >> 22) & 3) {
1701 case 0:
1702 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1703 break;
1704 case 1:
1705 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1706 break;
1707 case 2:
1708 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1709 break;
1710 case 3:
1711 return 1;
1712 }
1713 gen_op_iwmmxt_movq_wRn_M0(wrd);
1714 gen_op_iwmmxt_set_mup();
1715 gen_op_iwmmxt_set_cup();
1716 break;
1717 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1718 wrd = (insn >> 12) & 0xf;
1719 rd0 = (insn >> 16) & 0xf;
1720 rd1 = (insn >> 0) & 0xf;
1721 gen_op_iwmmxt_movq_M0_wRn(rd0);
1722 if (insn & (1 << 22))
1723 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1724 else
1725 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1726 if (!(insn & (1 << 20)))
1727 gen_op_iwmmxt_addl_M0_wRn(wrd);
1728 gen_op_iwmmxt_movq_wRn_M0(wrd);
1729 gen_op_iwmmxt_set_mup();
1730 break;
1731 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1732 wrd = (insn >> 12) & 0xf;
1733 rd0 = (insn >> 16) & 0xf;
1734 rd1 = (insn >> 0) & 0xf;
1735 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1736 if (insn & (1 << 21)) {
1737 if (insn & (1 << 20))
1738 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1739 else
1740 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1741 } else {
1742 if (insn & (1 << 20))
1743 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1744 else
1745 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1746 }
18c9b560
AZ
1747 gen_op_iwmmxt_movq_wRn_M0(wrd);
1748 gen_op_iwmmxt_set_mup();
1749 break;
1750 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1751 wrd = (insn >> 12) & 0xf;
1752 rd0 = (insn >> 16) & 0xf;
1753 rd1 = (insn >> 0) & 0xf;
1754 gen_op_iwmmxt_movq_M0_wRn(rd0);
1755 if (insn & (1 << 21))
1756 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1757 else
1758 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1759 if (!(insn & (1 << 20))) {
e677137d
PB
1760 iwmmxt_load_reg(cpu_V1, wrd);
1761 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1762 }
1763 gen_op_iwmmxt_movq_wRn_M0(wrd);
1764 gen_op_iwmmxt_set_mup();
1765 break;
1766 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1767 wrd = (insn >> 12) & 0xf;
1768 rd0 = (insn >> 16) & 0xf;
1769 rd1 = (insn >> 0) & 0xf;
1770 gen_op_iwmmxt_movq_M0_wRn(rd0);
1771 switch ((insn >> 22) & 3) {
1772 case 0:
1773 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1774 break;
1775 case 1:
1776 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1777 break;
1778 case 2:
1779 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1780 break;
1781 case 3:
1782 return 1;
1783 }
1784 gen_op_iwmmxt_movq_wRn_M0(wrd);
1785 gen_op_iwmmxt_set_mup();
1786 gen_op_iwmmxt_set_cup();
1787 break;
1788 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1789 wrd = (insn >> 12) & 0xf;
1790 rd0 = (insn >> 16) & 0xf;
1791 rd1 = (insn >> 0) & 0xf;
1792 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1793 if (insn & (1 << 22)) {
1794 if (insn & (1 << 20))
1795 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1796 else
1797 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1798 } else {
1799 if (insn & (1 << 20))
1800 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1801 else
1802 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1803 }
18c9b560
AZ
1804 gen_op_iwmmxt_movq_wRn_M0(wrd);
1805 gen_op_iwmmxt_set_mup();
1806 gen_op_iwmmxt_set_cup();
1807 break;
1808 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1809 wrd = (insn >> 12) & 0xf;
1810 rd0 = (insn >> 16) & 0xf;
1811 rd1 = (insn >> 0) & 0xf;
1812 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1813 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1814 tcg_gen_andi_i32(tmp, tmp, 7);
1815 iwmmxt_load_reg(cpu_V1, rd1);
1816 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1817 tcg_temp_free_i32(tmp);
18c9b560
AZ
1818 gen_op_iwmmxt_movq_wRn_M0(wrd);
1819 gen_op_iwmmxt_set_mup();
1820 break;
1821 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1822 if (((insn >> 6) & 3) == 3)
1823 return 1;
18c9b560
AZ
1824 rd = (insn >> 12) & 0xf;
1825 wrd = (insn >> 16) & 0xf;
da6b5335 1826 tmp = load_reg(s, rd);
18c9b560
AZ
1827 gen_op_iwmmxt_movq_M0_wRn(wrd);
1828 switch ((insn >> 6) & 3) {
1829 case 0:
da6b5335
FN
1830 tmp2 = tcg_const_i32(0xff);
1831 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1832 break;
1833 case 1:
da6b5335
FN
1834 tmp2 = tcg_const_i32(0xffff);
1835 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1836 break;
1837 case 2:
da6b5335
FN
1838 tmp2 = tcg_const_i32(0xffffffff);
1839 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1840 break;
da6b5335 1841 default:
39d5492a
PM
1842 TCGV_UNUSED_I32(tmp2);
1843 TCGV_UNUSED_I32(tmp3);
18c9b560 1844 }
da6b5335 1845 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1846 tcg_temp_free_i32(tmp3);
1847 tcg_temp_free_i32(tmp2);
7d1b0095 1848 tcg_temp_free_i32(tmp);
18c9b560
AZ
1849 gen_op_iwmmxt_movq_wRn_M0(wrd);
1850 gen_op_iwmmxt_set_mup();
1851 break;
1852 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1853 rd = (insn >> 12) & 0xf;
1854 wrd = (insn >> 16) & 0xf;
da6b5335 1855 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1856 return 1;
1857 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1858 tmp = tcg_temp_new_i32();
18c9b560
AZ
1859 switch ((insn >> 22) & 3) {
1860 case 0:
da6b5335
FN
1861 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1862 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1863 if (insn & 8) {
1864 tcg_gen_ext8s_i32(tmp, tmp);
1865 } else {
1866 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1867 }
1868 break;
1869 case 1:
da6b5335
FN
1870 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1871 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1872 if (insn & 8) {
1873 tcg_gen_ext16s_i32(tmp, tmp);
1874 } else {
1875 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1876 }
1877 break;
1878 case 2:
da6b5335
FN
1879 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1880 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1881 break;
18c9b560 1882 }
da6b5335 1883 store_reg(s, rd, tmp);
18c9b560
AZ
1884 break;
1885 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1886 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1887 return 1;
da6b5335 1888 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1889 switch ((insn >> 22) & 3) {
1890 case 0:
da6b5335 1891 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1892 break;
1893 case 1:
da6b5335 1894 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1895 break;
1896 case 2:
da6b5335 1897 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1898 break;
18c9b560 1899 }
da6b5335
FN
1900 tcg_gen_shli_i32(tmp, tmp, 28);
1901 gen_set_nzcv(tmp);
7d1b0095 1902 tcg_temp_free_i32(tmp);
18c9b560
AZ
1903 break;
1904 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1905 if (((insn >> 6) & 3) == 3)
1906 return 1;
18c9b560
AZ
1907 rd = (insn >> 12) & 0xf;
1908 wrd = (insn >> 16) & 0xf;
da6b5335 1909 tmp = load_reg(s, rd);
18c9b560
AZ
1910 switch ((insn >> 6) & 3) {
1911 case 0:
da6b5335 1912 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1913 break;
1914 case 1:
da6b5335 1915 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1916 break;
1917 case 2:
da6b5335 1918 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1919 break;
18c9b560 1920 }
7d1b0095 1921 tcg_temp_free_i32(tmp);
18c9b560
AZ
1922 gen_op_iwmmxt_movq_wRn_M0(wrd);
1923 gen_op_iwmmxt_set_mup();
1924 break;
1925 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1926 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1927 return 1;
da6b5335 1928 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1929 tmp2 = tcg_temp_new_i32();
da6b5335 1930 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1931 switch ((insn >> 22) & 3) {
1932 case 0:
1933 for (i = 0; i < 7; i ++) {
da6b5335
FN
1934 tcg_gen_shli_i32(tmp2, tmp2, 4);
1935 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1936 }
1937 break;
1938 case 1:
1939 for (i = 0; i < 3; i ++) {
da6b5335
FN
1940 tcg_gen_shli_i32(tmp2, tmp2, 8);
1941 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1942 }
1943 break;
1944 case 2:
da6b5335
FN
1945 tcg_gen_shli_i32(tmp2, tmp2, 16);
1946 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1947 break;
18c9b560 1948 }
da6b5335 1949 gen_set_nzcv(tmp);
7d1b0095
PM
1950 tcg_temp_free_i32(tmp2);
1951 tcg_temp_free_i32(tmp);
18c9b560
AZ
1952 break;
1953 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1954 wrd = (insn >> 12) & 0xf;
1955 rd0 = (insn >> 16) & 0xf;
1956 gen_op_iwmmxt_movq_M0_wRn(rd0);
1957 switch ((insn >> 22) & 3) {
1958 case 0:
e677137d 1959 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1960 break;
1961 case 1:
e677137d 1962 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1963 break;
1964 case 2:
e677137d 1965 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1966 break;
1967 case 3:
1968 return 1;
1969 }
1970 gen_op_iwmmxt_movq_wRn_M0(wrd);
1971 gen_op_iwmmxt_set_mup();
1972 break;
1973 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1974 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1975 return 1;
da6b5335 1976 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1977 tmp2 = tcg_temp_new_i32();
da6b5335 1978 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1979 switch ((insn >> 22) & 3) {
1980 case 0:
1981 for (i = 0; i < 7; i ++) {
da6b5335
FN
1982 tcg_gen_shli_i32(tmp2, tmp2, 4);
1983 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1984 }
1985 break;
1986 case 1:
1987 for (i = 0; i < 3; i ++) {
da6b5335
FN
1988 tcg_gen_shli_i32(tmp2, tmp2, 8);
1989 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1990 }
1991 break;
1992 case 2:
da6b5335
FN
1993 tcg_gen_shli_i32(tmp2, tmp2, 16);
1994 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1995 break;
18c9b560 1996 }
da6b5335 1997 gen_set_nzcv(tmp);
7d1b0095
PM
1998 tcg_temp_free_i32(tmp2);
1999 tcg_temp_free_i32(tmp);
18c9b560
AZ
2000 break;
2001 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2002 rd = (insn >> 12) & 0xf;
2003 rd0 = (insn >> 16) & 0xf;
da6b5335 2004 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2005 return 1;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2007 tmp = tcg_temp_new_i32();
18c9b560
AZ
2008 switch ((insn >> 22) & 3) {
2009 case 0:
da6b5335 2010 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2011 break;
2012 case 1:
da6b5335 2013 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2014 break;
2015 case 2:
da6b5335 2016 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2017 break;
18c9b560 2018 }
da6b5335 2019 store_reg(s, rd, tmp);
18c9b560
AZ
2020 break;
2021 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2022 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2023 wrd = (insn >> 12) & 0xf;
2024 rd0 = (insn >> 16) & 0xf;
2025 rd1 = (insn >> 0) & 0xf;
2026 gen_op_iwmmxt_movq_M0_wRn(rd0);
2027 switch ((insn >> 22) & 3) {
2028 case 0:
2029 if (insn & (1 << 21))
2030 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2031 else
2032 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2033 break;
2034 case 1:
2035 if (insn & (1 << 21))
2036 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2037 else
2038 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2039 break;
2040 case 2:
2041 if (insn & (1 << 21))
2042 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2043 else
2044 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2045 break;
2046 case 3:
2047 return 1;
2048 }
2049 gen_op_iwmmxt_movq_wRn_M0(wrd);
2050 gen_op_iwmmxt_set_mup();
2051 gen_op_iwmmxt_set_cup();
2052 break;
2053 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2054 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2055 wrd = (insn >> 12) & 0xf;
2056 rd0 = (insn >> 16) & 0xf;
2057 gen_op_iwmmxt_movq_M0_wRn(rd0);
2058 switch ((insn >> 22) & 3) {
2059 case 0:
2060 if (insn & (1 << 21))
2061 gen_op_iwmmxt_unpacklsb_M0();
2062 else
2063 gen_op_iwmmxt_unpacklub_M0();
2064 break;
2065 case 1:
2066 if (insn & (1 << 21))
2067 gen_op_iwmmxt_unpacklsw_M0();
2068 else
2069 gen_op_iwmmxt_unpackluw_M0();
2070 break;
2071 case 2:
2072 if (insn & (1 << 21))
2073 gen_op_iwmmxt_unpacklsl_M0();
2074 else
2075 gen_op_iwmmxt_unpacklul_M0();
2076 break;
2077 case 3:
2078 return 1;
2079 }
2080 gen_op_iwmmxt_movq_wRn_M0(wrd);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2083 break;
2084 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2085 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2086 wrd = (insn >> 12) & 0xf;
2087 rd0 = (insn >> 16) & 0xf;
2088 gen_op_iwmmxt_movq_M0_wRn(rd0);
2089 switch ((insn >> 22) & 3) {
2090 case 0:
2091 if (insn & (1 << 21))
2092 gen_op_iwmmxt_unpackhsb_M0();
2093 else
2094 gen_op_iwmmxt_unpackhub_M0();
2095 break;
2096 case 1:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_unpackhsw_M0();
2099 else
2100 gen_op_iwmmxt_unpackhuw_M0();
2101 break;
2102 case 2:
2103 if (insn & (1 << 21))
2104 gen_op_iwmmxt_unpackhsl_M0();
2105 else
2106 gen_op_iwmmxt_unpackhul_M0();
2107 break;
2108 case 3:
2109 return 1;
2110 }
2111 gen_op_iwmmxt_movq_wRn_M0(wrd);
2112 gen_op_iwmmxt_set_mup();
2113 gen_op_iwmmxt_set_cup();
2114 break;
2115 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2116 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2117 if (((insn >> 22) & 3) == 0)
2118 return 1;
18c9b560
AZ
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2122 tmp = tcg_temp_new_i32();
da6b5335 2123 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2124 tcg_temp_free_i32(tmp);
18c9b560 2125 return 1;
da6b5335 2126 }
18c9b560 2127 switch ((insn >> 22) & 3) {
18c9b560 2128 case 1:
477955bd 2129 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2130 break;
2131 case 2:
477955bd 2132 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2133 break;
2134 case 3:
477955bd 2135 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2136 break;
2137 }
7d1b0095 2138 tcg_temp_free_i32(tmp);
18c9b560
AZ
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2142 break;
2143 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2144 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2145 if (((insn >> 22) & 3) == 0)
2146 return 1;
18c9b560
AZ
2147 wrd = (insn >> 12) & 0xf;
2148 rd0 = (insn >> 16) & 0xf;
2149 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2150 tmp = tcg_temp_new_i32();
da6b5335 2151 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2152 tcg_temp_free_i32(tmp);
18c9b560 2153 return 1;
da6b5335 2154 }
18c9b560 2155 switch ((insn >> 22) & 3) {
18c9b560 2156 case 1:
477955bd 2157 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2158 break;
2159 case 2:
477955bd 2160 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2161 break;
2162 case 3:
477955bd 2163 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2164 break;
2165 }
7d1b0095 2166 tcg_temp_free_i32(tmp);
18c9b560
AZ
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 gen_op_iwmmxt_set_cup();
2170 break;
2171 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2172 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2173 if (((insn >> 22) & 3) == 0)
2174 return 1;
18c9b560
AZ
2175 wrd = (insn >> 12) & 0xf;
2176 rd0 = (insn >> 16) & 0xf;
2177 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2178 tmp = tcg_temp_new_i32();
da6b5335 2179 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2180 tcg_temp_free_i32(tmp);
18c9b560 2181 return 1;
da6b5335 2182 }
18c9b560 2183 switch ((insn >> 22) & 3) {
18c9b560 2184 case 1:
477955bd 2185 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2186 break;
2187 case 2:
477955bd 2188 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2189 break;
2190 case 3:
477955bd 2191 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2192 break;
2193 }
7d1b0095 2194 tcg_temp_free_i32(tmp);
18c9b560
AZ
2195 gen_op_iwmmxt_movq_wRn_M0(wrd);
2196 gen_op_iwmmxt_set_mup();
2197 gen_op_iwmmxt_set_cup();
2198 break;
2199 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2200 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2201 if (((insn >> 22) & 3) == 0)
2202 return 1;
18c9b560
AZ
2203 wrd = (insn >> 12) & 0xf;
2204 rd0 = (insn >> 16) & 0xf;
2205 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2206 tmp = tcg_temp_new_i32();
18c9b560 2207 switch ((insn >> 22) & 3) {
18c9b560 2208 case 1:
da6b5335 2209 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2210 tcg_temp_free_i32(tmp);
18c9b560 2211 return 1;
da6b5335 2212 }
477955bd 2213 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2214 break;
2215 case 2:
da6b5335 2216 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2217 tcg_temp_free_i32(tmp);
18c9b560 2218 return 1;
da6b5335 2219 }
477955bd 2220 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2221 break;
2222 case 3:
da6b5335 2223 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2224 tcg_temp_free_i32(tmp);
18c9b560 2225 return 1;
da6b5335 2226 }
477955bd 2227 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2228 break;
2229 }
7d1b0095 2230 tcg_temp_free_i32(tmp);
18c9b560
AZ
2231 gen_op_iwmmxt_movq_wRn_M0(wrd);
2232 gen_op_iwmmxt_set_mup();
2233 gen_op_iwmmxt_set_cup();
2234 break;
2235 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2236 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 rd1 = (insn >> 0) & 0xf;
2240 gen_op_iwmmxt_movq_M0_wRn(rd0);
2241 switch ((insn >> 22) & 3) {
2242 case 0:
2243 if (insn & (1 << 21))
2244 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2245 else
2246 gen_op_iwmmxt_minub_M0_wRn(rd1);
2247 break;
2248 case 1:
2249 if (insn & (1 << 21))
2250 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2251 else
2252 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2253 break;
2254 case 2:
2255 if (insn & (1 << 21))
2256 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2257 else
2258 gen_op_iwmmxt_minul_M0_wRn(rd1);
2259 break;
2260 case 3:
2261 return 1;
2262 }
2263 gen_op_iwmmxt_movq_wRn_M0(wrd);
2264 gen_op_iwmmxt_set_mup();
2265 break;
2266 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2267 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2268 wrd = (insn >> 12) & 0xf;
2269 rd0 = (insn >> 16) & 0xf;
2270 rd1 = (insn >> 0) & 0xf;
2271 gen_op_iwmmxt_movq_M0_wRn(rd0);
2272 switch ((insn >> 22) & 3) {
2273 case 0:
2274 if (insn & (1 << 21))
2275 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2276 else
2277 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2278 break;
2279 case 1:
2280 if (insn & (1 << 21))
2281 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2282 else
2283 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2284 break;
2285 case 2:
2286 if (insn & (1 << 21))
2287 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2288 else
2289 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2290 break;
2291 case 3:
2292 return 1;
2293 }
2294 gen_op_iwmmxt_movq_wRn_M0(wrd);
2295 gen_op_iwmmxt_set_mup();
2296 break;
2297 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2298 case 0x402: case 0x502: case 0x602: case 0x702:
2299 wrd = (insn >> 12) & 0xf;
2300 rd0 = (insn >> 16) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2303 tmp = tcg_const_i32((insn >> 20) & 3);
2304 iwmmxt_load_reg(cpu_V1, rd1);
2305 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2306 tcg_temp_free_i32(tmp);
18c9b560
AZ
2307 gen_op_iwmmxt_movq_wRn_M0(wrd);
2308 gen_op_iwmmxt_set_mup();
2309 break;
2310 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2311 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2312 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2313 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2314 wrd = (insn >> 12) & 0xf;
2315 rd0 = (insn >> 16) & 0xf;
2316 rd1 = (insn >> 0) & 0xf;
2317 gen_op_iwmmxt_movq_M0_wRn(rd0);
2318 switch ((insn >> 20) & 0xf) {
2319 case 0x0:
2320 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2321 break;
2322 case 0x1:
2323 gen_op_iwmmxt_subub_M0_wRn(rd1);
2324 break;
2325 case 0x3:
2326 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2327 break;
2328 case 0x4:
2329 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2330 break;
2331 case 0x5:
2332 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2333 break;
2334 case 0x7:
2335 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2336 break;
2337 case 0x8:
2338 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2339 break;
2340 case 0x9:
2341 gen_op_iwmmxt_subul_M0_wRn(rd1);
2342 break;
2343 case 0xb:
2344 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2345 break;
2346 default:
2347 return 1;
2348 }
2349 gen_op_iwmmxt_movq_wRn_M0(wrd);
2350 gen_op_iwmmxt_set_mup();
2351 gen_op_iwmmxt_set_cup();
2352 break;
2353 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2354 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2355 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2356 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2357 wrd = (insn >> 12) & 0xf;
2358 rd0 = (insn >> 16) & 0xf;
2359 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2360 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2361 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2362 tcg_temp_free_i32(tmp);
18c9b560
AZ
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2366 break;
2367 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2368 case 0x418: case 0x518: case 0x618: case 0x718:
2369 case 0x818: case 0x918: case 0xa18: case 0xb18:
2370 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2371 wrd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
2373 rd1 = (insn >> 0) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
2375 switch ((insn >> 20) & 0xf) {
2376 case 0x0:
2377 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2378 break;
2379 case 0x1:
2380 gen_op_iwmmxt_addub_M0_wRn(rd1);
2381 break;
2382 case 0x3:
2383 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2384 break;
2385 case 0x4:
2386 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2387 break;
2388 case 0x5:
2389 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2390 break;
2391 case 0x7:
2392 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2393 break;
2394 case 0x8:
2395 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2396 break;
2397 case 0x9:
2398 gen_op_iwmmxt_addul_M0_wRn(rd1);
2399 break;
2400 case 0xb:
2401 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2402 break;
2403 default:
2404 return 1;
2405 }
2406 gen_op_iwmmxt_movq_wRn_M0(wrd);
2407 gen_op_iwmmxt_set_mup();
2408 gen_op_iwmmxt_set_cup();
2409 break;
2410 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2411 case 0x408: case 0x508: case 0x608: case 0x708:
2412 case 0x808: case 0x908: case 0xa08: case 0xb08:
2413 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2414 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2415 return 1;
18c9b560
AZ
2416 wrd = (insn >> 12) & 0xf;
2417 rd0 = (insn >> 16) & 0xf;
2418 rd1 = (insn >> 0) & 0xf;
2419 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2420 switch ((insn >> 22) & 3) {
18c9b560
AZ
2421 case 1:
2422 if (insn & (1 << 21))
2423 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2424 else
2425 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2426 break;
2427 case 2:
2428 if (insn & (1 << 21))
2429 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2430 else
2431 gen_op_iwmmxt_packul_M0_wRn(rd1);
2432 break;
2433 case 3:
2434 if (insn & (1 << 21))
2435 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2436 else
2437 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2438 break;
2439 }
2440 gen_op_iwmmxt_movq_wRn_M0(wrd);
2441 gen_op_iwmmxt_set_mup();
2442 gen_op_iwmmxt_set_cup();
2443 break;
2444 case 0x201: case 0x203: case 0x205: case 0x207:
2445 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2446 case 0x211: case 0x213: case 0x215: case 0x217:
2447 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2448 wrd = (insn >> 5) & 0xf;
2449 rd0 = (insn >> 12) & 0xf;
2450 rd1 = (insn >> 0) & 0xf;
2451 if (rd0 == 0xf || rd1 == 0xf)
2452 return 1;
2453 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2454 tmp = load_reg(s, rd0);
2455 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2456 switch ((insn >> 16) & 0xf) {
2457 case 0x0: /* TMIA */
da6b5335 2458 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2459 break;
2460 case 0x8: /* TMIAPH */
da6b5335 2461 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2462 break;
2463 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2464 if (insn & (1 << 16))
da6b5335 2465 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2466 if (insn & (1 << 17))
da6b5335
FN
2467 tcg_gen_shri_i32(tmp2, tmp2, 16);
2468 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2469 break;
2470 default:
7d1b0095
PM
2471 tcg_temp_free_i32(tmp2);
2472 tcg_temp_free_i32(tmp);
18c9b560
AZ
2473 return 1;
2474 }
7d1b0095
PM
2475 tcg_temp_free_i32(tmp2);
2476 tcg_temp_free_i32(tmp);
18c9b560
AZ
2477 gen_op_iwmmxt_movq_wRn_M0(wrd);
2478 gen_op_iwmmxt_set_mup();
2479 break;
2480 default:
2481 return 1;
2482 }
2483
2484 return 0;
2485}
2486
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction). */
static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        /* Only accumulator 0 is implemented; anything else is UNDEF.  */
        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0: /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8: /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: /* MIABB */
        case 0xd: /* MIABT */
        case 0xe: /* MIATB */
        case 0xf: /* MIATT */
            /* Bits 16 and 17 select the top (T) or bottom (B) halfword
             * of each source operand respectively.
             */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        /* Only accumulator 0 is implemented; anything else is UNDEF.  */
        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            /* Read the 40-bit accumulator into rdlo:rdhi; the high word
             * is masked so only bits [39:32] appear in rdhi.
             */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
2556
/* Helpers for extracting VFP register numbers from instruction fields.
 * VFP_REG_SHR shifts right by n when n is positive and left by -n
 * otherwise, so one expression handles either direction.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Single-precision register number: the 4-bit field at 'bigbit' forms
 * the high bits, with the bit at 'smallbit' as the low bit.
 */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register number: on VFP3 the bit at 'smallbit' is
 * the 5th (high) register bit; pre-VFP3 cores only have 16 D registers,
 * so a set smallbit is UNDEF.  NOTE: the macro expands to a statement
 * that may 'return 1' and reads 'env', so it must only be used inside
 * a decoder function returning int with 'env' in scope.
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* The D/N/M operand fields live at bits 12/16/0 with extra (small)
 * bits 22/7/5 respectively.
 */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2576
4373f3ce 2577/* Move between integer and VFP cores. */
39d5492a 2578static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2579{
39d5492a 2580 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2581 tcg_gen_mov_i32(tmp, cpu_F0s);
2582 return tmp;
2583}
2584
39d5492a 2585static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2586{
2587 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2588 tcg_temp_free_i32(tmp);
4373f3ce
PB
2589}
2590
39d5492a 2591static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2592{
39d5492a 2593 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2594 if (shift)
2595 tcg_gen_shri_i32(var, var, shift);
86831435 2596 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2597 tcg_gen_shli_i32(tmp, var, 8);
2598 tcg_gen_or_i32(var, var, tmp);
2599 tcg_gen_shli_i32(tmp, var, 16);
2600 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2601 tcg_temp_free_i32(tmp);
ad69471c
PB
2602}
2603
39d5492a 2604static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2605{
39d5492a 2606 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2607 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2608 tcg_gen_shli_i32(tmp, var, 16);
2609 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2610 tcg_temp_free_i32(tmp);
ad69471c
PB
2611}
2612
39d5492a 2613static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2614{
39d5492a 2615 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2616 tcg_gen_andi_i32(var, var, 0xffff0000);
2617 tcg_gen_shri_i32(tmp, var, 16);
2618 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2619 tcg_temp_free_i32(tmp);
ad69471c
PB
2620}
2621
39d5492a 2622static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2623{
2624 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2625 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2626 switch (size) {
2627 case 0:
6ce2faf4 2628 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2629 gen_neon_dup_u8(tmp, 0);
2630 break;
2631 case 1:
6ce2faf4 2632 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2633 gen_neon_dup_low16(tmp);
2634 break;
2635 case 2:
6ce2faf4 2636 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2637 break;
2638 default: /* Avoid compiler warnings. */
2639 abort();
2640 }
2641 return tmp;
2642}
2643
/* Handle the VSEL instruction: conditionally select either the Rn or
 * the Rm source register based on the current condition flags and
 * write the result to Rd.  'cc' (insn bits [21:20]) picks which of
 * eq/vs/ge/gt is tested.  Always returns 0 (the encoding has already
 * been matched by the caller's mask check).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32 bit flag values so they can be used with the
         * 64 bit movcond; NF and VF are sign-extended because the
         * conditions below test their sign (LT/GE against zero).
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds: first select on !Z, then overwrite the
             * result with frm again unless N == V also holds.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: the flags can be used directly. */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2752
40cfacdd
WN
2753static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2754 uint32_t rm, uint32_t dp)
2755{
2756 uint32_t vmin = extract32(insn, 6, 1);
2757 TCGv_ptr fpst = get_fpstatus_ptr(0);
2758
2759 if (dp) {
2760 TCGv_i64 frn, frm, dest;
2761
2762 frn = tcg_temp_new_i64();
2763 frm = tcg_temp_new_i64();
2764 dest = tcg_temp_new_i64();
2765
2766 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2767 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2768 if (vmin) {
f71a2ae5 2769 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2770 } else {
f71a2ae5 2771 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2772 }
2773 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2774 tcg_temp_free_i64(frn);
2775 tcg_temp_free_i64(frm);
2776 tcg_temp_free_i64(dest);
2777 } else {
2778 TCGv_i32 frn, frm, dest;
2779
2780 frn = tcg_temp_new_i32();
2781 frm = tcg_temp_new_i32();
2782 dest = tcg_temp_new_i32();
2783
2784 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2785 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2786 if (vmin) {
f71a2ae5 2787 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2788 } else {
f71a2ae5 2789 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2790 }
2791 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2792 tcg_temp_free_i32(frn);
2793 tcg_temp_free_i32(frm);
2794 tcg_temp_free_i32(dest);
2795 }
2796
2797 tcg_temp_free_ptr(fpst);
2798 return 0;
2799}
2800
7655f39b
WN
2801static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2802 int rounding)
2803{
2804 TCGv_ptr fpst = get_fpstatus_ptr(0);
2805 TCGv_i32 tcg_rmode;
2806
2807 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2808 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2809
2810 if (dp) {
2811 TCGv_i64 tcg_op;
2812 TCGv_i64 tcg_res;
2813 tcg_op = tcg_temp_new_i64();
2814 tcg_res = tcg_temp_new_i64();
2815 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2816 gen_helper_rintd(tcg_res, tcg_op, fpst);
2817 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2818 tcg_temp_free_i64(tcg_op);
2819 tcg_temp_free_i64(tcg_res);
2820 } else {
2821 TCGv_i32 tcg_op;
2822 TCGv_i32 tcg_res;
2823 tcg_op = tcg_temp_new_i32();
2824 tcg_res = tcg_temp_new_i32();
2825 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2826 gen_helper_rints(tcg_res, tcg_op, fpst);
2827 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2828 tcg_temp_free_i32(tcg_op);
2829 tcg_temp_free_i32(tcg_res);
2830 }
2831
2832 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2833 tcg_temp_free_i32(tcg_rmode);
2834
2835 tcg_temp_free_ptr(fpst);
2836 return 0;
2837}
2838
/* Handle VCVTA/VCVTN/VCVTP/VCVTM: convert floating point to integer
 * using the explicitly encoded 'rounding' mode rather than the one in
 * the FPSCR.  insn bit 7 selects a signed (set) or unsigned (clear)
 * result.  Always returns 0.
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* No fixed-point shift for these conversions. */
    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The helper produces a 64 bit value; only the low 32 bits are
         * written back to the destination S register.
         */
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Second call restores the previous rounding mode (NOTE(review):
     * inferred from the call pattern -- set_rmode appears to write the
     * old mode back into tcg_rmode; confirm against the helper).
     */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
2896
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,  /* RM field 0b00: round to nearest, ties away */
    FPROUNDING_TIEEVEN,  /* RM field 0b01: round to nearest, ties even */
    FPROUNDING_POSINF,   /* RM field 0b10: round towards +infinity */
    FPROUNDING_NEGINF,   /* RM field 0b11: round towards -infinity */
};
2907
/* Decode the VFP instructions that live in the "unconditional"/Thumb
 * encoding space and require ARMv8: VSEL, VMINNM/VMAXNM,
 * VRINT{A,N,P,M} and VCVT{A,N,P,M}.  Returns nonzero for an invalid
 * encoding (UNDEF).
 */
static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        return 1;
    }

    /* Extract the operand register numbers; D registers carry an extra
     * high bit (see VFP_DREG), S registers an extra low bit.
     */
    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
2941
a1c7273b 2942/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2943 (ie. an undefined instruction). */
0ecb72a5 2944static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2945{
2946 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2947 int dp, veclen;
39d5492a
PM
2948 TCGv_i32 addr;
2949 TCGv_i32 tmp;
2950 TCGv_i32 tmp2;
b7bcbe95 2951
40f137e1
PB
2952 if (!arm_feature(env, ARM_FEATURE_VFP))
2953 return 1;
2954
2c7ffc41
PM
2955 /* FIXME: this access check should not take precedence over UNDEF
2956 * for invalid encodings; we will generate incorrect syndrome information
2957 * for attempts to execute invalid vfp/neon encodings with FP disabled.
2958 */
2959 if (!s->cpacr_fpen) {
2960 gen_exception_insn(s, 4, EXCP_UDEF,
2961 syn_fp_access_trap(1, 0xe, s->thumb));
2962 return 0;
2963 }
2964
5df8bac1 2965 if (!s->vfp_enabled) {
9ee6e8bb 2966 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2967 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2968 return 1;
2969 rn = (insn >> 16) & 0xf;
a50c0f51
PM
2970 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
2971 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 2972 return 1;
a50c0f51 2973 }
40f137e1 2974 }
6a57f3eb
WN
2975
2976 if (extract32(insn, 28, 4) == 0xf) {
2977 /* Encodings with T=1 (Thumb) or unconditional (ARM):
2978 * only used in v8 and above.
2979 */
04731fb5 2980 return disas_vfp_v8_insn(env, s, insn);
6a57f3eb
WN
2981 }
2982
b7bcbe95
FB
2983 dp = ((insn & 0xf00) == 0xb00);
2984 switch ((insn >> 24) & 0xf) {
2985 case 0xe:
2986 if (insn & (1 << 4)) {
2987 /* single register transfer */
b7bcbe95
FB
2988 rd = (insn >> 12) & 0xf;
2989 if (dp) {
9ee6e8bb
PB
2990 int size;
2991 int pass;
2992
2993 VFP_DREG_N(rn, insn);
2994 if (insn & 0xf)
b7bcbe95 2995 return 1;
9ee6e8bb
PB
2996 if (insn & 0x00c00060
2997 && !arm_feature(env, ARM_FEATURE_NEON))
2998 return 1;
2999
3000 pass = (insn >> 21) & 1;
3001 if (insn & (1 << 22)) {
3002 size = 0;
3003 offset = ((insn >> 5) & 3) * 8;
3004 } else if (insn & (1 << 5)) {
3005 size = 1;
3006 offset = (insn & (1 << 6)) ? 16 : 0;
3007 } else {
3008 size = 2;
3009 offset = 0;
3010 }
18c9b560 3011 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3012 /* vfp->arm */
ad69471c 3013 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3014 switch (size) {
3015 case 0:
9ee6e8bb 3016 if (offset)
ad69471c 3017 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3018 if (insn & (1 << 23))
ad69471c 3019 gen_uxtb(tmp);
9ee6e8bb 3020 else
ad69471c 3021 gen_sxtb(tmp);
9ee6e8bb
PB
3022 break;
3023 case 1:
9ee6e8bb
PB
3024 if (insn & (1 << 23)) {
3025 if (offset) {
ad69471c 3026 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3027 } else {
ad69471c 3028 gen_uxth(tmp);
9ee6e8bb
PB
3029 }
3030 } else {
3031 if (offset) {
ad69471c 3032 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3033 } else {
ad69471c 3034 gen_sxth(tmp);
9ee6e8bb
PB
3035 }
3036 }
3037 break;
3038 case 2:
9ee6e8bb
PB
3039 break;
3040 }
ad69471c 3041 store_reg(s, rd, tmp);
b7bcbe95
FB
3042 } else {
3043 /* arm->vfp */
ad69471c 3044 tmp = load_reg(s, rd);
9ee6e8bb
PB
3045 if (insn & (1 << 23)) {
3046 /* VDUP */
3047 if (size == 0) {
ad69471c 3048 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3049 } else if (size == 1) {
ad69471c 3050 gen_neon_dup_low16(tmp);
9ee6e8bb 3051 }
cbbccffc 3052 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3053 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3054 tcg_gen_mov_i32(tmp2, tmp);
3055 neon_store_reg(rn, n, tmp2);
3056 }
3057 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3058 } else {
3059 /* VMOV */
3060 switch (size) {
3061 case 0:
ad69471c 3062 tmp2 = neon_load_reg(rn, pass);
d593c48e 3063 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3064 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3065 break;
3066 case 1:
ad69471c 3067 tmp2 = neon_load_reg(rn, pass);
d593c48e 3068 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3069 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3070 break;
3071 case 2:
9ee6e8bb
PB
3072 break;
3073 }
ad69471c 3074 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3075 }
b7bcbe95 3076 }
9ee6e8bb
PB
3077 } else { /* !dp */
3078 if ((insn & 0x6f) != 0x00)
3079 return 1;
3080 rn = VFP_SREG_N(insn);
18c9b560 3081 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3082 /* vfp->arm */
3083 if (insn & (1 << 21)) {
3084 /* system register */
40f137e1 3085 rn >>= 1;
9ee6e8bb 3086
b7bcbe95 3087 switch (rn) {
40f137e1 3088 case ARM_VFP_FPSID:
4373f3ce 3089 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3090 VFP3 restricts all id registers to privileged
3091 accesses. */
3092 if (IS_USER(s)
3093 && arm_feature(env, ARM_FEATURE_VFP3))
3094 return 1;
4373f3ce 3095 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3096 break;
40f137e1 3097 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3098 if (IS_USER(s))
3099 return 1;
4373f3ce 3100 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3101 break;
40f137e1
PB
3102 case ARM_VFP_FPINST:
3103 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3104 /* Not present in VFP3. */
3105 if (IS_USER(s)
3106 || arm_feature(env, ARM_FEATURE_VFP3))
3107 return 1;
4373f3ce 3108 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3109 break;
40f137e1 3110 case ARM_VFP_FPSCR:
601d70b9 3111 if (rd == 15) {
4373f3ce
PB
3112 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3113 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3114 } else {
7d1b0095 3115 tmp = tcg_temp_new_i32();
4373f3ce
PB
3116 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3117 }
b7bcbe95 3118 break;
a50c0f51
PM
3119 case ARM_VFP_MVFR2:
3120 if (!arm_feature(env, ARM_FEATURE_V8)) {
3121 return 1;
3122 }
3123 /* fall through */
9ee6e8bb
PB
3124 case ARM_VFP_MVFR0:
3125 case ARM_VFP_MVFR1:
3126 if (IS_USER(s)
06ed5d66 3127 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 3128 return 1;
4373f3ce 3129 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3130 break;
b7bcbe95
FB
3131 default:
3132 return 1;
3133 }
3134 } else {
3135 gen_mov_F0_vreg(0, rn);
4373f3ce 3136 tmp = gen_vfp_mrs();
b7bcbe95
FB
3137 }
3138 if (rd == 15) {
b5ff1b31 3139 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3140 gen_set_nzcv(tmp);
7d1b0095 3141 tcg_temp_free_i32(tmp);
4373f3ce
PB
3142 } else {
3143 store_reg(s, rd, tmp);
3144 }
b7bcbe95
FB
3145 } else {
3146 /* arm->vfp */
b7bcbe95 3147 if (insn & (1 << 21)) {
40f137e1 3148 rn >>= 1;
b7bcbe95
FB
3149 /* system register */
3150 switch (rn) {
40f137e1 3151 case ARM_VFP_FPSID:
9ee6e8bb
PB
3152 case ARM_VFP_MVFR0:
3153 case ARM_VFP_MVFR1:
b7bcbe95
FB
3154 /* Writes are ignored. */
3155 break;
40f137e1 3156 case ARM_VFP_FPSCR:
e4c1cfa5 3157 tmp = load_reg(s, rd);
4373f3ce 3158 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3159 tcg_temp_free_i32(tmp);
b5ff1b31 3160 gen_lookup_tb(s);
b7bcbe95 3161 break;
40f137e1 3162 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3163 if (IS_USER(s))
3164 return 1;
71b3c3de
JR
3165 /* TODO: VFP subarchitecture support.
3166 * For now, keep the EN bit only */
e4c1cfa5 3167 tmp = load_reg(s, rd);
71b3c3de 3168 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3169 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3170 gen_lookup_tb(s);
3171 break;
3172 case ARM_VFP_FPINST:
3173 case ARM_VFP_FPINST2:
e4c1cfa5 3174 tmp = load_reg(s, rd);
4373f3ce 3175 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3176 break;
b7bcbe95
FB
3177 default:
3178 return 1;
3179 }
3180 } else {
e4c1cfa5 3181 tmp = load_reg(s, rd);
4373f3ce 3182 gen_vfp_msr(tmp);
b7bcbe95
FB
3183 gen_mov_vreg_F0(0, rn);
3184 }
3185 }
3186 }
3187 } else {
3188 /* data processing */
3189 /* The opcode is in bits 23, 21, 20 and 6. */
3190 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3191 if (dp) {
3192 if (op == 15) {
3193 /* rn is opcode */
3194 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3195 } else {
3196 /* rn is register number */
9ee6e8bb 3197 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3198 }
3199
239c20c7
WN
3200 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3201 ((rn & 0x1e) == 0x6))) {
3202 /* Integer or single/half precision destination. */
9ee6e8bb 3203 rd = VFP_SREG_D(insn);
b7bcbe95 3204 } else {
9ee6e8bb 3205 VFP_DREG_D(rd, insn);
b7bcbe95 3206 }
04595bf6 3207 if (op == 15 &&
239c20c7
WN
3208 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3209 ((rn & 0x1e) == 0x4))) {
3210 /* VCVT from int or half precision is always from S reg
3211 * regardless of dp bit. VCVT with immediate frac_bits
3212 * has same format as SREG_M.
04595bf6
PM
3213 */
3214 rm = VFP_SREG_M(insn);
b7bcbe95 3215 } else {
9ee6e8bb 3216 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3217 }
3218 } else {
9ee6e8bb 3219 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3220 if (op == 15 && rn == 15) {
3221 /* Double precision destination. */
9ee6e8bb
PB
3222 VFP_DREG_D(rd, insn);
3223 } else {
3224 rd = VFP_SREG_D(insn);
3225 }
04595bf6
PM
3226 /* NB that we implicitly rely on the encoding for the frac_bits
3227 * in VCVT of fixed to float being the same as that of an SREG_M
3228 */
9ee6e8bb 3229 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3230 }
3231
69d1fc22 3232 veclen = s->vec_len;
b7bcbe95
FB
3233 if (op == 15 && rn > 3)
3234 veclen = 0;
3235
3236 /* Shut up compiler warnings. */
3237 delta_m = 0;
3238 delta_d = 0;
3239 bank_mask = 0;
3b46e624 3240
b7bcbe95
FB
3241 if (veclen > 0) {
3242 if (dp)
3243 bank_mask = 0xc;
3244 else
3245 bank_mask = 0x18;
3246
3247 /* Figure out what type of vector operation this is. */
3248 if ((rd & bank_mask) == 0) {
3249 /* scalar */
3250 veclen = 0;
3251 } else {
3252 if (dp)
69d1fc22 3253 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3254 else
69d1fc22 3255 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3256
3257 if ((rm & bank_mask) == 0) {
3258 /* mixed scalar/vector */
3259 delta_m = 0;
3260 } else {
3261 /* vector */
3262 delta_m = delta_d;
3263 }
3264 }
3265 }
3266
3267 /* Load the initial operands. */
3268 if (op == 15) {
3269 switch (rn) {
3270 case 16:
3271 case 17:
3272 /* Integer source */
3273 gen_mov_F0_vreg(0, rm);
3274 break;
3275 case 8:
3276 case 9:
3277 /* Compare */
3278 gen_mov_F0_vreg(dp, rd);
3279 gen_mov_F1_vreg(dp, rm);
3280 break;
3281 case 10:
3282 case 11:
3283 /* Compare with zero */
3284 gen_mov_F0_vreg(dp, rd);
3285 gen_vfp_F1_ld0(dp);
3286 break;
9ee6e8bb
PB
3287 case 20:
3288 case 21:
3289 case 22:
3290 case 23:
644ad806
PB
3291 case 28:
3292 case 29:
3293 case 30:
3294 case 31:
9ee6e8bb
PB
3295 /* Source and destination the same. */
3296 gen_mov_F0_vreg(dp, rd);
3297 break;
6e0c0ed1
PM
3298 case 4:
3299 case 5:
3300 case 6:
3301 case 7:
239c20c7
WN
3302 /* VCVTB, VCVTT: only present with the halfprec extension
3303 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3304 * (we choose to UNDEF)
6e0c0ed1 3305 */
239c20c7
WN
3306 if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
3307 !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3308 return 1;
3309 }
239c20c7
WN
3310 if (!extract32(rn, 1, 1)) {
3311 /* Half precision source. */
3312 gen_mov_F0_vreg(0, rm);
3313 break;
3314 }
6e0c0ed1 3315 /* Otherwise fall through */
b7bcbe95
FB
3316 default:
3317 /* One source operand. */
3318 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3319 break;
b7bcbe95
FB
3320 }
3321 } else {
3322 /* Two source operands. */
3323 gen_mov_F0_vreg(dp, rn);
3324 gen_mov_F1_vreg(dp, rm);
3325 }
3326
3327 for (;;) {
3328 /* Perform the calculation. */
3329 switch (op) {
605a6aed
PM
3330 case 0: /* VMLA: fd + (fn * fm) */
3331 /* Note that order of inputs to the add matters for NaNs */
3332 gen_vfp_F1_mul(dp);
3333 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3334 gen_vfp_add(dp);
3335 break;
605a6aed 3336 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3337 gen_vfp_mul(dp);
605a6aed
PM
3338 gen_vfp_F1_neg(dp);
3339 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3340 gen_vfp_add(dp);
3341 break;
605a6aed
PM
3342 case 2: /* VNMLS: -fd + (fn * fm) */
3343 /* Note that it isn't valid to replace (-A + B) with (B - A)
3344 * or similar plausible looking simplifications
3345 * because this will give wrong results for NaNs.
3346 */
3347 gen_vfp_F1_mul(dp);
3348 gen_mov_F0_vreg(dp, rd);
3349 gen_vfp_neg(dp);
3350 gen_vfp_add(dp);
b7bcbe95 3351 break;
605a6aed 3352 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3353 gen_vfp_mul(dp);
605a6aed
PM
3354 gen_vfp_F1_neg(dp);
3355 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3356 gen_vfp_neg(dp);
605a6aed 3357 gen_vfp_add(dp);
b7bcbe95
FB
3358 break;
3359 case 4: /* mul: fn * fm */
3360 gen_vfp_mul(dp);
3361 break;
3362 case 5: /* nmul: -(fn * fm) */
3363 gen_vfp_mul(dp);
3364 gen_vfp_neg(dp);
3365 break;
3366 case 6: /* add: fn + fm */
3367 gen_vfp_add(dp);
3368 break;
3369 case 7: /* sub: fn - fm */
3370 gen_vfp_sub(dp);
3371 break;
3372 case 8: /* div: fn / fm */
3373 gen_vfp_div(dp);
3374 break;
da97f52c
PM
3375 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3376 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3377 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3378 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3379 /* These are fused multiply-add, and must be done as one
3380 * floating point operation with no rounding between the
3381 * multiplication and addition steps.
3382 * NB that doing the negations here as separate steps is
3383 * correct : an input NaN should come out with its sign bit
3384 * flipped if it is a negated-input.
3385 */
3386 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3387 return 1;
3388 }
3389 if (dp) {
3390 TCGv_ptr fpst;
3391 TCGv_i64 frd;
3392 if (op & 1) {
3393 /* VFNMS, VFMS */
3394 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3395 }
3396 frd = tcg_temp_new_i64();
3397 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3398 if (op & 2) {
3399 /* VFNMA, VFNMS */
3400 gen_helper_vfp_negd(frd, frd);
3401 }
3402 fpst = get_fpstatus_ptr(0);
3403 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3404 cpu_F1d, frd, fpst);
3405 tcg_temp_free_ptr(fpst);
3406 tcg_temp_free_i64(frd);
3407 } else {
3408 TCGv_ptr fpst;
3409 TCGv_i32 frd;
3410 if (op & 1) {
3411 /* VFNMS, VFMS */
3412 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3413 }
3414 frd = tcg_temp_new_i32();
3415 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3416 if (op & 2) {
3417 gen_helper_vfp_negs(frd, frd);
3418 }
3419 fpst = get_fpstatus_ptr(0);
3420 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3421 cpu_F1s, frd, fpst);
3422 tcg_temp_free_ptr(fpst);
3423 tcg_temp_free_i32(frd);
3424 }
3425 break;
9ee6e8bb
PB
3426 case 14: /* fconst */
3427 if (!arm_feature(env, ARM_FEATURE_VFP3))
3428 return 1;
3429
3430 n = (insn << 12) & 0x80000000;
3431 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3432 if (dp) {
3433 if (i & 0x40)
3434 i |= 0x3f80;
3435 else
3436 i |= 0x4000;
3437 n |= i << 16;
4373f3ce 3438 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3439 } else {
3440 if (i & 0x40)
3441 i |= 0x780;
3442 else
3443 i |= 0x800;
3444 n |= i << 19;
5b340b51 3445 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3446 }
9ee6e8bb 3447 break;
b7bcbe95
FB
3448 case 15: /* extension space */
3449 switch (rn) {
3450 case 0: /* cpy */
3451 /* no-op */
3452 break;
3453 case 1: /* abs */
3454 gen_vfp_abs(dp);
3455 break;
3456 case 2: /* neg */
3457 gen_vfp_neg(dp);
3458 break;
3459 case 3: /* sqrt */
3460 gen_vfp_sqrt(dp);
3461 break;
239c20c7 3462 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3463 tmp = gen_vfp_mrs();
3464 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3465 if (dp) {
3466 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3467 cpu_env);
3468 } else {
3469 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3470 cpu_env);
3471 }
7d1b0095 3472 tcg_temp_free_i32(tmp);
60011498 3473 break;
239c20c7 3474 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3475 tmp = gen_vfp_mrs();
3476 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3477 if (dp) {
3478 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3479 cpu_env);
3480 } else {
3481 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3482 cpu_env);
3483 }
7d1b0095 3484 tcg_temp_free_i32(tmp);
60011498 3485 break;
239c20c7 3486 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3487 tmp = tcg_temp_new_i32();
239c20c7
WN
3488 if (dp) {
3489 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3490 cpu_env);
3491 } else {
3492 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3493 cpu_env);
3494 }
60011498
PB
3495 gen_mov_F0_vreg(0, rd);
3496 tmp2 = gen_vfp_mrs();
3497 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3498 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3499 tcg_temp_free_i32(tmp2);
60011498
PB
3500 gen_vfp_msr(tmp);
3501 break;
239c20c7 3502 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3503 tmp = tcg_temp_new_i32();
239c20c7
WN
3504 if (dp) {
3505 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3506 cpu_env);
3507 } else {
3508 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3509 cpu_env);
3510 }
60011498
PB
3511 tcg_gen_shli_i32(tmp, tmp, 16);
3512 gen_mov_F0_vreg(0, rd);
3513 tmp2 = gen_vfp_mrs();
3514 tcg_gen_ext16u_i32(tmp2, tmp2);
3515 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3516 tcg_temp_free_i32(tmp2);
60011498
PB
3517 gen_vfp_msr(tmp);
3518 break;
b7bcbe95
FB
3519 case 8: /* cmp */
3520 gen_vfp_cmp(dp);
3521 break;
3522 case 9: /* cmpe */
3523 gen_vfp_cmpe(dp);
3524 break;
3525 case 10: /* cmpz */
3526 gen_vfp_cmp(dp);
3527 break;
3528 case 11: /* cmpez */
3529 gen_vfp_F1_ld0(dp);
3530 gen_vfp_cmpe(dp);
3531 break;
664c6733
WN
3532 case 12: /* vrintr */
3533 {
3534 TCGv_ptr fpst = get_fpstatus_ptr(0);
3535 if (dp) {
3536 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3537 } else {
3538 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3539 }
3540 tcg_temp_free_ptr(fpst);
3541 break;
3542 }
a290c62a
WN
3543 case 13: /* vrintz */
3544 {
3545 TCGv_ptr fpst = get_fpstatus_ptr(0);
3546 TCGv_i32 tcg_rmode;
3547 tcg_rmode = tcg_const_i32(float_round_to_zero);
3548 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3549 if (dp) {
3550 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3551 } else {
3552 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3553 }
3554 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3555 tcg_temp_free_i32(tcg_rmode);
3556 tcg_temp_free_ptr(fpst);
3557 break;
3558 }
4e82bc01
WN
3559 case 14: /* vrintx */
3560 {
3561 TCGv_ptr fpst = get_fpstatus_ptr(0);
3562 if (dp) {
3563 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3564 } else {
3565 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3566 }
3567 tcg_temp_free_ptr(fpst);
3568 break;
3569 }
b7bcbe95
FB
3570 case 15: /* single<->double conversion */
3571 if (dp)
4373f3ce 3572 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3573 else
4373f3ce 3574 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3575 break;
3576 case 16: /* fuito */
5500b06c 3577 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3578 break;
3579 case 17: /* fsito */
5500b06c 3580 gen_vfp_sito(dp, 0);
b7bcbe95 3581 break;
9ee6e8bb
PB
3582 case 20: /* fshto */
3583 if (!arm_feature(env, ARM_FEATURE_VFP3))
3584 return 1;
5500b06c 3585 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3586 break;
3587 case 21: /* fslto */
3588 if (!arm_feature(env, ARM_FEATURE_VFP3))
3589 return 1;
5500b06c 3590 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3591 break;
3592 case 22: /* fuhto */
3593 if (!arm_feature(env, ARM_FEATURE_VFP3))
3594 return 1;
5500b06c 3595 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3596 break;
3597 case 23: /* fulto */
3598 if (!arm_feature(env, ARM_FEATURE_VFP3))
3599 return 1;
5500b06c 3600 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3601 break;
b7bcbe95 3602 case 24: /* ftoui */
5500b06c 3603 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3604 break;
3605 case 25: /* ftouiz */
5500b06c 3606 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3607 break;
3608 case 26: /* ftosi */
5500b06c 3609 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3610 break;
3611 case 27: /* ftosiz */
5500b06c 3612 gen_vfp_tosiz(dp, 0);
b7bcbe95 3613 break;
9ee6e8bb
PB
3614 case 28: /* ftosh */
3615 if (!arm_feature(env, ARM_FEATURE_VFP3))
3616 return 1;
5500b06c 3617 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3618 break;
3619 case 29: /* ftosl */
3620 if (!arm_feature(env, ARM_FEATURE_VFP3))
3621 return 1;
5500b06c 3622 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3623 break;
3624 case 30: /* ftouh */
3625 if (!arm_feature(env, ARM_FEATURE_VFP3))
3626 return 1;
5500b06c 3627 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3628 break;
3629 case 31: /* ftoul */
3630 if (!arm_feature(env, ARM_FEATURE_VFP3))
3631 return 1;
5500b06c 3632 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3633 break;
b7bcbe95 3634 default: /* undefined */
b7bcbe95
FB
3635 return 1;
3636 }
3637 break;
3638 default: /* undefined */
b7bcbe95
FB
3639 return 1;
3640 }
3641
3642 /* Write back the result. */
239c20c7
WN
3643 if (op == 15 && (rn >= 8 && rn <= 11)) {
3644 /* Comparison, do nothing. */
3645 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3646 (rn & 0x1e) == 0x6)) {
3647 /* VCVT double to int: always integer result.
3648 * VCVT double to half precision is always a single
3649 * precision result.
3650 */
b7bcbe95 3651 gen_mov_vreg_F0(0, rd);
239c20c7 3652 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3653 /* conversion */
3654 gen_mov_vreg_F0(!dp, rd);
239c20c7 3655 } else {
b7bcbe95 3656 gen_mov_vreg_F0(dp, rd);
239c20c7 3657 }
b7bcbe95
FB
3658
3659 /* break out of the loop if we have finished */
3660 if (veclen == 0)
3661 break;
3662
3663 if (op == 15 && delta_m == 0) {
3664 /* single source one-many */
3665 while (veclen--) {
3666 rd = ((rd + delta_d) & (bank_mask - 1))
3667 | (rd & bank_mask);
3668 gen_mov_vreg_F0(dp, rd);
3669 }
3670 break;
3671 }
3672 /* Setup the next operands. */
3673 veclen--;
3674 rd = ((rd + delta_d) & (bank_mask - 1))
3675 | (rd & bank_mask);
3676
3677 if (op == 15) {
3678 /* One source operand. */
3679 rm = ((rm + delta_m) & (bank_mask - 1))
3680 | (rm & bank_mask);
3681 gen_mov_F0_vreg(dp, rm);
3682 } else {
3683 /* Two source operands. */
3684 rn = ((rn + delta_d) & (bank_mask - 1))
3685 | (rn & bank_mask);
3686 gen_mov_F0_vreg(dp, rn);
3687 if (delta_m) {
3688 rm = ((rm + delta_m) & (bank_mask - 1))
3689 | (rm & bank_mask);
3690 gen_mov_F1_vreg(dp, rm);
3691 }
3692 }
3693 }
3694 }
3695 break;
3696 case 0xc:
3697 case 0xd:
8387da81 3698 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3699 /* two-register transfer */
3700 rn = (insn >> 16) & 0xf;
3701 rd = (insn >> 12) & 0xf;
3702 if (dp) {
9ee6e8bb
PB
3703 VFP_DREG_M(rm, insn);
3704 } else {
3705 rm = VFP_SREG_M(insn);
3706 }
b7bcbe95 3707
18c9b560 3708 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3709 /* vfp->arm */
3710 if (dp) {
4373f3ce
PB
3711 gen_mov_F0_vreg(0, rm * 2);
3712 tmp = gen_vfp_mrs();
3713 store_reg(s, rd, tmp);
3714 gen_mov_F0_vreg(0, rm * 2 + 1);
3715 tmp = gen_vfp_mrs();
3716 store_reg(s, rn, tmp);
b7bcbe95
FB
3717 } else {
3718 gen_mov_F0_vreg(0, rm);
4373f3ce 3719 tmp = gen_vfp_mrs();
8387da81 3720 store_reg(s, rd, tmp);
b7bcbe95 3721 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3722 tmp = gen_vfp_mrs();
8387da81 3723 store_reg(s, rn, tmp);
b7bcbe95
FB
3724 }
3725 } else {
3726 /* arm->vfp */
3727 if (dp) {
4373f3ce
PB
3728 tmp = load_reg(s, rd);
3729 gen_vfp_msr(tmp);
3730 gen_mov_vreg_F0(0, rm * 2);
3731 tmp = load_reg(s, rn);
3732 gen_vfp_msr(tmp);
3733 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3734 } else {
8387da81 3735 tmp = load_reg(s, rd);
4373f3ce 3736 gen_vfp_msr(tmp);
b7bcbe95 3737 gen_mov_vreg_F0(0, rm);
8387da81 3738 tmp = load_reg(s, rn);
4373f3ce 3739 gen_vfp_msr(tmp);
b7bcbe95
FB
3740 gen_mov_vreg_F0(0, rm + 1);
3741 }
3742 }
3743 } else {
3744 /* Load/store */
3745 rn = (insn >> 16) & 0xf;
3746 if (dp)
9ee6e8bb 3747 VFP_DREG_D(rd, insn);
b7bcbe95 3748 else
9ee6e8bb 3749 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3750 if ((insn & 0x01200000) == 0x01000000) {
3751 /* Single load/store */
3752 offset = (insn & 0xff) << 2;
3753 if ((insn & (1 << 23)) == 0)
3754 offset = -offset;
934814f1
PM
3755 if (s->thumb && rn == 15) {
3756 /* This is actually UNPREDICTABLE */
3757 addr = tcg_temp_new_i32();
3758 tcg_gen_movi_i32(addr, s->pc & ~2);
3759 } else {
3760 addr = load_reg(s, rn);
3761 }
312eea9f 3762 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3763 if (insn & (1 << 20)) {
312eea9f 3764 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3765 gen_mov_vreg_F0(dp, rd);
3766 } else {
3767 gen_mov_F0_vreg(dp, rd);
312eea9f 3768 gen_vfp_st(s, dp, addr);
b7bcbe95 3769 }
7d1b0095 3770 tcg_temp_free_i32(addr);
b7bcbe95
FB
3771 } else {
3772 /* load/store multiple */
934814f1 3773 int w = insn & (1 << 21);
b7bcbe95
FB
3774 if (dp)
3775 n = (insn >> 1) & 0x7f;
3776 else
3777 n = insn & 0xff;
3778
934814f1
PM
3779 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3780 /* P == U , W == 1 => UNDEF */
3781 return 1;
3782 }
3783 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3784 /* UNPREDICTABLE cases for bad immediates: we choose to
3785 * UNDEF to avoid generating huge numbers of TCG ops
3786 */
3787 return 1;
3788 }
3789 if (rn == 15 && w) {
3790 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3791 return 1;
3792 }
3793
3794 if (s->thumb && rn == 15) {
3795 /* This is actually UNPREDICTABLE */
3796 addr = tcg_temp_new_i32();
3797 tcg_gen_movi_i32(addr, s->pc & ~2);
3798 } else {
3799 addr = load_reg(s, rn);
3800 }
b7bcbe95 3801 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3802 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3803
3804 if (dp)
3805 offset = 8;
3806 else
3807 offset = 4;
3808 for (i = 0; i < n; i++) {
18c9b560 3809 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3810 /* load */
312eea9f 3811 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3812 gen_mov_vreg_F0(dp, rd + i);
3813 } else {
3814 /* store */
3815 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3816 gen_vfp_st(s, dp, addr);
b7bcbe95 3817 }
312eea9f 3818 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3819 }
934814f1 3820 if (w) {
b7bcbe95
FB
3821 /* writeback */
3822 if (insn & (1 << 24))
3823 offset = -offset * n;
3824 else if (dp && (insn & 1))
3825 offset = 4;
3826 else
3827 offset = 0;
3828
3829 if (offset != 0)
312eea9f
FN
3830 tcg_gen_addi_i32(addr, addr, offset);
3831 store_reg(s, rn, addr);
3832 } else {
7d1b0095 3833 tcg_temp_free_i32(addr);
b7bcbe95
FB
3834 }
3835 }
3836 }
3837 break;
3838 default:
3839 /* Should never happen. */
3840 return 1;
3841 }
3842 return 0;
3843}
3844
0a2461fa 3845static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3846{
6e256c93
FB
3847 TranslationBlock *tb;
3848
3849 tb = s->tb;
3850 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3851 tcg_gen_goto_tb(n);
eaed129d 3852 gen_set_pc_im(s, dest);
8cfd0495 3853 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3854 } else {
eaed129d 3855 gen_set_pc_im(s, dest);
57fec1fe 3856 tcg_gen_exit_tb(0);
6e256c93 3857 }
c53be334
FB
3858}
3859
8aaca4c0
FB
3860static inline void gen_jmp (DisasContext *s, uint32_t dest)
3861{
551bd27f 3862 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3863 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3864 if (s->thumb)
d9ba4830
PB
3865 dest |= 1;
3866 gen_bx_im(s, dest);
8aaca4c0 3867 } else {
6e256c93 3868 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3869 s->is_jmp = DISAS_TB_JUMP;
3870 }
3871}
3872
39d5492a 3873static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3874{
ee097184 3875 if (x)
d9ba4830 3876 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3877 else
d9ba4830 3878 gen_sxth(t0);
ee097184 3879 if (y)
d9ba4830 3880 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3881 else
d9ba4830
PB
3882 gen_sxth(t1);
3883 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3884}
3885
3886/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3887static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3888 uint32_t mask;
3889
3890 mask = 0;
3891 if (flags & (1 << 0))
3892 mask |= 0xff;
3893 if (flags & (1 << 1))
3894 mask |= 0xff00;
3895 if (flags & (1 << 2))
3896 mask |= 0xff0000;
3897 if (flags & (1 << 3))
3898 mask |= 0xff000000;
9ee6e8bb 3899
2ae23e75 3900 /* Mask out undefined bits. */
9ee6e8bb 3901 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3902 if (!arm_feature(env, ARM_FEATURE_V4T))
3903 mask &= ~CPSR_T;
3904 if (!arm_feature(env, ARM_FEATURE_V5))
3905 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3906 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3907 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3908 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3909 mask &= ~CPSR_IT;
9ee6e8bb 3910 /* Mask out execution state bits. */
2ae23e75 3911 if (!spsr)
e160c51c 3912 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3913 /* Mask out privileged bits. */
3914 if (IS_USER(s))
9ee6e8bb 3915 mask &= CPSR_USER;
b5ff1b31
FB
3916 return mask;
3917}
3918
2fbac54b 3919/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3920static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3921{
39d5492a 3922 TCGv_i32 tmp;
b5ff1b31
FB
3923 if (spsr) {
3924 /* ??? This is also undefined in system mode. */
3925 if (IS_USER(s))
3926 return 1;
d9ba4830
PB
3927
3928 tmp = load_cpu_field(spsr);
3929 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3930 tcg_gen_andi_i32(t0, t0, mask);
3931 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3932 store_cpu_field(tmp, spsr);
b5ff1b31 3933 } else {
2fbac54b 3934 gen_set_cpsr(t0, mask);
b5ff1b31 3935 }
7d1b0095 3936 tcg_temp_free_i32(t0);
b5ff1b31
FB
3937 gen_lookup_tb(s);
3938 return 0;
3939}
3940
2fbac54b
FN
3941/* Returns nonzero if access to the PSR is not permitted. */
3942static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3943{
39d5492a 3944 TCGv_i32 tmp;
7d1b0095 3945 tmp = tcg_temp_new_i32();
2fbac54b
FN
3946 tcg_gen_movi_i32(tmp, val);
3947 return gen_set_psr(s, mask, spsr, tmp);
3948}
3949
e9bb4aa9 3950/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3951static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3952{
39d5492a 3953 TCGv_i32 tmp;
e9bb4aa9 3954 store_reg(s, 15, pc);
d9ba4830
PB
3955 tmp = load_cpu_field(spsr);
3956 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3957 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3958 s->is_jmp = DISAS_UPDATE;
3959}
3960
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    /* Restore the full CPSR first, then the return address; both
     * temporaries are consumed (cpsr freed here, pc by store_reg).
     */
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    /* The CPSR write may change execution state, so end the TB.  */
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3969
9ee6e8bb
PB
3970static void gen_nop_hint(DisasContext *s, int val)
3971{
3972 switch (val) {
3973 case 3: /* wfi */
eaed129d 3974 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
3975 s->is_jmp = DISAS_WFI;
3976 break;
3977 case 2: /* wfe */
72c1d3af
PM
3978 gen_set_pc_im(s, s->pc);
3979 s->is_jmp = DISAS_WFE;
3980 break;
9ee6e8bb 3981 case 4: /* sev */
12b10571
MR
3982 case 5: /* sevl */
3983 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3984 default: /* nop */
3985 break;
3986 }
3987}
99c475ab 3988
/* Shorthand operand list: cpu_V0, cpu_V0, cpu_V1.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

39d5492a 3991static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3992{
3993 switch (size) {
dd8fbd78
FN
3994 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3995 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3996 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3997 default: abort();
9ee6e8bb 3998 }
9ee6e8bb
PB
3999}
4000
39d5492a 4001static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4002{
4003 switch (size) {
dd8fbd78
FN
4004 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4005 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4006 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4007 default: return;
4008 }
4009}
4010
/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4016
/* Dispatch to the env-taking Neon integer helper for the element size
 * 'size' (0..2) and signedness 'u' (0 = signed, 1 = unsigned) captured
 * from the call site, operating on the call-site temporaries tmp/tmp2.
 * The default case makes the enclosing function return 1 (UNDEF).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env:
 * selects the helper by the call-site 'size' and 'u' values and applies
 * it to tmp/tmp2.  The default case makes the enclosing function
 * return 1 (UNDEF).
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4062
39d5492a 4063static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4064{
39d5492a 4065 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4066 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4067 return tmp;
9ee6e8bb
PB
4068}
4069
39d5492a 4070static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4071{
dd8fbd78 4072 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4073 tcg_temp_free_i32(var);
9ee6e8bb
PB
4074}
4075
39d5492a 4076static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4077{
39d5492a 4078 TCGv_i32 tmp;
9ee6e8bb 4079 if (size == 1) {
0fad6efc
PM
4080 tmp = neon_load_reg(reg & 7, reg >> 4);
4081 if (reg & 8) {
dd8fbd78 4082 gen_neon_dup_high16(tmp);
0fad6efc
PM
4083 } else {
4084 gen_neon_dup_low16(tmp);
dd8fbd78 4085 }
0fad6efc
PM
4086 } else {
4087 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4088 }
dd8fbd78 4089 return tmp;
9ee6e8bb
PB
4090}
4091
02acedf9 4092static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4093{
39d5492a 4094 TCGv_i32 tmp, tmp2;
600b828c 4095 if (!q && size == 2) {
02acedf9
PM
4096 return 1;
4097 }
4098 tmp = tcg_const_i32(rd);
4099 tmp2 = tcg_const_i32(rm);
4100 if (q) {
4101 switch (size) {
4102 case 0:
02da0b2d 4103 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4104 break;
4105 case 1:
02da0b2d 4106 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4107 break;
4108 case 2:
02da0b2d 4109 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4110 break;
4111 default:
4112 abort();
4113 }
4114 } else {
4115 switch (size) {
4116 case 0:
02da0b2d 4117 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4118 break;
4119 case 1:
02da0b2d 4120 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4121 break;
4122 default:
4123 abort();
4124 }
4125 }
4126 tcg_temp_free_i32(tmp);
4127 tcg_temp_free_i32(tmp2);
4128 return 0;
19457615
FN
4129}
4130
d68a6f3a 4131static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4132{
39d5492a 4133 TCGv_i32 tmp, tmp2;
600b828c 4134 if (!q && size == 2) {
d68a6f3a
PM
4135 return 1;
4136 }
4137 tmp = tcg_const_i32(rd);
4138 tmp2 = tcg_const_i32(rm);
4139 if (q) {
4140 switch (size) {
4141 case 0:
02da0b2d 4142 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4143 break;
4144 case 1:
02da0b2d 4145 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4146 break;
4147 case 2:
02da0b2d 4148 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4149 break;
4150 default:
4151 abort();
4152 }
4153 } else {
4154 switch (size) {
4155 case 0:
02da0b2d 4156 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4157 break;
4158 case 1:
02da0b2d 4159 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4160 break;
4161 default:
4162 abort();
4163 }
4164 }
4165 tcg_temp_free_i32(tmp);
4166 tcg_temp_free_i32(tmp2);
4167 return 0;
19457615
FN
4168}
4169
39d5492a 4170static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4171{
39d5492a 4172 TCGv_i32 rd, tmp;
19457615 4173
7d1b0095
PM
4174 rd = tcg_temp_new_i32();
4175 tmp = tcg_temp_new_i32();
19457615
FN
4176
4177 tcg_gen_shli_i32(rd, t0, 8);
4178 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4179 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4180 tcg_gen_or_i32(rd, rd, tmp);
4181
4182 tcg_gen_shri_i32(t1, t1, 8);
4183 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4184 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4185 tcg_gen_or_i32(t1, t1, tmp);
4186 tcg_gen_mov_i32(t0, rd);
4187
7d1b0095
PM
4188 tcg_temp_free_i32(tmp);
4189 tcg_temp_free_i32(rd);
19457615
FN
4190}
4191
39d5492a 4192static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4193{
39d5492a 4194 TCGv_i32 rd, tmp;
19457615 4195
7d1b0095
PM
4196 rd = tcg_temp_new_i32();
4197 tmp = tcg_temp_new_i32();
19457615
FN
4198
4199 tcg_gen_shli_i32(rd, t0, 16);
4200 tcg_gen_andi_i32(tmp, t1, 0xffff);
4201 tcg_gen_or_i32(rd, rd, tmp);
4202 tcg_gen_shri_i32(t1, t1, 16);
4203 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4204 tcg_gen_or_i32(t1, t1, tmp);
4205 tcg_gen_mov_i32(t0, rd);
4206
7d1b0095
PM
4207 tcg_temp_free_i32(tmp);
4208 tcg_temp_free_i32(rd);
19457615
FN
4209}
4210
4211
9ee6e8bb
PB
/* Descriptors for the Neon "load/store multiple structures" encodings,
 * indexed by the insn's op field (valid values 0..10):
 *   nregs      - number of D registers transferred
 *   interleave - element interleaving factor in memory
 *   spacing    - D-register index spacing between structure members
 * The table is only ever read, so declare it const.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4229
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        /* Bytes advanced per element: element size times interleave. */
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved layouts, restart from the base address and
             * offset to this register's first element.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Smaller elements: assemble each 32-bit pass word from
                 * the individual (possibly strided) element accesses.
                 */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the existing
                         * register contents at bit position `shift`.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Writeback: rm == 15 means none; rm == 13 post-increments by the
     * transfer size; any other rm post-increments by register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4558
8f8e3aa4 4559/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4560static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4561{
4562 tcg_gen_and_i32(t, t, c);
f669df27 4563 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4564 tcg_gen_or_i32(dest, t, f);
4565}
4566
39d5492a 4567static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4568{
4569 switch (size) {
4570 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4571 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4572 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4573 default: abort();
4574 }
4575}
4576
39d5492a 4577static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4578{
4579 switch (size) {
02da0b2d
PM
4580 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4581 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4582 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4583 default: abort();
4584 }
4585}
4586
39d5492a 4587static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4588{
4589 switch (size) {
02da0b2d
PM
4590 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4591 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4592 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4593 default: abort();
4594 }
4595}
4596
39d5492a 4597static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4598{
4599 switch (size) {
02da0b2d
PM
4600 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4601 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4602 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4603 default: abort();
4604 }
4605}
4606
39d5492a 4607static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4608 int q, int u)
4609{
4610 if (q) {
4611 if (u) {
4612 switch (size) {
4613 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4614 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4615 default: abort();
4616 }
4617 } else {
4618 switch (size) {
4619 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4620 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4621 default: abort();
4622 }
4623 }
4624 } else {
4625 if (u) {
4626 switch (size) {
b408a9b0
CL
4627 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4628 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4629 default: abort();
4630 }
4631 } else {
4632 switch (size) {
4633 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4634 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4635 default: abort();
4636 }
4637 }
4638 }
4639}
4640
39d5492a 4641static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4642{
4643 if (u) {
4644 switch (size) {
4645 case 0: gen_helper_neon_widen_u8(dest, src); break;
4646 case 1: gen_helper_neon_widen_u16(dest, src); break;
4647 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4648 default: abort();
4649 }
4650 } else {
4651 switch (size) {
4652 case 0: gen_helper_neon_widen_s8(dest, src); break;
4653 case 1: gen_helper_neon_widen_s16(dest, src); break;
4654 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4655 default: abort();
4656 }
4657 }
7d1b0095 4658 tcg_temp_free_i32(src);
ad69471c
PB
4659}
4660
4661static inline void gen_neon_addl(int size)
4662{
4663 switch (size) {
4664 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4665 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4666 case 2: tcg_gen_add_i64(CPU_V001); break;
4667 default: abort();
4668 }
4669}
4670
4671static inline void gen_neon_subl(int size)
4672{
4673 switch (size) {
4674 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4675 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4676 case 2: tcg_gen_sub_i64(CPU_V001); break;
4677 default: abort();
4678 }
4679}
4680
a7812ae4 4681static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4682{
4683 switch (size) {
4684 case 0: gen_helper_neon_negl_u16(var, var); break;
4685 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4686 case 2:
4687 tcg_gen_neg_i64(var, var);
4688 break;
ad69471c
PB
4689 default: abort();
4690 }
4691}
4692
a7812ae4 4693static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4694{
4695 switch (size) {
02da0b2d
PM
4696 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4697 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4698 default: abort();
4699 }
4700}
4701
39d5492a
PM
4702static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4703 int size, int u)
ad69471c 4704{
a7812ae4 4705 TCGv_i64 tmp;
ad69471c
PB
4706
4707 switch ((size << 1) | u) {
4708 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4709 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4710 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4711 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4712 case 4:
4713 tmp = gen_muls_i64_i32(a, b);
4714 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4715 tcg_temp_free_i64(tmp);
ad69471c
PB
4716 break;
4717 case 5:
4718 tmp = gen_mulu_i64_i32(a, b);
4719 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4720 tcg_temp_free_i64(tmp);
ad69471c
PB
4721 break;
4722 default: abort();
4723 }
c6067f04
CL
4724
4725 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4726 Don't forget to clean them now. */
4727 if (size < 2) {
7d1b0095
PM
4728 tcg_temp_free_i32(a);
4729 tcg_temp_free_i32(b);
c6067f04 4730 }
ad69471c
PB
4731}
4732
39d5492a
PM
4733static void gen_neon_narrow_op(int op, int u, int size,
4734 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4735{
4736 if (op) {
4737 if (u) {
4738 gen_neon_unarrow_sats(size, dest, src);
4739 } else {
4740 gen_neon_narrow(size, dest, src);
4741 }
4742 } else {
4743 if (u) {
4744 gen_neon_narrow_satu(size, dest, src);
4745 } else {
4746 gen_neon_narrow_sats(size, dest, src);
4747 }
4748 }
4749}
4750
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Per-op mask of permitted size values: bit n set means size n is
 * allowed.  disas_neon_data_insn() tests (1 << size) against this and
 * UNDEFs on a clear bit, so 0x7 = sizes 0-2, 0xf = sizes 0-3, etc.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4822
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 * Values with no name here (e.g. 3, 29) are unallocated encodings;
 * they UNDEF via neon_2rm_sizes[] having no size bits set for them.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4889
4890static int neon_2rm_is_float_op(int op)
4891{
4892 /* Return true if this neon 2reg-misc op is float-to-float */
4893 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 4894 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
4895 op == NEON_2RM_VRINTM ||
4896 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 4897 op >= NEON_2RM_VRECPE_F);
600b828c
PM
4898}
4899
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (So 0x7 = sizes 0-2, 0x4 = size 2 only, 0x1 = size 0 only,
 * 0x2 = size 1 only.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4968
9ee6e8bb
PB
4969/* Translate a NEON data processing instruction. Return nonzero if the
4970 instruction is invalid.
ad69471c
PB
4971 We process data in a mixture of 32-bit and 64-bit chunks.
4972 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4973
0ecb72a5 4974static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4975{
4976 int op;
4977 int q;
4978 int rd, rn, rm;
4979 int size;
4980 int shift;
4981 int pass;
4982 int count;
4983 int pairwise;
4984 int u;
ca9a32e4 4985 uint32_t imm, mask;
39d5492a 4986 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4987 TCGv_i64 tmp64;
9ee6e8bb 4988
2c7ffc41
PM
4989 /* FIXME: this access check should not take precedence over UNDEF
4990 * for invalid encodings; we will generate incorrect syndrome information
4991 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4992 */
4993 if (!s->cpacr_fpen) {
4994 gen_exception_insn(s, 4, EXCP_UDEF,
4995 syn_fp_access_trap(1, 0xe, s->thumb));
4996 return 0;
4997 }
4998
5df8bac1 4999 if (!s->vfp_enabled)
9ee6e8bb
PB
5000 return 1;
5001 q = (insn & (1 << 6)) != 0;
5002 u = (insn >> 24) & 1;
5003 VFP_DREG_D(rd, insn);
5004 VFP_DREG_N(rn, insn);
5005 VFP_DREG_M(rm, insn);
5006 size = (insn >> 20) & 3;
5007 if ((insn & (1 << 23)) == 0) {
5008 /* Three register same length. */
5009 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5010 /* Catch invalid op and bad size combinations: UNDEF */
5011 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5012 return 1;
5013 }
25f84f79
PM
5014 /* All insns of this form UNDEF for either this condition or the
5015 * superset of cases "Q==1"; we catch the latter later.
5016 */
5017 if (q && ((rd | rn | rm) & 1)) {
5018 return 1;
5019 }
f1ecb913
AB
5020 /*
5021 * The SHA-1/SHA-256 3-register instructions require special treatment
5022 * here, as their size field is overloaded as an op type selector, and
5023 * they all consume their input in a single pass.
5024 */
5025 if (op == NEON_3R_SHA) {
5026 if (!q) {
5027 return 1;
5028 }
5029 if (!u) { /* SHA-1 */
5030 if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
5031 return 1;
5032 }
5033 tmp = tcg_const_i32(rd);
5034 tmp2 = tcg_const_i32(rn);
5035 tmp3 = tcg_const_i32(rm);
5036 tmp4 = tcg_const_i32(size);
5037 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5038 tcg_temp_free_i32(tmp4);
5039 } else { /* SHA-256 */
5040 if (!arm_feature(env, ARM_FEATURE_V8_SHA256) || size == 3) {
5041 return 1;
5042 }
5043 tmp = tcg_const_i32(rd);
5044 tmp2 = tcg_const_i32(rn);
5045 tmp3 = tcg_const_i32(rm);
5046 switch (size) {
5047 case 0:
5048 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5049 break;
5050 case 1:
5051 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5052 break;
5053 case 2:
5054 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5055 break;
5056 }
5057 }
5058 tcg_temp_free_i32(tmp);
5059 tcg_temp_free_i32(tmp2);
5060 tcg_temp_free_i32(tmp3);
5061 return 0;
5062 }
62698be3
PM
5063 if (size == 3 && op != NEON_3R_LOGIC) {
5064 /* 64-bit element instructions. */
9ee6e8bb 5065 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5066 neon_load_reg64(cpu_V0, rn + pass);
5067 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5068 switch (op) {
62698be3 5069 case NEON_3R_VQADD:
9ee6e8bb 5070 if (u) {
02da0b2d
PM
5071 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5072 cpu_V0, cpu_V1);
2c0262af 5073 } else {
02da0b2d
PM
5074 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5075 cpu_V0, cpu_V1);
2c0262af 5076 }
9ee6e8bb 5077 break;
62698be3 5078 case NEON_3R_VQSUB:
9ee6e8bb 5079 if (u) {
02da0b2d
PM
5080 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5081 cpu_V0, cpu_V1);
ad69471c 5082 } else {
02da0b2d
PM
5083 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5084 cpu_V0, cpu_V1);
ad69471c
PB
5085 }
5086 break;
62698be3 5087 case NEON_3R_VSHL:
ad69471c
PB
5088 if (u) {
5089 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5090 } else {
5091 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5092 }
5093 break;
62698be3 5094 case NEON_3R_VQSHL:
ad69471c 5095 if (u) {
02da0b2d
PM
5096 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5097 cpu_V1, cpu_V0);
ad69471c 5098 } else {
02da0b2d
PM
5099 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5100 cpu_V1, cpu_V0);
ad69471c
PB
5101 }
5102 break;
62698be3 5103 case NEON_3R_VRSHL:
ad69471c
PB
5104 if (u) {
5105 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5106 } else {
ad69471c
PB
5107 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5108 }
5109 break;
62698be3 5110 case NEON_3R_VQRSHL:
ad69471c 5111 if (u) {
02da0b2d
PM
5112 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5113 cpu_V1, cpu_V0);
ad69471c 5114 } else {
02da0b2d
PM
5115 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5116 cpu_V1, cpu_V0);
1e8d4eec 5117 }
9ee6e8bb 5118 break;
62698be3 5119 case NEON_3R_VADD_VSUB:
9ee6e8bb 5120 if (u) {
ad69471c 5121 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5122 } else {
ad69471c 5123 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5124 }
5125 break;
5126 default:
5127 abort();
2c0262af 5128 }
ad69471c 5129 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5130 }
9ee6e8bb 5131 return 0;
2c0262af 5132 }
25f84f79 5133 pairwise = 0;
9ee6e8bb 5134 switch (op) {
62698be3
PM
5135 case NEON_3R_VSHL:
5136 case NEON_3R_VQSHL:
5137 case NEON_3R_VRSHL:
5138 case NEON_3R_VQRSHL:
9ee6e8bb 5139 {
ad69471c
PB
5140 int rtmp;
5141 /* Shift instruction operands are reversed. */
5142 rtmp = rn;
9ee6e8bb 5143 rn = rm;
ad69471c 5144 rm = rtmp;
9ee6e8bb 5145 }
2c0262af 5146 break;
25f84f79
PM
5147 case NEON_3R_VPADD:
5148 if (u) {
5149 return 1;
5150 }
5151 /* Fall through */
62698be3
PM
5152 case NEON_3R_VPMAX:
5153 case NEON_3R_VPMIN:
9ee6e8bb 5154 pairwise = 1;
2c0262af 5155 break;
25f84f79
PM
5156 case NEON_3R_FLOAT_ARITH:
5157 pairwise = (u && size < 2); /* if VPADD (float) */
5158 break;
5159 case NEON_3R_FLOAT_MINMAX:
5160 pairwise = u; /* if VPMIN/VPMAX (float) */
5161 break;
5162 case NEON_3R_FLOAT_CMP:
5163 if (!u && size) {
5164 /* no encoding for U=0 C=1x */
5165 return 1;
5166 }
5167 break;
5168 case NEON_3R_FLOAT_ACMP:
5169 if (!u) {
5170 return 1;
5171 }
5172 break;
505935fc
WN
5173 case NEON_3R_FLOAT_MISC:
5174 /* VMAXNM/VMINNM in ARMv8 */
5175 if (u && !arm_feature(env, ARM_FEATURE_V8)) {
25f84f79
PM
5176 return 1;
5177 }
2c0262af 5178 break;
25f84f79
PM
5179 case NEON_3R_VMUL:
5180 if (u && (size != 0)) {
5181 /* UNDEF on invalid size for polynomial subcase */
5182 return 1;
5183 }
2c0262af 5184 break;
da97f52c
PM
5185 case NEON_3R_VFM:
5186 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
5187 return 1;
5188 }
5189 break;
9ee6e8bb 5190 default:
2c0262af 5191 break;
9ee6e8bb 5192 }
dd8fbd78 5193
25f84f79
PM
5194 if (pairwise && q) {
5195 /* All the pairwise insns UNDEF if Q is set */
5196 return 1;
5197 }
5198
9ee6e8bb
PB
5199 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5200
5201 if (pairwise) {
5202 /* Pairwise. */
a5a14945
JR
5203 if (pass < 1) {
5204 tmp = neon_load_reg(rn, 0);
5205 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5206 } else {
a5a14945
JR
5207 tmp = neon_load_reg(rm, 0);
5208 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5209 }
5210 } else {
5211 /* Elementwise. */
dd8fbd78
FN
5212 tmp = neon_load_reg(rn, pass);
5213 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5214 }
5215 switch (op) {
62698be3 5216 case NEON_3R_VHADD:
9ee6e8bb
PB
5217 GEN_NEON_INTEGER_OP(hadd);
5218 break;
62698be3 5219 case NEON_3R_VQADD:
02da0b2d 5220 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5221 break;
62698be3 5222 case NEON_3R_VRHADD:
9ee6e8bb 5223 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5224 break;
62698be3 5225 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5226 switch ((u << 2) | size) {
5227 case 0: /* VAND */
dd8fbd78 5228 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5229 break;
5230 case 1: /* BIC */
f669df27 5231 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5232 break;
5233 case 2: /* VORR */
dd8fbd78 5234 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5235 break;
5236 case 3: /* VORN */
f669df27 5237 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5238 break;
5239 case 4: /* VEOR */
dd8fbd78 5240 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5241 break;
5242 case 5: /* VBSL */
dd8fbd78
FN
5243 tmp3 = neon_load_reg(rd, pass);
5244 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5245 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5246 break;
5247 case 6: /* VBIT */
dd8fbd78
FN
5248 tmp3 = neon_load_reg(rd, pass);
5249 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5250 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5251 break;
5252 case 7: /* VBIF */
dd8fbd78
FN
5253 tmp3 = neon_load_reg(rd, pass);
5254 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5255 tcg_temp_free_i32(tmp3);
9ee6e8bb 5256 break;
2c0262af
FB
5257 }
5258 break;
62698be3 5259 case NEON_3R_VHSUB:
9ee6e8bb
PB
5260 GEN_NEON_INTEGER_OP(hsub);
5261 break;
62698be3 5262 case NEON_3R_VQSUB:
02da0b2d 5263 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5264 break;
62698be3 5265 case NEON_3R_VCGT:
9ee6e8bb
PB
5266 GEN_NEON_INTEGER_OP(cgt);
5267 break;
62698be3 5268 case NEON_3R_VCGE:
9ee6e8bb
PB
5269 GEN_NEON_INTEGER_OP(cge);
5270 break;
62698be3 5271 case NEON_3R_VSHL:
ad69471c 5272 GEN_NEON_INTEGER_OP(shl);
2c0262af 5273 break;
62698be3 5274 case NEON_3R_VQSHL:
02da0b2d 5275 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5276 break;
62698be3 5277 case NEON_3R_VRSHL:
ad69471c 5278 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5279 break;
62698be3 5280 case NEON_3R_VQRSHL:
02da0b2d 5281 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5282 break;
62698be3 5283 case NEON_3R_VMAX:
9ee6e8bb
PB
5284 GEN_NEON_INTEGER_OP(max);
5285 break;
62698be3 5286 case NEON_3R_VMIN:
9ee6e8bb
PB
5287 GEN_NEON_INTEGER_OP(min);
5288 break;
62698be3 5289 case NEON_3R_VABD:
9ee6e8bb
PB
5290 GEN_NEON_INTEGER_OP(abd);
5291 break;
62698be3 5292 case NEON_3R_VABA:
9ee6e8bb 5293 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5294 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5295 tmp2 = neon_load_reg(rd, pass);
5296 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5297 break;
62698be3 5298 case NEON_3R_VADD_VSUB:
9ee6e8bb 5299 if (!u) { /* VADD */
62698be3 5300 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5301 } else { /* VSUB */
5302 switch (size) {
dd8fbd78
FN
5303 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5304 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5305 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5306 default: abort();
9ee6e8bb
PB
5307 }
5308 }
5309 break;
62698be3 5310 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5311 if (!u) { /* VTST */
5312 switch (size) {
dd8fbd78
FN
5313 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5314 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5315 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5316 default: abort();
9ee6e8bb
PB
5317 }
5318 } else { /* VCEQ */
5319 switch (size) {
dd8fbd78
FN
5320 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5321 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5322 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5323 default: abort();
9ee6e8bb
PB
5324 }
5325 }
5326 break;
62698be3 5327 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5328 switch (size) {
dd8fbd78
FN
5329 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5330 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5331 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5332 default: abort();
9ee6e8bb 5333 }
7d1b0095 5334 tcg_temp_free_i32(tmp2);
dd8fbd78 5335 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5336 if (u) { /* VMLS */
dd8fbd78 5337 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5338 } else { /* VMLA */
dd8fbd78 5339 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5340 }
5341 break;
62698be3 5342 case NEON_3R_VMUL:
9ee6e8bb 5343 if (u) { /* polynomial */
dd8fbd78 5344 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5345 } else { /* Integer */
5346 switch (size) {
dd8fbd78
FN
5347 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5348 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5349 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5350 default: abort();
9ee6e8bb
PB
5351 }
5352 }
5353 break;
62698be3 5354 case NEON_3R_VPMAX:
9ee6e8bb
PB
5355 GEN_NEON_INTEGER_OP(pmax);
5356 break;
62698be3 5357 case NEON_3R_VPMIN:
9ee6e8bb
PB
5358 GEN_NEON_INTEGER_OP(pmin);
5359 break;
62698be3 5360 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5361 if (!u) { /* VQDMULH */
5362 switch (size) {
02da0b2d
PM
5363 case 1:
5364 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5365 break;
5366 case 2:
5367 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5368 break;
62698be3 5369 default: abort();
9ee6e8bb 5370 }
62698be3 5371 } else { /* VQRDMULH */
9ee6e8bb 5372 switch (size) {
02da0b2d
PM
5373 case 1:
5374 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5375 break;
5376 case 2:
5377 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5378 break;
62698be3 5379 default: abort();
9ee6e8bb
PB
5380 }
5381 }
5382 break;
62698be3 5383 case NEON_3R_VPADD:
9ee6e8bb 5384 switch (size) {
dd8fbd78
FN
5385 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5386 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5387 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5388 default: abort();
9ee6e8bb
PB
5389 }
5390 break;
62698be3 5391 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5392 {
5393 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5394 switch ((u << 2) | size) {
5395 case 0: /* VADD */
aa47cfdd
PM
5396 case 4: /* VPADD */
5397 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5398 break;
5399 case 2: /* VSUB */
aa47cfdd 5400 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5401 break;
5402 case 6: /* VABD */
aa47cfdd 5403 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5404 break;
5405 default:
62698be3 5406 abort();
9ee6e8bb 5407 }
aa47cfdd 5408 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5409 break;
aa47cfdd 5410 }
62698be3 5411 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5412 {
5413 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5414 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5415 if (!u) {
7d1b0095 5416 tcg_temp_free_i32(tmp2);
dd8fbd78 5417 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5418 if (size == 0) {
aa47cfdd 5419 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5420 } else {
aa47cfdd 5421 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5422 }
5423 }
aa47cfdd 5424 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5425 break;
aa47cfdd 5426 }
62698be3 5427 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5428 {
5429 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5430 if (!u) {
aa47cfdd 5431 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5432 } else {
aa47cfdd
PM
5433 if (size == 0) {
5434 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5435 } else {
5436 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5437 }
b5ff1b31 5438 }
aa47cfdd 5439 tcg_temp_free_ptr(fpstatus);
2c0262af 5440 break;
aa47cfdd 5441 }
62698be3 5442 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5443 {
5444 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5445 if (size == 0) {
5446 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5447 } else {
5448 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5449 }
5450 tcg_temp_free_ptr(fpstatus);
2c0262af 5451 break;
aa47cfdd 5452 }
62698be3 5453 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5454 {
5455 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5456 if (size == 0) {
f71a2ae5 5457 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5458 } else {
f71a2ae5 5459 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5460 }
5461 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5462 break;
aa47cfdd 5463 }
505935fc
WN
5464 case NEON_3R_FLOAT_MISC:
5465 if (u) {
5466 /* VMAXNM/VMINNM */
5467 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5468 if (size == 0) {
f71a2ae5 5469 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5470 } else {
f71a2ae5 5471 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5472 }
5473 tcg_temp_free_ptr(fpstatus);
5474 } else {
5475 if (size == 0) {
5476 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5477 } else {
5478 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5479 }
5480 }
2c0262af 5481 break;
da97f52c
PM
5482 case NEON_3R_VFM:
5483 {
5484 /* VFMA, VFMS: fused multiply-add */
5485 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5486 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5487 if (size) {
5488 /* VFMS */
5489 gen_helper_vfp_negs(tmp, tmp);
5490 }
5491 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5492 tcg_temp_free_i32(tmp3);
5493 tcg_temp_free_ptr(fpstatus);
5494 break;
5495 }
9ee6e8bb
PB
5496 default:
5497 abort();
2c0262af 5498 }
7d1b0095 5499 tcg_temp_free_i32(tmp2);
dd8fbd78 5500
9ee6e8bb
PB
5501 /* Save the result. For elementwise operations we can put it
5502 straight into the destination register. For pairwise operations
5503 we have to be careful to avoid clobbering the source operands. */
5504 if (pairwise && rd == rm) {
dd8fbd78 5505 neon_store_scratch(pass, tmp);
9ee6e8bb 5506 } else {
dd8fbd78 5507 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5508 }
5509
5510 } /* for pass */
5511 if (pairwise && rd == rm) {
5512 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5513 tmp = neon_load_scratch(pass);
5514 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5515 }
5516 }
ad69471c 5517 /* End of 3 register same size operations. */
9ee6e8bb
PB
5518 } else if (insn & (1 << 4)) {
5519 if ((insn & 0x00380080) != 0) {
5520 /* Two registers and shift. */
5521 op = (insn >> 8) & 0xf;
5522 if (insn & (1 << 7)) {
cc13115b
PM
5523 /* 64-bit shift. */
5524 if (op > 7) {
5525 return 1;
5526 }
9ee6e8bb
PB
5527 size = 3;
5528 } else {
5529 size = 2;
5530 while ((insn & (1 << (size + 19))) == 0)
5531 size--;
5532 }
5533 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5534 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5535 by immediate using the variable shift operations. */
5536 if (op < 8) {
5537 /* Shift by immediate:
5538 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5539 if (q && ((rd | rm) & 1)) {
5540 return 1;
5541 }
5542 if (!u && (op == 4 || op == 6)) {
5543 return 1;
5544 }
9ee6e8bb
PB
5545 /* Right shifts are encoded as N - shift, where N is the
5546 element size in bits. */
5547 if (op <= 4)
5548 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5549 if (size == 3) {
5550 count = q + 1;
5551 } else {
5552 count = q ? 4: 2;
5553 }
5554 switch (size) {
5555 case 0:
5556 imm = (uint8_t) shift;
5557 imm |= imm << 8;
5558 imm |= imm << 16;
5559 break;
5560 case 1:
5561 imm = (uint16_t) shift;
5562 imm |= imm << 16;
5563 break;
5564 case 2:
5565 case 3:
5566 imm = shift;
5567 break;
5568 default:
5569 abort();
5570 }
5571
5572 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5573 if (size == 3) {
5574 neon_load_reg64(cpu_V0, rm + pass);
5575 tcg_gen_movi_i64(cpu_V1, imm);
5576 switch (op) {
5577 case 0: /* VSHR */
5578 case 1: /* VSRA */
5579 if (u)
5580 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5581 else
ad69471c 5582 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5583 break;
ad69471c
PB
5584 case 2: /* VRSHR */
5585 case 3: /* VRSRA */
5586 if (u)
5587 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5588 else
ad69471c 5589 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5590 break;
ad69471c 5591 case 4: /* VSRI */
ad69471c
PB
5592 case 5: /* VSHL, VSLI */
5593 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5594 break;
0322b26e 5595 case 6: /* VQSHLU */
02da0b2d
PM
5596 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5597 cpu_V0, cpu_V1);
ad69471c 5598 break;
0322b26e
PM
5599 case 7: /* VQSHL */
5600 if (u) {
02da0b2d 5601 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5602 cpu_V0, cpu_V1);
5603 } else {
02da0b2d 5604 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5605 cpu_V0, cpu_V1);
5606 }
9ee6e8bb 5607 break;
9ee6e8bb 5608 }
ad69471c
PB
5609 if (op == 1 || op == 3) {
5610 /* Accumulate. */
5371cb81 5611 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5612 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5613 } else if (op == 4 || (op == 5 && u)) {
5614 /* Insert */
923e6509
CL
5615 neon_load_reg64(cpu_V1, rd + pass);
5616 uint64_t mask;
5617 if (shift < -63 || shift > 63) {
5618 mask = 0;
5619 } else {
5620 if (op == 4) {
5621 mask = 0xffffffffffffffffull >> -shift;
5622 } else {
5623 mask = 0xffffffffffffffffull << shift;
5624 }
5625 }
5626 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5627 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5628 }
5629 neon_store_reg64(cpu_V0, rd + pass);
5630 } else { /* size < 3 */
5631 /* Operands in T0 and T1. */
dd8fbd78 5632 tmp = neon_load_reg(rm, pass);
7d1b0095 5633 tmp2 = tcg_temp_new_i32();
dd8fbd78 5634 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5635 switch (op) {
5636 case 0: /* VSHR */
5637 case 1: /* VSRA */
5638 GEN_NEON_INTEGER_OP(shl);
5639 break;
5640 case 2: /* VRSHR */
5641 case 3: /* VRSRA */
5642 GEN_NEON_INTEGER_OP(rshl);
5643 break;
5644 case 4: /* VSRI */
ad69471c
PB
5645 case 5: /* VSHL, VSLI */
5646 switch (size) {
dd8fbd78
FN
5647 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5648 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5649 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5650 default: abort();
ad69471c
PB
5651 }
5652 break;
0322b26e 5653 case 6: /* VQSHLU */
ad69471c 5654 switch (size) {
0322b26e 5655 case 0:
02da0b2d
PM
5656 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5657 tmp, tmp2);
0322b26e
PM
5658 break;
5659 case 1:
02da0b2d
PM
5660 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5661 tmp, tmp2);
0322b26e
PM
5662 break;
5663 case 2:
02da0b2d
PM
5664 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5665 tmp, tmp2);
0322b26e
PM
5666 break;
5667 default:
cc13115b 5668 abort();
ad69471c
PB
5669 }
5670 break;
0322b26e 5671 case 7: /* VQSHL */
02da0b2d 5672 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5673 break;
ad69471c 5674 }
7d1b0095 5675 tcg_temp_free_i32(tmp2);
ad69471c
PB
5676
5677 if (op == 1 || op == 3) {
5678 /* Accumulate. */
dd8fbd78 5679 tmp2 = neon_load_reg(rd, pass);
5371cb81 5680 gen_neon_add(size, tmp, tmp2);
7d1b0095 5681 tcg_temp_free_i32(tmp2);
ad69471c
PB
5682 } else if (op == 4 || (op == 5 && u)) {
5683 /* Insert */
5684 switch (size) {
5685 case 0:
5686 if (op == 4)
ca9a32e4 5687 mask = 0xff >> -shift;
ad69471c 5688 else
ca9a32e4
JR
5689 mask = (uint8_t)(0xff << shift);
5690 mask |= mask << 8;
5691 mask |= mask << 16;
ad69471c
PB
5692 break;
5693 case 1:
5694 if (op == 4)
ca9a32e4 5695 mask = 0xffff >> -shift;
ad69471c 5696 else
ca9a32e4
JR
5697 mask = (uint16_t)(0xffff << shift);
5698 mask |= mask << 16;
ad69471c
PB
5699 break;
5700 case 2:
ca9a32e4
JR
5701 if (shift < -31 || shift > 31) {
5702 mask = 0;
5703 } else {
5704 if (op == 4)
5705 mask = 0xffffffffu >> -shift;
5706 else
5707 mask = 0xffffffffu << shift;
5708 }
ad69471c
PB
5709 break;
5710 default:
5711 abort();
5712 }
dd8fbd78 5713 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5714 tcg_gen_andi_i32(tmp, tmp, mask);
5715 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5716 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5717 tcg_temp_free_i32(tmp2);
ad69471c 5718 }
dd8fbd78 5719 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5720 }
5721 } /* for pass */
5722 } else if (op < 10) {
ad69471c 5723 /* Shift by immediate and narrow:
9ee6e8bb 5724 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5725 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5726 if (rm & 1) {
5727 return 1;
5728 }
9ee6e8bb
PB
5729 shift = shift - (1 << (size + 3));
5730 size++;
92cdfaeb 5731 if (size == 3) {
a7812ae4 5732 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5733 neon_load_reg64(cpu_V0, rm);
5734 neon_load_reg64(cpu_V1, rm + 1);
5735 for (pass = 0; pass < 2; pass++) {
5736 TCGv_i64 in;
5737 if (pass == 0) {
5738 in = cpu_V0;
5739 } else {
5740 in = cpu_V1;
5741 }
ad69471c 5742 if (q) {
0b36f4cd 5743 if (input_unsigned) {
92cdfaeb 5744 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5745 } else {
92cdfaeb 5746 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5747 }
ad69471c 5748 } else {
0b36f4cd 5749 if (input_unsigned) {
92cdfaeb 5750 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5751 } else {
92cdfaeb 5752 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5753 }
ad69471c 5754 }
7d1b0095 5755 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5756 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5757 neon_store_reg(rd, pass, tmp);
5758 } /* for pass */
5759 tcg_temp_free_i64(tmp64);
5760 } else {
5761 if (size == 1) {
5762 imm = (uint16_t)shift;
5763 imm |= imm << 16;
2c0262af 5764 } else {
92cdfaeb
PM
5765 /* size == 2 */
5766 imm = (uint32_t)shift;
5767 }
5768 tmp2 = tcg_const_i32(imm);
5769 tmp4 = neon_load_reg(rm + 1, 0);
5770 tmp5 = neon_load_reg(rm + 1, 1);
5771 for (pass = 0; pass < 2; pass++) {
5772 if (pass == 0) {
5773 tmp = neon_load_reg(rm, 0);
5774 } else {
5775 tmp = tmp4;
5776 }
0b36f4cd
CL
5777 gen_neon_shift_narrow(size, tmp, tmp2, q,
5778 input_unsigned);
92cdfaeb
PM
5779 if (pass == 0) {
5780 tmp3 = neon_load_reg(rm, 1);
5781 } else {
5782 tmp3 = tmp5;
5783 }
0b36f4cd
CL
5784 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5785 input_unsigned);
36aa55dc 5786 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5787 tcg_temp_free_i32(tmp);
5788 tcg_temp_free_i32(tmp3);
5789 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5790 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5791 neon_store_reg(rd, pass, tmp);
5792 } /* for pass */
c6067f04 5793 tcg_temp_free_i32(tmp2);
b75263d6 5794 }
9ee6e8bb 5795 } else if (op == 10) {
cc13115b
PM
5796 /* VSHLL, VMOVL */
5797 if (q || (rd & 1)) {
9ee6e8bb 5798 return 1;
cc13115b 5799 }
ad69471c
PB
5800 tmp = neon_load_reg(rm, 0);
5801 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5802 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5803 if (pass == 1)
5804 tmp = tmp2;
5805
5806 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5807
9ee6e8bb
PB
5808 if (shift != 0) {
5809 /* The shift is less than the width of the source
ad69471c
PB
5810 type, so we can just shift the whole register. */
5811 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5812 /* Widen the result of shift: we need to clear
5813 * the potential overflow bits resulting from
5814 * left bits of the narrow input appearing as
5815 * right bits of left the neighbour narrow
5816 * input. */
ad69471c
PB
5817 if (size < 2 || !u) {
5818 uint64_t imm64;
5819 if (size == 0) {
5820 imm = (0xffu >> (8 - shift));
5821 imm |= imm << 16;
acdf01ef 5822 } else if (size == 1) {
ad69471c 5823 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5824 } else {
5825 /* size == 2 */
5826 imm = 0xffffffff >> (32 - shift);
5827 }
5828 if (size < 2) {
5829 imm64 = imm | (((uint64_t)imm) << 32);
5830 } else {
5831 imm64 = imm;
9ee6e8bb 5832 }
acdf01ef 5833 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5834 }
5835 }
ad69471c 5836 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5837 }
f73534a5 5838 } else if (op >= 14) {
9ee6e8bb 5839 /* VCVT fixed-point. */
cc13115b
PM
5840 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5841 return 1;
5842 }
f73534a5
PM
5843 /* We have already masked out the must-be-1 top bit of imm6,
5844 * hence this 32-shift where the ARM ARM has 64-imm6.
5845 */
5846 shift = 32 - shift;
9ee6e8bb 5847 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5848 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5849 if (!(op & 1)) {
9ee6e8bb 5850 if (u)
5500b06c 5851 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5852 else
5500b06c 5853 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5854 } else {
5855 if (u)
5500b06c 5856 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5857 else
5500b06c 5858 gen_vfp_tosl(0, shift, 1);
2c0262af 5859 }
4373f3ce 5860 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5861 }
5862 } else {
9ee6e8bb
PB
5863 return 1;
5864 }
5865 } else { /* (insn & 0x00380080) == 0 */
5866 int invert;
7d80fee5
PM
5867 if (q && (rd & 1)) {
5868 return 1;
5869 }
9ee6e8bb
PB
5870
5871 op = (insn >> 8) & 0xf;
5872 /* One register and immediate. */
5873 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5874 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5875 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5876 * We choose to not special-case this and will behave as if a
5877 * valid constant encoding of 0 had been given.
5878 */
9ee6e8bb
PB
5879 switch (op) {
5880 case 0: case 1:
5881 /* no-op */
5882 break;
5883 case 2: case 3:
5884 imm <<= 8;
5885 break;
5886 case 4: case 5:
5887 imm <<= 16;
5888 break;
5889 case 6: case 7:
5890 imm <<= 24;
5891 break;
5892 case 8: case 9:
5893 imm |= imm << 16;
5894 break;
5895 case 10: case 11:
5896 imm = (imm << 8) | (imm << 24);
5897 break;
5898 case 12:
8e31209e 5899 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5900 break;
5901 case 13:
5902 imm = (imm << 16) | 0xffff;
5903 break;
5904 case 14:
5905 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5906 if (invert)
5907 imm = ~imm;
5908 break;
5909 case 15:
7d80fee5
PM
5910 if (invert) {
5911 return 1;
5912 }
9ee6e8bb
PB
5913 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5914 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5915 break;
5916 }
5917 if (invert)
5918 imm = ~imm;
5919
9ee6e8bb
PB
5920 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5921 if (op & 1 && op < 12) {
ad69471c 5922 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5923 if (invert) {
5924 /* The immediate value has already been inverted, so
5925 BIC becomes AND. */
ad69471c 5926 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5927 } else {
ad69471c 5928 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5929 }
9ee6e8bb 5930 } else {
ad69471c 5931 /* VMOV, VMVN. */
7d1b0095 5932 tmp = tcg_temp_new_i32();
9ee6e8bb 5933 if (op == 14 && invert) {
a5a14945 5934 int n;
ad69471c
PB
5935 uint32_t val;
5936 val = 0;
9ee6e8bb
PB
5937 for (n = 0; n < 4; n++) {
5938 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5939 val |= 0xff << (n * 8);
9ee6e8bb 5940 }
ad69471c
PB
5941 tcg_gen_movi_i32(tmp, val);
5942 } else {
5943 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5944 }
9ee6e8bb 5945 }
ad69471c 5946 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5947 }
5948 }
e4b3861d 5949 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5950 if (size != 3) {
5951 op = (insn >> 8) & 0xf;
5952 if ((insn & (1 << 6)) == 0) {
5953 /* Three registers of different lengths. */
5954 int src1_wide;
5955 int src2_wide;
5956 int prewiden;
526d0096
PM
5957 /* undefreq: bit 0 : UNDEF if size == 0
5958 * bit 1 : UNDEF if size == 1
5959 * bit 2 : UNDEF if size == 2
5960 * bit 3 : UNDEF if U == 1
5961 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
5962 */
5963 int undefreq;
5964 /* prewiden, src1_wide, src2_wide, undefreq */
5965 static const int neon_3reg_wide[16][4] = {
5966 {1, 0, 0, 0}, /* VADDL */
5967 {1, 1, 0, 0}, /* VADDW */
5968 {1, 0, 0, 0}, /* VSUBL */
5969 {1, 1, 0, 0}, /* VSUBW */
5970 {0, 1, 1, 0}, /* VADDHN */
5971 {0, 0, 0, 0}, /* VABAL */
5972 {0, 1, 1, 0}, /* VSUBHN */
5973 {0, 0, 0, 0}, /* VABDL */
5974 {0, 0, 0, 0}, /* VMLAL */
526d0096 5975 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 5976 {0, 0, 0, 0}, /* VMLSL */
526d0096 5977 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 5978 {0, 0, 0, 0}, /* Integer VMULL */
526d0096
PM
5979 {0, 0, 0, 1}, /* VQDMULL */
5980 {0, 0, 0, 15}, /* Polynomial VMULL */
5981 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5982 };
5983
5984 prewiden = neon_3reg_wide[op][0];
5985 src1_wide = neon_3reg_wide[op][1];
5986 src2_wide = neon_3reg_wide[op][2];
695272dc 5987 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5988
526d0096
PM
5989 if ((undefreq & (1 << size)) ||
5990 ((undefreq & 8) && u)) {
695272dc
PM
5991 return 1;
5992 }
5993 if ((src1_wide && (rn & 1)) ||
5994 (src2_wide && (rm & 1)) ||
5995 (!src2_wide && (rd & 1))) {
ad69471c 5996 return 1;
695272dc 5997 }
ad69471c 5998
9ee6e8bb
PB
5999 /* Avoid overlapping operands. Wide source operands are
6000 always aligned so will never overlap with wide
6001 destinations in problematic ways. */
8f8e3aa4 6002 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6003 tmp = neon_load_reg(rm, 1);
6004 neon_store_scratch(2, tmp);
8f8e3aa4 6005 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6006 tmp = neon_load_reg(rn, 1);
6007 neon_store_scratch(2, tmp);
9ee6e8bb 6008 }
39d5492a 6009 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6010 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6011 if (src1_wide) {
6012 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6013 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6014 } else {
ad69471c 6015 if (pass == 1 && rd == rn) {
dd8fbd78 6016 tmp = neon_load_scratch(2);
9ee6e8bb 6017 } else {
ad69471c
PB
6018 tmp = neon_load_reg(rn, pass);
6019 }
6020 if (prewiden) {
6021 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6022 }
6023 }
ad69471c
PB
6024 if (src2_wide) {
6025 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6026 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6027 } else {
ad69471c 6028 if (pass == 1 && rd == rm) {
dd8fbd78 6029 tmp2 = neon_load_scratch(2);
9ee6e8bb 6030 } else {
ad69471c
PB
6031 tmp2 = neon_load_reg(rm, pass);
6032 }
6033 if (prewiden) {
6034 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6035 }
9ee6e8bb
PB
6036 }
6037 switch (op) {
6038 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6039 gen_neon_addl(size);
9ee6e8bb 6040 break;
79b0e534 6041 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6042 gen_neon_subl(size);
9ee6e8bb
PB
6043 break;
6044 case 5: case 7: /* VABAL, VABDL */
6045 switch ((size << 1) | u) {
ad69471c
PB
6046 case 0:
6047 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6048 break;
6049 case 1:
6050 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6051 break;
6052 case 2:
6053 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6054 break;
6055 case 3:
6056 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6057 break;
6058 case 4:
6059 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6060 break;
6061 case 5:
6062 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6063 break;
9ee6e8bb
PB
6064 default: abort();
6065 }
7d1b0095
PM
6066 tcg_temp_free_i32(tmp2);
6067 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6068 break;
6069 case 8: case 9: case 10: case 11: case 12: case 13:
6070 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6071 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6072 break;
6073 case 14: /* Polynomial VMULL */
e5ca24cb 6074 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6075 tcg_temp_free_i32(tmp2);
6076 tcg_temp_free_i32(tmp);
e5ca24cb 6077 break;
695272dc
PM
6078 default: /* 15 is RESERVED: caught earlier */
6079 abort();
9ee6e8bb 6080 }
ebcd88ce
PM
6081 if (op == 13) {
6082 /* VQDMULL */
6083 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6084 neon_store_reg64(cpu_V0, rd + pass);
6085 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6086 /* Accumulate. */
ebcd88ce 6087 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6088 switch (op) {
4dc064e6
PM
6089 case 10: /* VMLSL */
6090 gen_neon_negl(cpu_V0, size);
6091 /* Fall through */
6092 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6093 gen_neon_addl(size);
9ee6e8bb
PB
6094 break;
6095 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6096 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6097 if (op == 11) {
6098 gen_neon_negl(cpu_V0, size);
6099 }
ad69471c
PB
6100 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6101 break;
9ee6e8bb
PB
6102 default:
6103 abort();
6104 }
ad69471c 6105 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6106 } else if (op == 4 || op == 6) {
6107 /* Narrowing operation. */
7d1b0095 6108 tmp = tcg_temp_new_i32();
79b0e534 6109 if (!u) {
9ee6e8bb 6110 switch (size) {
ad69471c
PB
6111 case 0:
6112 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6113 break;
6114 case 1:
6115 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6116 break;
6117 case 2:
6118 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6119 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6120 break;
9ee6e8bb
PB
6121 default: abort();
6122 }
6123 } else {
6124 switch (size) {
ad69471c
PB
6125 case 0:
6126 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6127 break;
6128 case 1:
6129 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6130 break;
6131 case 2:
6132 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6133 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6134 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6135 break;
9ee6e8bb
PB
6136 default: abort();
6137 }
6138 }
ad69471c
PB
6139 if (pass == 0) {
6140 tmp3 = tmp;
6141 } else {
6142 neon_store_reg(rd, 0, tmp3);
6143 neon_store_reg(rd, 1, tmp);
6144 }
9ee6e8bb
PB
6145 } else {
6146 /* Write back the result. */
ad69471c 6147 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6148 }
6149 }
6150 } else {
3e3326df
PM
6151 /* Two registers and a scalar. NB that for ops of this form
6152 * the ARM ARM labels bit 24 as Q, but it is in our variable
6153 * 'u', not 'q'.
6154 */
6155 if (size == 0) {
6156 return 1;
6157 }
9ee6e8bb 6158 switch (op) {
9ee6e8bb 6159 case 1: /* Float VMLA scalar */
9ee6e8bb 6160 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6161 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6162 if (size == 1) {
6163 return 1;
6164 }
6165 /* fall through */
6166 case 0: /* Integer VMLA scalar */
6167 case 4: /* Integer VMLS scalar */
6168 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6169 case 12: /* VQDMULH scalar */
6170 case 13: /* VQRDMULH scalar */
3e3326df
PM
6171 if (u && ((rd | rn) & 1)) {
6172 return 1;
6173 }
dd8fbd78
FN
6174 tmp = neon_get_scalar(size, rm);
6175 neon_store_scratch(0, tmp);
9ee6e8bb 6176 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6177 tmp = neon_load_scratch(0);
6178 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6179 if (op == 12) {
6180 if (size == 1) {
02da0b2d 6181 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6182 } else {
02da0b2d 6183 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6184 }
6185 } else if (op == 13) {
6186 if (size == 1) {
02da0b2d 6187 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6188 } else {
02da0b2d 6189 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6190 }
6191 } else if (op & 1) {
aa47cfdd
PM
6192 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6193 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6194 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6195 } else {
6196 switch (size) {
dd8fbd78
FN
6197 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6198 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6199 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6200 default: abort();
9ee6e8bb
PB
6201 }
6202 }
7d1b0095 6203 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6204 if (op < 8) {
6205 /* Accumulate. */
dd8fbd78 6206 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6207 switch (op) {
6208 case 0:
dd8fbd78 6209 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6210 break;
6211 case 1:
aa47cfdd
PM
6212 {
6213 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6214 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6215 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6216 break;
aa47cfdd 6217 }
9ee6e8bb 6218 case 4:
dd8fbd78 6219 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6220 break;
6221 case 5:
aa47cfdd
PM
6222 {
6223 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6224 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6225 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6226 break;
aa47cfdd 6227 }
9ee6e8bb
PB
6228 default:
6229 abort();
6230 }
7d1b0095 6231 tcg_temp_free_i32(tmp2);
9ee6e8bb 6232 }
dd8fbd78 6233 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6234 }
6235 break;
9ee6e8bb 6236 case 3: /* VQDMLAL scalar */
9ee6e8bb 6237 case 7: /* VQDMLSL scalar */
9ee6e8bb 6238 case 11: /* VQDMULL scalar */
3e3326df 6239 if (u == 1) {
ad69471c 6240 return 1;
3e3326df
PM
6241 }
6242 /* fall through */
6243 case 2: /* VMLAL sclar */
6244 case 6: /* VMLSL scalar */
6245 case 10: /* VMULL scalar */
6246 if (rd & 1) {
6247 return 1;
6248 }
dd8fbd78 6249 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6250 /* We need a copy of tmp2 because gen_neon_mull
6251 * deletes it during pass 0. */
7d1b0095 6252 tmp4 = tcg_temp_new_i32();
c6067f04 6253 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6254 tmp3 = neon_load_reg(rn, 1);
ad69471c 6255
9ee6e8bb 6256 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6257 if (pass == 0) {
6258 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6259 } else {
dd8fbd78 6260 tmp = tmp3;
c6067f04 6261 tmp2 = tmp4;
9ee6e8bb 6262 }
ad69471c 6263 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6264 if (op != 11) {
6265 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6266 }
9ee6e8bb 6267 switch (op) {
4dc064e6
PM
6268 case 6:
6269 gen_neon_negl(cpu_V0, size);
6270 /* Fall through */
6271 case 2:
ad69471c 6272 gen_neon_addl(size);
9ee6e8bb
PB
6273 break;
6274 case 3: case 7:
ad69471c 6275 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6276 if (op == 7) {
6277 gen_neon_negl(cpu_V0, size);
6278 }
ad69471c 6279 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6280 break;
6281 case 10:
6282 /* no-op */
6283 break;
6284 case 11:
ad69471c 6285 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6286 break;
6287 default:
6288 abort();
6289 }
ad69471c 6290 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6291 }
dd8fbd78 6292
dd8fbd78 6293
9ee6e8bb
PB
6294 break;
6295 default: /* 14 and 15 are RESERVED */
6296 return 1;
6297 }
6298 }
6299 } else { /* size == 3 */
6300 if (!u) {
6301 /* Extract. */
9ee6e8bb 6302 imm = (insn >> 8) & 0xf;
ad69471c
PB
6303
6304 if (imm > 7 && !q)
6305 return 1;
6306
52579ea1
PM
6307 if (q && ((rd | rn | rm) & 1)) {
6308 return 1;
6309 }
6310
ad69471c
PB
6311 if (imm == 0) {
6312 neon_load_reg64(cpu_V0, rn);
6313 if (q) {
6314 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6315 }
ad69471c
PB
6316 } else if (imm == 8) {
6317 neon_load_reg64(cpu_V0, rn + 1);
6318 if (q) {
6319 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6320 }
ad69471c 6321 } else if (q) {
a7812ae4 6322 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6323 if (imm < 8) {
6324 neon_load_reg64(cpu_V0, rn);
a7812ae4 6325 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6326 } else {
6327 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6328 neon_load_reg64(tmp64, rm);
ad69471c
PB
6329 }
6330 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6331 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6332 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6333 if (imm < 8) {
6334 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6335 } else {
ad69471c
PB
6336 neon_load_reg64(cpu_V1, rm + 1);
6337 imm -= 8;
9ee6e8bb 6338 }
ad69471c 6339 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6340 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6341 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6342 tcg_temp_free_i64(tmp64);
ad69471c 6343 } else {
a7812ae4 6344 /* BUGFIX */
ad69471c 6345 neon_load_reg64(cpu_V0, rn);
a7812ae4 6346 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6347 neon_load_reg64(cpu_V1, rm);
a7812ae4 6348 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6349 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6350 }
6351 neon_store_reg64(cpu_V0, rd);
6352 if (q) {
6353 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6354 }
6355 } else if ((insn & (1 << 11)) == 0) {
6356 /* Two register misc. */
6357 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6358 size = (insn >> 18) & 3;
600b828c
PM
6359 /* UNDEF for unknown op values and bad op-size combinations */
6360 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6361 return 1;
6362 }
fc2a9b37
PM
6363 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6364 q && ((rm | rd) & 1)) {
6365 return 1;
6366 }
9ee6e8bb 6367 switch (op) {
600b828c 6368 case NEON_2RM_VREV64:
9ee6e8bb 6369 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6370 tmp = neon_load_reg(rm, pass * 2);
6371 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6372 switch (size) {
dd8fbd78
FN
6373 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6374 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6375 case 2: /* no-op */ break;
6376 default: abort();
6377 }
dd8fbd78 6378 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6379 if (size == 2) {
dd8fbd78 6380 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6381 } else {
9ee6e8bb 6382 switch (size) {
dd8fbd78
FN
6383 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6384 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6385 default: abort();
6386 }
dd8fbd78 6387 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6388 }
6389 }
6390 break;
600b828c
PM
6391 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6392 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6393 for (pass = 0; pass < q + 1; pass++) {
6394 tmp = neon_load_reg(rm, pass * 2);
6395 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6396 tmp = neon_load_reg(rm, pass * 2 + 1);
6397 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6398 switch (size) {
6399 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6400 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6401 case 2: tcg_gen_add_i64(CPU_V001); break;
6402 default: abort();
6403 }
600b828c 6404 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6405 /* Accumulate. */
ad69471c
PB
6406 neon_load_reg64(cpu_V1, rd + pass);
6407 gen_neon_addl(size);
9ee6e8bb 6408 }
ad69471c 6409 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6410 }
6411 break;
600b828c 6412 case NEON_2RM_VTRN:
9ee6e8bb 6413 if (size == 2) {
a5a14945 6414 int n;
9ee6e8bb 6415 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6416 tmp = neon_load_reg(rm, n);
6417 tmp2 = neon_load_reg(rd, n + 1);
6418 neon_store_reg(rm, n, tmp2);
6419 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6420 }
6421 } else {
6422 goto elementwise;
6423 }
6424 break;
600b828c 6425 case NEON_2RM_VUZP:
02acedf9 6426 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6427 return 1;
9ee6e8bb
PB
6428 }
6429 break;
600b828c 6430 case NEON_2RM_VZIP:
d68a6f3a 6431 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6432 return 1;
9ee6e8bb
PB
6433 }
6434 break;
600b828c
PM
6435 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6436 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6437 if (rm & 1) {
6438 return 1;
6439 }
39d5492a 6440 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6441 for (pass = 0; pass < 2; pass++) {
ad69471c 6442 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6443 tmp = tcg_temp_new_i32();
600b828c
PM
6444 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6445 tmp, cpu_V0);
ad69471c
PB
6446 if (pass == 0) {
6447 tmp2 = tmp;
6448 } else {
6449 neon_store_reg(rd, 0, tmp2);
6450 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6451 }
9ee6e8bb
PB
6452 }
6453 break;
600b828c 6454 case NEON_2RM_VSHLL:
fc2a9b37 6455 if (q || (rd & 1)) {
9ee6e8bb 6456 return 1;
600b828c 6457 }
ad69471c
PB
6458 tmp = neon_load_reg(rm, 0);
6459 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6460 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6461 if (pass == 1)
6462 tmp = tmp2;
6463 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6464 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6465 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6466 }
6467 break;
600b828c 6468 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
6469 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6470 q || (rm & 1)) {
6471 return 1;
6472 }
7d1b0095
PM
6473 tmp = tcg_temp_new_i32();
6474 tmp2 = tcg_temp_new_i32();
60011498 6475 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6476 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6477 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6478 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6479 tcg_gen_shli_i32(tmp2, tmp2, 16);
6480 tcg_gen_or_i32(tmp2, tmp2, tmp);
6481 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6482 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6483 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6484 neon_store_reg(rd, 0, tmp2);
7d1b0095 6485 tmp2 = tcg_temp_new_i32();
2d981da7 6486 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6487 tcg_gen_shli_i32(tmp2, tmp2, 16);
6488 tcg_gen_or_i32(tmp2, tmp2, tmp);
6489 neon_store_reg(rd, 1, tmp2);
7d1b0095 6490 tcg_temp_free_i32(tmp);
60011498 6491 break;
600b828c 6492 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6493 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6494 q || (rd & 1)) {
6495 return 1;
6496 }
7d1b0095 6497 tmp3 = tcg_temp_new_i32();
60011498
PB
6498 tmp = neon_load_reg(rm, 0);
6499 tmp2 = neon_load_reg(rm, 1);
6500 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6501 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6502 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6503 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6504 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6505 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6506 tcg_temp_free_i32(tmp);
60011498 6507 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6508 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6509 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6510 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6511 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6512 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6513 tcg_temp_free_i32(tmp2);
6514 tcg_temp_free_i32(tmp3);
60011498 6515 break;
9d935509
AB
6516 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6517 if (!arm_feature(env, ARM_FEATURE_V8_AES)
6518 || ((rm | rd) & 1)) {
6519 return 1;
6520 }
6521 tmp = tcg_const_i32(rd);
6522 tmp2 = tcg_const_i32(rm);
6523
6524 /* Bit 6 is the lowest opcode bit; it distinguishes between
6525 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6526 */
6527 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6528
6529 if (op == NEON_2RM_AESE) {
6530 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6531 } else {
6532 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6533 }
6534 tcg_temp_free_i32(tmp);
6535 tcg_temp_free_i32(tmp2);
6536 tcg_temp_free_i32(tmp3);
6537 break;
f1ecb913
AB
6538 case NEON_2RM_SHA1H:
6539 if (!arm_feature(env, ARM_FEATURE_V8_SHA1)
6540 || ((rm | rd) & 1)) {
6541 return 1;
6542 }
6543 tmp = tcg_const_i32(rd);
6544 tmp2 = tcg_const_i32(rm);
6545
6546 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6547
6548 tcg_temp_free_i32(tmp);
6549 tcg_temp_free_i32(tmp2);
6550 break;
6551 case NEON_2RM_SHA1SU1:
6552 if ((rm | rd) & 1) {
6553 return 1;
6554 }
6555 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6556 if (q) {
6557 if (!arm_feature(env, ARM_FEATURE_V8_SHA256)) {
6558 return 1;
6559 }
6560 } else if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
6561 return 1;
6562 }
6563 tmp = tcg_const_i32(rd);
6564 tmp2 = tcg_const_i32(rm);
6565 if (q) {
6566 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6567 } else {
6568 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6569 }
6570 tcg_temp_free_i32(tmp);
6571 tcg_temp_free_i32(tmp2);
6572 break;
9ee6e8bb
PB
6573 default:
6574 elementwise:
6575 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6576 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6577 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6578 neon_reg_offset(rm, pass));
39d5492a 6579 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6580 } else {
dd8fbd78 6581 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6582 }
6583 switch (op) {
600b828c 6584 case NEON_2RM_VREV32:
9ee6e8bb 6585 switch (size) {
dd8fbd78
FN
6586 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6587 case 1: gen_swap_half(tmp); break;
600b828c 6588 default: abort();
9ee6e8bb
PB
6589 }
6590 break;
600b828c 6591 case NEON_2RM_VREV16:
dd8fbd78 6592 gen_rev16(tmp);
9ee6e8bb 6593 break;
600b828c 6594 case NEON_2RM_VCLS:
9ee6e8bb 6595 switch (size) {
dd8fbd78
FN
6596 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6597 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6598 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6599 default: abort();
9ee6e8bb
PB
6600 }
6601 break;
600b828c 6602 case NEON_2RM_VCLZ:
9ee6e8bb 6603 switch (size) {
dd8fbd78
FN
6604 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6605 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6606 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6607 default: abort();
9ee6e8bb
PB
6608 }
6609 break;
600b828c 6610 case NEON_2RM_VCNT:
dd8fbd78 6611 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6612 break;
600b828c 6613 case NEON_2RM_VMVN:
dd8fbd78 6614 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6615 break;
600b828c 6616 case NEON_2RM_VQABS:
9ee6e8bb 6617 switch (size) {
02da0b2d
PM
6618 case 0:
6619 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6620 break;
6621 case 1:
6622 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6623 break;
6624 case 2:
6625 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6626 break;
600b828c 6627 default: abort();
9ee6e8bb
PB
6628 }
6629 break;
600b828c 6630 case NEON_2RM_VQNEG:
9ee6e8bb 6631 switch (size) {
02da0b2d
PM
6632 case 0:
6633 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6634 break;
6635 case 1:
6636 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6637 break;
6638 case 2:
6639 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6640 break;
600b828c 6641 default: abort();
9ee6e8bb
PB
6642 }
6643 break;
600b828c 6644 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6645 tmp2 = tcg_const_i32(0);
9ee6e8bb 6646 switch(size) {
dd8fbd78
FN
6647 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6648 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6649 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6650 default: abort();
9ee6e8bb 6651 }
39d5492a 6652 tcg_temp_free_i32(tmp2);
600b828c 6653 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6654 tcg_gen_not_i32(tmp, tmp);
600b828c 6655 }
9ee6e8bb 6656 break;
600b828c 6657 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6658 tmp2 = tcg_const_i32(0);
9ee6e8bb 6659 switch(size) {
dd8fbd78
FN
6660 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6661 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6662 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6663 default: abort();
9ee6e8bb 6664 }
39d5492a 6665 tcg_temp_free_i32(tmp2);
600b828c 6666 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6667 tcg_gen_not_i32(tmp, tmp);
600b828c 6668 }
9ee6e8bb 6669 break;
600b828c 6670 case NEON_2RM_VCEQ0:
dd8fbd78 6671 tmp2 = tcg_const_i32(0);
9ee6e8bb 6672 switch(size) {
dd8fbd78
FN
6673 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6674 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6675 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6676 default: abort();
9ee6e8bb 6677 }
39d5492a 6678 tcg_temp_free_i32(tmp2);
9ee6e8bb 6679 break;
600b828c 6680 case NEON_2RM_VABS:
9ee6e8bb 6681 switch(size) {
dd8fbd78
FN
6682 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6683 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6684 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6685 default: abort();
9ee6e8bb
PB
6686 }
6687 break;
600b828c 6688 case NEON_2RM_VNEG:
dd8fbd78
FN
6689 tmp2 = tcg_const_i32(0);
6690 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6691 tcg_temp_free_i32(tmp2);
9ee6e8bb 6692 break;
600b828c 6693 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6694 {
6695 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6696 tmp2 = tcg_const_i32(0);
aa47cfdd 6697 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6698 tcg_temp_free_i32(tmp2);
aa47cfdd 6699 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6700 break;
aa47cfdd 6701 }
600b828c 6702 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6703 {
6704 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6705 tmp2 = tcg_const_i32(0);
aa47cfdd 6706 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6707 tcg_temp_free_i32(tmp2);
aa47cfdd 6708 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6709 break;
aa47cfdd 6710 }
600b828c 6711 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6712 {
6713 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6714 tmp2 = tcg_const_i32(0);
aa47cfdd 6715 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6716 tcg_temp_free_i32(tmp2);
aa47cfdd 6717 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6718 break;
aa47cfdd 6719 }
600b828c 6720 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6721 {
6722 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6723 tmp2 = tcg_const_i32(0);
aa47cfdd 6724 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6725 tcg_temp_free_i32(tmp2);
aa47cfdd 6726 tcg_temp_free_ptr(fpstatus);
0e326109 6727 break;
aa47cfdd 6728 }
600b828c 6729 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6730 {
6731 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6732 tmp2 = tcg_const_i32(0);
aa47cfdd 6733 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6734 tcg_temp_free_i32(tmp2);
aa47cfdd 6735 tcg_temp_free_ptr(fpstatus);
0e326109 6736 break;
aa47cfdd 6737 }
600b828c 6738 case NEON_2RM_VABS_F:
4373f3ce 6739 gen_vfp_abs(0);
9ee6e8bb 6740 break;
600b828c 6741 case NEON_2RM_VNEG_F:
4373f3ce 6742 gen_vfp_neg(0);
9ee6e8bb 6743 break;
600b828c 6744 case NEON_2RM_VSWP:
dd8fbd78
FN
6745 tmp2 = neon_load_reg(rd, pass);
6746 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6747 break;
600b828c 6748 case NEON_2RM_VTRN:
dd8fbd78 6749 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6750 switch (size) {
dd8fbd78
FN
6751 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6752 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6753 default: abort();
9ee6e8bb 6754 }
dd8fbd78 6755 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6756 break;
34f7b0a2
WN
6757 case NEON_2RM_VRINTN:
6758 case NEON_2RM_VRINTA:
6759 case NEON_2RM_VRINTM:
6760 case NEON_2RM_VRINTP:
6761 case NEON_2RM_VRINTZ:
6762 {
6763 TCGv_i32 tcg_rmode;
6764 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6765 int rmode;
6766
6767 if (op == NEON_2RM_VRINTZ) {
6768 rmode = FPROUNDING_ZERO;
6769 } else {
6770 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6771 }
6772
6773 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6774 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6775 cpu_env);
6776 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6777 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6778 cpu_env);
6779 tcg_temp_free_ptr(fpstatus);
6780 tcg_temp_free_i32(tcg_rmode);
6781 break;
6782 }
2ce70625
WN
6783 case NEON_2RM_VRINTX:
6784 {
6785 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6786 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6787 tcg_temp_free_ptr(fpstatus);
6788 break;
6789 }
901ad525
WN
6790 case NEON_2RM_VCVTAU:
6791 case NEON_2RM_VCVTAS:
6792 case NEON_2RM_VCVTNU:
6793 case NEON_2RM_VCVTNS:
6794 case NEON_2RM_VCVTPU:
6795 case NEON_2RM_VCVTPS:
6796 case NEON_2RM_VCVTMU:
6797 case NEON_2RM_VCVTMS:
6798 {
6799 bool is_signed = !extract32(insn, 7, 1);
6800 TCGv_ptr fpst = get_fpstatus_ptr(1);
6801 TCGv_i32 tcg_rmode, tcg_shift;
6802 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6803
6804 tcg_shift = tcg_const_i32(0);
6805 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6806 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6807 cpu_env);
6808
6809 if (is_signed) {
6810 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6811 tcg_shift, fpst);
6812 } else {
6813 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6814 tcg_shift, fpst);
6815 }
6816
6817 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6818 cpu_env);
6819 tcg_temp_free_i32(tcg_rmode);
6820 tcg_temp_free_i32(tcg_shift);
6821 tcg_temp_free_ptr(fpst);
6822 break;
6823 }
600b828c 6824 case NEON_2RM_VRECPE:
b6d4443a
AB
6825 {
6826 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6827 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6828 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6829 break;
b6d4443a 6830 }
600b828c 6831 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6832 {
6833 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6834 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6835 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6836 break;
c2fb418e 6837 }
600b828c 6838 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6839 {
6840 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6841 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6842 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6843 break;
b6d4443a 6844 }
600b828c 6845 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6846 {
6847 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6848 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6849 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6850 break;
c2fb418e 6851 }
600b828c 6852 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6853 gen_vfp_sito(0, 1);
9ee6e8bb 6854 break;
600b828c 6855 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6856 gen_vfp_uito(0, 1);
9ee6e8bb 6857 break;
600b828c 6858 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6859 gen_vfp_tosiz(0, 1);
9ee6e8bb 6860 break;
600b828c 6861 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6862 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6863 break;
6864 default:
600b828c
PM
6865 /* Reserved op values were caught by the
6866 * neon_2rm_sizes[] check earlier.
6867 */
6868 abort();
9ee6e8bb 6869 }
600b828c 6870 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6871 tcg_gen_st_f32(cpu_F0s, cpu_env,
6872 neon_reg_offset(rd, pass));
9ee6e8bb 6873 } else {
dd8fbd78 6874 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6875 }
6876 }
6877 break;
6878 }
6879 } else if ((insn & (1 << 10)) == 0) {
6880 /* VTBL, VTBX. */
56907d77
PM
6881 int n = ((insn >> 8) & 3) + 1;
6882 if ((rn + n) > 32) {
6883 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6884 * helper function running off the end of the register file.
6885 */
6886 return 1;
6887 }
6888 n <<= 3;
9ee6e8bb 6889 if (insn & (1 << 6)) {
8f8e3aa4 6890 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6891 } else {
7d1b0095 6892 tmp = tcg_temp_new_i32();
8f8e3aa4 6893 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6894 }
8f8e3aa4 6895 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6896 tmp4 = tcg_const_i32(rn);
6897 tmp5 = tcg_const_i32(n);
9ef39277 6898 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6899 tcg_temp_free_i32(tmp);
9ee6e8bb 6900 if (insn & (1 << 6)) {
8f8e3aa4 6901 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6902 } else {
7d1b0095 6903 tmp = tcg_temp_new_i32();
8f8e3aa4 6904 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6905 }
8f8e3aa4 6906 tmp3 = neon_load_reg(rm, 1);
9ef39277 6907 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6908 tcg_temp_free_i32(tmp5);
6909 tcg_temp_free_i32(tmp4);
8f8e3aa4 6910 neon_store_reg(rd, 0, tmp2);
3018f259 6911 neon_store_reg(rd, 1, tmp3);
7d1b0095 6912 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6913 } else if ((insn & 0x380) == 0) {
6914 /* VDUP */
133da6aa
JR
6915 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6916 return 1;
6917 }
9ee6e8bb 6918 if (insn & (1 << 19)) {
dd8fbd78 6919 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6920 } else {
dd8fbd78 6921 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6922 }
6923 if (insn & (1 << 16)) {
dd8fbd78 6924 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6925 } else if (insn & (1 << 17)) {
6926 if ((insn >> 18) & 1)
dd8fbd78 6927 gen_neon_dup_high16(tmp);
9ee6e8bb 6928 else
dd8fbd78 6929 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6930 }
6931 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6932 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6933 tcg_gen_mov_i32(tmp2, tmp);
6934 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6935 }
7d1b0095 6936 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6937 } else {
6938 return 1;
6939 }
6940 }
6941 }
6942 return 0;
6943}
6944
0ecb72a5 6945static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6946{
4b6a83fb
PM
6947 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6948 const ARMCPRegInfo *ri;
9ee6e8bb
PB
6949
6950 cpnum = (insn >> 8) & 0xf;
6951 if (arm_feature(env, ARM_FEATURE_XSCALE)
6952 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6953 return 1;
6954
4b6a83fb 6955 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6956 switch (cpnum) {
6957 case 0:
6958 case 1:
6959 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6960 return disas_iwmmxt_insn(env, s, insn);
6961 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6962 return disas_dsp_insn(env, s, insn);
6963 }
6964 return 1;
4b6a83fb
PM
6965 default:
6966 break;
6967 }
6968
6969 /* Otherwise treat as a generic register access */
6970 is64 = (insn & (1 << 25)) == 0;
6971 if (!is64 && ((insn & (1 << 4)) == 0)) {
6972 /* cdp */
6973 return 1;
6974 }
6975
6976 crm = insn & 0xf;
6977 if (is64) {
6978 crn = 0;
6979 opc1 = (insn >> 4) & 0xf;
6980 opc2 = 0;
6981 rt2 = (insn >> 16) & 0xf;
6982 } else {
6983 crn = (insn >> 16) & 0xf;
6984 opc1 = (insn >> 21) & 7;
6985 opc2 = (insn >> 5) & 7;
6986 rt2 = 0;
6987 }
6988 isread = (insn >> 20) & 1;
6989 rt = (insn >> 12) & 0xf;
6990
60322b39 6991 ri = get_arm_cp_reginfo(s->cp_regs,
4b6a83fb
PM
6992 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6993 if (ri) {
6994 /* Check access permissions */
60322b39 6995 if (!cp_access_ok(s->current_pl, ri, isread)) {
4b6a83fb
PM
6996 return 1;
6997 }
6998
f59df3f2
PM
6999 if (ri->accessfn) {
7000 /* Emit code to perform further access permissions checks at
7001 * runtime; this may result in an exception.
7002 */
7003 TCGv_ptr tmpptr;
8bcbf37c
PM
7004 TCGv_i32 tcg_syn;
7005 uint32_t syndrome;
7006
7007 /* Note that since we are an implementation which takes an
7008 * exception on a trapped conditional instruction only if the
7009 * instruction passes its condition code check, we can take
7010 * advantage of the clause in the ARM ARM that allows us to set
7011 * the COND field in the instruction to 0xE in all cases.
7012 * We could fish the actual condition out of the insn (ARM)
7013 * or the condexec bits (Thumb) but it isn't necessary.
7014 */
7015 switch (cpnum) {
7016 case 14:
7017 if (is64) {
7018 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7019 isread, s->thumb);
7020 } else {
7021 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7022 rt, isread, s->thumb);
7023 }
7024 break;
7025 case 15:
7026 if (is64) {
7027 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7028 isread, s->thumb);
7029 } else {
7030 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7031 rt, isread, s->thumb);
7032 }
7033 break;
7034 default:
7035 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7036 * so this can only happen if this is an ARMv7 or earlier CPU,
7037 * in which case the syndrome information won't actually be
7038 * guest visible.
7039 */
7040 assert(!arm_feature(env, ARM_FEATURE_V8));
7041 syndrome = syn_uncategorized();
7042 break;
7043 }
7044
f59df3f2
PM
7045 gen_set_pc_im(s, s->pc);
7046 tmpptr = tcg_const_ptr(ri);
8bcbf37c
PM
7047 tcg_syn = tcg_const_i32(syndrome);
7048 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
f59df3f2 7049 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7050 tcg_temp_free_i32(tcg_syn);
f59df3f2
PM
7051 }
7052
4b6a83fb
PM
7053 /* Handle special cases first */
7054 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7055 case ARM_CP_NOP:
7056 return 0;
7057 case ARM_CP_WFI:
7058 if (isread) {
7059 return 1;
7060 }
eaed129d 7061 gen_set_pc_im(s, s->pc);
4b6a83fb 7062 s->is_jmp = DISAS_WFI;
2bee5105 7063 return 0;
4b6a83fb
PM
7064 default:
7065 break;
7066 }
7067
2452731c
PM
7068 if (use_icount && (ri->type & ARM_CP_IO)) {
7069 gen_io_start();
7070 }
7071
4b6a83fb
PM
7072 if (isread) {
7073 /* Read */
7074 if (is64) {
7075 TCGv_i64 tmp64;
7076 TCGv_i32 tmp;
7077 if (ri->type & ARM_CP_CONST) {
7078 tmp64 = tcg_const_i64(ri->resetvalue);
7079 } else if (ri->readfn) {
7080 TCGv_ptr tmpptr;
4b6a83fb
PM
7081 tmp64 = tcg_temp_new_i64();
7082 tmpptr = tcg_const_ptr(ri);
7083 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7084 tcg_temp_free_ptr(tmpptr);
7085 } else {
7086 tmp64 = tcg_temp_new_i64();
7087 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7088 }
7089 tmp = tcg_temp_new_i32();
7090 tcg_gen_trunc_i64_i32(tmp, tmp64);
7091 store_reg(s, rt, tmp);
7092 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7093 tmp = tcg_temp_new_i32();
4b6a83fb 7094 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 7095 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7096 store_reg(s, rt2, tmp);
7097 } else {
39d5492a 7098 TCGv_i32 tmp;
4b6a83fb
PM
7099 if (ri->type & ARM_CP_CONST) {
7100 tmp = tcg_const_i32(ri->resetvalue);
7101 } else if (ri->readfn) {
7102 TCGv_ptr tmpptr;
4b6a83fb
PM
7103 tmp = tcg_temp_new_i32();
7104 tmpptr = tcg_const_ptr(ri);
7105 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7106 tcg_temp_free_ptr(tmpptr);
7107 } else {
7108 tmp = load_cpu_offset(ri->fieldoffset);
7109 }
7110 if (rt == 15) {
7111 /* Destination register of r15 for 32 bit loads sets
7112 * the condition codes from the high 4 bits of the value
7113 */
7114 gen_set_nzcv(tmp);
7115 tcg_temp_free_i32(tmp);
7116 } else {
7117 store_reg(s, rt, tmp);
7118 }
7119 }
7120 } else {
7121 /* Write */
7122 if (ri->type & ARM_CP_CONST) {
7123 /* If not forbidden by access permissions, treat as WI */
7124 return 0;
7125 }
7126
7127 if (is64) {
39d5492a 7128 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7129 TCGv_i64 tmp64 = tcg_temp_new_i64();
7130 tmplo = load_reg(s, rt);
7131 tmphi = load_reg(s, rt2);
7132 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7133 tcg_temp_free_i32(tmplo);
7134 tcg_temp_free_i32(tmphi);
7135 if (ri->writefn) {
7136 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7137 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7138 tcg_temp_free_ptr(tmpptr);
7139 } else {
7140 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7141 }
7142 tcg_temp_free_i64(tmp64);
7143 } else {
7144 if (ri->writefn) {
39d5492a 7145 TCGv_i32 tmp;
4b6a83fb 7146 TCGv_ptr tmpptr;
4b6a83fb
PM
7147 tmp = load_reg(s, rt);
7148 tmpptr = tcg_const_ptr(ri);
7149 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7150 tcg_temp_free_ptr(tmpptr);
7151 tcg_temp_free_i32(tmp);
7152 } else {
39d5492a 7153 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7154 store_cpu_offset(tmp, ri->fieldoffset);
7155 }
7156 }
2452731c
PM
7157 }
7158
7159 if (use_icount && (ri->type & ARM_CP_IO)) {
7160 /* I/O operations must end the TB here (whether read or write) */
7161 gen_io_end();
7162 gen_lookup_tb(s);
7163 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7164 /* We default to ending the TB on a coprocessor register write,
7165 * but allow this to be suppressed by the register definition
7166 * (usually only necessary to work around guest bugs).
7167 */
2452731c 7168 gen_lookup_tb(s);
4b6a83fb 7169 }
2452731c 7170
4b6a83fb
PM
7171 return 0;
7172 }
7173
626187d8
PM
7174 /* Unknown register; this might be a guest error or a QEMU
7175 * unimplemented feature.
7176 */
7177 if (is64) {
7178 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7179 "64 bit system register cp:%d opc1: %d crm:%d\n",
7180 isread ? "read" : "write", cpnum, opc1, crm);
7181 } else {
7182 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7183 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
7184 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
7185 }
7186
4a9a539f 7187 return 1;
9ee6e8bb
PB
7188}
7189
5e3f878a
PB
7190
7191/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7192static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7193{
39d5492a 7194 TCGv_i32 tmp;
7d1b0095 7195 tmp = tcg_temp_new_i32();
5e3f878a
PB
7196 tcg_gen_trunc_i64_i32(tmp, val);
7197 store_reg(s, rlow, tmp);
7d1b0095 7198 tmp = tcg_temp_new_i32();
5e3f878a
PB
7199 tcg_gen_shri_i64(val, val, 32);
7200 tcg_gen_trunc_i64_i32(tmp, val);
7201 store_reg(s, rhigh, tmp);
7202}
7203
7204/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7205static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7206{
a7812ae4 7207 TCGv_i64 tmp;
39d5492a 7208 TCGv_i32 tmp2;
5e3f878a 7209
36aa55dc 7210 /* Load value and extend to 64 bits. */
a7812ae4 7211 tmp = tcg_temp_new_i64();
5e3f878a
PB
7212 tmp2 = load_reg(s, rlow);
7213 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7214 tcg_temp_free_i32(tmp2);
5e3f878a 7215 tcg_gen_add_i64(val, val, tmp);
b75263d6 7216 tcg_temp_free_i64(tmp);
5e3f878a
PB
7217}
7218
7219/* load and add a 64-bit value from a register pair. */
a7812ae4 7220static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7221{
a7812ae4 7222 TCGv_i64 tmp;
39d5492a
PM
7223 TCGv_i32 tmpl;
7224 TCGv_i32 tmph;
5e3f878a
PB
7225
7226 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7227 tmpl = load_reg(s, rlow);
7228 tmph = load_reg(s, rhigh);
a7812ae4 7229 tmp = tcg_temp_new_i64();
36aa55dc 7230 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7231 tcg_temp_free_i32(tmpl);
7232 tcg_temp_free_i32(tmph);
5e3f878a 7233 tcg_gen_add_i64(val, val, tmp);
b75263d6 7234 tcg_temp_free_i64(tmp);
5e3f878a
PB
7235}
7236
/* Set N and Z flags from hi|lo. */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    /* N is bit 63 of the 64-bit result, i.e. the sign of the high word;
     * cpu_NF holds the flag in its top bit, so a plain copy suffices.
     */
    tcg_gen_mov_i32(cpu_NF, hi);
    /* Z is set iff all 64 bits are zero: cpu_ZF encodes Z as "value == 0",
     * so OR-ing the two halves gives the right test.
     */
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
7243
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
/* Generate code for LDREX and friends: load from [addr] into rt (and
 * rt2 when size == 3), recording the address and the loaded value in
 * cpu_exclusive_addr/cpu_exclusive_val for a later gen_store_exclusive()
 * to check against.
 * @size: log2 of the access size; 3 means a 64-bit register-pair load.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        /* size 3 loads the low word here; the high word follows below.  */
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        /* Load the high word from addr + 4 and remember the full
         * 64-bit value in the exclusive-monitor state.
         */
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the monitor: remember the (zero-extended) exclusive address.  */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7289
/* Clear the exclusive monitor (CLREX): an exclusive_addr of -1 can
 * never match a real (zero-extended 32-bit) address, so any following
 * store-exclusive will fail.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7294
7295#ifdef CONFIG_USER_ONLY
/* User-mode emulation: the exclusive store cannot be performed inline
 * (it must be atomic with respect to other threads), so record the
 * operands and raise EXCP_STREX; the operation is completed in the
 * exception handling path.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    /* Pack size and the rd/rt/rt2 register numbers into exclusive_info
     * in 4-bit fields for the handler to decode.
     */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
7304#else
/* Generate code for STREX and friends: store rt (and rt2 when size == 3)
 * to [addr] if the exclusive monitor set up by gen_load_exclusive()
 * still matches, writing 0 to rd on success and 1 on failure.
 * @rd: status destination register
 * @rt: source register (low word for size == 3)
 * @rt2: second source register (high word; only used when size == 3)
 * @size: log2 of the access size
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Fail if the monitored address does not match.  */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Re-read the current memory contents at the monitored address...  */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    /* ...widened to 64 bits (second word loaded for size == 3)...  */
    val64 = tcg_temp_new_i64();
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    /* ...and fail if it no longer holds the originally loaded value.  */
    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Monitor matched: perform the actual store(s).  */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* High word of the pair goes to addr + 4.  */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Success or failure, the exclusive monitor is cleared.  */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7388#endif
7389
81465888
PM
/* gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which mode's banked r13 to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn, which stores
 * r14 and the SPSR to the stack of the mode given in @mode.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_const_i32(mode);
    /* Fetch the banked r13 (SP) of the target mode.  */
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset from SP to the lower-addressed of the two words stored.  */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store r14 at [addr] and the SPSR at [addr + 4].  */
    tmp = load_reg(s, 14);
    gen_aa32_st32(tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjust addr to the final writeback SP value; note that addr
         * has already been advanced by (first offset + 4) above, so
         * these deltas are relative to the current addr, not to the
         * original SP.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        /* Write the updated SP back to the target mode's banked r13.  */
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
}
7455
0ecb72a5 7456static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
7457{
7458 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7459 TCGv_i32 tmp;
7460 TCGv_i32 tmp2;
7461 TCGv_i32 tmp3;
7462 TCGv_i32 addr;
a7812ae4 7463 TCGv_i64 tmp64;
9ee6e8bb 7464
d31dd73e 7465 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
7466 s->pc += 4;
7467
7468 /* M variants do not implement ARM mode. */
7469 if (IS_M(env))
7470 goto illegal_op;
7471 cond = insn >> 28;
7472 if (cond == 0xf){
be5e7a76
DES
7473 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7474 * choose to UNDEF. In ARMv5 and above the space is used
7475 * for miscellaneous unconditional instructions.
7476 */
7477 ARCH(5);
7478
9ee6e8bb
PB
7479 /* Unconditional instructions. */
7480 if (((insn >> 25) & 7) == 1) {
7481 /* NEON Data processing. */
7482 if (!arm_feature(env, ARM_FEATURE_NEON))
7483 goto illegal_op;
7484
7485 if (disas_neon_data_insn(env, s, insn))
7486 goto illegal_op;
7487 return;
7488 }
7489 if ((insn & 0x0f100000) == 0x04000000) {
7490 /* NEON load/store. */
7491 if (!arm_feature(env, ARM_FEATURE_NEON))
7492 goto illegal_op;
7493
7494 if (disas_neon_ls_insn(env, s, insn))
7495 goto illegal_op;
7496 return;
7497 }
6a57f3eb
WN
7498 if ((insn & 0x0f000e10) == 0x0e000a00) {
7499 /* VFP. */
7500 if (disas_vfp_insn(env, s, insn)) {
7501 goto illegal_op;
7502 }
7503 return;
7504 }
3d185e5d
PM
7505 if (((insn & 0x0f30f000) == 0x0510f000) ||
7506 ((insn & 0x0f30f010) == 0x0710f000)) {
7507 if ((insn & (1 << 22)) == 0) {
7508 /* PLDW; v7MP */
7509 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7510 goto illegal_op;
7511 }
7512 }
7513 /* Otherwise PLD; v5TE+ */
be5e7a76 7514 ARCH(5TE);
3d185e5d
PM
7515 return;
7516 }
7517 if (((insn & 0x0f70f000) == 0x0450f000) ||
7518 ((insn & 0x0f70f010) == 0x0650f000)) {
7519 ARCH(7);
7520 return; /* PLI; V7 */
7521 }
7522 if (((insn & 0x0f700000) == 0x04100000) ||
7523 ((insn & 0x0f700010) == 0x06100000)) {
7524 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7525 goto illegal_op;
7526 }
7527 return; /* v7MP: Unallocated memory hint: must NOP */
7528 }
7529
7530 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7531 ARCH(6);
7532 /* setend */
10962fd5
PM
7533 if (((insn >> 9) & 1) != s->bswap_code) {
7534 /* Dynamic endianness switching not implemented. */
e0c270d9 7535 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7536 goto illegal_op;
7537 }
7538 return;
7539 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7540 switch ((insn >> 4) & 0xf) {
7541 case 1: /* clrex */
7542 ARCH(6K);
426f5abc 7543 gen_clrex(s);
9ee6e8bb
PB
7544 return;
7545 case 4: /* dsb */
7546 case 5: /* dmb */
7547 case 6: /* isb */
7548 ARCH(7);
7549 /* We don't emulate caches so these are a no-op. */
7550 return;
7551 default:
7552 goto illegal_op;
7553 }
7554 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7555 /* srs */
81465888 7556 if (IS_USER(s)) {
9ee6e8bb 7557 goto illegal_op;
9ee6e8bb 7558 }
81465888
PM
7559 ARCH(6);
7560 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7561 return;
ea825eee 7562 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7563 /* rfe */
c67b6b71 7564 int32_t offset;
9ee6e8bb
PB
7565 if (IS_USER(s))
7566 goto illegal_op;
7567 ARCH(6);
7568 rn = (insn >> 16) & 0xf;
b0109805 7569 addr = load_reg(s, rn);
9ee6e8bb
PB
7570 i = (insn >> 23) & 3;
7571 switch (i) {
b0109805 7572 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7573 case 1: offset = 0; break; /* IA */
7574 case 2: offset = -8; break; /* DB */
b0109805 7575 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7576 default: abort();
7577 }
7578 if (offset)
b0109805
PB
7579 tcg_gen_addi_i32(addr, addr, offset);
7580 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7581 tmp = tcg_temp_new_i32();
6ce2faf4 7582 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 7583 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7584 tmp2 = tcg_temp_new_i32();
6ce2faf4 7585 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
7586 if (insn & (1 << 21)) {
7587 /* Base writeback. */
7588 switch (i) {
b0109805 7589 case 0: offset = -8; break;
c67b6b71
FN
7590 case 1: offset = 4; break;
7591 case 2: offset = -4; break;
b0109805 7592 case 3: offset = 0; break;
9ee6e8bb
PB
7593 default: abort();
7594 }
7595 if (offset)
b0109805
PB
7596 tcg_gen_addi_i32(addr, addr, offset);
7597 store_reg(s, rn, addr);
7598 } else {
7d1b0095 7599 tcg_temp_free_i32(addr);
9ee6e8bb 7600 }
b0109805 7601 gen_rfe(s, tmp, tmp2);
c67b6b71 7602 return;
9ee6e8bb
PB
7603 } else if ((insn & 0x0e000000) == 0x0a000000) {
7604 /* branch link and change to thumb (blx <offset>) */
7605 int32_t offset;
7606
7607 val = (uint32_t)s->pc;
7d1b0095 7608 tmp = tcg_temp_new_i32();
d9ba4830
PB
7609 tcg_gen_movi_i32(tmp, val);
7610 store_reg(s, 14, tmp);
9ee6e8bb
PB
7611 /* Sign-extend the 24-bit offset */
7612 offset = (((int32_t)insn) << 8) >> 8;
7613 /* offset * 4 + bit24 * 2 + (thumb bit) */
7614 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7615 /* pipeline offset */
7616 val += 4;
be5e7a76 7617 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7618 gen_bx_im(s, val);
9ee6e8bb
PB
7619 return;
7620 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7621 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7622 /* iWMMXt register transfer. */
7623 if (env->cp15.c15_cpar & (1 << 1))
7624 if (!disas_iwmmxt_insn(env, s, insn))
7625 return;
7626 }
7627 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7628 /* Coprocessor double register transfer. */
be5e7a76 7629 ARCH(5TE);
9ee6e8bb
PB
7630 } else if ((insn & 0x0f000010) == 0x0e000010) {
7631 /* Additional coprocessor register transfer. */
7997d92f 7632 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7633 uint32_t mask;
7634 uint32_t val;
7635 /* cps (privileged) */
7636 if (IS_USER(s))
7637 return;
7638 mask = val = 0;
7639 if (insn & (1 << 19)) {
7640 if (insn & (1 << 8))
7641 mask |= CPSR_A;
7642 if (insn & (1 << 7))
7643 mask |= CPSR_I;
7644 if (insn & (1 << 6))
7645 mask |= CPSR_F;
7646 if (insn & (1 << 18))
7647 val |= mask;
7648 }
7997d92f 7649 if (insn & (1 << 17)) {
9ee6e8bb
PB
7650 mask |= CPSR_M;
7651 val |= (insn & 0x1f);
7652 }
7653 if (mask) {
2fbac54b 7654 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7655 }
7656 return;
7657 }
7658 goto illegal_op;
7659 }
7660 if (cond != 0xe) {
7661 /* if not always execute, we generate a conditional jump to
7662 next instruction */
7663 s->condlabel = gen_new_label();
39fb730a 7664 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7665 s->condjmp = 1;
7666 }
7667 if ((insn & 0x0f900000) == 0x03000000) {
7668 if ((insn & (1 << 21)) == 0) {
7669 ARCH(6T2);
7670 rd = (insn >> 12) & 0xf;
7671 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7672 if ((insn & (1 << 22)) == 0) {
7673 /* MOVW */
7d1b0095 7674 tmp = tcg_temp_new_i32();
5e3f878a 7675 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7676 } else {
7677 /* MOVT */
5e3f878a 7678 tmp = load_reg(s, rd);
86831435 7679 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7680 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7681 }
5e3f878a 7682 store_reg(s, rd, tmp);
9ee6e8bb
PB
7683 } else {
7684 if (((insn >> 12) & 0xf) != 0xf)
7685 goto illegal_op;
7686 if (((insn >> 16) & 0xf) == 0) {
7687 gen_nop_hint(s, insn & 0xff);
7688 } else {
7689 /* CPSR = immediate */
7690 val = insn & 0xff;
7691 shift = ((insn >> 8) & 0xf) * 2;
7692 if (shift)
7693 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7694 i = ((insn & (1 << 22)) != 0);
2fbac54b 7695 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
7696 goto illegal_op;
7697 }
7698 }
7699 } else if ((insn & 0x0f900000) == 0x01000000
7700 && (insn & 0x00000090) != 0x00000090) {
7701 /* miscellaneous instructions */
7702 op1 = (insn >> 21) & 3;
7703 sh = (insn >> 4) & 0xf;
7704 rm = insn & 0xf;
7705 switch (sh) {
7706 case 0x0: /* move program status register */
7707 if (op1 & 1) {
7708 /* PSR = reg */
2fbac54b 7709 tmp = load_reg(s, rm);
9ee6e8bb 7710 i = ((op1 & 2) != 0);
2fbac54b 7711 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7712 goto illegal_op;
7713 } else {
7714 /* reg = PSR */
7715 rd = (insn >> 12) & 0xf;
7716 if (op1 & 2) {
7717 if (IS_USER(s))
7718 goto illegal_op;
d9ba4830 7719 tmp = load_cpu_field(spsr);
9ee6e8bb 7720 } else {
7d1b0095 7721 tmp = tcg_temp_new_i32();
9ef39277 7722 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7723 }
d9ba4830 7724 store_reg(s, rd, tmp);
9ee6e8bb
PB
7725 }
7726 break;
7727 case 0x1:
7728 if (op1 == 1) {
7729 /* branch/exchange thumb (bx). */
be5e7a76 7730 ARCH(4T);
d9ba4830
PB
7731 tmp = load_reg(s, rm);
7732 gen_bx(s, tmp);
9ee6e8bb
PB
7733 } else if (op1 == 3) {
7734 /* clz */
be5e7a76 7735 ARCH(5);
9ee6e8bb 7736 rd = (insn >> 12) & 0xf;
1497c961
PB
7737 tmp = load_reg(s, rm);
7738 gen_helper_clz(tmp, tmp);
7739 store_reg(s, rd, tmp);
9ee6e8bb
PB
7740 } else {
7741 goto illegal_op;
7742 }
7743 break;
7744 case 0x2:
7745 if (op1 == 1) {
7746 ARCH(5J); /* bxj */
7747 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7748 tmp = load_reg(s, rm);
7749 gen_bx(s, tmp);
9ee6e8bb
PB
7750 } else {
7751 goto illegal_op;
7752 }
7753 break;
7754 case 0x3:
7755 if (op1 != 1)
7756 goto illegal_op;
7757
be5e7a76 7758 ARCH(5);
9ee6e8bb 7759 /* branch link/exchange thumb (blx) */
d9ba4830 7760 tmp = load_reg(s, rm);
7d1b0095 7761 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7762 tcg_gen_movi_i32(tmp2, s->pc);
7763 store_reg(s, 14, tmp2);
7764 gen_bx(s, tmp);
9ee6e8bb 7765 break;
eb0ecd5a
WN
7766 case 0x4:
7767 {
7768 /* crc32/crc32c */
7769 uint32_t c = extract32(insn, 8, 4);
7770
7771 /* Check this CPU supports ARMv8 CRC instructions.
7772 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7773 * Bits 8, 10 and 11 should be zero.
7774 */
7775 if (!arm_feature(env, ARM_FEATURE_CRC) || op1 == 0x3 ||
7776 (c & 0xd) != 0) {
7777 goto illegal_op;
7778 }
7779
7780 rn = extract32(insn, 16, 4);
7781 rd = extract32(insn, 12, 4);
7782
7783 tmp = load_reg(s, rn);
7784 tmp2 = load_reg(s, rm);
7785 tmp3 = tcg_const_i32(1 << op1);
7786 if (c & 0x2) {
7787 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7788 } else {
7789 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7790 }
7791 tcg_temp_free_i32(tmp2);
7792 tcg_temp_free_i32(tmp3);
7793 store_reg(s, rd, tmp);
7794 break;
7795 }
9ee6e8bb 7796 case 0x5: /* saturating add/subtract */
be5e7a76 7797 ARCH(5TE);
9ee6e8bb
PB
7798 rd = (insn >> 12) & 0xf;
7799 rn = (insn >> 16) & 0xf;
b40d0353 7800 tmp = load_reg(s, rm);
5e3f878a 7801 tmp2 = load_reg(s, rn);
9ee6e8bb 7802 if (op1 & 2)
9ef39277 7803 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7804 if (op1 & 1)
9ef39277 7805 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7806 else
9ef39277 7807 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7808 tcg_temp_free_i32(tmp2);
5e3f878a 7809 store_reg(s, rd, tmp);
9ee6e8bb 7810 break;
49e14940 7811 case 7:
d4a2dc67
PM
7812 {
7813 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
49e14940
AL
7814 /* SMC instruction (op1 == 3)
7815 and undefined instructions (op1 == 0 || op1 == 2)
7816 will trap */
7817 if (op1 != 1) {
7818 goto illegal_op;
7819 }
7820 /* bkpt */
be5e7a76 7821 ARCH(5);
d4a2dc67 7822 gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false));
9ee6e8bb 7823 break;
d4a2dc67 7824 }
9ee6e8bb
PB
7825 case 0x8: /* signed multiply */
7826 case 0xa:
7827 case 0xc:
7828 case 0xe:
be5e7a76 7829 ARCH(5TE);
9ee6e8bb
PB
7830 rs = (insn >> 8) & 0xf;
7831 rn = (insn >> 12) & 0xf;
7832 rd = (insn >> 16) & 0xf;
7833 if (op1 == 1) {
7834 /* (32 * 16) >> 16 */
5e3f878a
PB
7835 tmp = load_reg(s, rm);
7836 tmp2 = load_reg(s, rs);
9ee6e8bb 7837 if (sh & 4)
5e3f878a 7838 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7839 else
5e3f878a 7840 gen_sxth(tmp2);
a7812ae4
PB
7841 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7842 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7843 tmp = tcg_temp_new_i32();
a7812ae4 7844 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7845 tcg_temp_free_i64(tmp64);
9ee6e8bb 7846 if ((sh & 2) == 0) {
5e3f878a 7847 tmp2 = load_reg(s, rn);
9ef39277 7848 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7849 tcg_temp_free_i32(tmp2);
9ee6e8bb 7850 }
5e3f878a 7851 store_reg(s, rd, tmp);
9ee6e8bb
PB
7852 } else {
7853 /* 16 * 16 */
5e3f878a
PB
7854 tmp = load_reg(s, rm);
7855 tmp2 = load_reg(s, rs);
7856 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7857 tcg_temp_free_i32(tmp2);
9ee6e8bb 7858 if (op1 == 2) {
a7812ae4
PB
7859 tmp64 = tcg_temp_new_i64();
7860 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7861 tcg_temp_free_i32(tmp);
a7812ae4
PB
7862 gen_addq(s, tmp64, rn, rd);
7863 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7864 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7865 } else {
7866 if (op1 == 0) {
5e3f878a 7867 tmp2 = load_reg(s, rn);
9ef39277 7868 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7869 tcg_temp_free_i32(tmp2);
9ee6e8bb 7870 }
5e3f878a 7871 store_reg(s, rd, tmp);
9ee6e8bb
PB
7872 }
7873 }
7874 break;
7875 default:
7876 goto illegal_op;
7877 }
7878 } else if (((insn & 0x0e000000) == 0 &&
7879 (insn & 0x00000090) != 0x90) ||
7880 ((insn & 0x0e000000) == (1 << 25))) {
7881 int set_cc, logic_cc, shiftop;
7882
7883 op1 = (insn >> 21) & 0xf;
7884 set_cc = (insn >> 20) & 1;
7885 logic_cc = table_logic_cc[op1] & set_cc;
7886
7887 /* data processing instruction */
7888 if (insn & (1 << 25)) {
7889 /* immediate operand */
7890 val = insn & 0xff;
7891 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7892 if (shift) {
9ee6e8bb 7893 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7894 }
7d1b0095 7895 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7896 tcg_gen_movi_i32(tmp2, val);
7897 if (logic_cc && shift) {
7898 gen_set_CF_bit31(tmp2);
7899 }
9ee6e8bb
PB
7900 } else {
7901 /* register */
7902 rm = (insn) & 0xf;
e9bb4aa9 7903 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7904 shiftop = (insn >> 5) & 3;
7905 if (!(insn & (1 << 4))) {
7906 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7907 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7908 } else {
7909 rs = (insn >> 8) & 0xf;
8984bd2e 7910 tmp = load_reg(s, rs);
e9bb4aa9 7911 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7912 }
7913 }
7914 if (op1 != 0x0f && op1 != 0x0d) {
7915 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7916 tmp = load_reg(s, rn);
7917 } else {
39d5492a 7918 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7919 }
7920 rd = (insn >> 12) & 0xf;
7921 switch(op1) {
7922 case 0x00:
e9bb4aa9
JR
7923 tcg_gen_and_i32(tmp, tmp, tmp2);
7924 if (logic_cc) {
7925 gen_logic_CC(tmp);
7926 }
21aeb343 7927 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7928 break;
7929 case 0x01:
e9bb4aa9
JR
7930 tcg_gen_xor_i32(tmp, tmp, tmp2);
7931 if (logic_cc) {
7932 gen_logic_CC(tmp);
7933 }
21aeb343 7934 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7935 break;
7936 case 0x02:
7937 if (set_cc && rd == 15) {
7938 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7939 if (IS_USER(s)) {
9ee6e8bb 7940 goto illegal_op;
e9bb4aa9 7941 }
72485ec4 7942 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7943 gen_exception_return(s, tmp);
9ee6e8bb 7944 } else {
e9bb4aa9 7945 if (set_cc) {
72485ec4 7946 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7947 } else {
7948 tcg_gen_sub_i32(tmp, tmp, tmp2);
7949 }
21aeb343 7950 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7951 }
7952 break;
7953 case 0x03:
e9bb4aa9 7954 if (set_cc) {
72485ec4 7955 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7956 } else {
7957 tcg_gen_sub_i32(tmp, tmp2, tmp);
7958 }
21aeb343 7959 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7960 break;
7961 case 0x04:
e9bb4aa9 7962 if (set_cc) {
72485ec4 7963 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7964 } else {
7965 tcg_gen_add_i32(tmp, tmp, tmp2);
7966 }
21aeb343 7967 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7968 break;
7969 case 0x05:
e9bb4aa9 7970 if (set_cc) {
49b4c31e 7971 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7972 } else {
7973 gen_add_carry(tmp, tmp, tmp2);
7974 }
21aeb343 7975 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7976 break;
7977 case 0x06:
e9bb4aa9 7978 if (set_cc) {
2de68a49 7979 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7980 } else {
7981 gen_sub_carry(tmp, tmp, tmp2);
7982 }
21aeb343 7983 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7984 break;
7985 case 0x07:
e9bb4aa9 7986 if (set_cc) {
2de68a49 7987 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7988 } else {
7989 gen_sub_carry(tmp, tmp2, tmp);
7990 }
21aeb343 7991 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7992 break;
7993 case 0x08:
7994 if (set_cc) {
e9bb4aa9
JR
7995 tcg_gen_and_i32(tmp, tmp, tmp2);
7996 gen_logic_CC(tmp);
9ee6e8bb 7997 }
7d1b0095 7998 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7999 break;
8000 case 0x09:
8001 if (set_cc) {
e9bb4aa9
JR
8002 tcg_gen_xor_i32(tmp, tmp, tmp2);
8003 gen_logic_CC(tmp);
9ee6e8bb 8004 }
7d1b0095 8005 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8006 break;
8007 case 0x0a:
8008 if (set_cc) {
72485ec4 8009 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8010 }
7d1b0095 8011 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8012 break;
8013 case 0x0b:
8014 if (set_cc) {
72485ec4 8015 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8016 }
7d1b0095 8017 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8018 break;
8019 case 0x0c:
e9bb4aa9
JR
8020 tcg_gen_or_i32(tmp, tmp, tmp2);
8021 if (logic_cc) {
8022 gen_logic_CC(tmp);
8023 }
21aeb343 8024 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8025 break;
8026 case 0x0d:
8027 if (logic_cc && rd == 15) {
8028 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8029 if (IS_USER(s)) {
9ee6e8bb 8030 goto illegal_op;
e9bb4aa9
JR
8031 }
8032 gen_exception_return(s, tmp2);
9ee6e8bb 8033 } else {
e9bb4aa9
JR
8034 if (logic_cc) {
8035 gen_logic_CC(tmp2);
8036 }
21aeb343 8037 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
8038 }
8039 break;
8040 case 0x0e:
f669df27 8041 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8042 if (logic_cc) {
8043 gen_logic_CC(tmp);
8044 }
21aeb343 8045 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8046 break;
8047 default:
8048 case 0x0f:
e9bb4aa9
JR
8049 tcg_gen_not_i32(tmp2, tmp2);
8050 if (logic_cc) {
8051 gen_logic_CC(tmp2);
8052 }
21aeb343 8053 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
8054 break;
8055 }
e9bb4aa9 8056 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8057 tcg_temp_free_i32(tmp2);
e9bb4aa9 8058 }
9ee6e8bb
PB
8059 } else {
8060 /* other instructions */
8061 op1 = (insn >> 24) & 0xf;
8062 switch(op1) {
8063 case 0x0:
8064 case 0x1:
8065 /* multiplies, extra load/stores */
8066 sh = (insn >> 5) & 3;
8067 if (sh == 0) {
8068 if (op1 == 0x0) {
8069 rd = (insn >> 16) & 0xf;
8070 rn = (insn >> 12) & 0xf;
8071 rs = (insn >> 8) & 0xf;
8072 rm = (insn) & 0xf;
8073 op1 = (insn >> 20) & 0xf;
8074 switch (op1) {
8075 case 0: case 1: case 2: case 3: case 6:
8076 /* 32 bit mul */
5e3f878a
PB
8077 tmp = load_reg(s, rs);
8078 tmp2 = load_reg(s, rm);
8079 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8080 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8081 if (insn & (1 << 22)) {
8082 /* Subtract (mls) */
8083 ARCH(6T2);
5e3f878a
PB
8084 tmp2 = load_reg(s, rn);
8085 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8086 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8087 } else if (insn & (1 << 21)) {
8088 /* Add */
5e3f878a
PB
8089 tmp2 = load_reg(s, rn);
8090 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8091 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8092 }
8093 if (insn & (1 << 20))
5e3f878a
PB
8094 gen_logic_CC(tmp);
8095 store_reg(s, rd, tmp);
9ee6e8bb 8096 break;
8aac08b1
AJ
8097 case 4:
8098 /* 64 bit mul double accumulate (UMAAL) */
8099 ARCH(6);
8100 tmp = load_reg(s, rs);
8101 tmp2 = load_reg(s, rm);
8102 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8103 gen_addq_lo(s, tmp64, rn);
8104 gen_addq_lo(s, tmp64, rd);
8105 gen_storeq_reg(s, rn, rd, tmp64);
8106 tcg_temp_free_i64(tmp64);
8107 break;
8108 case 8: case 9: case 10: case 11:
8109 case 12: case 13: case 14: case 15:
8110 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8111 tmp = load_reg(s, rs);
8112 tmp2 = load_reg(s, rm);
8aac08b1 8113 if (insn & (1 << 22)) {
c9f10124 8114 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8115 } else {
c9f10124 8116 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8117 }
8118 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8119 TCGv_i32 al = load_reg(s, rn);
8120 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8121 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8122 tcg_temp_free_i32(al);
8123 tcg_temp_free_i32(ah);
9ee6e8bb 8124 }
8aac08b1 8125 if (insn & (1 << 20)) {
c9f10124 8126 gen_logicq_cc(tmp, tmp2);
8aac08b1 8127 }
c9f10124
RH
8128 store_reg(s, rn, tmp);
8129 store_reg(s, rd, tmp2);
9ee6e8bb 8130 break;
8aac08b1
AJ
8131 default:
8132 goto illegal_op;
9ee6e8bb
PB
8133 }
8134 } else {
8135 rn = (insn >> 16) & 0xf;
8136 rd = (insn >> 12) & 0xf;
8137 if (insn & (1 << 23)) {
8138 /* load/store exclusive */
2359bf80 8139 int op2 = (insn >> 8) & 3;
86753403 8140 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8141
8142 switch (op2) {
8143 case 0: /* lda/stl */
8144 if (op1 == 1) {
8145 goto illegal_op;
8146 }
8147 ARCH(8);
8148 break;
8149 case 1: /* reserved */
8150 goto illegal_op;
8151 case 2: /* ldaex/stlex */
8152 ARCH(8);
8153 break;
8154 case 3: /* ldrex/strex */
8155 if (op1) {
8156 ARCH(6K);
8157 } else {
8158 ARCH(6);
8159 }
8160 break;
8161 }
8162
3174f8e9 8163 addr = tcg_temp_local_new_i32();
98a46317 8164 load_reg_var(s, addr, rn);
2359bf80
MR
8165
8166 /* Since the emulation does not have barriers,
8167 the acquire/release semantics need no special
8168 handling */
8169 if (op2 == 0) {
8170 if (insn & (1 << 20)) {
8171 tmp = tcg_temp_new_i32();
8172 switch (op1) {
8173 case 0: /* lda */
6ce2faf4 8174 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
8175 break;
8176 case 2: /* ldab */
6ce2faf4 8177 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
8178 break;
8179 case 3: /* ldah */
6ce2faf4 8180 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
8181 break;
8182 default:
8183 abort();
8184 }
8185 store_reg(s, rd, tmp);
8186 } else {
8187 rm = insn & 0xf;
8188 tmp = load_reg(s, rm);
8189 switch (op1) {
8190 case 0: /* stl */
6ce2faf4 8191 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
8192 break;
8193 case 2: /* stlb */
6ce2faf4 8194 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
8195 break;
8196 case 3: /* stlh */
6ce2faf4 8197 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
8198 break;
8199 default:
8200 abort();
8201 }
8202 tcg_temp_free_i32(tmp);
8203 }
8204 } else if (insn & (1 << 20)) {
86753403
PB
8205 switch (op1) {
8206 case 0: /* ldrex */
426f5abc 8207 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8208 break;
8209 case 1: /* ldrexd */
426f5abc 8210 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8211 break;
8212 case 2: /* ldrexb */
426f5abc 8213 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8214 break;
8215 case 3: /* ldrexh */
426f5abc 8216 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8217 break;
8218 default:
8219 abort();
8220 }
9ee6e8bb
PB
8221 } else {
8222 rm = insn & 0xf;
86753403
PB
8223 switch (op1) {
8224 case 0: /* strex */
426f5abc 8225 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8226 break;
8227 case 1: /* strexd */
502e64fe 8228 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8229 break;
8230 case 2: /* strexb */
426f5abc 8231 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8232 break;
8233 case 3: /* strexh */
426f5abc 8234 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8235 break;
8236 default:
8237 abort();
8238 }
9ee6e8bb 8239 }
39d5492a 8240 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8241 } else {
8242 /* SWP instruction */
8243 rm = (insn) & 0xf;
8244
8984bd2e
PB
8245 /* ??? This is not really atomic. However we know
8246 we never have multiple CPUs running in parallel,
8247 so it is good enough. */
8248 addr = load_reg(s, rn);
8249 tmp = load_reg(s, rm);
5a839c0d 8250 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8251 if (insn & (1 << 22)) {
6ce2faf4
EI
8252 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8253 gen_aa32_st8(tmp, addr, get_mem_index(s));
9ee6e8bb 8254 } else {
6ce2faf4
EI
8255 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8256 gen_aa32_st32(tmp, addr, get_mem_index(s));
9ee6e8bb 8257 }
5a839c0d 8258 tcg_temp_free_i32(tmp);
7d1b0095 8259 tcg_temp_free_i32(addr);
8984bd2e 8260 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8261 }
8262 }
8263 } else {
8264 int address_offset;
8265 int load;
8266 /* Misc load/store */
8267 rn = (insn >> 16) & 0xf;
8268 rd = (insn >> 12) & 0xf;
b0109805 8269 addr = load_reg(s, rn);
9ee6e8bb 8270 if (insn & (1 << 24))
b0109805 8271 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
8272 address_offset = 0;
8273 if (insn & (1 << 20)) {
8274 /* load */
5a839c0d 8275 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
8276 switch(sh) {
8277 case 1:
6ce2faf4 8278 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8279 break;
8280 case 2:
6ce2faf4 8281 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8282 break;
8283 default:
8284 case 3:
6ce2faf4 8285 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8286 break;
8287 }
8288 load = 1;
8289 } else if (sh & 2) {
be5e7a76 8290 ARCH(5TE);
9ee6e8bb
PB
8291 /* doubleword */
8292 if (sh & 1) {
8293 /* store */
b0109805 8294 tmp = load_reg(s, rd);
6ce2faf4 8295 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8296 tcg_temp_free_i32(tmp);
b0109805
PB
8297 tcg_gen_addi_i32(addr, addr, 4);
8298 tmp = load_reg(s, rd + 1);
6ce2faf4 8299 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8300 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8301 load = 0;
8302 } else {
8303 /* load */
5a839c0d 8304 tmp = tcg_temp_new_i32();
6ce2faf4 8305 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
8306 store_reg(s, rd, tmp);
8307 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8308 tmp = tcg_temp_new_i32();
6ce2faf4 8309 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8310 rd++;
8311 load = 1;
8312 }
8313 address_offset = -4;
8314 } else {
8315 /* store */
b0109805 8316 tmp = load_reg(s, rd);
6ce2faf4 8317 gen_aa32_st16(tmp, addr, get_mem_index(s));
5a839c0d 8318 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8319 load = 0;
8320 }
8321 /* Perform base writeback before the loaded value to
8322 ensure correct behavior with overlapping index registers.
8323 ldrd with base writeback is is undefined if the
8324 destination and index registers overlap. */
8325 if (!(insn & (1 << 24))) {
b0109805
PB
8326 gen_add_datah_offset(s, insn, address_offset, addr);
8327 store_reg(s, rn, addr);
9ee6e8bb
PB
8328 } else if (insn & (1 << 21)) {
8329 if (address_offset)
b0109805
PB
8330 tcg_gen_addi_i32(addr, addr, address_offset);
8331 store_reg(s, rn, addr);
8332 } else {
7d1b0095 8333 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8334 }
8335 if (load) {
8336 /* Complete the load. */
b0109805 8337 store_reg(s, rd, tmp);
9ee6e8bb
PB
8338 }
8339 }
8340 break;
8341 case 0x4:
8342 case 0x5:
8343 goto do_ldst;
8344 case 0x6:
8345 case 0x7:
8346 if (insn & (1 << 4)) {
8347 ARCH(6);
8348 /* Armv6 Media instructions. */
8349 rm = insn & 0xf;
8350 rn = (insn >> 16) & 0xf;
2c0262af 8351 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8352 rs = (insn >> 8) & 0xf;
8353 switch ((insn >> 23) & 3) {
8354 case 0: /* Parallel add/subtract. */
8355 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8356 tmp = load_reg(s, rn);
8357 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8358 sh = (insn >> 5) & 7;
8359 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8360 goto illegal_op;
6ddbc6e4 8361 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8362 tcg_temp_free_i32(tmp2);
6ddbc6e4 8363 store_reg(s, rd, tmp);
9ee6e8bb
PB
8364 break;
8365 case 1:
8366 if ((insn & 0x00700020) == 0) {
6c95676b 8367 /* Halfword pack. */
3670669c
PB
8368 tmp = load_reg(s, rn);
8369 tmp2 = load_reg(s, rm);
9ee6e8bb 8370 shift = (insn >> 7) & 0x1f;
3670669c
PB
8371 if (insn & (1 << 6)) {
8372 /* pkhtb */
22478e79
AZ
8373 if (shift == 0)
8374 shift = 31;
8375 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8376 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8377 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8378 } else {
8379 /* pkhbt */
22478e79
AZ
8380 if (shift)
8381 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8382 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8383 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8384 }
8385 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8386 tcg_temp_free_i32(tmp2);
3670669c 8387 store_reg(s, rd, tmp);
9ee6e8bb
PB
8388 } else if ((insn & 0x00200020) == 0x00200000) {
8389 /* [us]sat */
6ddbc6e4 8390 tmp = load_reg(s, rm);
9ee6e8bb
PB
8391 shift = (insn >> 7) & 0x1f;
8392 if (insn & (1 << 6)) {
8393 if (shift == 0)
8394 shift = 31;
6ddbc6e4 8395 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8396 } else {
6ddbc6e4 8397 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8398 }
8399 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8400 tmp2 = tcg_const_i32(sh);
8401 if (insn & (1 << 22))
9ef39277 8402 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8403 else
9ef39277 8404 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8405 tcg_temp_free_i32(tmp2);
6ddbc6e4 8406 store_reg(s, rd, tmp);
9ee6e8bb
PB
8407 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8408 /* [us]sat16 */
6ddbc6e4 8409 tmp = load_reg(s, rm);
9ee6e8bb 8410 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8411 tmp2 = tcg_const_i32(sh);
8412 if (insn & (1 << 22))
9ef39277 8413 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8414 else
9ef39277 8415 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8416 tcg_temp_free_i32(tmp2);
6ddbc6e4 8417 store_reg(s, rd, tmp);
9ee6e8bb
PB
8418 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8419 /* Select bytes. */
6ddbc6e4
PB
8420 tmp = load_reg(s, rn);
8421 tmp2 = load_reg(s, rm);
7d1b0095 8422 tmp3 = tcg_temp_new_i32();
0ecb72a5 8423 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8424 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8425 tcg_temp_free_i32(tmp3);
8426 tcg_temp_free_i32(tmp2);
6ddbc6e4 8427 store_reg(s, rd, tmp);
9ee6e8bb 8428 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8429 tmp = load_reg(s, rm);
9ee6e8bb 8430 shift = (insn >> 10) & 3;
1301f322 8431 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8432 rotate, a shift is sufficient. */
8433 if (shift != 0)
f669df27 8434 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8435 op1 = (insn >> 20) & 7;
8436 switch (op1) {
5e3f878a
PB
8437 case 0: gen_sxtb16(tmp); break;
8438 case 2: gen_sxtb(tmp); break;
8439 case 3: gen_sxth(tmp); break;
8440 case 4: gen_uxtb16(tmp); break;
8441 case 6: gen_uxtb(tmp); break;
8442 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8443 default: goto illegal_op;
8444 }
8445 if (rn != 15) {
5e3f878a 8446 tmp2 = load_reg(s, rn);
9ee6e8bb 8447 if ((op1 & 3) == 0) {
5e3f878a 8448 gen_add16(tmp, tmp2);
9ee6e8bb 8449 } else {
5e3f878a 8450 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8451 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8452 }
8453 }
6c95676b 8454 store_reg(s, rd, tmp);
9ee6e8bb
PB
8455 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8456 /* rev */
b0109805 8457 tmp = load_reg(s, rm);
9ee6e8bb
PB
8458 if (insn & (1 << 22)) {
8459 if (insn & (1 << 7)) {
b0109805 8460 gen_revsh(tmp);
9ee6e8bb
PB
8461 } else {
8462 ARCH(6T2);
b0109805 8463 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8464 }
8465 } else {
8466 if (insn & (1 << 7))
b0109805 8467 gen_rev16(tmp);
9ee6e8bb 8468 else
66896cb8 8469 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8470 }
b0109805 8471 store_reg(s, rd, tmp);
9ee6e8bb
PB
8472 } else {
8473 goto illegal_op;
8474 }
8475 break;
8476 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8477 switch ((insn >> 20) & 0x7) {
8478 case 5:
8479 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8480 /* op2 not 00x or 11x : UNDEF */
8481 goto illegal_op;
8482 }
838fa72d
AJ
8483 /* Signed multiply most significant [accumulate].
8484 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8485 tmp = load_reg(s, rm);
8486 tmp2 = load_reg(s, rs);
a7812ae4 8487 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8488
955a7dd5 8489 if (rd != 15) {
838fa72d 8490 tmp = load_reg(s, rd);
9ee6e8bb 8491 if (insn & (1 << 6)) {
838fa72d 8492 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8493 } else {
838fa72d 8494 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8495 }
8496 }
838fa72d
AJ
8497 if (insn & (1 << 5)) {
8498 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8499 }
8500 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8501 tmp = tcg_temp_new_i32();
838fa72d
AJ
8502 tcg_gen_trunc_i64_i32(tmp, tmp64);
8503 tcg_temp_free_i64(tmp64);
955a7dd5 8504 store_reg(s, rn, tmp);
41e9564d
PM
8505 break;
8506 case 0:
8507 case 4:
8508 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8509 if (insn & (1 << 7)) {
8510 goto illegal_op;
8511 }
8512 tmp = load_reg(s, rm);
8513 tmp2 = load_reg(s, rs);
9ee6e8bb 8514 if (insn & (1 << 5))
5e3f878a
PB
8515 gen_swap_half(tmp2);
8516 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8517 if (insn & (1 << 22)) {
5e3f878a 8518 /* smlald, smlsld */
33bbd75a
PC
8519 TCGv_i64 tmp64_2;
8520
a7812ae4 8521 tmp64 = tcg_temp_new_i64();
33bbd75a 8522 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8523 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8524 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8525 tcg_temp_free_i32(tmp);
33bbd75a
PC
8526 tcg_temp_free_i32(tmp2);
8527 if (insn & (1 << 6)) {
8528 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8529 } else {
8530 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8531 }
8532 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
8533 gen_addq(s, tmp64, rd, rn);
8534 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8535 tcg_temp_free_i64(tmp64);
9ee6e8bb 8536 } else {
5e3f878a 8537 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
8538 if (insn & (1 << 6)) {
8539 /* This subtraction cannot overflow. */
8540 tcg_gen_sub_i32(tmp, tmp, tmp2);
8541 } else {
8542 /* This addition cannot overflow 32 bits;
8543 * however it may overflow considered as a
8544 * signed operation, in which case we must set
8545 * the Q flag.
8546 */
8547 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8548 }
8549 tcg_temp_free_i32(tmp2);
22478e79 8550 if (rd != 15)
9ee6e8bb 8551 {
22478e79 8552 tmp2 = load_reg(s, rd);
9ef39277 8553 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8554 tcg_temp_free_i32(tmp2);
9ee6e8bb 8555 }
22478e79 8556 store_reg(s, rn, tmp);
9ee6e8bb 8557 }
41e9564d 8558 break;
b8b8ea05
PM
8559 case 1:
8560 case 3:
8561 /* SDIV, UDIV */
8562 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
8563 goto illegal_op;
8564 }
8565 if (((insn >> 5) & 7) || (rd != 15)) {
8566 goto illegal_op;
8567 }
8568 tmp = load_reg(s, rm);
8569 tmp2 = load_reg(s, rs);
8570 if (insn & (1 << 21)) {
8571 gen_helper_udiv(tmp, tmp, tmp2);
8572 } else {
8573 gen_helper_sdiv(tmp, tmp, tmp2);
8574 }
8575 tcg_temp_free_i32(tmp2);
8576 store_reg(s, rn, tmp);
8577 break;
41e9564d
PM
8578 default:
8579 goto illegal_op;
9ee6e8bb
PB
8580 }
8581 break;
8582 case 3:
8583 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8584 switch (op1) {
8585 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8586 ARCH(6);
8587 tmp = load_reg(s, rm);
8588 tmp2 = load_reg(s, rs);
8589 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8590 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8591 if (rd != 15) {
8592 tmp2 = load_reg(s, rd);
6ddbc6e4 8593 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8594 tcg_temp_free_i32(tmp2);
9ee6e8bb 8595 }
ded9d295 8596 store_reg(s, rn, tmp);
9ee6e8bb
PB
8597 break;
8598 case 0x20: case 0x24: case 0x28: case 0x2c:
8599 /* Bitfield insert/clear. */
8600 ARCH(6T2);
8601 shift = (insn >> 7) & 0x1f;
8602 i = (insn >> 16) & 0x1f;
8603 i = i + 1 - shift;
8604 if (rm == 15) {
7d1b0095 8605 tmp = tcg_temp_new_i32();
5e3f878a 8606 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8607 } else {
5e3f878a 8608 tmp = load_reg(s, rm);
9ee6e8bb
PB
8609 }
8610 if (i != 32) {
5e3f878a 8611 tmp2 = load_reg(s, rd);
d593c48e 8612 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8613 tcg_temp_free_i32(tmp2);
9ee6e8bb 8614 }
5e3f878a 8615 store_reg(s, rd, tmp);
9ee6e8bb
PB
8616 break;
8617 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8618 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8619 ARCH(6T2);
5e3f878a 8620 tmp = load_reg(s, rm);
9ee6e8bb
PB
8621 shift = (insn >> 7) & 0x1f;
8622 i = ((insn >> 16) & 0x1f) + 1;
8623 if (shift + i > 32)
8624 goto illegal_op;
8625 if (i < 32) {
8626 if (op1 & 0x20) {
5e3f878a 8627 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8628 } else {
5e3f878a 8629 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8630 }
8631 }
5e3f878a 8632 store_reg(s, rd, tmp);
9ee6e8bb
PB
8633 break;
8634 default:
8635 goto illegal_op;
8636 }
8637 break;
8638 }
8639 break;
8640 }
8641 do_ldst:
8642 /* Check for undefined extension instructions
8643 * per the ARM Bible IE:
8644 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8645 */
8646 sh = (0xf << 20) | (0xf << 4);
8647 if (op1 == 0x7 && ((insn & sh) == sh))
8648 {
8649 goto illegal_op;
8650 }
8651 /* load/store byte/word */
8652 rn = (insn >> 16) & 0xf;
8653 rd = (insn >> 12) & 0xf;
b0109805 8654 tmp2 = load_reg(s, rn);
a99caa48
PM
8655 if ((insn & 0x01200000) == 0x00200000) {
8656 /* ldrt/strt */
8657 i = MMU_USER_IDX;
8658 } else {
8659 i = get_mem_index(s);
8660 }
9ee6e8bb 8661 if (insn & (1 << 24))
b0109805 8662 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8663 if (insn & (1 << 20)) {
8664 /* load */
5a839c0d 8665 tmp = tcg_temp_new_i32();
9ee6e8bb 8666 if (insn & (1 << 22)) {
08307563 8667 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8668 } else {
08307563 8669 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8670 }
9ee6e8bb
PB
8671 } else {
8672 /* store */
b0109805 8673 tmp = load_reg(s, rd);
5a839c0d 8674 if (insn & (1 << 22)) {
08307563 8675 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8676 } else {
08307563 8677 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8678 }
8679 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8680 }
8681 if (!(insn & (1 << 24))) {
b0109805
PB
8682 gen_add_data_offset(s, insn, tmp2);
8683 store_reg(s, rn, tmp2);
8684 } else if (insn & (1 << 21)) {
8685 store_reg(s, rn, tmp2);
8686 } else {
7d1b0095 8687 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8688 }
8689 if (insn & (1 << 20)) {
8690 /* Complete the load. */
be5e7a76 8691 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
8692 }
8693 break;
8694 case 0x08:
8695 case 0x09:
8696 {
8697 int j, n, user, loaded_base;
39d5492a 8698 TCGv_i32 loaded_var;
9ee6e8bb
PB
8699 /* load/store multiple words */
8700 /* XXX: store correct base if write back */
8701 user = 0;
8702 if (insn & (1 << 22)) {
8703 if (IS_USER(s))
8704 goto illegal_op; /* only usable in supervisor mode */
8705
8706 if ((insn & (1 << 15)) == 0)
8707 user = 1;
8708 }
8709 rn = (insn >> 16) & 0xf;
b0109805 8710 addr = load_reg(s, rn);
9ee6e8bb
PB
8711
8712 /* compute total size */
8713 loaded_base = 0;
39d5492a 8714 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8715 n = 0;
8716 for(i=0;i<16;i++) {
8717 if (insn & (1 << i))
8718 n++;
8719 }
8720 /* XXX: test invalid n == 0 case ? */
8721 if (insn & (1 << 23)) {
8722 if (insn & (1 << 24)) {
8723 /* pre increment */
b0109805 8724 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8725 } else {
8726 /* post increment */
8727 }
8728 } else {
8729 if (insn & (1 << 24)) {
8730 /* pre decrement */
b0109805 8731 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8732 } else {
8733 /* post decrement */
8734 if (n != 1)
b0109805 8735 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8736 }
8737 }
8738 j = 0;
8739 for(i=0;i<16;i++) {
8740 if (insn & (1 << i)) {
8741 if (insn & (1 << 20)) {
8742 /* load */
5a839c0d 8743 tmp = tcg_temp_new_i32();
6ce2faf4 8744 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
be5e7a76 8745 if (user) {
b75263d6 8746 tmp2 = tcg_const_i32(i);
1ce94f81 8747 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8748 tcg_temp_free_i32(tmp2);
7d1b0095 8749 tcg_temp_free_i32(tmp);
9ee6e8bb 8750 } else if (i == rn) {
b0109805 8751 loaded_var = tmp;
9ee6e8bb
PB
8752 loaded_base = 1;
8753 } else {
be5e7a76 8754 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
8755 }
8756 } else {
8757 /* store */
8758 if (i == 15) {
8759 /* special case: r15 = PC + 8 */
8760 val = (long)s->pc + 4;
7d1b0095 8761 tmp = tcg_temp_new_i32();
b0109805 8762 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8763 } else if (user) {
7d1b0095 8764 tmp = tcg_temp_new_i32();
b75263d6 8765 tmp2 = tcg_const_i32(i);
9ef39277 8766 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8767 tcg_temp_free_i32(tmp2);
9ee6e8bb 8768 } else {
b0109805 8769 tmp = load_reg(s, i);
9ee6e8bb 8770 }
6ce2faf4 8771 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8772 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8773 }
8774 j++;
8775 /* no need to add after the last transfer */
8776 if (j != n)
b0109805 8777 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8778 }
8779 }
8780 if (insn & (1 << 21)) {
8781 /* write back */
8782 if (insn & (1 << 23)) {
8783 if (insn & (1 << 24)) {
8784 /* pre increment */
8785 } else {
8786 /* post increment */
b0109805 8787 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8788 }
8789 } else {
8790 if (insn & (1 << 24)) {
8791 /* pre decrement */
8792 if (n != 1)
b0109805 8793 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8794 } else {
8795 /* post decrement */
b0109805 8796 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8797 }
8798 }
b0109805
PB
8799 store_reg(s, rn, addr);
8800 } else {
7d1b0095 8801 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8802 }
8803 if (loaded_base) {
b0109805 8804 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8805 }
8806 if ((insn & (1 << 22)) && !user) {
8807 /* Restore CPSR from SPSR. */
d9ba4830
PB
8808 tmp = load_cpu_field(spsr);
8809 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 8810 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8811 s->is_jmp = DISAS_UPDATE;
8812 }
8813 }
8814 break;
8815 case 0xa:
8816 case 0xb:
8817 {
8818 int32_t offset;
8819
8820 /* branch (and link) */
8821 val = (int32_t)s->pc;
8822 if (insn & (1 << 24)) {
7d1b0095 8823 tmp = tcg_temp_new_i32();
5e3f878a
PB
8824 tcg_gen_movi_i32(tmp, val);
8825 store_reg(s, 14, tmp);
9ee6e8bb 8826 }
534df156
PM
8827 offset = sextract32(insn << 2, 0, 26);
8828 val += offset + 4;
9ee6e8bb
PB
8829 gen_jmp(s, val);
8830 }
8831 break;
8832 case 0xc:
8833 case 0xd:
8834 case 0xe:
6a57f3eb
WN
8835 if (((insn >> 8) & 0xe) == 10) {
8836 /* VFP. */
8837 if (disas_vfp_insn(env, s, insn)) {
8838 goto illegal_op;
8839 }
8840 } else if (disas_coproc_insn(env, s, insn)) {
8841 /* Coprocessor. */
9ee6e8bb 8842 goto illegal_op;
6a57f3eb 8843 }
9ee6e8bb
PB
8844 break;
8845 case 0xf:
8846 /* swi */
eaed129d 8847 gen_set_pc_im(s, s->pc);
d4a2dc67 8848 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
8849 s->is_jmp = DISAS_SWI;
8850 break;
8851 default:
8852 illegal_op:
d4a2dc67 8853 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
8854 break;
8855 }
8856 }
8857}
8858
/* Thumb-2 data-processing opcodes 0..7 are the logical operations
   (AND, BIC, ORR, ORN, EOR, ...); report nonzero for those.  */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
8865
8866/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8867 then set condition code flags based on the result of the operation.
8868 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8869 to the high bit of T1.
8870 Returns zero if the opcode is valid. */
8871
8872static int
39d5492a
PM
8873gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8874 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8875{
8876 int logic_cc;
8877
8878 logic_cc = 0;
8879 switch (op) {
8880 case 0: /* and */
396e467c 8881 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8882 logic_cc = conds;
8883 break;
8884 case 1: /* bic */
f669df27 8885 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8886 logic_cc = conds;
8887 break;
8888 case 2: /* orr */
396e467c 8889 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8890 logic_cc = conds;
8891 break;
8892 case 3: /* orn */
29501f1b 8893 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8894 logic_cc = conds;
8895 break;
8896 case 4: /* eor */
396e467c 8897 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8898 logic_cc = conds;
8899 break;
8900 case 8: /* add */
8901 if (conds)
72485ec4 8902 gen_add_CC(t0, t0, t1);
9ee6e8bb 8903 else
396e467c 8904 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8905 break;
8906 case 10: /* adc */
8907 if (conds)
49b4c31e 8908 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8909 else
396e467c 8910 gen_adc(t0, t1);
9ee6e8bb
PB
8911 break;
8912 case 11: /* sbc */
2de68a49
RH
8913 if (conds) {
8914 gen_sbc_CC(t0, t0, t1);
8915 } else {
396e467c 8916 gen_sub_carry(t0, t0, t1);
2de68a49 8917 }
9ee6e8bb
PB
8918 break;
8919 case 13: /* sub */
8920 if (conds)
72485ec4 8921 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8922 else
396e467c 8923 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8924 break;
8925 case 14: /* rsb */
8926 if (conds)
72485ec4 8927 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8928 else
396e467c 8929 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8930 break;
8931 default: /* 5, 6, 7, 9, 12, 15. */
8932 return 1;
8933 }
8934 if (logic_cc) {
396e467c 8935 gen_logic_CC(t0);
9ee6e8bb 8936 if (shifter_out)
396e467c 8937 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8938 }
8939 return 0;
8940}
8941
8942/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8943 is not legal. */
0ecb72a5 8944static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8945{
b0109805 8946 uint32_t insn, imm, shift, offset;
9ee6e8bb 8947 uint32_t rd, rn, rm, rs;
39d5492a
PM
8948 TCGv_i32 tmp;
8949 TCGv_i32 tmp2;
8950 TCGv_i32 tmp3;
8951 TCGv_i32 addr;
a7812ae4 8952 TCGv_i64 tmp64;
9ee6e8bb
PB
8953 int op;
8954 int shiftop;
8955 int conds;
8956 int logic_cc;
8957
8958 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8959 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8960 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8961 16-bit instructions to get correct prefetch abort behavior. */
8962 insn = insn_hw1;
8963 if ((insn & (1 << 12)) == 0) {
be5e7a76 8964 ARCH(5);
9ee6e8bb
PB
8965 /* Second half of blx. */
8966 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8967 tmp = load_reg(s, 14);
8968 tcg_gen_addi_i32(tmp, tmp, offset);
8969 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8970
7d1b0095 8971 tmp2 = tcg_temp_new_i32();
b0109805 8972 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8973 store_reg(s, 14, tmp2);
8974 gen_bx(s, tmp);
9ee6e8bb
PB
8975 return 0;
8976 }
8977 if (insn & (1 << 11)) {
8978 /* Second half of bl. */
8979 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8980 tmp = load_reg(s, 14);
6a0d8a1d 8981 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8982
7d1b0095 8983 tmp2 = tcg_temp_new_i32();
b0109805 8984 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8985 store_reg(s, 14, tmp2);
8986 gen_bx(s, tmp);
9ee6e8bb
PB
8987 return 0;
8988 }
8989 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8990 /* Instruction spans a page boundary. Implement it as two
8991 16-bit instructions in case the second half causes an
8992 prefetch abort. */
8993 offset = ((int32_t)insn << 21) >> 9;
396e467c 8994 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8995 return 0;
8996 }
8997 /* Fall through to 32-bit decode. */
8998 }
8999
d31dd73e 9000 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
9001 s->pc += 2;
9002 insn |= (uint32_t)insn_hw1 << 16;
9003
9004 if ((insn & 0xf800e800) != 0xf000e800) {
9005 ARCH(6T2);
9006 }
9007
9008 rn = (insn >> 16) & 0xf;
9009 rs = (insn >> 12) & 0xf;
9010 rd = (insn >> 8) & 0xf;
9011 rm = insn & 0xf;
9012 switch ((insn >> 25) & 0xf) {
9013 case 0: case 1: case 2: case 3:
9014 /* 16-bit instructions. Should never happen. */
9015 abort();
9016 case 4:
9017 if (insn & (1 << 22)) {
9018 /* Other load/store, table branch. */
9019 if (insn & 0x01200000) {
9020 /* Load/store doubleword. */
9021 if (rn == 15) {
7d1b0095 9022 addr = tcg_temp_new_i32();
b0109805 9023 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9024 } else {
b0109805 9025 addr = load_reg(s, rn);
9ee6e8bb
PB
9026 }
9027 offset = (insn & 0xff) * 4;
9028 if ((insn & (1 << 23)) == 0)
9029 offset = -offset;
9030 if (insn & (1 << 24)) {
b0109805 9031 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9032 offset = 0;
9033 }
9034 if (insn & (1 << 20)) {
9035 /* ldrd */
e2592fad 9036 tmp = tcg_temp_new_i32();
6ce2faf4 9037 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
9038 store_reg(s, rs, tmp);
9039 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9040 tmp = tcg_temp_new_i32();
6ce2faf4 9041 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9042 store_reg(s, rd, tmp);
9ee6e8bb
PB
9043 } else {
9044 /* strd */
b0109805 9045 tmp = load_reg(s, rs);
6ce2faf4 9046 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9047 tcg_temp_free_i32(tmp);
b0109805
PB
9048 tcg_gen_addi_i32(addr, addr, 4);
9049 tmp = load_reg(s, rd);
6ce2faf4 9050 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9051 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9052 }
9053 if (insn & (1 << 21)) {
9054 /* Base writeback. */
9055 if (rn == 15)
9056 goto illegal_op;
b0109805
PB
9057 tcg_gen_addi_i32(addr, addr, offset - 4);
9058 store_reg(s, rn, addr);
9059 } else {
7d1b0095 9060 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9061 }
9062 } else if ((insn & (1 << 23)) == 0) {
9063 /* Load/store exclusive word. */
39d5492a 9064 addr = tcg_temp_local_new_i32();
98a46317 9065 load_reg_var(s, addr, rn);
426f5abc 9066 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9067 if (insn & (1 << 20)) {
426f5abc 9068 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9069 } else {
426f5abc 9070 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9071 }
39d5492a 9072 tcg_temp_free_i32(addr);
2359bf80 9073 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9074 /* Table Branch. */
9075 if (rn == 15) {
7d1b0095 9076 addr = tcg_temp_new_i32();
b0109805 9077 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9078 } else {
b0109805 9079 addr = load_reg(s, rn);
9ee6e8bb 9080 }
b26eefb6 9081 tmp = load_reg(s, rm);
b0109805 9082 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9083 if (insn & (1 << 4)) {
9084 /* tbh */
b0109805 9085 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9086 tcg_temp_free_i32(tmp);
e2592fad 9087 tmp = tcg_temp_new_i32();
6ce2faf4 9088 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb 9089 } else { /* tbb */
7d1b0095 9090 tcg_temp_free_i32(tmp);
e2592fad 9091 tmp = tcg_temp_new_i32();
6ce2faf4 9092 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9ee6e8bb 9093 }
7d1b0095 9094 tcg_temp_free_i32(addr);
b0109805
PB
9095 tcg_gen_shli_i32(tmp, tmp, 1);
9096 tcg_gen_addi_i32(tmp, tmp, s->pc);
9097 store_reg(s, 15, tmp);
9ee6e8bb 9098 } else {
2359bf80 9099 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9100 op = (insn >> 4) & 0x3;
2359bf80
MR
9101 switch (op2) {
9102 case 0:
426f5abc 9103 goto illegal_op;
2359bf80
MR
9104 case 1:
9105 /* Load/store exclusive byte/halfword/doubleword */
9106 if (op == 2) {
9107 goto illegal_op;
9108 }
9109 ARCH(7);
9110 break;
9111 case 2:
9112 /* Load-acquire/store-release */
9113 if (op == 3) {
9114 goto illegal_op;
9115 }
9116 /* Fall through */
9117 case 3:
9118 /* Load-acquire/store-release exclusive */
9119 ARCH(8);
9120 break;
426f5abc 9121 }
39d5492a 9122 addr = tcg_temp_local_new_i32();
98a46317 9123 load_reg_var(s, addr, rn);
2359bf80
MR
9124 if (!(op2 & 1)) {
9125 if (insn & (1 << 20)) {
9126 tmp = tcg_temp_new_i32();
9127 switch (op) {
9128 case 0: /* ldab */
6ce2faf4 9129 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
9130 break;
9131 case 1: /* ldah */
6ce2faf4 9132 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
9133 break;
9134 case 2: /* lda */
6ce2faf4 9135 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
9136 break;
9137 default:
9138 abort();
9139 }
9140 store_reg(s, rs, tmp);
9141 } else {
9142 tmp = load_reg(s, rs);
9143 switch (op) {
9144 case 0: /* stlb */
6ce2faf4 9145 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
9146 break;
9147 case 1: /* stlh */
6ce2faf4 9148 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
9149 break;
9150 case 2: /* stl */
6ce2faf4 9151 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
9152 break;
9153 default:
9154 abort();
9155 }
9156 tcg_temp_free_i32(tmp);
9157 }
9158 } else if (insn & (1 << 20)) {
426f5abc 9159 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9160 } else {
426f5abc 9161 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9162 }
39d5492a 9163 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9164 }
9165 } else {
9166 /* Load/store multiple, RFE, SRS. */
9167 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
9168 /* RFE, SRS: not available in user mode or on M profile */
9169 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 9170 goto illegal_op;
00115976 9171 }
9ee6e8bb
PB
9172 if (insn & (1 << 20)) {
9173 /* rfe */
b0109805
PB
9174 addr = load_reg(s, rn);
9175 if ((insn & (1 << 24)) == 0)
9176 tcg_gen_addi_i32(addr, addr, -8);
9177 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9178 tmp = tcg_temp_new_i32();
6ce2faf4 9179 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9180 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9181 tmp2 = tcg_temp_new_i32();
6ce2faf4 9182 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9183 if (insn & (1 << 21)) {
9184 /* Base writeback. */
b0109805
PB
9185 if (insn & (1 << 24)) {
9186 tcg_gen_addi_i32(addr, addr, 4);
9187 } else {
9188 tcg_gen_addi_i32(addr, addr, -4);
9189 }
9190 store_reg(s, rn, addr);
9191 } else {
7d1b0095 9192 tcg_temp_free_i32(addr);
9ee6e8bb 9193 }
b0109805 9194 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9195 } else {
9196 /* srs */
81465888
PM
9197 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9198 insn & (1 << 21));
9ee6e8bb
PB
9199 }
9200 } else {
5856d44e 9201 int i, loaded_base = 0;
39d5492a 9202 TCGv_i32 loaded_var;
9ee6e8bb 9203 /* Load/store multiple. */
b0109805 9204 addr = load_reg(s, rn);
9ee6e8bb
PB
9205 offset = 0;
9206 for (i = 0; i < 16; i++) {
9207 if (insn & (1 << i))
9208 offset += 4;
9209 }
9210 if (insn & (1 << 24)) {
b0109805 9211 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9212 }
9213
39d5492a 9214 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9215 for (i = 0; i < 16; i++) {
9216 if ((insn & (1 << i)) == 0)
9217 continue;
9218 if (insn & (1 << 20)) {
9219 /* Load. */
e2592fad 9220 tmp = tcg_temp_new_i32();
6ce2faf4 9221 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 9222 if (i == 15) {
b0109805 9223 gen_bx(s, tmp);
5856d44e
YO
9224 } else if (i == rn) {
9225 loaded_var = tmp;
9226 loaded_base = 1;
9ee6e8bb 9227 } else {
b0109805 9228 store_reg(s, i, tmp);
9ee6e8bb
PB
9229 }
9230 } else {
9231 /* Store. */
b0109805 9232 tmp = load_reg(s, i);
6ce2faf4 9233 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9234 tcg_temp_free_i32(tmp);
9ee6e8bb 9235 }
b0109805 9236 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9237 }
5856d44e
YO
9238 if (loaded_base) {
9239 store_reg(s, rn, loaded_var);
9240 }
9ee6e8bb
PB
9241 if (insn & (1 << 21)) {
9242 /* Base register writeback. */
9243 if (insn & (1 << 24)) {
b0109805 9244 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9245 }
9246 /* Fault if writeback register is in register list. */
9247 if (insn & (1 << rn))
9248 goto illegal_op;
b0109805
PB
9249 store_reg(s, rn, addr);
9250 } else {
7d1b0095 9251 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9252 }
9253 }
9254 }
9255 break;
2af9ab77
JB
9256 case 5:
9257
9ee6e8bb 9258 op = (insn >> 21) & 0xf;
2af9ab77
JB
9259 if (op == 6) {
9260 /* Halfword pack. */
9261 tmp = load_reg(s, rn);
9262 tmp2 = load_reg(s, rm);
9263 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9264 if (insn & (1 << 5)) {
9265 /* pkhtb */
9266 if (shift == 0)
9267 shift = 31;
9268 tcg_gen_sari_i32(tmp2, tmp2, shift);
9269 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9270 tcg_gen_ext16u_i32(tmp2, tmp2);
9271 } else {
9272 /* pkhbt */
9273 if (shift)
9274 tcg_gen_shli_i32(tmp2, tmp2, shift);
9275 tcg_gen_ext16u_i32(tmp, tmp);
9276 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9277 }
9278 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9279 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9280 store_reg(s, rd, tmp);
9281 } else {
2af9ab77
JB
9282 /* Data processing register constant shift. */
9283 if (rn == 15) {
7d1b0095 9284 tmp = tcg_temp_new_i32();
2af9ab77
JB
9285 tcg_gen_movi_i32(tmp, 0);
9286 } else {
9287 tmp = load_reg(s, rn);
9288 }
9289 tmp2 = load_reg(s, rm);
9290
9291 shiftop = (insn >> 4) & 3;
9292 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9293 conds = (insn & (1 << 20)) != 0;
9294 logic_cc = (conds && thumb2_logic_op(op));
9295 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9296 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9297 goto illegal_op;
7d1b0095 9298 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9299 if (rd != 15) {
9300 store_reg(s, rd, tmp);
9301 } else {
7d1b0095 9302 tcg_temp_free_i32(tmp);
2af9ab77 9303 }
3174f8e9 9304 }
9ee6e8bb
PB
9305 break;
9306 case 13: /* Misc data processing. */
9307 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9308 if (op < 4 && (insn & 0xf000) != 0xf000)
9309 goto illegal_op;
9310 switch (op) {
9311 case 0: /* Register controlled shift. */
8984bd2e
PB
9312 tmp = load_reg(s, rn);
9313 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9314 if ((insn & 0x70) != 0)
9315 goto illegal_op;
9316 op = (insn >> 21) & 3;
8984bd2e
PB
9317 logic_cc = (insn & (1 << 20)) != 0;
9318 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9319 if (logic_cc)
9320 gen_logic_CC(tmp);
21aeb343 9321 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
9322 break;
9323 case 1: /* Sign/zero extend. */
5e3f878a 9324 tmp = load_reg(s, rm);
9ee6e8bb 9325 shift = (insn >> 4) & 3;
1301f322 9326 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9327 rotate, a shift is sufficient. */
9328 if (shift != 0)
f669df27 9329 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9330 op = (insn >> 20) & 7;
9331 switch (op) {
5e3f878a
PB
9332 case 0: gen_sxth(tmp); break;
9333 case 1: gen_uxth(tmp); break;
9334 case 2: gen_sxtb16(tmp); break;
9335 case 3: gen_uxtb16(tmp); break;
9336 case 4: gen_sxtb(tmp); break;
9337 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
9338 default: goto illegal_op;
9339 }
9340 if (rn != 15) {
5e3f878a 9341 tmp2 = load_reg(s, rn);
9ee6e8bb 9342 if ((op >> 1) == 1) {
5e3f878a 9343 gen_add16(tmp, tmp2);
9ee6e8bb 9344 } else {
5e3f878a 9345 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9346 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9347 }
9348 }
5e3f878a 9349 store_reg(s, rd, tmp);
9ee6e8bb
PB
9350 break;
9351 case 2: /* SIMD add/subtract. */
9352 op = (insn >> 20) & 7;
9353 shift = (insn >> 4) & 7;
9354 if ((op & 3) == 3 || (shift & 3) == 3)
9355 goto illegal_op;
6ddbc6e4
PB
9356 tmp = load_reg(s, rn);
9357 tmp2 = load_reg(s, rm);
9358 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9359 tcg_temp_free_i32(tmp2);
6ddbc6e4 9360 store_reg(s, rd, tmp);
9ee6e8bb
PB
9361 break;
9362 case 3: /* Other data processing. */
9363 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9364 if (op < 4) {
9365 /* Saturating add/subtract. */
d9ba4830
PB
9366 tmp = load_reg(s, rn);
9367 tmp2 = load_reg(s, rm);
9ee6e8bb 9368 if (op & 1)
9ef39277 9369 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9370 if (op & 2)
9ef39277 9371 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9372 else
9ef39277 9373 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9374 tcg_temp_free_i32(tmp2);
9ee6e8bb 9375 } else {
d9ba4830 9376 tmp = load_reg(s, rn);
9ee6e8bb
PB
9377 switch (op) {
9378 case 0x0a: /* rbit */
d9ba4830 9379 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9380 break;
9381 case 0x08: /* rev */
66896cb8 9382 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9383 break;
9384 case 0x09: /* rev16 */
d9ba4830 9385 gen_rev16(tmp);
9ee6e8bb
PB
9386 break;
9387 case 0x0b: /* revsh */
d9ba4830 9388 gen_revsh(tmp);
9ee6e8bb
PB
9389 break;
9390 case 0x10: /* sel */
d9ba4830 9391 tmp2 = load_reg(s, rm);
7d1b0095 9392 tmp3 = tcg_temp_new_i32();
0ecb72a5 9393 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9394 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9395 tcg_temp_free_i32(tmp3);
9396 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9397 break;
9398 case 0x18: /* clz */
d9ba4830 9399 gen_helper_clz(tmp, tmp);
9ee6e8bb 9400 break;
eb0ecd5a
WN
9401 case 0x20:
9402 case 0x21:
9403 case 0x22:
9404 case 0x28:
9405 case 0x29:
9406 case 0x2a:
9407 {
9408 /* crc32/crc32c */
9409 uint32_t sz = op & 0x3;
9410 uint32_t c = op & 0x8;
9411
9412 if (!arm_feature(env, ARM_FEATURE_CRC)) {
9413 goto illegal_op;
9414 }
9415
9416 tmp2 = load_reg(s, rm);
9417 tmp3 = tcg_const_i32(1 << sz);
9418 if (c) {
9419 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9420 } else {
9421 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9422 }
9423 tcg_temp_free_i32(tmp2);
9424 tcg_temp_free_i32(tmp3);
9425 break;
9426 }
9ee6e8bb
PB
9427 default:
9428 goto illegal_op;
9429 }
9430 }
d9ba4830 9431 store_reg(s, rd, tmp);
9ee6e8bb
PB
9432 break;
9433 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9434 op = (insn >> 4) & 0xf;
d9ba4830
PB
9435 tmp = load_reg(s, rn);
9436 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9437 switch ((insn >> 20) & 7) {
9438 case 0: /* 32 x 32 -> 32 */
d9ba4830 9439 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9440 tcg_temp_free_i32(tmp2);
9ee6e8bb 9441 if (rs != 15) {
d9ba4830 9442 tmp2 = load_reg(s, rs);
9ee6e8bb 9443 if (op)
d9ba4830 9444 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9445 else
d9ba4830 9446 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9447 tcg_temp_free_i32(tmp2);
9ee6e8bb 9448 }
9ee6e8bb
PB
9449 break;
9450 case 1: /* 16 x 16 -> 32 */
d9ba4830 9451 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9452 tcg_temp_free_i32(tmp2);
9ee6e8bb 9453 if (rs != 15) {
d9ba4830 9454 tmp2 = load_reg(s, rs);
9ef39277 9455 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9456 tcg_temp_free_i32(tmp2);
9ee6e8bb 9457 }
9ee6e8bb
PB
9458 break;
9459 case 2: /* Dual multiply add. */
9460 case 4: /* Dual multiply subtract. */
9461 if (op)
d9ba4830
PB
9462 gen_swap_half(tmp2);
9463 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9464 if (insn & (1 << 22)) {
e1d177b9 9465 /* This subtraction cannot overflow. */
d9ba4830 9466 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9467 } else {
e1d177b9
PM
9468 /* This addition cannot overflow 32 bits;
9469 * however it may overflow considered as a signed
9470 * operation, in which case we must set the Q flag.
9471 */
9ef39277 9472 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9473 }
7d1b0095 9474 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9475 if (rs != 15)
9476 {
d9ba4830 9477 tmp2 = load_reg(s, rs);
9ef39277 9478 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9479 tcg_temp_free_i32(tmp2);
9ee6e8bb 9480 }
9ee6e8bb
PB
9481 break;
9482 case 3: /* 32 * 16 -> 32msb */
9483 if (op)
d9ba4830 9484 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9485 else
d9ba4830 9486 gen_sxth(tmp2);
a7812ae4
PB
9487 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9488 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9489 tmp = tcg_temp_new_i32();
a7812ae4 9490 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 9491 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9492 if (rs != 15)
9493 {
d9ba4830 9494 tmp2 = load_reg(s, rs);
9ef39277 9495 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9496 tcg_temp_free_i32(tmp2);
9ee6e8bb 9497 }
9ee6e8bb 9498 break;
838fa72d
AJ
9499 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9500 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9501 if (rs != 15) {
838fa72d
AJ
9502 tmp = load_reg(s, rs);
9503 if (insn & (1 << 20)) {
9504 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9505 } else {
838fa72d 9506 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9507 }
2c0262af 9508 }
838fa72d
AJ
9509 if (insn & (1 << 4)) {
9510 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9511 }
9512 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9513 tmp = tcg_temp_new_i32();
838fa72d
AJ
9514 tcg_gen_trunc_i64_i32(tmp, tmp64);
9515 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9516 break;
9517 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9518 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9519 tcg_temp_free_i32(tmp2);
9ee6e8bb 9520 if (rs != 15) {
d9ba4830
PB
9521 tmp2 = load_reg(s, rs);
9522 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9523 tcg_temp_free_i32(tmp2);
5fd46862 9524 }
9ee6e8bb 9525 break;
2c0262af 9526 }
d9ba4830 9527 store_reg(s, rd, tmp);
2c0262af 9528 break;
9ee6e8bb
PB
9529 case 6: case 7: /* 64-bit multiply, Divide. */
9530 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9531 tmp = load_reg(s, rn);
9532 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9533 if ((op & 0x50) == 0x10) {
9534 /* sdiv, udiv */
47789990 9535 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9536 goto illegal_op;
47789990 9537 }
9ee6e8bb 9538 if (op & 0x20)
5e3f878a 9539 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9540 else
5e3f878a 9541 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9542 tcg_temp_free_i32(tmp2);
5e3f878a 9543 store_reg(s, rd, tmp);
9ee6e8bb
PB
9544 } else if ((op & 0xe) == 0xc) {
9545 /* Dual multiply accumulate long. */
9546 if (op & 1)
5e3f878a
PB
9547 gen_swap_half(tmp2);
9548 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9549 if (op & 0x10) {
5e3f878a 9550 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9551 } else {
5e3f878a 9552 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9553 }
7d1b0095 9554 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9555 /* BUGFIX */
9556 tmp64 = tcg_temp_new_i64();
9557 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9558 tcg_temp_free_i32(tmp);
a7812ae4
PB
9559 gen_addq(s, tmp64, rs, rd);
9560 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9561 tcg_temp_free_i64(tmp64);
2c0262af 9562 } else {
9ee6e8bb
PB
9563 if (op & 0x20) {
9564 /* Unsigned 64-bit multiply */
a7812ae4 9565 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9566 } else {
9ee6e8bb
PB
9567 if (op & 8) {
9568 /* smlalxy */
5e3f878a 9569 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9570 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9571 tmp64 = tcg_temp_new_i64();
9572 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9573 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9574 } else {
9575 /* Signed 64-bit multiply */
a7812ae4 9576 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9577 }
b5ff1b31 9578 }
9ee6e8bb
PB
9579 if (op & 4) {
9580 /* umaal */
a7812ae4
PB
9581 gen_addq_lo(s, tmp64, rs);
9582 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9583 } else if (op & 0x40) {
9584 /* 64-bit accumulate. */
a7812ae4 9585 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9586 }
a7812ae4 9587 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9588 tcg_temp_free_i64(tmp64);
5fd46862 9589 }
2c0262af 9590 break;
9ee6e8bb
PB
9591 }
9592 break;
9593 case 6: case 7: case 14: case 15:
9594 /* Coprocessor. */
9595 if (((insn >> 24) & 3) == 3) {
9596 /* Translate into the equivalent ARM encoding. */
f06053e3 9597 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
9598 if (disas_neon_data_insn(env, s, insn))
9599 goto illegal_op;
6a57f3eb
WN
9600 } else if (((insn >> 8) & 0xe) == 10) {
9601 if (disas_vfp_insn(env, s, insn)) {
9602 goto illegal_op;
9603 }
9ee6e8bb
PB
9604 } else {
9605 if (insn & (1 << 28))
9606 goto illegal_op;
9607 if (disas_coproc_insn (env, s, insn))
9608 goto illegal_op;
9609 }
9610 break;
9611 case 8: case 9: case 10: case 11:
9612 if (insn & (1 << 15)) {
9613 /* Branches, misc control. */
9614 if (insn & 0x5000) {
9615 /* Unconditional branch. */
9616 /* signextend(hw1[10:0]) -> offset[:12]. */
9617 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9618 /* hw1[10:0] -> offset[11:1]. */
9619 offset |= (insn & 0x7ff) << 1;
9620 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9621 offset[24:22] already have the same value because of the
9622 sign extension above. */
9623 offset ^= ((~insn) & (1 << 13)) << 10;
9624 offset ^= ((~insn) & (1 << 11)) << 11;
9625
9ee6e8bb
PB
9626 if (insn & (1 << 14)) {
9627 /* Branch and link. */
3174f8e9 9628 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9629 }
3b46e624 9630
b0109805 9631 offset += s->pc;
9ee6e8bb
PB
9632 if (insn & (1 << 12)) {
9633 /* b/bl */
b0109805 9634 gen_jmp(s, offset);
9ee6e8bb
PB
9635 } else {
9636 /* blx */
b0109805 9637 offset &= ~(uint32_t)2;
be5e7a76 9638 /* thumb2 bx, no need to check */
b0109805 9639 gen_bx_im(s, offset);
2c0262af 9640 }
9ee6e8bb
PB
9641 } else if (((insn >> 23) & 7) == 7) {
9642 /* Misc control */
9643 if (insn & (1 << 13))
9644 goto illegal_op;
9645
9646 if (insn & (1 << 26)) {
9647 /* Secure monitor call (v6Z) */
e0c270d9
SW
9648 qemu_log_mask(LOG_UNIMP,
9649 "arm: unimplemented secure monitor call\n");
9ee6e8bb 9650 goto illegal_op; /* not implemented. */
2c0262af 9651 } else {
9ee6e8bb
PB
9652 op = (insn >> 20) & 7;
9653 switch (op) {
9654 case 0: /* msr cpsr. */
9655 if (IS_M(env)) {
8984bd2e
PB
9656 tmp = load_reg(s, rn);
9657 addr = tcg_const_i32(insn & 0xff);
9658 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9659 tcg_temp_free_i32(addr);
7d1b0095 9660 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9661 gen_lookup_tb(s);
9662 break;
9663 }
9664 /* fall through */
9665 case 1: /* msr spsr. */
9666 if (IS_M(env))
9667 goto illegal_op;
2fbac54b
FN
9668 tmp = load_reg(s, rn);
9669 if (gen_set_psr(s,
9ee6e8bb 9670 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 9671 op == 1, tmp))
9ee6e8bb
PB
9672 goto illegal_op;
9673 break;
9674 case 2: /* cps, nop-hint. */
9675 if (((insn >> 8) & 7) == 0) {
9676 gen_nop_hint(s, insn & 0xff);
9677 }
9678 /* Implemented as NOP in user mode. */
9679 if (IS_USER(s))
9680 break;
9681 offset = 0;
9682 imm = 0;
9683 if (insn & (1 << 10)) {
9684 if (insn & (1 << 7))
9685 offset |= CPSR_A;
9686 if (insn & (1 << 6))
9687 offset |= CPSR_I;
9688 if (insn & (1 << 5))
9689 offset |= CPSR_F;
9690 if (insn & (1 << 9))
9691 imm = CPSR_A | CPSR_I | CPSR_F;
9692 }
9693 if (insn & (1 << 8)) {
9694 offset |= 0x1f;
9695 imm |= (insn & 0x1f);
9696 }
9697 if (offset) {
2fbac54b 9698 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
9699 }
9700 break;
9701 case 3: /* Special control operations. */
426f5abc 9702 ARCH(7);
9ee6e8bb
PB
9703 op = (insn >> 4) & 0xf;
9704 switch (op) {
9705 case 2: /* clrex */
426f5abc 9706 gen_clrex(s);
9ee6e8bb
PB
9707 break;
9708 case 4: /* dsb */
9709 case 5: /* dmb */
9710 case 6: /* isb */
9711 /* These execute as NOPs. */
9ee6e8bb
PB
9712 break;
9713 default:
9714 goto illegal_op;
9715 }
9716 break;
9717 case 4: /* bxj */
9718 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9719 tmp = load_reg(s, rn);
9720 gen_bx(s, tmp);
9ee6e8bb
PB
9721 break;
9722 case 5: /* Exception return. */
b8b45b68
RV
9723 if (IS_USER(s)) {
9724 goto illegal_op;
9725 }
9726 if (rn != 14 || rd != 15) {
9727 goto illegal_op;
9728 }
9729 tmp = load_reg(s, rn);
9730 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9731 gen_exception_return(s, tmp);
9732 break;
9ee6e8bb 9733 case 6: /* mrs cpsr. */
7d1b0095 9734 tmp = tcg_temp_new_i32();
9ee6e8bb 9735 if (IS_M(env)) {
8984bd2e
PB
9736 addr = tcg_const_i32(insn & 0xff);
9737 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 9738 tcg_temp_free_i32(addr);
9ee6e8bb 9739 } else {
9ef39277 9740 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9741 }
8984bd2e 9742 store_reg(s, rd, tmp);
9ee6e8bb
PB
9743 break;
9744 case 7: /* mrs spsr. */
9745 /* Not accessible in user mode. */
9746 if (IS_USER(s) || IS_M(env))
9747 goto illegal_op;
d9ba4830
PB
9748 tmp = load_cpu_field(spsr);
9749 store_reg(s, rd, tmp);
9ee6e8bb 9750 break;
2c0262af
FB
9751 }
9752 }
9ee6e8bb
PB
9753 } else {
9754 /* Conditional branch. */
9755 op = (insn >> 22) & 0xf;
9756 /* Generate a conditional jump to next instruction. */
9757 s->condlabel = gen_new_label();
39fb730a 9758 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
9759 s->condjmp = 1;
9760
9761 /* offset[11:1] = insn[10:0] */
9762 offset = (insn & 0x7ff) << 1;
9763 /* offset[17:12] = insn[21:16]. */
9764 offset |= (insn & 0x003f0000) >> 4;
9765 /* offset[31:20] = insn[26]. */
9766 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9767 /* offset[18] = insn[13]. */
9768 offset |= (insn & (1 << 13)) << 5;
9769 /* offset[19] = insn[11]. */
9770 offset |= (insn & (1 << 11)) << 8;
9771
9772 /* jump to the offset */
b0109805 9773 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
9774 }
9775 } else {
9776 /* Data processing immediate. */
9777 if (insn & (1 << 25)) {
9778 if (insn & (1 << 24)) {
9779 if (insn & (1 << 20))
9780 goto illegal_op;
9781 /* Bitfield/Saturate. */
9782 op = (insn >> 21) & 7;
9783 imm = insn & 0x1f;
9784 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 9785 if (rn == 15) {
7d1b0095 9786 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
9787 tcg_gen_movi_i32(tmp, 0);
9788 } else {
9789 tmp = load_reg(s, rn);
9790 }
9ee6e8bb
PB
9791 switch (op) {
9792 case 2: /* Signed bitfield extract. */
9793 imm++;
9794 if (shift + imm > 32)
9795 goto illegal_op;
9796 if (imm < 32)
6ddbc6e4 9797 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
9798 break;
9799 case 6: /* Unsigned bitfield extract. */
9800 imm++;
9801 if (shift + imm > 32)
9802 goto illegal_op;
9803 if (imm < 32)
6ddbc6e4 9804 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
9805 break;
9806 case 3: /* Bitfield insert/clear. */
9807 if (imm < shift)
9808 goto illegal_op;
9809 imm = imm + 1 - shift;
9810 if (imm != 32) {
6ddbc6e4 9811 tmp2 = load_reg(s, rd);
d593c48e 9812 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 9813 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9814 }
9815 break;
9816 case 7:
9817 goto illegal_op;
9818 default: /* Saturate. */
9ee6e8bb
PB
9819 if (shift) {
9820 if (op & 1)
6ddbc6e4 9821 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9822 else
6ddbc6e4 9823 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 9824 }
6ddbc6e4 9825 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
9826 if (op & 4) {
9827 /* Unsigned. */
9ee6e8bb 9828 if ((op & 1) && shift == 0)
9ef39277 9829 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9830 else
9ef39277 9831 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 9832 } else {
9ee6e8bb 9833 /* Signed. */
9ee6e8bb 9834 if ((op & 1) && shift == 0)
9ef39277 9835 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9836 else
9ef39277 9837 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 9838 }
b75263d6 9839 tcg_temp_free_i32(tmp2);
9ee6e8bb 9840 break;
2c0262af 9841 }
6ddbc6e4 9842 store_reg(s, rd, tmp);
9ee6e8bb
PB
9843 } else {
9844 imm = ((insn & 0x04000000) >> 15)
9845 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9846 if (insn & (1 << 22)) {
9847 /* 16-bit immediate. */
9848 imm |= (insn >> 4) & 0xf000;
9849 if (insn & (1 << 23)) {
9850 /* movt */
5e3f878a 9851 tmp = load_reg(s, rd);
86831435 9852 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9853 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9854 } else {
9ee6e8bb 9855 /* movw */
7d1b0095 9856 tmp = tcg_temp_new_i32();
5e3f878a 9857 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9858 }
9859 } else {
9ee6e8bb
PB
9860 /* Add/sub 12-bit immediate. */
9861 if (rn == 15) {
b0109805 9862 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9863 if (insn & (1 << 23))
b0109805 9864 offset -= imm;
9ee6e8bb 9865 else
b0109805 9866 offset += imm;
7d1b0095 9867 tmp = tcg_temp_new_i32();
5e3f878a 9868 tcg_gen_movi_i32(tmp, offset);
2c0262af 9869 } else {
5e3f878a 9870 tmp = load_reg(s, rn);
9ee6e8bb 9871 if (insn & (1 << 23))
5e3f878a 9872 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 9873 else
5e3f878a 9874 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 9875 }
9ee6e8bb 9876 }
5e3f878a 9877 store_reg(s, rd, tmp);
191abaa2 9878 }
9ee6e8bb
PB
9879 } else {
9880 int shifter_out = 0;
9881 /* modified 12-bit immediate. */
9882 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9883 imm = (insn & 0xff);
9884 switch (shift) {
9885 case 0: /* XY */
9886 /* Nothing to do. */
9887 break;
9888 case 1: /* 00XY00XY */
9889 imm |= imm << 16;
9890 break;
9891 case 2: /* XY00XY00 */
9892 imm |= imm << 16;
9893 imm <<= 8;
9894 break;
9895 case 3: /* XYXYXYXY */
9896 imm |= imm << 16;
9897 imm |= imm << 8;
9898 break;
9899 default: /* Rotated constant. */
9900 shift = (shift << 1) | (imm >> 7);
9901 imm |= 0x80;
9902 imm = imm << (32 - shift);
9903 shifter_out = 1;
9904 break;
b5ff1b31 9905 }
7d1b0095 9906 tmp2 = tcg_temp_new_i32();
3174f8e9 9907 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9908 rn = (insn >> 16) & 0xf;
3174f8e9 9909 if (rn == 15) {
7d1b0095 9910 tmp = tcg_temp_new_i32();
3174f8e9
FN
9911 tcg_gen_movi_i32(tmp, 0);
9912 } else {
9913 tmp = load_reg(s, rn);
9914 }
9ee6e8bb
PB
9915 op = (insn >> 21) & 0xf;
9916 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9917 shifter_out, tmp, tmp2))
9ee6e8bb 9918 goto illegal_op;
7d1b0095 9919 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9920 rd = (insn >> 8) & 0xf;
9921 if (rd != 15) {
3174f8e9
FN
9922 store_reg(s, rd, tmp);
9923 } else {
7d1b0095 9924 tcg_temp_free_i32(tmp);
2c0262af 9925 }
2c0262af 9926 }
9ee6e8bb
PB
9927 }
9928 break;
9929 case 12: /* Load/store single data item. */
9930 {
9931 int postinc = 0;
9932 int writeback = 0;
a99caa48 9933 int memidx;
9ee6e8bb
PB
9934 if ((insn & 0x01100000) == 0x01000000) {
9935 if (disas_neon_ls_insn(env, s, insn))
c1713132 9936 goto illegal_op;
9ee6e8bb
PB
9937 break;
9938 }
a2fdc890
PM
9939 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9940 if (rs == 15) {
9941 if (!(insn & (1 << 20))) {
9942 goto illegal_op;
9943 }
9944 if (op != 2) {
9945 /* Byte or halfword load space with dest == r15 : memory hints.
9946 * Catch them early so we don't emit pointless addressing code.
9947 * This space is a mix of:
9948 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9949 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9950 * cores)
9951 * unallocated hints, which must be treated as NOPs
9952 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9953 * which is easiest for the decoding logic
9954 * Some space which must UNDEF
9955 */
9956 int op1 = (insn >> 23) & 3;
9957 int op2 = (insn >> 6) & 0x3f;
9958 if (op & 2) {
9959 goto illegal_op;
9960 }
9961 if (rn == 15) {
02afbf64
PM
9962 /* UNPREDICTABLE, unallocated hint or
9963 * PLD/PLDW/PLI (literal)
9964 */
a2fdc890
PM
9965 return 0;
9966 }
9967 if (op1 & 1) {
02afbf64 9968 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9969 }
9970 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9971 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9972 }
9973 /* UNDEF space, or an UNPREDICTABLE */
9974 return 1;
9975 }
9976 }
a99caa48 9977 memidx = get_mem_index(s);
9ee6e8bb 9978 if (rn == 15) {
7d1b0095 9979 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9980 /* PC relative. */
9981 /* s->pc has already been incremented by 4. */
9982 imm = s->pc & 0xfffffffc;
9983 if (insn & (1 << 23))
9984 imm += insn & 0xfff;
9985 else
9986 imm -= insn & 0xfff;
b0109805 9987 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9988 } else {
b0109805 9989 addr = load_reg(s, rn);
9ee6e8bb
PB
9990 if (insn & (1 << 23)) {
9991 /* Positive offset. */
9992 imm = insn & 0xfff;
b0109805 9993 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9994 } else {
9ee6e8bb 9995 imm = insn & 0xff;
2a0308c5
PM
9996 switch ((insn >> 8) & 0xf) {
9997 case 0x0: /* Shifted Register. */
9ee6e8bb 9998 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9999 if (shift > 3) {
10000 tcg_temp_free_i32(addr);
18c9b560 10001 goto illegal_op;
2a0308c5 10002 }
b26eefb6 10003 tmp = load_reg(s, rm);
9ee6e8bb 10004 if (shift)
b26eefb6 10005 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10006 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10007 tcg_temp_free_i32(tmp);
9ee6e8bb 10008 break;
2a0308c5 10009 case 0xc: /* Negative offset. */
b0109805 10010 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10011 break;
2a0308c5 10012 case 0xe: /* User privilege. */
b0109805 10013 tcg_gen_addi_i32(addr, addr, imm);
a99caa48 10014 memidx = MMU_USER_IDX;
9ee6e8bb 10015 break;
2a0308c5 10016 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10017 imm = -imm;
10018 /* Fall through. */
2a0308c5 10019 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10020 postinc = 1;
10021 writeback = 1;
10022 break;
2a0308c5 10023 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10024 imm = -imm;
10025 /* Fall through. */
2a0308c5 10026 case 0xf: /* Pre-increment. */
b0109805 10027 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10028 writeback = 1;
10029 break;
10030 default:
2a0308c5 10031 tcg_temp_free_i32(addr);
b7bcbe95 10032 goto illegal_op;
9ee6e8bb
PB
10033 }
10034 }
10035 }
9ee6e8bb
PB
10036 if (insn & (1 << 20)) {
10037 /* Load. */
5a839c0d 10038 tmp = tcg_temp_new_i32();
a2fdc890 10039 switch (op) {
5a839c0d 10040 case 0:
a99caa48 10041 gen_aa32_ld8u(tmp, addr, memidx);
5a839c0d
PM
10042 break;
10043 case 4:
a99caa48 10044 gen_aa32_ld8s(tmp, addr, memidx);
5a839c0d
PM
10045 break;
10046 case 1:
a99caa48 10047 gen_aa32_ld16u(tmp, addr, memidx);
5a839c0d
PM
10048 break;
10049 case 5:
a99caa48 10050 gen_aa32_ld16s(tmp, addr, memidx);
5a839c0d
PM
10051 break;
10052 case 2:
a99caa48 10053 gen_aa32_ld32u(tmp, addr, memidx);
5a839c0d 10054 break;
2a0308c5 10055 default:
5a839c0d 10056 tcg_temp_free_i32(tmp);
2a0308c5
PM
10057 tcg_temp_free_i32(addr);
10058 goto illegal_op;
a2fdc890
PM
10059 }
10060 if (rs == 15) {
10061 gen_bx(s, tmp);
9ee6e8bb 10062 } else {
a2fdc890 10063 store_reg(s, rs, tmp);
9ee6e8bb
PB
10064 }
10065 } else {
10066 /* Store. */
b0109805 10067 tmp = load_reg(s, rs);
9ee6e8bb 10068 switch (op) {
5a839c0d 10069 case 0:
a99caa48 10070 gen_aa32_st8(tmp, addr, memidx);
5a839c0d
PM
10071 break;
10072 case 1:
a99caa48 10073 gen_aa32_st16(tmp, addr, memidx);
5a839c0d
PM
10074 break;
10075 case 2:
a99caa48 10076 gen_aa32_st32(tmp, addr, memidx);
5a839c0d 10077 break;
2a0308c5 10078 default:
5a839c0d 10079 tcg_temp_free_i32(tmp);
2a0308c5
PM
10080 tcg_temp_free_i32(addr);
10081 goto illegal_op;
b7bcbe95 10082 }
5a839c0d 10083 tcg_temp_free_i32(tmp);
2c0262af 10084 }
9ee6e8bb 10085 if (postinc)
b0109805
PB
10086 tcg_gen_addi_i32(addr, addr, imm);
10087 if (writeback) {
10088 store_reg(s, rn, addr);
10089 } else {
7d1b0095 10090 tcg_temp_free_i32(addr);
b0109805 10091 }
9ee6e8bb
PB
10092 }
10093 break;
10094 default:
10095 goto illegal_op;
2c0262af 10096 }
9ee6e8bb
PB
10097 return 0;
10098illegal_op:
10099 return 1;
2c0262af
FB
10100}
10101
0ecb72a5 10102static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10103{
10104 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10105 int32_t offset;
10106 int i;
39d5492a
PM
10107 TCGv_i32 tmp;
10108 TCGv_i32 tmp2;
10109 TCGv_i32 addr;
99c475ab 10110
9ee6e8bb
PB
10111 if (s->condexec_mask) {
10112 cond = s->condexec_cond;
bedd2912
JB
10113 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10114 s->condlabel = gen_new_label();
39fb730a 10115 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10116 s->condjmp = 1;
10117 }
9ee6e8bb
PB
10118 }
10119
d31dd73e 10120 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 10121 s->pc += 2;
b5ff1b31 10122
99c475ab
FB
10123 switch (insn >> 12) {
10124 case 0: case 1:
396e467c 10125
99c475ab
FB
10126 rd = insn & 7;
10127 op = (insn >> 11) & 3;
10128 if (op == 3) {
10129 /* add/subtract */
10130 rn = (insn >> 3) & 7;
396e467c 10131 tmp = load_reg(s, rn);
99c475ab
FB
10132 if (insn & (1 << 10)) {
10133 /* immediate */
7d1b0095 10134 tmp2 = tcg_temp_new_i32();
396e467c 10135 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10136 } else {
10137 /* reg */
10138 rm = (insn >> 6) & 7;
396e467c 10139 tmp2 = load_reg(s, rm);
99c475ab 10140 }
9ee6e8bb
PB
10141 if (insn & (1 << 9)) {
10142 if (s->condexec_mask)
396e467c 10143 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10144 else
72485ec4 10145 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10146 } else {
10147 if (s->condexec_mask)
396e467c 10148 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10149 else
72485ec4 10150 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10151 }
7d1b0095 10152 tcg_temp_free_i32(tmp2);
396e467c 10153 store_reg(s, rd, tmp);
99c475ab
FB
10154 } else {
10155 /* shift immediate */
10156 rm = (insn >> 3) & 7;
10157 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10158 tmp = load_reg(s, rm);
10159 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10160 if (!s->condexec_mask)
10161 gen_logic_CC(tmp);
10162 store_reg(s, rd, tmp);
99c475ab
FB
10163 }
10164 break;
10165 case 2: case 3:
10166 /* arithmetic large immediate */
10167 op = (insn >> 11) & 3;
10168 rd = (insn >> 8) & 0x7;
396e467c 10169 if (op == 0) { /* mov */
7d1b0095 10170 tmp = tcg_temp_new_i32();
396e467c 10171 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10172 if (!s->condexec_mask)
396e467c
FN
10173 gen_logic_CC(tmp);
10174 store_reg(s, rd, tmp);
10175 } else {
10176 tmp = load_reg(s, rd);
7d1b0095 10177 tmp2 = tcg_temp_new_i32();
396e467c
FN
10178 tcg_gen_movi_i32(tmp2, insn & 0xff);
10179 switch (op) {
10180 case 1: /* cmp */
72485ec4 10181 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10182 tcg_temp_free_i32(tmp);
10183 tcg_temp_free_i32(tmp2);
396e467c
FN
10184 break;
10185 case 2: /* add */
10186 if (s->condexec_mask)
10187 tcg_gen_add_i32(tmp, tmp, tmp2);
10188 else
72485ec4 10189 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10190 tcg_temp_free_i32(tmp2);
396e467c
FN
10191 store_reg(s, rd, tmp);
10192 break;
10193 case 3: /* sub */
10194 if (s->condexec_mask)
10195 tcg_gen_sub_i32(tmp, tmp, tmp2);
10196 else
72485ec4 10197 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10198 tcg_temp_free_i32(tmp2);
396e467c
FN
10199 store_reg(s, rd, tmp);
10200 break;
10201 }
99c475ab 10202 }
99c475ab
FB
10203 break;
10204 case 4:
10205 if (insn & (1 << 11)) {
10206 rd = (insn >> 8) & 7;
5899f386
FB
10207 /* load pc-relative. Bit 1 of PC is ignored. */
10208 val = s->pc + 2 + ((insn & 0xff) * 4);
10209 val &= ~(uint32_t)2;
7d1b0095 10210 addr = tcg_temp_new_i32();
b0109805 10211 tcg_gen_movi_i32(addr, val);
c40c8556 10212 tmp = tcg_temp_new_i32();
6ce2faf4 10213 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7d1b0095 10214 tcg_temp_free_i32(addr);
b0109805 10215 store_reg(s, rd, tmp);
99c475ab
FB
10216 break;
10217 }
10218 if (insn & (1 << 10)) {
10219 /* data processing extended or blx */
10220 rd = (insn & 7) | ((insn >> 4) & 8);
10221 rm = (insn >> 3) & 0xf;
10222 op = (insn >> 8) & 3;
10223 switch (op) {
10224 case 0: /* add */
396e467c
FN
10225 tmp = load_reg(s, rd);
10226 tmp2 = load_reg(s, rm);
10227 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10228 tcg_temp_free_i32(tmp2);
396e467c 10229 store_reg(s, rd, tmp);
99c475ab
FB
10230 break;
10231 case 1: /* cmp */
396e467c
FN
10232 tmp = load_reg(s, rd);
10233 tmp2 = load_reg(s, rm);
72485ec4 10234 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10235 tcg_temp_free_i32(tmp2);
10236 tcg_temp_free_i32(tmp);
99c475ab
FB
10237 break;
10238 case 2: /* mov/cpy */
396e467c
FN
10239 tmp = load_reg(s, rm);
10240 store_reg(s, rd, tmp);
99c475ab
FB
10241 break;
10242 case 3:/* branch [and link] exchange thumb register */
b0109805 10243 tmp = load_reg(s, rm);
99c475ab 10244 if (insn & (1 << 7)) {
be5e7a76 10245 ARCH(5);
99c475ab 10246 val = (uint32_t)s->pc | 1;
7d1b0095 10247 tmp2 = tcg_temp_new_i32();
b0109805
PB
10248 tcg_gen_movi_i32(tmp2, val);
10249 store_reg(s, 14, tmp2);
99c475ab 10250 }
be5e7a76 10251 /* already thumb, no need to check */
d9ba4830 10252 gen_bx(s, tmp);
99c475ab
FB
10253 break;
10254 }
10255 break;
10256 }
10257
10258 /* data processing register */
10259 rd = insn & 7;
10260 rm = (insn >> 3) & 7;
10261 op = (insn >> 6) & 0xf;
10262 if (op == 2 || op == 3 || op == 4 || op == 7) {
10263 /* the shift/rotate ops want the operands backwards */
10264 val = rm;
10265 rm = rd;
10266 rd = val;
10267 val = 1;
10268 } else {
10269 val = 0;
10270 }
10271
396e467c 10272 if (op == 9) { /* neg */
7d1b0095 10273 tmp = tcg_temp_new_i32();
396e467c
FN
10274 tcg_gen_movi_i32(tmp, 0);
10275 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10276 tmp = load_reg(s, rd);
10277 } else {
39d5492a 10278 TCGV_UNUSED_I32(tmp);
396e467c 10279 }
99c475ab 10280
396e467c 10281 tmp2 = load_reg(s, rm);
5899f386 10282 switch (op) {
99c475ab 10283 case 0x0: /* and */
396e467c 10284 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 10285 if (!s->condexec_mask)
396e467c 10286 gen_logic_CC(tmp);
99c475ab
FB
10287 break;
10288 case 0x1: /* eor */
396e467c 10289 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 10290 if (!s->condexec_mask)
396e467c 10291 gen_logic_CC(tmp);
99c475ab
FB
10292 break;
10293 case 0x2: /* lsl */
9ee6e8bb 10294 if (s->condexec_mask) {
365af80e 10295 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 10296 } else {
9ef39277 10297 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10298 gen_logic_CC(tmp2);
9ee6e8bb 10299 }
99c475ab
FB
10300 break;
10301 case 0x3: /* lsr */
9ee6e8bb 10302 if (s->condexec_mask) {
365af80e 10303 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 10304 } else {
9ef39277 10305 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10306 gen_logic_CC(tmp2);
9ee6e8bb 10307 }
99c475ab
FB
10308 break;
10309 case 0x4: /* asr */
9ee6e8bb 10310 if (s->condexec_mask) {
365af80e 10311 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 10312 } else {
9ef39277 10313 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10314 gen_logic_CC(tmp2);
9ee6e8bb 10315 }
99c475ab
FB
10316 break;
10317 case 0x5: /* adc */
49b4c31e 10318 if (s->condexec_mask) {
396e467c 10319 gen_adc(tmp, tmp2);
49b4c31e
RH
10320 } else {
10321 gen_adc_CC(tmp, tmp, tmp2);
10322 }
99c475ab
FB
10323 break;
10324 case 0x6: /* sbc */
2de68a49 10325 if (s->condexec_mask) {
396e467c 10326 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
10327 } else {
10328 gen_sbc_CC(tmp, tmp, tmp2);
10329 }
99c475ab
FB
10330 break;
10331 case 0x7: /* ror */
9ee6e8bb 10332 if (s->condexec_mask) {
f669df27
AJ
10333 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10334 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 10335 } else {
9ef39277 10336 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10337 gen_logic_CC(tmp2);
9ee6e8bb 10338 }
99c475ab
FB
10339 break;
10340 case 0x8: /* tst */
396e467c
FN
10341 tcg_gen_and_i32(tmp, tmp, tmp2);
10342 gen_logic_CC(tmp);
99c475ab 10343 rd = 16;
5899f386 10344 break;
99c475ab 10345 case 0x9: /* neg */
9ee6e8bb 10346 if (s->condexec_mask)
396e467c 10347 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 10348 else
72485ec4 10349 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10350 break;
10351 case 0xa: /* cmp */
72485ec4 10352 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10353 rd = 16;
10354 break;
10355 case 0xb: /* cmn */
72485ec4 10356 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
10357 rd = 16;
10358 break;
10359 case 0xc: /* orr */
396e467c 10360 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 10361 if (!s->condexec_mask)
396e467c 10362 gen_logic_CC(tmp);
99c475ab
FB
10363 break;
10364 case 0xd: /* mul */
7b2919a0 10365 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 10366 if (!s->condexec_mask)
396e467c 10367 gen_logic_CC(tmp);
99c475ab
FB
10368 break;
10369 case 0xe: /* bic */
f669df27 10370 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 10371 if (!s->condexec_mask)
396e467c 10372 gen_logic_CC(tmp);
99c475ab
FB
10373 break;
10374 case 0xf: /* mvn */
396e467c 10375 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 10376 if (!s->condexec_mask)
396e467c 10377 gen_logic_CC(tmp2);
99c475ab 10378 val = 1;
5899f386 10379 rm = rd;
99c475ab
FB
10380 break;
10381 }
10382 if (rd != 16) {
396e467c
FN
10383 if (val) {
10384 store_reg(s, rm, tmp2);
10385 if (op != 0xf)
7d1b0095 10386 tcg_temp_free_i32(tmp);
396e467c
FN
10387 } else {
10388 store_reg(s, rd, tmp);
7d1b0095 10389 tcg_temp_free_i32(tmp2);
396e467c
FN
10390 }
10391 } else {
7d1b0095
PM
10392 tcg_temp_free_i32(tmp);
10393 tcg_temp_free_i32(tmp2);
99c475ab
FB
10394 }
10395 break;
10396
10397 case 5:
10398 /* load/store register offset. */
10399 rd = insn & 7;
10400 rn = (insn >> 3) & 7;
10401 rm = (insn >> 6) & 7;
10402 op = (insn >> 9) & 7;
b0109805 10403 addr = load_reg(s, rn);
b26eefb6 10404 tmp = load_reg(s, rm);
b0109805 10405 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10406 tcg_temp_free_i32(tmp);
99c475ab 10407
c40c8556 10408 if (op < 3) { /* store */
b0109805 10409 tmp = load_reg(s, rd);
c40c8556
PM
10410 } else {
10411 tmp = tcg_temp_new_i32();
10412 }
99c475ab
FB
10413
10414 switch (op) {
10415 case 0: /* str */
6ce2faf4 10416 gen_aa32_st32(tmp, addr, get_mem_index(s));
99c475ab
FB
10417 break;
10418 case 1: /* strh */
6ce2faf4 10419 gen_aa32_st16(tmp, addr, get_mem_index(s));
99c475ab
FB
10420 break;
10421 case 2: /* strb */
6ce2faf4 10422 gen_aa32_st8(tmp, addr, get_mem_index(s));
99c475ab
FB
10423 break;
10424 case 3: /* ldrsb */
6ce2faf4 10425 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
99c475ab
FB
10426 break;
10427 case 4: /* ldr */
6ce2faf4 10428 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10429 break;
10430 case 5: /* ldrh */
6ce2faf4 10431 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
99c475ab
FB
10432 break;
10433 case 6: /* ldrb */
6ce2faf4 10434 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
99c475ab
FB
10435 break;
10436 case 7: /* ldrsh */
6ce2faf4 10437 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
99c475ab
FB
10438 break;
10439 }
c40c8556 10440 if (op >= 3) { /* load */
b0109805 10441 store_reg(s, rd, tmp);
c40c8556
PM
10442 } else {
10443 tcg_temp_free_i32(tmp);
10444 }
7d1b0095 10445 tcg_temp_free_i32(addr);
99c475ab
FB
10446 break;
10447
10448 case 6:
10449 /* load/store word immediate offset */
10450 rd = insn & 7;
10451 rn = (insn >> 3) & 7;
b0109805 10452 addr = load_reg(s, rn);
99c475ab 10453 val = (insn >> 4) & 0x7c;
b0109805 10454 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10455
10456 if (insn & (1 << 11)) {
10457 /* load */
c40c8556 10458 tmp = tcg_temp_new_i32();
6ce2faf4 10459 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10460 store_reg(s, rd, tmp);
99c475ab
FB
10461 } else {
10462 /* store */
b0109805 10463 tmp = load_reg(s, rd);
6ce2faf4 10464 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10465 tcg_temp_free_i32(tmp);
99c475ab 10466 }
7d1b0095 10467 tcg_temp_free_i32(addr);
99c475ab
FB
10468 break;
10469
10470 case 7:
10471 /* load/store byte immediate offset */
10472 rd = insn & 7;
10473 rn = (insn >> 3) & 7;
b0109805 10474 addr = load_reg(s, rn);
99c475ab 10475 val = (insn >> 6) & 0x1f;
b0109805 10476 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10477
10478 if (insn & (1 << 11)) {
10479 /* load */
c40c8556 10480 tmp = tcg_temp_new_i32();
6ce2faf4 10481 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
b0109805 10482 store_reg(s, rd, tmp);
99c475ab
FB
10483 } else {
10484 /* store */
b0109805 10485 tmp = load_reg(s, rd);
6ce2faf4 10486 gen_aa32_st8(tmp, addr, get_mem_index(s));
c40c8556 10487 tcg_temp_free_i32(tmp);
99c475ab 10488 }
7d1b0095 10489 tcg_temp_free_i32(addr);
99c475ab
FB
10490 break;
10491
10492 case 8:
10493 /* load/store halfword immediate offset */
10494 rd = insn & 7;
10495 rn = (insn >> 3) & 7;
b0109805 10496 addr = load_reg(s, rn);
99c475ab 10497 val = (insn >> 5) & 0x3e;
b0109805 10498 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10499
10500 if (insn & (1 << 11)) {
10501 /* load */
c40c8556 10502 tmp = tcg_temp_new_i32();
6ce2faf4 10503 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
b0109805 10504 store_reg(s, rd, tmp);
99c475ab
FB
10505 } else {
10506 /* store */
b0109805 10507 tmp = load_reg(s, rd);
6ce2faf4 10508 gen_aa32_st16(tmp, addr, get_mem_index(s));
c40c8556 10509 tcg_temp_free_i32(tmp);
99c475ab 10510 }
7d1b0095 10511 tcg_temp_free_i32(addr);
99c475ab
FB
10512 break;
10513
10514 case 9:
10515 /* load/store from stack */
10516 rd = (insn >> 8) & 7;
b0109805 10517 addr = load_reg(s, 13);
99c475ab 10518 val = (insn & 0xff) * 4;
b0109805 10519 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10520
10521 if (insn & (1 << 11)) {
10522 /* load */
c40c8556 10523 tmp = tcg_temp_new_i32();
6ce2faf4 10524 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10525 store_reg(s, rd, tmp);
99c475ab
FB
10526 } else {
10527 /* store */
b0109805 10528 tmp = load_reg(s, rd);
6ce2faf4 10529 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10530 tcg_temp_free_i32(tmp);
99c475ab 10531 }
7d1b0095 10532 tcg_temp_free_i32(addr);
99c475ab
FB
10533 break;
10534
10535 case 10:
10536 /* add to high reg */
10537 rd = (insn >> 8) & 7;
5899f386
FB
10538 if (insn & (1 << 11)) {
10539 /* SP */
5e3f878a 10540 tmp = load_reg(s, 13);
5899f386
FB
10541 } else {
10542 /* PC. bit 1 is ignored. */
7d1b0095 10543 tmp = tcg_temp_new_i32();
5e3f878a 10544 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 10545 }
99c475ab 10546 val = (insn & 0xff) * 4;
5e3f878a
PB
10547 tcg_gen_addi_i32(tmp, tmp, val);
10548 store_reg(s, rd, tmp);
99c475ab
FB
10549 break;
10550
10551 case 11:
10552 /* misc */
10553 op = (insn >> 8) & 0xf;
10554 switch (op) {
10555 case 0:
10556 /* adjust stack pointer */
b26eefb6 10557 tmp = load_reg(s, 13);
99c475ab
FB
10558 val = (insn & 0x7f) * 4;
10559 if (insn & (1 << 7))
6a0d8a1d 10560 val = -(int32_t)val;
b26eefb6
PB
10561 tcg_gen_addi_i32(tmp, tmp, val);
10562 store_reg(s, 13, tmp);
99c475ab
FB
10563 break;
10564
9ee6e8bb
PB
10565 case 2: /* sign/zero extend. */
10566 ARCH(6);
10567 rd = insn & 7;
10568 rm = (insn >> 3) & 7;
b0109805 10569 tmp = load_reg(s, rm);
9ee6e8bb 10570 switch ((insn >> 6) & 3) {
b0109805
PB
10571 case 0: gen_sxth(tmp); break;
10572 case 1: gen_sxtb(tmp); break;
10573 case 2: gen_uxth(tmp); break;
10574 case 3: gen_uxtb(tmp); break;
9ee6e8bb 10575 }
b0109805 10576 store_reg(s, rd, tmp);
9ee6e8bb 10577 break;
99c475ab
FB
10578 case 4: case 5: case 0xc: case 0xd:
10579 /* push/pop */
b0109805 10580 addr = load_reg(s, 13);
5899f386
FB
10581 if (insn & (1 << 8))
10582 offset = 4;
99c475ab 10583 else
5899f386
FB
10584 offset = 0;
10585 for (i = 0; i < 8; i++) {
10586 if (insn & (1 << i))
10587 offset += 4;
10588 }
10589 if ((insn & (1 << 11)) == 0) {
b0109805 10590 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10591 }
99c475ab
FB
10592 for (i = 0; i < 8; i++) {
10593 if (insn & (1 << i)) {
10594 if (insn & (1 << 11)) {
10595 /* pop */
c40c8556 10596 tmp = tcg_temp_new_i32();
6ce2faf4 10597 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10598 store_reg(s, i, tmp);
99c475ab
FB
10599 } else {
10600 /* push */
b0109805 10601 tmp = load_reg(s, i);
6ce2faf4 10602 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10603 tcg_temp_free_i32(tmp);
99c475ab 10604 }
5899f386 10605 /* advance to the next address. */
b0109805 10606 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10607 }
10608 }
39d5492a 10609 TCGV_UNUSED_I32(tmp);
99c475ab
FB
10610 if (insn & (1 << 8)) {
10611 if (insn & (1 << 11)) {
10612 /* pop pc */
c40c8556 10613 tmp = tcg_temp_new_i32();
6ce2faf4 10614 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10615 /* don't set the pc until the rest of the instruction
10616 has completed */
10617 } else {
10618 /* push lr */
b0109805 10619 tmp = load_reg(s, 14);
6ce2faf4 10620 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10621 tcg_temp_free_i32(tmp);
99c475ab 10622 }
b0109805 10623 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 10624 }
5899f386 10625 if ((insn & (1 << 11)) == 0) {
b0109805 10626 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10627 }
99c475ab 10628 /* write back the new stack pointer */
b0109805 10629 store_reg(s, 13, addr);
99c475ab 10630 /* set the new PC value */
be5e7a76
DES
10631 if ((insn & 0x0900) == 0x0900) {
10632 store_reg_from_load(env, s, 15, tmp);
10633 }
99c475ab
FB
10634 break;
10635
9ee6e8bb
PB
10636 case 1: case 3: case 9: case 11: /* czb */
10637 rm = insn & 7;
d9ba4830 10638 tmp = load_reg(s, rm);
9ee6e8bb
PB
10639 s->condlabel = gen_new_label();
10640 s->condjmp = 1;
10641 if (insn & (1 << 11))
cb63669a 10642 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 10643 else
cb63669a 10644 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 10645 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10646 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10647 val = (uint32_t)s->pc + 2;
10648 val += offset;
10649 gen_jmp(s, val);
10650 break;
10651
10652 case 15: /* IT, nop-hint. */
10653 if ((insn & 0xf) == 0) {
10654 gen_nop_hint(s, (insn >> 4) & 0xf);
10655 break;
10656 }
10657 /* If Then. */
10658 s->condexec_cond = (insn >> 4) & 0xe;
10659 s->condexec_mask = insn & 0x1f;
10660 /* No actual code generated for this insn, just setup state. */
10661 break;
10662
06c949e6 10663 case 0xe: /* bkpt */
d4a2dc67
PM
10664 {
10665 int imm8 = extract32(insn, 0, 8);
be5e7a76 10666 ARCH(5);
d4a2dc67 10667 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
06c949e6 10668 break;
d4a2dc67 10669 }
06c949e6 10670
9ee6e8bb
PB
10671 case 0xa: /* rev */
10672 ARCH(6);
10673 rn = (insn >> 3) & 0x7;
10674 rd = insn & 0x7;
b0109805 10675 tmp = load_reg(s, rn);
9ee6e8bb 10676 switch ((insn >> 6) & 3) {
66896cb8 10677 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
10678 case 1: gen_rev16(tmp); break;
10679 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
10680 default: goto illegal_op;
10681 }
b0109805 10682 store_reg(s, rd, tmp);
9ee6e8bb
PB
10683 break;
10684
d9e028c1
PM
10685 case 6:
10686 switch ((insn >> 5) & 7) {
10687 case 2:
10688 /* setend */
10689 ARCH(6);
10962fd5
PM
10690 if (((insn >> 3) & 1) != s->bswap_code) {
10691 /* Dynamic endianness switching not implemented. */
e0c270d9 10692 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
10693 goto illegal_op;
10694 }
9ee6e8bb 10695 break;
d9e028c1
PM
10696 case 3:
10697 /* cps */
10698 ARCH(6);
10699 if (IS_USER(s)) {
10700 break;
8984bd2e 10701 }
d9e028c1
PM
10702 if (IS_M(env)) {
10703 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10704 /* FAULTMASK */
10705 if (insn & 1) {
10706 addr = tcg_const_i32(19);
10707 gen_helper_v7m_msr(cpu_env, addr, tmp);
10708 tcg_temp_free_i32(addr);
10709 }
10710 /* PRIMASK */
10711 if (insn & 2) {
10712 addr = tcg_const_i32(16);
10713 gen_helper_v7m_msr(cpu_env, addr, tmp);
10714 tcg_temp_free_i32(addr);
10715 }
10716 tcg_temp_free_i32(tmp);
10717 gen_lookup_tb(s);
10718 } else {
10719 if (insn & (1 << 4)) {
10720 shift = CPSR_A | CPSR_I | CPSR_F;
10721 } else {
10722 shift = 0;
10723 }
10724 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 10725 }
d9e028c1
PM
10726 break;
10727 default:
10728 goto undef;
9ee6e8bb
PB
10729 }
10730 break;
10731
99c475ab
FB
10732 default:
10733 goto undef;
10734 }
10735 break;
10736
10737 case 12:
a7d3970d 10738 {
99c475ab 10739 /* load/store multiple */
39d5492a
PM
10740 TCGv_i32 loaded_var;
10741 TCGV_UNUSED_I32(loaded_var);
99c475ab 10742 rn = (insn >> 8) & 0x7;
b0109805 10743 addr = load_reg(s, rn);
99c475ab
FB
10744 for (i = 0; i < 8; i++) {
10745 if (insn & (1 << i)) {
99c475ab
FB
10746 if (insn & (1 << 11)) {
10747 /* load */
c40c8556 10748 tmp = tcg_temp_new_i32();
6ce2faf4 10749 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
a7d3970d
PM
10750 if (i == rn) {
10751 loaded_var = tmp;
10752 } else {
10753 store_reg(s, i, tmp);
10754 }
99c475ab
FB
10755 } else {
10756 /* store */
b0109805 10757 tmp = load_reg(s, i);
6ce2faf4 10758 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10759 tcg_temp_free_i32(tmp);
99c475ab 10760 }
5899f386 10761 /* advance to the next address */
b0109805 10762 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10763 }
10764 }
b0109805 10765 if ((insn & (1 << rn)) == 0) {
a7d3970d 10766 /* base reg not in list: base register writeback */
b0109805
PB
10767 store_reg(s, rn, addr);
10768 } else {
a7d3970d
PM
10769 /* base reg in list: if load, complete it now */
10770 if (insn & (1 << 11)) {
10771 store_reg(s, rn, loaded_var);
10772 }
7d1b0095 10773 tcg_temp_free_i32(addr);
b0109805 10774 }
99c475ab 10775 break;
a7d3970d 10776 }
99c475ab
FB
10777 case 13:
10778 /* conditional branch or swi */
10779 cond = (insn >> 8) & 0xf;
10780 if (cond == 0xe)
10781 goto undef;
10782
10783 if (cond == 0xf) {
10784 /* swi */
eaed129d 10785 gen_set_pc_im(s, s->pc);
d4a2dc67 10786 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 10787 s->is_jmp = DISAS_SWI;
99c475ab
FB
10788 break;
10789 }
10790 /* generate a conditional jump to next instruction */
e50e6a20 10791 s->condlabel = gen_new_label();
39fb730a 10792 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 10793 s->condjmp = 1;
99c475ab
FB
10794
10795 /* jump to the offset */
5899f386 10796 val = (uint32_t)s->pc + 2;
99c475ab 10797 offset = ((int32_t)insn << 24) >> 24;
5899f386 10798 val += offset << 1;
8aaca4c0 10799 gen_jmp(s, val);
99c475ab
FB
10800 break;
10801
10802 case 14:
358bf29e 10803 if (insn & (1 << 11)) {
9ee6e8bb
PB
10804 if (disas_thumb2_insn(env, s, insn))
10805 goto undef32;
358bf29e
PB
10806 break;
10807 }
9ee6e8bb 10808 /* unconditional branch */
99c475ab
FB
10809 val = (uint32_t)s->pc;
10810 offset = ((int32_t)insn << 21) >> 21;
10811 val += (offset << 1) + 2;
8aaca4c0 10812 gen_jmp(s, val);
99c475ab
FB
10813 break;
10814
10815 case 15:
9ee6e8bb 10816 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 10817 goto undef32;
9ee6e8bb 10818 break;
99c475ab
FB
10819 }
10820 return;
9ee6e8bb 10821undef32:
d4a2dc67 10822 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
10823 return;
10824illegal_op:
99c475ab 10825undef:
d4a2dc67 10826 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
99c475ab
FB
10827}
10828
2c0262af
FB
10829/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10830 basic block 'tb'. If search_pc is TRUE, also generate PC
10831 information for each intermediate instruction. */
5639c3f2 10832static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 10833 TranslationBlock *tb,
5639c3f2 10834 bool search_pc)
2c0262af 10835{
ed2803da 10836 CPUState *cs = CPU(cpu);
5639c3f2 10837 CPUARMState *env = &cpu->env;
2c0262af 10838 DisasContext dc1, *dc = &dc1;
a1d1bb31 10839 CPUBreakpoint *bp;
2c0262af
FB
10840 uint16_t *gen_opc_end;
10841 int j, lj;
0fa85d43 10842 target_ulong pc_start;
0a2461fa 10843 target_ulong next_page_start;
2e70f6ef
PB
10844 int num_insns;
10845 int max_insns;
3b46e624 10846
2c0262af 10847 /* generate intermediate code */
40f860cd
PM
10848
10849 /* The A64 decoder has its own top level loop, because it doesn't need
10850 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
10851 */
10852 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
10853 gen_intermediate_code_internal_a64(cpu, tb, search_pc);
10854 return;
10855 }
10856
0fa85d43 10857 pc_start = tb->pc;
3b46e624 10858
2c0262af
FB
10859 dc->tb = tb;
10860
92414b31 10861 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
10862
10863 dc->is_jmp = DISAS_NEXT;
10864 dc->pc = pc_start;
ed2803da 10865 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 10866 dc->condjmp = 0;
3926cc84 10867
40f860cd
PM
10868 dc->aarch64 = 0;
10869 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
10870 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
10871 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10872 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
3926cc84 10873#if !defined(CONFIG_USER_ONLY)
40f860cd 10874 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
3926cc84 10875#endif
2c7ffc41 10876 dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
40f860cd
PM
10877 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
10878 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10879 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
60322b39
PM
10880 dc->cp_regs = cpu->cp_regs;
10881 dc->current_pl = arm_current_pl(env);
a984e42c 10882 dc->features = env->features;
40f860cd 10883
a7812ae4
PB
10884 cpu_F0s = tcg_temp_new_i32();
10885 cpu_F1s = tcg_temp_new_i32();
10886 cpu_F0d = tcg_temp_new_i64();
10887 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
10888 cpu_V0 = cpu_F0d;
10889 cpu_V1 = cpu_F1d;
e677137d 10890 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 10891 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 10892 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 10893 lj = -1;
2e70f6ef
PB
10894 num_insns = 0;
10895 max_insns = tb->cflags & CF_COUNT_MASK;
10896 if (max_insns == 0)
10897 max_insns = CF_COUNT_MASK;
10898
806f352d 10899 gen_tb_start();
e12ce78d 10900
3849902c
PM
10901 tcg_clear_temp_count();
10902
e12ce78d
PM
10903 /* A note on handling of the condexec (IT) bits:
10904 *
10905 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 10906 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 10907 * (1) if the condexec bits are not already zero then we write
0ecb72a5 10908 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
10909 * to do it at the end of the block. (For example if we don't do this
10910 * it's hard to identify whether we can safely skip writing condexec
10911 * at the end of the TB, which we definitely want to do for the case
10912 * where a TB doesn't do anything with the IT state at all.)
10913 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 10914 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
10915 * This is done both for leaving the TB at the end, and for leaving
10916 * it because of an exception we know will happen, which is done in
10917 * gen_exception_insn(). The latter is necessary because we need to
10918 * leave the TB with the PC/IT state just prior to execution of the
10919 * instruction which caused the exception.
10920 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 10921 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
10922 * This is handled in the same way as restoration of the
10923 * PC in these situations: we will be called again with search_pc=1
10924 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
10925 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10926 * this to restore the condexec bits.
e12ce78d
PM
10927 *
10928 * Note that there are no instructions which can read the condexec
10929 * bits, and none which can write non-static values to them, so
0ecb72a5 10930 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
10931 * middle of a TB.
10932 */
10933
9ee6e8bb
PB
10934 /* Reset the conditional execution bits immediately. This avoids
10935 complications trying to do it at the end of the block. */
98eac7ca 10936 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 10937 {
39d5492a 10938 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 10939 tcg_gen_movi_i32(tmp, 0);
d9ba4830 10940 store_cpu_field(tmp, condexec_bits);
8f01245e 10941 }
2c0262af 10942 do {
fbb4a2e3
PB
10943#ifdef CONFIG_USER_ONLY
10944 /* Intercept jump to the magic kernel page. */
40f860cd 10945 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
10946 /* We always get here via a jump, so know we are not in a
10947 conditional execution block. */
d4a2dc67 10948 gen_exception_internal(EXCP_KERNEL_TRAP);
fbb4a2e3
PB
10949 dc->is_jmp = DISAS_UPDATE;
10950 break;
10951 }
10952#else
9ee6e8bb
PB
10953 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10954 /* We always get here via a jump, so know we are not in a
10955 conditional execution block. */
d4a2dc67 10956 gen_exception_internal(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
10957 dc->is_jmp = DISAS_UPDATE;
10958 break;
9ee6e8bb
PB
10959 }
10960#endif
10961
f0c3c505
AF
10962 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
10963 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 10964 if (bp->pc == dc->pc) {
d4a2dc67 10965 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
10966 /* Advance PC so that clearing the breakpoint will
10967 invalidate this TB. */
10968 dc->pc += 2;
10969 goto done_generating;
1fddef4b
FB
10970 }
10971 }
10972 }
2c0262af 10973 if (search_pc) {
92414b31 10974 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
10975 if (lj < j) {
10976 lj++;
10977 while (lj < j)
ab1103de 10978 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 10979 }
25983cad 10980 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 10981 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 10982 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 10983 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 10984 }
e50e6a20 10985
2e70f6ef
PB
10986 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10987 gen_io_start();
10988
fdefe51c 10989 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
10990 tcg_gen_debug_insn_start(dc->pc);
10991 }
10992
40f860cd 10993 if (dc->thumb) {
9ee6e8bb
PB
10994 disas_thumb_insn(env, dc);
10995 if (dc->condexec_mask) {
10996 dc->condexec_cond = (dc->condexec_cond & 0xe)
10997 | ((dc->condexec_mask >> 4) & 1);
10998 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10999 if (dc->condexec_mask == 0) {
11000 dc->condexec_cond = 0;
11001 }
11002 }
11003 } else {
11004 disas_arm_insn(env, dc);
11005 }
e50e6a20
FB
11006
11007 if (dc->condjmp && !dc->is_jmp) {
11008 gen_set_label(dc->condlabel);
11009 dc->condjmp = 0;
11010 }
3849902c
PM
11011
11012 if (tcg_check_temp_count()) {
0a2461fa
AG
11013 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11014 dc->pc);
3849902c
PM
11015 }
11016
aaf2d97d 11017 /* Translation stops when a conditional branch is encountered.
e50e6a20 11018 * Otherwise the subsequent code could get translated several times.
b5ff1b31 11019 * Also stop translation when a page boundary is reached. This
bf20dc07 11020 * ensures prefetch aborts occur at the right place. */
2e70f6ef 11021 num_insns ++;
efd7f486 11022 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 11023 !cs->singlestep_enabled &&
1b530a6d 11024 !singlestep &&
2e70f6ef
PB
11025 dc->pc < next_page_start &&
11026 num_insns < max_insns);
11027
11028 if (tb->cflags & CF_LAST_IO) {
11029 if (dc->condjmp) {
11030 /* FIXME: This can theoretically happen with self-modifying
11031 code. */
a47dddd7 11032 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
11033 }
11034 gen_io_end();
11035 }
9ee6e8bb 11036
b5ff1b31 11037 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
11038 instruction was a conditional branch or trap, and the PC has
11039 already been written. */
ed2803da 11040 if (unlikely(cs->singlestep_enabled)) {
8aaca4c0 11041 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 11042 if (dc->condjmp) {
9ee6e8bb
PB
11043 gen_set_condexec(dc);
11044 if (dc->is_jmp == DISAS_SWI) {
d4a2dc67 11045 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb 11046 } else {
d4a2dc67 11047 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11048 }
e50e6a20
FB
11049 gen_set_label(dc->condlabel);
11050 }
11051 if (dc->condjmp || !dc->is_jmp) {
eaed129d 11052 gen_set_pc_im(dc, dc->pc);
e50e6a20 11053 dc->condjmp = 0;
8aaca4c0 11054 }
9ee6e8bb
PB
11055 gen_set_condexec(dc);
11056 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d4a2dc67 11057 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb
PB
11058 } else {
11059 /* FIXME: Single stepping a WFI insn will not halt
11060 the CPU. */
d4a2dc67 11061 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11062 }
8aaca4c0 11063 } else {
9ee6e8bb
PB
11064 /* While branches must always occur at the end of an IT block,
11065 there are a few other things that can cause us to terminate
65626741 11066 the TB in the middle of an IT block:
9ee6e8bb
PB
11067 - Exception generating instructions (bkpt, swi, undefined).
11068 - Page boundaries.
11069 - Hardware watchpoints.
11070 Hardware breakpoints have already been handled and skip this code.
11071 */
11072 gen_set_condexec(dc);
8aaca4c0 11073 switch(dc->is_jmp) {
8aaca4c0 11074 case DISAS_NEXT:
6e256c93 11075 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
11076 break;
11077 default:
11078 case DISAS_JUMP:
11079 case DISAS_UPDATE:
11080 /* indicate that the hash table must be used to find the next TB */
57fec1fe 11081 tcg_gen_exit_tb(0);
8aaca4c0
FB
11082 break;
11083 case DISAS_TB_JUMP:
11084 /* nothing more to generate */
11085 break;
9ee6e8bb 11086 case DISAS_WFI:
1ce94f81 11087 gen_helper_wfi(cpu_env);
9ee6e8bb 11088 break;
72c1d3af
PM
11089 case DISAS_WFE:
11090 gen_helper_wfe(cpu_env);
11091 break;
9ee6e8bb 11092 case DISAS_SWI:
d4a2dc67 11093 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb 11094 break;
8aaca4c0 11095 }
e50e6a20
FB
11096 if (dc->condjmp) {
11097 gen_set_label(dc->condlabel);
9ee6e8bb 11098 gen_set_condexec(dc);
6e256c93 11099 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
11100 dc->condjmp = 0;
11101 }
2c0262af 11102 }
2e70f6ef 11103
9ee6e8bb 11104done_generating:
806f352d 11105 gen_tb_end(tb, num_insns);
efd7f486 11106 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
11107
11108#ifdef DEBUG_DISAS
8fec2b8c 11109 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
11110 qemu_log("----------------\n");
11111 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 11112 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 11113 dc->thumb | (dc->bswap_code << 1));
93fcfe39 11114 qemu_log("\n");
2c0262af
FB
11115 }
11116#endif
b5ff1b31 11117 if (search_pc) {
92414b31 11118 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
11119 lj++;
11120 while (lj <= j)
ab1103de 11121 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 11122 } else {
2c0262af 11123 tb->size = dc->pc - pc_start;
2e70f6ef 11124 tb->icount = num_insns;
b5ff1b31 11125 }
2c0262af
FB
11126}
11127
0ecb72a5 11128void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 11129{
5639c3f2 11130 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
11131}
11132
0ecb72a5 11133void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 11134{
5639c3f2 11135 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
11136}
11137
b5ff1b31 11138static const char *cpu_mode_names[16] = {
28c9457d
EI
11139 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11140 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 11141};
9ee6e8bb 11142
878096ee
AF
11143void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11144 int flags)
2c0262af 11145{
878096ee
AF
11146 ARMCPU *cpu = ARM_CPU(cs);
11147 CPUARMState *env = &cpu->env;
2c0262af 11148 int i;
b5ff1b31 11149 uint32_t psr;
2c0262af 11150
17731115
PM
11151 if (is_a64(env)) {
11152 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11153 return;
11154 }
11155
2c0262af 11156 for(i=0;i<16;i++) {
7fe48483 11157 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 11158 if ((i % 4) == 3)
7fe48483 11159 cpu_fprintf(f, "\n");
2c0262af 11160 else
7fe48483 11161 cpu_fprintf(f, " ");
2c0262af 11162 }
b5ff1b31 11163 psr = cpsr_read(env);
687fa640
TS
11164 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
11165 psr,
b5ff1b31
FB
11166 psr & (1 << 31) ? 'N' : '-',
11167 psr & (1 << 30) ? 'Z' : '-',
11168 psr & (1 << 29) ? 'C' : '-',
11169 psr & (1 << 28) ? 'V' : '-',
5fafdf24 11170 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 11171 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 11172
f2617cfc
PM
11173 if (flags & CPU_DUMP_FPU) {
11174 int numvfpregs = 0;
11175 if (arm_feature(env, ARM_FEATURE_VFP)) {
11176 numvfpregs += 16;
11177 }
11178 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11179 numvfpregs += 16;
11180 }
11181 for (i = 0; i < numvfpregs; i++) {
11182 uint64_t v = float64_val(env->vfp.regs[i]);
11183 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11184 i * 2, (uint32_t)v,
11185 i * 2 + 1, (uint32_t)(v >> 32),
11186 i, v);
11187 }
11188 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 11189 }
2c0262af 11190}
a6b025d3 11191
0ecb72a5 11192void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 11193{
3926cc84
AG
11194 if (is_a64(env)) {
11195 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11196 env->condexec_bits = 0;
3926cc84
AG
11197 } else {
11198 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11199 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
3926cc84 11200 }
d2856f1a 11201}