]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
target-arm: A64: Add assertion that FP access was checked
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
ccd38087 28#include "internals.h"
76cad711 29#include "disas/disas.h"
57fec1fe 30#include "tcg-op.h"
1de7afc9 31#include "qemu/log.h"
534df156 32#include "qemu/bitops.h"
1497c961 33
7b59220e 34#include "helper.h"
1497c961 35#define GEN_HELPER 1
7b59220e 36#include "helper.h"
2c0262af 37
be5e7a76
DES
38#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
39#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
40/* currently all emulated v5 cores are also v5TE, so don't bother */
41#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
42#define ENABLE_ARCH_5J 0
43#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
44#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
45#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
46#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
81e69fb0 47#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
b5ff1b31 48
86753403 49#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 50
f570c61e 51#include "translate.h"
e12ce78d
PM
52static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
53
b5ff1b31
FB
54#if defined(CONFIG_USER_ONLY)
55#define IS_USER(s) 1
56#else
57#define IS_USER(s) (s->user)
58#endif
59
3407ad0e 60TCGv_ptr cpu_env;
ad69471c 61/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 62static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 63static TCGv_i32 cpu_R[16];
66c374de 64static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
03d05e2d
PM
65static TCGv_i64 cpu_exclusive_addr;
66static TCGv_i64 cpu_exclusive_val;
426f5abc 67#ifdef CONFIG_USER_ONLY
03d05e2d 68static TCGv_i64 cpu_exclusive_test;
426f5abc
PB
69static TCGv_i32 cpu_exclusive_info;
70#endif
ad69471c 71
b26eefb6 72/* FIXME: These should be removed. */
39d5492a 73static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 74static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 75
022c62cb 76#include "exec/gen-icount.h"
2e70f6ef 77
155c3eac
FN
78static const char *regnames[] =
79 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
80 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
81
b26eefb6
PB
82/* initialize TCG globals. */
83void arm_translate_init(void)
84{
155c3eac
FN
85 int i;
86
a7812ae4
PB
87 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
88
155c3eac
FN
89 for (i = 0; i < 16; i++) {
90 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 91 offsetof(CPUARMState, regs[i]),
155c3eac
FN
92 regnames[i]);
93 }
66c374de
AJ
94 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
95 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
96 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
97 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
98
03d05e2d 99 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 100 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
03d05e2d 101 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 102 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 103#ifdef CONFIG_USER_ONLY
03d05e2d 104 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 105 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 106 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 107 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 108#endif
155c3eac 109
14ade10f 110 a64_translate_init();
b26eefb6
PB
111}
112
39d5492a 113static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 114{
39d5492a 115 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
116 tcg_gen_ld_i32(tmp, cpu_env, offset);
117 return tmp;
118}
119
0ecb72a5 120#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 121
39d5492a 122static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
123{
124 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 125 tcg_temp_free_i32(var);
d9ba4830
PB
126}
127
128#define store_cpu_field(var, name) \
0ecb72a5 129 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 130
b26eefb6 131/* Set a variable to the value of a CPU register. */
39d5492a 132static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
133{
134 if (reg == 15) {
135 uint32_t addr;
b90372ad 136 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
137 if (s->thumb)
138 addr = (long)s->pc + 2;
139 else
140 addr = (long)s->pc + 4;
141 tcg_gen_movi_i32(var, addr);
142 } else {
155c3eac 143 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
144 }
145}
146
147/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 148static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 149{
39d5492a 150 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
151 load_reg_var(s, tmp, reg);
152 return tmp;
153}
154
155/* Set a CPU register. The source must be a temporary and will be
156 marked as dead. */
39d5492a 157static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
158{
159 if (reg == 15) {
160 tcg_gen_andi_i32(var, var, ~1);
161 s->is_jmp = DISAS_JUMP;
162 }
155c3eac 163 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 164 tcg_temp_free_i32(var);
b26eefb6
PB
165}
166
/* Value extensions: in-place zero/sign extension of the low 8 or 16
 * bits, plus the dual-halfword (xtb16) forms which go via helpers. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 175
b26eefb6 176
39d5492a 177static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 178{
39d5492a 179 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 180 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
181 tcg_temp_free_i32(tmp_mask);
182}
d9ba4830
PB
183/* Set NZCV flags from the high 4 bits of var. */
184#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
185
d4a2dc67 186static void gen_exception_internal(int excp)
d9ba4830 187{
d4a2dc67
PM
188 TCGv_i32 tcg_excp = tcg_const_i32(excp);
189
190 assert(excp_is_internal(excp));
191 gen_helper_exception_internal(cpu_env, tcg_excp);
192 tcg_temp_free_i32(tcg_excp);
193}
194
195static void gen_exception(int excp, uint32_t syndrome)
196{
197 TCGv_i32 tcg_excp = tcg_const_i32(excp);
198 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
199
200 gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
201 tcg_temp_free_i32(tcg_syn);
202 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
203}
204
39d5492a 205static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 206{
39d5492a
PM
207 TCGv_i32 tmp1 = tcg_temp_new_i32();
208 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
209 tcg_gen_ext16s_i32(tmp1, a);
210 tcg_gen_ext16s_i32(tmp2, b);
3670669c 211 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 212 tcg_temp_free_i32(tmp2);
3670669c
PB
213 tcg_gen_sari_i32(a, a, 16);
214 tcg_gen_sari_i32(b, b, 16);
215 tcg_gen_mul_i32(b, b, a);
216 tcg_gen_mov_i32(a, tmp1);
7d1b0095 217 tcg_temp_free_i32(tmp1);
3670669c
PB
218}
219
220/* Byteswap each halfword. */
39d5492a 221static void gen_rev16(TCGv_i32 var)
3670669c 222{
39d5492a 223 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
224 tcg_gen_shri_i32(tmp, var, 8);
225 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
226 tcg_gen_shli_i32(var, var, 8);
227 tcg_gen_andi_i32(var, var, 0xff00ff00);
228 tcg_gen_or_i32(var, var, tmp);
7d1b0095 229 tcg_temp_free_i32(tmp);
3670669c
PB
230}
231
232/* Byteswap low halfword and sign extend. */
39d5492a 233static void gen_revsh(TCGv_i32 var)
3670669c 234{
1a855029
AJ
235 tcg_gen_ext16u_i32(var, var);
236 tcg_gen_bswap16_i32(var, var);
237 tcg_gen_ext16s_i32(var, var);
3670669c
PB
238}
239
240/* Unsigned bitfield extract. */
39d5492a 241static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
242{
243 if (shift)
244 tcg_gen_shri_i32(var, var, shift);
245 tcg_gen_andi_i32(var, var, mask);
246}
247
248/* Signed bitfield extract. */
39d5492a 249static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
250{
251 uint32_t signbit;
252
253 if (shift)
254 tcg_gen_sari_i32(var, var, shift);
255 if (shift + width < 32) {
256 signbit = 1u << (width - 1);
257 tcg_gen_andi_i32(var, var, (1u << width) - 1);
258 tcg_gen_xori_i32(var, var, signbit);
259 tcg_gen_subi_i32(var, var, signbit);
260 }
261}
262
838fa72d 263/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 264static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 265{
838fa72d
AJ
266 TCGv_i64 tmp64 = tcg_temp_new_i64();
267
268 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 269 tcg_temp_free_i32(b);
838fa72d
AJ
270 tcg_gen_shli_i64(tmp64, tmp64, 32);
271 tcg_gen_add_i64(a, tmp64, a);
272
273 tcg_temp_free_i64(tmp64);
274 return a;
275}
276
277/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 278static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
279{
280 TCGv_i64 tmp64 = tcg_temp_new_i64();
281
282 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 283 tcg_temp_free_i32(b);
838fa72d
AJ
284 tcg_gen_shli_i64(tmp64, tmp64, 32);
285 tcg_gen_sub_i64(a, tmp64, a);
286
287 tcg_temp_free_i64(tmp64);
288 return a;
3670669c
PB
289}
290
5e3f878a 291/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 292static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 293{
39d5492a
PM
294 TCGv_i32 lo = tcg_temp_new_i32();
295 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 296 TCGv_i64 ret;
5e3f878a 297
831d7fe8 298 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 299 tcg_temp_free_i32(a);
7d1b0095 300 tcg_temp_free_i32(b);
831d7fe8
RH
301
302 ret = tcg_temp_new_i64();
303 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
304 tcg_temp_free_i32(lo);
305 tcg_temp_free_i32(hi);
831d7fe8
RH
306
307 return ret;
5e3f878a
PB
308}
309
39d5492a 310static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 311{
39d5492a
PM
312 TCGv_i32 lo = tcg_temp_new_i32();
313 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 314 TCGv_i64 ret;
5e3f878a 315
831d7fe8 316 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 317 tcg_temp_free_i32(a);
7d1b0095 318 tcg_temp_free_i32(b);
831d7fe8
RH
319
320 ret = tcg_temp_new_i64();
321 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
322 tcg_temp_free_i32(lo);
323 tcg_temp_free_i32(hi);
831d7fe8
RH
324
325 return ret;
5e3f878a
PB
326}
327
8f01245e 328/* Swap low and high halfwords. */
39d5492a 329static void gen_swap_half(TCGv_i32 var)
8f01245e 330{
39d5492a 331 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
332 tcg_gen_shri_i32(tmp, var, 16);
333 tcg_gen_shli_i32(var, var, 16);
334 tcg_gen_or_i32(var, var, tmp);
7d1b0095 335 tcg_temp_free_i32(tmp);
8f01245e
PB
336}
337
b26eefb6
PB
338/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
339 tmp = (t0 ^ t1) & 0x8000;
340 t0 &= ~0x8000;
341 t1 &= ~0x8000;
342 t0 = (t0 + t1) ^ tmp;
343 */
344
39d5492a 345static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 346{
39d5492a 347 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
348 tcg_gen_xor_i32(tmp, t0, t1);
349 tcg_gen_andi_i32(tmp, tmp, 0x8000);
350 tcg_gen_andi_i32(t0, t0, ~0x8000);
351 tcg_gen_andi_i32(t1, t1, ~0x8000);
352 tcg_gen_add_i32(t0, t0, t1);
353 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
354 tcg_temp_free_i32(tmp);
355 tcg_temp_free_i32(t1);
b26eefb6
PB
356}
357
358/* Set CF to the top bit of var. */
39d5492a 359static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 360{
66c374de 361 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
362}
363
364/* Set N and Z flags from var. */
39d5492a 365static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 366{
66c374de
AJ
367 tcg_gen_mov_i32(cpu_NF, var);
368 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
369}
370
371/* T0 += T1 + CF. */
39d5492a 372static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 373{
396e467c 374 tcg_gen_add_i32(t0, t0, t1);
66c374de 375 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
376}
377
e9bb4aa9 378/* dest = T0 + T1 + CF. */
39d5492a 379static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 380{
e9bb4aa9 381 tcg_gen_add_i32(dest, t0, t1);
66c374de 382 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
383}
384
3670669c 385/* dest = T0 - T1 + CF - 1. */
39d5492a 386static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 387{
3670669c 388 tcg_gen_sub_i32(dest, t0, t1);
66c374de 389 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 390 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
391}
392
72485ec4 393/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 394static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 395{
39d5492a 396 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
397 tcg_gen_movi_i32(tmp, 0);
398 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 399 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 400 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
401 tcg_gen_xor_i32(tmp, t0, t1);
402 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
403 tcg_temp_free_i32(tmp);
404 tcg_gen_mov_i32(dest, cpu_NF);
405}
406
49b4c31e 407/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 408static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 409{
39d5492a 410 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
411 if (TCG_TARGET_HAS_add2_i32) {
412 tcg_gen_movi_i32(tmp, 0);
413 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 414 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
415 } else {
416 TCGv_i64 q0 = tcg_temp_new_i64();
417 TCGv_i64 q1 = tcg_temp_new_i64();
418 tcg_gen_extu_i32_i64(q0, t0);
419 tcg_gen_extu_i32_i64(q1, t1);
420 tcg_gen_add_i64(q0, q0, q1);
421 tcg_gen_extu_i32_i64(q1, cpu_CF);
422 tcg_gen_add_i64(q0, q0, q1);
423 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
424 tcg_temp_free_i64(q0);
425 tcg_temp_free_i64(q1);
426 }
427 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
428 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
429 tcg_gen_xor_i32(tmp, t0, t1);
430 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
431 tcg_temp_free_i32(tmp);
432 tcg_gen_mov_i32(dest, cpu_NF);
433}
434
72485ec4 435/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 436static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 437{
39d5492a 438 TCGv_i32 tmp;
72485ec4
AJ
439 tcg_gen_sub_i32(cpu_NF, t0, t1);
440 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
441 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
442 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
443 tmp = tcg_temp_new_i32();
444 tcg_gen_xor_i32(tmp, t0, t1);
445 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
446 tcg_temp_free_i32(tmp);
447 tcg_gen_mov_i32(dest, cpu_NF);
448}
449
e77f0832 450/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 451static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 452{
39d5492a 453 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
454 tcg_gen_not_i32(tmp, t1);
455 gen_adc_CC(dest, t0, tmp);
39d5492a 456 tcg_temp_free_i32(tmp);
2de68a49
RH
457}
458
365af80e 459#define GEN_SHIFT(name) \
39d5492a 460static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 461{ \
39d5492a 462 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
463 tmp1 = tcg_temp_new_i32(); \
464 tcg_gen_andi_i32(tmp1, t1, 0xff); \
465 tmp2 = tcg_const_i32(0); \
466 tmp3 = tcg_const_i32(0x1f); \
467 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
468 tcg_temp_free_i32(tmp3); \
469 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
470 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
471 tcg_temp_free_i32(tmp2); \
472 tcg_temp_free_i32(tmp1); \
473}
474GEN_SHIFT(shl)
475GEN_SHIFT(shr)
476#undef GEN_SHIFT
477
39d5492a 478static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 479{
39d5492a 480 TCGv_i32 tmp1, tmp2;
365af80e
AJ
481 tmp1 = tcg_temp_new_i32();
482 tcg_gen_andi_i32(tmp1, t1, 0xff);
483 tmp2 = tcg_const_i32(0x1f);
484 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
485 tcg_temp_free_i32(tmp2);
486 tcg_gen_sar_i32(dest, t0, tmp1);
487 tcg_temp_free_i32(tmp1);
488}
489
39d5492a 490static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 491{
39d5492a
PM
492 TCGv_i32 c0 = tcg_const_i32(0);
493 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
494 tcg_gen_neg_i32(tmp, src);
495 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
496 tcg_temp_free_i32(c0);
497 tcg_temp_free_i32(tmp);
498}
ad69471c 499
39d5492a 500static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 501{
9a119ff6 502 if (shift == 0) {
66c374de 503 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 504 } else {
66c374de
AJ
505 tcg_gen_shri_i32(cpu_CF, var, shift);
506 if (shift != 31) {
507 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
508 }
9a119ff6 509 }
9a119ff6 510}
b26eefb6 511
9a119ff6 512/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
513static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
514 int shift, int flags)
9a119ff6
PB
515{
516 switch (shiftop) {
517 case 0: /* LSL */
518 if (shift != 0) {
519 if (flags)
520 shifter_out_im(var, 32 - shift);
521 tcg_gen_shli_i32(var, var, shift);
522 }
523 break;
524 case 1: /* LSR */
525 if (shift == 0) {
526 if (flags) {
66c374de 527 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
528 }
529 tcg_gen_movi_i32(var, 0);
530 } else {
531 if (flags)
532 shifter_out_im(var, shift - 1);
533 tcg_gen_shri_i32(var, var, shift);
534 }
535 break;
536 case 2: /* ASR */
537 if (shift == 0)
538 shift = 32;
539 if (flags)
540 shifter_out_im(var, shift - 1);
541 if (shift == 32)
542 shift = 31;
543 tcg_gen_sari_i32(var, var, shift);
544 break;
545 case 3: /* ROR/RRX */
546 if (shift != 0) {
547 if (flags)
548 shifter_out_im(var, shift - 1);
f669df27 549 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 550 } else {
39d5492a 551 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 552 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
553 if (flags)
554 shifter_out_im(var, 0);
555 tcg_gen_shri_i32(var, var, 1);
b26eefb6 556 tcg_gen_or_i32(var, var, tmp);
7d1b0095 557 tcg_temp_free_i32(tmp);
b26eefb6
PB
558 }
559 }
560};
561
39d5492a
PM
562static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
563 TCGv_i32 shift, int flags)
8984bd2e
PB
564{
565 if (flags) {
566 switch (shiftop) {
9ef39277
BS
567 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
568 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
569 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
570 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
571 }
572 } else {
573 switch (shiftop) {
365af80e
AJ
574 case 0:
575 gen_shl(var, var, shift);
576 break;
577 case 1:
578 gen_shr(var, var, shift);
579 break;
580 case 2:
581 gen_sar(var, var, shift);
582 break;
f669df27
AJ
583 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
584 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
585 }
586 }
7d1b0095 587 tcg_temp_free_i32(shift);
8984bd2e
PB
588}
589
6ddbc6e4
PB
590#define PAS_OP(pfx) \
591 switch (op2) { \
592 case 0: gen_pas_helper(glue(pfx,add16)); break; \
593 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
594 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
595 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
596 case 4: gen_pas_helper(glue(pfx,add8)); break; \
597 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
598 }
39d5492a 599static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 600{
a7812ae4 601 TCGv_ptr tmp;
6ddbc6e4
PB
602
603 switch (op1) {
604#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
605 case 1:
a7812ae4 606 tmp = tcg_temp_new_ptr();
0ecb72a5 607 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 608 PAS_OP(s)
b75263d6 609 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
610 break;
611 case 5:
a7812ae4 612 tmp = tcg_temp_new_ptr();
0ecb72a5 613 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 614 PAS_OP(u)
b75263d6 615 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
616 break;
617#undef gen_pas_helper
618#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
619 case 2:
620 PAS_OP(q);
621 break;
622 case 3:
623 PAS_OP(sh);
624 break;
625 case 6:
626 PAS_OP(uq);
627 break;
628 case 7:
629 PAS_OP(uh);
630 break;
631#undef gen_pas_helper
632 }
633}
9ee6e8bb
PB
634#undef PAS_OP
635
6ddbc6e4
PB
636/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
637#define PAS_OP(pfx) \
ed89a2f1 638 switch (op1) { \
6ddbc6e4
PB
639 case 0: gen_pas_helper(glue(pfx,add8)); break; \
640 case 1: gen_pas_helper(glue(pfx,add16)); break; \
641 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
642 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
643 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
644 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
645 }
39d5492a 646static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 647{
a7812ae4 648 TCGv_ptr tmp;
6ddbc6e4 649
ed89a2f1 650 switch (op2) {
6ddbc6e4
PB
651#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
652 case 0:
a7812ae4 653 tmp = tcg_temp_new_ptr();
0ecb72a5 654 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 655 PAS_OP(s)
b75263d6 656 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
657 break;
658 case 4:
a7812ae4 659 tmp = tcg_temp_new_ptr();
0ecb72a5 660 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 661 PAS_OP(u)
b75263d6 662 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
663 break;
664#undef gen_pas_helper
665#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
666 case 1:
667 PAS_OP(q);
668 break;
669 case 2:
670 PAS_OP(sh);
671 break;
672 case 5:
673 PAS_OP(uq);
674 break;
675 case 6:
676 PAS_OP(uh);
677 break;
678#undef gen_pas_helper
679 }
680}
9ee6e8bb
PB
681#undef PAS_OP
682
39fb730a
AG
683/*
684 * generate a conditional branch based on ARM condition code cc.
685 * This is common between ARM and Aarch64 targets.
686 */
687void arm_gen_test_cc(int cc, int label)
d9ba4830 688{
39d5492a 689 TCGv_i32 tmp;
d9ba4830
PB
690 int inv;
691
d9ba4830
PB
692 switch (cc) {
693 case 0: /* eq: Z */
66c374de 694 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
695 break;
696 case 1: /* ne: !Z */
66c374de 697 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
698 break;
699 case 2: /* cs: C */
66c374de 700 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
701 break;
702 case 3: /* cc: !C */
66c374de 703 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
704 break;
705 case 4: /* mi: N */
66c374de 706 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
707 break;
708 case 5: /* pl: !N */
66c374de 709 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
710 break;
711 case 6: /* vs: V */
66c374de 712 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
713 break;
714 case 7: /* vc: !V */
66c374de 715 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
716 break;
717 case 8: /* hi: C && !Z */
718 inv = gen_new_label();
66c374de
AJ
719 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
720 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
721 gen_set_label(inv);
722 break;
723 case 9: /* ls: !C || Z */
66c374de
AJ
724 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
725 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
726 break;
727 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
728 tmp = tcg_temp_new_i32();
729 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 730 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 731 tcg_temp_free_i32(tmp);
d9ba4830
PB
732 break;
733 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
734 tmp = tcg_temp_new_i32();
735 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 736 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 737 tcg_temp_free_i32(tmp);
d9ba4830
PB
738 break;
739 case 12: /* gt: !Z && N == V */
740 inv = gen_new_label();
66c374de
AJ
741 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
742 tmp = tcg_temp_new_i32();
743 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 744 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 745 tcg_temp_free_i32(tmp);
d9ba4830
PB
746 gen_set_label(inv);
747 break;
748 case 13: /* le: Z || N != V */
66c374de
AJ
749 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
750 tmp = tcg_temp_new_i32();
751 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 752 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 753 tcg_temp_free_i32(tmp);
d9ba4830
PB
754 break;
755 default:
756 fprintf(stderr, "Bad condition code 0x%x\n", cc);
757 abort();
758 }
d9ba4830 759}
2c0262af 760
b1d8e52e 761static const uint8_t table_logic_cc[16] = {
2c0262af
FB
762 1, /* and */
763 1, /* xor */
764 0, /* sub */
765 0, /* rsb */
766 0, /* add */
767 0, /* adc */
768 0, /* sbc */
769 0, /* rsc */
770 1, /* andl */
771 1, /* xorl */
772 0, /* cmp */
773 0, /* cmn */
774 1, /* orr */
775 1, /* mov */
776 1, /* bic */
777 1, /* mvn */
778};
3b46e624 779
d9ba4830
PB
780/* Set PC and Thumb state from an immediate address. */
781static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 782{
39d5492a 783 TCGv_i32 tmp;
99c475ab 784
b26eefb6 785 s->is_jmp = DISAS_UPDATE;
d9ba4830 786 if (s->thumb != (addr & 1)) {
7d1b0095 787 tmp = tcg_temp_new_i32();
d9ba4830 788 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 789 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 790 tcg_temp_free_i32(tmp);
d9ba4830 791 }
155c3eac 792 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
793}
794
795/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 796static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 797{
d9ba4830 798 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
799 tcg_gen_andi_i32(cpu_R[15], var, ~1);
800 tcg_gen_andi_i32(var, var, 1);
801 store_cpu_field(var, thumb);
d9ba4830
PB
802}
803
21aeb343
JR
804/* Variant of store_reg which uses branch&exchange logic when storing
805 to r15 in ARM architecture v7 and above. The source must be a temporary
806 and will be marked as dead. */
0ecb72a5 807static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 808 int reg, TCGv_i32 var)
21aeb343
JR
809{
810 if (reg == 15 && ENABLE_ARCH_7) {
811 gen_bx(s, var);
812 } else {
813 store_reg(s, reg, var);
814 }
815}
816
be5e7a76
DES
817/* Variant of store_reg which uses branch&exchange logic when storing
818 * to r15 in ARM architecture v5T and above. This is used for storing
819 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
820 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 821static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 822 int reg, TCGv_i32 var)
be5e7a76
DES
823{
824 if (reg == 15 && ENABLE_ARCH_5) {
825 gen_bx(s, var);
826 } else {
827 store_reg(s, reg, var);
828 }
829}
830
08307563
PM
831/* Abstractions of "generate code to do a guest load/store for
832 * AArch32", where a vaddr is always 32 bits (and is zero
833 * extended if we're a 64 bit core) and data is also
834 * 32 bits unless specifically doing a 64 bit access.
835 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 836 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
837 */
838#if TARGET_LONG_BITS == 32
839
09f78135
RH
840#define DO_GEN_LD(SUFF, OPC) \
841static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 842{ \
09f78135 843 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
08307563
PM
844}
845
09f78135
RH
846#define DO_GEN_ST(SUFF, OPC) \
847static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 848{ \
09f78135 849 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
08307563
PM
850}
851
852static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
853{
09f78135 854 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
08307563
PM
855}
856
857static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
858{
09f78135 859 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
08307563
PM
860}
861
862#else
863
09f78135
RH
864#define DO_GEN_LD(SUFF, OPC) \
865static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
866{ \
867 TCGv addr64 = tcg_temp_new(); \
08307563 868 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 869 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
08307563 870 tcg_temp_free(addr64); \
08307563
PM
871}
872
09f78135
RH
873#define DO_GEN_ST(SUFF, OPC) \
874static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
875{ \
876 TCGv addr64 = tcg_temp_new(); \
08307563 877 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 878 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
08307563 879 tcg_temp_free(addr64); \
08307563
PM
880}
881
882static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
883{
884 TCGv addr64 = tcg_temp_new();
885 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 886 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
08307563
PM
887 tcg_temp_free(addr64);
888}
889
890static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
891{
892 TCGv addr64 = tcg_temp_new();
893 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 894 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
08307563
PM
895 tcg_temp_free(addr64);
896}
897
898#endif
899
09f78135
RH
900DO_GEN_LD(8s, MO_SB)
901DO_GEN_LD(8u, MO_UB)
902DO_GEN_LD(16s, MO_TESW)
903DO_GEN_LD(16u, MO_TEUW)
904DO_GEN_LD(32u, MO_TEUL)
905DO_GEN_ST(8, MO_UB)
906DO_GEN_ST(16, MO_TEUW)
907DO_GEN_ST(32, MO_TEUL)
08307563 908
eaed129d 909static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 910{
40f860cd 911 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
912}
913
d4a2dc67
PM
914static inline void
915gen_set_condexec (DisasContext *s)
916{
917 if (s->condexec_mask) {
918 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
919 TCGv_i32 tmp = tcg_temp_new_i32();
920 tcg_gen_movi_i32(tmp, val);
921 store_cpu_field(tmp, condexec_bits);
922 }
923}
924
925static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
926{
927 gen_set_condexec(s);
928 gen_set_pc_im(s, s->pc - offset);
929 gen_exception_internal(excp);
930 s->is_jmp = DISAS_JUMP;
931}
932
933static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
934{
935 gen_set_condexec(s);
936 gen_set_pc_im(s, s->pc - offset);
937 gen_exception(excp, syn);
938 s->is_jmp = DISAS_JUMP;
939}
940
b5ff1b31
FB
941/* Force a TB lookup after an instruction that changes the CPU state. */
942static inline void gen_lookup_tb(DisasContext *s)
943{
a6445c52 944 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
945 s->is_jmp = DISAS_UPDATE;
946}
947
b0109805 948static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 949 TCGv_i32 var)
2c0262af 950{
1e8d4eec 951 int val, rm, shift, shiftop;
39d5492a 952 TCGv_i32 offset;
2c0262af
FB
953
954 if (!(insn & (1 << 25))) {
955 /* immediate */
956 val = insn & 0xfff;
957 if (!(insn & (1 << 23)))
958 val = -val;
537730b9 959 if (val != 0)
b0109805 960 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
961 } else {
962 /* shift/register */
963 rm = (insn) & 0xf;
964 shift = (insn >> 7) & 0x1f;
1e8d4eec 965 shiftop = (insn >> 5) & 3;
b26eefb6 966 offset = load_reg(s, rm);
9a119ff6 967 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 968 if (!(insn & (1 << 23)))
b0109805 969 tcg_gen_sub_i32(var, var, offset);
2c0262af 970 else
b0109805 971 tcg_gen_add_i32(var, var, offset);
7d1b0095 972 tcg_temp_free_i32(offset);
2c0262af
FB
973 }
974}
975
191f9a93 976static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 977 int extra, TCGv_i32 var)
2c0262af
FB
978{
979 int val, rm;
39d5492a 980 TCGv_i32 offset;
3b46e624 981
2c0262af
FB
982 if (insn & (1 << 22)) {
983 /* immediate */
984 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
985 if (!(insn & (1 << 23)))
986 val = -val;
18acad92 987 val += extra;
537730b9 988 if (val != 0)
b0109805 989 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
990 } else {
991 /* register */
191f9a93 992 if (extra)
b0109805 993 tcg_gen_addi_i32(var, var, extra);
2c0262af 994 rm = (insn) & 0xf;
b26eefb6 995 offset = load_reg(s, rm);
2c0262af 996 if (!(insn & (1 << 23)))
b0109805 997 tcg_gen_sub_i32(var, var, offset);
2c0262af 998 else
b0109805 999 tcg_gen_add_i32(var, var, offset);
7d1b0095 1000 tcg_temp_free_i32(offset);
2c0262af
FB
1001 }
1002}
1003
5aaebd13
PM
1004static TCGv_ptr get_fpstatus_ptr(int neon)
1005{
1006 TCGv_ptr statusptr = tcg_temp_new_ptr();
1007 int offset;
1008 if (neon) {
0ecb72a5 1009 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1010 } else {
0ecb72a5 1011 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1012 }
1013 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1014 return statusptr;
1015}
1016
4373f3ce
PB
/* Emit a two-operand VFP arithmetic op: F0 = F0 <op> F1, in single or
 * double precision, using the normal VFP fp_status.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1035
605a6aed
PM
1036static inline void gen_vfp_F1_mul(int dp)
1037{
1038 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1039 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1040 if (dp) {
ae1857ec 1041 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1042 } else {
ae1857ec 1043 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1044 }
ae1857ec 1045 tcg_temp_free_ptr(fpst);
605a6aed
PM
1046}
1047
1048static inline void gen_vfp_F1_neg(int dp)
1049{
1050 /* Like gen_vfp_neg() but put result in F1 */
1051 if (dp) {
1052 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1053 } else {
1054 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1055 }
1056}
1057
4373f3ce
PB
1058static inline void gen_vfp_abs(int dp)
1059{
1060 if (dp)
1061 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1062 else
1063 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1064}
1065
1066static inline void gen_vfp_neg(int dp)
1067{
1068 if (dp)
1069 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1070 else
1071 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1072}
1073
1074static inline void gen_vfp_sqrt(int dp)
1075{
1076 if (dp)
1077 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1078 else
1079 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1080}
1081
1082static inline void gen_vfp_cmp(int dp)
1083{
1084 if (dp)
1085 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1086 else
1087 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1088}
1089
1090static inline void gen_vfp_cmpe(int dp)
1091{
1092 if (dp)
1093 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1094 else
1095 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1096}
1097
1098static inline void gen_vfp_F1_ld0(int dp)
1099{
1100 if (dp)
5b340b51 1101 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1102 else
5b340b51 1103 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1104}
1105
5500b06c
PM
/* Integer-to-float conversions: the 32-bit source is always in
 * cpu_F0s; the result goes to F0 in the selected precision.
 * 'neon' selects the standard-FPSCR float_status.
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1121
5500b06c
PM
/* Float-to-integer conversions: note the 32-bit integer result is
 * always written to cpu_F0s, even for a double-precision source.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1139
/* Fixed-point <-> float conversions with a shift (fraction bits)
 * operand.  'round' selects the helper's rounding-mode suffix:
 * the float-to-fixed variants round to zero, the fixed-to-float
 * variants use the empty (default) suffix.
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1164
39d5492a 1165static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1166{
08307563
PM
1167 if (dp) {
1168 gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
1169 } else {
1170 gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
1171 }
b5ff1b31
FB
1172}
1173
39d5492a 1174static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1175{
08307563
PM
1176 if (dp) {
1177 gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
1178 } else {
1179 gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
1180 }
b5ff1b31
FB
1181}
1182
8e96005d
FB
1183static inline long
1184vfp_reg_offset (int dp, int reg)
1185{
1186 if (dp)
1187 return offsetof(CPUARMState, vfp.regs[reg]);
1188 else if (reg & 1) {
1189 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1190 + offsetof(CPU_DoubleU, l.upper);
1191 } else {
1192 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1193 + offsetof(CPU_DoubleU, l.lower);
1194 }
1195}
9ee6e8bb
PB
1196
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Piece n of NEON register 'reg' is single-precision
     * register reg * 2 + n.
     */
    return vfp_reg_offset(0, reg * 2 + n);
}
1206
/* Load one 32-bit piece of a NEON register into a freshly allocated
 * temp; the caller owns (and must free) the returned value.
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1213
/* Store 'var' into one 32-bit piece of a NEON register.
 * Consumes (frees) 'var'.
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1219
/* Load a whole 64-bit NEON/VFP double register into 'var'.  */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1224
/* Store 'var' into a whole 64-bit NEON/VFP double register.  */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1229
4373f3ce
PB
/* Float load/store ops are just the integer ops of the same width.  */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1234
b7bcbe95
FB
1235static inline void gen_mov_F0_vreg(int dp, int reg)
1236{
1237 if (dp)
4373f3ce 1238 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1239 else
4373f3ce 1240 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1241}
1242
1243static inline void gen_mov_F1_vreg(int dp, int reg)
1244{
1245 if (dp)
4373f3ce 1246 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1247 else
4373f3ce 1248 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1249}
1250
1251static inline void gen_mov_vreg_F0(int dp, int reg)
1252{
1253 if (dp)
4373f3ce 1254 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1255 else
4373f3ce 1256 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1257}
1258
18c9b560
AZ
1259#define ARM_CP_RW_BIT (1 << 20)
1260
/* Load iwMMXt data register wRn into 'var'.  */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1265
/* Store 'var' into iwMMXt data register wRn.  */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1270
/* Load iwMMXt control register wCx into a freshly allocated temp;
 * the caller owns (and must free) the returned value.
 */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}
1277
/* Store 'var' into iwMMXt control register wCx.  Consumes 'var'.  */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1283
/* wRn = M0 */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1288
/* M0 = wRn */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1293
/* M0 |= wRn (clobbers V1 as scratch) */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1299
/* M0 &= wRn (clobbers V1 as scratch) */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1305
/* M0 ^= wRn (clobbers V1 as scratch) */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1311
/* Emit M0 = helper(M0, wRn) for helpers that do not need cpu_env.  */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}
1318
477955bd
PM
/* Emit M0 = helper(env, M0, wRn) for helpers that need cpu_env
 * (e.g. to update saturation/flag state).
 */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate an env-taking op for byte, word and long variants.  */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)
e677137d 1330
/* Emit the unary form M0 = helper(env, M0).  */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1336
/* Multiply/accumulate and sum-of-absolute-differences ops.  */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Element unpack (interleave) ops.  */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element compare ops.  */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element min/max ops.  */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Element add/subtract ops (wrapping and saturating variants).  */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averaging ops.  */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Pack (narrowing) ops.  */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1393
e677137d
PB
/* Set bit 1 (the MUP flag) of wCon.  */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1401
/* Set bit 0 (the CUP flag) of wCon.  */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1409
/* Compute SIMD N/Z flags from M0 via helper and store them in wCASF.  */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1416
/* M0 += zero-extended low 32 bits of wRn (clobbers V1 as scratch).  */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1423
39d5492a
PM
1424static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1425 TCGv_i32 dest)
18c9b560
AZ
1426{
1427 int rd;
1428 uint32_t offset;
39d5492a 1429 TCGv_i32 tmp;
18c9b560
AZ
1430
1431 rd = (insn >> 16) & 0xf;
da6b5335 1432 tmp = load_reg(s, rd);
18c9b560
AZ
1433
1434 offset = (insn & 0xff) << ((insn >> 7) & 2);
1435 if (insn & (1 << 24)) {
1436 /* Pre indexed */
1437 if (insn & (1 << 23))
da6b5335 1438 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1439 else
da6b5335
FN
1440 tcg_gen_addi_i32(tmp, tmp, -offset);
1441 tcg_gen_mov_i32(dest, tmp);
18c9b560 1442 if (insn & (1 << 21))
da6b5335
FN
1443 store_reg(s, rd, tmp);
1444 else
7d1b0095 1445 tcg_temp_free_i32(tmp);
18c9b560
AZ
1446 } else if (insn & (1 << 21)) {
1447 /* Post indexed */
da6b5335 1448 tcg_gen_mov_i32(dest, tmp);
18c9b560 1449 if (insn & (1 << 23))
da6b5335 1450 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1451 else
da6b5335
FN
1452 tcg_gen_addi_i32(tmp, tmp, -offset);
1453 store_reg(s, rd, tmp);
18c9b560
AZ
1454 } else if (!(insn & (1 << 23)))
1455 return 1;
1456 return 0;
1457}
1458
/* Fetch the shift amount for an iwMMXt shift insn into 'dest':
 * with bit 8 set it must come from a wCGR0..wCGR3 control register,
 * otherwise from the low 32 bits of data register wRd.  The value is
 * masked by 'mask'.  Returns nonzero for an invalid encoding.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1480
a1c7273b 1481/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1482 (ie. an undefined instruction). */
0ecb72a5 1483static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1484{
1485 int rd, wrd;
1486 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1487 TCGv_i32 addr;
1488 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1489
1490 if ((insn & 0x0e000e00) == 0x0c000000) {
1491 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1492 wrd = insn & 0xf;
1493 rdlo = (insn >> 12) & 0xf;
1494 rdhi = (insn >> 16) & 0xf;
1495 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1496 iwmmxt_load_reg(cpu_V0, wrd);
1497 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1498 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1499 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1500 } else { /* TMCRR */
da6b5335
FN
1501 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1502 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1503 gen_op_iwmmxt_set_mup();
1504 }
1505 return 0;
1506 }
1507
1508 wrd = (insn >> 12) & 0xf;
7d1b0095 1509 addr = tcg_temp_new_i32();
da6b5335 1510 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1511 tcg_temp_free_i32(addr);
18c9b560 1512 return 1;
da6b5335 1513 }
18c9b560
AZ
1514 if (insn & ARM_CP_RW_BIT) {
1515 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1516 tmp = tcg_temp_new_i32();
08307563 1517 gen_aa32_ld32u(tmp, addr, IS_USER(s));
da6b5335 1518 iwmmxt_store_creg(wrd, tmp);
18c9b560 1519 } else {
e677137d
PB
1520 i = 1;
1521 if (insn & (1 << 8)) {
1522 if (insn & (1 << 22)) { /* WLDRD */
08307563 1523 gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1524 i = 0;
1525 } else { /* WLDRW wRd */
29531141 1526 tmp = tcg_temp_new_i32();
08307563 1527 gen_aa32_ld32u(tmp, addr, IS_USER(s));
e677137d
PB
1528 }
1529 } else {
29531141 1530 tmp = tcg_temp_new_i32();
e677137d 1531 if (insn & (1 << 22)) { /* WLDRH */
08307563 1532 gen_aa32_ld16u(tmp, addr, IS_USER(s));
e677137d 1533 } else { /* WLDRB */
08307563 1534 gen_aa32_ld8u(tmp, addr, IS_USER(s));
e677137d
PB
1535 }
1536 }
1537 if (i) {
1538 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1539 tcg_temp_free_i32(tmp);
e677137d 1540 }
18c9b560
AZ
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 }
1543 } else {
1544 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1545 tmp = iwmmxt_load_creg(wrd);
08307563 1546 gen_aa32_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1547 } else {
1548 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1549 tmp = tcg_temp_new_i32();
e677137d
PB
1550 if (insn & (1 << 8)) {
1551 if (insn & (1 << 22)) { /* WSTRD */
08307563 1552 gen_aa32_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1553 } else { /* WSTRW wRd */
1554 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1555 gen_aa32_st32(tmp, addr, IS_USER(s));
e677137d
PB
1556 }
1557 } else {
1558 if (insn & (1 << 22)) { /* WSTRH */
1559 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1560 gen_aa32_st16(tmp, addr, IS_USER(s));
e677137d
PB
1561 } else { /* WSTRB */
1562 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1563 gen_aa32_st8(tmp, addr, IS_USER(s));
e677137d
PB
1564 }
1565 }
18c9b560 1566 }
29531141 1567 tcg_temp_free_i32(tmp);
18c9b560 1568 }
7d1b0095 1569 tcg_temp_free_i32(addr);
18c9b560
AZ
1570 return 0;
1571 }
1572
1573 if ((insn & 0x0f000000) != 0x0e000000)
1574 return 1;
1575
1576 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1577 case 0x000: /* WOR */
1578 wrd = (insn >> 12) & 0xf;
1579 rd0 = (insn >> 0) & 0xf;
1580 rd1 = (insn >> 16) & 0xf;
1581 gen_op_iwmmxt_movq_M0_wRn(rd0);
1582 gen_op_iwmmxt_orq_M0_wRn(rd1);
1583 gen_op_iwmmxt_setpsr_nz();
1584 gen_op_iwmmxt_movq_wRn_M0(wrd);
1585 gen_op_iwmmxt_set_mup();
1586 gen_op_iwmmxt_set_cup();
1587 break;
1588 case 0x011: /* TMCR */
1589 if (insn & 0xf)
1590 return 1;
1591 rd = (insn >> 12) & 0xf;
1592 wrd = (insn >> 16) & 0xf;
1593 switch (wrd) {
1594 case ARM_IWMMXT_wCID:
1595 case ARM_IWMMXT_wCASF:
1596 break;
1597 case ARM_IWMMXT_wCon:
1598 gen_op_iwmmxt_set_cup();
1599 /* Fall through. */
1600 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1601 tmp = iwmmxt_load_creg(wrd);
1602 tmp2 = load_reg(s, rd);
f669df27 1603 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1604 tcg_temp_free_i32(tmp2);
da6b5335 1605 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1606 break;
1607 case ARM_IWMMXT_wCGR0:
1608 case ARM_IWMMXT_wCGR1:
1609 case ARM_IWMMXT_wCGR2:
1610 case ARM_IWMMXT_wCGR3:
1611 gen_op_iwmmxt_set_cup();
da6b5335
FN
1612 tmp = load_reg(s, rd);
1613 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1614 break;
1615 default:
1616 return 1;
1617 }
1618 break;
1619 case 0x100: /* WXOR */
1620 wrd = (insn >> 12) & 0xf;
1621 rd0 = (insn >> 0) & 0xf;
1622 rd1 = (insn >> 16) & 0xf;
1623 gen_op_iwmmxt_movq_M0_wRn(rd0);
1624 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1625 gen_op_iwmmxt_setpsr_nz();
1626 gen_op_iwmmxt_movq_wRn_M0(wrd);
1627 gen_op_iwmmxt_set_mup();
1628 gen_op_iwmmxt_set_cup();
1629 break;
1630 case 0x111: /* TMRC */
1631 if (insn & 0xf)
1632 return 1;
1633 rd = (insn >> 12) & 0xf;
1634 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1635 tmp = iwmmxt_load_creg(wrd);
1636 store_reg(s, rd, tmp);
18c9b560
AZ
1637 break;
1638 case 0x300: /* WANDN */
1639 wrd = (insn >> 12) & 0xf;
1640 rd0 = (insn >> 0) & 0xf;
1641 rd1 = (insn >> 16) & 0xf;
1642 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1643 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1644 gen_op_iwmmxt_andq_M0_wRn(rd1);
1645 gen_op_iwmmxt_setpsr_nz();
1646 gen_op_iwmmxt_movq_wRn_M0(wrd);
1647 gen_op_iwmmxt_set_mup();
1648 gen_op_iwmmxt_set_cup();
1649 break;
1650 case 0x200: /* WAND */
1651 wrd = (insn >> 12) & 0xf;
1652 rd0 = (insn >> 0) & 0xf;
1653 rd1 = (insn >> 16) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0);
1655 gen_op_iwmmxt_andq_M0_wRn(rd1);
1656 gen_op_iwmmxt_setpsr_nz();
1657 gen_op_iwmmxt_movq_wRn_M0(wrd);
1658 gen_op_iwmmxt_set_mup();
1659 gen_op_iwmmxt_set_cup();
1660 break;
1661 case 0x810: case 0xa10: /* WMADD */
1662 wrd = (insn >> 12) & 0xf;
1663 rd0 = (insn >> 0) & 0xf;
1664 rd1 = (insn >> 16) & 0xf;
1665 gen_op_iwmmxt_movq_M0_wRn(rd0);
1666 if (insn & (1 << 21))
1667 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1668 else
1669 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1670 gen_op_iwmmxt_movq_wRn_M0(wrd);
1671 gen_op_iwmmxt_set_mup();
1672 break;
1673 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1674 wrd = (insn >> 12) & 0xf;
1675 rd0 = (insn >> 16) & 0xf;
1676 rd1 = (insn >> 0) & 0xf;
1677 gen_op_iwmmxt_movq_M0_wRn(rd0);
1678 switch ((insn >> 22) & 3) {
1679 case 0:
1680 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1681 break;
1682 case 1:
1683 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1684 break;
1685 case 2:
1686 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1687 break;
1688 case 3:
1689 return 1;
1690 }
1691 gen_op_iwmmxt_movq_wRn_M0(wrd);
1692 gen_op_iwmmxt_set_mup();
1693 gen_op_iwmmxt_set_cup();
1694 break;
1695 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1696 wrd = (insn >> 12) & 0xf;
1697 rd0 = (insn >> 16) & 0xf;
1698 rd1 = (insn >> 0) & 0xf;
1699 gen_op_iwmmxt_movq_M0_wRn(rd0);
1700 switch ((insn >> 22) & 3) {
1701 case 0:
1702 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1703 break;
1704 case 1:
1705 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1706 break;
1707 case 2:
1708 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1709 break;
1710 case 3:
1711 return 1;
1712 }
1713 gen_op_iwmmxt_movq_wRn_M0(wrd);
1714 gen_op_iwmmxt_set_mup();
1715 gen_op_iwmmxt_set_cup();
1716 break;
1717 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1718 wrd = (insn >> 12) & 0xf;
1719 rd0 = (insn >> 16) & 0xf;
1720 rd1 = (insn >> 0) & 0xf;
1721 gen_op_iwmmxt_movq_M0_wRn(rd0);
1722 if (insn & (1 << 22))
1723 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1724 else
1725 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1726 if (!(insn & (1 << 20)))
1727 gen_op_iwmmxt_addl_M0_wRn(wrd);
1728 gen_op_iwmmxt_movq_wRn_M0(wrd);
1729 gen_op_iwmmxt_set_mup();
1730 break;
1731 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1732 wrd = (insn >> 12) & 0xf;
1733 rd0 = (insn >> 16) & 0xf;
1734 rd1 = (insn >> 0) & 0xf;
1735 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1736 if (insn & (1 << 21)) {
1737 if (insn & (1 << 20))
1738 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1739 else
1740 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1741 } else {
1742 if (insn & (1 << 20))
1743 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1744 else
1745 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1746 }
18c9b560
AZ
1747 gen_op_iwmmxt_movq_wRn_M0(wrd);
1748 gen_op_iwmmxt_set_mup();
1749 break;
1750 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1751 wrd = (insn >> 12) & 0xf;
1752 rd0 = (insn >> 16) & 0xf;
1753 rd1 = (insn >> 0) & 0xf;
1754 gen_op_iwmmxt_movq_M0_wRn(rd0);
1755 if (insn & (1 << 21))
1756 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1757 else
1758 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1759 if (!(insn & (1 << 20))) {
e677137d
PB
1760 iwmmxt_load_reg(cpu_V1, wrd);
1761 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1762 }
1763 gen_op_iwmmxt_movq_wRn_M0(wrd);
1764 gen_op_iwmmxt_set_mup();
1765 break;
1766 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1767 wrd = (insn >> 12) & 0xf;
1768 rd0 = (insn >> 16) & 0xf;
1769 rd1 = (insn >> 0) & 0xf;
1770 gen_op_iwmmxt_movq_M0_wRn(rd0);
1771 switch ((insn >> 22) & 3) {
1772 case 0:
1773 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1774 break;
1775 case 1:
1776 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1777 break;
1778 case 2:
1779 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1780 break;
1781 case 3:
1782 return 1;
1783 }
1784 gen_op_iwmmxt_movq_wRn_M0(wrd);
1785 gen_op_iwmmxt_set_mup();
1786 gen_op_iwmmxt_set_cup();
1787 break;
1788 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1789 wrd = (insn >> 12) & 0xf;
1790 rd0 = (insn >> 16) & 0xf;
1791 rd1 = (insn >> 0) & 0xf;
1792 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1793 if (insn & (1 << 22)) {
1794 if (insn & (1 << 20))
1795 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1796 else
1797 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1798 } else {
1799 if (insn & (1 << 20))
1800 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1801 else
1802 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1803 }
18c9b560
AZ
1804 gen_op_iwmmxt_movq_wRn_M0(wrd);
1805 gen_op_iwmmxt_set_mup();
1806 gen_op_iwmmxt_set_cup();
1807 break;
1808 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1809 wrd = (insn >> 12) & 0xf;
1810 rd0 = (insn >> 16) & 0xf;
1811 rd1 = (insn >> 0) & 0xf;
1812 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1813 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1814 tcg_gen_andi_i32(tmp, tmp, 7);
1815 iwmmxt_load_reg(cpu_V1, rd1);
1816 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1817 tcg_temp_free_i32(tmp);
18c9b560
AZ
1818 gen_op_iwmmxt_movq_wRn_M0(wrd);
1819 gen_op_iwmmxt_set_mup();
1820 break;
1821 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1822 if (((insn >> 6) & 3) == 3)
1823 return 1;
18c9b560
AZ
1824 rd = (insn >> 12) & 0xf;
1825 wrd = (insn >> 16) & 0xf;
da6b5335 1826 tmp = load_reg(s, rd);
18c9b560
AZ
1827 gen_op_iwmmxt_movq_M0_wRn(wrd);
1828 switch ((insn >> 6) & 3) {
1829 case 0:
da6b5335
FN
1830 tmp2 = tcg_const_i32(0xff);
1831 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1832 break;
1833 case 1:
da6b5335
FN
1834 tmp2 = tcg_const_i32(0xffff);
1835 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1836 break;
1837 case 2:
da6b5335
FN
1838 tmp2 = tcg_const_i32(0xffffffff);
1839 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1840 break;
da6b5335 1841 default:
39d5492a
PM
1842 TCGV_UNUSED_I32(tmp2);
1843 TCGV_UNUSED_I32(tmp3);
18c9b560 1844 }
da6b5335 1845 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1846 tcg_temp_free_i32(tmp3);
1847 tcg_temp_free_i32(tmp2);
7d1b0095 1848 tcg_temp_free_i32(tmp);
18c9b560
AZ
1849 gen_op_iwmmxt_movq_wRn_M0(wrd);
1850 gen_op_iwmmxt_set_mup();
1851 break;
1852 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1853 rd = (insn >> 12) & 0xf;
1854 wrd = (insn >> 16) & 0xf;
da6b5335 1855 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1856 return 1;
1857 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1858 tmp = tcg_temp_new_i32();
18c9b560
AZ
1859 switch ((insn >> 22) & 3) {
1860 case 0:
da6b5335
FN
1861 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1862 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1863 if (insn & 8) {
1864 tcg_gen_ext8s_i32(tmp, tmp);
1865 } else {
1866 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1867 }
1868 break;
1869 case 1:
da6b5335
FN
1870 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1871 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1872 if (insn & 8) {
1873 tcg_gen_ext16s_i32(tmp, tmp);
1874 } else {
1875 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1876 }
1877 break;
1878 case 2:
da6b5335
FN
1879 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1880 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1881 break;
18c9b560 1882 }
da6b5335 1883 store_reg(s, rd, tmp);
18c9b560
AZ
1884 break;
1885 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1886 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1887 return 1;
da6b5335 1888 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1889 switch ((insn >> 22) & 3) {
1890 case 0:
da6b5335 1891 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1892 break;
1893 case 1:
da6b5335 1894 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1895 break;
1896 case 2:
da6b5335 1897 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1898 break;
18c9b560 1899 }
da6b5335
FN
1900 tcg_gen_shli_i32(tmp, tmp, 28);
1901 gen_set_nzcv(tmp);
7d1b0095 1902 tcg_temp_free_i32(tmp);
18c9b560
AZ
1903 break;
1904 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1905 if (((insn >> 6) & 3) == 3)
1906 return 1;
18c9b560
AZ
1907 rd = (insn >> 12) & 0xf;
1908 wrd = (insn >> 16) & 0xf;
da6b5335 1909 tmp = load_reg(s, rd);
18c9b560
AZ
1910 switch ((insn >> 6) & 3) {
1911 case 0:
da6b5335 1912 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1913 break;
1914 case 1:
da6b5335 1915 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1916 break;
1917 case 2:
da6b5335 1918 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1919 break;
18c9b560 1920 }
7d1b0095 1921 tcg_temp_free_i32(tmp);
18c9b560
AZ
1922 gen_op_iwmmxt_movq_wRn_M0(wrd);
1923 gen_op_iwmmxt_set_mup();
1924 break;
1925 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1926 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1927 return 1;
da6b5335 1928 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1929 tmp2 = tcg_temp_new_i32();
da6b5335 1930 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1931 switch ((insn >> 22) & 3) {
1932 case 0:
1933 for (i = 0; i < 7; i ++) {
da6b5335
FN
1934 tcg_gen_shli_i32(tmp2, tmp2, 4);
1935 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1936 }
1937 break;
1938 case 1:
1939 for (i = 0; i < 3; i ++) {
da6b5335
FN
1940 tcg_gen_shli_i32(tmp2, tmp2, 8);
1941 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1942 }
1943 break;
1944 case 2:
da6b5335
FN
1945 tcg_gen_shli_i32(tmp2, tmp2, 16);
1946 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1947 break;
18c9b560 1948 }
da6b5335 1949 gen_set_nzcv(tmp);
7d1b0095
PM
1950 tcg_temp_free_i32(tmp2);
1951 tcg_temp_free_i32(tmp);
18c9b560
AZ
1952 break;
1953 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1954 wrd = (insn >> 12) & 0xf;
1955 rd0 = (insn >> 16) & 0xf;
1956 gen_op_iwmmxt_movq_M0_wRn(rd0);
1957 switch ((insn >> 22) & 3) {
1958 case 0:
e677137d 1959 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1960 break;
1961 case 1:
e677137d 1962 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1963 break;
1964 case 2:
e677137d 1965 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1966 break;
1967 case 3:
1968 return 1;
1969 }
1970 gen_op_iwmmxt_movq_wRn_M0(wrd);
1971 gen_op_iwmmxt_set_mup();
1972 break;
1973 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1974 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1975 return 1;
da6b5335 1976 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1977 tmp2 = tcg_temp_new_i32();
da6b5335 1978 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1979 switch ((insn >> 22) & 3) {
1980 case 0:
1981 for (i = 0; i < 7; i ++) {
da6b5335
FN
1982 tcg_gen_shli_i32(tmp2, tmp2, 4);
1983 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1984 }
1985 break;
1986 case 1:
1987 for (i = 0; i < 3; i ++) {
da6b5335
FN
1988 tcg_gen_shli_i32(tmp2, tmp2, 8);
1989 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1990 }
1991 break;
1992 case 2:
da6b5335
FN
1993 tcg_gen_shli_i32(tmp2, tmp2, 16);
1994 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1995 break;
18c9b560 1996 }
da6b5335 1997 gen_set_nzcv(tmp);
7d1b0095
PM
1998 tcg_temp_free_i32(tmp2);
1999 tcg_temp_free_i32(tmp);
18c9b560
AZ
2000 break;
2001 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2002 rd = (insn >> 12) & 0xf;
2003 rd0 = (insn >> 16) & 0xf;
da6b5335 2004 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2005 return 1;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2007 tmp = tcg_temp_new_i32();
18c9b560
AZ
2008 switch ((insn >> 22) & 3) {
2009 case 0:
da6b5335 2010 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2011 break;
2012 case 1:
da6b5335 2013 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2014 break;
2015 case 2:
da6b5335 2016 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2017 break;
18c9b560 2018 }
da6b5335 2019 store_reg(s, rd, tmp);
18c9b560
AZ
2020 break;
2021 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2022 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2023 wrd = (insn >> 12) & 0xf;
2024 rd0 = (insn >> 16) & 0xf;
2025 rd1 = (insn >> 0) & 0xf;
2026 gen_op_iwmmxt_movq_M0_wRn(rd0);
2027 switch ((insn >> 22) & 3) {
2028 case 0:
2029 if (insn & (1 << 21))
2030 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2031 else
2032 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2033 break;
2034 case 1:
2035 if (insn & (1 << 21))
2036 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2037 else
2038 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2039 break;
2040 case 2:
2041 if (insn & (1 << 21))
2042 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2043 else
2044 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2045 break;
2046 case 3:
2047 return 1;
2048 }
2049 gen_op_iwmmxt_movq_wRn_M0(wrd);
2050 gen_op_iwmmxt_set_mup();
2051 gen_op_iwmmxt_set_cup();
2052 break;
2053 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2054 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2055 wrd = (insn >> 12) & 0xf;
2056 rd0 = (insn >> 16) & 0xf;
2057 gen_op_iwmmxt_movq_M0_wRn(rd0);
2058 switch ((insn >> 22) & 3) {
2059 case 0:
2060 if (insn & (1 << 21))
2061 gen_op_iwmmxt_unpacklsb_M0();
2062 else
2063 gen_op_iwmmxt_unpacklub_M0();
2064 break;
2065 case 1:
2066 if (insn & (1 << 21))
2067 gen_op_iwmmxt_unpacklsw_M0();
2068 else
2069 gen_op_iwmmxt_unpackluw_M0();
2070 break;
2071 case 2:
2072 if (insn & (1 << 21))
2073 gen_op_iwmmxt_unpacklsl_M0();
2074 else
2075 gen_op_iwmmxt_unpacklul_M0();
2076 break;
2077 case 3:
2078 return 1;
2079 }
2080 gen_op_iwmmxt_movq_wRn_M0(wrd);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2083 break;
2084 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2085 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2086 wrd = (insn >> 12) & 0xf;
2087 rd0 = (insn >> 16) & 0xf;
2088 gen_op_iwmmxt_movq_M0_wRn(rd0);
2089 switch ((insn >> 22) & 3) {
2090 case 0:
2091 if (insn & (1 << 21))
2092 gen_op_iwmmxt_unpackhsb_M0();
2093 else
2094 gen_op_iwmmxt_unpackhub_M0();
2095 break;
2096 case 1:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_unpackhsw_M0();
2099 else
2100 gen_op_iwmmxt_unpackhuw_M0();
2101 break;
2102 case 2:
2103 if (insn & (1 << 21))
2104 gen_op_iwmmxt_unpackhsl_M0();
2105 else
2106 gen_op_iwmmxt_unpackhul_M0();
2107 break;
2108 case 3:
2109 return 1;
2110 }
2111 gen_op_iwmmxt_movq_wRn_M0(wrd);
2112 gen_op_iwmmxt_set_mup();
2113 gen_op_iwmmxt_set_cup();
2114 break;
2115 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2116 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2117 if (((insn >> 22) & 3) == 0)
2118 return 1;
18c9b560
AZ
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2122 tmp = tcg_temp_new_i32();
da6b5335 2123 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2124 tcg_temp_free_i32(tmp);
18c9b560 2125 return 1;
da6b5335 2126 }
18c9b560 2127 switch ((insn >> 22) & 3) {
18c9b560 2128 case 1:
477955bd 2129 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2130 break;
2131 case 2:
477955bd 2132 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2133 break;
2134 case 3:
477955bd 2135 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2136 break;
2137 }
7d1b0095 2138 tcg_temp_free_i32(tmp);
18c9b560
AZ
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2142 break;
2143 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2144 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2145 if (((insn >> 22) & 3) == 0)
2146 return 1;
18c9b560
AZ
2147 wrd = (insn >> 12) & 0xf;
2148 rd0 = (insn >> 16) & 0xf;
2149 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2150 tmp = tcg_temp_new_i32();
da6b5335 2151 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2152 tcg_temp_free_i32(tmp);
18c9b560 2153 return 1;
da6b5335 2154 }
18c9b560 2155 switch ((insn >> 22) & 3) {
18c9b560 2156 case 1:
477955bd 2157 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2158 break;
2159 case 2:
477955bd 2160 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2161 break;
2162 case 3:
477955bd 2163 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2164 break;
2165 }
7d1b0095 2166 tcg_temp_free_i32(tmp);
18c9b560
AZ
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 gen_op_iwmmxt_set_cup();
2170 break;
2171 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2172 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2173 if (((insn >> 22) & 3) == 0)
2174 return 1;
18c9b560
AZ
2175 wrd = (insn >> 12) & 0xf;
2176 rd0 = (insn >> 16) & 0xf;
2177 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2178 tmp = tcg_temp_new_i32();
da6b5335 2179 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2180 tcg_temp_free_i32(tmp);
18c9b560 2181 return 1;
da6b5335 2182 }
18c9b560 2183 switch ((insn >> 22) & 3) {
18c9b560 2184 case 1:
477955bd 2185 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2186 break;
2187 case 2:
477955bd 2188 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2189 break;
2190 case 3:
477955bd 2191 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2192 break;
2193 }
7d1b0095 2194 tcg_temp_free_i32(tmp);
18c9b560
AZ
2195 gen_op_iwmmxt_movq_wRn_M0(wrd);
2196 gen_op_iwmmxt_set_mup();
2197 gen_op_iwmmxt_set_cup();
2198 break;
2199 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2200 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2201 if (((insn >> 22) & 3) == 0)
2202 return 1;
18c9b560
AZ
2203 wrd = (insn >> 12) & 0xf;
2204 rd0 = (insn >> 16) & 0xf;
2205 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2206 tmp = tcg_temp_new_i32();
18c9b560 2207 switch ((insn >> 22) & 3) {
18c9b560 2208 case 1:
da6b5335 2209 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2210 tcg_temp_free_i32(tmp);
18c9b560 2211 return 1;
da6b5335 2212 }
477955bd 2213 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2214 break;
2215 case 2:
da6b5335 2216 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2217 tcg_temp_free_i32(tmp);
18c9b560 2218 return 1;
da6b5335 2219 }
477955bd 2220 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2221 break;
2222 case 3:
da6b5335 2223 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2224 tcg_temp_free_i32(tmp);
18c9b560 2225 return 1;
da6b5335 2226 }
477955bd 2227 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2228 break;
2229 }
7d1b0095 2230 tcg_temp_free_i32(tmp);
18c9b560
AZ
2231 gen_op_iwmmxt_movq_wRn_M0(wrd);
2232 gen_op_iwmmxt_set_mup();
2233 gen_op_iwmmxt_set_cup();
2234 break;
2235 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2236 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 rd1 = (insn >> 0) & 0xf;
2240 gen_op_iwmmxt_movq_M0_wRn(rd0);
2241 switch ((insn >> 22) & 3) {
2242 case 0:
2243 if (insn & (1 << 21))
2244 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2245 else
2246 gen_op_iwmmxt_minub_M0_wRn(rd1);
2247 break;
2248 case 1:
2249 if (insn & (1 << 21))
2250 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2251 else
2252 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2253 break;
2254 case 2:
2255 if (insn & (1 << 21))
2256 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2257 else
2258 gen_op_iwmmxt_minul_M0_wRn(rd1);
2259 break;
2260 case 3:
2261 return 1;
2262 }
2263 gen_op_iwmmxt_movq_wRn_M0(wrd);
2264 gen_op_iwmmxt_set_mup();
2265 break;
2266 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2267 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2268 wrd = (insn >> 12) & 0xf;
2269 rd0 = (insn >> 16) & 0xf;
2270 rd1 = (insn >> 0) & 0xf;
2271 gen_op_iwmmxt_movq_M0_wRn(rd0);
2272 switch ((insn >> 22) & 3) {
2273 case 0:
2274 if (insn & (1 << 21))
2275 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2276 else
2277 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2278 break;
2279 case 1:
2280 if (insn & (1 << 21))
2281 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2282 else
2283 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2284 break;
2285 case 2:
2286 if (insn & (1 << 21))
2287 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2288 else
2289 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2290 break;
2291 case 3:
2292 return 1;
2293 }
2294 gen_op_iwmmxt_movq_wRn_M0(wrd);
2295 gen_op_iwmmxt_set_mup();
2296 break;
2297 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2298 case 0x402: case 0x502: case 0x602: case 0x702:
2299 wrd = (insn >> 12) & 0xf;
2300 rd0 = (insn >> 16) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2303 tmp = tcg_const_i32((insn >> 20) & 3);
2304 iwmmxt_load_reg(cpu_V1, rd1);
2305 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2306 tcg_temp_free_i32(tmp);
18c9b560
AZ
2307 gen_op_iwmmxt_movq_wRn_M0(wrd);
2308 gen_op_iwmmxt_set_mup();
2309 break;
2310 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2311 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2312 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2313 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2314 wrd = (insn >> 12) & 0xf;
2315 rd0 = (insn >> 16) & 0xf;
2316 rd1 = (insn >> 0) & 0xf;
2317 gen_op_iwmmxt_movq_M0_wRn(rd0);
2318 switch ((insn >> 20) & 0xf) {
2319 case 0x0:
2320 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2321 break;
2322 case 0x1:
2323 gen_op_iwmmxt_subub_M0_wRn(rd1);
2324 break;
2325 case 0x3:
2326 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2327 break;
2328 case 0x4:
2329 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2330 break;
2331 case 0x5:
2332 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2333 break;
2334 case 0x7:
2335 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2336 break;
2337 case 0x8:
2338 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2339 break;
2340 case 0x9:
2341 gen_op_iwmmxt_subul_M0_wRn(rd1);
2342 break;
2343 case 0xb:
2344 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2345 break;
2346 default:
2347 return 1;
2348 }
2349 gen_op_iwmmxt_movq_wRn_M0(wrd);
2350 gen_op_iwmmxt_set_mup();
2351 gen_op_iwmmxt_set_cup();
2352 break;
2353 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2354 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2355 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2356 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2357 wrd = (insn >> 12) & 0xf;
2358 rd0 = (insn >> 16) & 0xf;
2359 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2360 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2361 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2362 tcg_temp_free_i32(tmp);
18c9b560
AZ
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2366 break;
2367 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2368 case 0x418: case 0x518: case 0x618: case 0x718:
2369 case 0x818: case 0x918: case 0xa18: case 0xb18:
2370 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2371 wrd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
2373 rd1 = (insn >> 0) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
2375 switch ((insn >> 20) & 0xf) {
2376 case 0x0:
2377 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2378 break;
2379 case 0x1:
2380 gen_op_iwmmxt_addub_M0_wRn(rd1);
2381 break;
2382 case 0x3:
2383 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2384 break;
2385 case 0x4:
2386 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2387 break;
2388 case 0x5:
2389 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2390 break;
2391 case 0x7:
2392 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2393 break;
2394 case 0x8:
2395 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2396 break;
2397 case 0x9:
2398 gen_op_iwmmxt_addul_M0_wRn(rd1);
2399 break;
2400 case 0xb:
2401 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2402 break;
2403 default:
2404 return 1;
2405 }
2406 gen_op_iwmmxt_movq_wRn_M0(wrd);
2407 gen_op_iwmmxt_set_mup();
2408 gen_op_iwmmxt_set_cup();
2409 break;
2410 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2411 case 0x408: case 0x508: case 0x608: case 0x708:
2412 case 0x808: case 0x908: case 0xa08: case 0xb08:
2413 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2414 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2415 return 1;
18c9b560
AZ
2416 wrd = (insn >> 12) & 0xf;
2417 rd0 = (insn >> 16) & 0xf;
2418 rd1 = (insn >> 0) & 0xf;
2419 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2420 switch ((insn >> 22) & 3) {
18c9b560
AZ
2421 case 1:
2422 if (insn & (1 << 21))
2423 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2424 else
2425 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2426 break;
2427 case 2:
2428 if (insn & (1 << 21))
2429 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2430 else
2431 gen_op_iwmmxt_packul_M0_wRn(rd1);
2432 break;
2433 case 3:
2434 if (insn & (1 << 21))
2435 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2436 else
2437 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2438 break;
2439 }
2440 gen_op_iwmmxt_movq_wRn_M0(wrd);
2441 gen_op_iwmmxt_set_mup();
2442 gen_op_iwmmxt_set_cup();
2443 break;
2444 case 0x201: case 0x203: case 0x205: case 0x207:
2445 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2446 case 0x211: case 0x213: case 0x215: case 0x217:
2447 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2448 wrd = (insn >> 5) & 0xf;
2449 rd0 = (insn >> 12) & 0xf;
2450 rd1 = (insn >> 0) & 0xf;
2451 if (rd0 == 0xf || rd1 == 0xf)
2452 return 1;
2453 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2454 tmp = load_reg(s, rd0);
2455 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2456 switch ((insn >> 16) & 0xf) {
2457 case 0x0: /* TMIA */
da6b5335 2458 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2459 break;
2460 case 0x8: /* TMIAPH */
da6b5335 2461 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2462 break;
2463 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2464 if (insn & (1 << 16))
da6b5335 2465 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2466 if (insn & (1 << 17))
da6b5335
FN
2467 tcg_gen_shri_i32(tmp2, tmp2, 16);
2468 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2469 break;
2470 default:
7d1b0095
PM
2471 tcg_temp_free_i32(tmp2);
2472 tcg_temp_free_i32(tmp);
18c9b560
AZ
2473 return 1;
2474 }
7d1b0095
PM
2475 tcg_temp_free_i32(tmp2);
2476 tcg_temp_free_i32(tmp);
18c9b560
AZ
2477 gen_op_iwmmxt_movq_wRn_M0(wrd);
2478 gen_op_iwmmxt_set_mup();
2479 break;
2480 default:
2481 return 1;
2482 }
2483
2484 return 0;
2485}
2486
a1c7273b 2487/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2488 (ie. an undefined instruction). */
0ecb72a5 2489static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2490{
2491 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2492 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2493
2494 if ((insn & 0x0ff00f10) == 0x0e200010) {
2495 /* Multiply with Internal Accumulate Format */
2496 rd0 = (insn >> 12) & 0xf;
2497 rd1 = insn & 0xf;
2498 acc = (insn >> 5) & 7;
2499
2500 if (acc != 0)
2501 return 1;
2502
3a554c0f
FN
2503 tmp = load_reg(s, rd0);
2504 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2505 switch ((insn >> 16) & 0xf) {
2506 case 0x0: /* MIA */
3a554c0f 2507 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2508 break;
2509 case 0x8: /* MIAPH */
3a554c0f 2510 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2511 break;
2512 case 0xc: /* MIABB */
2513 case 0xd: /* MIABT */
2514 case 0xe: /* MIATB */
2515 case 0xf: /* MIATT */
18c9b560 2516 if (insn & (1 << 16))
3a554c0f 2517 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2518 if (insn & (1 << 17))
3a554c0f
FN
2519 tcg_gen_shri_i32(tmp2, tmp2, 16);
2520 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2521 break;
2522 default:
2523 return 1;
2524 }
7d1b0095
PM
2525 tcg_temp_free_i32(tmp2);
2526 tcg_temp_free_i32(tmp);
18c9b560
AZ
2527
2528 gen_op_iwmmxt_movq_wRn_M0(acc);
2529 return 0;
2530 }
2531
2532 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2533 /* Internal Accumulator Access Format */
2534 rdhi = (insn >> 16) & 0xf;
2535 rdlo = (insn >> 12) & 0xf;
2536 acc = insn & 7;
2537
2538 if (acc != 0)
2539 return 1;
2540
2541 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2542 iwmmxt_load_reg(cpu_V0, acc);
2543 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2544 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2545 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2546 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2547 } else { /* MAR */
3a554c0f
FN
2548 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2549 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2550 }
2551 return 0;
2552 }
2553
2554 return 1;
2555}
2556
9ee6e8bb
PB
/* Helpers for extracting VFP register numbers from an instruction word.
 * A single-precision register number is 5 bits: 4 bits in the "big" field
 * plus one extra low bit elsewhere in the insn.  A double-precision number
 * under VFP3 uses the extra bit as the *high* bit instead; pre-VFP3 cores
 * only have 16 D registers, so a set extra bit makes the insn UNDEF
 * (the macro does "return 1" in the enclosing decoder function).
 */
/* Shift right by n, or left when n is negative. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Single-precision reg: 4-bit field at 'bigbit' is the high bits, the bit
 * at 'smallbit' is the low bit.
 */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision reg: note this expands to a statement that may
 * "return 1" from the calling decoder on pre-VFP3 cores.
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D (destination), N and M operand registers. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2576
4373f3ce 2577/* Move between integer and VFP cores. */
39d5492a 2578static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2579{
39d5492a 2580 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2581 tcg_gen_mov_i32(tmp, cpu_F0s);
2582 return tmp;
2583}
2584
39d5492a 2585static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2586{
2587 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2588 tcg_temp_free_i32(tmp);
4373f3ce
PB
2589}
2590
39d5492a 2591static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2592{
39d5492a 2593 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2594 if (shift)
2595 tcg_gen_shri_i32(var, var, shift);
86831435 2596 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2597 tcg_gen_shli_i32(tmp, var, 8);
2598 tcg_gen_or_i32(var, var, tmp);
2599 tcg_gen_shli_i32(tmp, var, 16);
2600 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2601 tcg_temp_free_i32(tmp);
ad69471c
PB
2602}
2603
39d5492a 2604static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2605{
39d5492a 2606 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2607 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2608 tcg_gen_shli_i32(tmp, var, 16);
2609 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2610 tcg_temp_free_i32(tmp);
ad69471c
PB
2611}
2612
39d5492a 2613static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2614{
39d5492a 2615 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2616 tcg_gen_andi_i32(var, var, 0xffff0000);
2617 tcg_gen_shri_i32(tmp, var, 16);
2618 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2619 tcg_temp_free_i32(tmp);
ad69471c
PB
2620}
2621
39d5492a 2622static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2623{
2624 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2625 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2626 switch (size) {
2627 case 0:
08307563 2628 gen_aa32_ld8u(tmp, addr, IS_USER(s));
8e18cde3
PM
2629 gen_neon_dup_u8(tmp, 0);
2630 break;
2631 case 1:
08307563 2632 gen_aa32_ld16u(tmp, addr, IS_USER(s));
8e18cde3
PM
2633 gen_neon_dup_low16(tmp);
2634 break;
2635 case 2:
08307563 2636 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8e18cde3
PM
2637 break;
2638 default: /* Avoid compiler warnings. */
2639 abort();
2640 }
2641 return tmp;
2642}
2643
04731fb5
WN
/* VSEL: select between the Vn and Vm source registers based on the
 * condition encoded in insn[21:20] (only EQ, VS, GE, GT are encodable).
 * Emits a branchless select via movcond on the cached NZCV flag values.
 * Always returns 0 (the encoding has already been validated).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flag values for the 64-bit movcond below.
         * ZF is only tested for zero/non-zero so zero-extension is fine;
         * NF and VF are tested via their sign, so they must be
         * sign-extended.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V (V flag set <=> VF negative) */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 (xor non-negative) */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V -- done as two chained selects */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        /* Single precision: the 32-bit flag variables can be used directly. */
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2752
40cfacdd
WN
2753static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2754 uint32_t rm, uint32_t dp)
2755{
2756 uint32_t vmin = extract32(insn, 6, 1);
2757 TCGv_ptr fpst = get_fpstatus_ptr(0);
2758
2759 if (dp) {
2760 TCGv_i64 frn, frm, dest;
2761
2762 frn = tcg_temp_new_i64();
2763 frm = tcg_temp_new_i64();
2764 dest = tcg_temp_new_i64();
2765
2766 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2767 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2768 if (vmin) {
f71a2ae5 2769 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2770 } else {
f71a2ae5 2771 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2772 }
2773 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2774 tcg_temp_free_i64(frn);
2775 tcg_temp_free_i64(frm);
2776 tcg_temp_free_i64(dest);
2777 } else {
2778 TCGv_i32 frn, frm, dest;
2779
2780 frn = tcg_temp_new_i32();
2781 frm = tcg_temp_new_i32();
2782 dest = tcg_temp_new_i32();
2783
2784 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2785 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2786 if (vmin) {
f71a2ae5 2787 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2788 } else {
f71a2ae5 2789 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2790 }
2791 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2792 tcg_temp_free_i32(frn);
2793 tcg_temp_free_i32(frm);
2794 tcg_temp_free_i32(dest);
2795 }
2796
2797 tcg_temp_free_ptr(fpst);
2798 return 0;
2799}
2800
7655f39b
WN
2801static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2802 int rounding)
2803{
2804 TCGv_ptr fpst = get_fpstatus_ptr(0);
2805 TCGv_i32 tcg_rmode;
2806
2807 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2808 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2809
2810 if (dp) {
2811 TCGv_i64 tcg_op;
2812 TCGv_i64 tcg_res;
2813 tcg_op = tcg_temp_new_i64();
2814 tcg_res = tcg_temp_new_i64();
2815 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2816 gen_helper_rintd(tcg_res, tcg_op, fpst);
2817 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2818 tcg_temp_free_i64(tcg_op);
2819 tcg_temp_free_i64(tcg_res);
2820 } else {
2821 TCGv_i32 tcg_op;
2822 TCGv_i32 tcg_res;
2823 tcg_op = tcg_temp_new_i32();
2824 tcg_res = tcg_temp_new_i32();
2825 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2826 gen_helper_rints(tcg_res, tcg_op, fpst);
2827 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2828 tcg_temp_free_i32(tcg_op);
2829 tcg_temp_free_i32(tcg_res);
2830 }
2831
2832 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2833 tcg_temp_free_i32(tcg_rmode);
2834
2835 tcg_temp_free_ptr(fpst);
2836 return 0;
2837}
2838
c9975a83
WN
/* VCVTA/VCVTN/VCVTP/VCVTM: float to integer conversion using the
 * explicitly encoded rounding mode rather than the FPSCR one.
 * insn[7] selects a signed (1) or unsigned (0) result.
 * Always returns 0.
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Zero fixed-point shift: plain float->int conversion. */
    tcg_shift = tcg_const_i32(0);

    /* Temporarily install the requested rounding mode; the helper writes
     * the previous mode back into tcg_rmode so it can be restored below.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The helper produces a 64-bit value; only the low 32 bits are
         * the conversion result, stored to the single-precision Rd.
         */
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the previous rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
2896
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by the 2-bit RM field of the instruction.
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,  /* RM 0b00: to nearest, ties away from zero */
    FPROUNDING_TIEEVEN,  /* RM 0b01: to nearest, ties to even */
    FPROUNDING_POSINF,   /* RM 0b10: towards +infinity */
    FPROUNDING_NEGINF,   /* RM 0b11: towards -infinity */
};
2907
04731fb5
WN
2908static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2909{
2910 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
2911
2912 if (!arm_feature(env, ARM_FEATURE_V8)) {
2913 return 1;
2914 }
2915
2916 if (dp) {
2917 VFP_DREG_D(rd, insn);
2918 VFP_DREG_N(rn, insn);
2919 VFP_DREG_M(rm, insn);
2920 } else {
2921 rd = VFP_SREG_D(insn);
2922 rn = VFP_SREG_N(insn);
2923 rm = VFP_SREG_M(insn);
2924 }
2925
2926 if ((insn & 0x0f800e50) == 0x0e000a00) {
2927 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
2928 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
2929 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
2930 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
2931 /* VRINTA, VRINTN, VRINTP, VRINTM */
2932 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2933 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
2934 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
2935 /* VCVTA, VCVTN, VCVTP, VCVTM */
2936 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2937 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
2938 }
2939 return 1;
2940}
2941
a1c7273b 2942/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2943 (ie. an undefined instruction). */
0ecb72a5 2944static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2945{
2946 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2947 int dp, veclen;
39d5492a
PM
2948 TCGv_i32 addr;
2949 TCGv_i32 tmp;
2950 TCGv_i32 tmp2;
b7bcbe95 2951
40f137e1
PB
2952 if (!arm_feature(env, ARM_FEATURE_VFP))
2953 return 1;
2954
5df8bac1 2955 if (!s->vfp_enabled) {
9ee6e8bb 2956 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2957 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2958 return 1;
2959 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2960 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2961 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2962 return 1;
2963 }
6a57f3eb
WN
2964
2965 if (extract32(insn, 28, 4) == 0xf) {
2966 /* Encodings with T=1 (Thumb) or unconditional (ARM):
2967 * only used in v8 and above.
2968 */
04731fb5 2969 return disas_vfp_v8_insn(env, s, insn);
6a57f3eb
WN
2970 }
2971
b7bcbe95
FB
2972 dp = ((insn & 0xf00) == 0xb00);
2973 switch ((insn >> 24) & 0xf) {
2974 case 0xe:
2975 if (insn & (1 << 4)) {
2976 /* single register transfer */
b7bcbe95
FB
2977 rd = (insn >> 12) & 0xf;
2978 if (dp) {
9ee6e8bb
PB
2979 int size;
2980 int pass;
2981
2982 VFP_DREG_N(rn, insn);
2983 if (insn & 0xf)
b7bcbe95 2984 return 1;
9ee6e8bb
PB
2985 if (insn & 0x00c00060
2986 && !arm_feature(env, ARM_FEATURE_NEON))
2987 return 1;
2988
2989 pass = (insn >> 21) & 1;
2990 if (insn & (1 << 22)) {
2991 size = 0;
2992 offset = ((insn >> 5) & 3) * 8;
2993 } else if (insn & (1 << 5)) {
2994 size = 1;
2995 offset = (insn & (1 << 6)) ? 16 : 0;
2996 } else {
2997 size = 2;
2998 offset = 0;
2999 }
18c9b560 3000 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3001 /* vfp->arm */
ad69471c 3002 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3003 switch (size) {
3004 case 0:
9ee6e8bb 3005 if (offset)
ad69471c 3006 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3007 if (insn & (1 << 23))
ad69471c 3008 gen_uxtb(tmp);
9ee6e8bb 3009 else
ad69471c 3010 gen_sxtb(tmp);
9ee6e8bb
PB
3011 break;
3012 case 1:
9ee6e8bb
PB
3013 if (insn & (1 << 23)) {
3014 if (offset) {
ad69471c 3015 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3016 } else {
ad69471c 3017 gen_uxth(tmp);
9ee6e8bb
PB
3018 }
3019 } else {
3020 if (offset) {
ad69471c 3021 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3022 } else {
ad69471c 3023 gen_sxth(tmp);
9ee6e8bb
PB
3024 }
3025 }
3026 break;
3027 case 2:
9ee6e8bb
PB
3028 break;
3029 }
ad69471c 3030 store_reg(s, rd, tmp);
b7bcbe95
FB
3031 } else {
3032 /* arm->vfp */
ad69471c 3033 tmp = load_reg(s, rd);
9ee6e8bb
PB
3034 if (insn & (1 << 23)) {
3035 /* VDUP */
3036 if (size == 0) {
ad69471c 3037 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3038 } else if (size == 1) {
ad69471c 3039 gen_neon_dup_low16(tmp);
9ee6e8bb 3040 }
cbbccffc 3041 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3042 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3043 tcg_gen_mov_i32(tmp2, tmp);
3044 neon_store_reg(rn, n, tmp2);
3045 }
3046 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3047 } else {
3048 /* VMOV */
3049 switch (size) {
3050 case 0:
ad69471c 3051 tmp2 = neon_load_reg(rn, pass);
d593c48e 3052 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3053 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3054 break;
3055 case 1:
ad69471c 3056 tmp2 = neon_load_reg(rn, pass);
d593c48e 3057 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3058 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3059 break;
3060 case 2:
9ee6e8bb
PB
3061 break;
3062 }
ad69471c 3063 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3064 }
b7bcbe95 3065 }
9ee6e8bb
PB
3066 } else { /* !dp */
3067 if ((insn & 0x6f) != 0x00)
3068 return 1;
3069 rn = VFP_SREG_N(insn);
18c9b560 3070 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3071 /* vfp->arm */
3072 if (insn & (1 << 21)) {
3073 /* system register */
40f137e1 3074 rn >>= 1;
9ee6e8bb 3075
b7bcbe95 3076 switch (rn) {
40f137e1 3077 case ARM_VFP_FPSID:
4373f3ce 3078 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3079 VFP3 restricts all id registers to privileged
3080 accesses. */
3081 if (IS_USER(s)
3082 && arm_feature(env, ARM_FEATURE_VFP3))
3083 return 1;
4373f3ce 3084 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3085 break;
40f137e1 3086 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3087 if (IS_USER(s))
3088 return 1;
4373f3ce 3089 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3090 break;
40f137e1
PB
3091 case ARM_VFP_FPINST:
3092 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3093 /* Not present in VFP3. */
3094 if (IS_USER(s)
3095 || arm_feature(env, ARM_FEATURE_VFP3))
3096 return 1;
4373f3ce 3097 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3098 break;
40f137e1 3099 case ARM_VFP_FPSCR:
601d70b9 3100 if (rd == 15) {
4373f3ce
PB
3101 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3102 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3103 } else {
7d1b0095 3104 tmp = tcg_temp_new_i32();
4373f3ce
PB
3105 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3106 }
b7bcbe95 3107 break;
9ee6e8bb
PB
3108 case ARM_VFP_MVFR0:
3109 case ARM_VFP_MVFR1:
3110 if (IS_USER(s)
06ed5d66 3111 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 3112 return 1;
4373f3ce 3113 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3114 break;
b7bcbe95
FB
3115 default:
3116 return 1;
3117 }
3118 } else {
3119 gen_mov_F0_vreg(0, rn);
4373f3ce 3120 tmp = gen_vfp_mrs();
b7bcbe95
FB
3121 }
3122 if (rd == 15) {
b5ff1b31 3123 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3124 gen_set_nzcv(tmp);
7d1b0095 3125 tcg_temp_free_i32(tmp);
4373f3ce
PB
3126 } else {
3127 store_reg(s, rd, tmp);
3128 }
b7bcbe95
FB
3129 } else {
3130 /* arm->vfp */
b7bcbe95 3131 if (insn & (1 << 21)) {
40f137e1 3132 rn >>= 1;
b7bcbe95
FB
3133 /* system register */
3134 switch (rn) {
40f137e1 3135 case ARM_VFP_FPSID:
9ee6e8bb
PB
3136 case ARM_VFP_MVFR0:
3137 case ARM_VFP_MVFR1:
b7bcbe95
FB
3138 /* Writes are ignored. */
3139 break;
40f137e1 3140 case ARM_VFP_FPSCR:
e4c1cfa5 3141 tmp = load_reg(s, rd);
4373f3ce 3142 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3143 tcg_temp_free_i32(tmp);
b5ff1b31 3144 gen_lookup_tb(s);
b7bcbe95 3145 break;
40f137e1 3146 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3147 if (IS_USER(s))
3148 return 1;
71b3c3de
JR
3149 /* TODO: VFP subarchitecture support.
3150 * For now, keep the EN bit only */
e4c1cfa5 3151 tmp = load_reg(s, rd);
71b3c3de 3152 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3153 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3154 gen_lookup_tb(s);
3155 break;
3156 case ARM_VFP_FPINST:
3157 case ARM_VFP_FPINST2:
e4c1cfa5 3158 tmp = load_reg(s, rd);
4373f3ce 3159 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3160 break;
b7bcbe95
FB
3161 default:
3162 return 1;
3163 }
3164 } else {
e4c1cfa5 3165 tmp = load_reg(s, rd);
4373f3ce 3166 gen_vfp_msr(tmp);
b7bcbe95
FB
3167 gen_mov_vreg_F0(0, rn);
3168 }
3169 }
3170 }
3171 } else {
3172 /* data processing */
3173 /* The opcode is in bits 23, 21, 20 and 6. */
3174 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3175 if (dp) {
3176 if (op == 15) {
3177 /* rn is opcode */
3178 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3179 } else {
3180 /* rn is register number */
9ee6e8bb 3181 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3182 }
3183
239c20c7
WN
3184 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3185 ((rn & 0x1e) == 0x6))) {
3186 /* Integer or single/half precision destination. */
9ee6e8bb 3187 rd = VFP_SREG_D(insn);
b7bcbe95 3188 } else {
9ee6e8bb 3189 VFP_DREG_D(rd, insn);
b7bcbe95 3190 }
04595bf6 3191 if (op == 15 &&
239c20c7
WN
3192 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3193 ((rn & 0x1e) == 0x4))) {
3194 /* VCVT from int or half precision is always from S reg
3195 * regardless of dp bit. VCVT with immediate frac_bits
3196 * has same format as SREG_M.
04595bf6
PM
3197 */
3198 rm = VFP_SREG_M(insn);
b7bcbe95 3199 } else {
9ee6e8bb 3200 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3201 }
3202 } else {
9ee6e8bb 3203 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3204 if (op == 15 && rn == 15) {
3205 /* Double precision destination. */
9ee6e8bb
PB
3206 VFP_DREG_D(rd, insn);
3207 } else {
3208 rd = VFP_SREG_D(insn);
3209 }
04595bf6
PM
3210 /* NB that we implicitly rely on the encoding for the frac_bits
3211 * in VCVT of fixed to float being the same as that of an SREG_M
3212 */
9ee6e8bb 3213 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3214 }
3215
69d1fc22 3216 veclen = s->vec_len;
b7bcbe95
FB
3217 if (op == 15 && rn > 3)
3218 veclen = 0;
3219
3220 /* Shut up compiler warnings. */
3221 delta_m = 0;
3222 delta_d = 0;
3223 bank_mask = 0;
3b46e624 3224
b7bcbe95
FB
3225 if (veclen > 0) {
3226 if (dp)
3227 bank_mask = 0xc;
3228 else
3229 bank_mask = 0x18;
3230
3231 /* Figure out what type of vector operation this is. */
3232 if ((rd & bank_mask) == 0) {
3233 /* scalar */
3234 veclen = 0;
3235 } else {
3236 if (dp)
69d1fc22 3237 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3238 else
69d1fc22 3239 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3240
3241 if ((rm & bank_mask) == 0) {
3242 /* mixed scalar/vector */
3243 delta_m = 0;
3244 } else {
3245 /* vector */
3246 delta_m = delta_d;
3247 }
3248 }
3249 }
3250
3251 /* Load the initial operands. */
3252 if (op == 15) {
3253 switch (rn) {
3254 case 16:
3255 case 17:
3256 /* Integer source */
3257 gen_mov_F0_vreg(0, rm);
3258 break;
3259 case 8:
3260 case 9:
3261 /* Compare */
3262 gen_mov_F0_vreg(dp, rd);
3263 gen_mov_F1_vreg(dp, rm);
3264 break;
3265 case 10:
3266 case 11:
3267 /* Compare with zero */
3268 gen_mov_F0_vreg(dp, rd);
3269 gen_vfp_F1_ld0(dp);
3270 break;
9ee6e8bb
PB
3271 case 20:
3272 case 21:
3273 case 22:
3274 case 23:
644ad806
PB
3275 case 28:
3276 case 29:
3277 case 30:
3278 case 31:
9ee6e8bb
PB
3279 /* Source and destination the same. */
3280 gen_mov_F0_vreg(dp, rd);
3281 break;
6e0c0ed1
PM
3282 case 4:
3283 case 5:
3284 case 6:
3285 case 7:
239c20c7
WN
3286 /* VCVTB, VCVTT: only present with the halfprec extension
3287 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3288 * (we choose to UNDEF)
6e0c0ed1 3289 */
239c20c7
WN
3290 if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
3291 !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3292 return 1;
3293 }
239c20c7
WN
3294 if (!extract32(rn, 1, 1)) {
3295 /* Half precision source. */
3296 gen_mov_F0_vreg(0, rm);
3297 break;
3298 }
6e0c0ed1 3299 /* Otherwise fall through */
b7bcbe95
FB
3300 default:
3301 /* One source operand. */
3302 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3303 break;
b7bcbe95
FB
3304 }
3305 } else {
3306 /* Two source operands. */
3307 gen_mov_F0_vreg(dp, rn);
3308 gen_mov_F1_vreg(dp, rm);
3309 }
3310
3311 for (;;) {
3312 /* Perform the calculation. */
3313 switch (op) {
605a6aed
PM
3314 case 0: /* VMLA: fd + (fn * fm) */
3315 /* Note that order of inputs to the add matters for NaNs */
3316 gen_vfp_F1_mul(dp);
3317 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3318 gen_vfp_add(dp);
3319 break;
605a6aed 3320 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3321 gen_vfp_mul(dp);
605a6aed
PM
3322 gen_vfp_F1_neg(dp);
3323 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3324 gen_vfp_add(dp);
3325 break;
605a6aed
PM
3326 case 2: /* VNMLS: -fd + (fn * fm) */
3327 /* Note that it isn't valid to replace (-A + B) with (B - A)
3328 * or similar plausible looking simplifications
3329 * because this will give wrong results for NaNs.
3330 */
3331 gen_vfp_F1_mul(dp);
3332 gen_mov_F0_vreg(dp, rd);
3333 gen_vfp_neg(dp);
3334 gen_vfp_add(dp);
b7bcbe95 3335 break;
605a6aed 3336 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3337 gen_vfp_mul(dp);
605a6aed
PM
3338 gen_vfp_F1_neg(dp);
3339 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3340 gen_vfp_neg(dp);
605a6aed 3341 gen_vfp_add(dp);
b7bcbe95
FB
3342 break;
3343 case 4: /* mul: fn * fm */
3344 gen_vfp_mul(dp);
3345 break;
3346 case 5: /* nmul: -(fn * fm) */
3347 gen_vfp_mul(dp);
3348 gen_vfp_neg(dp);
3349 break;
3350 case 6: /* add: fn + fm */
3351 gen_vfp_add(dp);
3352 break;
3353 case 7: /* sub: fn - fm */
3354 gen_vfp_sub(dp);
3355 break;
3356 case 8: /* div: fn / fm */
3357 gen_vfp_div(dp);
3358 break;
da97f52c
PM
3359 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3360 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3361 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3362 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3363 /* These are fused multiply-add, and must be done as one
3364 * floating point operation with no rounding between the
3365 * multiplication and addition steps.
3366 * NB that doing the negations here as separate steps is
3367 * correct : an input NaN should come out with its sign bit
3368 * flipped if it is a negated-input.
3369 */
3370 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3371 return 1;
3372 }
3373 if (dp) {
3374 TCGv_ptr fpst;
3375 TCGv_i64 frd;
3376 if (op & 1) {
3377 /* VFNMS, VFMS */
3378 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3379 }
3380 frd = tcg_temp_new_i64();
3381 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3382 if (op & 2) {
3383 /* VFNMA, VFNMS */
3384 gen_helper_vfp_negd(frd, frd);
3385 }
3386 fpst = get_fpstatus_ptr(0);
3387 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3388 cpu_F1d, frd, fpst);
3389 tcg_temp_free_ptr(fpst);
3390 tcg_temp_free_i64(frd);
3391 } else {
3392 TCGv_ptr fpst;
3393 TCGv_i32 frd;
3394 if (op & 1) {
3395 /* VFNMS, VFMS */
3396 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3397 }
3398 frd = tcg_temp_new_i32();
3399 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3400 if (op & 2) {
3401 gen_helper_vfp_negs(frd, frd);
3402 }
3403 fpst = get_fpstatus_ptr(0);
3404 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3405 cpu_F1s, frd, fpst);
3406 tcg_temp_free_ptr(fpst);
3407 tcg_temp_free_i32(frd);
3408 }
3409 break;
9ee6e8bb
PB
3410 case 14: /* fconst */
3411 if (!arm_feature(env, ARM_FEATURE_VFP3))
3412 return 1;
3413
3414 n = (insn << 12) & 0x80000000;
3415 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3416 if (dp) {
3417 if (i & 0x40)
3418 i |= 0x3f80;
3419 else
3420 i |= 0x4000;
3421 n |= i << 16;
4373f3ce 3422 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3423 } else {
3424 if (i & 0x40)
3425 i |= 0x780;
3426 else
3427 i |= 0x800;
3428 n |= i << 19;
5b340b51 3429 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3430 }
9ee6e8bb 3431 break;
b7bcbe95
FB
3432 case 15: /* extension space */
3433 switch (rn) {
3434 case 0: /* cpy */
3435 /* no-op */
3436 break;
3437 case 1: /* abs */
3438 gen_vfp_abs(dp);
3439 break;
3440 case 2: /* neg */
3441 gen_vfp_neg(dp);
3442 break;
3443 case 3: /* sqrt */
3444 gen_vfp_sqrt(dp);
3445 break;
239c20c7 3446 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3447 tmp = gen_vfp_mrs();
3448 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3449 if (dp) {
3450 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3451 cpu_env);
3452 } else {
3453 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3454 cpu_env);
3455 }
7d1b0095 3456 tcg_temp_free_i32(tmp);
60011498 3457 break;
239c20c7 3458 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3459 tmp = gen_vfp_mrs();
3460 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3461 if (dp) {
3462 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3463 cpu_env);
3464 } else {
3465 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3466 cpu_env);
3467 }
7d1b0095 3468 tcg_temp_free_i32(tmp);
60011498 3469 break;
239c20c7 3470 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3471 tmp = tcg_temp_new_i32();
239c20c7
WN
3472 if (dp) {
3473 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3474 cpu_env);
3475 } else {
3476 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3477 cpu_env);
3478 }
60011498
PB
3479 gen_mov_F0_vreg(0, rd);
3480 tmp2 = gen_vfp_mrs();
3481 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3482 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3483 tcg_temp_free_i32(tmp2);
60011498
PB
3484 gen_vfp_msr(tmp);
3485 break;
239c20c7 3486 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3487 tmp = tcg_temp_new_i32();
239c20c7
WN
3488 if (dp) {
3489 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3490 cpu_env);
3491 } else {
3492 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3493 cpu_env);
3494 }
60011498
PB
3495 tcg_gen_shli_i32(tmp, tmp, 16);
3496 gen_mov_F0_vreg(0, rd);
3497 tmp2 = gen_vfp_mrs();
3498 tcg_gen_ext16u_i32(tmp2, tmp2);
3499 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3500 tcg_temp_free_i32(tmp2);
60011498
PB
3501 gen_vfp_msr(tmp);
3502 break;
b7bcbe95
FB
3503 case 8: /* cmp */
3504 gen_vfp_cmp(dp);
3505 break;
3506 case 9: /* cmpe */
3507 gen_vfp_cmpe(dp);
3508 break;
3509 case 10: /* cmpz */
3510 gen_vfp_cmp(dp);
3511 break;
3512 case 11: /* cmpez */
3513 gen_vfp_F1_ld0(dp);
3514 gen_vfp_cmpe(dp);
3515 break;
664c6733
WN
3516 case 12: /* vrintr */
3517 {
3518 TCGv_ptr fpst = get_fpstatus_ptr(0);
3519 if (dp) {
3520 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3521 } else {
3522 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3523 }
3524 tcg_temp_free_ptr(fpst);
3525 break;
3526 }
a290c62a
WN
3527 case 13: /* vrintz */
3528 {
3529 TCGv_ptr fpst = get_fpstatus_ptr(0);
3530 TCGv_i32 tcg_rmode;
3531 tcg_rmode = tcg_const_i32(float_round_to_zero);
3532 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3533 if (dp) {
3534 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3535 } else {
3536 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3537 }
3538 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3539 tcg_temp_free_i32(tcg_rmode);
3540 tcg_temp_free_ptr(fpst);
3541 break;
3542 }
4e82bc01
WN
3543 case 14: /* vrintx */
3544 {
3545 TCGv_ptr fpst = get_fpstatus_ptr(0);
3546 if (dp) {
3547 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3548 } else {
3549 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3550 }
3551 tcg_temp_free_ptr(fpst);
3552 break;
3553 }
b7bcbe95
FB
3554 case 15: /* single<->double conversion */
3555 if (dp)
4373f3ce 3556 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3557 else
4373f3ce 3558 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3559 break;
3560 case 16: /* fuito */
5500b06c 3561 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3562 break;
3563 case 17: /* fsito */
5500b06c 3564 gen_vfp_sito(dp, 0);
b7bcbe95 3565 break;
9ee6e8bb
PB
3566 case 20: /* fshto */
3567 if (!arm_feature(env, ARM_FEATURE_VFP3))
3568 return 1;
5500b06c 3569 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3570 break;
3571 case 21: /* fslto */
3572 if (!arm_feature(env, ARM_FEATURE_VFP3))
3573 return 1;
5500b06c 3574 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3575 break;
3576 case 22: /* fuhto */
3577 if (!arm_feature(env, ARM_FEATURE_VFP3))
3578 return 1;
5500b06c 3579 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3580 break;
3581 case 23: /* fulto */
3582 if (!arm_feature(env, ARM_FEATURE_VFP3))
3583 return 1;
5500b06c 3584 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3585 break;
b7bcbe95 3586 case 24: /* ftoui */
5500b06c 3587 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3588 break;
3589 case 25: /* ftouiz */
5500b06c 3590 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3591 break;
3592 case 26: /* ftosi */
5500b06c 3593 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3594 break;
3595 case 27: /* ftosiz */
5500b06c 3596 gen_vfp_tosiz(dp, 0);
b7bcbe95 3597 break;
9ee6e8bb
PB
3598 case 28: /* ftosh */
3599 if (!arm_feature(env, ARM_FEATURE_VFP3))
3600 return 1;
5500b06c 3601 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3602 break;
3603 case 29: /* ftosl */
3604 if (!arm_feature(env, ARM_FEATURE_VFP3))
3605 return 1;
5500b06c 3606 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3607 break;
3608 case 30: /* ftouh */
3609 if (!arm_feature(env, ARM_FEATURE_VFP3))
3610 return 1;
5500b06c 3611 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3612 break;
3613 case 31: /* ftoul */
3614 if (!arm_feature(env, ARM_FEATURE_VFP3))
3615 return 1;
5500b06c 3616 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3617 break;
b7bcbe95 3618 default: /* undefined */
b7bcbe95
FB
3619 return 1;
3620 }
3621 break;
3622 default: /* undefined */
b7bcbe95
FB
3623 return 1;
3624 }
3625
3626 /* Write back the result. */
239c20c7
WN
3627 if (op == 15 && (rn >= 8 && rn <= 11)) {
3628 /* Comparison, do nothing. */
3629 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3630 (rn & 0x1e) == 0x6)) {
3631 /* VCVT double to int: always integer result.
3632 * VCVT double to half precision is always a single
3633 * precision result.
3634 */
b7bcbe95 3635 gen_mov_vreg_F0(0, rd);
239c20c7 3636 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3637 /* conversion */
3638 gen_mov_vreg_F0(!dp, rd);
239c20c7 3639 } else {
b7bcbe95 3640 gen_mov_vreg_F0(dp, rd);
239c20c7 3641 }
b7bcbe95
FB
3642
3643 /* break out of the loop if we have finished */
3644 if (veclen == 0)
3645 break;
3646
3647 if (op == 15 && delta_m == 0) {
3648 /* single source one-many */
3649 while (veclen--) {
3650 rd = ((rd + delta_d) & (bank_mask - 1))
3651 | (rd & bank_mask);
3652 gen_mov_vreg_F0(dp, rd);
3653 }
3654 break;
3655 }
3656 /* Setup the next operands. */
3657 veclen--;
3658 rd = ((rd + delta_d) & (bank_mask - 1))
3659 | (rd & bank_mask);
3660
3661 if (op == 15) {
3662 /* One source operand. */
3663 rm = ((rm + delta_m) & (bank_mask - 1))
3664 | (rm & bank_mask);
3665 gen_mov_F0_vreg(dp, rm);
3666 } else {
3667 /* Two source operands. */
3668 rn = ((rn + delta_d) & (bank_mask - 1))
3669 | (rn & bank_mask);
3670 gen_mov_F0_vreg(dp, rn);
3671 if (delta_m) {
3672 rm = ((rm + delta_m) & (bank_mask - 1))
3673 | (rm & bank_mask);
3674 gen_mov_F1_vreg(dp, rm);
3675 }
3676 }
3677 }
3678 }
3679 break;
3680 case 0xc:
3681 case 0xd:
8387da81 3682 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3683 /* two-register transfer */
3684 rn = (insn >> 16) & 0xf;
3685 rd = (insn >> 12) & 0xf;
3686 if (dp) {
9ee6e8bb
PB
3687 VFP_DREG_M(rm, insn);
3688 } else {
3689 rm = VFP_SREG_M(insn);
3690 }
b7bcbe95 3691
18c9b560 3692 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3693 /* vfp->arm */
3694 if (dp) {
4373f3ce
PB
3695 gen_mov_F0_vreg(0, rm * 2);
3696 tmp = gen_vfp_mrs();
3697 store_reg(s, rd, tmp);
3698 gen_mov_F0_vreg(0, rm * 2 + 1);
3699 tmp = gen_vfp_mrs();
3700 store_reg(s, rn, tmp);
b7bcbe95
FB
3701 } else {
3702 gen_mov_F0_vreg(0, rm);
4373f3ce 3703 tmp = gen_vfp_mrs();
8387da81 3704 store_reg(s, rd, tmp);
b7bcbe95 3705 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3706 tmp = gen_vfp_mrs();
8387da81 3707 store_reg(s, rn, tmp);
b7bcbe95
FB
3708 }
3709 } else {
3710 /* arm->vfp */
3711 if (dp) {
4373f3ce
PB
3712 tmp = load_reg(s, rd);
3713 gen_vfp_msr(tmp);
3714 gen_mov_vreg_F0(0, rm * 2);
3715 tmp = load_reg(s, rn);
3716 gen_vfp_msr(tmp);
3717 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3718 } else {
8387da81 3719 tmp = load_reg(s, rd);
4373f3ce 3720 gen_vfp_msr(tmp);
b7bcbe95 3721 gen_mov_vreg_F0(0, rm);
8387da81 3722 tmp = load_reg(s, rn);
4373f3ce 3723 gen_vfp_msr(tmp);
b7bcbe95
FB
3724 gen_mov_vreg_F0(0, rm + 1);
3725 }
3726 }
3727 } else {
3728 /* Load/store */
3729 rn = (insn >> 16) & 0xf;
3730 if (dp)
9ee6e8bb 3731 VFP_DREG_D(rd, insn);
b7bcbe95 3732 else
9ee6e8bb 3733 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3734 if ((insn & 0x01200000) == 0x01000000) {
3735 /* Single load/store */
3736 offset = (insn & 0xff) << 2;
3737 if ((insn & (1 << 23)) == 0)
3738 offset = -offset;
934814f1
PM
3739 if (s->thumb && rn == 15) {
3740 /* This is actually UNPREDICTABLE */
3741 addr = tcg_temp_new_i32();
3742 tcg_gen_movi_i32(addr, s->pc & ~2);
3743 } else {
3744 addr = load_reg(s, rn);
3745 }
312eea9f 3746 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3747 if (insn & (1 << 20)) {
312eea9f 3748 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3749 gen_mov_vreg_F0(dp, rd);
3750 } else {
3751 gen_mov_F0_vreg(dp, rd);
312eea9f 3752 gen_vfp_st(s, dp, addr);
b7bcbe95 3753 }
7d1b0095 3754 tcg_temp_free_i32(addr);
b7bcbe95
FB
3755 } else {
3756 /* load/store multiple */
934814f1 3757 int w = insn & (1 << 21);
b7bcbe95
FB
3758 if (dp)
3759 n = (insn >> 1) & 0x7f;
3760 else
3761 n = insn & 0xff;
3762
934814f1
PM
3763 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3764 /* P == U , W == 1 => UNDEF */
3765 return 1;
3766 }
3767 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3768 /* UNPREDICTABLE cases for bad immediates: we choose to
3769 * UNDEF to avoid generating huge numbers of TCG ops
3770 */
3771 return 1;
3772 }
3773 if (rn == 15 && w) {
3774 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3775 return 1;
3776 }
3777
3778 if (s->thumb && rn == 15) {
3779 /* This is actually UNPREDICTABLE */
3780 addr = tcg_temp_new_i32();
3781 tcg_gen_movi_i32(addr, s->pc & ~2);
3782 } else {
3783 addr = load_reg(s, rn);
3784 }
b7bcbe95 3785 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3786 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3787
3788 if (dp)
3789 offset = 8;
3790 else
3791 offset = 4;
3792 for (i = 0; i < n; i++) {
18c9b560 3793 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3794 /* load */
312eea9f 3795 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3796 gen_mov_vreg_F0(dp, rd + i);
3797 } else {
3798 /* store */
3799 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3800 gen_vfp_st(s, dp, addr);
b7bcbe95 3801 }
312eea9f 3802 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3803 }
934814f1 3804 if (w) {
b7bcbe95
FB
3805 /* writeback */
3806 if (insn & (1 << 24))
3807 offset = -offset * n;
3808 else if (dp && (insn & 1))
3809 offset = 4;
3810 else
3811 offset = 0;
3812
3813 if (offset != 0)
312eea9f
FN
3814 tcg_gen_addi_i32(addr, addr, offset);
3815 store_reg(s, rn, addr);
3816 } else {
7d1b0095 3817 tcg_temp_free_i32(addr);
b7bcbe95
FB
3818 }
3819 }
3820 }
3821 break;
3822 default:
3823 /* Should never happen. */
3824 return 1;
3825 }
3826 return 0;
3827}
3828
0a2461fa 3829static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3830{
6e256c93
FB
3831 TranslationBlock *tb;
3832
3833 tb = s->tb;
3834 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3835 tcg_gen_goto_tb(n);
eaed129d 3836 gen_set_pc_im(s, dest);
8cfd0495 3837 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3838 } else {
eaed129d 3839 gen_set_pc_im(s, dest);
57fec1fe 3840 tcg_gen_exit_tb(0);
6e256c93 3841 }
c53be334
FB
3842}
3843
8aaca4c0
FB
3844static inline void gen_jmp (DisasContext *s, uint32_t dest)
3845{
551bd27f 3846 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3847 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3848 if (s->thumb)
d9ba4830
PB
3849 dest |= 1;
3850 gen_bx_im(s, dest);
8aaca4c0 3851 } else {
6e256c93 3852 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3853 s->is_jmp = DISAS_TB_JUMP;
3854 }
3855}
3856
39d5492a 3857static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3858{
ee097184 3859 if (x)
d9ba4830 3860 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3861 else
d9ba4830 3862 gen_sxth(t0);
ee097184 3863 if (y)
d9ba4830 3864 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3865 else
d9ba4830
PB
3866 gen_sxth(t1);
3867 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3868}
3869
3870/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3871static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3872 uint32_t mask;
3873
3874 mask = 0;
3875 if (flags & (1 << 0))
3876 mask |= 0xff;
3877 if (flags & (1 << 1))
3878 mask |= 0xff00;
3879 if (flags & (1 << 2))
3880 mask |= 0xff0000;
3881 if (flags & (1 << 3))
3882 mask |= 0xff000000;
9ee6e8bb 3883
2ae23e75 3884 /* Mask out undefined bits. */
9ee6e8bb 3885 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3886 if (!arm_feature(env, ARM_FEATURE_V4T))
3887 mask &= ~CPSR_T;
3888 if (!arm_feature(env, ARM_FEATURE_V5))
3889 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3890 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3891 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3892 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3893 mask &= ~CPSR_IT;
9ee6e8bb 3894 /* Mask out execution state bits. */
2ae23e75 3895 if (!spsr)
e160c51c 3896 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3897 /* Mask out privileged bits. */
3898 if (IS_USER(s))
9ee6e8bb 3899 mask &= CPSR_USER;
b5ff1b31
FB
3900 return mask;
3901}
3902
2fbac54b 3903/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3904static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3905{
39d5492a 3906 TCGv_i32 tmp;
b5ff1b31
FB
3907 if (spsr) {
3908 /* ??? This is also undefined in system mode. */
3909 if (IS_USER(s))
3910 return 1;
d9ba4830
PB
3911
3912 tmp = load_cpu_field(spsr);
3913 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3914 tcg_gen_andi_i32(t0, t0, mask);
3915 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3916 store_cpu_field(tmp, spsr);
b5ff1b31 3917 } else {
2fbac54b 3918 gen_set_cpsr(t0, mask);
b5ff1b31 3919 }
7d1b0095 3920 tcg_temp_free_i32(t0);
b5ff1b31
FB
3921 gen_lookup_tb(s);
3922 return 0;
3923}
3924
2fbac54b
FN
3925/* Returns nonzero if access to the PSR is not permitted. */
3926static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3927{
39d5492a 3928 TCGv_i32 tmp;
7d1b0095 3929 tmp = tcg_temp_new_i32();
2fbac54b
FN
3930 tcg_gen_movi_i32(tmp, val);
3931 return gen_set_psr(s, mask, spsr, tmp);
3932}
3933
e9bb4aa9 3934/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3935static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3936{
39d5492a 3937 TCGv_i32 tmp;
e9bb4aa9 3938 store_reg(s, 15, pc);
d9ba4830
PB
3939 tmp = load_cpu_field(spsr);
3940 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3941 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3942 s->is_jmp = DISAS_UPDATE;
3943}
3944
b0109805 3945/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3946static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3947{
b0109805 3948 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3949 tcg_temp_free_i32(cpsr);
b0109805 3950 store_reg(s, 15, pc);
9ee6e8bb
PB
3951 s->is_jmp = DISAS_UPDATE;
3952}
3b46e624 3953
9ee6e8bb
PB
3954static void gen_nop_hint(DisasContext *s, int val)
3955{
3956 switch (val) {
3957 case 3: /* wfi */
eaed129d 3958 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
3959 s->is_jmp = DISAS_WFI;
3960 break;
3961 case 2: /* wfe */
72c1d3af
PM
3962 gen_set_pc_im(s, s->pc);
3963 s->is_jmp = DISAS_WFE;
3964 break;
9ee6e8bb 3965 case 4: /* sev */
12b10571
MR
3966 case 5: /* sevl */
3967 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3968 default: /* nop */
3969 break;
3970 }
3971}
99c475ab 3972
/* Shorthand operand list: dest = V0, sources = V0, V1. */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3974
39d5492a 3975static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3976{
3977 switch (size) {
dd8fbd78
FN
3978 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3979 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3980 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3981 default: abort();
9ee6e8bb 3982 }
9ee6e8bb
PB
3983}
3984
39d5492a 3985static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3986{
3987 switch (size) {
dd8fbd78
FN
3988 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3989 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3990 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3991 default: return;
3992 }
3993}
3994
/* 32-bit pairwise ops end up the same as the elementwise versions. */
/* (Only one pair per 32-bit lane, so pairwise max/min degenerates.)  */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4000
ad69471c
PB
/* Call the env-taking Neon integer helper selected by the (size, u)
 * pair in the expansion context (0..5 -> s8,u8,s16,u16,s32,u32).
 * Operates on `tmp`/`tmp2`, result in `tmp`; any other combination
 * makes the enclosing function return 1 (UNDEF).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4023
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4046
39d5492a 4047static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4048{
39d5492a 4049 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4050 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4051 return tmp;
9ee6e8bb
PB
4052}
4053
39d5492a 4054static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4055{
dd8fbd78 4056 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4057 tcg_temp_free_i32(var);
9ee6e8bb
PB
4058}
4059
39d5492a 4060static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4061{
39d5492a 4062 TCGv_i32 tmp;
9ee6e8bb 4063 if (size == 1) {
0fad6efc
PM
4064 tmp = neon_load_reg(reg & 7, reg >> 4);
4065 if (reg & 8) {
dd8fbd78 4066 gen_neon_dup_high16(tmp);
0fad6efc
PM
4067 } else {
4068 gen_neon_dup_low16(tmp);
dd8fbd78 4069 }
0fad6efc
PM
4070 } else {
4071 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4072 }
dd8fbd78 4073 return tmp;
9ee6e8bb
PB
4074}
4075
/* Emit code for VUZP (unzip/de-interleave) of registers rd and rm.
 * `q` selects the quad-register (128-bit) helper variants, `size` the
 * element width (0=8, 1=16, 2=32 bits).  Returns 1 (caller UNDEFs) for
 * the unallocated 32-bit double-register form, 0 on success.
 * The register numbers are passed to the helpers as i32 constants; the
 * helpers operate directly on the register file via cpu_env.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        /* VUZP.32 with D registers is not an allocated encoding. */
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4114
/* Emit code for VZIP (zip/interleave) of registers rd and rm.
 * Mirror image of gen_neon_unzip(): `q` selects the quad-register
 * helpers, `size` the element width.  Returns 1 for the unallocated
 * 32-bit double-register form, 0 on success.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        /* VZIP.32 with D registers is not an allocated encoding. */
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4153
/* Byte-lane transpose step used by the VTRN.8 implementation.
 * Rewrites t0/t1 in place:
 *   new t0 = even bytes of t1 in the even lanes, even bytes of t0
 *            shifted up into the odd lanes;
 *   new t1 = odd bytes of t1 shifted down into the even lanes, odd
 *            bytes of t0 kept in the odd lanes.
 * (Which operand ends up where is chosen by the caller's operand order.)
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0 << 8) masked to odd byte lanes, merged with t1's even lanes */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (t1 >> 8) masked to even byte lanes, merged with t0's odd lanes */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4175
/* Half-word transpose step used by the VTRN.16 implementation.
 * Rewrites t0/t1 in place:
 *   new t0 = { low half of t1, low half of t0 }   (low, high)
 *   new t1 = { high half of t1, high half of t0 } (low, high)
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);      /* t0.low -> rd.high */
    tcg_gen_andi_i32(tmp, t1, 0xffff); /* t1.low -> rd.low */
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);      /* t1.high -> t1.low */
    tcg_gen_andi_i32(tmp, t0, 0xffff0000); /* t0.high -> t1.high */
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4194
4195
/* Decode table for the Neon "load/store multiple structures" forms,
 * indexed by the op field (insn bits [11:8], values 0..10): number of
 * D registers accessed, memory interleave factor, and register spacing.
 * Read-only, so declare it const (keeps it in rodata and prevents
 * accidental modification); all users only read from it.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4213
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid (the caller then generates an UNDEF).
   Three encoding groups are handled:
     - "load/store multiple structures" (bit 23 clear): VLD1..VLD4 /
       VST1..VST4 of whole registers;
     - "single element to all lanes" (bit 23 set, size field == 3):
       VLD1..VLD4 with replication;
     - "single element to one lane" (bit 23 set otherwise).
   In all cases the optional base-register writeback (rm != 15) is
   applied at the end. */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* All Neon loads/stores UNDEF if the FPU/Neon is disabled. */
    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;    /* base address register */
    rm = insn & 0xf;            /* writeback mode / index register */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved accesses each register starts at a
             * different offset from the base; recompute addr here
             * rather than carrying it over from the previous register.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one access per D register. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, IS_USER(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Narrower elements: process each 32-bit half ("pass")
                 * of the D register, assembling/splitting sub-word
                 * elements as needed.
                 */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit elements per 32-bit pass. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four 8-bit elements per 32-bit pass. */
                        if (load) {
                            /* Accumulate four bytes into tmp2 (little-
                             * endian lane order: byte n into bits
                             * [8n+7:8n]).
                             */
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, IS_USER(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        /* Writeback amount for the "register" post-index form. */
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;       /* no store form of this encoding */
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the existing
                         * register contents at the selected lane.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Base register writeback: rm == 13 means post-index by the
         * transfer size computed above, any other rm post-indexes by
         * that register's value.
         */
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4532
/* Bitwise select.  dest = c ? t : f, decided independently for every
 * bit: dest gets t's bit where the corresponding bit of c is 1, and
 * f's bit where it is 0.  Clobbers T and F.
 */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4540
/* Plain (non-saturating) narrow of a 64-bit source to 32 bits of packed
 * elements; size is the *destination* element width (0=8, 1=16, 2=32).
 * For size 2 this is just a truncation.
 */
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}
4550
/* Signed saturating narrow (signed in, signed out); the helpers take
 * cpu_env so they can set the QC (saturation) flag.
 */
static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
4560
/* Unsigned saturating narrow (unsigned in, unsigned out); helpers take
 * cpu_env so they can set the QC (saturation) flag.
 */
static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
4570
/* Saturating narrow with signed input and unsigned result (VQMOVUN
 * flavour); helpers take cpu_env so they can set the QC flag.
 */
static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
4580
/* Variable shift used on the narrowing-shift path: var <<= shift
 * element-wise.  In this helper `q` selects rounding (rshl) vs plain
 * (shl) shifts and `u` unsigned vs signed; only 16- and 32-bit element
 * sizes are reachable here (8-bit aborts).
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
4614
/* Widen a 32-bit value of packed elements to 64 bits; `size` is the
 * *source* element width (0=8, 1=16, 2=32) and `u` selects zero- vs
 * sign-extension.  Consumes (frees) src.
 */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
4634
/* Element-wise add on the widened (64-bit) operands.  CPU_V001 is a
 * macro expanding to the cpu_V0/cpu_V1 global temps (defined earlier in
 * this file): cpu_V0 = cpu_V0 + cpu_V1.  `size` here is the *source*
 * element width before widening (0 -> 16-bit lanes, 1 -> 32-bit lanes,
 * 2 -> a single 64-bit add).
 */
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}
4644
/* Element-wise subtract on the widened (64-bit) operands:
 * cpu_V0 = cpu_V0 - cpu_V1 (via the CPU_V001 argument macro).
 * `size` semantics match gen_neon_addl().
 */
static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
4654
/* Negate the packed elements of a widened 64-bit value in place
 * (per the helper naming: _u16 / _u32 lane variants); size 2 is a
 * plain 64-bit negation.
 */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
4666
/* Signed saturating add on widened operands: op0 = sat(op0 + op1).
 * Only 32- and 64-bit element sizes exist (the narrower long ops never
 * saturate here).  Helpers take cpu_env so they can set the QC flag.
 */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
4675
/* Widening multiply: dest(64-bit) = a * b with source element width
 * `size` (0=8, 1=16, 2=32) and signedness selected by `u`.
 * Consumes both inputs: for size < 2 the helpers do not free their
 * arguments so we free them here; for size == 2 the gen_muls/gen_mulu
 * paths are assumed to consume a and b themselves (they are not freed
 * here) -- NOTE(review): verify against their definitions earlier in
 * this file.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
4706
39d5492a
PM
4707static void gen_neon_narrow_op(int op, int u, int size,
4708 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4709{
4710 if (op) {
4711 if (u) {
4712 gen_neon_unarrow_sats(size, dest, src);
4713 } else {
4714 gen_neon_narrow(size, dest, src);
4715 }
4716 } else {
4717 if (u) {
4718 gen_neon_narrow_satu(size, dest, src);
4719 } else {
4720 gen_neon_narrow_sats(size, dest, src);
4721 }
4722 }
4723}
4724
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.  Op value 24 is unallocated and deliberately absent.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Bit n of an entry is set if the insn allows size value n; entries for
 * unallocated op values are implicitly 0, so they always UNDEF in the
 * size check in disas_neon_data_insn().
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4794
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering (3, 21, 29, 39, ...) are
 * unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4859
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float.
     * The range comparisons rely on the numeric ordering of the
     * NEON_2RM_* constants above (e.g. VRINTN..VRINTZ and
     * VRINTP..VCVTMS are contiguous); keep them in sync if new op
     * values are added.
     */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}
4869
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x7 = 8/16/32-bit allowed, 0x4 = 32-bit only, 0x1 = 8-bit only,
 * 0x2 = 16-bit only.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4936
9ee6e8bb
PB
4937/* Translate a NEON data processing instruction. Return nonzero if the
4938 instruction is invalid.
ad69471c
PB
4939 We process data in a mixture of 32-bit and 64-bit chunks.
4940 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4941
0ecb72a5 4942static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4943{
4944 int op;
4945 int q;
4946 int rd, rn, rm;
4947 int size;
4948 int shift;
4949 int pass;
4950 int count;
4951 int pairwise;
4952 int u;
ca9a32e4 4953 uint32_t imm, mask;
39d5492a 4954 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4955 TCGv_i64 tmp64;
9ee6e8bb 4956
5df8bac1 4957 if (!s->vfp_enabled)
9ee6e8bb
PB
4958 return 1;
4959 q = (insn & (1 << 6)) != 0;
4960 u = (insn >> 24) & 1;
4961 VFP_DREG_D(rd, insn);
4962 VFP_DREG_N(rn, insn);
4963 VFP_DREG_M(rm, insn);
4964 size = (insn >> 20) & 3;
4965 if ((insn & (1 << 23)) == 0) {
4966 /* Three register same length. */
4967 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4968 /* Catch invalid op and bad size combinations: UNDEF */
4969 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4970 return 1;
4971 }
25f84f79
PM
4972 /* All insns of this form UNDEF for either this condition or the
4973 * superset of cases "Q==1"; we catch the latter later.
4974 */
4975 if (q && ((rd | rn | rm) & 1)) {
4976 return 1;
4977 }
62698be3
PM
4978 if (size == 3 && op != NEON_3R_LOGIC) {
4979 /* 64-bit element instructions. */
9ee6e8bb 4980 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4981 neon_load_reg64(cpu_V0, rn + pass);
4982 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4983 switch (op) {
62698be3 4984 case NEON_3R_VQADD:
9ee6e8bb 4985 if (u) {
02da0b2d
PM
4986 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4987 cpu_V0, cpu_V1);
2c0262af 4988 } else {
02da0b2d
PM
4989 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4990 cpu_V0, cpu_V1);
2c0262af 4991 }
9ee6e8bb 4992 break;
62698be3 4993 case NEON_3R_VQSUB:
9ee6e8bb 4994 if (u) {
02da0b2d
PM
4995 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4996 cpu_V0, cpu_V1);
ad69471c 4997 } else {
02da0b2d
PM
4998 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4999 cpu_V0, cpu_V1);
ad69471c
PB
5000 }
5001 break;
62698be3 5002 case NEON_3R_VSHL:
ad69471c
PB
5003 if (u) {
5004 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5005 } else {
5006 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5007 }
5008 break;
62698be3 5009 case NEON_3R_VQSHL:
ad69471c 5010 if (u) {
02da0b2d
PM
5011 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5012 cpu_V1, cpu_V0);
ad69471c 5013 } else {
02da0b2d
PM
5014 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5015 cpu_V1, cpu_V0);
ad69471c
PB
5016 }
5017 break;
62698be3 5018 case NEON_3R_VRSHL:
ad69471c
PB
5019 if (u) {
5020 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5021 } else {
ad69471c
PB
5022 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5023 }
5024 break;
62698be3 5025 case NEON_3R_VQRSHL:
ad69471c 5026 if (u) {
02da0b2d
PM
5027 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5028 cpu_V1, cpu_V0);
ad69471c 5029 } else {
02da0b2d
PM
5030 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5031 cpu_V1, cpu_V0);
1e8d4eec 5032 }
9ee6e8bb 5033 break;
62698be3 5034 case NEON_3R_VADD_VSUB:
9ee6e8bb 5035 if (u) {
ad69471c 5036 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5037 } else {
ad69471c 5038 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5039 }
5040 break;
5041 default:
5042 abort();
2c0262af 5043 }
ad69471c 5044 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5045 }
9ee6e8bb 5046 return 0;
2c0262af 5047 }
25f84f79 5048 pairwise = 0;
9ee6e8bb 5049 switch (op) {
62698be3
PM
5050 case NEON_3R_VSHL:
5051 case NEON_3R_VQSHL:
5052 case NEON_3R_VRSHL:
5053 case NEON_3R_VQRSHL:
9ee6e8bb 5054 {
ad69471c
PB
5055 int rtmp;
5056 /* Shift instruction operands are reversed. */
5057 rtmp = rn;
9ee6e8bb 5058 rn = rm;
ad69471c 5059 rm = rtmp;
9ee6e8bb 5060 }
2c0262af 5061 break;
25f84f79
PM
5062 case NEON_3R_VPADD:
5063 if (u) {
5064 return 1;
5065 }
5066 /* Fall through */
62698be3
PM
5067 case NEON_3R_VPMAX:
5068 case NEON_3R_VPMIN:
9ee6e8bb 5069 pairwise = 1;
2c0262af 5070 break;
25f84f79
PM
5071 case NEON_3R_FLOAT_ARITH:
5072 pairwise = (u && size < 2); /* if VPADD (float) */
5073 break;
5074 case NEON_3R_FLOAT_MINMAX:
5075 pairwise = u; /* if VPMIN/VPMAX (float) */
5076 break;
5077 case NEON_3R_FLOAT_CMP:
5078 if (!u && size) {
5079 /* no encoding for U=0 C=1x */
5080 return 1;
5081 }
5082 break;
5083 case NEON_3R_FLOAT_ACMP:
5084 if (!u) {
5085 return 1;
5086 }
5087 break;
505935fc
WN
5088 case NEON_3R_FLOAT_MISC:
5089 /* VMAXNM/VMINNM in ARMv8 */
5090 if (u && !arm_feature(env, ARM_FEATURE_V8)) {
25f84f79
PM
5091 return 1;
5092 }
2c0262af 5093 break;
25f84f79
PM
5094 case NEON_3R_VMUL:
5095 if (u && (size != 0)) {
5096 /* UNDEF on invalid size for polynomial subcase */
5097 return 1;
5098 }
2c0262af 5099 break;
da97f52c
PM
5100 case NEON_3R_VFM:
5101 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
5102 return 1;
5103 }
5104 break;
9ee6e8bb 5105 default:
2c0262af 5106 break;
9ee6e8bb 5107 }
dd8fbd78 5108
25f84f79
PM
5109 if (pairwise && q) {
5110 /* All the pairwise insns UNDEF if Q is set */
5111 return 1;
5112 }
5113
9ee6e8bb
PB
5114 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5115
5116 if (pairwise) {
5117 /* Pairwise. */
a5a14945
JR
5118 if (pass < 1) {
5119 tmp = neon_load_reg(rn, 0);
5120 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5121 } else {
a5a14945
JR
5122 tmp = neon_load_reg(rm, 0);
5123 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5124 }
5125 } else {
5126 /* Elementwise. */
dd8fbd78
FN
5127 tmp = neon_load_reg(rn, pass);
5128 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5129 }
5130 switch (op) {
62698be3 5131 case NEON_3R_VHADD:
9ee6e8bb
PB
5132 GEN_NEON_INTEGER_OP(hadd);
5133 break;
62698be3 5134 case NEON_3R_VQADD:
02da0b2d 5135 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5136 break;
62698be3 5137 case NEON_3R_VRHADD:
9ee6e8bb 5138 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5139 break;
62698be3 5140 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5141 switch ((u << 2) | size) {
5142 case 0: /* VAND */
dd8fbd78 5143 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5144 break;
5145 case 1: /* BIC */
f669df27 5146 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5147 break;
5148 case 2: /* VORR */
dd8fbd78 5149 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5150 break;
5151 case 3: /* VORN */
f669df27 5152 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5153 break;
5154 case 4: /* VEOR */
dd8fbd78 5155 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5156 break;
5157 case 5: /* VBSL */
dd8fbd78
FN
5158 tmp3 = neon_load_reg(rd, pass);
5159 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5160 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5161 break;
5162 case 6: /* VBIT */
dd8fbd78
FN
5163 tmp3 = neon_load_reg(rd, pass);
5164 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5165 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5166 break;
5167 case 7: /* VBIF */
dd8fbd78
FN
5168 tmp3 = neon_load_reg(rd, pass);
5169 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5170 tcg_temp_free_i32(tmp3);
9ee6e8bb 5171 break;
2c0262af
FB
5172 }
5173 break;
62698be3 5174 case NEON_3R_VHSUB:
9ee6e8bb
PB
5175 GEN_NEON_INTEGER_OP(hsub);
5176 break;
62698be3 5177 case NEON_3R_VQSUB:
02da0b2d 5178 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5179 break;
62698be3 5180 case NEON_3R_VCGT:
9ee6e8bb
PB
5181 GEN_NEON_INTEGER_OP(cgt);
5182 break;
62698be3 5183 case NEON_3R_VCGE:
9ee6e8bb
PB
5184 GEN_NEON_INTEGER_OP(cge);
5185 break;
62698be3 5186 case NEON_3R_VSHL:
ad69471c 5187 GEN_NEON_INTEGER_OP(shl);
2c0262af 5188 break;
62698be3 5189 case NEON_3R_VQSHL:
02da0b2d 5190 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5191 break;
62698be3 5192 case NEON_3R_VRSHL:
ad69471c 5193 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5194 break;
62698be3 5195 case NEON_3R_VQRSHL:
02da0b2d 5196 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5197 break;
62698be3 5198 case NEON_3R_VMAX:
9ee6e8bb
PB
5199 GEN_NEON_INTEGER_OP(max);
5200 break;
62698be3 5201 case NEON_3R_VMIN:
9ee6e8bb
PB
5202 GEN_NEON_INTEGER_OP(min);
5203 break;
62698be3 5204 case NEON_3R_VABD:
9ee6e8bb
PB
5205 GEN_NEON_INTEGER_OP(abd);
5206 break;
62698be3 5207 case NEON_3R_VABA:
9ee6e8bb 5208 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5209 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5210 tmp2 = neon_load_reg(rd, pass);
5211 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5212 break;
62698be3 5213 case NEON_3R_VADD_VSUB:
9ee6e8bb 5214 if (!u) { /* VADD */
62698be3 5215 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5216 } else { /* VSUB */
5217 switch (size) {
dd8fbd78
FN
5218 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5219 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5220 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5221 default: abort();
9ee6e8bb
PB
5222 }
5223 }
5224 break;
62698be3 5225 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5226 if (!u) { /* VTST */
5227 switch (size) {
dd8fbd78
FN
5228 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5229 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5230 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5231 default: abort();
9ee6e8bb
PB
5232 }
5233 } else { /* VCEQ */
5234 switch (size) {
dd8fbd78
FN
5235 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5236 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5237 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5238 default: abort();
9ee6e8bb
PB
5239 }
5240 }
5241 break;
62698be3 5242 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5243 switch (size) {
dd8fbd78
FN
5244 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5245 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5246 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5247 default: abort();
9ee6e8bb 5248 }
7d1b0095 5249 tcg_temp_free_i32(tmp2);
dd8fbd78 5250 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5251 if (u) { /* VMLS */
dd8fbd78 5252 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5253 } else { /* VMLA */
dd8fbd78 5254 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5255 }
5256 break;
62698be3 5257 case NEON_3R_VMUL:
9ee6e8bb 5258 if (u) { /* polynomial */
dd8fbd78 5259 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5260 } else { /* Integer */
5261 switch (size) {
dd8fbd78
FN
5262 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5263 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5264 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5265 default: abort();
9ee6e8bb
PB
5266 }
5267 }
5268 break;
62698be3 5269 case NEON_3R_VPMAX:
9ee6e8bb
PB
5270 GEN_NEON_INTEGER_OP(pmax);
5271 break;
62698be3 5272 case NEON_3R_VPMIN:
9ee6e8bb
PB
5273 GEN_NEON_INTEGER_OP(pmin);
5274 break;
62698be3 5275 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5276 if (!u) { /* VQDMULH */
5277 switch (size) {
02da0b2d
PM
5278 case 1:
5279 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5280 break;
5281 case 2:
5282 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5283 break;
62698be3 5284 default: abort();
9ee6e8bb 5285 }
62698be3 5286 } else { /* VQRDMULH */
9ee6e8bb 5287 switch (size) {
02da0b2d
PM
5288 case 1:
5289 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5290 break;
5291 case 2:
5292 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5293 break;
62698be3 5294 default: abort();
9ee6e8bb
PB
5295 }
5296 }
5297 break;
62698be3 5298 case NEON_3R_VPADD:
9ee6e8bb 5299 switch (size) {
dd8fbd78
FN
5300 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5301 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5302 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5303 default: abort();
9ee6e8bb
PB
5304 }
5305 break;
62698be3 5306 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5307 {
5308 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5309 switch ((u << 2) | size) {
5310 case 0: /* VADD */
aa47cfdd
PM
5311 case 4: /* VPADD */
5312 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5313 break;
5314 case 2: /* VSUB */
aa47cfdd 5315 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5316 break;
5317 case 6: /* VABD */
aa47cfdd 5318 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5319 break;
5320 default:
62698be3 5321 abort();
9ee6e8bb 5322 }
aa47cfdd 5323 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5324 break;
aa47cfdd 5325 }
62698be3 5326 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5327 {
5328 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5329 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5330 if (!u) {
7d1b0095 5331 tcg_temp_free_i32(tmp2);
dd8fbd78 5332 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5333 if (size == 0) {
aa47cfdd 5334 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5335 } else {
aa47cfdd 5336 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5337 }
5338 }
aa47cfdd 5339 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5340 break;
aa47cfdd 5341 }
62698be3 5342 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5343 {
5344 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5345 if (!u) {
aa47cfdd 5346 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5347 } else {
aa47cfdd
PM
5348 if (size == 0) {
5349 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5350 } else {
5351 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5352 }
b5ff1b31 5353 }
aa47cfdd 5354 tcg_temp_free_ptr(fpstatus);
2c0262af 5355 break;
aa47cfdd 5356 }
62698be3 5357 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5358 {
5359 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5360 if (size == 0) {
5361 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5362 } else {
5363 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5364 }
5365 tcg_temp_free_ptr(fpstatus);
2c0262af 5366 break;
aa47cfdd 5367 }
62698be3 5368 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5369 {
5370 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5371 if (size == 0) {
f71a2ae5 5372 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5373 } else {
f71a2ae5 5374 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5375 }
5376 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5377 break;
aa47cfdd 5378 }
505935fc
WN
5379 case NEON_3R_FLOAT_MISC:
5380 if (u) {
5381 /* VMAXNM/VMINNM */
5382 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5383 if (size == 0) {
f71a2ae5 5384 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5385 } else {
f71a2ae5 5386 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5387 }
5388 tcg_temp_free_ptr(fpstatus);
5389 } else {
5390 if (size == 0) {
5391 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5392 } else {
5393 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5394 }
5395 }
2c0262af 5396 break;
da97f52c
PM
5397 case NEON_3R_VFM:
5398 {
5399 /* VFMA, VFMS: fused multiply-add */
5400 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5401 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5402 if (size) {
5403 /* VFMS */
5404 gen_helper_vfp_negs(tmp, tmp);
5405 }
5406 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5407 tcg_temp_free_i32(tmp3);
5408 tcg_temp_free_ptr(fpstatus);
5409 break;
5410 }
9ee6e8bb
PB
5411 default:
5412 abort();
2c0262af 5413 }
7d1b0095 5414 tcg_temp_free_i32(tmp2);
dd8fbd78 5415
9ee6e8bb
PB
5416 /* Save the result. For elementwise operations we can put it
5417 straight into the destination register. For pairwise operations
5418 we have to be careful to avoid clobbering the source operands. */
5419 if (pairwise && rd == rm) {
dd8fbd78 5420 neon_store_scratch(pass, tmp);
9ee6e8bb 5421 } else {
dd8fbd78 5422 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5423 }
5424
5425 } /* for pass */
5426 if (pairwise && rd == rm) {
5427 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5428 tmp = neon_load_scratch(pass);
5429 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5430 }
5431 }
ad69471c 5432 /* End of 3 register same size operations. */
9ee6e8bb
PB
5433 } else if (insn & (1 << 4)) {
5434 if ((insn & 0x00380080) != 0) {
5435 /* Two registers and shift. */
5436 op = (insn >> 8) & 0xf;
5437 if (insn & (1 << 7)) {
cc13115b
PM
5438 /* 64-bit shift. */
5439 if (op > 7) {
5440 return 1;
5441 }
9ee6e8bb
PB
5442 size = 3;
5443 } else {
5444 size = 2;
5445 while ((insn & (1 << (size + 19))) == 0)
5446 size--;
5447 }
5448 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5449 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5450 by immediate using the variable shift operations. */
5451 if (op < 8) {
5452 /* Shift by immediate:
5453 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5454 if (q && ((rd | rm) & 1)) {
5455 return 1;
5456 }
5457 if (!u && (op == 4 || op == 6)) {
5458 return 1;
5459 }
9ee6e8bb
PB
5460 /* Right shifts are encoded as N - shift, where N is the
5461 element size in bits. */
5462 if (op <= 4)
5463 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5464 if (size == 3) {
5465 count = q + 1;
5466 } else {
5467 count = q ? 4: 2;
5468 }
5469 switch (size) {
5470 case 0:
5471 imm = (uint8_t) shift;
5472 imm |= imm << 8;
5473 imm |= imm << 16;
5474 break;
5475 case 1:
5476 imm = (uint16_t) shift;
5477 imm |= imm << 16;
5478 break;
5479 case 2:
5480 case 3:
5481 imm = shift;
5482 break;
5483 default:
5484 abort();
5485 }
5486
5487 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5488 if (size == 3) {
5489 neon_load_reg64(cpu_V0, rm + pass);
5490 tcg_gen_movi_i64(cpu_V1, imm);
5491 switch (op) {
5492 case 0: /* VSHR */
5493 case 1: /* VSRA */
5494 if (u)
5495 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5496 else
ad69471c 5497 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5498 break;
ad69471c
PB
5499 case 2: /* VRSHR */
5500 case 3: /* VRSRA */
5501 if (u)
5502 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5503 else
ad69471c 5504 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5505 break;
ad69471c 5506 case 4: /* VSRI */
ad69471c
PB
5507 case 5: /* VSHL, VSLI */
5508 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5509 break;
0322b26e 5510 case 6: /* VQSHLU */
02da0b2d
PM
5511 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5512 cpu_V0, cpu_V1);
ad69471c 5513 break;
0322b26e
PM
5514 case 7: /* VQSHL */
5515 if (u) {
02da0b2d 5516 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5517 cpu_V0, cpu_V1);
5518 } else {
02da0b2d 5519 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5520 cpu_V0, cpu_V1);
5521 }
9ee6e8bb 5522 break;
9ee6e8bb 5523 }
ad69471c
PB
5524 if (op == 1 || op == 3) {
5525 /* Accumulate. */
5371cb81 5526 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5527 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5528 } else if (op == 4 || (op == 5 && u)) {
5529 /* Insert */
923e6509
CL
5530 neon_load_reg64(cpu_V1, rd + pass);
5531 uint64_t mask;
5532 if (shift < -63 || shift > 63) {
5533 mask = 0;
5534 } else {
5535 if (op == 4) {
5536 mask = 0xffffffffffffffffull >> -shift;
5537 } else {
5538 mask = 0xffffffffffffffffull << shift;
5539 }
5540 }
5541 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5542 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5543 }
5544 neon_store_reg64(cpu_V0, rd + pass);
5545 } else { /* size < 3 */
5546 /* Operands in T0 and T1. */
dd8fbd78 5547 tmp = neon_load_reg(rm, pass);
7d1b0095 5548 tmp2 = tcg_temp_new_i32();
dd8fbd78 5549 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5550 switch (op) {
5551 case 0: /* VSHR */
5552 case 1: /* VSRA */
5553 GEN_NEON_INTEGER_OP(shl);
5554 break;
5555 case 2: /* VRSHR */
5556 case 3: /* VRSRA */
5557 GEN_NEON_INTEGER_OP(rshl);
5558 break;
5559 case 4: /* VSRI */
ad69471c
PB
5560 case 5: /* VSHL, VSLI */
5561 switch (size) {
dd8fbd78
FN
5562 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5563 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5564 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5565 default: abort();
ad69471c
PB
5566 }
5567 break;
0322b26e 5568 case 6: /* VQSHLU */
ad69471c 5569 switch (size) {
0322b26e 5570 case 0:
02da0b2d
PM
5571 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5572 tmp, tmp2);
0322b26e
PM
5573 break;
5574 case 1:
02da0b2d
PM
5575 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5576 tmp, tmp2);
0322b26e
PM
5577 break;
5578 case 2:
02da0b2d
PM
5579 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5580 tmp, tmp2);
0322b26e
PM
5581 break;
5582 default:
cc13115b 5583 abort();
ad69471c
PB
5584 }
5585 break;
0322b26e 5586 case 7: /* VQSHL */
02da0b2d 5587 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5588 break;
ad69471c 5589 }
7d1b0095 5590 tcg_temp_free_i32(tmp2);
ad69471c
PB
5591
5592 if (op == 1 || op == 3) {
5593 /* Accumulate. */
dd8fbd78 5594 tmp2 = neon_load_reg(rd, pass);
5371cb81 5595 gen_neon_add(size, tmp, tmp2);
7d1b0095 5596 tcg_temp_free_i32(tmp2);
ad69471c
PB
5597 } else if (op == 4 || (op == 5 && u)) {
5598 /* Insert */
5599 switch (size) {
5600 case 0:
5601 if (op == 4)
ca9a32e4 5602 mask = 0xff >> -shift;
ad69471c 5603 else
ca9a32e4
JR
5604 mask = (uint8_t)(0xff << shift);
5605 mask |= mask << 8;
5606 mask |= mask << 16;
ad69471c
PB
5607 break;
5608 case 1:
5609 if (op == 4)
ca9a32e4 5610 mask = 0xffff >> -shift;
ad69471c 5611 else
ca9a32e4
JR
5612 mask = (uint16_t)(0xffff << shift);
5613 mask |= mask << 16;
ad69471c
PB
5614 break;
5615 case 2:
ca9a32e4
JR
5616 if (shift < -31 || shift > 31) {
5617 mask = 0;
5618 } else {
5619 if (op == 4)
5620 mask = 0xffffffffu >> -shift;
5621 else
5622 mask = 0xffffffffu << shift;
5623 }
ad69471c
PB
5624 break;
5625 default:
5626 abort();
5627 }
dd8fbd78 5628 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5629 tcg_gen_andi_i32(tmp, tmp, mask);
5630 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5631 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5632 tcg_temp_free_i32(tmp2);
ad69471c 5633 }
dd8fbd78 5634 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5635 }
5636 } /* for pass */
5637 } else if (op < 10) {
ad69471c 5638 /* Shift by immediate and narrow:
9ee6e8bb 5639 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5640 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5641 if (rm & 1) {
5642 return 1;
5643 }
9ee6e8bb
PB
5644 shift = shift - (1 << (size + 3));
5645 size++;
92cdfaeb 5646 if (size == 3) {
a7812ae4 5647 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5648 neon_load_reg64(cpu_V0, rm);
5649 neon_load_reg64(cpu_V1, rm + 1);
5650 for (pass = 0; pass < 2; pass++) {
5651 TCGv_i64 in;
5652 if (pass == 0) {
5653 in = cpu_V0;
5654 } else {
5655 in = cpu_V1;
5656 }
ad69471c 5657 if (q) {
0b36f4cd 5658 if (input_unsigned) {
92cdfaeb 5659 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5660 } else {
92cdfaeb 5661 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5662 }
ad69471c 5663 } else {
0b36f4cd 5664 if (input_unsigned) {
92cdfaeb 5665 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5666 } else {
92cdfaeb 5667 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5668 }
ad69471c 5669 }
7d1b0095 5670 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5671 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5672 neon_store_reg(rd, pass, tmp);
5673 } /* for pass */
5674 tcg_temp_free_i64(tmp64);
5675 } else {
5676 if (size == 1) {
5677 imm = (uint16_t)shift;
5678 imm |= imm << 16;
2c0262af 5679 } else {
92cdfaeb
PM
5680 /* size == 2 */
5681 imm = (uint32_t)shift;
5682 }
5683 tmp2 = tcg_const_i32(imm);
5684 tmp4 = neon_load_reg(rm + 1, 0);
5685 tmp5 = neon_load_reg(rm + 1, 1);
5686 for (pass = 0; pass < 2; pass++) {
5687 if (pass == 0) {
5688 tmp = neon_load_reg(rm, 0);
5689 } else {
5690 tmp = tmp4;
5691 }
0b36f4cd
CL
5692 gen_neon_shift_narrow(size, tmp, tmp2, q,
5693 input_unsigned);
92cdfaeb
PM
5694 if (pass == 0) {
5695 tmp3 = neon_load_reg(rm, 1);
5696 } else {
5697 tmp3 = tmp5;
5698 }
0b36f4cd
CL
5699 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5700 input_unsigned);
36aa55dc 5701 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5702 tcg_temp_free_i32(tmp);
5703 tcg_temp_free_i32(tmp3);
5704 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5705 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5706 neon_store_reg(rd, pass, tmp);
5707 } /* for pass */
c6067f04 5708 tcg_temp_free_i32(tmp2);
b75263d6 5709 }
9ee6e8bb 5710 } else if (op == 10) {
cc13115b
PM
5711 /* VSHLL, VMOVL */
5712 if (q || (rd & 1)) {
9ee6e8bb 5713 return 1;
cc13115b 5714 }
ad69471c
PB
5715 tmp = neon_load_reg(rm, 0);
5716 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5717 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5718 if (pass == 1)
5719 tmp = tmp2;
5720
5721 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5722
9ee6e8bb
PB
5723 if (shift != 0) {
5724 /* The shift is less than the width of the source
ad69471c
PB
5725 type, so we can just shift the whole register. */
5726 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5727 /* Widen the result of shift: we need to clear
5728 * the potential overflow bits resulting from
5729 * left bits of the narrow input appearing as
5730 * right bits of left the neighbour narrow
5731 * input. */
ad69471c
PB
5732 if (size < 2 || !u) {
5733 uint64_t imm64;
5734 if (size == 0) {
5735 imm = (0xffu >> (8 - shift));
5736 imm |= imm << 16;
acdf01ef 5737 } else if (size == 1) {
ad69471c 5738 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5739 } else {
5740 /* size == 2 */
5741 imm = 0xffffffff >> (32 - shift);
5742 }
5743 if (size < 2) {
5744 imm64 = imm | (((uint64_t)imm) << 32);
5745 } else {
5746 imm64 = imm;
9ee6e8bb 5747 }
acdf01ef 5748 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5749 }
5750 }
ad69471c 5751 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5752 }
f73534a5 5753 } else if (op >= 14) {
9ee6e8bb 5754 /* VCVT fixed-point. */
cc13115b
PM
5755 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5756 return 1;
5757 }
f73534a5
PM
5758 /* We have already masked out the must-be-1 top bit of imm6,
5759 * hence this 32-shift where the ARM ARM has 64-imm6.
5760 */
5761 shift = 32 - shift;
9ee6e8bb 5762 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5763 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5764 if (!(op & 1)) {
9ee6e8bb 5765 if (u)
5500b06c 5766 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5767 else
5500b06c 5768 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5769 } else {
5770 if (u)
5500b06c 5771 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5772 else
5500b06c 5773 gen_vfp_tosl(0, shift, 1);
2c0262af 5774 }
4373f3ce 5775 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5776 }
5777 } else {
9ee6e8bb
PB
5778 return 1;
5779 }
5780 } else { /* (insn & 0x00380080) == 0 */
5781 int invert;
7d80fee5
PM
5782 if (q && (rd & 1)) {
5783 return 1;
5784 }
9ee6e8bb
PB
5785
5786 op = (insn >> 8) & 0xf;
5787 /* One register and immediate. */
5788 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5789 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5790 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5791 * We choose to not special-case this and will behave as if a
5792 * valid constant encoding of 0 had been given.
5793 */
9ee6e8bb
PB
5794 switch (op) {
5795 case 0: case 1:
5796 /* no-op */
5797 break;
5798 case 2: case 3:
5799 imm <<= 8;
5800 break;
5801 case 4: case 5:
5802 imm <<= 16;
5803 break;
5804 case 6: case 7:
5805 imm <<= 24;
5806 break;
5807 case 8: case 9:
5808 imm |= imm << 16;
5809 break;
5810 case 10: case 11:
5811 imm = (imm << 8) | (imm << 24);
5812 break;
5813 case 12:
8e31209e 5814 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5815 break;
5816 case 13:
5817 imm = (imm << 16) | 0xffff;
5818 break;
5819 case 14:
5820 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5821 if (invert)
5822 imm = ~imm;
5823 break;
5824 case 15:
7d80fee5
PM
5825 if (invert) {
5826 return 1;
5827 }
9ee6e8bb
PB
5828 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5829 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5830 break;
5831 }
5832 if (invert)
5833 imm = ~imm;
5834
9ee6e8bb
PB
5835 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5836 if (op & 1 && op < 12) {
ad69471c 5837 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5838 if (invert) {
5839 /* The immediate value has already been inverted, so
5840 BIC becomes AND. */
ad69471c 5841 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5842 } else {
ad69471c 5843 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5844 }
9ee6e8bb 5845 } else {
ad69471c 5846 /* VMOV, VMVN. */
7d1b0095 5847 tmp = tcg_temp_new_i32();
9ee6e8bb 5848 if (op == 14 && invert) {
a5a14945 5849 int n;
ad69471c
PB
5850 uint32_t val;
5851 val = 0;
9ee6e8bb
PB
5852 for (n = 0; n < 4; n++) {
5853 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5854 val |= 0xff << (n * 8);
9ee6e8bb 5855 }
ad69471c
PB
5856 tcg_gen_movi_i32(tmp, val);
5857 } else {
5858 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5859 }
9ee6e8bb 5860 }
ad69471c 5861 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5862 }
5863 }
e4b3861d 5864 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5865 if (size != 3) {
5866 op = (insn >> 8) & 0xf;
5867 if ((insn & (1 << 6)) == 0) {
5868 /* Three registers of different lengths. */
5869 int src1_wide;
5870 int src2_wide;
5871 int prewiden;
695272dc
PM
5872 /* undefreq: bit 0 : UNDEF if size != 0
5873 * bit 1 : UNDEF if size == 0
5874 * bit 2 : UNDEF if U == 1
5875 * Note that [1:0] set implies 'always UNDEF'
5876 */
5877 int undefreq;
5878 /* prewiden, src1_wide, src2_wide, undefreq */
5879 static const int neon_3reg_wide[16][4] = {
5880 {1, 0, 0, 0}, /* VADDL */
5881 {1, 1, 0, 0}, /* VADDW */
5882 {1, 0, 0, 0}, /* VSUBL */
5883 {1, 1, 0, 0}, /* VSUBW */
5884 {0, 1, 1, 0}, /* VADDHN */
5885 {0, 0, 0, 0}, /* VABAL */
5886 {0, 1, 1, 0}, /* VSUBHN */
5887 {0, 0, 0, 0}, /* VABDL */
5888 {0, 0, 0, 0}, /* VMLAL */
5889 {0, 0, 0, 6}, /* VQDMLAL */
5890 {0, 0, 0, 0}, /* VMLSL */
5891 {0, 0, 0, 6}, /* VQDMLSL */
5892 {0, 0, 0, 0}, /* Integer VMULL */
5893 {0, 0, 0, 2}, /* VQDMULL */
5894 {0, 0, 0, 5}, /* Polynomial VMULL */
5895 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5896 };
5897
5898 prewiden = neon_3reg_wide[op][0];
5899 src1_wide = neon_3reg_wide[op][1];
5900 src2_wide = neon_3reg_wide[op][2];
695272dc 5901 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5902
695272dc
PM
5903 if (((undefreq & 1) && (size != 0)) ||
5904 ((undefreq & 2) && (size == 0)) ||
5905 ((undefreq & 4) && u)) {
5906 return 1;
5907 }
5908 if ((src1_wide && (rn & 1)) ||
5909 (src2_wide && (rm & 1)) ||
5910 (!src2_wide && (rd & 1))) {
ad69471c 5911 return 1;
695272dc 5912 }
ad69471c 5913
9ee6e8bb
PB
5914 /* Avoid overlapping operands. Wide source operands are
5915 always aligned so will never overlap with wide
5916 destinations in problematic ways. */
8f8e3aa4 5917 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5918 tmp = neon_load_reg(rm, 1);
5919 neon_store_scratch(2, tmp);
8f8e3aa4 5920 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5921 tmp = neon_load_reg(rn, 1);
5922 neon_store_scratch(2, tmp);
9ee6e8bb 5923 }
39d5492a 5924 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5925 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5926 if (src1_wide) {
5927 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5928 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5929 } else {
ad69471c 5930 if (pass == 1 && rd == rn) {
dd8fbd78 5931 tmp = neon_load_scratch(2);
9ee6e8bb 5932 } else {
ad69471c
PB
5933 tmp = neon_load_reg(rn, pass);
5934 }
5935 if (prewiden) {
5936 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5937 }
5938 }
ad69471c
PB
5939 if (src2_wide) {
5940 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5941 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5942 } else {
ad69471c 5943 if (pass == 1 && rd == rm) {
dd8fbd78 5944 tmp2 = neon_load_scratch(2);
9ee6e8bb 5945 } else {
ad69471c
PB
5946 tmp2 = neon_load_reg(rm, pass);
5947 }
5948 if (prewiden) {
5949 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5950 }
9ee6e8bb
PB
5951 }
5952 switch (op) {
5953 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5954 gen_neon_addl(size);
9ee6e8bb 5955 break;
79b0e534 5956 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5957 gen_neon_subl(size);
9ee6e8bb
PB
5958 break;
5959 case 5: case 7: /* VABAL, VABDL */
5960 switch ((size << 1) | u) {
ad69471c
PB
5961 case 0:
5962 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5963 break;
5964 case 1:
5965 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5966 break;
5967 case 2:
5968 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5969 break;
5970 case 3:
5971 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5972 break;
5973 case 4:
5974 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5975 break;
5976 case 5:
5977 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5978 break;
9ee6e8bb
PB
5979 default: abort();
5980 }
7d1b0095
PM
5981 tcg_temp_free_i32(tmp2);
5982 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5983 break;
5984 case 8: case 9: case 10: case 11: case 12: case 13:
5985 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5986 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5987 break;
5988 case 14: /* Polynomial VMULL */
e5ca24cb 5989 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5990 tcg_temp_free_i32(tmp2);
5991 tcg_temp_free_i32(tmp);
e5ca24cb 5992 break;
695272dc
PM
5993 default: /* 15 is RESERVED: caught earlier */
5994 abort();
9ee6e8bb 5995 }
ebcd88ce
PM
5996 if (op == 13) {
5997 /* VQDMULL */
5998 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5999 neon_store_reg64(cpu_V0, rd + pass);
6000 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6001 /* Accumulate. */
ebcd88ce 6002 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6003 switch (op) {
4dc064e6
PM
6004 case 10: /* VMLSL */
6005 gen_neon_negl(cpu_V0, size);
6006 /* Fall through */
6007 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6008 gen_neon_addl(size);
9ee6e8bb
PB
6009 break;
6010 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6011 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6012 if (op == 11) {
6013 gen_neon_negl(cpu_V0, size);
6014 }
ad69471c
PB
6015 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6016 break;
9ee6e8bb
PB
6017 default:
6018 abort();
6019 }
ad69471c 6020 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6021 } else if (op == 4 || op == 6) {
6022 /* Narrowing operation. */
7d1b0095 6023 tmp = tcg_temp_new_i32();
79b0e534 6024 if (!u) {
9ee6e8bb 6025 switch (size) {
ad69471c
PB
6026 case 0:
6027 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6028 break;
6029 case 1:
6030 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6031 break;
6032 case 2:
6033 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6034 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6035 break;
9ee6e8bb
PB
6036 default: abort();
6037 }
6038 } else {
6039 switch (size) {
ad69471c
PB
6040 case 0:
6041 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6042 break;
6043 case 1:
6044 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6045 break;
6046 case 2:
6047 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6048 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6049 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6050 break;
9ee6e8bb
PB
6051 default: abort();
6052 }
6053 }
ad69471c
PB
6054 if (pass == 0) {
6055 tmp3 = tmp;
6056 } else {
6057 neon_store_reg(rd, 0, tmp3);
6058 neon_store_reg(rd, 1, tmp);
6059 }
9ee6e8bb
PB
6060 } else {
6061 /* Write back the result. */
ad69471c 6062 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6063 }
6064 }
6065 } else {
3e3326df
PM
6066 /* Two registers and a scalar. NB that for ops of this form
6067 * the ARM ARM labels bit 24 as Q, but it is in our variable
6068 * 'u', not 'q'.
6069 */
6070 if (size == 0) {
6071 return 1;
6072 }
9ee6e8bb 6073 switch (op) {
9ee6e8bb 6074 case 1: /* Float VMLA scalar */
9ee6e8bb 6075 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6076 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6077 if (size == 1) {
6078 return 1;
6079 }
6080 /* fall through */
6081 case 0: /* Integer VMLA scalar */
6082 case 4: /* Integer VMLS scalar */
6083 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6084 case 12: /* VQDMULH scalar */
6085 case 13: /* VQRDMULH scalar */
3e3326df
PM
6086 if (u && ((rd | rn) & 1)) {
6087 return 1;
6088 }
dd8fbd78
FN
6089 tmp = neon_get_scalar(size, rm);
6090 neon_store_scratch(0, tmp);
9ee6e8bb 6091 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6092 tmp = neon_load_scratch(0);
6093 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6094 if (op == 12) {
6095 if (size == 1) {
02da0b2d 6096 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6097 } else {
02da0b2d 6098 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6099 }
6100 } else if (op == 13) {
6101 if (size == 1) {
02da0b2d 6102 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6103 } else {
02da0b2d 6104 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6105 }
6106 } else if (op & 1) {
aa47cfdd
PM
6107 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6108 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6109 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6110 } else {
6111 switch (size) {
dd8fbd78
FN
6112 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6113 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6114 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6115 default: abort();
9ee6e8bb
PB
6116 }
6117 }
7d1b0095 6118 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6119 if (op < 8) {
6120 /* Accumulate. */
dd8fbd78 6121 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6122 switch (op) {
6123 case 0:
dd8fbd78 6124 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6125 break;
6126 case 1:
aa47cfdd
PM
6127 {
6128 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6129 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6130 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6131 break;
aa47cfdd 6132 }
9ee6e8bb 6133 case 4:
dd8fbd78 6134 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6135 break;
6136 case 5:
aa47cfdd
PM
6137 {
6138 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6139 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6140 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6141 break;
aa47cfdd 6142 }
9ee6e8bb
PB
6143 default:
6144 abort();
6145 }
7d1b0095 6146 tcg_temp_free_i32(tmp2);
9ee6e8bb 6147 }
dd8fbd78 6148 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6149 }
6150 break;
9ee6e8bb 6151 case 3: /* VQDMLAL scalar */
9ee6e8bb 6152 case 7: /* VQDMLSL scalar */
9ee6e8bb 6153 case 11: /* VQDMULL scalar */
3e3326df 6154 if (u == 1) {
ad69471c 6155 return 1;
3e3326df
PM
6156 }
6157 /* fall through */
6158 case 2: /* VMLAL sclar */
6159 case 6: /* VMLSL scalar */
6160 case 10: /* VMULL scalar */
6161 if (rd & 1) {
6162 return 1;
6163 }
dd8fbd78 6164 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6165 /* We need a copy of tmp2 because gen_neon_mull
6166 * deletes it during pass 0. */
7d1b0095 6167 tmp4 = tcg_temp_new_i32();
c6067f04 6168 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6169 tmp3 = neon_load_reg(rn, 1);
ad69471c 6170
9ee6e8bb 6171 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6172 if (pass == 0) {
6173 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6174 } else {
dd8fbd78 6175 tmp = tmp3;
c6067f04 6176 tmp2 = tmp4;
9ee6e8bb 6177 }
ad69471c 6178 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6179 if (op != 11) {
6180 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6181 }
9ee6e8bb 6182 switch (op) {
4dc064e6
PM
6183 case 6:
6184 gen_neon_negl(cpu_V0, size);
6185 /* Fall through */
6186 case 2:
ad69471c 6187 gen_neon_addl(size);
9ee6e8bb
PB
6188 break;
6189 case 3: case 7:
ad69471c 6190 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6191 if (op == 7) {
6192 gen_neon_negl(cpu_V0, size);
6193 }
ad69471c 6194 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6195 break;
6196 case 10:
6197 /* no-op */
6198 break;
6199 case 11:
ad69471c 6200 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6201 break;
6202 default:
6203 abort();
6204 }
ad69471c 6205 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6206 }
dd8fbd78 6207
dd8fbd78 6208
9ee6e8bb
PB
6209 break;
6210 default: /* 14 and 15 are RESERVED */
6211 return 1;
6212 }
6213 }
6214 } else { /* size == 3 */
6215 if (!u) {
6216 /* Extract. */
9ee6e8bb 6217 imm = (insn >> 8) & 0xf;
ad69471c
PB
6218
6219 if (imm > 7 && !q)
6220 return 1;
6221
52579ea1
PM
6222 if (q && ((rd | rn | rm) & 1)) {
6223 return 1;
6224 }
6225
ad69471c
PB
6226 if (imm == 0) {
6227 neon_load_reg64(cpu_V0, rn);
6228 if (q) {
6229 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6230 }
ad69471c
PB
6231 } else if (imm == 8) {
6232 neon_load_reg64(cpu_V0, rn + 1);
6233 if (q) {
6234 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6235 }
ad69471c 6236 } else if (q) {
a7812ae4 6237 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6238 if (imm < 8) {
6239 neon_load_reg64(cpu_V0, rn);
a7812ae4 6240 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6241 } else {
6242 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6243 neon_load_reg64(tmp64, rm);
ad69471c
PB
6244 }
6245 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6246 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6247 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6248 if (imm < 8) {
6249 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6250 } else {
ad69471c
PB
6251 neon_load_reg64(cpu_V1, rm + 1);
6252 imm -= 8;
9ee6e8bb 6253 }
ad69471c 6254 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6255 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6256 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6257 tcg_temp_free_i64(tmp64);
ad69471c 6258 } else {
a7812ae4 6259 /* BUGFIX */
ad69471c 6260 neon_load_reg64(cpu_V0, rn);
a7812ae4 6261 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6262 neon_load_reg64(cpu_V1, rm);
a7812ae4 6263 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6264 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6265 }
6266 neon_store_reg64(cpu_V0, rd);
6267 if (q) {
6268 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6269 }
6270 } else if ((insn & (1 << 11)) == 0) {
6271 /* Two register misc. */
6272 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6273 size = (insn >> 18) & 3;
600b828c
PM
6274 /* UNDEF for unknown op values and bad op-size combinations */
6275 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6276 return 1;
6277 }
fc2a9b37
PM
6278 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6279 q && ((rm | rd) & 1)) {
6280 return 1;
6281 }
9ee6e8bb 6282 switch (op) {
600b828c 6283 case NEON_2RM_VREV64:
9ee6e8bb 6284 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6285 tmp = neon_load_reg(rm, pass * 2);
6286 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6287 switch (size) {
dd8fbd78
FN
6288 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6289 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6290 case 2: /* no-op */ break;
6291 default: abort();
6292 }
dd8fbd78 6293 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6294 if (size == 2) {
dd8fbd78 6295 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6296 } else {
9ee6e8bb 6297 switch (size) {
dd8fbd78
FN
6298 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6299 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6300 default: abort();
6301 }
dd8fbd78 6302 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6303 }
6304 }
6305 break;
600b828c
PM
6306 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6307 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6308 for (pass = 0; pass < q + 1; pass++) {
6309 tmp = neon_load_reg(rm, pass * 2);
6310 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6311 tmp = neon_load_reg(rm, pass * 2 + 1);
6312 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6313 switch (size) {
6314 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6315 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6316 case 2: tcg_gen_add_i64(CPU_V001); break;
6317 default: abort();
6318 }
600b828c 6319 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6320 /* Accumulate. */
ad69471c
PB
6321 neon_load_reg64(cpu_V1, rd + pass);
6322 gen_neon_addl(size);
9ee6e8bb 6323 }
ad69471c 6324 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6325 }
6326 break;
600b828c 6327 case NEON_2RM_VTRN:
9ee6e8bb 6328 if (size == 2) {
a5a14945 6329 int n;
9ee6e8bb 6330 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6331 tmp = neon_load_reg(rm, n);
6332 tmp2 = neon_load_reg(rd, n + 1);
6333 neon_store_reg(rm, n, tmp2);
6334 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6335 }
6336 } else {
6337 goto elementwise;
6338 }
6339 break;
600b828c 6340 case NEON_2RM_VUZP:
02acedf9 6341 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6342 return 1;
9ee6e8bb
PB
6343 }
6344 break;
600b828c 6345 case NEON_2RM_VZIP:
d68a6f3a 6346 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6347 return 1;
9ee6e8bb
PB
6348 }
6349 break;
600b828c
PM
6350 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6351 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6352 if (rm & 1) {
6353 return 1;
6354 }
39d5492a 6355 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6356 for (pass = 0; pass < 2; pass++) {
ad69471c 6357 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6358 tmp = tcg_temp_new_i32();
600b828c
PM
6359 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6360 tmp, cpu_V0);
ad69471c
PB
6361 if (pass == 0) {
6362 tmp2 = tmp;
6363 } else {
6364 neon_store_reg(rd, 0, tmp2);
6365 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6366 }
9ee6e8bb
PB
6367 }
6368 break;
600b828c 6369 case NEON_2RM_VSHLL:
fc2a9b37 6370 if (q || (rd & 1)) {
9ee6e8bb 6371 return 1;
600b828c 6372 }
ad69471c
PB
6373 tmp = neon_load_reg(rm, 0);
6374 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6375 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6376 if (pass == 1)
6377 tmp = tmp2;
6378 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6379 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6380 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6381 }
6382 break;
600b828c 6383 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
6384 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6385 q || (rm & 1)) {
6386 return 1;
6387 }
7d1b0095
PM
6388 tmp = tcg_temp_new_i32();
6389 tmp2 = tcg_temp_new_i32();
60011498 6390 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6391 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6392 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6393 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6394 tcg_gen_shli_i32(tmp2, tmp2, 16);
6395 tcg_gen_or_i32(tmp2, tmp2, tmp);
6396 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6397 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6398 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6399 neon_store_reg(rd, 0, tmp2);
7d1b0095 6400 tmp2 = tcg_temp_new_i32();
2d981da7 6401 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6402 tcg_gen_shli_i32(tmp2, tmp2, 16);
6403 tcg_gen_or_i32(tmp2, tmp2, tmp);
6404 neon_store_reg(rd, 1, tmp2);
7d1b0095 6405 tcg_temp_free_i32(tmp);
60011498 6406 break;
600b828c 6407 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6408 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6409 q || (rd & 1)) {
6410 return 1;
6411 }
7d1b0095 6412 tmp3 = tcg_temp_new_i32();
60011498
PB
6413 tmp = neon_load_reg(rm, 0);
6414 tmp2 = neon_load_reg(rm, 1);
6415 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6416 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6417 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6418 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6419 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6420 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6421 tcg_temp_free_i32(tmp);
60011498 6422 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6423 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6424 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6425 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6426 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6427 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6428 tcg_temp_free_i32(tmp2);
6429 tcg_temp_free_i32(tmp3);
60011498 6430 break;
9d935509
AB
6431 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6432 if (!arm_feature(env, ARM_FEATURE_V8_AES)
6433 || ((rm | rd) & 1)) {
6434 return 1;
6435 }
6436 tmp = tcg_const_i32(rd);
6437 tmp2 = tcg_const_i32(rm);
6438
6439 /* Bit 6 is the lowest opcode bit; it distinguishes between
6440 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6441 */
6442 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6443
6444 if (op == NEON_2RM_AESE) {
6445 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6446 } else {
6447 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6448 }
6449 tcg_temp_free_i32(tmp);
6450 tcg_temp_free_i32(tmp2);
6451 tcg_temp_free_i32(tmp3);
6452 break;
9ee6e8bb
PB
6453 default:
6454 elementwise:
6455 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6456 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6457 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6458 neon_reg_offset(rm, pass));
39d5492a 6459 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6460 } else {
dd8fbd78 6461 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6462 }
6463 switch (op) {
600b828c 6464 case NEON_2RM_VREV32:
9ee6e8bb 6465 switch (size) {
dd8fbd78
FN
6466 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6467 case 1: gen_swap_half(tmp); break;
600b828c 6468 default: abort();
9ee6e8bb
PB
6469 }
6470 break;
600b828c 6471 case NEON_2RM_VREV16:
dd8fbd78 6472 gen_rev16(tmp);
9ee6e8bb 6473 break;
600b828c 6474 case NEON_2RM_VCLS:
9ee6e8bb 6475 switch (size) {
dd8fbd78
FN
6476 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6477 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6478 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6479 default: abort();
9ee6e8bb
PB
6480 }
6481 break;
600b828c 6482 case NEON_2RM_VCLZ:
9ee6e8bb 6483 switch (size) {
dd8fbd78
FN
6484 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6485 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6486 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6487 default: abort();
9ee6e8bb
PB
6488 }
6489 break;
600b828c 6490 case NEON_2RM_VCNT:
dd8fbd78 6491 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6492 break;
600b828c 6493 case NEON_2RM_VMVN:
dd8fbd78 6494 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6495 break;
600b828c 6496 case NEON_2RM_VQABS:
9ee6e8bb 6497 switch (size) {
02da0b2d
PM
6498 case 0:
6499 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6500 break;
6501 case 1:
6502 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6503 break;
6504 case 2:
6505 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6506 break;
600b828c 6507 default: abort();
9ee6e8bb
PB
6508 }
6509 break;
600b828c 6510 case NEON_2RM_VQNEG:
9ee6e8bb 6511 switch (size) {
02da0b2d
PM
6512 case 0:
6513 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6514 break;
6515 case 1:
6516 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6517 break;
6518 case 2:
6519 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6520 break;
600b828c 6521 default: abort();
9ee6e8bb
PB
6522 }
6523 break;
600b828c 6524 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6525 tmp2 = tcg_const_i32(0);
9ee6e8bb 6526 switch(size) {
dd8fbd78
FN
6527 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6528 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6529 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6530 default: abort();
9ee6e8bb 6531 }
39d5492a 6532 tcg_temp_free_i32(tmp2);
600b828c 6533 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6534 tcg_gen_not_i32(tmp, tmp);
600b828c 6535 }
9ee6e8bb 6536 break;
600b828c 6537 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6538 tmp2 = tcg_const_i32(0);
9ee6e8bb 6539 switch(size) {
dd8fbd78
FN
6540 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6541 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6542 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6543 default: abort();
9ee6e8bb 6544 }
39d5492a 6545 tcg_temp_free_i32(tmp2);
600b828c 6546 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6547 tcg_gen_not_i32(tmp, tmp);
600b828c 6548 }
9ee6e8bb 6549 break;
600b828c 6550 case NEON_2RM_VCEQ0:
dd8fbd78 6551 tmp2 = tcg_const_i32(0);
9ee6e8bb 6552 switch(size) {
dd8fbd78
FN
6553 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6554 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6555 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6556 default: abort();
9ee6e8bb 6557 }
39d5492a 6558 tcg_temp_free_i32(tmp2);
9ee6e8bb 6559 break;
600b828c 6560 case NEON_2RM_VABS:
9ee6e8bb 6561 switch(size) {
dd8fbd78
FN
6562 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6563 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6564 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6565 default: abort();
9ee6e8bb
PB
6566 }
6567 break;
600b828c 6568 case NEON_2RM_VNEG:
dd8fbd78
FN
6569 tmp2 = tcg_const_i32(0);
6570 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6571 tcg_temp_free_i32(tmp2);
9ee6e8bb 6572 break;
600b828c 6573 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6574 {
6575 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6576 tmp2 = tcg_const_i32(0);
aa47cfdd 6577 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6578 tcg_temp_free_i32(tmp2);
aa47cfdd 6579 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6580 break;
aa47cfdd 6581 }
600b828c 6582 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6583 {
6584 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6585 tmp2 = tcg_const_i32(0);
aa47cfdd 6586 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6587 tcg_temp_free_i32(tmp2);
aa47cfdd 6588 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6589 break;
aa47cfdd 6590 }
600b828c 6591 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6592 {
6593 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6594 tmp2 = tcg_const_i32(0);
aa47cfdd 6595 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6596 tcg_temp_free_i32(tmp2);
aa47cfdd 6597 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6598 break;
aa47cfdd 6599 }
600b828c 6600 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6601 {
6602 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6603 tmp2 = tcg_const_i32(0);
aa47cfdd 6604 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6605 tcg_temp_free_i32(tmp2);
aa47cfdd 6606 tcg_temp_free_ptr(fpstatus);
0e326109 6607 break;
aa47cfdd 6608 }
600b828c 6609 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6610 {
6611 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6612 tmp2 = tcg_const_i32(0);
aa47cfdd 6613 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6614 tcg_temp_free_i32(tmp2);
aa47cfdd 6615 tcg_temp_free_ptr(fpstatus);
0e326109 6616 break;
aa47cfdd 6617 }
600b828c 6618 case NEON_2RM_VABS_F:
4373f3ce 6619 gen_vfp_abs(0);
9ee6e8bb 6620 break;
600b828c 6621 case NEON_2RM_VNEG_F:
4373f3ce 6622 gen_vfp_neg(0);
9ee6e8bb 6623 break;
600b828c 6624 case NEON_2RM_VSWP:
dd8fbd78
FN
6625 tmp2 = neon_load_reg(rd, pass);
6626 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6627 break;
600b828c 6628 case NEON_2RM_VTRN:
dd8fbd78 6629 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6630 switch (size) {
dd8fbd78
FN
6631 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6632 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6633 default: abort();
9ee6e8bb 6634 }
dd8fbd78 6635 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6636 break;
34f7b0a2
WN
6637 case NEON_2RM_VRINTN:
6638 case NEON_2RM_VRINTA:
6639 case NEON_2RM_VRINTM:
6640 case NEON_2RM_VRINTP:
6641 case NEON_2RM_VRINTZ:
6642 {
6643 TCGv_i32 tcg_rmode;
6644 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6645 int rmode;
6646
6647 if (op == NEON_2RM_VRINTZ) {
6648 rmode = FPROUNDING_ZERO;
6649 } else {
6650 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6651 }
6652
6653 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6654 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6655 cpu_env);
6656 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6657 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6658 cpu_env);
6659 tcg_temp_free_ptr(fpstatus);
6660 tcg_temp_free_i32(tcg_rmode);
6661 break;
6662 }
2ce70625
WN
6663 case NEON_2RM_VRINTX:
6664 {
6665 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6666 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6667 tcg_temp_free_ptr(fpstatus);
6668 break;
6669 }
901ad525
WN
6670 case NEON_2RM_VCVTAU:
6671 case NEON_2RM_VCVTAS:
6672 case NEON_2RM_VCVTNU:
6673 case NEON_2RM_VCVTNS:
6674 case NEON_2RM_VCVTPU:
6675 case NEON_2RM_VCVTPS:
6676 case NEON_2RM_VCVTMU:
6677 case NEON_2RM_VCVTMS:
6678 {
6679 bool is_signed = !extract32(insn, 7, 1);
6680 TCGv_ptr fpst = get_fpstatus_ptr(1);
6681 TCGv_i32 tcg_rmode, tcg_shift;
6682 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6683
6684 tcg_shift = tcg_const_i32(0);
6685 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6686 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6687 cpu_env);
6688
6689 if (is_signed) {
6690 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6691 tcg_shift, fpst);
6692 } else {
6693 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6694 tcg_shift, fpst);
6695 }
6696
6697 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6698 cpu_env);
6699 tcg_temp_free_i32(tcg_rmode);
6700 tcg_temp_free_i32(tcg_shift);
6701 tcg_temp_free_ptr(fpst);
6702 break;
6703 }
600b828c 6704 case NEON_2RM_VRECPE:
b6d4443a
AB
6705 {
6706 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6707 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6708 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6709 break;
b6d4443a 6710 }
600b828c 6711 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6712 {
6713 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6714 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6715 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6716 break;
c2fb418e 6717 }
600b828c 6718 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6719 {
6720 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6721 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6722 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6723 break;
b6d4443a 6724 }
600b828c 6725 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6726 {
6727 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6728 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6729 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6730 break;
c2fb418e 6731 }
600b828c 6732 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6733 gen_vfp_sito(0, 1);
9ee6e8bb 6734 break;
600b828c 6735 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6736 gen_vfp_uito(0, 1);
9ee6e8bb 6737 break;
600b828c 6738 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6739 gen_vfp_tosiz(0, 1);
9ee6e8bb 6740 break;
600b828c 6741 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6742 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6743 break;
6744 default:
600b828c
PM
6745 /* Reserved op values were caught by the
6746 * neon_2rm_sizes[] check earlier.
6747 */
6748 abort();
9ee6e8bb 6749 }
600b828c 6750 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6751 tcg_gen_st_f32(cpu_F0s, cpu_env,
6752 neon_reg_offset(rd, pass));
9ee6e8bb 6753 } else {
dd8fbd78 6754 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6755 }
6756 }
6757 break;
6758 }
6759 } else if ((insn & (1 << 10)) == 0) {
6760 /* VTBL, VTBX. */
56907d77
PM
6761 int n = ((insn >> 8) & 3) + 1;
6762 if ((rn + n) > 32) {
6763 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6764 * helper function running off the end of the register file.
6765 */
6766 return 1;
6767 }
6768 n <<= 3;
9ee6e8bb 6769 if (insn & (1 << 6)) {
8f8e3aa4 6770 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6771 } else {
7d1b0095 6772 tmp = tcg_temp_new_i32();
8f8e3aa4 6773 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6774 }
8f8e3aa4 6775 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6776 tmp4 = tcg_const_i32(rn);
6777 tmp5 = tcg_const_i32(n);
9ef39277 6778 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6779 tcg_temp_free_i32(tmp);
9ee6e8bb 6780 if (insn & (1 << 6)) {
8f8e3aa4 6781 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6782 } else {
7d1b0095 6783 tmp = tcg_temp_new_i32();
8f8e3aa4 6784 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6785 }
8f8e3aa4 6786 tmp3 = neon_load_reg(rm, 1);
9ef39277 6787 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6788 tcg_temp_free_i32(tmp5);
6789 tcg_temp_free_i32(tmp4);
8f8e3aa4 6790 neon_store_reg(rd, 0, tmp2);
3018f259 6791 neon_store_reg(rd, 1, tmp3);
7d1b0095 6792 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6793 } else if ((insn & 0x380) == 0) {
6794 /* VDUP */
133da6aa
JR
6795 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6796 return 1;
6797 }
9ee6e8bb 6798 if (insn & (1 << 19)) {
dd8fbd78 6799 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6800 } else {
dd8fbd78 6801 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6802 }
6803 if (insn & (1 << 16)) {
dd8fbd78 6804 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6805 } else if (insn & (1 << 17)) {
6806 if ((insn >> 18) & 1)
dd8fbd78 6807 gen_neon_dup_high16(tmp);
9ee6e8bb 6808 else
dd8fbd78 6809 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6810 }
6811 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6812 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6813 tcg_gen_mov_i32(tmp2, tmp);
6814 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6815 }
7d1b0095 6816 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6817 } else {
6818 return 1;
6819 }
6820 }
6821 }
6822 return 0;
6823}
6824
0ecb72a5 6825static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6826{
4b6a83fb
PM
6827 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6828 const ARMCPRegInfo *ri;
9ee6e8bb
PB
6829
6830 cpnum = (insn >> 8) & 0xf;
6831 if (arm_feature(env, ARM_FEATURE_XSCALE)
6832 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6833 return 1;
6834
4b6a83fb 6835 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6836 switch (cpnum) {
6837 case 0:
6838 case 1:
6839 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6840 return disas_iwmmxt_insn(env, s, insn);
6841 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6842 return disas_dsp_insn(env, s, insn);
6843 }
6844 return 1;
4b6a83fb
PM
6845 default:
6846 break;
6847 }
6848
6849 /* Otherwise treat as a generic register access */
6850 is64 = (insn & (1 << 25)) == 0;
6851 if (!is64 && ((insn & (1 << 4)) == 0)) {
6852 /* cdp */
6853 return 1;
6854 }
6855
6856 crm = insn & 0xf;
6857 if (is64) {
6858 crn = 0;
6859 opc1 = (insn >> 4) & 0xf;
6860 opc2 = 0;
6861 rt2 = (insn >> 16) & 0xf;
6862 } else {
6863 crn = (insn >> 16) & 0xf;
6864 opc1 = (insn >> 21) & 7;
6865 opc2 = (insn >> 5) & 7;
6866 rt2 = 0;
6867 }
6868 isread = (insn >> 20) & 1;
6869 rt = (insn >> 12) & 0xf;
6870
60322b39 6871 ri = get_arm_cp_reginfo(s->cp_regs,
4b6a83fb
PM
6872 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6873 if (ri) {
6874 /* Check access permissions */
60322b39 6875 if (!cp_access_ok(s->current_pl, ri, isread)) {
4b6a83fb
PM
6876 return 1;
6877 }
6878
f59df3f2
PM
6879 if (ri->accessfn) {
6880 /* Emit code to perform further access permissions checks at
6881 * runtime; this may result in an exception.
6882 */
6883 TCGv_ptr tmpptr;
8bcbf37c
PM
6884 TCGv_i32 tcg_syn;
6885 uint32_t syndrome;
6886
6887 /* Note that since we are an implementation which takes an
6888 * exception on a trapped conditional instruction only if the
6889 * instruction passes its condition code check, we can take
6890 * advantage of the clause in the ARM ARM that allows us to set
6891 * the COND field in the instruction to 0xE in all cases.
6892 * We could fish the actual condition out of the insn (ARM)
6893 * or the condexec bits (Thumb) but it isn't necessary.
6894 */
6895 switch (cpnum) {
6896 case 14:
6897 if (is64) {
6898 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
6899 isread, s->thumb);
6900 } else {
6901 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
6902 rt, isread, s->thumb);
6903 }
6904 break;
6905 case 15:
6906 if (is64) {
6907 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
6908 isread, s->thumb);
6909 } else {
6910 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
6911 rt, isread, s->thumb);
6912 }
6913 break;
6914 default:
6915 /* ARMv8 defines that only coprocessors 14 and 15 exist,
6916 * so this can only happen if this is an ARMv7 or earlier CPU,
6917 * in which case the syndrome information won't actually be
6918 * guest visible.
6919 */
6920 assert(!arm_feature(env, ARM_FEATURE_V8));
6921 syndrome = syn_uncategorized();
6922 break;
6923 }
6924
f59df3f2
PM
6925 gen_set_pc_im(s, s->pc);
6926 tmpptr = tcg_const_ptr(ri);
8bcbf37c
PM
6927 tcg_syn = tcg_const_i32(syndrome);
6928 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
f59df3f2 6929 tcg_temp_free_ptr(tmpptr);
8bcbf37c 6930 tcg_temp_free_i32(tcg_syn);
f59df3f2
PM
6931 }
6932
4b6a83fb
PM
6933 /* Handle special cases first */
6934 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6935 case ARM_CP_NOP:
6936 return 0;
6937 case ARM_CP_WFI:
6938 if (isread) {
6939 return 1;
6940 }
eaed129d 6941 gen_set_pc_im(s, s->pc);
4b6a83fb 6942 s->is_jmp = DISAS_WFI;
2bee5105 6943 return 0;
4b6a83fb
PM
6944 default:
6945 break;
6946 }
6947
2452731c
PM
6948 if (use_icount && (ri->type & ARM_CP_IO)) {
6949 gen_io_start();
6950 }
6951
4b6a83fb
PM
6952 if (isread) {
6953 /* Read */
6954 if (is64) {
6955 TCGv_i64 tmp64;
6956 TCGv_i32 tmp;
6957 if (ri->type & ARM_CP_CONST) {
6958 tmp64 = tcg_const_i64(ri->resetvalue);
6959 } else if (ri->readfn) {
6960 TCGv_ptr tmpptr;
4b6a83fb
PM
6961 tmp64 = tcg_temp_new_i64();
6962 tmpptr = tcg_const_ptr(ri);
6963 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6964 tcg_temp_free_ptr(tmpptr);
6965 } else {
6966 tmp64 = tcg_temp_new_i64();
6967 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6968 }
6969 tmp = tcg_temp_new_i32();
6970 tcg_gen_trunc_i64_i32(tmp, tmp64);
6971 store_reg(s, rt, tmp);
6972 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6973 tmp = tcg_temp_new_i32();
4b6a83fb 6974 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6975 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6976 store_reg(s, rt2, tmp);
6977 } else {
39d5492a 6978 TCGv_i32 tmp;
4b6a83fb
PM
6979 if (ri->type & ARM_CP_CONST) {
6980 tmp = tcg_const_i32(ri->resetvalue);
6981 } else if (ri->readfn) {
6982 TCGv_ptr tmpptr;
4b6a83fb
PM
6983 tmp = tcg_temp_new_i32();
6984 tmpptr = tcg_const_ptr(ri);
6985 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6986 tcg_temp_free_ptr(tmpptr);
6987 } else {
6988 tmp = load_cpu_offset(ri->fieldoffset);
6989 }
6990 if (rt == 15) {
6991 /* Destination register of r15 for 32 bit loads sets
6992 * the condition codes from the high 4 bits of the value
6993 */
6994 gen_set_nzcv(tmp);
6995 tcg_temp_free_i32(tmp);
6996 } else {
6997 store_reg(s, rt, tmp);
6998 }
6999 }
7000 } else {
7001 /* Write */
7002 if (ri->type & ARM_CP_CONST) {
7003 /* If not forbidden by access permissions, treat as WI */
7004 return 0;
7005 }
7006
7007 if (is64) {
39d5492a 7008 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7009 TCGv_i64 tmp64 = tcg_temp_new_i64();
7010 tmplo = load_reg(s, rt);
7011 tmphi = load_reg(s, rt2);
7012 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7013 tcg_temp_free_i32(tmplo);
7014 tcg_temp_free_i32(tmphi);
7015 if (ri->writefn) {
7016 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7017 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7018 tcg_temp_free_ptr(tmpptr);
7019 } else {
7020 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7021 }
7022 tcg_temp_free_i64(tmp64);
7023 } else {
7024 if (ri->writefn) {
39d5492a 7025 TCGv_i32 tmp;
4b6a83fb 7026 TCGv_ptr tmpptr;
4b6a83fb
PM
7027 tmp = load_reg(s, rt);
7028 tmpptr = tcg_const_ptr(ri);
7029 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7030 tcg_temp_free_ptr(tmpptr);
7031 tcg_temp_free_i32(tmp);
7032 } else {
39d5492a 7033 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7034 store_cpu_offset(tmp, ri->fieldoffset);
7035 }
7036 }
2452731c
PM
7037 }
7038
7039 if (use_icount && (ri->type & ARM_CP_IO)) {
7040 /* I/O operations must end the TB here (whether read or write) */
7041 gen_io_end();
7042 gen_lookup_tb(s);
7043 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7044 /* We default to ending the TB on a coprocessor register write,
7045 * but allow this to be suppressed by the register definition
7046 * (usually only necessary to work around guest bugs).
7047 */
2452731c 7048 gen_lookup_tb(s);
4b6a83fb 7049 }
2452731c 7050
4b6a83fb
PM
7051 return 0;
7052 }
7053
626187d8
PM
7054 /* Unknown register; this might be a guest error or a QEMU
7055 * unimplemented feature.
7056 */
7057 if (is64) {
7058 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7059 "64 bit system register cp:%d opc1: %d crm:%d\n",
7060 isread ? "read" : "write", cpnum, opc1, crm);
7061 } else {
7062 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7063 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
7064 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
7065 }
7066
4a9a539f 7067 return 1;
9ee6e8bb
PB
7068}
7069
5e3f878a
PB
7070
7071/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7072static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7073{
39d5492a 7074 TCGv_i32 tmp;
7d1b0095 7075 tmp = tcg_temp_new_i32();
5e3f878a
PB
7076 tcg_gen_trunc_i64_i32(tmp, val);
7077 store_reg(s, rlow, tmp);
7d1b0095 7078 tmp = tcg_temp_new_i32();
5e3f878a
PB
7079 tcg_gen_shri_i64(val, val, 32);
7080 tcg_gen_trunc_i64_i32(tmp, val);
7081 store_reg(s, rhigh, tmp);
7082}
7083
7084/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7085static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7086{
a7812ae4 7087 TCGv_i64 tmp;
39d5492a 7088 TCGv_i32 tmp2;
5e3f878a 7089
36aa55dc 7090 /* Load value and extend to 64 bits. */
a7812ae4 7091 tmp = tcg_temp_new_i64();
5e3f878a
PB
7092 tmp2 = load_reg(s, rlow);
7093 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7094 tcg_temp_free_i32(tmp2);
5e3f878a 7095 tcg_gen_add_i64(val, val, tmp);
b75263d6 7096 tcg_temp_free_i64(tmp);
5e3f878a
PB
7097}
7098
7099/* load and add a 64-bit value from a register pair. */
a7812ae4 7100static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7101{
a7812ae4 7102 TCGv_i64 tmp;
39d5492a
PM
7103 TCGv_i32 tmpl;
7104 TCGv_i32 tmph;
5e3f878a
PB
7105
7106 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7107 tmpl = load_reg(s, rlow);
7108 tmph = load_reg(s, rhigh);
a7812ae4 7109 tmp = tcg_temp_new_i64();
36aa55dc 7110 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7111 tcg_temp_free_i32(tmpl);
7112 tcg_temp_free_i32(tmph);
5e3f878a 7113 tcg_gen_add_i64(val, val, tmp);
b75263d6 7114 tcg_temp_free_i64(tmp);
5e3f878a
PB
7115}
7116
c9f10124 7117/* Set N and Z flags from hi|lo. */
39d5492a 7118static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7119{
c9f10124
RH
 /* cpu_NF takes the high word (whose top bit is the sign of the
  * 64-bit result); cpu_ZF takes the OR of both words, which is zero
  * exactly when the whole 64-bit value is zero.
  */
7120    tcg_gen_mov_i32(cpu_NF, hi);
7121    tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7122}
7123
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword
     * (for size 3 the second word is loaded into rt2).
     */
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        /* Load the second word from addr+4 and record the whole
         * 64-bit value so the matching store-exclusive can check
         * that it is unchanged.
         */
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Record the monitored address; a store-exclusive to a different
     * address will fail.
     */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7169
/* Clear the exclusive monitor (CLREX).  -1 is used as the "no
 * outstanding exclusive access" marker since it can never match a
 * zero-extended 32-bit address.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7174
7175#ifdef CONFIG_USER_ONLY
/* User-mode emulation: the store-exclusive cannot be done inline
 * atomically, so stash the address, size and register numbers where
 * the EXCP_STREX handler can find them and raise the exception.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    /* Pack size and the rd/rt/rt2 register numbers into one word. */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
7184#else
/* System-mode store-exclusive: succeed iff the monitored address is
 * still set and the memory still holds the value seen by the
 * load-exclusive.  Writes 0 to rd on success, 1 on failure.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();

    /* First check: is this still the monitored address? */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Second check: re-load the memory and compare against the value
     * recorded by the load-exclusive.
     */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        /* Doubleword: also re-load the second word from addr+4. */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Both checks passed: perform the store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* Second word of a strexd goes to addr+4 from rt2. */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, IS_USER(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Success or failure, the exclusive monitor is now clear. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7268#endif
7269
81465888
PM
7270/* gen_srs:
7271 * @env: CPUARMState
7272 * @s: DisasContext
7273 * @mode: mode field from insn (which stack to store to)
7274 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7275 * @writeback: true if writeback bit set
7276 *
7277 * Generate code for the SRS (Store Return State) insn.
7278 */
7279static void gen_srs(DisasContext *s,
7280 uint32_t mode, uint32_t amode, bool writeback)
7281{
7282 int32_t offset;
7283 TCGv_i32 addr = tcg_temp_new_i32();
7284 TCGv_i32 tmp = tcg_const_i32(mode);
7285 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7286 tcg_temp_free_i32(tmp);
7287 switch (amode) {
7288 case 0: /* DA */
7289 offset = -4;
7290 break;
7291 case 1: /* IA */
7292 offset = 0;
7293 break;
7294 case 2: /* DB */
7295 offset = -8;
7296 break;
7297 case 3: /* IB */
7298 offset = 4;
7299 break;
7300 default:
7301 abort();
7302 }
7303 tcg_gen_addi_i32(addr, addr, offset);
7304 tmp = load_reg(s, 14);
08307563 7305 gen_aa32_st32(tmp, addr, 0);
5a839c0d 7306 tcg_temp_free_i32(tmp);
81465888
PM
7307 tmp = load_cpu_field(spsr);
7308 tcg_gen_addi_i32(addr, addr, 4);
08307563 7309 gen_aa32_st32(tmp, addr, 0);
5a839c0d 7310 tcg_temp_free_i32(tmp);
81465888
PM
7311 if (writeback) {
7312 switch (amode) {
7313 case 0:
7314 offset = -8;
7315 break;
7316 case 1:
7317 offset = 4;
7318 break;
7319 case 2:
7320 offset = -4;
7321 break;
7322 case 3:
7323 offset = 0;
7324 break;
7325 default:
7326 abort();
7327 }
7328 tcg_gen_addi_i32(addr, addr, offset);
7329 tmp = tcg_const_i32(mode);
7330 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7331 tcg_temp_free_i32(tmp);
7332 }
7333 tcg_temp_free_i32(addr);
7334}
7335
0ecb72a5 7336static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
7337{
7338 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7339 TCGv_i32 tmp;
7340 TCGv_i32 tmp2;
7341 TCGv_i32 tmp3;
7342 TCGv_i32 addr;
a7812ae4 7343 TCGv_i64 tmp64;
9ee6e8bb 7344
d31dd73e 7345 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
7346 s->pc += 4;
7347
7348 /* M variants do not implement ARM mode. */
7349 if (IS_M(env))
7350 goto illegal_op;
7351 cond = insn >> 28;
7352 if (cond == 0xf){
be5e7a76
DES
7353 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7354 * choose to UNDEF. In ARMv5 and above the space is used
7355 * for miscellaneous unconditional instructions.
7356 */
7357 ARCH(5);
7358
9ee6e8bb
PB
7359 /* Unconditional instructions. */
7360 if (((insn >> 25) & 7) == 1) {
7361 /* NEON Data processing. */
7362 if (!arm_feature(env, ARM_FEATURE_NEON))
7363 goto illegal_op;
7364
7365 if (disas_neon_data_insn(env, s, insn))
7366 goto illegal_op;
7367 return;
7368 }
7369 if ((insn & 0x0f100000) == 0x04000000) {
7370 /* NEON load/store. */
7371 if (!arm_feature(env, ARM_FEATURE_NEON))
7372 goto illegal_op;
7373
7374 if (disas_neon_ls_insn(env, s, insn))
7375 goto illegal_op;
7376 return;
7377 }
6a57f3eb
WN
7378 if ((insn & 0x0f000e10) == 0x0e000a00) {
7379 /* VFP. */
7380 if (disas_vfp_insn(env, s, insn)) {
7381 goto illegal_op;
7382 }
7383 return;
7384 }
3d185e5d
PM
7385 if (((insn & 0x0f30f000) == 0x0510f000) ||
7386 ((insn & 0x0f30f010) == 0x0710f000)) {
7387 if ((insn & (1 << 22)) == 0) {
7388 /* PLDW; v7MP */
7389 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7390 goto illegal_op;
7391 }
7392 }
7393 /* Otherwise PLD; v5TE+ */
be5e7a76 7394 ARCH(5TE);
3d185e5d
PM
7395 return;
7396 }
7397 if (((insn & 0x0f70f000) == 0x0450f000) ||
7398 ((insn & 0x0f70f010) == 0x0650f000)) {
7399 ARCH(7);
7400 return; /* PLI; V7 */
7401 }
7402 if (((insn & 0x0f700000) == 0x04100000) ||
7403 ((insn & 0x0f700010) == 0x06100000)) {
7404 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7405 goto illegal_op;
7406 }
7407 return; /* v7MP: Unallocated memory hint: must NOP */
7408 }
7409
7410 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7411 ARCH(6);
7412 /* setend */
10962fd5
PM
7413 if (((insn >> 9) & 1) != s->bswap_code) {
7414 /* Dynamic endianness switching not implemented. */
e0c270d9 7415 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7416 goto illegal_op;
7417 }
7418 return;
7419 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7420 switch ((insn >> 4) & 0xf) {
7421 case 1: /* clrex */
7422 ARCH(6K);
426f5abc 7423 gen_clrex(s);
9ee6e8bb
PB
7424 return;
7425 case 4: /* dsb */
7426 case 5: /* dmb */
7427 case 6: /* isb */
7428 ARCH(7);
7429 /* We don't emulate caches so these are a no-op. */
7430 return;
7431 default:
7432 goto illegal_op;
7433 }
7434 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7435 /* srs */
81465888 7436 if (IS_USER(s)) {
9ee6e8bb 7437 goto illegal_op;
9ee6e8bb 7438 }
81465888
PM
7439 ARCH(6);
7440 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7441 return;
ea825eee 7442 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7443 /* rfe */
c67b6b71 7444 int32_t offset;
9ee6e8bb
PB
7445 if (IS_USER(s))
7446 goto illegal_op;
7447 ARCH(6);
7448 rn = (insn >> 16) & 0xf;
b0109805 7449 addr = load_reg(s, rn);
9ee6e8bb
PB
7450 i = (insn >> 23) & 3;
7451 switch (i) {
b0109805 7452 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7453 case 1: offset = 0; break; /* IA */
7454 case 2: offset = -8; break; /* DB */
b0109805 7455 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7456 default: abort();
7457 }
7458 if (offset)
b0109805
PB
7459 tcg_gen_addi_i32(addr, addr, offset);
7460 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7461 tmp = tcg_temp_new_i32();
08307563 7462 gen_aa32_ld32u(tmp, addr, 0);
b0109805 7463 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7464 tmp2 = tcg_temp_new_i32();
08307563 7465 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
7466 if (insn & (1 << 21)) {
7467 /* Base writeback. */
7468 switch (i) {
b0109805 7469 case 0: offset = -8; break;
c67b6b71
FN
7470 case 1: offset = 4; break;
7471 case 2: offset = -4; break;
b0109805 7472 case 3: offset = 0; break;
9ee6e8bb
PB
7473 default: abort();
7474 }
7475 if (offset)
b0109805
PB
7476 tcg_gen_addi_i32(addr, addr, offset);
7477 store_reg(s, rn, addr);
7478 } else {
7d1b0095 7479 tcg_temp_free_i32(addr);
9ee6e8bb 7480 }
b0109805 7481 gen_rfe(s, tmp, tmp2);
c67b6b71 7482 return;
9ee6e8bb
PB
7483 } else if ((insn & 0x0e000000) == 0x0a000000) {
7484 /* branch link and change to thumb (blx <offset>) */
7485 int32_t offset;
7486
7487 val = (uint32_t)s->pc;
7d1b0095 7488 tmp = tcg_temp_new_i32();
d9ba4830
PB
7489 tcg_gen_movi_i32(tmp, val);
7490 store_reg(s, 14, tmp);
9ee6e8bb
PB
7491 /* Sign-extend the 24-bit offset */
7492 offset = (((int32_t)insn) << 8) >> 8;
7493 /* offset * 4 + bit24 * 2 + (thumb bit) */
7494 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7495 /* pipeline offset */
7496 val += 4;
be5e7a76 7497 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7498 gen_bx_im(s, val);
9ee6e8bb
PB
7499 return;
7500 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7501 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7502 /* iWMMXt register transfer. */
7503 if (env->cp15.c15_cpar & (1 << 1))
7504 if (!disas_iwmmxt_insn(env, s, insn))
7505 return;
7506 }
7507 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7508 /* Coprocessor double register transfer. */
be5e7a76 7509 ARCH(5TE);
9ee6e8bb
PB
7510 } else if ((insn & 0x0f000010) == 0x0e000010) {
7511 /* Additional coprocessor register transfer. */
7997d92f 7512 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7513 uint32_t mask;
7514 uint32_t val;
7515 /* cps (privileged) */
7516 if (IS_USER(s))
7517 return;
7518 mask = val = 0;
7519 if (insn & (1 << 19)) {
7520 if (insn & (1 << 8))
7521 mask |= CPSR_A;
7522 if (insn & (1 << 7))
7523 mask |= CPSR_I;
7524 if (insn & (1 << 6))
7525 mask |= CPSR_F;
7526 if (insn & (1 << 18))
7527 val |= mask;
7528 }
7997d92f 7529 if (insn & (1 << 17)) {
9ee6e8bb
PB
7530 mask |= CPSR_M;
7531 val |= (insn & 0x1f);
7532 }
7533 if (mask) {
2fbac54b 7534 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7535 }
7536 return;
7537 }
7538 goto illegal_op;
7539 }
7540 if (cond != 0xe) {
7541 /* if not always execute, we generate a conditional jump to
7542 next instruction */
7543 s->condlabel = gen_new_label();
39fb730a 7544 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7545 s->condjmp = 1;
7546 }
7547 if ((insn & 0x0f900000) == 0x03000000) {
7548 if ((insn & (1 << 21)) == 0) {
7549 ARCH(6T2);
7550 rd = (insn >> 12) & 0xf;
7551 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7552 if ((insn & (1 << 22)) == 0) {
7553 /* MOVW */
7d1b0095 7554 tmp = tcg_temp_new_i32();
5e3f878a 7555 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7556 } else {
7557 /* MOVT */
5e3f878a 7558 tmp = load_reg(s, rd);
86831435 7559 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7560 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7561 }
5e3f878a 7562 store_reg(s, rd, tmp);
9ee6e8bb
PB
7563 } else {
7564 if (((insn >> 12) & 0xf) != 0xf)
7565 goto illegal_op;
7566 if (((insn >> 16) & 0xf) == 0) {
7567 gen_nop_hint(s, insn & 0xff);
7568 } else {
7569 /* CPSR = immediate */
7570 val = insn & 0xff;
7571 shift = ((insn >> 8) & 0xf) * 2;
7572 if (shift)
7573 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7574 i = ((insn & (1 << 22)) != 0);
2fbac54b 7575 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
7576 goto illegal_op;
7577 }
7578 }
7579 } else if ((insn & 0x0f900000) == 0x01000000
7580 && (insn & 0x00000090) != 0x00000090) {
7581 /* miscellaneous instructions */
7582 op1 = (insn >> 21) & 3;
7583 sh = (insn >> 4) & 0xf;
7584 rm = insn & 0xf;
7585 switch (sh) {
7586 case 0x0: /* move program status register */
7587 if (op1 & 1) {
7588 /* PSR = reg */
2fbac54b 7589 tmp = load_reg(s, rm);
9ee6e8bb 7590 i = ((op1 & 2) != 0);
2fbac54b 7591 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7592 goto illegal_op;
7593 } else {
7594 /* reg = PSR */
7595 rd = (insn >> 12) & 0xf;
7596 if (op1 & 2) {
7597 if (IS_USER(s))
7598 goto illegal_op;
d9ba4830 7599 tmp = load_cpu_field(spsr);
9ee6e8bb 7600 } else {
7d1b0095 7601 tmp = tcg_temp_new_i32();
9ef39277 7602 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7603 }
d9ba4830 7604 store_reg(s, rd, tmp);
9ee6e8bb
PB
7605 }
7606 break;
7607 case 0x1:
7608 if (op1 == 1) {
7609 /* branch/exchange thumb (bx). */
be5e7a76 7610 ARCH(4T);
d9ba4830
PB
7611 tmp = load_reg(s, rm);
7612 gen_bx(s, tmp);
9ee6e8bb
PB
7613 } else if (op1 == 3) {
7614 /* clz */
be5e7a76 7615 ARCH(5);
9ee6e8bb 7616 rd = (insn >> 12) & 0xf;
1497c961
PB
7617 tmp = load_reg(s, rm);
7618 gen_helper_clz(tmp, tmp);
7619 store_reg(s, rd, tmp);
9ee6e8bb
PB
7620 } else {
7621 goto illegal_op;
7622 }
7623 break;
7624 case 0x2:
7625 if (op1 == 1) {
7626 ARCH(5J); /* bxj */
7627 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7628 tmp = load_reg(s, rm);
7629 gen_bx(s, tmp);
9ee6e8bb
PB
7630 } else {
7631 goto illegal_op;
7632 }
7633 break;
7634 case 0x3:
7635 if (op1 != 1)
7636 goto illegal_op;
7637
be5e7a76 7638 ARCH(5);
9ee6e8bb 7639 /* branch link/exchange thumb (blx) */
d9ba4830 7640 tmp = load_reg(s, rm);
7d1b0095 7641 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7642 tcg_gen_movi_i32(tmp2, s->pc);
7643 store_reg(s, 14, tmp2);
7644 gen_bx(s, tmp);
9ee6e8bb 7645 break;
eb0ecd5a
WN
7646 case 0x4:
7647 {
7648 /* crc32/crc32c */
7649 uint32_t c = extract32(insn, 8, 4);
7650
7651 /* Check this CPU supports ARMv8 CRC instructions.
7652 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7653 * Bits 8, 10 and 11 should be zero.
7654 */
7655 if (!arm_feature(env, ARM_FEATURE_CRC) || op1 == 0x3 ||
7656 (c & 0xd) != 0) {
7657 goto illegal_op;
7658 }
7659
7660 rn = extract32(insn, 16, 4);
7661 rd = extract32(insn, 12, 4);
7662
7663 tmp = load_reg(s, rn);
7664 tmp2 = load_reg(s, rm);
7665 tmp3 = tcg_const_i32(1 << op1);
7666 if (c & 0x2) {
7667 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7668 } else {
7669 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7670 }
7671 tcg_temp_free_i32(tmp2);
7672 tcg_temp_free_i32(tmp3);
7673 store_reg(s, rd, tmp);
7674 break;
7675 }
9ee6e8bb 7676 case 0x5: /* saturating add/subtract */
be5e7a76 7677 ARCH(5TE);
9ee6e8bb
PB
7678 rd = (insn >> 12) & 0xf;
7679 rn = (insn >> 16) & 0xf;
b40d0353 7680 tmp = load_reg(s, rm);
5e3f878a 7681 tmp2 = load_reg(s, rn);
9ee6e8bb 7682 if (op1 & 2)
9ef39277 7683 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7684 if (op1 & 1)
9ef39277 7685 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7686 else
9ef39277 7687 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7688 tcg_temp_free_i32(tmp2);
5e3f878a 7689 store_reg(s, rd, tmp);
9ee6e8bb 7690 break;
49e14940 7691 case 7:
d4a2dc67
PM
7692 {
7693 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
49e14940
AL
7694 /* SMC instruction (op1 == 3)
7695 and undefined instructions (op1 == 0 || op1 == 2)
7696 will trap */
7697 if (op1 != 1) {
7698 goto illegal_op;
7699 }
7700 /* bkpt */
be5e7a76 7701 ARCH(5);
d4a2dc67 7702 gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false));
9ee6e8bb 7703 break;
d4a2dc67 7704 }
9ee6e8bb
PB
7705 case 0x8: /* signed multiply */
7706 case 0xa:
7707 case 0xc:
7708 case 0xe:
be5e7a76 7709 ARCH(5TE);
9ee6e8bb
PB
7710 rs = (insn >> 8) & 0xf;
7711 rn = (insn >> 12) & 0xf;
7712 rd = (insn >> 16) & 0xf;
7713 if (op1 == 1) {
7714 /* (32 * 16) >> 16 */
5e3f878a
PB
7715 tmp = load_reg(s, rm);
7716 tmp2 = load_reg(s, rs);
9ee6e8bb 7717 if (sh & 4)
5e3f878a 7718 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7719 else
5e3f878a 7720 gen_sxth(tmp2);
a7812ae4
PB
7721 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7722 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7723 tmp = tcg_temp_new_i32();
a7812ae4 7724 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7725 tcg_temp_free_i64(tmp64);
9ee6e8bb 7726 if ((sh & 2) == 0) {
5e3f878a 7727 tmp2 = load_reg(s, rn);
9ef39277 7728 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7729 tcg_temp_free_i32(tmp2);
9ee6e8bb 7730 }
5e3f878a 7731 store_reg(s, rd, tmp);
9ee6e8bb
PB
7732 } else {
7733 /* 16 * 16 */
5e3f878a
PB
7734 tmp = load_reg(s, rm);
7735 tmp2 = load_reg(s, rs);
7736 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7737 tcg_temp_free_i32(tmp2);
9ee6e8bb 7738 if (op1 == 2) {
a7812ae4
PB
7739 tmp64 = tcg_temp_new_i64();
7740 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7741 tcg_temp_free_i32(tmp);
a7812ae4
PB
7742 gen_addq(s, tmp64, rn, rd);
7743 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7744 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7745 } else {
7746 if (op1 == 0) {
5e3f878a 7747 tmp2 = load_reg(s, rn);
9ef39277 7748 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7749 tcg_temp_free_i32(tmp2);
9ee6e8bb 7750 }
5e3f878a 7751 store_reg(s, rd, tmp);
9ee6e8bb
PB
7752 }
7753 }
7754 break;
7755 default:
7756 goto illegal_op;
7757 }
7758 } else if (((insn & 0x0e000000) == 0 &&
7759 (insn & 0x00000090) != 0x90) ||
7760 ((insn & 0x0e000000) == (1 << 25))) {
7761 int set_cc, logic_cc, shiftop;
7762
7763 op1 = (insn >> 21) & 0xf;
7764 set_cc = (insn >> 20) & 1;
7765 logic_cc = table_logic_cc[op1] & set_cc;
7766
7767 /* data processing instruction */
7768 if (insn & (1 << 25)) {
7769 /* immediate operand */
7770 val = insn & 0xff;
7771 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7772 if (shift) {
9ee6e8bb 7773 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7774 }
7d1b0095 7775 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7776 tcg_gen_movi_i32(tmp2, val);
7777 if (logic_cc && shift) {
7778 gen_set_CF_bit31(tmp2);
7779 }
9ee6e8bb
PB
7780 } else {
7781 /* register */
7782 rm = (insn) & 0xf;
e9bb4aa9 7783 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7784 shiftop = (insn >> 5) & 3;
7785 if (!(insn & (1 << 4))) {
7786 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7787 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7788 } else {
7789 rs = (insn >> 8) & 0xf;
8984bd2e 7790 tmp = load_reg(s, rs);
e9bb4aa9 7791 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7792 }
7793 }
7794 if (op1 != 0x0f && op1 != 0x0d) {
7795 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7796 tmp = load_reg(s, rn);
7797 } else {
39d5492a 7798 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7799 }
7800 rd = (insn >> 12) & 0xf;
7801 switch(op1) {
7802 case 0x00:
e9bb4aa9
JR
7803 tcg_gen_and_i32(tmp, tmp, tmp2);
7804 if (logic_cc) {
7805 gen_logic_CC(tmp);
7806 }
21aeb343 7807 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7808 break;
7809 case 0x01:
e9bb4aa9
JR
7810 tcg_gen_xor_i32(tmp, tmp, tmp2);
7811 if (logic_cc) {
7812 gen_logic_CC(tmp);
7813 }
21aeb343 7814 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7815 break;
7816 case 0x02:
7817 if (set_cc && rd == 15) {
7818 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7819 if (IS_USER(s)) {
9ee6e8bb 7820 goto illegal_op;
e9bb4aa9 7821 }
72485ec4 7822 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7823 gen_exception_return(s, tmp);
9ee6e8bb 7824 } else {
e9bb4aa9 7825 if (set_cc) {
72485ec4 7826 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7827 } else {
7828 tcg_gen_sub_i32(tmp, tmp, tmp2);
7829 }
21aeb343 7830 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7831 }
7832 break;
7833 case 0x03:
e9bb4aa9 7834 if (set_cc) {
72485ec4 7835 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7836 } else {
7837 tcg_gen_sub_i32(tmp, tmp2, tmp);
7838 }
21aeb343 7839 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7840 break;
7841 case 0x04:
e9bb4aa9 7842 if (set_cc) {
72485ec4 7843 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7844 } else {
7845 tcg_gen_add_i32(tmp, tmp, tmp2);
7846 }
21aeb343 7847 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7848 break;
7849 case 0x05:
e9bb4aa9 7850 if (set_cc) {
49b4c31e 7851 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7852 } else {
7853 gen_add_carry(tmp, tmp, tmp2);
7854 }
21aeb343 7855 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7856 break;
7857 case 0x06:
e9bb4aa9 7858 if (set_cc) {
2de68a49 7859 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7860 } else {
7861 gen_sub_carry(tmp, tmp, tmp2);
7862 }
21aeb343 7863 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7864 break;
7865 case 0x07:
e9bb4aa9 7866 if (set_cc) {
2de68a49 7867 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7868 } else {
7869 gen_sub_carry(tmp, tmp2, tmp);
7870 }
21aeb343 7871 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7872 break;
7873 case 0x08:
7874 if (set_cc) {
e9bb4aa9
JR
7875 tcg_gen_and_i32(tmp, tmp, tmp2);
7876 gen_logic_CC(tmp);
9ee6e8bb 7877 }
7d1b0095 7878 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7879 break;
7880 case 0x09:
7881 if (set_cc) {
e9bb4aa9
JR
7882 tcg_gen_xor_i32(tmp, tmp, tmp2);
7883 gen_logic_CC(tmp);
9ee6e8bb 7884 }
7d1b0095 7885 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7886 break;
7887 case 0x0a:
7888 if (set_cc) {
72485ec4 7889 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7890 }
7d1b0095 7891 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7892 break;
7893 case 0x0b:
7894 if (set_cc) {
72485ec4 7895 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7896 }
7d1b0095 7897 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7898 break;
7899 case 0x0c:
e9bb4aa9
JR
7900 tcg_gen_or_i32(tmp, tmp, tmp2);
7901 if (logic_cc) {
7902 gen_logic_CC(tmp);
7903 }
21aeb343 7904 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7905 break;
7906 case 0x0d:
7907 if (logic_cc && rd == 15) {
7908 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7909 if (IS_USER(s)) {
9ee6e8bb 7910 goto illegal_op;
e9bb4aa9
JR
7911 }
7912 gen_exception_return(s, tmp2);
9ee6e8bb 7913 } else {
e9bb4aa9
JR
7914 if (logic_cc) {
7915 gen_logic_CC(tmp2);
7916 }
21aeb343 7917 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7918 }
7919 break;
7920 case 0x0e:
f669df27 7921 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7922 if (logic_cc) {
7923 gen_logic_CC(tmp);
7924 }
21aeb343 7925 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7926 break;
7927 default:
7928 case 0x0f:
e9bb4aa9
JR
7929 tcg_gen_not_i32(tmp2, tmp2);
7930 if (logic_cc) {
7931 gen_logic_CC(tmp2);
7932 }
21aeb343 7933 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7934 break;
7935 }
e9bb4aa9 7936 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7937 tcg_temp_free_i32(tmp2);
e9bb4aa9 7938 }
9ee6e8bb
PB
7939 } else {
7940 /* other instructions */
7941 op1 = (insn >> 24) & 0xf;
7942 switch(op1) {
7943 case 0x0:
7944 case 0x1:
7945 /* multiplies, extra load/stores */
7946 sh = (insn >> 5) & 3;
7947 if (sh == 0) {
7948 if (op1 == 0x0) {
7949 rd = (insn >> 16) & 0xf;
7950 rn = (insn >> 12) & 0xf;
7951 rs = (insn >> 8) & 0xf;
7952 rm = (insn) & 0xf;
7953 op1 = (insn >> 20) & 0xf;
7954 switch (op1) {
7955 case 0: case 1: case 2: case 3: case 6:
7956 /* 32 bit mul */
5e3f878a
PB
7957 tmp = load_reg(s, rs);
7958 tmp2 = load_reg(s, rm);
7959 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7960 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7961 if (insn & (1 << 22)) {
7962 /* Subtract (mls) */
7963 ARCH(6T2);
5e3f878a
PB
7964 tmp2 = load_reg(s, rn);
7965 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7966 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7967 } else if (insn & (1 << 21)) {
7968 /* Add */
5e3f878a
PB
7969 tmp2 = load_reg(s, rn);
7970 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7971 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7972 }
7973 if (insn & (1 << 20))
5e3f878a
PB
7974 gen_logic_CC(tmp);
7975 store_reg(s, rd, tmp);
9ee6e8bb 7976 break;
8aac08b1
AJ
7977 case 4:
7978 /* 64 bit mul double accumulate (UMAAL) */
7979 ARCH(6);
7980 tmp = load_reg(s, rs);
7981 tmp2 = load_reg(s, rm);
7982 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7983 gen_addq_lo(s, tmp64, rn);
7984 gen_addq_lo(s, tmp64, rd);
7985 gen_storeq_reg(s, rn, rd, tmp64);
7986 tcg_temp_free_i64(tmp64);
7987 break;
7988 case 8: case 9: case 10: case 11:
7989 case 12: case 13: case 14: case 15:
7990 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7991 tmp = load_reg(s, rs);
7992 tmp2 = load_reg(s, rm);
8aac08b1 7993 if (insn & (1 << 22)) {
c9f10124 7994 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7995 } else {
c9f10124 7996 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7997 }
7998 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7999 TCGv_i32 al = load_reg(s, rn);
8000 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8001 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8002 tcg_temp_free_i32(al);
8003 tcg_temp_free_i32(ah);
9ee6e8bb 8004 }
8aac08b1 8005 if (insn & (1 << 20)) {
c9f10124 8006 gen_logicq_cc(tmp, tmp2);
8aac08b1 8007 }
c9f10124
RH
8008 store_reg(s, rn, tmp);
8009 store_reg(s, rd, tmp2);
9ee6e8bb 8010 break;
8aac08b1
AJ
8011 default:
8012 goto illegal_op;
9ee6e8bb
PB
8013 }
8014 } else {
8015 rn = (insn >> 16) & 0xf;
8016 rd = (insn >> 12) & 0xf;
8017 if (insn & (1 << 23)) {
8018 /* load/store exclusive */
2359bf80 8019 int op2 = (insn >> 8) & 3;
86753403 8020 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8021
8022 switch (op2) {
8023 case 0: /* lda/stl */
8024 if (op1 == 1) {
8025 goto illegal_op;
8026 }
8027 ARCH(8);
8028 break;
8029 case 1: /* reserved */
8030 goto illegal_op;
8031 case 2: /* ldaex/stlex */
8032 ARCH(8);
8033 break;
8034 case 3: /* ldrex/strex */
8035 if (op1) {
8036 ARCH(6K);
8037 } else {
8038 ARCH(6);
8039 }
8040 break;
8041 }
8042
3174f8e9 8043 addr = tcg_temp_local_new_i32();
98a46317 8044 load_reg_var(s, addr, rn);
2359bf80
MR
8045
8046 /* Since the emulation does not have barriers,
8047 the acquire/release semantics need no special
8048 handling */
8049 if (op2 == 0) {
8050 if (insn & (1 << 20)) {
8051 tmp = tcg_temp_new_i32();
8052 switch (op1) {
8053 case 0: /* lda */
08307563 8054 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
8055 break;
8056 case 2: /* ldab */
08307563 8057 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
8058 break;
8059 case 3: /* ldah */
08307563 8060 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
8061 break;
8062 default:
8063 abort();
8064 }
8065 store_reg(s, rd, tmp);
8066 } else {
8067 rm = insn & 0xf;
8068 tmp = load_reg(s, rm);
8069 switch (op1) {
8070 case 0: /* stl */
08307563 8071 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
8072 break;
8073 case 2: /* stlb */
08307563 8074 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
8075 break;
8076 case 3: /* stlh */
08307563 8077 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
8078 break;
8079 default:
8080 abort();
8081 }
8082 tcg_temp_free_i32(tmp);
8083 }
8084 } else if (insn & (1 << 20)) {
86753403
PB
8085 switch (op1) {
8086 case 0: /* ldrex */
426f5abc 8087 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8088 break;
8089 case 1: /* ldrexd */
426f5abc 8090 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8091 break;
8092 case 2: /* ldrexb */
426f5abc 8093 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8094 break;
8095 case 3: /* ldrexh */
426f5abc 8096 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8097 break;
8098 default:
8099 abort();
8100 }
9ee6e8bb
PB
8101 } else {
8102 rm = insn & 0xf;
86753403
PB
8103 switch (op1) {
8104 case 0: /* strex */
426f5abc 8105 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8106 break;
8107 case 1: /* strexd */
502e64fe 8108 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8109 break;
8110 case 2: /* strexb */
426f5abc 8111 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8112 break;
8113 case 3: /* strexh */
426f5abc 8114 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8115 break;
8116 default:
8117 abort();
8118 }
9ee6e8bb 8119 }
39d5492a 8120 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8121 } else {
8122 /* SWP instruction */
8123 rm = (insn) & 0xf;
8124
8984bd2e
PB
8125 /* ??? This is not really atomic. However we know
8126 we never have multiple CPUs running in parallel,
8127 so it is good enough. */
8128 addr = load_reg(s, rn);
8129 tmp = load_reg(s, rm);
5a839c0d 8130 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8131 if (insn & (1 << 22)) {
08307563
PM
8132 gen_aa32_ld8u(tmp2, addr, IS_USER(s));
8133 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb 8134 } else {
08307563
PM
8135 gen_aa32_ld32u(tmp2, addr, IS_USER(s));
8136 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8137 }
5a839c0d 8138 tcg_temp_free_i32(tmp);
7d1b0095 8139 tcg_temp_free_i32(addr);
8984bd2e 8140 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8141 }
8142 }
8143 } else {
8144 int address_offset;
8145 int load;
8146 /* Misc load/store */
8147 rn = (insn >> 16) & 0xf;
8148 rd = (insn >> 12) & 0xf;
b0109805 8149 addr = load_reg(s, rn);
9ee6e8bb 8150 if (insn & (1 << 24))
b0109805 8151 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
8152 address_offset = 0;
8153 if (insn & (1 << 20)) {
8154 /* load */
5a839c0d 8155 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
8156 switch(sh) {
8157 case 1:
08307563 8158 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8159 break;
8160 case 2:
08307563 8161 gen_aa32_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8162 break;
8163 default:
8164 case 3:
08307563 8165 gen_aa32_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8166 break;
8167 }
8168 load = 1;
8169 } else if (sh & 2) {
be5e7a76 8170 ARCH(5TE);
9ee6e8bb
PB
8171 /* doubleword */
8172 if (sh & 1) {
8173 /* store */
b0109805 8174 tmp = load_reg(s, rd);
08307563 8175 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 8176 tcg_temp_free_i32(tmp);
b0109805
PB
8177 tcg_gen_addi_i32(addr, addr, 4);
8178 tmp = load_reg(s, rd + 1);
08307563 8179 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 8180 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8181 load = 0;
8182 } else {
8183 /* load */
5a839c0d 8184 tmp = tcg_temp_new_i32();
08307563 8185 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8186 store_reg(s, rd, tmp);
8187 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8188 tmp = tcg_temp_new_i32();
08307563 8189 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8190 rd++;
8191 load = 1;
8192 }
8193 address_offset = -4;
8194 } else {
8195 /* store */
b0109805 8196 tmp = load_reg(s, rd);
08307563 8197 gen_aa32_st16(tmp, addr, IS_USER(s));
5a839c0d 8198 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8199 load = 0;
8200 }
8201 /* Perform base writeback before the loaded value to
8202 ensure correct behavior with overlapping index registers.
8203 ldrd with base writeback is is undefined if the
8204 destination and index registers overlap. */
8205 if (!(insn & (1 << 24))) {
b0109805
PB
8206 gen_add_datah_offset(s, insn, address_offset, addr);
8207 store_reg(s, rn, addr);
9ee6e8bb
PB
8208 } else if (insn & (1 << 21)) {
8209 if (address_offset)
b0109805
PB
8210 tcg_gen_addi_i32(addr, addr, address_offset);
8211 store_reg(s, rn, addr);
8212 } else {
7d1b0095 8213 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8214 }
8215 if (load) {
8216 /* Complete the load. */
b0109805 8217 store_reg(s, rd, tmp);
9ee6e8bb
PB
8218 }
8219 }
8220 break;
8221 case 0x4:
8222 case 0x5:
8223 goto do_ldst;
8224 case 0x6:
8225 case 0x7:
8226 if (insn & (1 << 4)) {
8227 ARCH(6);
8228 /* Armv6 Media instructions. */
8229 rm = insn & 0xf;
8230 rn = (insn >> 16) & 0xf;
2c0262af 8231 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8232 rs = (insn >> 8) & 0xf;
8233 switch ((insn >> 23) & 3) {
8234 case 0: /* Parallel add/subtract. */
8235 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8236 tmp = load_reg(s, rn);
8237 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8238 sh = (insn >> 5) & 7;
8239 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8240 goto illegal_op;
6ddbc6e4 8241 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8242 tcg_temp_free_i32(tmp2);
6ddbc6e4 8243 store_reg(s, rd, tmp);
9ee6e8bb
PB
8244 break;
8245 case 1:
8246 if ((insn & 0x00700020) == 0) {
6c95676b 8247 /* Halfword pack. */
3670669c
PB
8248 tmp = load_reg(s, rn);
8249 tmp2 = load_reg(s, rm);
9ee6e8bb 8250 shift = (insn >> 7) & 0x1f;
3670669c
PB
8251 if (insn & (1 << 6)) {
8252 /* pkhtb */
22478e79
AZ
8253 if (shift == 0)
8254 shift = 31;
8255 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8256 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8257 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8258 } else {
8259 /* pkhbt */
22478e79
AZ
8260 if (shift)
8261 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8262 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8263 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8264 }
8265 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8266 tcg_temp_free_i32(tmp2);
3670669c 8267 store_reg(s, rd, tmp);
9ee6e8bb
PB
8268 } else if ((insn & 0x00200020) == 0x00200000) {
8269 /* [us]sat */
6ddbc6e4 8270 tmp = load_reg(s, rm);
9ee6e8bb
PB
8271 shift = (insn >> 7) & 0x1f;
8272 if (insn & (1 << 6)) {
8273 if (shift == 0)
8274 shift = 31;
6ddbc6e4 8275 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8276 } else {
6ddbc6e4 8277 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8278 }
8279 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8280 tmp2 = tcg_const_i32(sh);
8281 if (insn & (1 << 22))
9ef39277 8282 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8283 else
9ef39277 8284 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8285 tcg_temp_free_i32(tmp2);
6ddbc6e4 8286 store_reg(s, rd, tmp);
9ee6e8bb
PB
8287 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8288 /* [us]sat16 */
6ddbc6e4 8289 tmp = load_reg(s, rm);
9ee6e8bb 8290 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8291 tmp2 = tcg_const_i32(sh);
8292 if (insn & (1 << 22))
9ef39277 8293 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8294 else
9ef39277 8295 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8296 tcg_temp_free_i32(tmp2);
6ddbc6e4 8297 store_reg(s, rd, tmp);
9ee6e8bb
PB
8298 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8299 /* Select bytes. */
6ddbc6e4
PB
8300 tmp = load_reg(s, rn);
8301 tmp2 = load_reg(s, rm);
7d1b0095 8302 tmp3 = tcg_temp_new_i32();
0ecb72a5 8303 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8304 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8305 tcg_temp_free_i32(tmp3);
8306 tcg_temp_free_i32(tmp2);
6ddbc6e4 8307 store_reg(s, rd, tmp);
9ee6e8bb 8308 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8309 tmp = load_reg(s, rm);
9ee6e8bb 8310 shift = (insn >> 10) & 3;
1301f322 8311 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8312 rotate, a shift is sufficient. */
8313 if (shift != 0)
f669df27 8314 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8315 op1 = (insn >> 20) & 7;
8316 switch (op1) {
5e3f878a
PB
8317 case 0: gen_sxtb16(tmp); break;
8318 case 2: gen_sxtb(tmp); break;
8319 case 3: gen_sxth(tmp); break;
8320 case 4: gen_uxtb16(tmp); break;
8321 case 6: gen_uxtb(tmp); break;
8322 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8323 default: goto illegal_op;
8324 }
8325 if (rn != 15) {
5e3f878a 8326 tmp2 = load_reg(s, rn);
9ee6e8bb 8327 if ((op1 & 3) == 0) {
5e3f878a 8328 gen_add16(tmp, tmp2);
9ee6e8bb 8329 } else {
5e3f878a 8330 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8331 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8332 }
8333 }
6c95676b 8334 store_reg(s, rd, tmp);
9ee6e8bb
PB
8335 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8336 /* rev */
b0109805 8337 tmp = load_reg(s, rm);
9ee6e8bb
PB
8338 if (insn & (1 << 22)) {
8339 if (insn & (1 << 7)) {
b0109805 8340 gen_revsh(tmp);
9ee6e8bb
PB
8341 } else {
8342 ARCH(6T2);
b0109805 8343 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8344 }
8345 } else {
8346 if (insn & (1 << 7))
b0109805 8347 gen_rev16(tmp);
9ee6e8bb 8348 else
66896cb8 8349 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8350 }
b0109805 8351 store_reg(s, rd, tmp);
9ee6e8bb
PB
8352 } else {
8353 goto illegal_op;
8354 }
8355 break;
8356 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8357 switch ((insn >> 20) & 0x7) {
8358 case 5:
8359 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8360 /* op2 not 00x or 11x : UNDEF */
8361 goto illegal_op;
8362 }
838fa72d
AJ
8363 /* Signed multiply most significant [accumulate].
8364 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8365 tmp = load_reg(s, rm);
8366 tmp2 = load_reg(s, rs);
a7812ae4 8367 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8368
955a7dd5 8369 if (rd != 15) {
838fa72d 8370 tmp = load_reg(s, rd);
9ee6e8bb 8371 if (insn & (1 << 6)) {
838fa72d 8372 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8373 } else {
838fa72d 8374 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8375 }
8376 }
838fa72d
AJ
8377 if (insn & (1 << 5)) {
8378 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8379 }
8380 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8381 tmp = tcg_temp_new_i32();
838fa72d
AJ
8382 tcg_gen_trunc_i64_i32(tmp, tmp64);
8383 tcg_temp_free_i64(tmp64);
955a7dd5 8384 store_reg(s, rn, tmp);
41e9564d
PM
8385 break;
8386 case 0:
8387 case 4:
8388 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8389 if (insn & (1 << 7)) {
8390 goto illegal_op;
8391 }
8392 tmp = load_reg(s, rm);
8393 tmp2 = load_reg(s, rs);
9ee6e8bb 8394 if (insn & (1 << 5))
5e3f878a
PB
8395 gen_swap_half(tmp2);
8396 gen_smul_dual(tmp, tmp2);
5e3f878a 8397 if (insn & (1 << 6)) {
e1d177b9 8398 /* This subtraction cannot overflow. */
5e3f878a
PB
8399 tcg_gen_sub_i32(tmp, tmp, tmp2);
8400 } else {
e1d177b9
PM
8401 /* This addition cannot overflow 32 bits;
8402 * however it may overflow considered as a signed
8403 * operation, in which case we must set the Q flag.
8404 */
9ef39277 8405 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 8406 }
7d1b0095 8407 tcg_temp_free_i32(tmp2);
9ee6e8bb 8408 if (insn & (1 << 22)) {
5e3f878a 8409 /* smlald, smlsld */
a7812ae4
PB
8410 tmp64 = tcg_temp_new_i64();
8411 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8412 tcg_temp_free_i32(tmp);
a7812ae4
PB
8413 gen_addq(s, tmp64, rd, rn);
8414 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8415 tcg_temp_free_i64(tmp64);
9ee6e8bb 8416 } else {
5e3f878a 8417 /* smuad, smusd, smlad, smlsd */
22478e79 8418 if (rd != 15)
9ee6e8bb 8419 {
22478e79 8420 tmp2 = load_reg(s, rd);
9ef39277 8421 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8422 tcg_temp_free_i32(tmp2);
9ee6e8bb 8423 }
22478e79 8424 store_reg(s, rn, tmp);
9ee6e8bb 8425 }
41e9564d 8426 break;
b8b8ea05
PM
8427 case 1:
8428 case 3:
8429 /* SDIV, UDIV */
8430 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
8431 goto illegal_op;
8432 }
8433 if (((insn >> 5) & 7) || (rd != 15)) {
8434 goto illegal_op;
8435 }
8436 tmp = load_reg(s, rm);
8437 tmp2 = load_reg(s, rs);
8438 if (insn & (1 << 21)) {
8439 gen_helper_udiv(tmp, tmp, tmp2);
8440 } else {
8441 gen_helper_sdiv(tmp, tmp, tmp2);
8442 }
8443 tcg_temp_free_i32(tmp2);
8444 store_reg(s, rn, tmp);
8445 break;
41e9564d
PM
8446 default:
8447 goto illegal_op;
9ee6e8bb
PB
8448 }
8449 break;
8450 case 3:
8451 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8452 switch (op1) {
8453 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8454 ARCH(6);
8455 tmp = load_reg(s, rm);
8456 tmp2 = load_reg(s, rs);
8457 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8458 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8459 if (rd != 15) {
8460 tmp2 = load_reg(s, rd);
6ddbc6e4 8461 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8462 tcg_temp_free_i32(tmp2);
9ee6e8bb 8463 }
ded9d295 8464 store_reg(s, rn, tmp);
9ee6e8bb
PB
8465 break;
8466 case 0x20: case 0x24: case 0x28: case 0x2c:
8467 /* Bitfield insert/clear. */
8468 ARCH(6T2);
8469 shift = (insn >> 7) & 0x1f;
8470 i = (insn >> 16) & 0x1f;
8471 i = i + 1 - shift;
8472 if (rm == 15) {
7d1b0095 8473 tmp = tcg_temp_new_i32();
5e3f878a 8474 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8475 } else {
5e3f878a 8476 tmp = load_reg(s, rm);
9ee6e8bb
PB
8477 }
8478 if (i != 32) {
5e3f878a 8479 tmp2 = load_reg(s, rd);
d593c48e 8480 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8481 tcg_temp_free_i32(tmp2);
9ee6e8bb 8482 }
5e3f878a 8483 store_reg(s, rd, tmp);
9ee6e8bb
PB
8484 break;
8485 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8486 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8487 ARCH(6T2);
5e3f878a 8488 tmp = load_reg(s, rm);
9ee6e8bb
PB
8489 shift = (insn >> 7) & 0x1f;
8490 i = ((insn >> 16) & 0x1f) + 1;
8491 if (shift + i > 32)
8492 goto illegal_op;
8493 if (i < 32) {
8494 if (op1 & 0x20) {
5e3f878a 8495 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8496 } else {
5e3f878a 8497 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8498 }
8499 }
5e3f878a 8500 store_reg(s, rd, tmp);
9ee6e8bb
PB
8501 break;
8502 default:
8503 goto illegal_op;
8504 }
8505 break;
8506 }
8507 break;
8508 }
8509 do_ldst:
8510 /* Check for undefined extension instructions
8511 * per the ARM Bible IE:
8512 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8513 */
8514 sh = (0xf << 20) | (0xf << 4);
8515 if (op1 == 0x7 && ((insn & sh) == sh))
8516 {
8517 goto illegal_op;
8518 }
8519 /* load/store byte/word */
8520 rn = (insn >> 16) & 0xf;
8521 rd = (insn >> 12) & 0xf;
b0109805 8522 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
8523 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
8524 if (insn & (1 << 24))
b0109805 8525 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8526 if (insn & (1 << 20)) {
8527 /* load */
5a839c0d 8528 tmp = tcg_temp_new_i32();
9ee6e8bb 8529 if (insn & (1 << 22)) {
08307563 8530 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8531 } else {
08307563 8532 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8533 }
9ee6e8bb
PB
8534 } else {
8535 /* store */
b0109805 8536 tmp = load_reg(s, rd);
5a839c0d 8537 if (insn & (1 << 22)) {
08307563 8538 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8539 } else {
08307563 8540 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8541 }
8542 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8543 }
8544 if (!(insn & (1 << 24))) {
b0109805
PB
8545 gen_add_data_offset(s, insn, tmp2);
8546 store_reg(s, rn, tmp2);
8547 } else if (insn & (1 << 21)) {
8548 store_reg(s, rn, tmp2);
8549 } else {
7d1b0095 8550 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8551 }
8552 if (insn & (1 << 20)) {
8553 /* Complete the load. */
be5e7a76 8554 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
8555 }
8556 break;
8557 case 0x08:
8558 case 0x09:
8559 {
8560 int j, n, user, loaded_base;
39d5492a 8561 TCGv_i32 loaded_var;
9ee6e8bb
PB
8562 /* load/store multiple words */
8563 /* XXX: store correct base if write back */
8564 user = 0;
8565 if (insn & (1 << 22)) {
8566 if (IS_USER(s))
8567 goto illegal_op; /* only usable in supervisor mode */
8568
8569 if ((insn & (1 << 15)) == 0)
8570 user = 1;
8571 }
8572 rn = (insn >> 16) & 0xf;
b0109805 8573 addr = load_reg(s, rn);
9ee6e8bb
PB
8574
8575 /* compute total size */
8576 loaded_base = 0;
39d5492a 8577 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8578 n = 0;
8579 for(i=0;i<16;i++) {
8580 if (insn & (1 << i))
8581 n++;
8582 }
8583 /* XXX: test invalid n == 0 case ? */
8584 if (insn & (1 << 23)) {
8585 if (insn & (1 << 24)) {
8586 /* pre increment */
b0109805 8587 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8588 } else {
8589 /* post increment */
8590 }
8591 } else {
8592 if (insn & (1 << 24)) {
8593 /* pre decrement */
b0109805 8594 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8595 } else {
8596 /* post decrement */
8597 if (n != 1)
b0109805 8598 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8599 }
8600 }
8601 j = 0;
8602 for(i=0;i<16;i++) {
8603 if (insn & (1 << i)) {
8604 if (insn & (1 << 20)) {
8605 /* load */
5a839c0d 8606 tmp = tcg_temp_new_i32();
08307563 8607 gen_aa32_ld32u(tmp, addr, IS_USER(s));
be5e7a76 8608 if (user) {
b75263d6 8609 tmp2 = tcg_const_i32(i);
1ce94f81 8610 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8611 tcg_temp_free_i32(tmp2);
7d1b0095 8612 tcg_temp_free_i32(tmp);
9ee6e8bb 8613 } else if (i == rn) {
b0109805 8614 loaded_var = tmp;
9ee6e8bb
PB
8615 loaded_base = 1;
8616 } else {
be5e7a76 8617 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
8618 }
8619 } else {
8620 /* store */
8621 if (i == 15) {
8622 /* special case: r15 = PC + 8 */
8623 val = (long)s->pc + 4;
7d1b0095 8624 tmp = tcg_temp_new_i32();
b0109805 8625 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8626 } else if (user) {
7d1b0095 8627 tmp = tcg_temp_new_i32();
b75263d6 8628 tmp2 = tcg_const_i32(i);
9ef39277 8629 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8630 tcg_temp_free_i32(tmp2);
9ee6e8bb 8631 } else {
b0109805 8632 tmp = load_reg(s, i);
9ee6e8bb 8633 }
08307563 8634 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 8635 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8636 }
8637 j++;
8638 /* no need to add after the last transfer */
8639 if (j != n)
b0109805 8640 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8641 }
8642 }
8643 if (insn & (1 << 21)) {
8644 /* write back */
8645 if (insn & (1 << 23)) {
8646 if (insn & (1 << 24)) {
8647 /* pre increment */
8648 } else {
8649 /* post increment */
b0109805 8650 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8651 }
8652 } else {
8653 if (insn & (1 << 24)) {
8654 /* pre decrement */
8655 if (n != 1)
b0109805 8656 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8657 } else {
8658 /* post decrement */
b0109805 8659 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8660 }
8661 }
b0109805
PB
8662 store_reg(s, rn, addr);
8663 } else {
7d1b0095 8664 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8665 }
8666 if (loaded_base) {
b0109805 8667 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8668 }
8669 if ((insn & (1 << 22)) && !user) {
8670 /* Restore CPSR from SPSR. */
d9ba4830
PB
8671 tmp = load_cpu_field(spsr);
8672 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 8673 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8674 s->is_jmp = DISAS_UPDATE;
8675 }
8676 }
8677 break;
8678 case 0xa:
8679 case 0xb:
8680 {
8681 int32_t offset;
8682
8683 /* branch (and link) */
8684 val = (int32_t)s->pc;
8685 if (insn & (1 << 24)) {
7d1b0095 8686 tmp = tcg_temp_new_i32();
5e3f878a
PB
8687 tcg_gen_movi_i32(tmp, val);
8688 store_reg(s, 14, tmp);
9ee6e8bb 8689 }
534df156
PM
8690 offset = sextract32(insn << 2, 0, 26);
8691 val += offset + 4;
9ee6e8bb
PB
8692 gen_jmp(s, val);
8693 }
8694 break;
8695 case 0xc:
8696 case 0xd:
8697 case 0xe:
6a57f3eb
WN
8698 if (((insn >> 8) & 0xe) == 10) {
8699 /* VFP. */
8700 if (disas_vfp_insn(env, s, insn)) {
8701 goto illegal_op;
8702 }
8703 } else if (disas_coproc_insn(env, s, insn)) {
8704 /* Coprocessor. */
9ee6e8bb 8705 goto illegal_op;
6a57f3eb 8706 }
9ee6e8bb
PB
8707 break;
8708 case 0xf:
8709 /* swi */
eaed129d 8710 gen_set_pc_im(s, s->pc);
d4a2dc67 8711 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
8712 s->is_jmp = DISAS_SWI;
8713 break;
8714 default:
8715 illegal_op:
d4a2dc67 8716 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
8717 break;
8718 }
8719 }
8720}
8721
/*
 * Report whether a Thumb-2 data-processing opcode is one of the
 * logical operations (AND, BIC, ORR, ORN, EOR, ...).  These occupy
 * opcode values 0..7; the arithmetic ops (ADD, ADC, SBC, SUB, RSB)
 * start at 8.  Logical ops set the flags from the logical result
 * (and the shifter carry-out) rather than from an ALU carry/overflow.
 */
static int thumb2_logic_op(int op)
{
    return (op < 8) ? 1 : 0;
}
8728
8729/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8730 then set condition code flags based on the result of the operation.
8731 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8732 to the high bit of T1.
8733 Returns zero if the opcode is valid. */
8734
8735static int
39d5492a
PM
8736gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8737 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8738{
8739 int logic_cc;
8740
8741 logic_cc = 0;
8742 switch (op) {
8743 case 0: /* and */
396e467c 8744 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8745 logic_cc = conds;
8746 break;
8747 case 1: /* bic */
f669df27 8748 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8749 logic_cc = conds;
8750 break;
8751 case 2: /* orr */
396e467c 8752 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8753 logic_cc = conds;
8754 break;
8755 case 3: /* orn */
29501f1b 8756 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8757 logic_cc = conds;
8758 break;
8759 case 4: /* eor */
396e467c 8760 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8761 logic_cc = conds;
8762 break;
8763 case 8: /* add */
8764 if (conds)
72485ec4 8765 gen_add_CC(t0, t0, t1);
9ee6e8bb 8766 else
396e467c 8767 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8768 break;
8769 case 10: /* adc */
8770 if (conds)
49b4c31e 8771 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8772 else
396e467c 8773 gen_adc(t0, t1);
9ee6e8bb
PB
8774 break;
8775 case 11: /* sbc */
2de68a49
RH
8776 if (conds) {
8777 gen_sbc_CC(t0, t0, t1);
8778 } else {
396e467c 8779 gen_sub_carry(t0, t0, t1);
2de68a49 8780 }
9ee6e8bb
PB
8781 break;
8782 case 13: /* sub */
8783 if (conds)
72485ec4 8784 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8785 else
396e467c 8786 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8787 break;
8788 case 14: /* rsb */
8789 if (conds)
72485ec4 8790 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8791 else
396e467c 8792 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8793 break;
8794 default: /* 5, 6, 7, 9, 12, 15. */
8795 return 1;
8796 }
8797 if (logic_cc) {
396e467c 8798 gen_logic_CC(t0);
9ee6e8bb 8799 if (shifter_out)
396e467c 8800 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8801 }
8802 return 0;
8803}
8804
8805/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8806 is not legal. */
0ecb72a5 8807static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8808{
b0109805 8809 uint32_t insn, imm, shift, offset;
9ee6e8bb 8810 uint32_t rd, rn, rm, rs;
39d5492a
PM
8811 TCGv_i32 tmp;
8812 TCGv_i32 tmp2;
8813 TCGv_i32 tmp3;
8814 TCGv_i32 addr;
a7812ae4 8815 TCGv_i64 tmp64;
9ee6e8bb
PB
8816 int op;
8817 int shiftop;
8818 int conds;
8819 int logic_cc;
8820
8821 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8822 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8823 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8824 16-bit instructions to get correct prefetch abort behavior. */
8825 insn = insn_hw1;
8826 if ((insn & (1 << 12)) == 0) {
be5e7a76 8827 ARCH(5);
9ee6e8bb
PB
8828 /* Second half of blx. */
8829 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8830 tmp = load_reg(s, 14);
8831 tcg_gen_addi_i32(tmp, tmp, offset);
8832 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8833
7d1b0095 8834 tmp2 = tcg_temp_new_i32();
b0109805 8835 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8836 store_reg(s, 14, tmp2);
8837 gen_bx(s, tmp);
9ee6e8bb
PB
8838 return 0;
8839 }
8840 if (insn & (1 << 11)) {
8841 /* Second half of bl. */
8842 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8843 tmp = load_reg(s, 14);
6a0d8a1d 8844 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8845
7d1b0095 8846 tmp2 = tcg_temp_new_i32();
b0109805 8847 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8848 store_reg(s, 14, tmp2);
8849 gen_bx(s, tmp);
9ee6e8bb
PB
8850 return 0;
8851 }
8852 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8853 /* Instruction spans a page boundary. Implement it as two
8854 16-bit instructions in case the second half causes an
8855 prefetch abort. */
8856 offset = ((int32_t)insn << 21) >> 9;
396e467c 8857 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8858 return 0;
8859 }
8860 /* Fall through to 32-bit decode. */
8861 }
8862
d31dd73e 8863 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8864 s->pc += 2;
8865 insn |= (uint32_t)insn_hw1 << 16;
8866
8867 if ((insn & 0xf800e800) != 0xf000e800) {
8868 ARCH(6T2);
8869 }
8870
8871 rn = (insn >> 16) & 0xf;
8872 rs = (insn >> 12) & 0xf;
8873 rd = (insn >> 8) & 0xf;
8874 rm = insn & 0xf;
8875 switch ((insn >> 25) & 0xf) {
8876 case 0: case 1: case 2: case 3:
8877 /* 16-bit instructions. Should never happen. */
8878 abort();
8879 case 4:
8880 if (insn & (1 << 22)) {
8881 /* Other load/store, table branch. */
8882 if (insn & 0x01200000) {
8883 /* Load/store doubleword. */
8884 if (rn == 15) {
7d1b0095 8885 addr = tcg_temp_new_i32();
b0109805 8886 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8887 } else {
b0109805 8888 addr = load_reg(s, rn);
9ee6e8bb
PB
8889 }
8890 offset = (insn & 0xff) * 4;
8891 if ((insn & (1 << 23)) == 0)
8892 offset = -offset;
8893 if (insn & (1 << 24)) {
b0109805 8894 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8895 offset = 0;
8896 }
8897 if (insn & (1 << 20)) {
8898 /* ldrd */
e2592fad 8899 tmp = tcg_temp_new_i32();
08307563 8900 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8901 store_reg(s, rs, tmp);
8902 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8903 tmp = tcg_temp_new_i32();
08307563 8904 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 8905 store_reg(s, rd, tmp);
9ee6e8bb
PB
8906 } else {
8907 /* strd */
b0109805 8908 tmp = load_reg(s, rs);
08307563 8909 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8910 tcg_temp_free_i32(tmp);
b0109805
PB
8911 tcg_gen_addi_i32(addr, addr, 4);
8912 tmp = load_reg(s, rd);
08307563 8913 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8914 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8915 }
8916 if (insn & (1 << 21)) {
8917 /* Base writeback. */
8918 if (rn == 15)
8919 goto illegal_op;
b0109805
PB
8920 tcg_gen_addi_i32(addr, addr, offset - 4);
8921 store_reg(s, rn, addr);
8922 } else {
7d1b0095 8923 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8924 }
8925 } else if ((insn & (1 << 23)) == 0) {
8926 /* Load/store exclusive word. */
39d5492a 8927 addr = tcg_temp_local_new_i32();
98a46317 8928 load_reg_var(s, addr, rn);
426f5abc 8929 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8930 if (insn & (1 << 20)) {
426f5abc 8931 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8932 } else {
426f5abc 8933 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8934 }
39d5492a 8935 tcg_temp_free_i32(addr);
2359bf80 8936 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
8937 /* Table Branch. */
8938 if (rn == 15) {
7d1b0095 8939 addr = tcg_temp_new_i32();
b0109805 8940 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8941 } else {
b0109805 8942 addr = load_reg(s, rn);
9ee6e8bb 8943 }
b26eefb6 8944 tmp = load_reg(s, rm);
b0109805 8945 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8946 if (insn & (1 << 4)) {
8947 /* tbh */
b0109805 8948 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8949 tcg_temp_free_i32(tmp);
e2592fad 8950 tmp = tcg_temp_new_i32();
08307563 8951 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb 8952 } else { /* tbb */
7d1b0095 8953 tcg_temp_free_i32(tmp);
e2592fad 8954 tmp = tcg_temp_new_i32();
08307563 8955 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb 8956 }
7d1b0095 8957 tcg_temp_free_i32(addr);
b0109805
PB
8958 tcg_gen_shli_i32(tmp, tmp, 1);
8959 tcg_gen_addi_i32(tmp, tmp, s->pc);
8960 store_reg(s, 15, tmp);
9ee6e8bb 8961 } else {
2359bf80 8962 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 8963 op = (insn >> 4) & 0x3;
2359bf80
MR
8964 switch (op2) {
8965 case 0:
426f5abc 8966 goto illegal_op;
2359bf80
MR
8967 case 1:
8968 /* Load/store exclusive byte/halfword/doubleword */
8969 if (op == 2) {
8970 goto illegal_op;
8971 }
8972 ARCH(7);
8973 break;
8974 case 2:
8975 /* Load-acquire/store-release */
8976 if (op == 3) {
8977 goto illegal_op;
8978 }
8979 /* Fall through */
8980 case 3:
8981 /* Load-acquire/store-release exclusive */
8982 ARCH(8);
8983 break;
426f5abc 8984 }
39d5492a 8985 addr = tcg_temp_local_new_i32();
98a46317 8986 load_reg_var(s, addr, rn);
2359bf80
MR
8987 if (!(op2 & 1)) {
8988 if (insn & (1 << 20)) {
8989 tmp = tcg_temp_new_i32();
8990 switch (op) {
8991 case 0: /* ldab */
08307563 8992 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
8993 break;
8994 case 1: /* ldah */
08307563 8995 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
8996 break;
8997 case 2: /* lda */
08307563 8998 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
8999 break;
9000 default:
9001 abort();
9002 }
9003 store_reg(s, rs, tmp);
9004 } else {
9005 tmp = load_reg(s, rs);
9006 switch (op) {
9007 case 0: /* stlb */
08307563 9008 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
9009 break;
9010 case 1: /* stlh */
08307563 9011 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
9012 break;
9013 case 2: /* stl */
08307563 9014 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
9015 break;
9016 default:
9017 abort();
9018 }
9019 tcg_temp_free_i32(tmp);
9020 }
9021 } else if (insn & (1 << 20)) {
426f5abc 9022 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9023 } else {
426f5abc 9024 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9025 }
39d5492a 9026 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9027 }
9028 } else {
9029 /* Load/store multiple, RFE, SRS. */
9030 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
9031 /* RFE, SRS: not available in user mode or on M profile */
9032 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 9033 goto illegal_op;
00115976 9034 }
9ee6e8bb
PB
9035 if (insn & (1 << 20)) {
9036 /* rfe */
b0109805
PB
9037 addr = load_reg(s, rn);
9038 if ((insn & (1 << 24)) == 0)
9039 tcg_gen_addi_i32(addr, addr, -8);
9040 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9041 tmp = tcg_temp_new_i32();
08307563 9042 gen_aa32_ld32u(tmp, addr, 0);
b0109805 9043 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9044 tmp2 = tcg_temp_new_i32();
08307563 9045 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
9046 if (insn & (1 << 21)) {
9047 /* Base writeback. */
b0109805
PB
9048 if (insn & (1 << 24)) {
9049 tcg_gen_addi_i32(addr, addr, 4);
9050 } else {
9051 tcg_gen_addi_i32(addr, addr, -4);
9052 }
9053 store_reg(s, rn, addr);
9054 } else {
7d1b0095 9055 tcg_temp_free_i32(addr);
9ee6e8bb 9056 }
b0109805 9057 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9058 } else {
9059 /* srs */
81465888
PM
9060 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9061 insn & (1 << 21));
9ee6e8bb
PB
9062 }
9063 } else {
5856d44e 9064 int i, loaded_base = 0;
39d5492a 9065 TCGv_i32 loaded_var;
9ee6e8bb 9066 /* Load/store multiple. */
b0109805 9067 addr = load_reg(s, rn);
9ee6e8bb
PB
9068 offset = 0;
9069 for (i = 0; i < 16; i++) {
9070 if (insn & (1 << i))
9071 offset += 4;
9072 }
9073 if (insn & (1 << 24)) {
b0109805 9074 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9075 }
9076
39d5492a 9077 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9078 for (i = 0; i < 16; i++) {
9079 if ((insn & (1 << i)) == 0)
9080 continue;
9081 if (insn & (1 << 20)) {
9082 /* Load. */
e2592fad 9083 tmp = tcg_temp_new_i32();
08307563 9084 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 9085 if (i == 15) {
b0109805 9086 gen_bx(s, tmp);
5856d44e
YO
9087 } else if (i == rn) {
9088 loaded_var = tmp;
9089 loaded_base = 1;
9ee6e8bb 9090 } else {
b0109805 9091 store_reg(s, i, tmp);
9ee6e8bb
PB
9092 }
9093 } else {
9094 /* Store. */
b0109805 9095 tmp = load_reg(s, i);
08307563 9096 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 9097 tcg_temp_free_i32(tmp);
9ee6e8bb 9098 }
b0109805 9099 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9100 }
5856d44e
YO
9101 if (loaded_base) {
9102 store_reg(s, rn, loaded_var);
9103 }
9ee6e8bb
PB
9104 if (insn & (1 << 21)) {
9105 /* Base register writeback. */
9106 if (insn & (1 << 24)) {
b0109805 9107 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9108 }
9109 /* Fault if writeback register is in register list. */
9110 if (insn & (1 << rn))
9111 goto illegal_op;
b0109805
PB
9112 store_reg(s, rn, addr);
9113 } else {
7d1b0095 9114 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9115 }
9116 }
9117 }
9118 break;
2af9ab77
JB
9119 case 5:
9120
9ee6e8bb 9121 op = (insn >> 21) & 0xf;
2af9ab77
JB
9122 if (op == 6) {
9123 /* Halfword pack. */
9124 tmp = load_reg(s, rn);
9125 tmp2 = load_reg(s, rm);
9126 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9127 if (insn & (1 << 5)) {
9128 /* pkhtb */
9129 if (shift == 0)
9130 shift = 31;
9131 tcg_gen_sari_i32(tmp2, tmp2, shift);
9132 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9133 tcg_gen_ext16u_i32(tmp2, tmp2);
9134 } else {
9135 /* pkhbt */
9136 if (shift)
9137 tcg_gen_shli_i32(tmp2, tmp2, shift);
9138 tcg_gen_ext16u_i32(tmp, tmp);
9139 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9140 }
9141 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9142 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9143 store_reg(s, rd, tmp);
9144 } else {
2af9ab77
JB
9145 /* Data processing register constant shift. */
9146 if (rn == 15) {
7d1b0095 9147 tmp = tcg_temp_new_i32();
2af9ab77
JB
9148 tcg_gen_movi_i32(tmp, 0);
9149 } else {
9150 tmp = load_reg(s, rn);
9151 }
9152 tmp2 = load_reg(s, rm);
9153
9154 shiftop = (insn >> 4) & 3;
9155 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9156 conds = (insn & (1 << 20)) != 0;
9157 logic_cc = (conds && thumb2_logic_op(op));
9158 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9159 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9160 goto illegal_op;
7d1b0095 9161 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9162 if (rd != 15) {
9163 store_reg(s, rd, tmp);
9164 } else {
7d1b0095 9165 tcg_temp_free_i32(tmp);
2af9ab77 9166 }
3174f8e9 9167 }
9ee6e8bb
PB
9168 break;
9169 case 13: /* Misc data processing. */
9170 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9171 if (op < 4 && (insn & 0xf000) != 0xf000)
9172 goto illegal_op;
9173 switch (op) {
9174 case 0: /* Register controlled shift. */
8984bd2e
PB
9175 tmp = load_reg(s, rn);
9176 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9177 if ((insn & 0x70) != 0)
9178 goto illegal_op;
9179 op = (insn >> 21) & 3;
8984bd2e
PB
9180 logic_cc = (insn & (1 << 20)) != 0;
9181 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9182 if (logic_cc)
9183 gen_logic_CC(tmp);
21aeb343 9184 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
9185 break;
9186 case 1: /* Sign/zero extend. */
5e3f878a 9187 tmp = load_reg(s, rm);
9ee6e8bb 9188 shift = (insn >> 4) & 3;
1301f322 9189 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9190 rotate, a shift is sufficient. */
9191 if (shift != 0)
f669df27 9192 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9193 op = (insn >> 20) & 7;
9194 switch (op) {
5e3f878a
PB
9195 case 0: gen_sxth(tmp); break;
9196 case 1: gen_uxth(tmp); break;
9197 case 2: gen_sxtb16(tmp); break;
9198 case 3: gen_uxtb16(tmp); break;
9199 case 4: gen_sxtb(tmp); break;
9200 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
9201 default: goto illegal_op;
9202 }
9203 if (rn != 15) {
5e3f878a 9204 tmp2 = load_reg(s, rn);
9ee6e8bb 9205 if ((op >> 1) == 1) {
5e3f878a 9206 gen_add16(tmp, tmp2);
9ee6e8bb 9207 } else {
5e3f878a 9208 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9209 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9210 }
9211 }
5e3f878a 9212 store_reg(s, rd, tmp);
9ee6e8bb
PB
9213 break;
9214 case 2: /* SIMD add/subtract. */
9215 op = (insn >> 20) & 7;
9216 shift = (insn >> 4) & 7;
9217 if ((op & 3) == 3 || (shift & 3) == 3)
9218 goto illegal_op;
6ddbc6e4
PB
9219 tmp = load_reg(s, rn);
9220 tmp2 = load_reg(s, rm);
9221 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9222 tcg_temp_free_i32(tmp2);
6ddbc6e4 9223 store_reg(s, rd, tmp);
9ee6e8bb
PB
9224 break;
9225 case 3: /* Other data processing. */
9226 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9227 if (op < 4) {
9228 /* Saturating add/subtract. */
d9ba4830
PB
9229 tmp = load_reg(s, rn);
9230 tmp2 = load_reg(s, rm);
9ee6e8bb 9231 if (op & 1)
9ef39277 9232 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9233 if (op & 2)
9ef39277 9234 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9235 else
9ef39277 9236 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9237 tcg_temp_free_i32(tmp2);
9ee6e8bb 9238 } else {
d9ba4830 9239 tmp = load_reg(s, rn);
9ee6e8bb
PB
9240 switch (op) {
9241 case 0x0a: /* rbit */
d9ba4830 9242 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9243 break;
9244 case 0x08: /* rev */
66896cb8 9245 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9246 break;
9247 case 0x09: /* rev16 */
d9ba4830 9248 gen_rev16(tmp);
9ee6e8bb
PB
9249 break;
9250 case 0x0b: /* revsh */
d9ba4830 9251 gen_revsh(tmp);
9ee6e8bb
PB
9252 break;
9253 case 0x10: /* sel */
d9ba4830 9254 tmp2 = load_reg(s, rm);
7d1b0095 9255 tmp3 = tcg_temp_new_i32();
0ecb72a5 9256 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9257 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9258 tcg_temp_free_i32(tmp3);
9259 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9260 break;
9261 case 0x18: /* clz */
d9ba4830 9262 gen_helper_clz(tmp, tmp);
9ee6e8bb 9263 break;
eb0ecd5a
WN
9264 case 0x20:
9265 case 0x21:
9266 case 0x22:
9267 case 0x28:
9268 case 0x29:
9269 case 0x2a:
9270 {
9271 /* crc32/crc32c */
9272 uint32_t sz = op & 0x3;
9273 uint32_t c = op & 0x8;
9274
9275 if (!arm_feature(env, ARM_FEATURE_CRC)) {
9276 goto illegal_op;
9277 }
9278
9279 tmp2 = load_reg(s, rm);
9280 tmp3 = tcg_const_i32(1 << sz);
9281 if (c) {
9282 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9283 } else {
9284 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9285 }
9286 tcg_temp_free_i32(tmp2);
9287 tcg_temp_free_i32(tmp3);
9288 break;
9289 }
9ee6e8bb
PB
9290 default:
9291 goto illegal_op;
9292 }
9293 }
d9ba4830 9294 store_reg(s, rd, tmp);
9ee6e8bb
PB
9295 break;
9296 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9297 op = (insn >> 4) & 0xf;
d9ba4830
PB
9298 tmp = load_reg(s, rn);
9299 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9300 switch ((insn >> 20) & 7) {
9301 case 0: /* 32 x 32 -> 32 */
d9ba4830 9302 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9303 tcg_temp_free_i32(tmp2);
9ee6e8bb 9304 if (rs != 15) {
d9ba4830 9305 tmp2 = load_reg(s, rs);
9ee6e8bb 9306 if (op)
d9ba4830 9307 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9308 else
d9ba4830 9309 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9310 tcg_temp_free_i32(tmp2);
9ee6e8bb 9311 }
9ee6e8bb
PB
9312 break;
9313 case 1: /* 16 x 16 -> 32 */
d9ba4830 9314 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9315 tcg_temp_free_i32(tmp2);
9ee6e8bb 9316 if (rs != 15) {
d9ba4830 9317 tmp2 = load_reg(s, rs);
9ef39277 9318 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9319 tcg_temp_free_i32(tmp2);
9ee6e8bb 9320 }
9ee6e8bb
PB
9321 break;
9322 case 2: /* Dual multiply add. */
9323 case 4: /* Dual multiply subtract. */
9324 if (op)
d9ba4830
PB
9325 gen_swap_half(tmp2);
9326 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9327 if (insn & (1 << 22)) {
e1d177b9 9328 /* This subtraction cannot overflow. */
d9ba4830 9329 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9330 } else {
e1d177b9
PM
9331 /* This addition cannot overflow 32 bits;
9332 * however it may overflow considered as a signed
9333 * operation, in which case we must set the Q flag.
9334 */
9ef39277 9335 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9336 }
7d1b0095 9337 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9338 if (rs != 15)
9339 {
d9ba4830 9340 tmp2 = load_reg(s, rs);
9ef39277 9341 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9342 tcg_temp_free_i32(tmp2);
9ee6e8bb 9343 }
9ee6e8bb
PB
9344 break;
9345 case 3: /* 32 * 16 -> 32msb */
9346 if (op)
d9ba4830 9347 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9348 else
d9ba4830 9349 gen_sxth(tmp2);
a7812ae4
PB
9350 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9351 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9352 tmp = tcg_temp_new_i32();
a7812ae4 9353 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 9354 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9355 if (rs != 15)
9356 {
d9ba4830 9357 tmp2 = load_reg(s, rs);
9ef39277 9358 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9359 tcg_temp_free_i32(tmp2);
9ee6e8bb 9360 }
9ee6e8bb 9361 break;
838fa72d
AJ
9362 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9363 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9364 if (rs != 15) {
838fa72d
AJ
9365 tmp = load_reg(s, rs);
9366 if (insn & (1 << 20)) {
9367 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9368 } else {
838fa72d 9369 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9370 }
2c0262af 9371 }
838fa72d
AJ
9372 if (insn & (1 << 4)) {
9373 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9374 }
9375 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9376 tmp = tcg_temp_new_i32();
838fa72d
AJ
9377 tcg_gen_trunc_i64_i32(tmp, tmp64);
9378 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9379 break;
9380 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9381 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9382 tcg_temp_free_i32(tmp2);
9ee6e8bb 9383 if (rs != 15) {
d9ba4830
PB
9384 tmp2 = load_reg(s, rs);
9385 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9386 tcg_temp_free_i32(tmp2);
5fd46862 9387 }
9ee6e8bb 9388 break;
2c0262af 9389 }
d9ba4830 9390 store_reg(s, rd, tmp);
2c0262af 9391 break;
9ee6e8bb
PB
9392 case 6: case 7: /* 64-bit multiply, Divide. */
9393 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9394 tmp = load_reg(s, rn);
9395 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9396 if ((op & 0x50) == 0x10) {
9397 /* sdiv, udiv */
47789990 9398 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9399 goto illegal_op;
47789990 9400 }
9ee6e8bb 9401 if (op & 0x20)
5e3f878a 9402 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9403 else
5e3f878a 9404 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9405 tcg_temp_free_i32(tmp2);
5e3f878a 9406 store_reg(s, rd, tmp);
9ee6e8bb
PB
9407 } else if ((op & 0xe) == 0xc) {
9408 /* Dual multiply accumulate long. */
9409 if (op & 1)
5e3f878a
PB
9410 gen_swap_half(tmp2);
9411 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9412 if (op & 0x10) {
5e3f878a 9413 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9414 } else {
5e3f878a 9415 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9416 }
7d1b0095 9417 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9418 /* BUGFIX */
9419 tmp64 = tcg_temp_new_i64();
9420 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9421 tcg_temp_free_i32(tmp);
a7812ae4
PB
9422 gen_addq(s, tmp64, rs, rd);
9423 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9424 tcg_temp_free_i64(tmp64);
2c0262af 9425 } else {
9ee6e8bb
PB
9426 if (op & 0x20) {
9427 /* Unsigned 64-bit multiply */
a7812ae4 9428 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9429 } else {
9ee6e8bb
PB
9430 if (op & 8) {
9431 /* smlalxy */
5e3f878a 9432 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9433 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9434 tmp64 = tcg_temp_new_i64();
9435 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9436 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9437 } else {
9438 /* Signed 64-bit multiply */
a7812ae4 9439 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9440 }
b5ff1b31 9441 }
9ee6e8bb
PB
9442 if (op & 4) {
9443 /* umaal */
a7812ae4
PB
9444 gen_addq_lo(s, tmp64, rs);
9445 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9446 } else if (op & 0x40) {
9447 /* 64-bit accumulate. */
a7812ae4 9448 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9449 }
a7812ae4 9450 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9451 tcg_temp_free_i64(tmp64);
5fd46862 9452 }
2c0262af 9453 break;
9ee6e8bb
PB
9454 }
9455 break;
9456 case 6: case 7: case 14: case 15:
9457 /* Coprocessor. */
9458 if (((insn >> 24) & 3) == 3) {
9459 /* Translate into the equivalent ARM encoding. */
f06053e3 9460 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
9461 if (disas_neon_data_insn(env, s, insn))
9462 goto illegal_op;
6a57f3eb
WN
9463 } else if (((insn >> 8) & 0xe) == 10) {
9464 if (disas_vfp_insn(env, s, insn)) {
9465 goto illegal_op;
9466 }
9ee6e8bb
PB
9467 } else {
9468 if (insn & (1 << 28))
9469 goto illegal_op;
9470 if (disas_coproc_insn (env, s, insn))
9471 goto illegal_op;
9472 }
9473 break;
9474 case 8: case 9: case 10: case 11:
9475 if (insn & (1 << 15)) {
9476 /* Branches, misc control. */
9477 if (insn & 0x5000) {
9478 /* Unconditional branch. */
9479 /* signextend(hw1[10:0]) -> offset[:12]. */
9480 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9481 /* hw1[10:0] -> offset[11:1]. */
9482 offset |= (insn & 0x7ff) << 1;
9483 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9484 offset[24:22] already have the same value because of the
9485 sign extension above. */
9486 offset ^= ((~insn) & (1 << 13)) << 10;
9487 offset ^= ((~insn) & (1 << 11)) << 11;
9488
9ee6e8bb
PB
9489 if (insn & (1 << 14)) {
9490 /* Branch and link. */
3174f8e9 9491 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9492 }
3b46e624 9493
b0109805 9494 offset += s->pc;
9ee6e8bb
PB
9495 if (insn & (1 << 12)) {
9496 /* b/bl */
b0109805 9497 gen_jmp(s, offset);
9ee6e8bb
PB
9498 } else {
9499 /* blx */
b0109805 9500 offset &= ~(uint32_t)2;
be5e7a76 9501 /* thumb2 bx, no need to check */
b0109805 9502 gen_bx_im(s, offset);
2c0262af 9503 }
9ee6e8bb
PB
9504 } else if (((insn >> 23) & 7) == 7) {
9505 /* Misc control */
9506 if (insn & (1 << 13))
9507 goto illegal_op;
9508
9509 if (insn & (1 << 26)) {
9510 /* Secure monitor call (v6Z) */
e0c270d9
SW
9511 qemu_log_mask(LOG_UNIMP,
9512 "arm: unimplemented secure monitor call\n");
9ee6e8bb 9513 goto illegal_op; /* not implemented. */
2c0262af 9514 } else {
9ee6e8bb
PB
9515 op = (insn >> 20) & 7;
9516 switch (op) {
9517 case 0: /* msr cpsr. */
9518 if (IS_M(env)) {
8984bd2e
PB
9519 tmp = load_reg(s, rn);
9520 addr = tcg_const_i32(insn & 0xff);
9521 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9522 tcg_temp_free_i32(addr);
7d1b0095 9523 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9524 gen_lookup_tb(s);
9525 break;
9526 }
9527 /* fall through */
9528 case 1: /* msr spsr. */
9529 if (IS_M(env))
9530 goto illegal_op;
2fbac54b
FN
9531 tmp = load_reg(s, rn);
9532 if (gen_set_psr(s,
9ee6e8bb 9533 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 9534 op == 1, tmp))
9ee6e8bb
PB
9535 goto illegal_op;
9536 break;
9537 case 2: /* cps, nop-hint. */
9538 if (((insn >> 8) & 7) == 0) {
9539 gen_nop_hint(s, insn & 0xff);
9540 }
9541 /* Implemented as NOP in user mode. */
9542 if (IS_USER(s))
9543 break;
9544 offset = 0;
9545 imm = 0;
9546 if (insn & (1 << 10)) {
9547 if (insn & (1 << 7))
9548 offset |= CPSR_A;
9549 if (insn & (1 << 6))
9550 offset |= CPSR_I;
9551 if (insn & (1 << 5))
9552 offset |= CPSR_F;
9553 if (insn & (1 << 9))
9554 imm = CPSR_A | CPSR_I | CPSR_F;
9555 }
9556 if (insn & (1 << 8)) {
9557 offset |= 0x1f;
9558 imm |= (insn & 0x1f);
9559 }
9560 if (offset) {
2fbac54b 9561 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
9562 }
9563 break;
9564 case 3: /* Special control operations. */
426f5abc 9565 ARCH(7);
9ee6e8bb
PB
9566 op = (insn >> 4) & 0xf;
9567 switch (op) {
9568 case 2: /* clrex */
426f5abc 9569 gen_clrex(s);
9ee6e8bb
PB
9570 break;
9571 case 4: /* dsb */
9572 case 5: /* dmb */
9573 case 6: /* isb */
9574 /* These execute as NOPs. */
9ee6e8bb
PB
9575 break;
9576 default:
9577 goto illegal_op;
9578 }
9579 break;
9580 case 4: /* bxj */
9581 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9582 tmp = load_reg(s, rn);
9583 gen_bx(s, tmp);
9ee6e8bb
PB
9584 break;
9585 case 5: /* Exception return. */
b8b45b68
RV
9586 if (IS_USER(s)) {
9587 goto illegal_op;
9588 }
9589 if (rn != 14 || rd != 15) {
9590 goto illegal_op;
9591 }
9592 tmp = load_reg(s, rn);
9593 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9594 gen_exception_return(s, tmp);
9595 break;
9ee6e8bb 9596 case 6: /* mrs cpsr. */
7d1b0095 9597 tmp = tcg_temp_new_i32();
9ee6e8bb 9598 if (IS_M(env)) {
8984bd2e
PB
9599 addr = tcg_const_i32(insn & 0xff);
9600 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 9601 tcg_temp_free_i32(addr);
9ee6e8bb 9602 } else {
9ef39277 9603 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9604 }
8984bd2e 9605 store_reg(s, rd, tmp);
9ee6e8bb
PB
9606 break;
9607 case 7: /* mrs spsr. */
9608 /* Not accessible in user mode. */
9609 if (IS_USER(s) || IS_M(env))
9610 goto illegal_op;
d9ba4830
PB
9611 tmp = load_cpu_field(spsr);
9612 store_reg(s, rd, tmp);
9ee6e8bb 9613 break;
2c0262af
FB
9614 }
9615 }
9ee6e8bb
PB
9616 } else {
9617 /* Conditional branch. */
9618 op = (insn >> 22) & 0xf;
9619 /* Generate a conditional jump to next instruction. */
9620 s->condlabel = gen_new_label();
39fb730a 9621 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
9622 s->condjmp = 1;
9623
9624 /* offset[11:1] = insn[10:0] */
9625 offset = (insn & 0x7ff) << 1;
9626 /* offset[17:12] = insn[21:16]. */
9627 offset |= (insn & 0x003f0000) >> 4;
9628 /* offset[31:20] = insn[26]. */
9629 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9630 /* offset[18] = insn[13]. */
9631 offset |= (insn & (1 << 13)) << 5;
9632 /* offset[19] = insn[11]. */
9633 offset |= (insn & (1 << 11)) << 8;
9634
9635 /* jump to the offset */
b0109805 9636 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
9637 }
9638 } else {
9639 /* Data processing immediate. */
9640 if (insn & (1 << 25)) {
9641 if (insn & (1 << 24)) {
9642 if (insn & (1 << 20))
9643 goto illegal_op;
9644 /* Bitfield/Saturate. */
9645 op = (insn >> 21) & 7;
9646 imm = insn & 0x1f;
9647 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 9648 if (rn == 15) {
7d1b0095 9649 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
9650 tcg_gen_movi_i32(tmp, 0);
9651 } else {
9652 tmp = load_reg(s, rn);
9653 }
9ee6e8bb
PB
9654 switch (op) {
9655 case 2: /* Signed bitfield extract. */
9656 imm++;
9657 if (shift + imm > 32)
9658 goto illegal_op;
9659 if (imm < 32)
6ddbc6e4 9660 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
9661 break;
9662 case 6: /* Unsigned bitfield extract. */
9663 imm++;
9664 if (shift + imm > 32)
9665 goto illegal_op;
9666 if (imm < 32)
6ddbc6e4 9667 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
9668 break;
9669 case 3: /* Bitfield insert/clear. */
9670 if (imm < shift)
9671 goto illegal_op;
9672 imm = imm + 1 - shift;
9673 if (imm != 32) {
6ddbc6e4 9674 tmp2 = load_reg(s, rd);
d593c48e 9675 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 9676 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9677 }
9678 break;
9679 case 7:
9680 goto illegal_op;
9681 default: /* Saturate. */
9ee6e8bb
PB
9682 if (shift) {
9683 if (op & 1)
6ddbc6e4 9684 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9685 else
6ddbc6e4 9686 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 9687 }
6ddbc6e4 9688 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
9689 if (op & 4) {
9690 /* Unsigned. */
9ee6e8bb 9691 if ((op & 1) && shift == 0)
9ef39277 9692 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9693 else
9ef39277 9694 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 9695 } else {
9ee6e8bb 9696 /* Signed. */
9ee6e8bb 9697 if ((op & 1) && shift == 0)
9ef39277 9698 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9699 else
9ef39277 9700 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 9701 }
b75263d6 9702 tcg_temp_free_i32(tmp2);
9ee6e8bb 9703 break;
2c0262af 9704 }
6ddbc6e4 9705 store_reg(s, rd, tmp);
9ee6e8bb
PB
9706 } else {
9707 imm = ((insn & 0x04000000) >> 15)
9708 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9709 if (insn & (1 << 22)) {
9710 /* 16-bit immediate. */
9711 imm |= (insn >> 4) & 0xf000;
9712 if (insn & (1 << 23)) {
9713 /* movt */
5e3f878a 9714 tmp = load_reg(s, rd);
86831435 9715 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9716 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9717 } else {
9ee6e8bb 9718 /* movw */
7d1b0095 9719 tmp = tcg_temp_new_i32();
5e3f878a 9720 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9721 }
9722 } else {
9ee6e8bb
PB
9723 /* Add/sub 12-bit immediate. */
9724 if (rn == 15) {
b0109805 9725 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9726 if (insn & (1 << 23))
b0109805 9727 offset -= imm;
9ee6e8bb 9728 else
b0109805 9729 offset += imm;
7d1b0095 9730 tmp = tcg_temp_new_i32();
5e3f878a 9731 tcg_gen_movi_i32(tmp, offset);
2c0262af 9732 } else {
5e3f878a 9733 tmp = load_reg(s, rn);
9ee6e8bb 9734 if (insn & (1 << 23))
5e3f878a 9735 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 9736 else
5e3f878a 9737 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 9738 }
9ee6e8bb 9739 }
5e3f878a 9740 store_reg(s, rd, tmp);
191abaa2 9741 }
9ee6e8bb
PB
9742 } else {
9743 int shifter_out = 0;
9744 /* modified 12-bit immediate. */
9745 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9746 imm = (insn & 0xff);
9747 switch (shift) {
9748 case 0: /* XY */
9749 /* Nothing to do. */
9750 break;
9751 case 1: /* 00XY00XY */
9752 imm |= imm << 16;
9753 break;
9754 case 2: /* XY00XY00 */
9755 imm |= imm << 16;
9756 imm <<= 8;
9757 break;
9758 case 3: /* XYXYXYXY */
9759 imm |= imm << 16;
9760 imm |= imm << 8;
9761 break;
9762 default: /* Rotated constant. */
9763 shift = (shift << 1) | (imm >> 7);
9764 imm |= 0x80;
9765 imm = imm << (32 - shift);
9766 shifter_out = 1;
9767 break;
b5ff1b31 9768 }
7d1b0095 9769 tmp2 = tcg_temp_new_i32();
3174f8e9 9770 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9771 rn = (insn >> 16) & 0xf;
3174f8e9 9772 if (rn == 15) {
7d1b0095 9773 tmp = tcg_temp_new_i32();
3174f8e9
FN
9774 tcg_gen_movi_i32(tmp, 0);
9775 } else {
9776 tmp = load_reg(s, rn);
9777 }
9ee6e8bb
PB
9778 op = (insn >> 21) & 0xf;
9779 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9780 shifter_out, tmp, tmp2))
9ee6e8bb 9781 goto illegal_op;
7d1b0095 9782 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9783 rd = (insn >> 8) & 0xf;
9784 if (rd != 15) {
3174f8e9
FN
9785 store_reg(s, rd, tmp);
9786 } else {
7d1b0095 9787 tcg_temp_free_i32(tmp);
2c0262af 9788 }
2c0262af 9789 }
9ee6e8bb
PB
9790 }
9791 break;
9792 case 12: /* Load/store single data item. */
9793 {
9794 int postinc = 0;
9795 int writeback = 0;
b0109805 9796 int user;
9ee6e8bb
PB
9797 if ((insn & 0x01100000) == 0x01000000) {
9798 if (disas_neon_ls_insn(env, s, insn))
c1713132 9799 goto illegal_op;
9ee6e8bb
PB
9800 break;
9801 }
a2fdc890
PM
9802 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9803 if (rs == 15) {
9804 if (!(insn & (1 << 20))) {
9805 goto illegal_op;
9806 }
9807 if (op != 2) {
9808 /* Byte or halfword load space with dest == r15 : memory hints.
9809 * Catch them early so we don't emit pointless addressing code.
9810 * This space is a mix of:
9811 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9812 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9813 * cores)
9814 * unallocated hints, which must be treated as NOPs
9815 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9816 * which is easiest for the decoding logic
9817 * Some space which must UNDEF
9818 */
9819 int op1 = (insn >> 23) & 3;
9820 int op2 = (insn >> 6) & 0x3f;
9821 if (op & 2) {
9822 goto illegal_op;
9823 }
9824 if (rn == 15) {
02afbf64
PM
9825 /* UNPREDICTABLE, unallocated hint or
9826 * PLD/PLDW/PLI (literal)
9827 */
a2fdc890
PM
9828 return 0;
9829 }
9830 if (op1 & 1) {
02afbf64 9831 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9832 }
9833 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9834 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9835 }
9836 /* UNDEF space, or an UNPREDICTABLE */
9837 return 1;
9838 }
9839 }
b0109805 9840 user = IS_USER(s);
9ee6e8bb 9841 if (rn == 15) {
7d1b0095 9842 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9843 /* PC relative. */
9844 /* s->pc has already been incremented by 4. */
9845 imm = s->pc & 0xfffffffc;
9846 if (insn & (1 << 23))
9847 imm += insn & 0xfff;
9848 else
9849 imm -= insn & 0xfff;
b0109805 9850 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9851 } else {
b0109805 9852 addr = load_reg(s, rn);
9ee6e8bb
PB
9853 if (insn & (1 << 23)) {
9854 /* Positive offset. */
9855 imm = insn & 0xfff;
b0109805 9856 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9857 } else {
9ee6e8bb 9858 imm = insn & 0xff;
2a0308c5
PM
9859 switch ((insn >> 8) & 0xf) {
9860 case 0x0: /* Shifted Register. */
9ee6e8bb 9861 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9862 if (shift > 3) {
9863 tcg_temp_free_i32(addr);
18c9b560 9864 goto illegal_op;
2a0308c5 9865 }
b26eefb6 9866 tmp = load_reg(s, rm);
9ee6e8bb 9867 if (shift)
b26eefb6 9868 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9869 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9870 tcg_temp_free_i32(tmp);
9ee6e8bb 9871 break;
2a0308c5 9872 case 0xc: /* Negative offset. */
b0109805 9873 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9874 break;
2a0308c5 9875 case 0xe: /* User privilege. */
b0109805
PB
9876 tcg_gen_addi_i32(addr, addr, imm);
9877 user = 1;
9ee6e8bb 9878 break;
2a0308c5 9879 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9880 imm = -imm;
9881 /* Fall through. */
2a0308c5 9882 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9883 postinc = 1;
9884 writeback = 1;
9885 break;
2a0308c5 9886 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9887 imm = -imm;
9888 /* Fall through. */
2a0308c5 9889 case 0xf: /* Pre-increment. */
b0109805 9890 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9891 writeback = 1;
9892 break;
9893 default:
2a0308c5 9894 tcg_temp_free_i32(addr);
b7bcbe95 9895 goto illegal_op;
9ee6e8bb
PB
9896 }
9897 }
9898 }
9ee6e8bb
PB
9899 if (insn & (1 << 20)) {
9900 /* Load. */
5a839c0d 9901 tmp = tcg_temp_new_i32();
a2fdc890 9902 switch (op) {
5a839c0d 9903 case 0:
08307563 9904 gen_aa32_ld8u(tmp, addr, user);
5a839c0d
PM
9905 break;
9906 case 4:
08307563 9907 gen_aa32_ld8s(tmp, addr, user);
5a839c0d
PM
9908 break;
9909 case 1:
08307563 9910 gen_aa32_ld16u(tmp, addr, user);
5a839c0d
PM
9911 break;
9912 case 5:
08307563 9913 gen_aa32_ld16s(tmp, addr, user);
5a839c0d
PM
9914 break;
9915 case 2:
08307563 9916 gen_aa32_ld32u(tmp, addr, user);
5a839c0d 9917 break;
2a0308c5 9918 default:
5a839c0d 9919 tcg_temp_free_i32(tmp);
2a0308c5
PM
9920 tcg_temp_free_i32(addr);
9921 goto illegal_op;
a2fdc890
PM
9922 }
9923 if (rs == 15) {
9924 gen_bx(s, tmp);
9ee6e8bb 9925 } else {
a2fdc890 9926 store_reg(s, rs, tmp);
9ee6e8bb
PB
9927 }
9928 } else {
9929 /* Store. */
b0109805 9930 tmp = load_reg(s, rs);
9ee6e8bb 9931 switch (op) {
5a839c0d 9932 case 0:
08307563 9933 gen_aa32_st8(tmp, addr, user);
5a839c0d
PM
9934 break;
9935 case 1:
08307563 9936 gen_aa32_st16(tmp, addr, user);
5a839c0d
PM
9937 break;
9938 case 2:
08307563 9939 gen_aa32_st32(tmp, addr, user);
5a839c0d 9940 break;
2a0308c5 9941 default:
5a839c0d 9942 tcg_temp_free_i32(tmp);
2a0308c5
PM
9943 tcg_temp_free_i32(addr);
9944 goto illegal_op;
b7bcbe95 9945 }
5a839c0d 9946 tcg_temp_free_i32(tmp);
2c0262af 9947 }
9ee6e8bb 9948 if (postinc)
b0109805
PB
9949 tcg_gen_addi_i32(addr, addr, imm);
9950 if (writeback) {
9951 store_reg(s, rn, addr);
9952 } else {
7d1b0095 9953 tcg_temp_free_i32(addr);
b0109805 9954 }
9ee6e8bb
PB
9955 }
9956 break;
9957 default:
9958 goto illegal_op;
2c0262af 9959 }
9ee6e8bb
PB
9960 return 0;
9961illegal_op:
9962 return 1;
2c0262af
FB
9963}
9964
0ecb72a5 9965static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9966{
9967 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9968 int32_t offset;
9969 int i;
39d5492a
PM
9970 TCGv_i32 tmp;
9971 TCGv_i32 tmp2;
9972 TCGv_i32 addr;
99c475ab 9973
9ee6e8bb
PB
9974 if (s->condexec_mask) {
9975 cond = s->condexec_cond;
bedd2912
JB
9976 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9977 s->condlabel = gen_new_label();
39fb730a 9978 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
9979 s->condjmp = 1;
9980 }
9ee6e8bb
PB
9981 }
9982
d31dd73e 9983 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9984 s->pc += 2;
b5ff1b31 9985
99c475ab
FB
9986 switch (insn >> 12) {
9987 case 0: case 1:
396e467c 9988
99c475ab
FB
9989 rd = insn & 7;
9990 op = (insn >> 11) & 3;
9991 if (op == 3) {
9992 /* add/subtract */
9993 rn = (insn >> 3) & 7;
396e467c 9994 tmp = load_reg(s, rn);
99c475ab
FB
9995 if (insn & (1 << 10)) {
9996 /* immediate */
7d1b0095 9997 tmp2 = tcg_temp_new_i32();
396e467c 9998 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9999 } else {
10000 /* reg */
10001 rm = (insn >> 6) & 7;
396e467c 10002 tmp2 = load_reg(s, rm);
99c475ab 10003 }
9ee6e8bb
PB
10004 if (insn & (1 << 9)) {
10005 if (s->condexec_mask)
396e467c 10006 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10007 else
72485ec4 10008 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10009 } else {
10010 if (s->condexec_mask)
396e467c 10011 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10012 else
72485ec4 10013 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10014 }
7d1b0095 10015 tcg_temp_free_i32(tmp2);
396e467c 10016 store_reg(s, rd, tmp);
99c475ab
FB
10017 } else {
10018 /* shift immediate */
10019 rm = (insn >> 3) & 7;
10020 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10021 tmp = load_reg(s, rm);
10022 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10023 if (!s->condexec_mask)
10024 gen_logic_CC(tmp);
10025 store_reg(s, rd, tmp);
99c475ab
FB
10026 }
10027 break;
10028 case 2: case 3:
10029 /* arithmetic large immediate */
10030 op = (insn >> 11) & 3;
10031 rd = (insn >> 8) & 0x7;
396e467c 10032 if (op == 0) { /* mov */
7d1b0095 10033 tmp = tcg_temp_new_i32();
396e467c 10034 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10035 if (!s->condexec_mask)
396e467c
FN
10036 gen_logic_CC(tmp);
10037 store_reg(s, rd, tmp);
10038 } else {
10039 tmp = load_reg(s, rd);
7d1b0095 10040 tmp2 = tcg_temp_new_i32();
396e467c
FN
10041 tcg_gen_movi_i32(tmp2, insn & 0xff);
10042 switch (op) {
10043 case 1: /* cmp */
72485ec4 10044 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10045 tcg_temp_free_i32(tmp);
10046 tcg_temp_free_i32(tmp2);
396e467c
FN
10047 break;
10048 case 2: /* add */
10049 if (s->condexec_mask)
10050 tcg_gen_add_i32(tmp, tmp, tmp2);
10051 else
72485ec4 10052 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10053 tcg_temp_free_i32(tmp2);
396e467c
FN
10054 store_reg(s, rd, tmp);
10055 break;
10056 case 3: /* sub */
10057 if (s->condexec_mask)
10058 tcg_gen_sub_i32(tmp, tmp, tmp2);
10059 else
72485ec4 10060 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10061 tcg_temp_free_i32(tmp2);
396e467c
FN
10062 store_reg(s, rd, tmp);
10063 break;
10064 }
99c475ab 10065 }
99c475ab
FB
10066 break;
10067 case 4:
10068 if (insn & (1 << 11)) {
10069 rd = (insn >> 8) & 7;
5899f386
FB
10070 /* load pc-relative. Bit 1 of PC is ignored. */
10071 val = s->pc + 2 + ((insn & 0xff) * 4);
10072 val &= ~(uint32_t)2;
7d1b0095 10073 addr = tcg_temp_new_i32();
b0109805 10074 tcg_gen_movi_i32(addr, val);
c40c8556 10075 tmp = tcg_temp_new_i32();
08307563 10076 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7d1b0095 10077 tcg_temp_free_i32(addr);
b0109805 10078 store_reg(s, rd, tmp);
99c475ab
FB
10079 break;
10080 }
10081 if (insn & (1 << 10)) {
10082 /* data processing extended or blx */
10083 rd = (insn & 7) | ((insn >> 4) & 8);
10084 rm = (insn >> 3) & 0xf;
10085 op = (insn >> 8) & 3;
10086 switch (op) {
10087 case 0: /* add */
396e467c
FN
10088 tmp = load_reg(s, rd);
10089 tmp2 = load_reg(s, rm);
10090 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10091 tcg_temp_free_i32(tmp2);
396e467c 10092 store_reg(s, rd, tmp);
99c475ab
FB
10093 break;
10094 case 1: /* cmp */
396e467c
FN
10095 tmp = load_reg(s, rd);
10096 tmp2 = load_reg(s, rm);
72485ec4 10097 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10098 tcg_temp_free_i32(tmp2);
10099 tcg_temp_free_i32(tmp);
99c475ab
FB
10100 break;
10101 case 2: /* mov/cpy */
396e467c
FN
10102 tmp = load_reg(s, rm);
10103 store_reg(s, rd, tmp);
99c475ab
FB
10104 break;
10105 case 3:/* branch [and link] exchange thumb register */
b0109805 10106 tmp = load_reg(s, rm);
99c475ab 10107 if (insn & (1 << 7)) {
be5e7a76 10108 ARCH(5);
99c475ab 10109 val = (uint32_t)s->pc | 1;
7d1b0095 10110 tmp2 = tcg_temp_new_i32();
b0109805
PB
10111 tcg_gen_movi_i32(tmp2, val);
10112 store_reg(s, 14, tmp2);
99c475ab 10113 }
be5e7a76 10114 /* already thumb, no need to check */
d9ba4830 10115 gen_bx(s, tmp);
99c475ab
FB
10116 break;
10117 }
10118 break;
10119 }
10120
10121 /* data processing register */
10122 rd = insn & 7;
10123 rm = (insn >> 3) & 7;
10124 op = (insn >> 6) & 0xf;
10125 if (op == 2 || op == 3 || op == 4 || op == 7) {
10126 /* the shift/rotate ops want the operands backwards */
10127 val = rm;
10128 rm = rd;
10129 rd = val;
10130 val = 1;
10131 } else {
10132 val = 0;
10133 }
10134
396e467c 10135 if (op == 9) { /* neg */
7d1b0095 10136 tmp = tcg_temp_new_i32();
396e467c
FN
10137 tcg_gen_movi_i32(tmp, 0);
10138 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10139 tmp = load_reg(s, rd);
10140 } else {
39d5492a 10141 TCGV_UNUSED_I32(tmp);
396e467c 10142 }
99c475ab 10143
396e467c 10144 tmp2 = load_reg(s, rm);
5899f386 10145 switch (op) {
99c475ab 10146 case 0x0: /* and */
396e467c 10147 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 10148 if (!s->condexec_mask)
396e467c 10149 gen_logic_CC(tmp);
99c475ab
FB
10150 break;
10151 case 0x1: /* eor */
396e467c 10152 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 10153 if (!s->condexec_mask)
396e467c 10154 gen_logic_CC(tmp);
99c475ab
FB
10155 break;
10156 case 0x2: /* lsl */
9ee6e8bb 10157 if (s->condexec_mask) {
365af80e 10158 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 10159 } else {
9ef39277 10160 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10161 gen_logic_CC(tmp2);
9ee6e8bb 10162 }
99c475ab
FB
10163 break;
10164 case 0x3: /* lsr */
9ee6e8bb 10165 if (s->condexec_mask) {
365af80e 10166 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 10167 } else {
9ef39277 10168 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10169 gen_logic_CC(tmp2);
9ee6e8bb 10170 }
99c475ab
FB
10171 break;
10172 case 0x4: /* asr */
9ee6e8bb 10173 if (s->condexec_mask) {
365af80e 10174 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 10175 } else {
9ef39277 10176 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10177 gen_logic_CC(tmp2);
9ee6e8bb 10178 }
99c475ab
FB
10179 break;
10180 case 0x5: /* adc */
49b4c31e 10181 if (s->condexec_mask) {
396e467c 10182 gen_adc(tmp, tmp2);
49b4c31e
RH
10183 } else {
10184 gen_adc_CC(tmp, tmp, tmp2);
10185 }
99c475ab
FB
10186 break;
10187 case 0x6: /* sbc */
2de68a49 10188 if (s->condexec_mask) {
396e467c 10189 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
10190 } else {
10191 gen_sbc_CC(tmp, tmp, tmp2);
10192 }
99c475ab
FB
10193 break;
10194 case 0x7: /* ror */
9ee6e8bb 10195 if (s->condexec_mask) {
f669df27
AJ
10196 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10197 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 10198 } else {
9ef39277 10199 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10200 gen_logic_CC(tmp2);
9ee6e8bb 10201 }
99c475ab
FB
10202 break;
10203 case 0x8: /* tst */
396e467c
FN
10204 tcg_gen_and_i32(tmp, tmp, tmp2);
10205 gen_logic_CC(tmp);
99c475ab 10206 rd = 16;
5899f386 10207 break;
99c475ab 10208 case 0x9: /* neg */
9ee6e8bb 10209 if (s->condexec_mask)
396e467c 10210 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 10211 else
72485ec4 10212 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10213 break;
10214 case 0xa: /* cmp */
72485ec4 10215 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10216 rd = 16;
10217 break;
10218 case 0xb: /* cmn */
72485ec4 10219 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
10220 rd = 16;
10221 break;
10222 case 0xc: /* orr */
396e467c 10223 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 10224 if (!s->condexec_mask)
396e467c 10225 gen_logic_CC(tmp);
99c475ab
FB
10226 break;
10227 case 0xd: /* mul */
7b2919a0 10228 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 10229 if (!s->condexec_mask)
396e467c 10230 gen_logic_CC(tmp);
99c475ab
FB
10231 break;
10232 case 0xe: /* bic */
f669df27 10233 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 10234 if (!s->condexec_mask)
396e467c 10235 gen_logic_CC(tmp);
99c475ab
FB
10236 break;
10237 case 0xf: /* mvn */
396e467c 10238 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 10239 if (!s->condexec_mask)
396e467c 10240 gen_logic_CC(tmp2);
99c475ab 10241 val = 1;
5899f386 10242 rm = rd;
99c475ab
FB
10243 break;
10244 }
10245 if (rd != 16) {
396e467c
FN
10246 if (val) {
10247 store_reg(s, rm, tmp2);
10248 if (op != 0xf)
7d1b0095 10249 tcg_temp_free_i32(tmp);
396e467c
FN
10250 } else {
10251 store_reg(s, rd, tmp);
7d1b0095 10252 tcg_temp_free_i32(tmp2);
396e467c
FN
10253 }
10254 } else {
7d1b0095
PM
10255 tcg_temp_free_i32(tmp);
10256 tcg_temp_free_i32(tmp2);
99c475ab
FB
10257 }
10258 break;
10259
10260 case 5:
10261 /* load/store register offset. */
10262 rd = insn & 7;
10263 rn = (insn >> 3) & 7;
10264 rm = (insn >> 6) & 7;
10265 op = (insn >> 9) & 7;
b0109805 10266 addr = load_reg(s, rn);
b26eefb6 10267 tmp = load_reg(s, rm);
b0109805 10268 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10269 tcg_temp_free_i32(tmp);
99c475ab 10270
c40c8556 10271 if (op < 3) { /* store */
b0109805 10272 tmp = load_reg(s, rd);
c40c8556
PM
10273 } else {
10274 tmp = tcg_temp_new_i32();
10275 }
99c475ab
FB
10276
10277 switch (op) {
10278 case 0: /* str */
08307563 10279 gen_aa32_st32(tmp, addr, IS_USER(s));
99c475ab
FB
10280 break;
10281 case 1: /* strh */
08307563 10282 gen_aa32_st16(tmp, addr, IS_USER(s));
99c475ab
FB
10283 break;
10284 case 2: /* strb */
08307563 10285 gen_aa32_st8(tmp, addr, IS_USER(s));
99c475ab
FB
10286 break;
10287 case 3: /* ldrsb */
08307563 10288 gen_aa32_ld8s(tmp, addr, IS_USER(s));
99c475ab
FB
10289 break;
10290 case 4: /* ldr */
08307563 10291 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
10292 break;
10293 case 5: /* ldrh */
08307563 10294 gen_aa32_ld16u(tmp, addr, IS_USER(s));
99c475ab
FB
10295 break;
10296 case 6: /* ldrb */
08307563 10297 gen_aa32_ld8u(tmp, addr, IS_USER(s));
99c475ab
FB
10298 break;
10299 case 7: /* ldrsh */
08307563 10300 gen_aa32_ld16s(tmp, addr, IS_USER(s));
99c475ab
FB
10301 break;
10302 }
c40c8556 10303 if (op >= 3) { /* load */
b0109805 10304 store_reg(s, rd, tmp);
c40c8556
PM
10305 } else {
10306 tcg_temp_free_i32(tmp);
10307 }
7d1b0095 10308 tcg_temp_free_i32(addr);
99c475ab
FB
10309 break;
10310
10311 case 6:
10312 /* load/store word immediate offset */
10313 rd = insn & 7;
10314 rn = (insn >> 3) & 7;
b0109805 10315 addr = load_reg(s, rn);
99c475ab 10316 val = (insn >> 4) & 0x7c;
b0109805 10317 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10318
10319 if (insn & (1 << 11)) {
10320 /* load */
c40c8556 10321 tmp = tcg_temp_new_i32();
08307563 10322 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10323 store_reg(s, rd, tmp);
99c475ab
FB
10324 } else {
10325 /* store */
b0109805 10326 tmp = load_reg(s, rd);
08307563 10327 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10328 tcg_temp_free_i32(tmp);
99c475ab 10329 }
7d1b0095 10330 tcg_temp_free_i32(addr);
99c475ab
FB
10331 break;
10332
10333 case 7:
10334 /* load/store byte immediate offset */
10335 rd = insn & 7;
10336 rn = (insn >> 3) & 7;
b0109805 10337 addr = load_reg(s, rn);
99c475ab 10338 val = (insn >> 6) & 0x1f;
b0109805 10339 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10340
10341 if (insn & (1 << 11)) {
10342 /* load */
c40c8556 10343 tmp = tcg_temp_new_i32();
08307563 10344 gen_aa32_ld8u(tmp, addr, IS_USER(s));
b0109805 10345 store_reg(s, rd, tmp);
99c475ab
FB
10346 } else {
10347 /* store */
b0109805 10348 tmp = load_reg(s, rd);
08307563 10349 gen_aa32_st8(tmp, addr, IS_USER(s));
c40c8556 10350 tcg_temp_free_i32(tmp);
99c475ab 10351 }
7d1b0095 10352 tcg_temp_free_i32(addr);
99c475ab
FB
10353 break;
10354
10355 case 8:
10356 /* load/store halfword immediate offset */
10357 rd = insn & 7;
10358 rn = (insn >> 3) & 7;
b0109805 10359 addr = load_reg(s, rn);
99c475ab 10360 val = (insn >> 5) & 0x3e;
b0109805 10361 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10362
10363 if (insn & (1 << 11)) {
10364 /* load */
c40c8556 10365 tmp = tcg_temp_new_i32();
08307563 10366 gen_aa32_ld16u(tmp, addr, IS_USER(s));
b0109805 10367 store_reg(s, rd, tmp);
99c475ab
FB
10368 } else {
10369 /* store */
b0109805 10370 tmp = load_reg(s, rd);
08307563 10371 gen_aa32_st16(tmp, addr, IS_USER(s));
c40c8556 10372 tcg_temp_free_i32(tmp);
99c475ab 10373 }
7d1b0095 10374 tcg_temp_free_i32(addr);
99c475ab
FB
10375 break;
10376
10377 case 9:
10378 /* load/store from stack */
10379 rd = (insn >> 8) & 7;
b0109805 10380 addr = load_reg(s, 13);
99c475ab 10381 val = (insn & 0xff) * 4;
b0109805 10382 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10383
10384 if (insn & (1 << 11)) {
10385 /* load */
c40c8556 10386 tmp = tcg_temp_new_i32();
08307563 10387 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10388 store_reg(s, rd, tmp);
99c475ab
FB
10389 } else {
10390 /* store */
b0109805 10391 tmp = load_reg(s, rd);
08307563 10392 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10393 tcg_temp_free_i32(tmp);
99c475ab 10394 }
7d1b0095 10395 tcg_temp_free_i32(addr);
99c475ab
FB
10396 break;
10397
10398 case 10:
10399 /* add to high reg */
10400 rd = (insn >> 8) & 7;
5899f386
FB
10401 if (insn & (1 << 11)) {
10402 /* SP */
5e3f878a 10403 tmp = load_reg(s, 13);
5899f386
FB
10404 } else {
10405 /* PC. bit 1 is ignored. */
7d1b0095 10406 tmp = tcg_temp_new_i32();
5e3f878a 10407 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 10408 }
99c475ab 10409 val = (insn & 0xff) * 4;
5e3f878a
PB
10410 tcg_gen_addi_i32(tmp, tmp, val);
10411 store_reg(s, rd, tmp);
99c475ab
FB
10412 break;
10413
10414 case 11:
10415 /* misc */
10416 op = (insn >> 8) & 0xf;
10417 switch (op) {
10418 case 0:
10419 /* adjust stack pointer */
b26eefb6 10420 tmp = load_reg(s, 13);
99c475ab
FB
10421 val = (insn & 0x7f) * 4;
10422 if (insn & (1 << 7))
6a0d8a1d 10423 val = -(int32_t)val;
b26eefb6
PB
10424 tcg_gen_addi_i32(tmp, tmp, val);
10425 store_reg(s, 13, tmp);
99c475ab
FB
10426 break;
10427
9ee6e8bb
PB
10428 case 2: /* sign/zero extend. */
10429 ARCH(6);
10430 rd = insn & 7;
10431 rm = (insn >> 3) & 7;
b0109805 10432 tmp = load_reg(s, rm);
9ee6e8bb 10433 switch ((insn >> 6) & 3) {
b0109805
PB
10434 case 0: gen_sxth(tmp); break;
10435 case 1: gen_sxtb(tmp); break;
10436 case 2: gen_uxth(tmp); break;
10437 case 3: gen_uxtb(tmp); break;
9ee6e8bb 10438 }
b0109805 10439 store_reg(s, rd, tmp);
9ee6e8bb 10440 break;
99c475ab
FB
10441 case 4: case 5: case 0xc: case 0xd:
10442 /* push/pop */
b0109805 10443 addr = load_reg(s, 13);
5899f386
FB
10444 if (insn & (1 << 8))
10445 offset = 4;
99c475ab 10446 else
5899f386
FB
10447 offset = 0;
10448 for (i = 0; i < 8; i++) {
10449 if (insn & (1 << i))
10450 offset += 4;
10451 }
10452 if ((insn & (1 << 11)) == 0) {
b0109805 10453 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10454 }
99c475ab
FB
10455 for (i = 0; i < 8; i++) {
10456 if (insn & (1 << i)) {
10457 if (insn & (1 << 11)) {
10458 /* pop */
c40c8556 10459 tmp = tcg_temp_new_i32();
08307563 10460 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10461 store_reg(s, i, tmp);
99c475ab
FB
10462 } else {
10463 /* push */
b0109805 10464 tmp = load_reg(s, i);
08307563 10465 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10466 tcg_temp_free_i32(tmp);
99c475ab 10467 }
5899f386 10468 /* advance to the next address. */
b0109805 10469 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10470 }
10471 }
39d5492a 10472 TCGV_UNUSED_I32(tmp);
99c475ab
FB
10473 if (insn & (1 << 8)) {
10474 if (insn & (1 << 11)) {
10475 /* pop pc */
c40c8556 10476 tmp = tcg_temp_new_i32();
08307563 10477 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
10478 /* don't set the pc until the rest of the instruction
10479 has completed */
10480 } else {
10481 /* push lr */
b0109805 10482 tmp = load_reg(s, 14);
08307563 10483 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10484 tcg_temp_free_i32(tmp);
99c475ab 10485 }
b0109805 10486 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 10487 }
5899f386 10488 if ((insn & (1 << 11)) == 0) {
b0109805 10489 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10490 }
99c475ab 10491 /* write back the new stack pointer */
b0109805 10492 store_reg(s, 13, addr);
99c475ab 10493 /* set the new PC value */
be5e7a76
DES
10494 if ((insn & 0x0900) == 0x0900) {
10495 store_reg_from_load(env, s, 15, tmp);
10496 }
99c475ab
FB
10497 break;
10498
9ee6e8bb
PB
10499 case 1: case 3: case 9: case 11: /* czb */
10500 rm = insn & 7;
d9ba4830 10501 tmp = load_reg(s, rm);
9ee6e8bb
PB
10502 s->condlabel = gen_new_label();
10503 s->condjmp = 1;
10504 if (insn & (1 << 11))
cb63669a 10505 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 10506 else
cb63669a 10507 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 10508 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10509 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10510 val = (uint32_t)s->pc + 2;
10511 val += offset;
10512 gen_jmp(s, val);
10513 break;
10514
10515 case 15: /* IT, nop-hint. */
10516 if ((insn & 0xf) == 0) {
10517 gen_nop_hint(s, (insn >> 4) & 0xf);
10518 break;
10519 }
10520 /* If Then. */
10521 s->condexec_cond = (insn >> 4) & 0xe;
10522 s->condexec_mask = insn & 0x1f;
10523 /* No actual code generated for this insn, just setup state. */
10524 break;
10525
06c949e6 10526 case 0xe: /* bkpt */
d4a2dc67
PM
10527 {
10528 int imm8 = extract32(insn, 0, 8);
be5e7a76 10529 ARCH(5);
d4a2dc67 10530 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
06c949e6 10531 break;
d4a2dc67 10532 }
06c949e6 10533
9ee6e8bb
PB
10534 case 0xa: /* rev */
10535 ARCH(6);
10536 rn = (insn >> 3) & 0x7;
10537 rd = insn & 0x7;
b0109805 10538 tmp = load_reg(s, rn);
9ee6e8bb 10539 switch ((insn >> 6) & 3) {
66896cb8 10540 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
10541 case 1: gen_rev16(tmp); break;
10542 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
10543 default: goto illegal_op;
10544 }
b0109805 10545 store_reg(s, rd, tmp);
9ee6e8bb
PB
10546 break;
10547
d9e028c1
PM
10548 case 6:
10549 switch ((insn >> 5) & 7) {
10550 case 2:
10551 /* setend */
10552 ARCH(6);
10962fd5
PM
10553 if (((insn >> 3) & 1) != s->bswap_code) {
10554 /* Dynamic endianness switching not implemented. */
e0c270d9 10555 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
10556 goto illegal_op;
10557 }
9ee6e8bb 10558 break;
d9e028c1
PM
10559 case 3:
10560 /* cps */
10561 ARCH(6);
10562 if (IS_USER(s)) {
10563 break;
8984bd2e 10564 }
d9e028c1
PM
10565 if (IS_M(env)) {
10566 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10567 /* FAULTMASK */
10568 if (insn & 1) {
10569 addr = tcg_const_i32(19);
10570 gen_helper_v7m_msr(cpu_env, addr, tmp);
10571 tcg_temp_free_i32(addr);
10572 }
10573 /* PRIMASK */
10574 if (insn & 2) {
10575 addr = tcg_const_i32(16);
10576 gen_helper_v7m_msr(cpu_env, addr, tmp);
10577 tcg_temp_free_i32(addr);
10578 }
10579 tcg_temp_free_i32(tmp);
10580 gen_lookup_tb(s);
10581 } else {
10582 if (insn & (1 << 4)) {
10583 shift = CPSR_A | CPSR_I | CPSR_F;
10584 } else {
10585 shift = 0;
10586 }
10587 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 10588 }
d9e028c1
PM
10589 break;
10590 default:
10591 goto undef;
9ee6e8bb
PB
10592 }
10593 break;
10594
99c475ab
FB
10595 default:
10596 goto undef;
10597 }
10598 break;
10599
10600 case 12:
a7d3970d 10601 {
99c475ab 10602 /* load/store multiple */
39d5492a
PM
10603 TCGv_i32 loaded_var;
10604 TCGV_UNUSED_I32(loaded_var);
99c475ab 10605 rn = (insn >> 8) & 0x7;
b0109805 10606 addr = load_reg(s, rn);
99c475ab
FB
10607 for (i = 0; i < 8; i++) {
10608 if (insn & (1 << i)) {
99c475ab
FB
10609 if (insn & (1 << 11)) {
10610 /* load */
c40c8556 10611 tmp = tcg_temp_new_i32();
08307563 10612 gen_aa32_ld32u(tmp, addr, IS_USER(s));
a7d3970d
PM
10613 if (i == rn) {
10614 loaded_var = tmp;
10615 } else {
10616 store_reg(s, i, tmp);
10617 }
99c475ab
FB
10618 } else {
10619 /* store */
b0109805 10620 tmp = load_reg(s, i);
08307563 10621 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10622 tcg_temp_free_i32(tmp);
99c475ab 10623 }
5899f386 10624 /* advance to the next address */
b0109805 10625 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10626 }
10627 }
b0109805 10628 if ((insn & (1 << rn)) == 0) {
a7d3970d 10629 /* base reg not in list: base register writeback */
b0109805
PB
10630 store_reg(s, rn, addr);
10631 } else {
a7d3970d
PM
10632 /* base reg in list: if load, complete it now */
10633 if (insn & (1 << 11)) {
10634 store_reg(s, rn, loaded_var);
10635 }
7d1b0095 10636 tcg_temp_free_i32(addr);
b0109805 10637 }
99c475ab 10638 break;
a7d3970d 10639 }
99c475ab
FB
10640 case 13:
10641 /* conditional branch or swi */
10642 cond = (insn >> 8) & 0xf;
10643 if (cond == 0xe)
10644 goto undef;
10645
10646 if (cond == 0xf) {
10647 /* swi */
eaed129d 10648 gen_set_pc_im(s, s->pc);
d4a2dc67 10649 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 10650 s->is_jmp = DISAS_SWI;
99c475ab
FB
10651 break;
10652 }
10653 /* generate a conditional jump to next instruction */
e50e6a20 10654 s->condlabel = gen_new_label();
39fb730a 10655 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 10656 s->condjmp = 1;
99c475ab
FB
10657
10658 /* jump to the offset */
5899f386 10659 val = (uint32_t)s->pc + 2;
99c475ab 10660 offset = ((int32_t)insn << 24) >> 24;
5899f386 10661 val += offset << 1;
8aaca4c0 10662 gen_jmp(s, val);
99c475ab
FB
10663 break;
10664
10665 case 14:
358bf29e 10666 if (insn & (1 << 11)) {
9ee6e8bb
PB
10667 if (disas_thumb2_insn(env, s, insn))
10668 goto undef32;
358bf29e
PB
10669 break;
10670 }
9ee6e8bb 10671 /* unconditional branch */
99c475ab
FB
10672 val = (uint32_t)s->pc;
10673 offset = ((int32_t)insn << 21) >> 21;
10674 val += (offset << 1) + 2;
8aaca4c0 10675 gen_jmp(s, val);
99c475ab
FB
10676 break;
10677
10678 case 15:
9ee6e8bb 10679 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 10680 goto undef32;
9ee6e8bb 10681 break;
99c475ab
FB
10682 }
10683 return;
9ee6e8bb 10684undef32:
d4a2dc67 10685 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
10686 return;
10687illegal_op:
99c475ab 10688undef:
d4a2dc67 10689 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
99c475ab
FB
10690}
10691
2c0262af
FB
10692/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10693 basic block 'tb'. If search_pc is TRUE, also generate PC
10694 information for each intermediate instruction. */
5639c3f2 10695static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 10696 TranslationBlock *tb,
5639c3f2 10697 bool search_pc)
2c0262af 10698{
ed2803da 10699 CPUState *cs = CPU(cpu);
5639c3f2 10700 CPUARMState *env = &cpu->env;
2c0262af 10701 DisasContext dc1, *dc = &dc1;
a1d1bb31 10702 CPUBreakpoint *bp;
2c0262af
FB
10703 uint16_t *gen_opc_end;
10704 int j, lj;
0fa85d43 10705 target_ulong pc_start;
0a2461fa 10706 target_ulong next_page_start;
2e70f6ef
PB
10707 int num_insns;
10708 int max_insns;
3b46e624 10709
2c0262af 10710 /* generate intermediate code */
40f860cd
PM
10711
10712 /* The A64 decoder has its own top level loop, because it doesn't need
10713 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
10714 */
10715 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
10716 gen_intermediate_code_internal_a64(cpu, tb, search_pc);
10717 return;
10718 }
10719
0fa85d43 10720 pc_start = tb->pc;
3b46e624 10721
2c0262af
FB
10722 dc->tb = tb;
10723
92414b31 10724 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
10725
10726 dc->is_jmp = DISAS_NEXT;
10727 dc->pc = pc_start;
ed2803da 10728 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 10729 dc->condjmp = 0;
3926cc84 10730
40f860cd
PM
10731 dc->aarch64 = 0;
10732 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
10733 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
10734 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10735 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
3926cc84 10736#if !defined(CONFIG_USER_ONLY)
40f860cd 10737 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
3926cc84 10738#endif
40f860cd
PM
10739 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
10740 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10741 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
60322b39
PM
10742 dc->cp_regs = cpu->cp_regs;
10743 dc->current_pl = arm_current_pl(env);
a984e42c 10744 dc->features = env->features;
40f860cd 10745
a7812ae4
PB
10746 cpu_F0s = tcg_temp_new_i32();
10747 cpu_F1s = tcg_temp_new_i32();
10748 cpu_F0d = tcg_temp_new_i64();
10749 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
10750 cpu_V0 = cpu_F0d;
10751 cpu_V1 = cpu_F1d;
e677137d 10752 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 10753 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 10754 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 10755 lj = -1;
2e70f6ef
PB
10756 num_insns = 0;
10757 max_insns = tb->cflags & CF_COUNT_MASK;
10758 if (max_insns == 0)
10759 max_insns = CF_COUNT_MASK;
10760
806f352d 10761 gen_tb_start();
e12ce78d 10762
3849902c
PM
10763 tcg_clear_temp_count();
10764
e12ce78d
PM
10765 /* A note on handling of the condexec (IT) bits:
10766 *
10767 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 10768 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 10769 * (1) if the condexec bits are not already zero then we write
0ecb72a5 10770 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
10771 * to do it at the end of the block. (For example if we don't do this
10772 * it's hard to identify whether we can safely skip writing condexec
10773 * at the end of the TB, which we definitely want to do for the case
10774 * where a TB doesn't do anything with the IT state at all.)
10775 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 10776 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
10777 * This is done both for leaving the TB at the end, and for leaving
10778 * it because of an exception we know will happen, which is done in
10779 * gen_exception_insn(). The latter is necessary because we need to
10780 * leave the TB with the PC/IT state just prior to execution of the
10781 * instruction which caused the exception.
10782 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 10783 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
10784 * This is handled in the same way as restoration of the
10785 * PC in these situations: we will be called again with search_pc=1
10786 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
10787 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10788 * this to restore the condexec bits.
e12ce78d
PM
10789 *
10790 * Note that there are no instructions which can read the condexec
10791 * bits, and none which can write non-static values to them, so
0ecb72a5 10792 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
10793 * middle of a TB.
10794 */
10795
9ee6e8bb
PB
10796 /* Reset the conditional execution bits immediately. This avoids
10797 complications trying to do it at the end of the block. */
98eac7ca 10798 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 10799 {
39d5492a 10800 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 10801 tcg_gen_movi_i32(tmp, 0);
d9ba4830 10802 store_cpu_field(tmp, condexec_bits);
8f01245e 10803 }
2c0262af 10804 do {
fbb4a2e3
PB
10805#ifdef CONFIG_USER_ONLY
10806 /* Intercept jump to the magic kernel page. */
40f860cd 10807 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
10808 /* We always get here via a jump, so know we are not in a
10809 conditional execution block. */
d4a2dc67 10810 gen_exception_internal(EXCP_KERNEL_TRAP);
fbb4a2e3
PB
10811 dc->is_jmp = DISAS_UPDATE;
10812 break;
10813 }
10814#else
9ee6e8bb
PB
10815 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10816 /* We always get here via a jump, so know we are not in a
10817 conditional execution block. */
d4a2dc67 10818 gen_exception_internal(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
10819 dc->is_jmp = DISAS_UPDATE;
10820 break;
9ee6e8bb
PB
10821 }
10822#endif
10823
f0c3c505
AF
10824 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
10825 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 10826 if (bp->pc == dc->pc) {
d4a2dc67 10827 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
10828 /* Advance PC so that clearing the breakpoint will
10829 invalidate this TB. */
10830 dc->pc += 2;
10831 goto done_generating;
1fddef4b
FB
10832 }
10833 }
10834 }
2c0262af 10835 if (search_pc) {
92414b31 10836 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
10837 if (lj < j) {
10838 lj++;
10839 while (lj < j)
ab1103de 10840 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 10841 }
25983cad 10842 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 10843 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 10844 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 10845 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 10846 }
e50e6a20 10847
2e70f6ef
PB
10848 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10849 gen_io_start();
10850
fdefe51c 10851 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
10852 tcg_gen_debug_insn_start(dc->pc);
10853 }
10854
40f860cd 10855 if (dc->thumb) {
9ee6e8bb
PB
10856 disas_thumb_insn(env, dc);
10857 if (dc->condexec_mask) {
10858 dc->condexec_cond = (dc->condexec_cond & 0xe)
10859 | ((dc->condexec_mask >> 4) & 1);
10860 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10861 if (dc->condexec_mask == 0) {
10862 dc->condexec_cond = 0;
10863 }
10864 }
10865 } else {
10866 disas_arm_insn(env, dc);
10867 }
e50e6a20
FB
10868
10869 if (dc->condjmp && !dc->is_jmp) {
10870 gen_set_label(dc->condlabel);
10871 dc->condjmp = 0;
10872 }
3849902c
PM
10873
10874 if (tcg_check_temp_count()) {
0a2461fa
AG
10875 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
10876 dc->pc);
3849902c
PM
10877 }
10878
aaf2d97d 10879 /* Translation stops when a conditional branch is encountered.
e50e6a20 10880 * Otherwise the subsequent code could get translated several times.
b5ff1b31 10881 * Also stop translation when a page boundary is reached. This
bf20dc07 10882 * ensures prefetch aborts occur at the right place. */
2e70f6ef 10883 num_insns ++;
efd7f486 10884 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 10885 !cs->singlestep_enabled &&
1b530a6d 10886 !singlestep &&
2e70f6ef
PB
10887 dc->pc < next_page_start &&
10888 num_insns < max_insns);
10889
10890 if (tb->cflags & CF_LAST_IO) {
10891 if (dc->condjmp) {
10892 /* FIXME: This can theoretically happen with self-modifying
10893 code. */
a47dddd7 10894 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
10895 }
10896 gen_io_end();
10897 }
9ee6e8bb 10898
b5ff1b31 10899 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
10900 instruction was a conditional branch or trap, and the PC has
10901 already been written. */
ed2803da 10902 if (unlikely(cs->singlestep_enabled)) {
8aaca4c0 10903 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 10904 if (dc->condjmp) {
9ee6e8bb
PB
10905 gen_set_condexec(dc);
10906 if (dc->is_jmp == DISAS_SWI) {
d4a2dc67 10907 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb 10908 } else {
d4a2dc67 10909 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 10910 }
e50e6a20
FB
10911 gen_set_label(dc->condlabel);
10912 }
10913 if (dc->condjmp || !dc->is_jmp) {
eaed129d 10914 gen_set_pc_im(dc, dc->pc);
e50e6a20 10915 dc->condjmp = 0;
8aaca4c0 10916 }
9ee6e8bb
PB
10917 gen_set_condexec(dc);
10918 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d4a2dc67 10919 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb
PB
10920 } else {
10921 /* FIXME: Single stepping a WFI insn will not halt
10922 the CPU. */
d4a2dc67 10923 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 10924 }
8aaca4c0 10925 } else {
9ee6e8bb
PB
10926 /* While branches must always occur at the end of an IT block,
10927 there are a few other things that can cause us to terminate
65626741 10928 the TB in the middle of an IT block:
9ee6e8bb
PB
10929 - Exception generating instructions (bkpt, swi, undefined).
10930 - Page boundaries.
10931 - Hardware watchpoints.
10932 Hardware breakpoints have already been handled and skip this code.
10933 */
10934 gen_set_condexec(dc);
8aaca4c0 10935 switch(dc->is_jmp) {
8aaca4c0 10936 case DISAS_NEXT:
6e256c93 10937 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10938 break;
10939 default:
10940 case DISAS_JUMP:
10941 case DISAS_UPDATE:
10942 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10943 tcg_gen_exit_tb(0);
8aaca4c0
FB
10944 break;
10945 case DISAS_TB_JUMP:
10946 /* nothing more to generate */
10947 break;
9ee6e8bb 10948 case DISAS_WFI:
1ce94f81 10949 gen_helper_wfi(cpu_env);
9ee6e8bb 10950 break;
72c1d3af
PM
10951 case DISAS_WFE:
10952 gen_helper_wfe(cpu_env);
10953 break;
9ee6e8bb 10954 case DISAS_SWI:
d4a2dc67 10955 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb 10956 break;
8aaca4c0 10957 }
e50e6a20
FB
10958 if (dc->condjmp) {
10959 gen_set_label(dc->condlabel);
9ee6e8bb 10960 gen_set_condexec(dc);
6e256c93 10961 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10962 dc->condjmp = 0;
10963 }
2c0262af 10964 }
2e70f6ef 10965
9ee6e8bb 10966done_generating:
806f352d 10967 gen_tb_end(tb, num_insns);
efd7f486 10968 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
10969
10970#ifdef DEBUG_DISAS
8fec2b8c 10971 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10972 qemu_log("----------------\n");
10973 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 10974 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 10975 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10976 qemu_log("\n");
2c0262af
FB
10977 }
10978#endif
b5ff1b31 10979 if (search_pc) {
92414b31 10980 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
10981 lj++;
10982 while (lj <= j)
ab1103de 10983 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 10984 } else {
2c0262af 10985 tb->size = dc->pc - pc_start;
2e70f6ef 10986 tb->icount = num_insns;
b5ff1b31 10987 }
2c0262af
FB
10988}
10989
0ecb72a5 10990void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10991{
5639c3f2 10992 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
10993}
10994
0ecb72a5 10995void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10996{
5639c3f2 10997 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
10998}
10999
b5ff1b31
FB
11000static const char *cpu_mode_names[16] = {
11001 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
11002 "???", "???", "???", "und", "???", "???", "???", "sys"
11003};
9ee6e8bb 11004
878096ee
AF
11005void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11006 int flags)
2c0262af 11007{
878096ee
AF
11008 ARMCPU *cpu = ARM_CPU(cs);
11009 CPUARMState *env = &cpu->env;
2c0262af 11010 int i;
b5ff1b31 11011 uint32_t psr;
2c0262af
FB
11012
11013 for(i=0;i<16;i++) {
7fe48483 11014 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 11015 if ((i % 4) == 3)
7fe48483 11016 cpu_fprintf(f, "\n");
2c0262af 11017 else
7fe48483 11018 cpu_fprintf(f, " ");
2c0262af 11019 }
b5ff1b31 11020 psr = cpsr_read(env);
687fa640
TS
11021 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
11022 psr,
b5ff1b31
FB
11023 psr & (1 << 31) ? 'N' : '-',
11024 psr & (1 << 30) ? 'Z' : '-',
11025 psr & (1 << 29) ? 'C' : '-',
11026 psr & (1 << 28) ? 'V' : '-',
5fafdf24 11027 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 11028 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 11029
f2617cfc
PM
11030 if (flags & CPU_DUMP_FPU) {
11031 int numvfpregs = 0;
11032 if (arm_feature(env, ARM_FEATURE_VFP)) {
11033 numvfpregs += 16;
11034 }
11035 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11036 numvfpregs += 16;
11037 }
11038 for (i = 0; i < numvfpregs; i++) {
11039 uint64_t v = float64_val(env->vfp.regs[i]);
11040 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11041 i * 2, (uint32_t)v,
11042 i * 2 + 1, (uint32_t)(v >> 32),
11043 i, v);
11044 }
11045 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 11046 }
2c0262af 11047}
a6b025d3 11048
0ecb72a5 11049void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 11050{
3926cc84
AG
11051 if (is_a64(env)) {
11052 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11053 env->condexec_bits = 0;
3926cc84
AG
11054 } else {
11055 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11056 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
3926cc84 11057 }
d2856f1a 11058}