]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
target-arm: mark a few integer helpers const and pure
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
2c0262af 28#include "disas.h"
57fec1fe 29#include "tcg-op.h"
79383c9c 30#include "qemu-log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
36#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38/* currently all emulated v5 cores are also v5TE, so don't bother */
39#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
40#define ENABLE_ARCH_5J 0
41#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 45
86753403 46#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 47
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
b90372ad 56 /* Thumb-2 conditional execution bits. */
9ee6e8bb
PB
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
d8fd2954 62 int bswap_code;
b5ff1b31
FB
63#if !defined(CONFIG_USER_ONLY)
64 int user;
65#endif
5df8bac1 66 int vfp_enabled;
69d1fc22
PM
67 int vec_len;
68 int vec_stride;
2c0262af
FB
69} DisasContext;
70
e12ce78d
PM
71static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
b5ff1b31
FB
73#if defined(CONFIG_USER_ONLY)
74#define IS_USER(s) 1
75#else
76#define IS_USER(s) (s->user)
77#endif
78
9ee6e8bb 79/* These instructions trap after executing, so defer them until after the
b90372ad 80 conditional execution state has been updated. */
9ee6e8bb
PB
81#define DISAS_WFI 4
82#define DISAS_SWI 5
2c0262af 83
a7812ae4 84static TCGv_ptr cpu_env;
ad69471c 85/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 86static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 87static TCGv_i32 cpu_R[16];
66c374de 88static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
426f5abc
PB
89static TCGv_i32 cpu_exclusive_addr;
90static TCGv_i32 cpu_exclusive_val;
91static TCGv_i32 cpu_exclusive_high;
92#ifdef CONFIG_USER_ONLY
93static TCGv_i32 cpu_exclusive_test;
94static TCGv_i32 cpu_exclusive_info;
95#endif
ad69471c 96
b26eefb6 97/* FIXME: These should be removed. */
a7812ae4
PB
98static TCGv cpu_F0s, cpu_F1s;
99static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 100
2e70f6ef
PB
101#include "gen-icount.h"
102
155c3eac
FN
103static const char *regnames[] =
104 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
105 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106
b26eefb6
PB
107/* initialize TCG globals. */
108void arm_translate_init(void)
109{
155c3eac
FN
110 int i;
111
a7812ae4
PB
112 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113
155c3eac
FN
114 for (i = 0; i < 16; i++) {
115 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 116 offsetof(CPUARMState, regs[i]),
155c3eac
FN
117 regnames[i]);
118 }
66c374de
AJ
119 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
120 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
121 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
122 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
123
426f5abc 124 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 125 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 126 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 127 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 128 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 129 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
130#ifdef CONFIG_USER_ONLY
131 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 132 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 133 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 134 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 135#endif
155c3eac 136
a7812ae4 137#define GEN_HELPER 2
7b59220e 138#include "helper.h"
b26eefb6
PB
139}
140
d9ba4830
PB
141static inline TCGv load_cpu_offset(int offset)
142{
7d1b0095 143 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
144 tcg_gen_ld_i32(tmp, cpu_env, offset);
145 return tmp;
146}
147
0ecb72a5 148#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830
PB
149
150static inline void store_cpu_offset(TCGv var, int offset)
151{
152 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 153 tcg_temp_free_i32(var);
d9ba4830
PB
154}
155
156#define store_cpu_field(var, name) \
0ecb72a5 157 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 158
b26eefb6
PB
159/* Set a variable to the value of a CPU register. */
160static void load_reg_var(DisasContext *s, TCGv var, int reg)
161{
162 if (reg == 15) {
163 uint32_t addr;
b90372ad 164 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
165 if (s->thumb)
166 addr = (long)s->pc + 2;
167 else
168 addr = (long)s->pc + 4;
169 tcg_gen_movi_i32(var, addr);
170 } else {
155c3eac 171 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
172 }
173}
174
175/* Create a new temporary and set it to the value of a CPU register. */
176static inline TCGv load_reg(DisasContext *s, int reg)
177{
7d1b0095 178 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
179 load_reg_var(s, tmp, reg);
180 return tmp;
181}
182
183/* Set a CPU register. The source must be a temporary and will be
184 marked as dead. */
185static void store_reg(DisasContext *s, int reg, TCGv var)
186{
187 if (reg == 15) {
188 tcg_gen_andi_i32(var, var, ~1);
189 s->is_jmp = DISAS_JUMP;
190 }
155c3eac 191 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 192 tcg_temp_free_i32(var);
b26eefb6
PB
193}
194
b26eefb6 195/* Value extensions. */
86831435
PB
196#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
198#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
200
1497c961
PB
201#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 203
b26eefb6 204
b75263d6
JR
205static inline void gen_set_cpsr(TCGv var, uint32_t mask)
206{
207 TCGv tmp_mask = tcg_const_i32(mask);
1ce94f81 208 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
209 tcg_temp_free_i32(tmp_mask);
210}
d9ba4830
PB
211/* Set NZCV flags from the high 4 bits of var. */
212#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
213
214static void gen_exception(int excp)
215{
7d1b0095 216 TCGv tmp = tcg_temp_new_i32();
d9ba4830 217 tcg_gen_movi_i32(tmp, excp);
1ce94f81 218 gen_helper_exception(cpu_env, tmp);
7d1b0095 219 tcg_temp_free_i32(tmp);
d9ba4830
PB
220}
221
3670669c
PB
222static void gen_smul_dual(TCGv a, TCGv b)
223{
7d1b0095
PM
224 TCGv tmp1 = tcg_temp_new_i32();
225 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
226 tcg_gen_ext16s_i32(tmp1, a);
227 tcg_gen_ext16s_i32(tmp2, b);
3670669c 228 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 229 tcg_temp_free_i32(tmp2);
3670669c
PB
230 tcg_gen_sari_i32(a, a, 16);
231 tcg_gen_sari_i32(b, b, 16);
232 tcg_gen_mul_i32(b, b, a);
233 tcg_gen_mov_i32(a, tmp1);
7d1b0095 234 tcg_temp_free_i32(tmp1);
3670669c
PB
235}
236
237/* Byteswap each halfword. */
238static void gen_rev16(TCGv var)
239{
7d1b0095 240 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
241 tcg_gen_shri_i32(tmp, var, 8);
242 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
243 tcg_gen_shli_i32(var, var, 8);
244 tcg_gen_andi_i32(var, var, 0xff00ff00);
245 tcg_gen_or_i32(var, var, tmp);
7d1b0095 246 tcg_temp_free_i32(tmp);
3670669c
PB
247}
248
249/* Byteswap low halfword and sign extend. */
250static void gen_revsh(TCGv var)
251{
1a855029
AJ
252 tcg_gen_ext16u_i32(var, var);
253 tcg_gen_bswap16_i32(var, var);
254 tcg_gen_ext16s_i32(var, var);
3670669c
PB
255}
256
257/* Unsigned bitfield extract. */
258static void gen_ubfx(TCGv var, int shift, uint32_t mask)
259{
260 if (shift)
261 tcg_gen_shri_i32(var, var, shift);
262 tcg_gen_andi_i32(var, var, mask);
263}
264
265/* Signed bitfield extract. */
266static void gen_sbfx(TCGv var, int shift, int width)
267{
268 uint32_t signbit;
269
270 if (shift)
271 tcg_gen_sari_i32(var, var, shift);
272 if (shift + width < 32) {
273 signbit = 1u << (width - 1);
274 tcg_gen_andi_i32(var, var, (1u << width) - 1);
275 tcg_gen_xori_i32(var, var, signbit);
276 tcg_gen_subi_i32(var, var, signbit);
277 }
278}
279
280/* Bitfield insertion. Insert val into base. Clobbers base and val. */
281static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
282{
3670669c 283 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
284 tcg_gen_shli_i32(val, val, shift);
285 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
286 tcg_gen_or_i32(dest, base, val);
287}
288
838fa72d
AJ
289/* Return (b << 32) + a. Mark inputs as dead */
290static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 291{
838fa72d
AJ
292 TCGv_i64 tmp64 = tcg_temp_new_i64();
293
294 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 295 tcg_temp_free_i32(b);
838fa72d
AJ
296 tcg_gen_shli_i64(tmp64, tmp64, 32);
297 tcg_gen_add_i64(a, tmp64, a);
298
299 tcg_temp_free_i64(tmp64);
300 return a;
301}
302
303/* Return (b << 32) - a. Mark inputs as dead. */
304static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
305{
306 TCGv_i64 tmp64 = tcg_temp_new_i64();
307
308 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 309 tcg_temp_free_i32(b);
838fa72d
AJ
310 tcg_gen_shli_i64(tmp64, tmp64, 32);
311 tcg_gen_sub_i64(a, tmp64, a);
312
313 tcg_temp_free_i64(tmp64);
314 return a;
3670669c
PB
315}
316
8f01245e
PB
317/* FIXME: Most targets have native widening multiplication.
318 It would be good to use that instead of a full wide multiply. */
5e3f878a 319/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 320static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 321{
a7812ae4
PB
322 TCGv_i64 tmp1 = tcg_temp_new_i64();
323 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
324
325 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 326 tcg_temp_free_i32(a);
5e3f878a 327 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 328 tcg_temp_free_i32(b);
5e3f878a 329 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 330 tcg_temp_free_i64(tmp2);
5e3f878a
PB
331 return tmp1;
332}
333
a7812ae4 334static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 335{
a7812ae4
PB
336 TCGv_i64 tmp1 = tcg_temp_new_i64();
337 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
338
339 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 340 tcg_temp_free_i32(a);
5e3f878a 341 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 342 tcg_temp_free_i32(b);
5e3f878a 343 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 344 tcg_temp_free_i64(tmp2);
5e3f878a
PB
345 return tmp1;
346}
347
8f01245e
PB
348/* Swap low and high halfwords. */
349static void gen_swap_half(TCGv var)
350{
7d1b0095 351 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
352 tcg_gen_shri_i32(tmp, var, 16);
353 tcg_gen_shli_i32(var, var, 16);
354 tcg_gen_or_i32(var, var, tmp);
7d1b0095 355 tcg_temp_free_i32(tmp);
8f01245e
PB
356}
357
b26eefb6
PB
358/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
359 tmp = (t0 ^ t1) & 0x8000;
360 t0 &= ~0x8000;
361 t1 &= ~0x8000;
362 t0 = (t0 + t1) ^ tmp;
363 */
364
365static void gen_add16(TCGv t0, TCGv t1)
366{
7d1b0095 367 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
368 tcg_gen_xor_i32(tmp, t0, t1);
369 tcg_gen_andi_i32(tmp, tmp, 0x8000);
370 tcg_gen_andi_i32(t0, t0, ~0x8000);
371 tcg_gen_andi_i32(t1, t1, ~0x8000);
372 tcg_gen_add_i32(t0, t0, t1);
373 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
374 tcg_temp_free_i32(tmp);
375 tcg_temp_free_i32(t1);
b26eefb6
PB
376}
377
378/* Set CF to the top bit of var. */
379static void gen_set_CF_bit31(TCGv var)
380{
66c374de 381 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
382}
383
384/* Set N and Z flags from var. */
385static inline void gen_logic_CC(TCGv var)
386{
66c374de
AJ
387 tcg_gen_mov_i32(cpu_NF, var);
388 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
389}
390
391/* T0 += T1 + CF. */
396e467c 392static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 393{
396e467c 394 tcg_gen_add_i32(t0, t0, t1);
66c374de 395 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
396}
397
e9bb4aa9
JR
398/* dest = T0 + T1 + CF. */
399static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
400{
e9bb4aa9 401 tcg_gen_add_i32(dest, t0, t1);
66c374de 402 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
403}
404
3670669c
PB
405/* dest = T0 - T1 + CF - 1. */
406static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
407{
3670669c 408 tcg_gen_sub_i32(dest, t0, t1);
66c374de 409 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 410 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
411}
412
72485ec4
AJ
413/* dest = T0 + T1. Compute C, N, V and Z flags */
414static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
415{
416 TCGv tmp;
417 tcg_gen_add_i32(cpu_NF, t0, t1);
418 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
419 tcg_gen_setcond_i32(TCG_COND_LTU, cpu_CF, cpu_NF, t0);
420 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
421 tmp = tcg_temp_new_i32();
422 tcg_gen_xor_i32(tmp, t0, t1);
423 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
424 tcg_temp_free_i32(tmp);
425 tcg_gen_mov_i32(dest, cpu_NF);
426}
427
428/* dest = T0 - T1. Compute C, N, V and Z flags */
429static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
430{
431 TCGv tmp;
432 tcg_gen_sub_i32(cpu_NF, t0, t1);
433 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
434 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
435 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
436 tmp = tcg_temp_new_i32();
437 tcg_gen_xor_i32(tmp, t0, t1);
438 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
439 tcg_temp_free_i32(tmp);
440 tcg_gen_mov_i32(dest, cpu_NF);
441}
442
365af80e
AJ
443#define GEN_SHIFT(name) \
444static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
445{ \
446 TCGv tmp1, tmp2, tmp3; \
447 tmp1 = tcg_temp_new_i32(); \
448 tcg_gen_andi_i32(tmp1, t1, 0xff); \
449 tmp2 = tcg_const_i32(0); \
450 tmp3 = tcg_const_i32(0x1f); \
451 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
452 tcg_temp_free_i32(tmp3); \
453 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
454 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
455 tcg_temp_free_i32(tmp2); \
456 tcg_temp_free_i32(tmp1); \
457}
458GEN_SHIFT(shl)
459GEN_SHIFT(shr)
460#undef GEN_SHIFT
461
462static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
463{
464 TCGv tmp1, tmp2;
465 tmp1 = tcg_temp_new_i32();
466 tcg_gen_andi_i32(tmp1, t1, 0xff);
467 tmp2 = tcg_const_i32(0x1f);
468 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
469 tcg_temp_free_i32(tmp2);
470 tcg_gen_sar_i32(dest, t0, tmp1);
471 tcg_temp_free_i32(tmp1);
472}
473
ad69471c
PB
474/* FIXME: Implement this natively. */
475#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
476
9a119ff6 477static void shifter_out_im(TCGv var, int shift)
b26eefb6 478{
9a119ff6 479 if (shift == 0) {
66c374de 480 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 481 } else {
66c374de
AJ
482 tcg_gen_shri_i32(cpu_CF, var, shift);
483 if (shift != 31) {
484 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
485 }
9a119ff6 486 }
9a119ff6 487}
b26eefb6 488
9a119ff6
PB
489/* Shift by immediate. Includes special handling for shift == 0. */
490static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
491{
492 switch (shiftop) {
493 case 0: /* LSL */
494 if (shift != 0) {
495 if (flags)
496 shifter_out_im(var, 32 - shift);
497 tcg_gen_shli_i32(var, var, shift);
498 }
499 break;
500 case 1: /* LSR */
501 if (shift == 0) {
502 if (flags) {
66c374de 503 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
504 }
505 tcg_gen_movi_i32(var, 0);
506 } else {
507 if (flags)
508 shifter_out_im(var, shift - 1);
509 tcg_gen_shri_i32(var, var, shift);
510 }
511 break;
512 case 2: /* ASR */
513 if (shift == 0)
514 shift = 32;
515 if (flags)
516 shifter_out_im(var, shift - 1);
517 if (shift == 32)
518 shift = 31;
519 tcg_gen_sari_i32(var, var, shift);
520 break;
521 case 3: /* ROR/RRX */
522 if (shift != 0) {
523 if (flags)
524 shifter_out_im(var, shift - 1);
f669df27 525 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 526 } else {
66c374de 527 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
528 if (flags)
529 shifter_out_im(var, 0);
530 tcg_gen_shri_i32(var, var, 1);
66c374de 531 tcg_gen_shli_i32(tmp, cpu_CF, 31);
b26eefb6 532 tcg_gen_or_i32(var, var, tmp);
7d1b0095 533 tcg_temp_free_i32(tmp);
b26eefb6
PB
534 }
535 }
536};
537
8984bd2e
PB
538static inline void gen_arm_shift_reg(TCGv var, int shiftop,
539 TCGv shift, int flags)
540{
541 if (flags) {
542 switch (shiftop) {
9ef39277
BS
543 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
544 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
545 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
546 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
547 }
548 } else {
549 switch (shiftop) {
365af80e
AJ
550 case 0:
551 gen_shl(var, var, shift);
552 break;
553 case 1:
554 gen_shr(var, var, shift);
555 break;
556 case 2:
557 gen_sar(var, var, shift);
558 break;
f669df27
AJ
559 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
560 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
561 }
562 }
7d1b0095 563 tcg_temp_free_i32(shift);
8984bd2e
PB
564}
565
6ddbc6e4
PB
566#define PAS_OP(pfx) \
567 switch (op2) { \
568 case 0: gen_pas_helper(glue(pfx,add16)); break; \
569 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
570 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
571 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
572 case 4: gen_pas_helper(glue(pfx,add8)); break; \
573 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
574 }
d9ba4830 575static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 576{
a7812ae4 577 TCGv_ptr tmp;
6ddbc6e4
PB
578
579 switch (op1) {
580#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
581 case 1:
a7812ae4 582 tmp = tcg_temp_new_ptr();
0ecb72a5 583 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 584 PAS_OP(s)
b75263d6 585 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
586 break;
587 case 5:
a7812ae4 588 tmp = tcg_temp_new_ptr();
0ecb72a5 589 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 590 PAS_OP(u)
b75263d6 591 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
592 break;
593#undef gen_pas_helper
594#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
595 case 2:
596 PAS_OP(q);
597 break;
598 case 3:
599 PAS_OP(sh);
600 break;
601 case 6:
602 PAS_OP(uq);
603 break;
604 case 7:
605 PAS_OP(uh);
606 break;
607#undef gen_pas_helper
608 }
609}
9ee6e8bb
PB
610#undef PAS_OP
611
6ddbc6e4
PB
612/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
613#define PAS_OP(pfx) \
ed89a2f1 614 switch (op1) { \
6ddbc6e4
PB
615 case 0: gen_pas_helper(glue(pfx,add8)); break; \
616 case 1: gen_pas_helper(glue(pfx,add16)); break; \
617 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
618 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
619 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
620 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
621 }
d9ba4830 622static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 623{
a7812ae4 624 TCGv_ptr tmp;
6ddbc6e4 625
ed89a2f1 626 switch (op2) {
6ddbc6e4
PB
627#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
628 case 0:
a7812ae4 629 tmp = tcg_temp_new_ptr();
0ecb72a5 630 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 631 PAS_OP(s)
b75263d6 632 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
633 break;
634 case 4:
a7812ae4 635 tmp = tcg_temp_new_ptr();
0ecb72a5 636 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 637 PAS_OP(u)
b75263d6 638 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
639 break;
640#undef gen_pas_helper
641#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
642 case 1:
643 PAS_OP(q);
644 break;
645 case 2:
646 PAS_OP(sh);
647 break;
648 case 5:
649 PAS_OP(uq);
650 break;
651 case 6:
652 PAS_OP(uh);
653 break;
654#undef gen_pas_helper
655 }
656}
9ee6e8bb
PB
657#undef PAS_OP
658
d9ba4830
PB
659static void gen_test_cc(int cc, int label)
660{
661 TCGv tmp;
d9ba4830
PB
662 int inv;
663
d9ba4830
PB
664 switch (cc) {
665 case 0: /* eq: Z */
66c374de 666 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
667 break;
668 case 1: /* ne: !Z */
66c374de 669 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
670 break;
671 case 2: /* cs: C */
66c374de 672 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
673 break;
674 case 3: /* cc: !C */
66c374de 675 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
676 break;
677 case 4: /* mi: N */
66c374de 678 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
679 break;
680 case 5: /* pl: !N */
66c374de 681 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
682 break;
683 case 6: /* vs: V */
66c374de 684 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
685 break;
686 case 7: /* vc: !V */
66c374de 687 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
688 break;
689 case 8: /* hi: C && !Z */
690 inv = gen_new_label();
66c374de
AJ
691 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
692 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
693 gen_set_label(inv);
694 break;
695 case 9: /* ls: !C || Z */
66c374de
AJ
696 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
697 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
698 break;
699 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
700 tmp = tcg_temp_new_i32();
701 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 702 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 703 tcg_temp_free_i32(tmp);
d9ba4830
PB
704 break;
705 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
706 tmp = tcg_temp_new_i32();
707 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 708 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 709 tcg_temp_free_i32(tmp);
d9ba4830
PB
710 break;
711 case 12: /* gt: !Z && N == V */
712 inv = gen_new_label();
66c374de
AJ
713 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
714 tmp = tcg_temp_new_i32();
715 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 716 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 717 tcg_temp_free_i32(tmp);
d9ba4830
PB
718 gen_set_label(inv);
719 break;
720 case 13: /* le: Z || N != V */
66c374de
AJ
721 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
722 tmp = tcg_temp_new_i32();
723 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 724 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 725 tcg_temp_free_i32(tmp);
d9ba4830
PB
726 break;
727 default:
728 fprintf(stderr, "Bad condition code 0x%x\n", cc);
729 abort();
730 }
d9ba4830 731}
2c0262af 732
/* Indexed by data-processing opcode: nonzero entries are the logical
   operations, whose flag-setting forms update only N and Z. */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 751
d9ba4830
PB
752/* Set PC and Thumb state from an immediate address. */
753static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 754{
b26eefb6 755 TCGv tmp;
99c475ab 756
b26eefb6 757 s->is_jmp = DISAS_UPDATE;
d9ba4830 758 if (s->thumb != (addr & 1)) {
7d1b0095 759 tmp = tcg_temp_new_i32();
d9ba4830 760 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 761 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 762 tcg_temp_free_i32(tmp);
d9ba4830 763 }
155c3eac 764 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
765}
766
767/* Set PC and Thumb state from var. var is marked as dead. */
768static inline void gen_bx(DisasContext *s, TCGv var)
769{
d9ba4830 770 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
771 tcg_gen_andi_i32(cpu_R[15], var, ~1);
772 tcg_gen_andi_i32(var, var, 1);
773 store_cpu_field(var, thumb);
d9ba4830
PB
774}
775
21aeb343
JR
776/* Variant of store_reg which uses branch&exchange logic when storing
777 to r15 in ARM architecture v7 and above. The source must be a temporary
778 and will be marked as dead. */
0ecb72a5 779static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
21aeb343
JR
780 int reg, TCGv var)
781{
782 if (reg == 15 && ENABLE_ARCH_7) {
783 gen_bx(s, var);
784 } else {
785 store_reg(s, reg, var);
786 }
787}
788
be5e7a76
DES
789/* Variant of store_reg which uses branch&exchange logic when storing
790 * to r15 in ARM architecture v5T and above. This is used for storing
791 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
792 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 793static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
be5e7a76
DES
794 int reg, TCGv var)
795{
796 if (reg == 15 && ENABLE_ARCH_5) {
797 gen_bx(s, var);
798 } else {
799 store_reg(s, reg, var);
800 }
801}
802
b0109805
PB
803static inline TCGv gen_ld8s(TCGv addr, int index)
804{
7d1b0095 805 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
806 tcg_gen_qemu_ld8s(tmp, addr, index);
807 return tmp;
808}
809static inline TCGv gen_ld8u(TCGv addr, int index)
810{
7d1b0095 811 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
812 tcg_gen_qemu_ld8u(tmp, addr, index);
813 return tmp;
814}
815static inline TCGv gen_ld16s(TCGv addr, int index)
816{
7d1b0095 817 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
818 tcg_gen_qemu_ld16s(tmp, addr, index);
819 return tmp;
820}
821static inline TCGv gen_ld16u(TCGv addr, int index)
822{
7d1b0095 823 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
824 tcg_gen_qemu_ld16u(tmp, addr, index);
825 return tmp;
826}
827static inline TCGv gen_ld32(TCGv addr, int index)
828{
7d1b0095 829 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
830 tcg_gen_qemu_ld32u(tmp, addr, index);
831 return tmp;
832}
84496233
JR
833static inline TCGv_i64 gen_ld64(TCGv addr, int index)
834{
835 TCGv_i64 tmp = tcg_temp_new_i64();
836 tcg_gen_qemu_ld64(tmp, addr, index);
837 return tmp;
838}
b0109805
PB
839static inline void gen_st8(TCGv val, TCGv addr, int index)
840{
841 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 842 tcg_temp_free_i32(val);
b0109805
PB
843}
844static inline void gen_st16(TCGv val, TCGv addr, int index)
845{
846 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 847 tcg_temp_free_i32(val);
b0109805
PB
848}
849static inline void gen_st32(TCGv val, TCGv addr, int index)
850{
851 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 852 tcg_temp_free_i32(val);
b0109805 853}
84496233
JR
854static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
855{
856 tcg_gen_qemu_st64(val, addr, index);
857 tcg_temp_free_i64(val);
858}
b5ff1b31 859
5e3f878a
PB
860static inline void gen_set_pc_im(uint32_t val)
861{
155c3eac 862 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
863}
864
b5ff1b31
FB
865/* Force a TB lookup after an instruction that changes the CPU state. */
866static inline void gen_lookup_tb(DisasContext *s)
867{
a6445c52 868 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
869 s->is_jmp = DISAS_UPDATE;
870}
871
b0109805
PB
872static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
873 TCGv var)
2c0262af 874{
1e8d4eec 875 int val, rm, shift, shiftop;
b26eefb6 876 TCGv offset;
2c0262af
FB
877
878 if (!(insn & (1 << 25))) {
879 /* immediate */
880 val = insn & 0xfff;
881 if (!(insn & (1 << 23)))
882 val = -val;
537730b9 883 if (val != 0)
b0109805 884 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
885 } else {
886 /* shift/register */
887 rm = (insn) & 0xf;
888 shift = (insn >> 7) & 0x1f;
1e8d4eec 889 shiftop = (insn >> 5) & 3;
b26eefb6 890 offset = load_reg(s, rm);
9a119ff6 891 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 892 if (!(insn & (1 << 23)))
b0109805 893 tcg_gen_sub_i32(var, var, offset);
2c0262af 894 else
b0109805 895 tcg_gen_add_i32(var, var, offset);
7d1b0095 896 tcg_temp_free_i32(offset);
2c0262af
FB
897 }
898}
899
191f9a93 900static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 901 int extra, TCGv var)
2c0262af
FB
902{
903 int val, rm;
b26eefb6 904 TCGv offset;
3b46e624 905
2c0262af
FB
906 if (insn & (1 << 22)) {
907 /* immediate */
908 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
909 if (!(insn & (1 << 23)))
910 val = -val;
18acad92 911 val += extra;
537730b9 912 if (val != 0)
b0109805 913 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
914 } else {
915 /* register */
191f9a93 916 if (extra)
b0109805 917 tcg_gen_addi_i32(var, var, extra);
2c0262af 918 rm = (insn) & 0xf;
b26eefb6 919 offset = load_reg(s, rm);
2c0262af 920 if (!(insn & (1 << 23)))
b0109805 921 tcg_gen_sub_i32(var, var, offset);
2c0262af 922 else
b0109805 923 tcg_gen_add_i32(var, var, offset);
7d1b0095 924 tcg_temp_free_i32(offset);
2c0262af
FB
925 }
926}
927
5aaebd13
PM
928static TCGv_ptr get_fpstatus_ptr(int neon)
929{
930 TCGv_ptr statusptr = tcg_temp_new_ptr();
931 int offset;
932 if (neon) {
0ecb72a5 933 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 934 } else {
0ecb72a5 935 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
936 }
937 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
938 return statusptr;
939}
940
4373f3ce
PB
941#define VFP_OP2(name) \
942static inline void gen_vfp_##name(int dp) \
943{ \
ae1857ec
PM
944 TCGv_ptr fpst = get_fpstatus_ptr(0); \
945 if (dp) { \
946 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
947 } else { \
948 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
949 } \
950 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
951}
952
4373f3ce
PB
953VFP_OP2(add)
954VFP_OP2(sub)
955VFP_OP2(mul)
956VFP_OP2(div)
957
958#undef VFP_OP2
959
605a6aed
PM
960static inline void gen_vfp_F1_mul(int dp)
961{
962 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 963 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 964 if (dp) {
ae1857ec 965 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 966 } else {
ae1857ec 967 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 968 }
ae1857ec 969 tcg_temp_free_ptr(fpst);
605a6aed
PM
970}
971
972static inline void gen_vfp_F1_neg(int dp)
973{
974 /* Like gen_vfp_neg() but put result in F1 */
975 if (dp) {
976 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
977 } else {
978 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
979 }
980}
981
4373f3ce
PB
982static inline void gen_vfp_abs(int dp)
983{
984 if (dp)
985 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
986 else
987 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
988}
989
990static inline void gen_vfp_neg(int dp)
991{
992 if (dp)
993 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
994 else
995 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
996}
997
998static inline void gen_vfp_sqrt(int dp)
999{
1000 if (dp)
1001 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1002 else
1003 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1004}
1005
1006static inline void gen_vfp_cmp(int dp)
1007{
1008 if (dp)
1009 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1010 else
1011 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1012}
1013
1014static inline void gen_vfp_cmpe(int dp)
1015{
1016 if (dp)
1017 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1018 else
1019 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1020}
1021
1022static inline void gen_vfp_F1_ld0(int dp)
1023{
1024 if (dp)
5b340b51 1025 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1026 else
5b340b51 1027 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1028}
1029
5500b06c
PM
/* Integer -> float conversions, F0 = convert(F0s).  The integer source
 * always lives in the 32-bit single slot; dp selects the width of the
 * float result.  'neon' picks the float_status block used for
 * rounding/flags (see get_fpstatus_ptr). */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1045
5500b06c
PM
/* Float -> integer conversions, F0s = convert(F0).  The integer result
 * is always written to the 32-bit single slot; dp selects the width of
 * the float source.  The 'z' instantiations below are presumably the
 * round-toward-zero variants -- confirm against the helper definitions. */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce
PB
1063
/* Fixed-point <-> float conversions, in place on F0.  'shift' is the
 * number of fractional bits, passed to the helper as an i32 constant;
 * 'neon' selects the float_status block. */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1086
312eea9f 1087static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1088{
1089 if (dp)
312eea9f 1090 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1091 else
312eea9f 1092 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1093}
1094
312eea9f 1095static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1096{
1097 if (dp)
312eea9f 1098 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1099 else
312eea9f 1100 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1101}
1102
8e96005d
FB
1103static inline long
1104vfp_reg_offset (int dp, int reg)
1105{
1106 if (dp)
1107 return offsetof(CPUARMState, vfp.regs[reg]);
1108 else if (reg & 1) {
1109 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1110 + offsetof(CPU_DoubleU, l.upper);
1111 } else {
1112 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1113 + offsetof(CPU_DoubleU, l.lower);
1114 }
1115}
9ee6e8bb
PB
1116
1117/* Return the offset of a 32-bit piece of a NEON register.
1118 zero is the least significant end of the register. */
1119static inline long
1120neon_reg_offset (int reg, int n)
1121{
1122 int sreg;
1123 sreg = reg * 2 + n;
1124 return vfp_reg_offset(0, sreg);
1125}
1126
8f8e3aa4
PB
1127static TCGv neon_load_reg(int reg, int pass)
1128{
7d1b0095 1129 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1130 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1131 return tmp;
1132}
1133
1134static void neon_store_reg(int reg, int pass, TCGv var)
1135{
1136 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1137 tcg_temp_free_i32(var);
8f8e3aa4
PB
1138}
1139
a7812ae4 1140static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1141{
1142 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1143}
1144
a7812ae4 1145static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1146{
1147 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1148}
1149
4373f3ce
PB
/* FP loads/stores are just the integer ops of matching width; these
 * aliases let the VFP/Neon move code below read naturally. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1154
b7bcbe95
FB
1155static inline void gen_mov_F0_vreg(int dp, int reg)
1156{
1157 if (dp)
4373f3ce 1158 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1159 else
4373f3ce 1160 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1161}
1162
1163static inline void gen_mov_F1_vreg(int dp, int reg)
1164{
1165 if (dp)
4373f3ce 1166 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1167 else
4373f3ce 1168 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1169}
1170
1171static inline void gen_mov_vreg_F0(int dp, int reg)
1172{
1173 if (dp)
4373f3ce 1174 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1175 else
4373f3ce 1176 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1177}
1178
18c9b560
AZ
/* Coprocessor instruction bit 20: set = register read / load direction. */
#define ARM_CP_RW_BIT (1 << 20)

/* Read iwMMXt data register wRn into the 64-bit temp 'var'. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Write the 64-bit temp 'var' to iwMMXt data register wRn. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Read iwMMXt control register 'reg' into a fresh 32-bit temp
 * (caller frees). */
static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Write 'var' to iwMMXt control register 'reg'; consumes 'var'. */
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

/* Copy the M0 working register out to wRn ... */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

/* ... and load M0 from wRn. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1213
/* 64-bit logical ops on the working register: M0 op= wRn. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* Generate M0 = helper(M0, wRn) for a helper with no env argument. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also receives cpu_env (presumably it
 * touches flag/saturation state in CPUARMState -- see the helpers). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env-taking op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary env-taking op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1256
/* Multiply/accumulate and sum-of-absolute-difference ops (no env). */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Interleave low/high halves, byte/word/long variants. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Unary unpack/extend variants operating on M0 alone. */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise compares. */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max, signed and unsigned. */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Add/subtract families (n/u/s suffixes -- see helper definitions). */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Packed averages. */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Pack, unsigned and signed variants. */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d
PB
1314static void gen_op_iwmmxt_set_mup(void)
1315{
1316 TCGv tmp;
1317 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1318 tcg_gen_ori_i32(tmp, tmp, 2);
1319 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1320}
1321
1322static void gen_op_iwmmxt_set_cup(void)
1323{
1324 TCGv tmp;
1325 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1326 tcg_gen_ori_i32(tmp, tmp, 1);
1327 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1328}
1329
1330static void gen_op_iwmmxt_setpsr_nz(void)
1331{
7d1b0095 1332 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1333 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1334 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1335}
1336
/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1343
/* Compute the effective address for an iwMMXt load/store into 'dest'
 * and perform any base-register writeback.  Returns nonzero for an
 * invalid addressing mode (caller UNDEFs), 0 on success.
 *
 * insn bit 24 = P (pre-indexed), bit 23 = U (add offset),
 * bit 21 = W (writeback).  The 8-bit immediate is shifted left by 2
 * when instruction bit 8 is set ((insn >> 7) & 2 yields 0 or 2). */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;            /* base register Rn */
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed: dest = Rn +/- offset, writeback if W set. */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);      /* store_reg consumes tmp */
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed: dest = Rn, then Rn +/- offset written back. */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        /* P=0, W=0, U=0: rejected as invalid. */
        return 1;
    /* NOTE(review): for P=0, W=0, U=1 this falls through and returns 0
     * without writing 'dest' or freeing 'tmp' -- verify callers never
     * generate that encoding. */
    return 0;
}
1377
/* Load the shift amount for an iwMMXt shift instruction into 'dest',
 * ANDed with 'mask'.  When insn bit 8 is set the source is control
 * register wCGRn (only wCGR0..wCGR3 are legal); otherwise it is the
 * low 32 bits of data register wRn.  Returns nonzero on an illegal
 * register, 0 on success. */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        /* Truncate the 64-bit data register to its low word. */
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1399
a1c7273b 1400/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1401 (ie. an undefined instruction). */
0ecb72a5 1402static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1403{
1404 int rd, wrd;
1405 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1406 TCGv addr;
1407 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1408
1409 if ((insn & 0x0e000e00) == 0x0c000000) {
1410 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1411 wrd = insn & 0xf;
1412 rdlo = (insn >> 12) & 0xf;
1413 rdhi = (insn >> 16) & 0xf;
1414 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1415 iwmmxt_load_reg(cpu_V0, wrd);
1416 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1417 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1418 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1419 } else { /* TMCRR */
da6b5335
FN
1420 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1421 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1422 gen_op_iwmmxt_set_mup();
1423 }
1424 return 0;
1425 }
1426
1427 wrd = (insn >> 12) & 0xf;
7d1b0095 1428 addr = tcg_temp_new_i32();
da6b5335 1429 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1430 tcg_temp_free_i32(addr);
18c9b560 1431 return 1;
da6b5335 1432 }
18c9b560
AZ
1433 if (insn & ARM_CP_RW_BIT) {
1434 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1435 tmp = tcg_temp_new_i32();
da6b5335
FN
1436 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1437 iwmmxt_store_creg(wrd, tmp);
18c9b560 1438 } else {
e677137d
PB
1439 i = 1;
1440 if (insn & (1 << 8)) {
1441 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1442 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1443 i = 0;
1444 } else { /* WLDRW wRd */
da6b5335 1445 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1446 }
1447 } else {
1448 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1449 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1450 } else { /* WLDRB */
da6b5335 1451 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1452 }
1453 }
1454 if (i) {
1455 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1456 tcg_temp_free_i32(tmp);
e677137d 1457 }
18c9b560
AZ
1458 gen_op_iwmmxt_movq_wRn_M0(wrd);
1459 }
1460 } else {
1461 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1462 tmp = iwmmxt_load_creg(wrd);
1463 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1464 } else {
1465 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1466 tmp = tcg_temp_new_i32();
e677137d
PB
1467 if (insn & (1 << 8)) {
1468 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1469 tcg_temp_free_i32(tmp);
da6b5335 1470 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1471 } else { /* WSTRW wRd */
1472 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1473 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1474 }
1475 } else {
1476 if (insn & (1 << 22)) { /* WSTRH */
1477 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1478 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1479 } else { /* WSTRB */
1480 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1481 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1482 }
1483 }
18c9b560
AZ
1484 }
1485 }
7d1b0095 1486 tcg_temp_free_i32(addr);
18c9b560
AZ
1487 return 0;
1488 }
1489
1490 if ((insn & 0x0f000000) != 0x0e000000)
1491 return 1;
1492
1493 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1494 case 0x000: /* WOR */
1495 wrd = (insn >> 12) & 0xf;
1496 rd0 = (insn >> 0) & 0xf;
1497 rd1 = (insn >> 16) & 0xf;
1498 gen_op_iwmmxt_movq_M0_wRn(rd0);
1499 gen_op_iwmmxt_orq_M0_wRn(rd1);
1500 gen_op_iwmmxt_setpsr_nz();
1501 gen_op_iwmmxt_movq_wRn_M0(wrd);
1502 gen_op_iwmmxt_set_mup();
1503 gen_op_iwmmxt_set_cup();
1504 break;
1505 case 0x011: /* TMCR */
1506 if (insn & 0xf)
1507 return 1;
1508 rd = (insn >> 12) & 0xf;
1509 wrd = (insn >> 16) & 0xf;
1510 switch (wrd) {
1511 case ARM_IWMMXT_wCID:
1512 case ARM_IWMMXT_wCASF:
1513 break;
1514 case ARM_IWMMXT_wCon:
1515 gen_op_iwmmxt_set_cup();
1516 /* Fall through. */
1517 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1518 tmp = iwmmxt_load_creg(wrd);
1519 tmp2 = load_reg(s, rd);
f669df27 1520 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1521 tcg_temp_free_i32(tmp2);
da6b5335 1522 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1523 break;
1524 case ARM_IWMMXT_wCGR0:
1525 case ARM_IWMMXT_wCGR1:
1526 case ARM_IWMMXT_wCGR2:
1527 case ARM_IWMMXT_wCGR3:
1528 gen_op_iwmmxt_set_cup();
da6b5335
FN
1529 tmp = load_reg(s, rd);
1530 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1531 break;
1532 default:
1533 return 1;
1534 }
1535 break;
1536 case 0x100: /* WXOR */
1537 wrd = (insn >> 12) & 0xf;
1538 rd0 = (insn >> 0) & 0xf;
1539 rd1 = (insn >> 16) & 0xf;
1540 gen_op_iwmmxt_movq_M0_wRn(rd0);
1541 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1542 gen_op_iwmmxt_setpsr_nz();
1543 gen_op_iwmmxt_movq_wRn_M0(wrd);
1544 gen_op_iwmmxt_set_mup();
1545 gen_op_iwmmxt_set_cup();
1546 break;
1547 case 0x111: /* TMRC */
1548 if (insn & 0xf)
1549 return 1;
1550 rd = (insn >> 12) & 0xf;
1551 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1552 tmp = iwmmxt_load_creg(wrd);
1553 store_reg(s, rd, tmp);
18c9b560
AZ
1554 break;
1555 case 0x300: /* WANDN */
1556 wrd = (insn >> 12) & 0xf;
1557 rd0 = (insn >> 0) & 0xf;
1558 rd1 = (insn >> 16) & 0xf;
1559 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1560 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1561 gen_op_iwmmxt_andq_M0_wRn(rd1);
1562 gen_op_iwmmxt_setpsr_nz();
1563 gen_op_iwmmxt_movq_wRn_M0(wrd);
1564 gen_op_iwmmxt_set_mup();
1565 gen_op_iwmmxt_set_cup();
1566 break;
1567 case 0x200: /* WAND */
1568 wrd = (insn >> 12) & 0xf;
1569 rd0 = (insn >> 0) & 0xf;
1570 rd1 = (insn >> 16) & 0xf;
1571 gen_op_iwmmxt_movq_M0_wRn(rd0);
1572 gen_op_iwmmxt_andq_M0_wRn(rd1);
1573 gen_op_iwmmxt_setpsr_nz();
1574 gen_op_iwmmxt_movq_wRn_M0(wrd);
1575 gen_op_iwmmxt_set_mup();
1576 gen_op_iwmmxt_set_cup();
1577 break;
1578 case 0x810: case 0xa10: /* WMADD */
1579 wrd = (insn >> 12) & 0xf;
1580 rd0 = (insn >> 0) & 0xf;
1581 rd1 = (insn >> 16) & 0xf;
1582 gen_op_iwmmxt_movq_M0_wRn(rd0);
1583 if (insn & (1 << 21))
1584 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1585 else
1586 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1587 gen_op_iwmmxt_movq_wRn_M0(wrd);
1588 gen_op_iwmmxt_set_mup();
1589 break;
1590 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1591 wrd = (insn >> 12) & 0xf;
1592 rd0 = (insn >> 16) & 0xf;
1593 rd1 = (insn >> 0) & 0xf;
1594 gen_op_iwmmxt_movq_M0_wRn(rd0);
1595 switch ((insn >> 22) & 3) {
1596 case 0:
1597 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1598 break;
1599 case 1:
1600 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1601 break;
1602 case 2:
1603 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1604 break;
1605 case 3:
1606 return 1;
1607 }
1608 gen_op_iwmmxt_movq_wRn_M0(wrd);
1609 gen_op_iwmmxt_set_mup();
1610 gen_op_iwmmxt_set_cup();
1611 break;
1612 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1613 wrd = (insn >> 12) & 0xf;
1614 rd0 = (insn >> 16) & 0xf;
1615 rd1 = (insn >> 0) & 0xf;
1616 gen_op_iwmmxt_movq_M0_wRn(rd0);
1617 switch ((insn >> 22) & 3) {
1618 case 0:
1619 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1620 break;
1621 case 1:
1622 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1623 break;
1624 case 2:
1625 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1626 break;
1627 case 3:
1628 return 1;
1629 }
1630 gen_op_iwmmxt_movq_wRn_M0(wrd);
1631 gen_op_iwmmxt_set_mup();
1632 gen_op_iwmmxt_set_cup();
1633 break;
1634 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 16) & 0xf;
1637 rd1 = (insn >> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 if (insn & (1 << 22))
1640 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1643 if (!(insn & (1 << 20)))
1644 gen_op_iwmmxt_addl_M0_wRn(wrd);
1645 gen_op_iwmmxt_movq_wRn_M0(wrd);
1646 gen_op_iwmmxt_set_mup();
1647 break;
1648 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1649 wrd = (insn >> 12) & 0xf;
1650 rd0 = (insn >> 16) & 0xf;
1651 rd1 = (insn >> 0) & 0xf;
1652 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1653 if (insn & (1 << 21)) {
1654 if (insn & (1 << 20))
1655 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1656 else
1657 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1658 } else {
1659 if (insn & (1 << 20))
1660 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1661 else
1662 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1663 }
18c9b560
AZ
1664 gen_op_iwmmxt_movq_wRn_M0(wrd);
1665 gen_op_iwmmxt_set_mup();
1666 break;
1667 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1668 wrd = (insn >> 12) & 0xf;
1669 rd0 = (insn >> 16) & 0xf;
1670 rd1 = (insn >> 0) & 0xf;
1671 gen_op_iwmmxt_movq_M0_wRn(rd0);
1672 if (insn & (1 << 21))
1673 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1674 else
1675 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1676 if (!(insn & (1 << 20))) {
e677137d
PB
1677 iwmmxt_load_reg(cpu_V1, wrd);
1678 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1679 }
1680 gen_op_iwmmxt_movq_wRn_M0(wrd);
1681 gen_op_iwmmxt_set_mup();
1682 break;
1683 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1684 wrd = (insn >> 12) & 0xf;
1685 rd0 = (insn >> 16) & 0xf;
1686 rd1 = (insn >> 0) & 0xf;
1687 gen_op_iwmmxt_movq_M0_wRn(rd0);
1688 switch ((insn >> 22) & 3) {
1689 case 0:
1690 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1691 break;
1692 case 1:
1693 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1694 break;
1695 case 2:
1696 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1697 break;
1698 case 3:
1699 return 1;
1700 }
1701 gen_op_iwmmxt_movq_wRn_M0(wrd);
1702 gen_op_iwmmxt_set_mup();
1703 gen_op_iwmmxt_set_cup();
1704 break;
1705 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1706 wrd = (insn >> 12) & 0xf;
1707 rd0 = (insn >> 16) & 0xf;
1708 rd1 = (insn >> 0) & 0xf;
1709 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1710 if (insn & (1 << 22)) {
1711 if (insn & (1 << 20))
1712 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1713 else
1714 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1715 } else {
1716 if (insn & (1 << 20))
1717 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1718 else
1719 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1720 }
18c9b560
AZ
1721 gen_op_iwmmxt_movq_wRn_M0(wrd);
1722 gen_op_iwmmxt_set_mup();
1723 gen_op_iwmmxt_set_cup();
1724 break;
1725 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1726 wrd = (insn >> 12) & 0xf;
1727 rd0 = (insn >> 16) & 0xf;
1728 rd1 = (insn >> 0) & 0xf;
1729 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1730 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1731 tcg_gen_andi_i32(tmp, tmp, 7);
1732 iwmmxt_load_reg(cpu_V1, rd1);
1733 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1734 tcg_temp_free_i32(tmp);
18c9b560
AZ
1735 gen_op_iwmmxt_movq_wRn_M0(wrd);
1736 gen_op_iwmmxt_set_mup();
1737 break;
1738 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1739 if (((insn >> 6) & 3) == 3)
1740 return 1;
18c9b560
AZ
1741 rd = (insn >> 12) & 0xf;
1742 wrd = (insn >> 16) & 0xf;
da6b5335 1743 tmp = load_reg(s, rd);
18c9b560
AZ
1744 gen_op_iwmmxt_movq_M0_wRn(wrd);
1745 switch ((insn >> 6) & 3) {
1746 case 0:
da6b5335
FN
1747 tmp2 = tcg_const_i32(0xff);
1748 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1749 break;
1750 case 1:
da6b5335
FN
1751 tmp2 = tcg_const_i32(0xffff);
1752 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1753 break;
1754 case 2:
da6b5335
FN
1755 tmp2 = tcg_const_i32(0xffffffff);
1756 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1757 break;
da6b5335
FN
1758 default:
1759 TCGV_UNUSED(tmp2);
1760 TCGV_UNUSED(tmp3);
18c9b560 1761 }
da6b5335
FN
1762 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1763 tcg_temp_free(tmp3);
1764 tcg_temp_free(tmp2);
7d1b0095 1765 tcg_temp_free_i32(tmp);
18c9b560
AZ
1766 gen_op_iwmmxt_movq_wRn_M0(wrd);
1767 gen_op_iwmmxt_set_mup();
1768 break;
1769 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1770 rd = (insn >> 12) & 0xf;
1771 wrd = (insn >> 16) & 0xf;
da6b5335 1772 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1773 return 1;
1774 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1775 tmp = tcg_temp_new_i32();
18c9b560
AZ
1776 switch ((insn >> 22) & 3) {
1777 case 0:
da6b5335
FN
1778 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1779 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1780 if (insn & 8) {
1781 tcg_gen_ext8s_i32(tmp, tmp);
1782 } else {
1783 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1784 }
1785 break;
1786 case 1:
da6b5335
FN
1787 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1788 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1789 if (insn & 8) {
1790 tcg_gen_ext16s_i32(tmp, tmp);
1791 } else {
1792 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1793 }
1794 break;
1795 case 2:
da6b5335
FN
1796 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1797 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1798 break;
18c9b560 1799 }
da6b5335 1800 store_reg(s, rd, tmp);
18c9b560
AZ
1801 break;
1802 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1803 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1804 return 1;
da6b5335 1805 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1806 switch ((insn >> 22) & 3) {
1807 case 0:
da6b5335 1808 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1809 break;
1810 case 1:
da6b5335 1811 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1812 break;
1813 case 2:
da6b5335 1814 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1815 break;
18c9b560 1816 }
da6b5335
FN
1817 tcg_gen_shli_i32(tmp, tmp, 28);
1818 gen_set_nzcv(tmp);
7d1b0095 1819 tcg_temp_free_i32(tmp);
18c9b560
AZ
1820 break;
1821 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1822 if (((insn >> 6) & 3) == 3)
1823 return 1;
18c9b560
AZ
1824 rd = (insn >> 12) & 0xf;
1825 wrd = (insn >> 16) & 0xf;
da6b5335 1826 tmp = load_reg(s, rd);
18c9b560
AZ
1827 switch ((insn >> 6) & 3) {
1828 case 0:
da6b5335 1829 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1830 break;
1831 case 1:
da6b5335 1832 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1833 break;
1834 case 2:
da6b5335 1835 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1836 break;
18c9b560 1837 }
7d1b0095 1838 tcg_temp_free_i32(tmp);
18c9b560
AZ
1839 gen_op_iwmmxt_movq_wRn_M0(wrd);
1840 gen_op_iwmmxt_set_mup();
1841 break;
1842 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1843 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1844 return 1;
da6b5335 1845 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1846 tmp2 = tcg_temp_new_i32();
da6b5335 1847 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1848 switch ((insn >> 22) & 3) {
1849 case 0:
1850 for (i = 0; i < 7; i ++) {
da6b5335
FN
1851 tcg_gen_shli_i32(tmp2, tmp2, 4);
1852 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1853 }
1854 break;
1855 case 1:
1856 for (i = 0; i < 3; i ++) {
da6b5335
FN
1857 tcg_gen_shli_i32(tmp2, tmp2, 8);
1858 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1859 }
1860 break;
1861 case 2:
da6b5335
FN
1862 tcg_gen_shli_i32(tmp2, tmp2, 16);
1863 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1864 break;
18c9b560 1865 }
da6b5335 1866 gen_set_nzcv(tmp);
7d1b0095
PM
1867 tcg_temp_free_i32(tmp2);
1868 tcg_temp_free_i32(tmp);
18c9b560
AZ
1869 break;
1870 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1871 wrd = (insn >> 12) & 0xf;
1872 rd0 = (insn >> 16) & 0xf;
1873 gen_op_iwmmxt_movq_M0_wRn(rd0);
1874 switch ((insn >> 22) & 3) {
1875 case 0:
e677137d 1876 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1877 break;
1878 case 1:
e677137d 1879 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1880 break;
1881 case 2:
e677137d 1882 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1883 break;
1884 case 3:
1885 return 1;
1886 }
1887 gen_op_iwmmxt_movq_wRn_M0(wrd);
1888 gen_op_iwmmxt_set_mup();
1889 break;
1890 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1891 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1892 return 1;
da6b5335 1893 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1894 tmp2 = tcg_temp_new_i32();
da6b5335 1895 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1896 switch ((insn >> 22) & 3) {
1897 case 0:
1898 for (i = 0; i < 7; i ++) {
da6b5335
FN
1899 tcg_gen_shli_i32(tmp2, tmp2, 4);
1900 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1901 }
1902 break;
1903 case 1:
1904 for (i = 0; i < 3; i ++) {
da6b5335
FN
1905 tcg_gen_shli_i32(tmp2, tmp2, 8);
1906 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1907 }
1908 break;
1909 case 2:
da6b5335
FN
1910 tcg_gen_shli_i32(tmp2, tmp2, 16);
1911 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1912 break;
18c9b560 1913 }
da6b5335 1914 gen_set_nzcv(tmp);
7d1b0095
PM
1915 tcg_temp_free_i32(tmp2);
1916 tcg_temp_free_i32(tmp);
18c9b560
AZ
1917 break;
1918 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1919 rd = (insn >> 12) & 0xf;
1920 rd0 = (insn >> 16) & 0xf;
da6b5335 1921 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1922 return 1;
1923 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1924 tmp = tcg_temp_new_i32();
18c9b560
AZ
1925 switch ((insn >> 22) & 3) {
1926 case 0:
da6b5335 1927 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1928 break;
1929 case 1:
da6b5335 1930 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1931 break;
1932 case 2:
da6b5335 1933 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1934 break;
18c9b560 1935 }
da6b5335 1936 store_reg(s, rd, tmp);
18c9b560
AZ
1937 break;
1938 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1939 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 rd1 = (insn >> 0) & 0xf;
1943 gen_op_iwmmxt_movq_M0_wRn(rd0);
1944 switch ((insn >> 22) & 3) {
1945 case 0:
1946 if (insn & (1 << 21))
1947 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1948 else
1949 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1950 break;
1951 case 1:
1952 if (insn & (1 << 21))
1953 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1954 else
1955 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1956 break;
1957 case 2:
1958 if (insn & (1 << 21))
1959 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1960 else
1961 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1962 break;
1963 case 3:
1964 return 1;
1965 }
1966 gen_op_iwmmxt_movq_wRn_M0(wrd);
1967 gen_op_iwmmxt_set_mup();
1968 gen_op_iwmmxt_set_cup();
1969 break;
1970 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1971 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1972 wrd = (insn >> 12) & 0xf;
1973 rd0 = (insn >> 16) & 0xf;
1974 gen_op_iwmmxt_movq_M0_wRn(rd0);
1975 switch ((insn >> 22) & 3) {
1976 case 0:
1977 if (insn & (1 << 21))
1978 gen_op_iwmmxt_unpacklsb_M0();
1979 else
1980 gen_op_iwmmxt_unpacklub_M0();
1981 break;
1982 case 1:
1983 if (insn & (1 << 21))
1984 gen_op_iwmmxt_unpacklsw_M0();
1985 else
1986 gen_op_iwmmxt_unpackluw_M0();
1987 break;
1988 case 2:
1989 if (insn & (1 << 21))
1990 gen_op_iwmmxt_unpacklsl_M0();
1991 else
1992 gen_op_iwmmxt_unpacklul_M0();
1993 break;
1994 case 3:
1995 return 1;
1996 }
1997 gen_op_iwmmxt_movq_wRn_M0(wrd);
1998 gen_op_iwmmxt_set_mup();
1999 gen_op_iwmmxt_set_cup();
2000 break;
2001 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2002 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
2006 switch ((insn >> 22) & 3) {
2007 case 0:
2008 if (insn & (1 << 21))
2009 gen_op_iwmmxt_unpackhsb_M0();
2010 else
2011 gen_op_iwmmxt_unpackhub_M0();
2012 break;
2013 case 1:
2014 if (insn & (1 << 21))
2015 gen_op_iwmmxt_unpackhsw_M0();
2016 else
2017 gen_op_iwmmxt_unpackhuw_M0();
2018 break;
2019 case 2:
2020 if (insn & (1 << 21))
2021 gen_op_iwmmxt_unpackhsl_M0();
2022 else
2023 gen_op_iwmmxt_unpackhul_M0();
2024 break;
2025 case 3:
2026 return 1;
2027 }
2028 gen_op_iwmmxt_movq_wRn_M0(wrd);
2029 gen_op_iwmmxt_set_mup();
2030 gen_op_iwmmxt_set_cup();
2031 break;
2032 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2033 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2034 if (((insn >> 22) & 3) == 0)
2035 return 1;
18c9b560
AZ
2036 wrd = (insn >> 12) & 0xf;
2037 rd0 = (insn >> 16) & 0xf;
2038 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2039 tmp = tcg_temp_new_i32();
da6b5335 2040 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2041 tcg_temp_free_i32(tmp);
18c9b560 2042 return 1;
da6b5335 2043 }
18c9b560 2044 switch ((insn >> 22) & 3) {
18c9b560 2045 case 1:
477955bd 2046 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2047 break;
2048 case 2:
477955bd 2049 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2050 break;
2051 case 3:
477955bd 2052 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2053 break;
2054 }
7d1b0095 2055 tcg_temp_free_i32(tmp);
18c9b560
AZ
2056 gen_op_iwmmxt_movq_wRn_M0(wrd);
2057 gen_op_iwmmxt_set_mup();
2058 gen_op_iwmmxt_set_cup();
2059 break;
2060 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2061 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2062 if (((insn >> 22) & 3) == 0)
2063 return 1;
18c9b560
AZ
2064 wrd = (insn >> 12) & 0xf;
2065 rd0 = (insn >> 16) & 0xf;
2066 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2067 tmp = tcg_temp_new_i32();
da6b5335 2068 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2069 tcg_temp_free_i32(tmp);
18c9b560 2070 return 1;
da6b5335 2071 }
18c9b560 2072 switch ((insn >> 22) & 3) {
18c9b560 2073 case 1:
477955bd 2074 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2075 break;
2076 case 2:
477955bd 2077 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2078 break;
2079 case 3:
477955bd 2080 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2081 break;
2082 }
7d1b0095 2083 tcg_temp_free_i32(tmp);
18c9b560
AZ
2084 gen_op_iwmmxt_movq_wRn_M0(wrd);
2085 gen_op_iwmmxt_set_mup();
2086 gen_op_iwmmxt_set_cup();
2087 break;
2088 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2089 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2090 if (((insn >> 22) & 3) == 0)
2091 return 1;
18c9b560
AZ
2092 wrd = (insn >> 12) & 0xf;
2093 rd0 = (insn >> 16) & 0xf;
2094 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2095 tmp = tcg_temp_new_i32();
da6b5335 2096 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2097 tcg_temp_free_i32(tmp);
18c9b560 2098 return 1;
da6b5335 2099 }
18c9b560 2100 switch ((insn >> 22) & 3) {
18c9b560 2101 case 1:
477955bd 2102 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2103 break;
2104 case 2:
477955bd 2105 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2106 break;
2107 case 3:
477955bd 2108 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2109 break;
2110 }
7d1b0095 2111 tcg_temp_free_i32(tmp);
18c9b560
AZ
2112 gen_op_iwmmxt_movq_wRn_M0(wrd);
2113 gen_op_iwmmxt_set_mup();
2114 gen_op_iwmmxt_set_cup();
2115 break;
2116 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2117 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2118 if (((insn >> 22) & 3) == 0)
2119 return 1;
18c9b560
AZ
2120 wrd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2123 tmp = tcg_temp_new_i32();
18c9b560 2124 switch ((insn >> 22) & 3) {
18c9b560 2125 case 1:
da6b5335 2126 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2127 tcg_temp_free_i32(tmp);
18c9b560 2128 return 1;
da6b5335 2129 }
477955bd 2130 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2131 break;
2132 case 2:
da6b5335 2133 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2134 tcg_temp_free_i32(tmp);
18c9b560 2135 return 1;
da6b5335 2136 }
477955bd 2137 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2138 break;
2139 case 3:
da6b5335 2140 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2141 tcg_temp_free_i32(tmp);
18c9b560 2142 return 1;
da6b5335 2143 }
477955bd 2144 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2145 break;
2146 }
7d1b0095 2147 tcg_temp_free_i32(tmp);
18c9b560
AZ
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2150 gen_op_iwmmxt_set_cup();
2151 break;
2152 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2153 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2154 wrd = (insn >> 12) & 0xf;
2155 rd0 = (insn >> 16) & 0xf;
2156 rd1 = (insn >> 0) & 0xf;
2157 gen_op_iwmmxt_movq_M0_wRn(rd0);
2158 switch ((insn >> 22) & 3) {
2159 case 0:
2160 if (insn & (1 << 21))
2161 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2162 else
2163 gen_op_iwmmxt_minub_M0_wRn(rd1);
2164 break;
2165 case 1:
2166 if (insn & (1 << 21))
2167 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2168 else
2169 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2170 break;
2171 case 2:
2172 if (insn & (1 << 21))
2173 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2174 else
2175 gen_op_iwmmxt_minul_M0_wRn(rd1);
2176 break;
2177 case 3:
2178 return 1;
2179 }
2180 gen_op_iwmmxt_movq_wRn_M0(wrd);
2181 gen_op_iwmmxt_set_mup();
2182 break;
2183 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2184 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2185 wrd = (insn >> 12) & 0xf;
2186 rd0 = (insn >> 16) & 0xf;
2187 rd1 = (insn >> 0) & 0xf;
2188 gen_op_iwmmxt_movq_M0_wRn(rd0);
2189 switch ((insn >> 22) & 3) {
2190 case 0:
2191 if (insn & (1 << 21))
2192 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2193 else
2194 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2195 break;
2196 case 1:
2197 if (insn & (1 << 21))
2198 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2199 else
2200 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2201 break;
2202 case 2:
2203 if (insn & (1 << 21))
2204 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2205 else
2206 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2207 break;
2208 case 3:
2209 return 1;
2210 }
2211 gen_op_iwmmxt_movq_wRn_M0(wrd);
2212 gen_op_iwmmxt_set_mup();
2213 break;
2214 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2215 case 0x402: case 0x502: case 0x602: case 0x702:
2216 wrd = (insn >> 12) & 0xf;
2217 rd0 = (insn >> 16) & 0xf;
2218 rd1 = (insn >> 0) & 0xf;
2219 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2220 tmp = tcg_const_i32((insn >> 20) & 3);
2221 iwmmxt_load_reg(cpu_V1, rd1);
2222 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2223 tcg_temp_free(tmp);
18c9b560
AZ
2224 gen_op_iwmmxt_movq_wRn_M0(wrd);
2225 gen_op_iwmmxt_set_mup();
2226 break;
2227 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2228 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2229 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2230 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2231 wrd = (insn >> 12) & 0xf;
2232 rd0 = (insn >> 16) & 0xf;
2233 rd1 = (insn >> 0) & 0xf;
2234 gen_op_iwmmxt_movq_M0_wRn(rd0);
2235 switch ((insn >> 20) & 0xf) {
2236 case 0x0:
2237 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2238 break;
2239 case 0x1:
2240 gen_op_iwmmxt_subub_M0_wRn(rd1);
2241 break;
2242 case 0x3:
2243 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2244 break;
2245 case 0x4:
2246 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2247 break;
2248 case 0x5:
2249 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2250 break;
2251 case 0x7:
2252 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2253 break;
2254 case 0x8:
2255 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2256 break;
2257 case 0x9:
2258 gen_op_iwmmxt_subul_M0_wRn(rd1);
2259 break;
2260 case 0xb:
2261 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2262 break;
2263 default:
2264 return 1;
2265 }
2266 gen_op_iwmmxt_movq_wRn_M0(wrd);
2267 gen_op_iwmmxt_set_mup();
2268 gen_op_iwmmxt_set_cup();
2269 break;
2270 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2271 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2272 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2273 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2274 wrd = (insn >> 12) & 0xf;
2275 rd0 = (insn >> 16) & 0xf;
2276 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2277 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2278 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2279 tcg_temp_free(tmp);
18c9b560
AZ
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 gen_op_iwmmxt_set_cup();
2283 break;
2284 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2285 case 0x418: case 0x518: case 0x618: case 0x718:
2286 case 0x818: case 0x918: case 0xa18: case 0xb18:
2287 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2288 wrd = (insn >> 12) & 0xf;
2289 rd0 = (insn >> 16) & 0xf;
2290 rd1 = (insn >> 0) & 0xf;
2291 gen_op_iwmmxt_movq_M0_wRn(rd0);
2292 switch ((insn >> 20) & 0xf) {
2293 case 0x0:
2294 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2295 break;
2296 case 0x1:
2297 gen_op_iwmmxt_addub_M0_wRn(rd1);
2298 break;
2299 case 0x3:
2300 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2301 break;
2302 case 0x4:
2303 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2304 break;
2305 case 0x5:
2306 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2307 break;
2308 case 0x7:
2309 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2310 break;
2311 case 0x8:
2312 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2313 break;
2314 case 0x9:
2315 gen_op_iwmmxt_addul_M0_wRn(rd1);
2316 break;
2317 case 0xb:
2318 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2319 break;
2320 default:
2321 return 1;
2322 }
2323 gen_op_iwmmxt_movq_wRn_M0(wrd);
2324 gen_op_iwmmxt_set_mup();
2325 gen_op_iwmmxt_set_cup();
2326 break;
2327 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2328 case 0x408: case 0x508: case 0x608: case 0x708:
2329 case 0x808: case 0x908: case 0xa08: case 0xb08:
2330 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2331 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2332 return 1;
18c9b560
AZ
2333 wrd = (insn >> 12) & 0xf;
2334 rd0 = (insn >> 16) & 0xf;
2335 rd1 = (insn >> 0) & 0xf;
2336 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2337 switch ((insn >> 22) & 3) {
18c9b560
AZ
2338 case 1:
2339 if (insn & (1 << 21))
2340 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2341 else
2342 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2343 break;
2344 case 2:
2345 if (insn & (1 << 21))
2346 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2347 else
2348 gen_op_iwmmxt_packul_M0_wRn(rd1);
2349 break;
2350 case 3:
2351 if (insn & (1 << 21))
2352 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2353 else
2354 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2355 break;
2356 }
2357 gen_op_iwmmxt_movq_wRn_M0(wrd);
2358 gen_op_iwmmxt_set_mup();
2359 gen_op_iwmmxt_set_cup();
2360 break;
2361 case 0x201: case 0x203: case 0x205: case 0x207:
2362 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2363 case 0x211: case 0x213: case 0x215: case 0x217:
2364 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2365 wrd = (insn >> 5) & 0xf;
2366 rd0 = (insn >> 12) & 0xf;
2367 rd1 = (insn >> 0) & 0xf;
2368 if (rd0 == 0xf || rd1 == 0xf)
2369 return 1;
2370 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2371 tmp = load_reg(s, rd0);
2372 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2373 switch ((insn >> 16) & 0xf) {
2374 case 0x0: /* TMIA */
da6b5335 2375 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2376 break;
2377 case 0x8: /* TMIAPH */
da6b5335 2378 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2379 break;
2380 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2381 if (insn & (1 << 16))
da6b5335 2382 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2383 if (insn & (1 << 17))
da6b5335
FN
2384 tcg_gen_shri_i32(tmp2, tmp2, 16);
2385 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2386 break;
2387 default:
7d1b0095
PM
2388 tcg_temp_free_i32(tmp2);
2389 tcg_temp_free_i32(tmp);
18c9b560
AZ
2390 return 1;
2391 }
7d1b0095
PM
2392 tcg_temp_free_i32(tmp2);
2393 tcg_temp_free_i32(tmp);
18c9b560
AZ
2394 gen_op_iwmmxt_movq_wRn_M0(wrd);
2395 gen_op_iwmmxt_set_mup();
2396 break;
2397 default:
2398 return 1;
2399 }
2400
2401 return 0;
2402}
2403
a1c7273b 2404/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2405 (ie. an undefined instruction). */
0ecb72a5 2406static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2407{
2408 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2409 TCGv tmp, tmp2;
18c9b560
AZ
2410
2411 if ((insn & 0x0ff00f10) == 0x0e200010) {
2412 /* Multiply with Internal Accumulate Format */
2413 rd0 = (insn >> 12) & 0xf;
2414 rd1 = insn & 0xf;
2415 acc = (insn >> 5) & 7;
2416
2417 if (acc != 0)
2418 return 1;
2419
3a554c0f
FN
2420 tmp = load_reg(s, rd0);
2421 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2422 switch ((insn >> 16) & 0xf) {
2423 case 0x0: /* MIA */
3a554c0f 2424 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2425 break;
2426 case 0x8: /* MIAPH */
3a554c0f 2427 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2428 break;
2429 case 0xc: /* MIABB */
2430 case 0xd: /* MIABT */
2431 case 0xe: /* MIATB */
2432 case 0xf: /* MIATT */
18c9b560 2433 if (insn & (1 << 16))
3a554c0f 2434 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2435 if (insn & (1 << 17))
3a554c0f
FN
2436 tcg_gen_shri_i32(tmp2, tmp2, 16);
2437 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2438 break;
2439 default:
2440 return 1;
2441 }
7d1b0095
PM
2442 tcg_temp_free_i32(tmp2);
2443 tcg_temp_free_i32(tmp);
18c9b560
AZ
2444
2445 gen_op_iwmmxt_movq_wRn_M0(acc);
2446 return 0;
2447 }
2448
2449 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2450 /* Internal Accumulator Access Format */
2451 rdhi = (insn >> 16) & 0xf;
2452 rdlo = (insn >> 12) & 0xf;
2453 acc = insn & 7;
2454
2455 if (acc != 0)
2456 return 1;
2457
2458 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2459 iwmmxt_load_reg(cpu_V0, acc);
2460 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2461 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2462 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2463 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2464 } else { /* MAR */
3a554c0f
FN
2465 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2466 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2467 }
2468 return 0;
2469 }
2470
2471 return 1;
2472}
2473
/* Helpers for extracting VFP register numbers from instruction fields.
 * A single-precision register number is a 4-bit field plus one extra
 * low bit elsewhere in the insn; a double-precision register is the
 * 4-bit field plus a high bit that is only valid on VFP3.
 */
/* Shift x right by n bits, treating a non-positive n as a left shift.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* The extra high bit selects d16..d31, which only exist on VFP3; on
 * earlier versions a set bit makes the insn UNDEF, hence the return 1
 * embedded in the macro (it expands inside the decode functions).
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Shorthands for the D (destination), N and M operand fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
4373f3ce
PB
2494/* Move between integer and VFP cores. */
2495static TCGv gen_vfp_mrs(void)
2496{
7d1b0095 2497 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2498 tcg_gen_mov_i32(tmp, cpu_F0s);
2499 return tmp;
2500}
2501
/* Move between integer and VFP cores: copy tmp into the VFP scratch
 * register F0s.  Consumes (frees) tmp.
 */
static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
2507
ad69471c
PB
2508static void gen_neon_dup_u8(TCGv var, int shift)
2509{
7d1b0095 2510 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2511 if (shift)
2512 tcg_gen_shri_i32(var, var, shift);
86831435 2513 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2514 tcg_gen_shli_i32(tmp, var, 8);
2515 tcg_gen_or_i32(var, var, tmp);
2516 tcg_gen_shli_i32(tmp, var, 16);
2517 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2518 tcg_temp_free_i32(tmp);
ad69471c
PB
2519}
2520
2521static void gen_neon_dup_low16(TCGv var)
2522{
7d1b0095 2523 TCGv tmp = tcg_temp_new_i32();
86831435 2524 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2525 tcg_gen_shli_i32(tmp, var, 16);
2526 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2527 tcg_temp_free_i32(tmp);
ad69471c
PB
2528}
2529
2530static void gen_neon_dup_high16(TCGv var)
2531{
7d1b0095 2532 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2533 tcg_gen_andi_i32(var, var, 0xffff0000);
2534 tcg_gen_shri_i32(tmp, var, 16);
2535 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2536 tcg_temp_free_i32(tmp);
ad69471c
PB
2537}
2538
8e18cde3
PM
2539static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2540{
2541 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2542 TCGv tmp;
2543 switch (size) {
2544 case 0:
2545 tmp = gen_ld8u(addr, IS_USER(s));
2546 gen_neon_dup_u8(tmp, 0);
2547 break;
2548 case 1:
2549 tmp = gen_ld16u(addr, IS_USER(s));
2550 gen_neon_dup_low16(tmp);
2551 break;
2552 case 2:
2553 tmp = gen_ld32(addr, IS_USER(s));
2554 break;
2555 default: /* Avoid compiler warnings. */
2556 abort();
2557 }
2558 return tmp;
2559}
2560
a1c7273b 2561/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2562 (ie. an undefined instruction). */
0ecb72a5 2563static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2564{
2565 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2566 int dp, veclen;
312eea9f 2567 TCGv addr;
4373f3ce 2568 TCGv tmp;
ad69471c 2569 TCGv tmp2;
b7bcbe95 2570
40f137e1
PB
2571 if (!arm_feature(env, ARM_FEATURE_VFP))
2572 return 1;
2573
5df8bac1 2574 if (!s->vfp_enabled) {
9ee6e8bb 2575 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2576 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2577 return 1;
2578 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2579 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2580 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2581 return 1;
2582 }
b7bcbe95
FB
2583 dp = ((insn & 0xf00) == 0xb00);
2584 switch ((insn >> 24) & 0xf) {
2585 case 0xe:
2586 if (insn & (1 << 4)) {
2587 /* single register transfer */
b7bcbe95
FB
2588 rd = (insn >> 12) & 0xf;
2589 if (dp) {
9ee6e8bb
PB
2590 int size;
2591 int pass;
2592
2593 VFP_DREG_N(rn, insn);
2594 if (insn & 0xf)
b7bcbe95 2595 return 1;
9ee6e8bb
PB
2596 if (insn & 0x00c00060
2597 && !arm_feature(env, ARM_FEATURE_NEON))
2598 return 1;
2599
2600 pass = (insn >> 21) & 1;
2601 if (insn & (1 << 22)) {
2602 size = 0;
2603 offset = ((insn >> 5) & 3) * 8;
2604 } else if (insn & (1 << 5)) {
2605 size = 1;
2606 offset = (insn & (1 << 6)) ? 16 : 0;
2607 } else {
2608 size = 2;
2609 offset = 0;
2610 }
18c9b560 2611 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2612 /* vfp->arm */
ad69471c 2613 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2614 switch (size) {
2615 case 0:
9ee6e8bb 2616 if (offset)
ad69471c 2617 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2618 if (insn & (1 << 23))
ad69471c 2619 gen_uxtb(tmp);
9ee6e8bb 2620 else
ad69471c 2621 gen_sxtb(tmp);
9ee6e8bb
PB
2622 break;
2623 case 1:
9ee6e8bb
PB
2624 if (insn & (1 << 23)) {
2625 if (offset) {
ad69471c 2626 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2627 } else {
ad69471c 2628 gen_uxth(tmp);
9ee6e8bb
PB
2629 }
2630 } else {
2631 if (offset) {
ad69471c 2632 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2633 } else {
ad69471c 2634 gen_sxth(tmp);
9ee6e8bb
PB
2635 }
2636 }
2637 break;
2638 case 2:
9ee6e8bb
PB
2639 break;
2640 }
ad69471c 2641 store_reg(s, rd, tmp);
b7bcbe95
FB
2642 } else {
2643 /* arm->vfp */
ad69471c 2644 tmp = load_reg(s, rd);
9ee6e8bb
PB
2645 if (insn & (1 << 23)) {
2646 /* VDUP */
2647 if (size == 0) {
ad69471c 2648 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2649 } else if (size == 1) {
ad69471c 2650 gen_neon_dup_low16(tmp);
9ee6e8bb 2651 }
cbbccffc 2652 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2653 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2654 tcg_gen_mov_i32(tmp2, tmp);
2655 neon_store_reg(rn, n, tmp2);
2656 }
2657 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2658 } else {
2659 /* VMOV */
2660 switch (size) {
2661 case 0:
ad69471c
PB
2662 tmp2 = neon_load_reg(rn, pass);
2663 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2664 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2665 break;
2666 case 1:
ad69471c
PB
2667 tmp2 = neon_load_reg(rn, pass);
2668 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2669 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2670 break;
2671 case 2:
9ee6e8bb
PB
2672 break;
2673 }
ad69471c 2674 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2675 }
b7bcbe95 2676 }
9ee6e8bb
PB
2677 } else { /* !dp */
2678 if ((insn & 0x6f) != 0x00)
2679 return 1;
2680 rn = VFP_SREG_N(insn);
18c9b560 2681 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2682 /* vfp->arm */
2683 if (insn & (1 << 21)) {
2684 /* system register */
40f137e1 2685 rn >>= 1;
9ee6e8bb 2686
b7bcbe95 2687 switch (rn) {
40f137e1 2688 case ARM_VFP_FPSID:
4373f3ce 2689 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2690 VFP3 restricts all id registers to privileged
2691 accesses. */
2692 if (IS_USER(s)
2693 && arm_feature(env, ARM_FEATURE_VFP3))
2694 return 1;
4373f3ce 2695 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2696 break;
40f137e1 2697 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2698 if (IS_USER(s))
2699 return 1;
4373f3ce 2700 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2701 break;
40f137e1
PB
2702 case ARM_VFP_FPINST:
2703 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2704 /* Not present in VFP3. */
2705 if (IS_USER(s)
2706 || arm_feature(env, ARM_FEATURE_VFP3))
2707 return 1;
4373f3ce 2708 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2709 break;
40f137e1 2710 case ARM_VFP_FPSCR:
601d70b9 2711 if (rd == 15) {
4373f3ce
PB
2712 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2713 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2714 } else {
7d1b0095 2715 tmp = tcg_temp_new_i32();
4373f3ce
PB
2716 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2717 }
b7bcbe95 2718 break;
9ee6e8bb
PB
2719 case ARM_VFP_MVFR0:
2720 case ARM_VFP_MVFR1:
2721 if (IS_USER(s)
06ed5d66 2722 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2723 return 1;
4373f3ce 2724 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2725 break;
b7bcbe95
FB
2726 default:
2727 return 1;
2728 }
2729 } else {
2730 gen_mov_F0_vreg(0, rn);
4373f3ce 2731 tmp = gen_vfp_mrs();
b7bcbe95
FB
2732 }
2733 if (rd == 15) {
b5ff1b31 2734 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2735 gen_set_nzcv(tmp);
7d1b0095 2736 tcg_temp_free_i32(tmp);
4373f3ce
PB
2737 } else {
2738 store_reg(s, rd, tmp);
2739 }
b7bcbe95
FB
2740 } else {
2741 /* arm->vfp */
4373f3ce 2742 tmp = load_reg(s, rd);
b7bcbe95 2743 if (insn & (1 << 21)) {
40f137e1 2744 rn >>= 1;
b7bcbe95
FB
2745 /* system register */
2746 switch (rn) {
40f137e1 2747 case ARM_VFP_FPSID:
9ee6e8bb
PB
2748 case ARM_VFP_MVFR0:
2749 case ARM_VFP_MVFR1:
b7bcbe95
FB
2750 /* Writes are ignored. */
2751 break;
40f137e1 2752 case ARM_VFP_FPSCR:
4373f3ce 2753 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2754 tcg_temp_free_i32(tmp);
b5ff1b31 2755 gen_lookup_tb(s);
b7bcbe95 2756 break;
40f137e1 2757 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2758 if (IS_USER(s))
2759 return 1;
71b3c3de
JR
2760 /* TODO: VFP subarchitecture support.
2761 * For now, keep the EN bit only */
2762 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2763 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2764 gen_lookup_tb(s);
2765 break;
2766 case ARM_VFP_FPINST:
2767 case ARM_VFP_FPINST2:
4373f3ce 2768 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2769 break;
b7bcbe95
FB
2770 default:
2771 return 1;
2772 }
2773 } else {
4373f3ce 2774 gen_vfp_msr(tmp);
b7bcbe95
FB
2775 gen_mov_vreg_F0(0, rn);
2776 }
2777 }
2778 }
2779 } else {
2780 /* data processing */
2781 /* The opcode is in bits 23, 21, 20 and 6. */
2782 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2783 if (dp) {
2784 if (op == 15) {
2785 /* rn is opcode */
2786 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2787 } else {
2788 /* rn is register number */
9ee6e8bb 2789 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2790 }
2791
04595bf6 2792 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2793 /* Integer or single precision destination. */
9ee6e8bb 2794 rd = VFP_SREG_D(insn);
b7bcbe95 2795 } else {
9ee6e8bb 2796 VFP_DREG_D(rd, insn);
b7bcbe95 2797 }
04595bf6
PM
2798 if (op == 15 &&
2799 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2800 /* VCVT from int is always from S reg regardless of dp bit.
2801 * VCVT with immediate frac_bits has same format as SREG_M
2802 */
2803 rm = VFP_SREG_M(insn);
b7bcbe95 2804 } else {
9ee6e8bb 2805 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2806 }
2807 } else {
9ee6e8bb 2808 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2809 if (op == 15 && rn == 15) {
2810 /* Double precision destination. */
9ee6e8bb
PB
2811 VFP_DREG_D(rd, insn);
2812 } else {
2813 rd = VFP_SREG_D(insn);
2814 }
04595bf6
PM
2815 /* NB that we implicitly rely on the encoding for the frac_bits
2816 * in VCVT of fixed to float being the same as that of an SREG_M
2817 */
9ee6e8bb 2818 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2819 }
2820
69d1fc22 2821 veclen = s->vec_len;
b7bcbe95
FB
2822 if (op == 15 && rn > 3)
2823 veclen = 0;
2824
2825 /* Shut up compiler warnings. */
2826 delta_m = 0;
2827 delta_d = 0;
2828 bank_mask = 0;
3b46e624 2829
b7bcbe95
FB
2830 if (veclen > 0) {
2831 if (dp)
2832 bank_mask = 0xc;
2833 else
2834 bank_mask = 0x18;
2835
2836 /* Figure out what type of vector operation this is. */
2837 if ((rd & bank_mask) == 0) {
2838 /* scalar */
2839 veclen = 0;
2840 } else {
2841 if (dp)
69d1fc22 2842 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2843 else
69d1fc22 2844 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2845
2846 if ((rm & bank_mask) == 0) {
2847 /* mixed scalar/vector */
2848 delta_m = 0;
2849 } else {
2850 /* vector */
2851 delta_m = delta_d;
2852 }
2853 }
2854 }
2855
2856 /* Load the initial operands. */
2857 if (op == 15) {
2858 switch (rn) {
2859 case 16:
2860 case 17:
2861 /* Integer source */
2862 gen_mov_F0_vreg(0, rm);
2863 break;
2864 case 8:
2865 case 9:
2866 /* Compare */
2867 gen_mov_F0_vreg(dp, rd);
2868 gen_mov_F1_vreg(dp, rm);
2869 break;
2870 case 10:
2871 case 11:
2872 /* Compare with zero */
2873 gen_mov_F0_vreg(dp, rd);
2874 gen_vfp_F1_ld0(dp);
2875 break;
9ee6e8bb
PB
2876 case 20:
2877 case 21:
2878 case 22:
2879 case 23:
644ad806
PB
2880 case 28:
2881 case 29:
2882 case 30:
2883 case 31:
9ee6e8bb
PB
2884 /* Source and destination the same. */
2885 gen_mov_F0_vreg(dp, rd);
2886 break;
6e0c0ed1
PM
2887 case 4:
2888 case 5:
2889 case 6:
2890 case 7:
2891 /* VCVTB, VCVTT: only present with the halfprec extension,
2892 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2893 */
2894 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2895 return 1;
2896 }
2897 /* Otherwise fall through */
b7bcbe95
FB
2898 default:
2899 /* One source operand. */
2900 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2901 break;
b7bcbe95
FB
2902 }
2903 } else {
2904 /* Two source operands. */
2905 gen_mov_F0_vreg(dp, rn);
2906 gen_mov_F1_vreg(dp, rm);
2907 }
2908
2909 for (;;) {
2910 /* Perform the calculation. */
2911 switch (op) {
605a6aed
PM
2912 case 0: /* VMLA: fd + (fn * fm) */
2913 /* Note that order of inputs to the add matters for NaNs */
2914 gen_vfp_F1_mul(dp);
2915 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2916 gen_vfp_add(dp);
2917 break;
605a6aed 2918 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2919 gen_vfp_mul(dp);
605a6aed
PM
2920 gen_vfp_F1_neg(dp);
2921 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2922 gen_vfp_add(dp);
2923 break;
605a6aed
PM
2924 case 2: /* VNMLS: -fd + (fn * fm) */
2925 /* Note that it isn't valid to replace (-A + B) with (B - A)
2926 * or similar plausible looking simplifications
2927 * because this will give wrong results for NaNs.
2928 */
2929 gen_vfp_F1_mul(dp);
2930 gen_mov_F0_vreg(dp, rd);
2931 gen_vfp_neg(dp);
2932 gen_vfp_add(dp);
b7bcbe95 2933 break;
605a6aed 2934 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2935 gen_vfp_mul(dp);
605a6aed
PM
2936 gen_vfp_F1_neg(dp);
2937 gen_mov_F0_vreg(dp, rd);
b7bcbe95 2938 gen_vfp_neg(dp);
605a6aed 2939 gen_vfp_add(dp);
b7bcbe95
FB
2940 break;
2941 case 4: /* mul: fn * fm */
2942 gen_vfp_mul(dp);
2943 break;
2944 case 5: /* nmul: -(fn * fm) */
2945 gen_vfp_mul(dp);
2946 gen_vfp_neg(dp);
2947 break;
2948 case 6: /* add: fn + fm */
2949 gen_vfp_add(dp);
2950 break;
2951 case 7: /* sub: fn - fm */
2952 gen_vfp_sub(dp);
2953 break;
2954 case 8: /* div: fn / fm */
2955 gen_vfp_div(dp);
2956 break;
da97f52c
PM
2957 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2958 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2959 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2960 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2961 /* These are fused multiply-add, and must be done as one
2962 * floating point operation with no rounding between the
2963 * multiplication and addition steps.
2964 * NB that doing the negations here as separate steps is
2965 * correct : an input NaN should come out with its sign bit
2966 * flipped if it is a negated-input.
2967 */
2968 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
2969 return 1;
2970 }
2971 if (dp) {
2972 TCGv_ptr fpst;
2973 TCGv_i64 frd;
2974 if (op & 1) {
2975 /* VFNMS, VFMS */
2976 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
2977 }
2978 frd = tcg_temp_new_i64();
2979 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
2980 if (op & 2) {
2981 /* VFNMA, VFNMS */
2982 gen_helper_vfp_negd(frd, frd);
2983 }
2984 fpst = get_fpstatus_ptr(0);
2985 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
2986 cpu_F1d, frd, fpst);
2987 tcg_temp_free_ptr(fpst);
2988 tcg_temp_free_i64(frd);
2989 } else {
2990 TCGv_ptr fpst;
2991 TCGv_i32 frd;
2992 if (op & 1) {
2993 /* VFNMS, VFMS */
2994 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
2995 }
2996 frd = tcg_temp_new_i32();
2997 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
2998 if (op & 2) {
2999 gen_helper_vfp_negs(frd, frd);
3000 }
3001 fpst = get_fpstatus_ptr(0);
3002 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3003 cpu_F1s, frd, fpst);
3004 tcg_temp_free_ptr(fpst);
3005 tcg_temp_free_i32(frd);
3006 }
3007 break;
9ee6e8bb
PB
3008 case 14: /* fconst */
3009 if (!arm_feature(env, ARM_FEATURE_VFP3))
3010 return 1;
3011
3012 n = (insn << 12) & 0x80000000;
3013 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3014 if (dp) {
3015 if (i & 0x40)
3016 i |= 0x3f80;
3017 else
3018 i |= 0x4000;
3019 n |= i << 16;
4373f3ce 3020 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3021 } else {
3022 if (i & 0x40)
3023 i |= 0x780;
3024 else
3025 i |= 0x800;
3026 n |= i << 19;
5b340b51 3027 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3028 }
9ee6e8bb 3029 break;
b7bcbe95
FB
3030 case 15: /* extension space */
3031 switch (rn) {
3032 case 0: /* cpy */
3033 /* no-op */
3034 break;
3035 case 1: /* abs */
3036 gen_vfp_abs(dp);
3037 break;
3038 case 2: /* neg */
3039 gen_vfp_neg(dp);
3040 break;
3041 case 3: /* sqrt */
3042 gen_vfp_sqrt(dp);
3043 break;
60011498 3044 case 4: /* vcvtb.f32.f16 */
60011498
PB
3045 tmp = gen_vfp_mrs();
3046 tcg_gen_ext16u_i32(tmp, tmp);
3047 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3048 tcg_temp_free_i32(tmp);
60011498
PB
3049 break;
3050 case 5: /* vcvtt.f32.f16 */
60011498
PB
3051 tmp = gen_vfp_mrs();
3052 tcg_gen_shri_i32(tmp, tmp, 16);
3053 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3054 tcg_temp_free_i32(tmp);
60011498
PB
3055 break;
3056 case 6: /* vcvtb.f16.f32 */
7d1b0095 3057 tmp = tcg_temp_new_i32();
60011498
PB
3058 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3059 gen_mov_F0_vreg(0, rd);
3060 tmp2 = gen_vfp_mrs();
3061 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3062 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3063 tcg_temp_free_i32(tmp2);
60011498
PB
3064 gen_vfp_msr(tmp);
3065 break;
3066 case 7: /* vcvtt.f16.f32 */
7d1b0095 3067 tmp = tcg_temp_new_i32();
60011498
PB
3068 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3069 tcg_gen_shli_i32(tmp, tmp, 16);
3070 gen_mov_F0_vreg(0, rd);
3071 tmp2 = gen_vfp_mrs();
3072 tcg_gen_ext16u_i32(tmp2, tmp2);
3073 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3074 tcg_temp_free_i32(tmp2);
60011498
PB
3075 gen_vfp_msr(tmp);
3076 break;
b7bcbe95
FB
3077 case 8: /* cmp */
3078 gen_vfp_cmp(dp);
3079 break;
3080 case 9: /* cmpe */
3081 gen_vfp_cmpe(dp);
3082 break;
3083 case 10: /* cmpz */
3084 gen_vfp_cmp(dp);
3085 break;
3086 case 11: /* cmpez */
3087 gen_vfp_F1_ld0(dp);
3088 gen_vfp_cmpe(dp);
3089 break;
3090 case 15: /* single<->double conversion */
3091 if (dp)
4373f3ce 3092 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3093 else
4373f3ce 3094 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3095 break;
3096 case 16: /* fuito */
5500b06c 3097 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3098 break;
3099 case 17: /* fsito */
5500b06c 3100 gen_vfp_sito(dp, 0);
b7bcbe95 3101 break;
9ee6e8bb
PB
3102 case 20: /* fshto */
3103 if (!arm_feature(env, ARM_FEATURE_VFP3))
3104 return 1;
5500b06c 3105 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3106 break;
3107 case 21: /* fslto */
3108 if (!arm_feature(env, ARM_FEATURE_VFP3))
3109 return 1;
5500b06c 3110 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3111 break;
3112 case 22: /* fuhto */
3113 if (!arm_feature(env, ARM_FEATURE_VFP3))
3114 return 1;
5500b06c 3115 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3116 break;
3117 case 23: /* fulto */
3118 if (!arm_feature(env, ARM_FEATURE_VFP3))
3119 return 1;
5500b06c 3120 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3121 break;
b7bcbe95 3122 case 24: /* ftoui */
5500b06c 3123 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3124 break;
3125 case 25: /* ftouiz */
5500b06c 3126 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3127 break;
3128 case 26: /* ftosi */
5500b06c 3129 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3130 break;
3131 case 27: /* ftosiz */
5500b06c 3132 gen_vfp_tosiz(dp, 0);
b7bcbe95 3133 break;
9ee6e8bb
PB
3134 case 28: /* ftosh */
3135 if (!arm_feature(env, ARM_FEATURE_VFP3))
3136 return 1;
5500b06c 3137 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3138 break;
3139 case 29: /* ftosl */
3140 if (!arm_feature(env, ARM_FEATURE_VFP3))
3141 return 1;
5500b06c 3142 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3143 break;
3144 case 30: /* ftouh */
3145 if (!arm_feature(env, ARM_FEATURE_VFP3))
3146 return 1;
5500b06c 3147 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3148 break;
3149 case 31: /* ftoul */
3150 if (!arm_feature(env, ARM_FEATURE_VFP3))
3151 return 1;
5500b06c 3152 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3153 break;
b7bcbe95 3154 default: /* undefined */
b7bcbe95
FB
3155 return 1;
3156 }
3157 break;
3158 default: /* undefined */
b7bcbe95
FB
3159 return 1;
3160 }
3161
3162 /* Write back the result. */
3163 if (op == 15 && (rn >= 8 && rn <= 11))
3164 ; /* Comparison, do nothing. */
04595bf6
PM
3165 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3166 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3167 gen_mov_vreg_F0(0, rd);
3168 else if (op == 15 && rn == 15)
3169 /* conversion */
3170 gen_mov_vreg_F0(!dp, rd);
3171 else
3172 gen_mov_vreg_F0(dp, rd);
3173
3174 /* break out of the loop if we have finished */
3175 if (veclen == 0)
3176 break;
3177
3178 if (op == 15 && delta_m == 0) {
3179 /* single source one-many */
3180 while (veclen--) {
3181 rd = ((rd + delta_d) & (bank_mask - 1))
3182 | (rd & bank_mask);
3183 gen_mov_vreg_F0(dp, rd);
3184 }
3185 break;
3186 }
3187 /* Setup the next operands. */
3188 veclen--;
3189 rd = ((rd + delta_d) & (bank_mask - 1))
3190 | (rd & bank_mask);
3191
3192 if (op == 15) {
3193 /* One source operand. */
3194 rm = ((rm + delta_m) & (bank_mask - 1))
3195 | (rm & bank_mask);
3196 gen_mov_F0_vreg(dp, rm);
3197 } else {
3198 /* Two source operands. */
3199 rn = ((rn + delta_d) & (bank_mask - 1))
3200 | (rn & bank_mask);
3201 gen_mov_F0_vreg(dp, rn);
3202 if (delta_m) {
3203 rm = ((rm + delta_m) & (bank_mask - 1))
3204 | (rm & bank_mask);
3205 gen_mov_F1_vreg(dp, rm);
3206 }
3207 }
3208 }
3209 }
3210 break;
3211 case 0xc:
3212 case 0xd:
8387da81 3213 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3214 /* two-register transfer */
3215 rn = (insn >> 16) & 0xf;
3216 rd = (insn >> 12) & 0xf;
3217 if (dp) {
9ee6e8bb
PB
3218 VFP_DREG_M(rm, insn);
3219 } else {
3220 rm = VFP_SREG_M(insn);
3221 }
b7bcbe95 3222
18c9b560 3223 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3224 /* vfp->arm */
3225 if (dp) {
4373f3ce
PB
3226 gen_mov_F0_vreg(0, rm * 2);
3227 tmp = gen_vfp_mrs();
3228 store_reg(s, rd, tmp);
3229 gen_mov_F0_vreg(0, rm * 2 + 1);
3230 tmp = gen_vfp_mrs();
3231 store_reg(s, rn, tmp);
b7bcbe95
FB
3232 } else {
3233 gen_mov_F0_vreg(0, rm);
4373f3ce 3234 tmp = gen_vfp_mrs();
8387da81 3235 store_reg(s, rd, tmp);
b7bcbe95 3236 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3237 tmp = gen_vfp_mrs();
8387da81 3238 store_reg(s, rn, tmp);
b7bcbe95
FB
3239 }
3240 } else {
3241 /* arm->vfp */
3242 if (dp) {
4373f3ce
PB
3243 tmp = load_reg(s, rd);
3244 gen_vfp_msr(tmp);
3245 gen_mov_vreg_F0(0, rm * 2);
3246 tmp = load_reg(s, rn);
3247 gen_vfp_msr(tmp);
3248 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3249 } else {
8387da81 3250 tmp = load_reg(s, rd);
4373f3ce 3251 gen_vfp_msr(tmp);
b7bcbe95 3252 gen_mov_vreg_F0(0, rm);
8387da81 3253 tmp = load_reg(s, rn);
4373f3ce 3254 gen_vfp_msr(tmp);
b7bcbe95
FB
3255 gen_mov_vreg_F0(0, rm + 1);
3256 }
3257 }
3258 } else {
3259 /* Load/store */
3260 rn = (insn >> 16) & 0xf;
3261 if (dp)
9ee6e8bb 3262 VFP_DREG_D(rd, insn);
b7bcbe95 3263 else
9ee6e8bb 3264 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3265 if ((insn & 0x01200000) == 0x01000000) {
3266 /* Single load/store */
3267 offset = (insn & 0xff) << 2;
3268 if ((insn & (1 << 23)) == 0)
3269 offset = -offset;
934814f1
PM
3270 if (s->thumb && rn == 15) {
3271 /* This is actually UNPREDICTABLE */
3272 addr = tcg_temp_new_i32();
3273 tcg_gen_movi_i32(addr, s->pc & ~2);
3274 } else {
3275 addr = load_reg(s, rn);
3276 }
312eea9f 3277 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3278 if (insn & (1 << 20)) {
312eea9f 3279 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3280 gen_mov_vreg_F0(dp, rd);
3281 } else {
3282 gen_mov_F0_vreg(dp, rd);
312eea9f 3283 gen_vfp_st(s, dp, addr);
b7bcbe95 3284 }
7d1b0095 3285 tcg_temp_free_i32(addr);
b7bcbe95
FB
3286 } else {
3287 /* load/store multiple */
934814f1 3288 int w = insn & (1 << 21);
b7bcbe95
FB
3289 if (dp)
3290 n = (insn >> 1) & 0x7f;
3291 else
3292 n = insn & 0xff;
3293
934814f1
PM
3294 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3295 /* P == U , W == 1 => UNDEF */
3296 return 1;
3297 }
3298 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3299 /* UNPREDICTABLE cases for bad immediates: we choose to
3300 * UNDEF to avoid generating huge numbers of TCG ops
3301 */
3302 return 1;
3303 }
3304 if (rn == 15 && w) {
3305 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3306 return 1;
3307 }
3308
3309 if (s->thumb && rn == 15) {
3310 /* This is actually UNPREDICTABLE */
3311 addr = tcg_temp_new_i32();
3312 tcg_gen_movi_i32(addr, s->pc & ~2);
3313 } else {
3314 addr = load_reg(s, rn);
3315 }
b7bcbe95 3316 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3317 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3318
3319 if (dp)
3320 offset = 8;
3321 else
3322 offset = 4;
3323 for (i = 0; i < n; i++) {
18c9b560 3324 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3325 /* load */
312eea9f 3326 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3327 gen_mov_vreg_F0(dp, rd + i);
3328 } else {
3329 /* store */
3330 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3331 gen_vfp_st(s, dp, addr);
b7bcbe95 3332 }
312eea9f 3333 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3334 }
934814f1 3335 if (w) {
b7bcbe95
FB
3336 /* writeback */
3337 if (insn & (1 << 24))
3338 offset = -offset * n;
3339 else if (dp && (insn & 1))
3340 offset = 4;
3341 else
3342 offset = 0;
3343
3344 if (offset != 0)
312eea9f
FN
3345 tcg_gen_addi_i32(addr, addr, offset);
3346 store_reg(s, rn, addr);
3347 } else {
7d1b0095 3348 tcg_temp_free_i32(addr);
b7bcbe95
FB
3349 }
3350 }
3351 }
3352 break;
3353 default:
3354 /* Should never happen. */
3355 return 1;
3356 }
3357 return 0;
3358}
3359
6e256c93 3360static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3361{
6e256c93
FB
3362 TranslationBlock *tb;
3363
3364 tb = s->tb;
3365 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3366 tcg_gen_goto_tb(n);
8984bd2e 3367 gen_set_pc_im(dest);
4b4a72e5 3368 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3369 } else {
8984bd2e 3370 gen_set_pc_im(dest);
57fec1fe 3371 tcg_gen_exit_tb(0);
6e256c93 3372 }
c53be334
FB
3373}
3374
8aaca4c0
FB
3375static inline void gen_jmp (DisasContext *s, uint32_t dest)
3376{
551bd27f 3377 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3378 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3379 if (s->thumb)
d9ba4830
PB
3380 dest |= 1;
3381 gen_bx_im(s, dest);
8aaca4c0 3382 } else {
6e256c93 3383 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3384 s->is_jmp = DISAS_TB_JUMP;
3385 }
3386}
3387
d9ba4830 3388static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3389{
ee097184 3390 if (x)
d9ba4830 3391 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3392 else
d9ba4830 3393 gen_sxth(t0);
ee097184 3394 if (y)
d9ba4830 3395 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3396 else
d9ba4830
PB
3397 gen_sxth(t1);
3398 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3399}
3400
3401/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3402static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3403 uint32_t mask;
3404
3405 mask = 0;
3406 if (flags & (1 << 0))
3407 mask |= 0xff;
3408 if (flags & (1 << 1))
3409 mask |= 0xff00;
3410 if (flags & (1 << 2))
3411 mask |= 0xff0000;
3412 if (flags & (1 << 3))
3413 mask |= 0xff000000;
9ee6e8bb 3414
2ae23e75 3415 /* Mask out undefined bits. */
9ee6e8bb 3416 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3417 if (!arm_feature(env, ARM_FEATURE_V4T))
3418 mask &= ~CPSR_T;
3419 if (!arm_feature(env, ARM_FEATURE_V5))
3420 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3421 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3422 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3423 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3424 mask &= ~CPSR_IT;
9ee6e8bb 3425 /* Mask out execution state bits. */
2ae23e75 3426 if (!spsr)
e160c51c 3427 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3428 /* Mask out privileged bits. */
3429 if (IS_USER(s))
9ee6e8bb 3430 mask &= CPSR_USER;
b5ff1b31
FB
3431 return mask;
3432}
3433
2fbac54b
FN
3434/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3435static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3436{
d9ba4830 3437 TCGv tmp;
b5ff1b31
FB
3438 if (spsr) {
3439 /* ??? This is also undefined in system mode. */
3440 if (IS_USER(s))
3441 return 1;
d9ba4830
PB
3442
3443 tmp = load_cpu_field(spsr);
3444 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3445 tcg_gen_andi_i32(t0, t0, mask);
3446 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3447 store_cpu_field(tmp, spsr);
b5ff1b31 3448 } else {
2fbac54b 3449 gen_set_cpsr(t0, mask);
b5ff1b31 3450 }
7d1b0095 3451 tcg_temp_free_i32(t0);
b5ff1b31
FB
3452 gen_lookup_tb(s);
3453 return 0;
3454}
3455
2fbac54b
FN
3456/* Returns nonzero if access to the PSR is not permitted. */
3457static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3458{
3459 TCGv tmp;
7d1b0095 3460 tmp = tcg_temp_new_i32();
2fbac54b
FN
3461 tcg_gen_movi_i32(tmp, val);
3462 return gen_set_psr(s, mask, spsr, tmp);
3463}
3464
e9bb4aa9
JR
3465/* Generate an old-style exception return. Marks pc as dead. */
3466static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3467{
d9ba4830 3468 TCGv tmp;
e9bb4aa9 3469 store_reg(s, 15, pc);
d9ba4830
PB
3470 tmp = load_cpu_field(spsr);
3471 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3472 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3473 s->is_jmp = DISAS_UPDATE;
3474}
3475
b0109805
PB
3476/* Generate a v6 exception return. Marks both values as dead. */
3477static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3478{
b0109805 3479 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3480 tcg_temp_free_i32(cpsr);
b0109805 3481 store_reg(s, 15, pc);
9ee6e8bb
PB
3482 s->is_jmp = DISAS_UPDATE;
3483}
3b46e624 3484
9ee6e8bb
PB
3485static inline void
3486gen_set_condexec (DisasContext *s)
3487{
3488 if (s->condexec_mask) {
8f01245e 3489 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3490 TCGv tmp = tcg_temp_new_i32();
8f01245e 3491 tcg_gen_movi_i32(tmp, val);
d9ba4830 3492 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3493 }
3494}
3b46e624 3495
bc4a0de0
PM
3496static void gen_exception_insn(DisasContext *s, int offset, int excp)
3497{
3498 gen_set_condexec(s);
3499 gen_set_pc_im(s->pc - offset);
3500 gen_exception(excp);
3501 s->is_jmp = DISAS_JUMP;
3502}
3503
9ee6e8bb
PB
3504static void gen_nop_hint(DisasContext *s, int val)
3505{
3506 switch (val) {
3507 case 3: /* wfi */
8984bd2e 3508 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3509 s->is_jmp = DISAS_WFI;
3510 break;
3511 case 2: /* wfe */
3512 case 4: /* sev */
3513 /* TODO: Implement SEV and WFE. May help SMP performance. */
3514 default: /* nop */
3515 break;
3516 }
3517}
99c475ab 3518
ad69471c 3519#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3520
62698be3 3521static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3522{
3523 switch (size) {
dd8fbd78
FN
3524 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3525 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3526 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3527 default: abort();
9ee6e8bb 3528 }
9ee6e8bb
PB
3529}
3530
dd8fbd78 3531static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3532{
3533 switch (size) {
dd8fbd78
FN
3534 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3535 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3536 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3537 default: return;
3538 }
3539}
3540
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
3546
ad69471c
PB
/* Dispatch to the gen_helper_neon_<name>_{s8,u8,s16,u16,s32,u32} variant
 * selected by the in-scope `size` and `u` values, for helpers that take
 * cpu_env.  Operands are tmp/tmp2; the result replaces tmp.  Expands to
 * `return 1` (UNDEF) for an invalid size/u combination.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3569
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env.
 * Dispatches on the in-scope `size` and `u`; operands tmp/tmp2, result in
 * tmp; expands to `return 1` (UNDEF) on an invalid combination.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3592
dd8fbd78 3593static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3594{
7d1b0095 3595 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3596 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3597 return tmp;
9ee6e8bb
PB
3598}
3599
dd8fbd78 3600static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3601{
dd8fbd78 3602 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3603 tcg_temp_free_i32(var);
9ee6e8bb
PB
3604}
3605
dd8fbd78 3606static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3607{
dd8fbd78 3608 TCGv tmp;
9ee6e8bb 3609 if (size == 1) {
0fad6efc
PM
3610 tmp = neon_load_reg(reg & 7, reg >> 4);
3611 if (reg & 8) {
dd8fbd78 3612 gen_neon_dup_high16(tmp);
0fad6efc
PM
3613 } else {
3614 gen_neon_dup_low16(tmp);
dd8fbd78 3615 }
0fad6efc
PM
3616 } else {
3617 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3618 }
dd8fbd78 3619 return tmp;
9ee6e8bb
PB
3620}
3621
02acedf9 3622static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3623{
02acedf9 3624 TCGv tmp, tmp2;
600b828c 3625 if (!q && size == 2) {
02acedf9
PM
3626 return 1;
3627 }
3628 tmp = tcg_const_i32(rd);
3629 tmp2 = tcg_const_i32(rm);
3630 if (q) {
3631 switch (size) {
3632 case 0:
02da0b2d 3633 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3634 break;
3635 case 1:
02da0b2d 3636 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3637 break;
3638 case 2:
02da0b2d 3639 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3640 break;
3641 default:
3642 abort();
3643 }
3644 } else {
3645 switch (size) {
3646 case 0:
02da0b2d 3647 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3648 break;
3649 case 1:
02da0b2d 3650 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3651 break;
3652 default:
3653 abort();
3654 }
3655 }
3656 tcg_temp_free_i32(tmp);
3657 tcg_temp_free_i32(tmp2);
3658 return 0;
19457615
FN
3659}
3660
d68a6f3a 3661static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3662{
3663 TCGv tmp, tmp2;
600b828c 3664 if (!q && size == 2) {
d68a6f3a
PM
3665 return 1;
3666 }
3667 tmp = tcg_const_i32(rd);
3668 tmp2 = tcg_const_i32(rm);
3669 if (q) {
3670 switch (size) {
3671 case 0:
02da0b2d 3672 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3673 break;
3674 case 1:
02da0b2d 3675 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3676 break;
3677 case 2:
02da0b2d 3678 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3679 break;
3680 default:
3681 abort();
3682 }
3683 } else {
3684 switch (size) {
3685 case 0:
02da0b2d 3686 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3687 break;
3688 case 1:
02da0b2d 3689 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3690 break;
3691 default:
3692 abort();
3693 }
3694 }
3695 tcg_temp_free_i32(tmp);
3696 tcg_temp_free_i32(tmp2);
3697 return 0;
19457615
FN
3698}
3699
19457615
FN
3700static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3701{
3702 TCGv rd, tmp;
3703
7d1b0095
PM
3704 rd = tcg_temp_new_i32();
3705 tmp = tcg_temp_new_i32();
19457615
FN
3706
3707 tcg_gen_shli_i32(rd, t0, 8);
3708 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3709 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3710 tcg_gen_or_i32(rd, rd, tmp);
3711
3712 tcg_gen_shri_i32(t1, t1, 8);
3713 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3714 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3715 tcg_gen_or_i32(t1, t1, tmp);
3716 tcg_gen_mov_i32(t0, rd);
3717
7d1b0095
PM
3718 tcg_temp_free_i32(tmp);
3719 tcg_temp_free_i32(rd);
19457615
FN
3720}
3721
3722static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3723{
3724 TCGv rd, tmp;
3725
7d1b0095
PM
3726 rd = tcg_temp_new_i32();
3727 tmp = tcg_temp_new_i32();
19457615
FN
3728
3729 tcg_gen_shli_i32(rd, t0, 16);
3730 tcg_gen_andi_i32(tmp, t1, 0xffff);
3731 tcg_gen_or_i32(rd, rd, tmp);
3732 tcg_gen_shri_i32(t1, t1, 16);
3733 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3734 tcg_gen_or_i32(t1, t1, tmp);
3735 tcg_gen_mov_i32(t0, rd);
3736
7d1b0095
PM
3737 tcg_temp_free_i32(tmp);
3738 tcg_temp_free_i32(rd);
19457615
FN
3739}
3740
3741
9ee6e8bb
PB
/* Per-opcode layout for Neon "load/store multiple structures":
 * how many D registers are touched, how their elements interleave in
 * memory, and the register-number spacing between structure members.
 *
 * Fix: the table is only ever read, so declare it const (lets the
 * compiler place it in read-only storage and documents intent).
 */
static const struct {
    int nregs;       /* number of D registers accessed */
    int interleave;  /* element interleave factor in memory */
    int spacing;     /* register-index step between structure regs */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3759
3760/* Translate a NEON load/store element instruction. Return nonzero if the
3761 instruction is invalid. */
0ecb72a5 3762static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
3763{
3764 int rd, rn, rm;
3765 int op;
3766 int nregs;
3767 int interleave;
84496233 3768 int spacing;
9ee6e8bb
PB
3769 int stride;
3770 int size;
3771 int reg;
3772 int pass;
3773 int load;
3774 int shift;
9ee6e8bb 3775 int n;
1b2b1e54 3776 TCGv addr;
b0109805 3777 TCGv tmp;
8f8e3aa4 3778 TCGv tmp2;
84496233 3779 TCGv_i64 tmp64;
9ee6e8bb 3780
5df8bac1 3781 if (!s->vfp_enabled)
9ee6e8bb
PB
3782 return 1;
3783 VFP_DREG_D(rd, insn);
3784 rn = (insn >> 16) & 0xf;
3785 rm = insn & 0xf;
3786 load = (insn & (1 << 21)) != 0;
3787 if ((insn & (1 << 23)) == 0) {
3788 /* Load store all elements. */
3789 op = (insn >> 8) & 0xf;
3790 size = (insn >> 6) & 3;
84496233 3791 if (op > 10)
9ee6e8bb 3792 return 1;
f2dd89d0
PM
3793 /* Catch UNDEF cases for bad values of align field */
3794 switch (op & 0xc) {
3795 case 4:
3796 if (((insn >> 5) & 1) == 1) {
3797 return 1;
3798 }
3799 break;
3800 case 8:
3801 if (((insn >> 4) & 3) == 3) {
3802 return 1;
3803 }
3804 break;
3805 default:
3806 break;
3807 }
9ee6e8bb
PB
3808 nregs = neon_ls_element_type[op].nregs;
3809 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3810 spacing = neon_ls_element_type[op].spacing;
3811 if (size == 3 && (interleave | spacing) != 1)
3812 return 1;
e318a60b 3813 addr = tcg_temp_new_i32();
dcc65026 3814 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3815 stride = (1 << size) * interleave;
3816 for (reg = 0; reg < nregs; reg++) {
3817 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3818 load_reg_var(s, addr, rn);
3819 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3820 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3821 load_reg_var(s, addr, rn);
3822 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3823 }
84496233
JR
3824 if (size == 3) {
3825 if (load) {
3826 tmp64 = gen_ld64(addr, IS_USER(s));
3827 neon_store_reg64(tmp64, rd);
3828 tcg_temp_free_i64(tmp64);
3829 } else {
3830 tmp64 = tcg_temp_new_i64();
3831 neon_load_reg64(tmp64, rd);
3832 gen_st64(tmp64, addr, IS_USER(s));
3833 }
3834 tcg_gen_addi_i32(addr, addr, stride);
3835 } else {
3836 for (pass = 0; pass < 2; pass++) {
3837 if (size == 2) {
3838 if (load) {
3839 tmp = gen_ld32(addr, IS_USER(s));
3840 neon_store_reg(rd, pass, tmp);
3841 } else {
3842 tmp = neon_load_reg(rd, pass);
3843 gen_st32(tmp, addr, IS_USER(s));
3844 }
1b2b1e54 3845 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3846 } else if (size == 1) {
3847 if (load) {
3848 tmp = gen_ld16u(addr, IS_USER(s));
3849 tcg_gen_addi_i32(addr, addr, stride);
3850 tmp2 = gen_ld16u(addr, IS_USER(s));
3851 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3852 tcg_gen_shli_i32(tmp2, tmp2, 16);
3853 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3854 tcg_temp_free_i32(tmp2);
84496233
JR
3855 neon_store_reg(rd, pass, tmp);
3856 } else {
3857 tmp = neon_load_reg(rd, pass);
7d1b0095 3858 tmp2 = tcg_temp_new_i32();
84496233
JR
3859 tcg_gen_shri_i32(tmp2, tmp, 16);
3860 gen_st16(tmp, addr, IS_USER(s));
3861 tcg_gen_addi_i32(addr, addr, stride);
3862 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3863 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3864 }
84496233
JR
3865 } else /* size == 0 */ {
3866 if (load) {
3867 TCGV_UNUSED(tmp2);
3868 for (n = 0; n < 4; n++) {
3869 tmp = gen_ld8u(addr, IS_USER(s));
3870 tcg_gen_addi_i32(addr, addr, stride);
3871 if (n == 0) {
3872 tmp2 = tmp;
3873 } else {
41ba8341
PB
3874 tcg_gen_shli_i32(tmp, tmp, n * 8);
3875 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3876 tcg_temp_free_i32(tmp);
84496233 3877 }
9ee6e8bb 3878 }
84496233
JR
3879 neon_store_reg(rd, pass, tmp2);
3880 } else {
3881 tmp2 = neon_load_reg(rd, pass);
3882 for (n = 0; n < 4; n++) {
7d1b0095 3883 tmp = tcg_temp_new_i32();
84496233
JR
3884 if (n == 0) {
3885 tcg_gen_mov_i32(tmp, tmp2);
3886 } else {
3887 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3888 }
3889 gen_st8(tmp, addr, IS_USER(s));
3890 tcg_gen_addi_i32(addr, addr, stride);
3891 }
7d1b0095 3892 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3893 }
3894 }
3895 }
3896 }
84496233 3897 rd += spacing;
9ee6e8bb 3898 }
e318a60b 3899 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3900 stride = nregs * 8;
3901 } else {
3902 size = (insn >> 10) & 3;
3903 if (size == 3) {
3904 /* Load single element to all lanes. */
8e18cde3
PM
3905 int a = (insn >> 4) & 1;
3906 if (!load) {
9ee6e8bb 3907 return 1;
8e18cde3 3908 }
9ee6e8bb
PB
3909 size = (insn >> 6) & 3;
3910 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3911
3912 if (size == 3) {
3913 if (nregs != 4 || a == 0) {
9ee6e8bb 3914 return 1;
99c475ab 3915 }
8e18cde3
PM
3916 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3917 size = 2;
3918 }
3919 if (nregs == 1 && a == 1 && size == 0) {
3920 return 1;
3921 }
3922 if (nregs == 3 && a == 1) {
3923 return 1;
3924 }
e318a60b 3925 addr = tcg_temp_new_i32();
8e18cde3
PM
3926 load_reg_var(s, addr, rn);
3927 if (nregs == 1) {
3928 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3929 tmp = gen_load_and_replicate(s, addr, size);
3930 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3931 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3932 if (insn & (1 << 5)) {
3933 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3934 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3935 }
3936 tcg_temp_free_i32(tmp);
3937 } else {
3938 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3939 stride = (insn & (1 << 5)) ? 2 : 1;
3940 for (reg = 0; reg < nregs; reg++) {
3941 tmp = gen_load_and_replicate(s, addr, size);
3942 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3943 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3944 tcg_temp_free_i32(tmp);
3945 tcg_gen_addi_i32(addr, addr, 1 << size);
3946 rd += stride;
3947 }
9ee6e8bb 3948 }
e318a60b 3949 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3950 stride = (1 << size) * nregs;
3951 } else {
3952 /* Single element. */
93262b16 3953 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
3954 pass = (insn >> 7) & 1;
3955 switch (size) {
3956 case 0:
3957 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3958 stride = 1;
3959 break;
3960 case 1:
3961 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3962 stride = (insn & (1 << 5)) ? 2 : 1;
3963 break;
3964 case 2:
3965 shift = 0;
9ee6e8bb
PB
3966 stride = (insn & (1 << 6)) ? 2 : 1;
3967 break;
3968 default:
3969 abort();
3970 }
3971 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
3972 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3973 switch (nregs) {
3974 case 1:
3975 if (((idx & (1 << size)) != 0) ||
3976 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
3977 return 1;
3978 }
3979 break;
3980 case 3:
3981 if ((idx & 1) != 0) {
3982 return 1;
3983 }
3984 /* fall through */
3985 case 2:
3986 if (size == 2 && (idx & 2) != 0) {
3987 return 1;
3988 }
3989 break;
3990 case 4:
3991 if ((size == 2) && ((idx & 3) == 3)) {
3992 return 1;
3993 }
3994 break;
3995 default:
3996 abort();
3997 }
3998 if ((rd + stride * (nregs - 1)) > 31) {
3999 /* Attempts to write off the end of the register file
4000 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4001 * the neon_load_reg() would write off the end of the array.
4002 */
4003 return 1;
4004 }
e318a60b 4005 addr = tcg_temp_new_i32();
dcc65026 4006 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4007 for (reg = 0; reg < nregs; reg++) {
4008 if (load) {
9ee6e8bb
PB
4009 switch (size) {
4010 case 0:
1b2b1e54 4011 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
4012 break;
4013 case 1:
1b2b1e54 4014 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
4015 break;
4016 case 2:
1b2b1e54 4017 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 4018 break;
a50f5b91
PB
4019 default: /* Avoid compiler warnings. */
4020 abort();
9ee6e8bb
PB
4021 }
4022 if (size != 2) {
8f8e3aa4
PB
4023 tmp2 = neon_load_reg(rd, pass);
4024 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
7d1b0095 4025 tcg_temp_free_i32(tmp2);
9ee6e8bb 4026 }
8f8e3aa4 4027 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4028 } else { /* Store */
8f8e3aa4
PB
4029 tmp = neon_load_reg(rd, pass);
4030 if (shift)
4031 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4032 switch (size) {
4033 case 0:
1b2b1e54 4034 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4035 break;
4036 case 1:
1b2b1e54 4037 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4038 break;
4039 case 2:
1b2b1e54 4040 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4041 break;
99c475ab 4042 }
99c475ab 4043 }
9ee6e8bb 4044 rd += stride;
1b2b1e54 4045 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4046 }
e318a60b 4047 tcg_temp_free_i32(addr);
9ee6e8bb 4048 stride = nregs * (1 << size);
99c475ab 4049 }
9ee6e8bb
PB
4050 }
4051 if (rm != 15) {
b26eefb6
PB
4052 TCGv base;
4053
4054 base = load_reg(s, rn);
9ee6e8bb 4055 if (rm == 13) {
b26eefb6 4056 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4057 } else {
b26eefb6
PB
4058 TCGv index;
4059 index = load_reg(s, rm);
4060 tcg_gen_add_i32(base, base, index);
7d1b0095 4061 tcg_temp_free_i32(index);
9ee6e8bb 4062 }
b26eefb6 4063 store_reg(s, rn, base);
9ee6e8bb
PB
4064 }
4065 return 0;
4066}
3b46e624 4067
8f8e3aa4
PB
4068/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4069static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4070{
4071 tcg_gen_and_i32(t, t, c);
f669df27 4072 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4073 tcg_gen_or_i32(dest, t, f);
4074}
4075
a7812ae4 4076static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4077{
4078 switch (size) {
4079 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4080 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4081 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4082 default: abort();
4083 }
4084}
4085
a7812ae4 4086static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4087{
4088 switch (size) {
02da0b2d
PM
4089 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4090 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4091 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4092 default: abort();
4093 }
4094}
4095
a7812ae4 4096static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4097{
4098 switch (size) {
02da0b2d
PM
4099 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4100 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4101 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4102 default: abort();
4103 }
4104}
4105
af1bbf30
JR
4106static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4107{
4108 switch (size) {
02da0b2d
PM
4109 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4110 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4111 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4112 default: abort();
4113 }
4114}
4115
ad69471c
PB
4116static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4117 int q, int u)
4118{
4119 if (q) {
4120 if (u) {
4121 switch (size) {
4122 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4123 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4124 default: abort();
4125 }
4126 } else {
4127 switch (size) {
4128 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4129 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4130 default: abort();
4131 }
4132 }
4133 } else {
4134 if (u) {
4135 switch (size) {
b408a9b0
CL
4136 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4137 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4138 default: abort();
4139 }
4140 } else {
4141 switch (size) {
4142 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4143 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4144 default: abort();
4145 }
4146 }
4147 }
4148}
4149
a7812ae4 4150static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4151{
4152 if (u) {
4153 switch (size) {
4154 case 0: gen_helper_neon_widen_u8(dest, src); break;
4155 case 1: gen_helper_neon_widen_u16(dest, src); break;
4156 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4157 default: abort();
4158 }
4159 } else {
4160 switch (size) {
4161 case 0: gen_helper_neon_widen_s8(dest, src); break;
4162 case 1: gen_helper_neon_widen_s16(dest, src); break;
4163 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4164 default: abort();
4165 }
4166 }
7d1b0095 4167 tcg_temp_free_i32(src);
ad69471c
PB
4168}
4169
4170static inline void gen_neon_addl(int size)
4171{
4172 switch (size) {
4173 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4174 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4175 case 2: tcg_gen_add_i64(CPU_V001); break;
4176 default: abort();
4177 }
4178}
4179
4180static inline void gen_neon_subl(int size)
4181{
4182 switch (size) {
4183 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4184 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4185 case 2: tcg_gen_sub_i64(CPU_V001); break;
4186 default: abort();
4187 }
4188}
4189
a7812ae4 4190static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4191{
4192 switch (size) {
4193 case 0: gen_helper_neon_negl_u16(var, var); break;
4194 case 1: gen_helper_neon_negl_u32(var, var); break;
4195 case 2: gen_helper_neon_negl_u64(var, var); break;
4196 default: abort();
4197 }
4198}
4199
a7812ae4 4200static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4201{
4202 switch (size) {
02da0b2d
PM
4203 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4204 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4205 default: abort();
4206 }
4207}
4208
a7812ae4 4209static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4210{
a7812ae4 4211 TCGv_i64 tmp;
ad69471c
PB
4212
4213 switch ((size << 1) | u) {
4214 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4215 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4216 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4217 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4218 case 4:
4219 tmp = gen_muls_i64_i32(a, b);
4220 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4221 tcg_temp_free_i64(tmp);
ad69471c
PB
4222 break;
4223 case 5:
4224 tmp = gen_mulu_i64_i32(a, b);
4225 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4226 tcg_temp_free_i64(tmp);
ad69471c
PB
4227 break;
4228 default: abort();
4229 }
c6067f04
CL
4230
4231 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4232 Don't forget to clean them now. */
4233 if (size < 2) {
7d1b0095
PM
4234 tcg_temp_free_i32(a);
4235 tcg_temp_free_i32(b);
c6067f04 4236 }
ad69471c
PB
4237}
4238
c33171c7
PM
4239static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4240{
4241 if (op) {
4242 if (u) {
4243 gen_neon_unarrow_sats(size, dest, src);
4244 } else {
4245 gen_neon_narrow(size, dest, src);
4246 }
4247 } else {
4248 if (u) {
4249 gen_neon_narrow_satu(size, dest, src);
4250 } else {
4251 gen_neon_narrow_sats(size, dest, src);
4252 }
4253 }
4254}
4255
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

/* Each entry has bit n set if the insn allows size value n; the
 * decoder checks neon_3r_sizes[op] & (1 << size) and makes the insn
 * UNDEF on a clear bit.  Unallocated op values (e.g. 24) are
 * implicitly zero and so always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4325
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering (3, 6, 7, 21, ...) are
 * unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4375static int neon_2rm_is_float_op(int op)
4376{
4377 /* Return true if this neon 2reg-misc op is float-to-float */
4378 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4379 op >= NEON_2RM_VRECPE_F);
4380}
4381
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x7 = sizes 0-2 allowed, 0x4 = size 2 only, 0x1 = size 0 only.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4432
9ee6e8bb
PB
4433/* Translate a NEON data processing instruction. Return nonzero if the
4434 instruction is invalid.
ad69471c
PB
4435 We process data in a mixture of 32-bit and 64-bit chunks.
4436 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4437
0ecb72a5 4438static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4439{
4440 int op;
4441 int q;
4442 int rd, rn, rm;
4443 int size;
4444 int shift;
4445 int pass;
4446 int count;
4447 int pairwise;
4448 int u;
ca9a32e4 4449 uint32_t imm, mask;
b75263d6 4450 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4451 TCGv_i64 tmp64;
9ee6e8bb 4452
5df8bac1 4453 if (!s->vfp_enabled)
9ee6e8bb
PB
4454 return 1;
4455 q = (insn & (1 << 6)) != 0;
4456 u = (insn >> 24) & 1;
4457 VFP_DREG_D(rd, insn);
4458 VFP_DREG_N(rn, insn);
4459 VFP_DREG_M(rm, insn);
4460 size = (insn >> 20) & 3;
4461 if ((insn & (1 << 23)) == 0) {
4462 /* Three register same length. */
4463 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4464 /* Catch invalid op and bad size combinations: UNDEF */
4465 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4466 return 1;
4467 }
25f84f79
PM
4468 /* All insns of this form UNDEF for either this condition or the
4469 * superset of cases "Q==1"; we catch the latter later.
4470 */
4471 if (q && ((rd | rn | rm) & 1)) {
4472 return 1;
4473 }
62698be3
PM
4474 if (size == 3 && op != NEON_3R_LOGIC) {
4475 /* 64-bit element instructions. */
9ee6e8bb 4476 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4477 neon_load_reg64(cpu_V0, rn + pass);
4478 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4479 switch (op) {
62698be3 4480 case NEON_3R_VQADD:
9ee6e8bb 4481 if (u) {
02da0b2d
PM
4482 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4483 cpu_V0, cpu_V1);
2c0262af 4484 } else {
02da0b2d
PM
4485 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4486 cpu_V0, cpu_V1);
2c0262af 4487 }
9ee6e8bb 4488 break;
62698be3 4489 case NEON_3R_VQSUB:
9ee6e8bb 4490 if (u) {
02da0b2d
PM
4491 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4492 cpu_V0, cpu_V1);
ad69471c 4493 } else {
02da0b2d
PM
4494 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4495 cpu_V0, cpu_V1);
ad69471c
PB
4496 }
4497 break;
62698be3 4498 case NEON_3R_VSHL:
ad69471c
PB
4499 if (u) {
4500 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4501 } else {
4502 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4503 }
4504 break;
62698be3 4505 case NEON_3R_VQSHL:
ad69471c 4506 if (u) {
02da0b2d
PM
4507 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4508 cpu_V1, cpu_V0);
ad69471c 4509 } else {
02da0b2d
PM
4510 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4511 cpu_V1, cpu_V0);
ad69471c
PB
4512 }
4513 break;
62698be3 4514 case NEON_3R_VRSHL:
ad69471c
PB
4515 if (u) {
4516 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4517 } else {
ad69471c
PB
4518 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4519 }
4520 break;
62698be3 4521 case NEON_3R_VQRSHL:
ad69471c 4522 if (u) {
02da0b2d
PM
4523 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4524 cpu_V1, cpu_V0);
ad69471c 4525 } else {
02da0b2d
PM
4526 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4527 cpu_V1, cpu_V0);
1e8d4eec 4528 }
9ee6e8bb 4529 break;
62698be3 4530 case NEON_3R_VADD_VSUB:
9ee6e8bb 4531 if (u) {
ad69471c 4532 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4533 } else {
ad69471c 4534 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4535 }
4536 break;
4537 default:
4538 abort();
2c0262af 4539 }
ad69471c 4540 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4541 }
9ee6e8bb 4542 return 0;
2c0262af 4543 }
25f84f79 4544 pairwise = 0;
9ee6e8bb 4545 switch (op) {
62698be3
PM
4546 case NEON_3R_VSHL:
4547 case NEON_3R_VQSHL:
4548 case NEON_3R_VRSHL:
4549 case NEON_3R_VQRSHL:
9ee6e8bb 4550 {
ad69471c
PB
4551 int rtmp;
4552 /* Shift instruction operands are reversed. */
4553 rtmp = rn;
9ee6e8bb 4554 rn = rm;
ad69471c 4555 rm = rtmp;
9ee6e8bb 4556 }
2c0262af 4557 break;
25f84f79
PM
4558 case NEON_3R_VPADD:
4559 if (u) {
4560 return 1;
4561 }
4562 /* Fall through */
62698be3
PM
4563 case NEON_3R_VPMAX:
4564 case NEON_3R_VPMIN:
9ee6e8bb 4565 pairwise = 1;
2c0262af 4566 break;
25f84f79
PM
4567 case NEON_3R_FLOAT_ARITH:
4568 pairwise = (u && size < 2); /* if VPADD (float) */
4569 break;
4570 case NEON_3R_FLOAT_MINMAX:
4571 pairwise = u; /* if VPMIN/VPMAX (float) */
4572 break;
4573 case NEON_3R_FLOAT_CMP:
4574 if (!u && size) {
4575 /* no encoding for U=0 C=1x */
4576 return 1;
4577 }
4578 break;
4579 case NEON_3R_FLOAT_ACMP:
4580 if (!u) {
4581 return 1;
4582 }
4583 break;
4584 case NEON_3R_VRECPS_VRSQRTS:
4585 if (u) {
4586 return 1;
4587 }
2c0262af 4588 break;
25f84f79
PM
4589 case NEON_3R_VMUL:
4590 if (u && (size != 0)) {
4591 /* UNDEF on invalid size for polynomial subcase */
4592 return 1;
4593 }
2c0262af 4594 break;
da97f52c
PM
4595 case NEON_3R_VFM:
4596 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4597 return 1;
4598 }
4599 break;
9ee6e8bb 4600 default:
2c0262af 4601 break;
9ee6e8bb 4602 }
dd8fbd78 4603
25f84f79
PM
4604 if (pairwise && q) {
4605 /* All the pairwise insns UNDEF if Q is set */
4606 return 1;
4607 }
4608
9ee6e8bb
PB
4609 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4610
4611 if (pairwise) {
4612 /* Pairwise. */
a5a14945
JR
4613 if (pass < 1) {
4614 tmp = neon_load_reg(rn, 0);
4615 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4616 } else {
a5a14945
JR
4617 tmp = neon_load_reg(rm, 0);
4618 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4619 }
4620 } else {
4621 /* Elementwise. */
dd8fbd78
FN
4622 tmp = neon_load_reg(rn, pass);
4623 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4624 }
4625 switch (op) {
62698be3 4626 case NEON_3R_VHADD:
9ee6e8bb
PB
4627 GEN_NEON_INTEGER_OP(hadd);
4628 break;
62698be3 4629 case NEON_3R_VQADD:
02da0b2d 4630 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4631 break;
62698be3 4632 case NEON_3R_VRHADD:
9ee6e8bb 4633 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4634 break;
62698be3 4635 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4636 switch ((u << 2) | size) {
4637 case 0: /* VAND */
dd8fbd78 4638 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4639 break;
4640 case 1: /* BIC */
f669df27 4641 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4642 break;
4643 case 2: /* VORR */
dd8fbd78 4644 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4645 break;
4646 case 3: /* VORN */
f669df27 4647 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4648 break;
4649 case 4: /* VEOR */
dd8fbd78 4650 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4651 break;
4652 case 5: /* VBSL */
dd8fbd78
FN
4653 tmp3 = neon_load_reg(rd, pass);
4654 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4655 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4656 break;
4657 case 6: /* VBIT */
dd8fbd78
FN
4658 tmp3 = neon_load_reg(rd, pass);
4659 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4660 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4661 break;
4662 case 7: /* VBIF */
dd8fbd78
FN
4663 tmp3 = neon_load_reg(rd, pass);
4664 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4665 tcg_temp_free_i32(tmp3);
9ee6e8bb 4666 break;
2c0262af
FB
4667 }
4668 break;
62698be3 4669 case NEON_3R_VHSUB:
9ee6e8bb
PB
4670 GEN_NEON_INTEGER_OP(hsub);
4671 break;
62698be3 4672 case NEON_3R_VQSUB:
02da0b2d 4673 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4674 break;
62698be3 4675 case NEON_3R_VCGT:
9ee6e8bb
PB
4676 GEN_NEON_INTEGER_OP(cgt);
4677 break;
62698be3 4678 case NEON_3R_VCGE:
9ee6e8bb
PB
4679 GEN_NEON_INTEGER_OP(cge);
4680 break;
62698be3 4681 case NEON_3R_VSHL:
ad69471c 4682 GEN_NEON_INTEGER_OP(shl);
2c0262af 4683 break;
62698be3 4684 case NEON_3R_VQSHL:
02da0b2d 4685 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4686 break;
62698be3 4687 case NEON_3R_VRSHL:
ad69471c 4688 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4689 break;
62698be3 4690 case NEON_3R_VQRSHL:
02da0b2d 4691 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4692 break;
62698be3 4693 case NEON_3R_VMAX:
9ee6e8bb
PB
4694 GEN_NEON_INTEGER_OP(max);
4695 break;
62698be3 4696 case NEON_3R_VMIN:
9ee6e8bb
PB
4697 GEN_NEON_INTEGER_OP(min);
4698 break;
62698be3 4699 case NEON_3R_VABD:
9ee6e8bb
PB
4700 GEN_NEON_INTEGER_OP(abd);
4701 break;
62698be3 4702 case NEON_3R_VABA:
9ee6e8bb 4703 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4704 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4705 tmp2 = neon_load_reg(rd, pass);
4706 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4707 break;
62698be3 4708 case NEON_3R_VADD_VSUB:
9ee6e8bb 4709 if (!u) { /* VADD */
62698be3 4710 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4711 } else { /* VSUB */
4712 switch (size) {
dd8fbd78
FN
4713 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4714 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4715 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4716 default: abort();
9ee6e8bb
PB
4717 }
4718 }
4719 break;
62698be3 4720 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4721 if (!u) { /* VTST */
4722 switch (size) {
dd8fbd78
FN
4723 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4724 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4725 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4726 default: abort();
9ee6e8bb
PB
4727 }
4728 } else { /* VCEQ */
4729 switch (size) {
dd8fbd78
FN
4730 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4731 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4732 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4733 default: abort();
9ee6e8bb
PB
4734 }
4735 }
4736 break;
62698be3 4737 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4738 switch (size) {
dd8fbd78
FN
4739 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4740 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4741 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4742 default: abort();
9ee6e8bb 4743 }
7d1b0095 4744 tcg_temp_free_i32(tmp2);
dd8fbd78 4745 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4746 if (u) { /* VMLS */
dd8fbd78 4747 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4748 } else { /* VMLA */
dd8fbd78 4749 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4750 }
4751 break;
62698be3 4752 case NEON_3R_VMUL:
9ee6e8bb 4753 if (u) { /* polynomial */
dd8fbd78 4754 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4755 } else { /* Integer */
4756 switch (size) {
dd8fbd78
FN
4757 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4758 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4759 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4760 default: abort();
9ee6e8bb
PB
4761 }
4762 }
4763 break;
62698be3 4764 case NEON_3R_VPMAX:
9ee6e8bb
PB
4765 GEN_NEON_INTEGER_OP(pmax);
4766 break;
62698be3 4767 case NEON_3R_VPMIN:
9ee6e8bb
PB
4768 GEN_NEON_INTEGER_OP(pmin);
4769 break;
62698be3 4770 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4771 if (!u) { /* VQDMULH */
4772 switch (size) {
02da0b2d
PM
4773 case 1:
4774 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4775 break;
4776 case 2:
4777 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4778 break;
62698be3 4779 default: abort();
9ee6e8bb 4780 }
62698be3 4781 } else { /* VQRDMULH */
9ee6e8bb 4782 switch (size) {
02da0b2d
PM
4783 case 1:
4784 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4785 break;
4786 case 2:
4787 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4788 break;
62698be3 4789 default: abort();
9ee6e8bb
PB
4790 }
4791 }
4792 break;
62698be3 4793 case NEON_3R_VPADD:
9ee6e8bb 4794 switch (size) {
dd8fbd78
FN
4795 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4796 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4797 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4798 default: abort();
9ee6e8bb
PB
4799 }
4800 break;
62698be3 4801 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4802 {
4803 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4804 switch ((u << 2) | size) {
4805 case 0: /* VADD */
aa47cfdd
PM
4806 case 4: /* VPADD */
4807 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4808 break;
4809 case 2: /* VSUB */
aa47cfdd 4810 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4811 break;
4812 case 6: /* VABD */
aa47cfdd 4813 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4814 break;
4815 default:
62698be3 4816 abort();
9ee6e8bb 4817 }
aa47cfdd 4818 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4819 break;
aa47cfdd 4820 }
62698be3 4821 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4822 {
4823 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4824 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4825 if (!u) {
7d1b0095 4826 tcg_temp_free_i32(tmp2);
dd8fbd78 4827 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4828 if (size == 0) {
aa47cfdd 4829 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4830 } else {
aa47cfdd 4831 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4832 }
4833 }
aa47cfdd 4834 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4835 break;
aa47cfdd 4836 }
62698be3 4837 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4838 {
4839 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4840 if (!u) {
aa47cfdd 4841 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4842 } else {
aa47cfdd
PM
4843 if (size == 0) {
4844 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4845 } else {
4846 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4847 }
b5ff1b31 4848 }
aa47cfdd 4849 tcg_temp_free_ptr(fpstatus);
2c0262af 4850 break;
aa47cfdd 4851 }
62698be3 4852 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4853 {
4854 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4855 if (size == 0) {
4856 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4857 } else {
4858 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4859 }
4860 tcg_temp_free_ptr(fpstatus);
2c0262af 4861 break;
aa47cfdd 4862 }
62698be3 4863 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4864 {
4865 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4866 if (size == 0) {
4867 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4868 } else {
4869 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4870 }
4871 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4872 break;
aa47cfdd 4873 }
62698be3 4874 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4875 if (size == 0)
dd8fbd78 4876 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4877 else
dd8fbd78 4878 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4879 break;
da97f52c
PM
4880 case NEON_3R_VFM:
4881 {
4882 /* VFMA, VFMS: fused multiply-add */
4883 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4884 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4885 if (size) {
4886 /* VFMS */
4887 gen_helper_vfp_negs(tmp, tmp);
4888 }
4889 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4890 tcg_temp_free_i32(tmp3);
4891 tcg_temp_free_ptr(fpstatus);
4892 break;
4893 }
9ee6e8bb
PB
4894 default:
4895 abort();
2c0262af 4896 }
7d1b0095 4897 tcg_temp_free_i32(tmp2);
dd8fbd78 4898
9ee6e8bb
PB
4899 /* Save the result. For elementwise operations we can put it
4900 straight into the destination register. For pairwise operations
4901 we have to be careful to avoid clobbering the source operands. */
4902 if (pairwise && rd == rm) {
dd8fbd78 4903 neon_store_scratch(pass, tmp);
9ee6e8bb 4904 } else {
dd8fbd78 4905 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4906 }
4907
4908 } /* for pass */
4909 if (pairwise && rd == rm) {
4910 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4911 tmp = neon_load_scratch(pass);
4912 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4913 }
4914 }
ad69471c 4915 /* End of 3 register same size operations. */
9ee6e8bb
PB
4916 } else if (insn & (1 << 4)) {
4917 if ((insn & 0x00380080) != 0) {
4918 /* Two registers and shift. */
4919 op = (insn >> 8) & 0xf;
4920 if (insn & (1 << 7)) {
cc13115b
PM
4921 /* 64-bit shift. */
4922 if (op > 7) {
4923 return 1;
4924 }
9ee6e8bb
PB
4925 size = 3;
4926 } else {
4927 size = 2;
4928 while ((insn & (1 << (size + 19))) == 0)
4929 size--;
4930 }
4931 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 4932 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
4933 by immediate using the variable shift operations. */
4934 if (op < 8) {
4935 /* Shift by immediate:
4936 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4937 if (q && ((rd | rm) & 1)) {
4938 return 1;
4939 }
4940 if (!u && (op == 4 || op == 6)) {
4941 return 1;
4942 }
9ee6e8bb
PB
4943 /* Right shifts are encoded as N - shift, where N is the
4944 element size in bits. */
4945 if (op <= 4)
4946 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4947 if (size == 3) {
4948 count = q + 1;
4949 } else {
4950 count = q ? 4: 2;
4951 }
4952 switch (size) {
4953 case 0:
4954 imm = (uint8_t) shift;
4955 imm |= imm << 8;
4956 imm |= imm << 16;
4957 break;
4958 case 1:
4959 imm = (uint16_t) shift;
4960 imm |= imm << 16;
4961 break;
4962 case 2:
4963 case 3:
4964 imm = shift;
4965 break;
4966 default:
4967 abort();
4968 }
4969
4970 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4971 if (size == 3) {
4972 neon_load_reg64(cpu_V0, rm + pass);
4973 tcg_gen_movi_i64(cpu_V1, imm);
4974 switch (op) {
4975 case 0: /* VSHR */
4976 case 1: /* VSRA */
4977 if (u)
4978 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4979 else
ad69471c 4980 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4981 break;
ad69471c
PB
4982 case 2: /* VRSHR */
4983 case 3: /* VRSRA */
4984 if (u)
4985 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4986 else
ad69471c 4987 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4988 break;
ad69471c 4989 case 4: /* VSRI */
ad69471c
PB
4990 case 5: /* VSHL, VSLI */
4991 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4992 break;
0322b26e 4993 case 6: /* VQSHLU */
02da0b2d
PM
4994 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4995 cpu_V0, cpu_V1);
ad69471c 4996 break;
0322b26e
PM
4997 case 7: /* VQSHL */
4998 if (u) {
02da0b2d 4999 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5000 cpu_V0, cpu_V1);
5001 } else {
02da0b2d 5002 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5003 cpu_V0, cpu_V1);
5004 }
9ee6e8bb 5005 break;
9ee6e8bb 5006 }
ad69471c
PB
5007 if (op == 1 || op == 3) {
5008 /* Accumulate. */
5371cb81 5009 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5010 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5011 } else if (op == 4 || (op == 5 && u)) {
5012 /* Insert */
923e6509
CL
5013 neon_load_reg64(cpu_V1, rd + pass);
5014 uint64_t mask;
5015 if (shift < -63 || shift > 63) {
5016 mask = 0;
5017 } else {
5018 if (op == 4) {
5019 mask = 0xffffffffffffffffull >> -shift;
5020 } else {
5021 mask = 0xffffffffffffffffull << shift;
5022 }
5023 }
5024 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5025 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5026 }
5027 neon_store_reg64(cpu_V0, rd + pass);
5028 } else { /* size < 3 */
5029 /* Operands in T0 and T1. */
dd8fbd78 5030 tmp = neon_load_reg(rm, pass);
7d1b0095 5031 tmp2 = tcg_temp_new_i32();
dd8fbd78 5032 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5033 switch (op) {
5034 case 0: /* VSHR */
5035 case 1: /* VSRA */
5036 GEN_NEON_INTEGER_OP(shl);
5037 break;
5038 case 2: /* VRSHR */
5039 case 3: /* VRSRA */
5040 GEN_NEON_INTEGER_OP(rshl);
5041 break;
5042 case 4: /* VSRI */
ad69471c
PB
5043 case 5: /* VSHL, VSLI */
5044 switch (size) {
dd8fbd78
FN
5045 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5046 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5047 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5048 default: abort();
ad69471c
PB
5049 }
5050 break;
0322b26e 5051 case 6: /* VQSHLU */
ad69471c 5052 switch (size) {
0322b26e 5053 case 0:
02da0b2d
PM
5054 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5055 tmp, tmp2);
0322b26e
PM
5056 break;
5057 case 1:
02da0b2d
PM
5058 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5059 tmp, tmp2);
0322b26e
PM
5060 break;
5061 case 2:
02da0b2d
PM
5062 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5063 tmp, tmp2);
0322b26e
PM
5064 break;
5065 default:
cc13115b 5066 abort();
ad69471c
PB
5067 }
5068 break;
0322b26e 5069 case 7: /* VQSHL */
02da0b2d 5070 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5071 break;
ad69471c 5072 }
7d1b0095 5073 tcg_temp_free_i32(tmp2);
ad69471c
PB
5074
5075 if (op == 1 || op == 3) {
5076 /* Accumulate. */
dd8fbd78 5077 tmp2 = neon_load_reg(rd, pass);
5371cb81 5078 gen_neon_add(size, tmp, tmp2);
7d1b0095 5079 tcg_temp_free_i32(tmp2);
ad69471c
PB
5080 } else if (op == 4 || (op == 5 && u)) {
5081 /* Insert */
5082 switch (size) {
5083 case 0:
5084 if (op == 4)
ca9a32e4 5085 mask = 0xff >> -shift;
ad69471c 5086 else
ca9a32e4
JR
5087 mask = (uint8_t)(0xff << shift);
5088 mask |= mask << 8;
5089 mask |= mask << 16;
ad69471c
PB
5090 break;
5091 case 1:
5092 if (op == 4)
ca9a32e4 5093 mask = 0xffff >> -shift;
ad69471c 5094 else
ca9a32e4
JR
5095 mask = (uint16_t)(0xffff << shift);
5096 mask |= mask << 16;
ad69471c
PB
5097 break;
5098 case 2:
ca9a32e4
JR
5099 if (shift < -31 || shift > 31) {
5100 mask = 0;
5101 } else {
5102 if (op == 4)
5103 mask = 0xffffffffu >> -shift;
5104 else
5105 mask = 0xffffffffu << shift;
5106 }
ad69471c
PB
5107 break;
5108 default:
5109 abort();
5110 }
dd8fbd78 5111 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5112 tcg_gen_andi_i32(tmp, tmp, mask);
5113 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5114 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5115 tcg_temp_free_i32(tmp2);
ad69471c 5116 }
dd8fbd78 5117 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5118 }
5119 } /* for pass */
5120 } else if (op < 10) {
ad69471c 5121 /* Shift by immediate and narrow:
9ee6e8bb 5122 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5123 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5124 if (rm & 1) {
5125 return 1;
5126 }
9ee6e8bb
PB
5127 shift = shift - (1 << (size + 3));
5128 size++;
92cdfaeb 5129 if (size == 3) {
a7812ae4 5130 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5131 neon_load_reg64(cpu_V0, rm);
5132 neon_load_reg64(cpu_V1, rm + 1);
5133 for (pass = 0; pass < 2; pass++) {
5134 TCGv_i64 in;
5135 if (pass == 0) {
5136 in = cpu_V0;
5137 } else {
5138 in = cpu_V1;
5139 }
ad69471c 5140 if (q) {
0b36f4cd 5141 if (input_unsigned) {
92cdfaeb 5142 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5143 } else {
92cdfaeb 5144 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5145 }
ad69471c 5146 } else {
0b36f4cd 5147 if (input_unsigned) {
92cdfaeb 5148 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5149 } else {
92cdfaeb 5150 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5151 }
ad69471c 5152 }
7d1b0095 5153 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5154 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5155 neon_store_reg(rd, pass, tmp);
5156 } /* for pass */
5157 tcg_temp_free_i64(tmp64);
5158 } else {
5159 if (size == 1) {
5160 imm = (uint16_t)shift;
5161 imm |= imm << 16;
2c0262af 5162 } else {
92cdfaeb
PM
5163 /* size == 2 */
5164 imm = (uint32_t)shift;
5165 }
5166 tmp2 = tcg_const_i32(imm);
5167 tmp4 = neon_load_reg(rm + 1, 0);
5168 tmp5 = neon_load_reg(rm + 1, 1);
5169 for (pass = 0; pass < 2; pass++) {
5170 if (pass == 0) {
5171 tmp = neon_load_reg(rm, 0);
5172 } else {
5173 tmp = tmp4;
5174 }
0b36f4cd
CL
5175 gen_neon_shift_narrow(size, tmp, tmp2, q,
5176 input_unsigned);
92cdfaeb
PM
5177 if (pass == 0) {
5178 tmp3 = neon_load_reg(rm, 1);
5179 } else {
5180 tmp3 = tmp5;
5181 }
0b36f4cd
CL
5182 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5183 input_unsigned);
36aa55dc 5184 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5185 tcg_temp_free_i32(tmp);
5186 tcg_temp_free_i32(tmp3);
5187 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5188 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5189 neon_store_reg(rd, pass, tmp);
5190 } /* for pass */
c6067f04 5191 tcg_temp_free_i32(tmp2);
b75263d6 5192 }
9ee6e8bb 5193 } else if (op == 10) {
cc13115b
PM
5194 /* VSHLL, VMOVL */
5195 if (q || (rd & 1)) {
9ee6e8bb 5196 return 1;
cc13115b 5197 }
ad69471c
PB
5198 tmp = neon_load_reg(rm, 0);
5199 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5200 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5201 if (pass == 1)
5202 tmp = tmp2;
5203
5204 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5205
9ee6e8bb
PB
5206 if (shift != 0) {
5207 /* The shift is less than the width of the source
ad69471c
PB
5208 type, so we can just shift the whole register. */
5209 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5210 /* Widen the result of shift: we need to clear
5211 * the potential overflow bits resulting from
5212 * left bits of the narrow input appearing as
5213 * right bits of the left neighbour narrow
5214 * input. */
ad69471c
PB
5215 if (size < 2 || !u) {
5216 uint64_t imm64;
5217 if (size == 0) {
5218 imm = (0xffu >> (8 - shift));
5219 imm |= imm << 16;
acdf01ef 5220 } else if (size == 1) {
ad69471c 5221 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5222 } else {
5223 /* size == 2 */
5224 imm = 0xffffffff >> (32 - shift);
5225 }
5226 if (size < 2) {
5227 imm64 = imm | (((uint64_t)imm) << 32);
5228 } else {
5229 imm64 = imm;
9ee6e8bb 5230 }
acdf01ef 5231 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5232 }
5233 }
ad69471c 5234 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5235 }
f73534a5 5236 } else if (op >= 14) {
9ee6e8bb 5237 /* VCVT fixed-point. */
cc13115b
PM
5238 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5239 return 1;
5240 }
f73534a5
PM
5241 /* We have already masked out the must-be-1 top bit of imm6,
5242 * hence this 32-shift where the ARM ARM has 64-imm6.
5243 */
5244 shift = 32 - shift;
9ee6e8bb 5245 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5246 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5247 if (!(op & 1)) {
9ee6e8bb 5248 if (u)
5500b06c 5249 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5250 else
5500b06c 5251 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5252 } else {
5253 if (u)
5500b06c 5254 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5255 else
5500b06c 5256 gen_vfp_tosl(0, shift, 1);
2c0262af 5257 }
4373f3ce 5258 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5259 }
5260 } else {
9ee6e8bb
PB
5261 return 1;
5262 }
5263 } else { /* (insn & 0x00380080) == 0 */
5264 int invert;
7d80fee5
PM
5265 if (q && (rd & 1)) {
5266 return 1;
5267 }
9ee6e8bb
PB
5268
5269 op = (insn >> 8) & 0xf;
5270 /* One register and immediate. */
5271 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5272 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5273 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5274 * We choose to not special-case this and will behave as if a
5275 * valid constant encoding of 0 had been given.
5276 */
9ee6e8bb
PB
5277 switch (op) {
5278 case 0: case 1:
5279 /* no-op */
5280 break;
5281 case 2: case 3:
5282 imm <<= 8;
5283 break;
5284 case 4: case 5:
5285 imm <<= 16;
5286 break;
5287 case 6: case 7:
5288 imm <<= 24;
5289 break;
5290 case 8: case 9:
5291 imm |= imm << 16;
5292 break;
5293 case 10: case 11:
5294 imm = (imm << 8) | (imm << 24);
5295 break;
5296 case 12:
8e31209e 5297 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5298 break;
5299 case 13:
5300 imm = (imm << 16) | 0xffff;
5301 break;
5302 case 14:
5303 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5304 if (invert)
5305 imm = ~imm;
5306 break;
5307 case 15:
7d80fee5
PM
5308 if (invert) {
5309 return 1;
5310 }
9ee6e8bb
PB
5311 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5312 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5313 break;
5314 }
5315 if (invert)
5316 imm = ~imm;
5317
9ee6e8bb
PB
5318 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5319 if (op & 1 && op < 12) {
ad69471c 5320 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5321 if (invert) {
5322 /* The immediate value has already been inverted, so
5323 BIC becomes AND. */
ad69471c 5324 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5325 } else {
ad69471c 5326 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5327 }
9ee6e8bb 5328 } else {
ad69471c 5329 /* VMOV, VMVN. */
7d1b0095 5330 tmp = tcg_temp_new_i32();
9ee6e8bb 5331 if (op == 14 && invert) {
a5a14945 5332 int n;
ad69471c
PB
5333 uint32_t val;
5334 val = 0;
9ee6e8bb
PB
5335 for (n = 0; n < 4; n++) {
5336 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5337 val |= 0xff << (n * 8);
9ee6e8bb 5338 }
ad69471c
PB
5339 tcg_gen_movi_i32(tmp, val);
5340 } else {
5341 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5342 }
9ee6e8bb 5343 }
ad69471c 5344 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5345 }
5346 }
e4b3861d 5347 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5348 if (size != 3) {
5349 op = (insn >> 8) & 0xf;
5350 if ((insn & (1 << 6)) == 0) {
5351 /* Three registers of different lengths. */
5352 int src1_wide;
5353 int src2_wide;
5354 int prewiden;
695272dc
PM
5355 /* undefreq: bit 0 : UNDEF if size != 0
5356 * bit 1 : UNDEF if size == 0
5357 * bit 2 : UNDEF if U == 1
5358 * Note that [1:0] set implies 'always UNDEF'
5359 */
5360 int undefreq;
5361 /* prewiden, src1_wide, src2_wide, undefreq */
5362 static const int neon_3reg_wide[16][4] = {
5363 {1, 0, 0, 0}, /* VADDL */
5364 {1, 1, 0, 0}, /* VADDW */
5365 {1, 0, 0, 0}, /* VSUBL */
5366 {1, 1, 0, 0}, /* VSUBW */
5367 {0, 1, 1, 0}, /* VADDHN */
5368 {0, 0, 0, 0}, /* VABAL */
5369 {0, 1, 1, 0}, /* VSUBHN */
5370 {0, 0, 0, 0}, /* VABDL */
5371 {0, 0, 0, 0}, /* VMLAL */
5372 {0, 0, 0, 6}, /* VQDMLAL */
5373 {0, 0, 0, 0}, /* VMLSL */
5374 {0, 0, 0, 6}, /* VQDMLSL */
5375 {0, 0, 0, 0}, /* Integer VMULL */
5376 {0, 0, 0, 2}, /* VQDMULL */
5377 {0, 0, 0, 5}, /* Polynomial VMULL */
5378 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5379 };
5380
5381 prewiden = neon_3reg_wide[op][0];
5382 src1_wide = neon_3reg_wide[op][1];
5383 src2_wide = neon_3reg_wide[op][2];
695272dc 5384 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5385
695272dc
PM
5386 if (((undefreq & 1) && (size != 0)) ||
5387 ((undefreq & 2) && (size == 0)) ||
5388 ((undefreq & 4) && u)) {
5389 return 1;
5390 }
5391 if ((src1_wide && (rn & 1)) ||
5392 (src2_wide && (rm & 1)) ||
5393 (!src2_wide && (rd & 1))) {
ad69471c 5394 return 1;
695272dc 5395 }
ad69471c 5396
9ee6e8bb
PB
5397 /* Avoid overlapping operands. Wide source operands are
5398 always aligned so will never overlap with wide
5399 destinations in problematic ways. */
8f8e3aa4 5400 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5401 tmp = neon_load_reg(rm, 1);
5402 neon_store_scratch(2, tmp);
8f8e3aa4 5403 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5404 tmp = neon_load_reg(rn, 1);
5405 neon_store_scratch(2, tmp);
9ee6e8bb 5406 }
a50f5b91 5407 TCGV_UNUSED(tmp3);
9ee6e8bb 5408 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5409 if (src1_wide) {
5410 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5411 TCGV_UNUSED(tmp);
9ee6e8bb 5412 } else {
ad69471c 5413 if (pass == 1 && rd == rn) {
dd8fbd78 5414 tmp = neon_load_scratch(2);
9ee6e8bb 5415 } else {
ad69471c
PB
5416 tmp = neon_load_reg(rn, pass);
5417 }
5418 if (prewiden) {
5419 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5420 }
5421 }
ad69471c
PB
5422 if (src2_wide) {
5423 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5424 TCGV_UNUSED(tmp2);
9ee6e8bb 5425 } else {
ad69471c 5426 if (pass == 1 && rd == rm) {
dd8fbd78 5427 tmp2 = neon_load_scratch(2);
9ee6e8bb 5428 } else {
ad69471c
PB
5429 tmp2 = neon_load_reg(rm, pass);
5430 }
5431 if (prewiden) {
5432 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5433 }
9ee6e8bb
PB
5434 }
5435 switch (op) {
5436 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5437 gen_neon_addl(size);
9ee6e8bb 5438 break;
79b0e534 5439 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5440 gen_neon_subl(size);
9ee6e8bb
PB
5441 break;
5442 case 5: case 7: /* VABAL, VABDL */
5443 switch ((size << 1) | u) {
ad69471c
PB
5444 case 0:
5445 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5446 break;
5447 case 1:
5448 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5449 break;
5450 case 2:
5451 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5452 break;
5453 case 3:
5454 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5455 break;
5456 case 4:
5457 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5458 break;
5459 case 5:
5460 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5461 break;
9ee6e8bb
PB
5462 default: abort();
5463 }
7d1b0095
PM
5464 tcg_temp_free_i32(tmp2);
5465 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5466 break;
5467 case 8: case 9: case 10: case 11: case 12: case 13:
5468 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5469 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5470 break;
5471 case 14: /* Polynomial VMULL */
e5ca24cb 5472 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5473 tcg_temp_free_i32(tmp2);
5474 tcg_temp_free_i32(tmp);
e5ca24cb 5475 break;
695272dc
PM
5476 default: /* 15 is RESERVED: caught earlier */
5477 abort();
9ee6e8bb 5478 }
ebcd88ce
PM
5479 if (op == 13) {
5480 /* VQDMULL */
5481 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5482 neon_store_reg64(cpu_V0, rd + pass);
5483 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5484 /* Accumulate. */
ebcd88ce 5485 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5486 switch (op) {
4dc064e6
PM
5487 case 10: /* VMLSL */
5488 gen_neon_negl(cpu_V0, size);
5489 /* Fall through */
5490 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5491 gen_neon_addl(size);
9ee6e8bb
PB
5492 break;
5493 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5494 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5495 if (op == 11) {
5496 gen_neon_negl(cpu_V0, size);
5497 }
ad69471c
PB
5498 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5499 break;
9ee6e8bb
PB
5500 default:
5501 abort();
5502 }
ad69471c 5503 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5504 } else if (op == 4 || op == 6) {
5505 /* Narrowing operation. */
7d1b0095 5506 tmp = tcg_temp_new_i32();
79b0e534 5507 if (!u) {
9ee6e8bb 5508 switch (size) {
ad69471c
PB
5509 case 0:
5510 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5511 break;
5512 case 1:
5513 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5514 break;
5515 case 2:
5516 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5517 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5518 break;
9ee6e8bb
PB
5519 default: abort();
5520 }
5521 } else {
5522 switch (size) {
ad69471c
PB
5523 case 0:
5524 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5525 break;
5526 case 1:
5527 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5528 break;
5529 case 2:
5530 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5531 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5532 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5533 break;
9ee6e8bb
PB
5534 default: abort();
5535 }
5536 }
ad69471c
PB
5537 if (pass == 0) {
5538 tmp3 = tmp;
5539 } else {
5540 neon_store_reg(rd, 0, tmp3);
5541 neon_store_reg(rd, 1, tmp);
5542 }
9ee6e8bb
PB
5543 } else {
5544 /* Write back the result. */
ad69471c 5545 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5546 }
5547 }
5548 } else {
3e3326df
PM
5549 /* Two registers and a scalar. NB that for ops of this form
5550 * the ARM ARM labels bit 24 as Q, but it is in our variable
5551 * 'u', not 'q'.
5552 */
5553 if (size == 0) {
5554 return 1;
5555 }
9ee6e8bb 5556 switch (op) {
9ee6e8bb 5557 case 1: /* Float VMLA scalar */
9ee6e8bb 5558 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5559 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5560 if (size == 1) {
5561 return 1;
5562 }
5563 /* fall through */
5564 case 0: /* Integer VMLA scalar */
5565 case 4: /* Integer VMLS scalar */
5566 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5567 case 12: /* VQDMULH scalar */
5568 case 13: /* VQRDMULH scalar */
3e3326df
PM
5569 if (u && ((rd | rn) & 1)) {
5570 return 1;
5571 }
dd8fbd78
FN
5572 tmp = neon_get_scalar(size, rm);
5573 neon_store_scratch(0, tmp);
9ee6e8bb 5574 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5575 tmp = neon_load_scratch(0);
5576 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5577 if (op == 12) {
5578 if (size == 1) {
02da0b2d 5579 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5580 } else {
02da0b2d 5581 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5582 }
5583 } else if (op == 13) {
5584 if (size == 1) {
02da0b2d 5585 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5586 } else {
02da0b2d 5587 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5588 }
5589 } else if (op & 1) {
aa47cfdd
PM
5590 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5591 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5592 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5593 } else {
5594 switch (size) {
dd8fbd78
FN
5595 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5596 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5597 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5598 default: abort();
9ee6e8bb
PB
5599 }
5600 }
7d1b0095 5601 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5602 if (op < 8) {
5603 /* Accumulate. */
dd8fbd78 5604 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5605 switch (op) {
5606 case 0:
dd8fbd78 5607 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5608 break;
5609 case 1:
aa47cfdd
PM
5610 {
5611 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5612 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5613 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5614 break;
aa47cfdd 5615 }
9ee6e8bb 5616 case 4:
dd8fbd78 5617 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5618 break;
5619 case 5:
aa47cfdd
PM
5620 {
5621 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5622 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5623 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5624 break;
aa47cfdd 5625 }
9ee6e8bb
PB
5626 default:
5627 abort();
5628 }
7d1b0095 5629 tcg_temp_free_i32(tmp2);
9ee6e8bb 5630 }
dd8fbd78 5631 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5632 }
5633 break;
9ee6e8bb 5634 case 3: /* VQDMLAL scalar */
9ee6e8bb 5635 case 7: /* VQDMLSL scalar */
9ee6e8bb 5636 case 11: /* VQDMULL scalar */
3e3326df 5637 if (u == 1) {
ad69471c 5638 return 1;
3e3326df
PM
5639 }
5640 /* fall through */
5641 case 2: /* VMLAL scalar */
5642 case 6: /* VMLSL scalar */
5643 case 10: /* VMULL scalar */
5644 if (rd & 1) {
5645 return 1;
5646 }
dd8fbd78 5647 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5648 /* We need a copy of tmp2 because gen_neon_mull
5649 * deletes it during pass 0. */
7d1b0095 5650 tmp4 = tcg_temp_new_i32();
c6067f04 5651 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5652 tmp3 = neon_load_reg(rn, 1);
ad69471c 5653
9ee6e8bb 5654 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5655 if (pass == 0) {
5656 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5657 } else {
dd8fbd78 5658 tmp = tmp3;
c6067f04 5659 tmp2 = tmp4;
9ee6e8bb 5660 }
ad69471c 5661 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5662 if (op != 11) {
5663 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5664 }
9ee6e8bb 5665 switch (op) {
4dc064e6
PM
5666 case 6:
5667 gen_neon_negl(cpu_V0, size);
5668 /* Fall through */
5669 case 2:
ad69471c 5670 gen_neon_addl(size);
9ee6e8bb
PB
5671 break;
5672 case 3: case 7:
ad69471c 5673 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5674 if (op == 7) {
5675 gen_neon_negl(cpu_V0, size);
5676 }
ad69471c 5677 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5678 break;
5679 case 10:
5680 /* no-op */
5681 break;
5682 case 11:
ad69471c 5683 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5684 break;
5685 default:
5686 abort();
5687 }
ad69471c 5688 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5689 }
dd8fbd78 5690
dd8fbd78 5691
9ee6e8bb
PB
5692 break;
5693 default: /* 14 and 15 are RESERVED */
5694 return 1;
5695 }
5696 }
5697 } else { /* size == 3 */
5698 if (!u) {
5699 /* Extract. */
9ee6e8bb 5700 imm = (insn >> 8) & 0xf;
ad69471c
PB
5701
5702 if (imm > 7 && !q)
5703 return 1;
5704
52579ea1
PM
5705 if (q && ((rd | rn | rm) & 1)) {
5706 return 1;
5707 }
5708
ad69471c
PB
5709 if (imm == 0) {
5710 neon_load_reg64(cpu_V0, rn);
5711 if (q) {
5712 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5713 }
ad69471c
PB
5714 } else if (imm == 8) {
5715 neon_load_reg64(cpu_V0, rn + 1);
5716 if (q) {
5717 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5718 }
ad69471c 5719 } else if (q) {
a7812ae4 5720 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5721 if (imm < 8) {
5722 neon_load_reg64(cpu_V0, rn);
a7812ae4 5723 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5724 } else {
5725 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5726 neon_load_reg64(tmp64, rm);
ad69471c
PB
5727 }
5728 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5729 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5730 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5731 if (imm < 8) {
5732 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5733 } else {
ad69471c
PB
5734 neon_load_reg64(cpu_V1, rm + 1);
5735 imm -= 8;
9ee6e8bb 5736 }
ad69471c 5737 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5738 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5739 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5740 tcg_temp_free_i64(tmp64);
ad69471c 5741 } else {
a7812ae4 5742 /* BUGFIX */
ad69471c 5743 neon_load_reg64(cpu_V0, rn);
a7812ae4 5744 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5745 neon_load_reg64(cpu_V1, rm);
a7812ae4 5746 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5747 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5748 }
5749 neon_store_reg64(cpu_V0, rd);
5750 if (q) {
5751 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5752 }
5753 } else if ((insn & (1 << 11)) == 0) {
5754 /* Two register misc. */
5755 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5756 size = (insn >> 18) & 3;
600b828c
PM
5757 /* UNDEF for unknown op values and bad op-size combinations */
5758 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5759 return 1;
5760 }
fc2a9b37
PM
5761 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5762 q && ((rm | rd) & 1)) {
5763 return 1;
5764 }
9ee6e8bb 5765 switch (op) {
600b828c 5766 case NEON_2RM_VREV64:
9ee6e8bb 5767 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5768 tmp = neon_load_reg(rm, pass * 2);
5769 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5770 switch (size) {
dd8fbd78
FN
5771 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5772 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5773 case 2: /* no-op */ break;
5774 default: abort();
5775 }
dd8fbd78 5776 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5777 if (size == 2) {
dd8fbd78 5778 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5779 } else {
9ee6e8bb 5780 switch (size) {
dd8fbd78
FN
5781 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5782 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5783 default: abort();
5784 }
dd8fbd78 5785 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5786 }
5787 }
5788 break;
600b828c
PM
5789 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5790 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5791 for (pass = 0; pass < q + 1; pass++) {
5792 tmp = neon_load_reg(rm, pass * 2);
5793 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5794 tmp = neon_load_reg(rm, pass * 2 + 1);
5795 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5796 switch (size) {
5797 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5798 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5799 case 2: tcg_gen_add_i64(CPU_V001); break;
5800 default: abort();
5801 }
600b828c 5802 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5803 /* Accumulate. */
ad69471c
PB
5804 neon_load_reg64(cpu_V1, rd + pass);
5805 gen_neon_addl(size);
9ee6e8bb 5806 }
ad69471c 5807 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5808 }
5809 break;
600b828c 5810 case NEON_2RM_VTRN:
9ee6e8bb 5811 if (size == 2) {
a5a14945 5812 int n;
9ee6e8bb 5813 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5814 tmp = neon_load_reg(rm, n);
5815 tmp2 = neon_load_reg(rd, n + 1);
5816 neon_store_reg(rm, n, tmp2);
5817 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5818 }
5819 } else {
5820 goto elementwise;
5821 }
5822 break;
600b828c 5823 case NEON_2RM_VUZP:
02acedf9 5824 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5825 return 1;
9ee6e8bb
PB
5826 }
5827 break;
600b828c 5828 case NEON_2RM_VZIP:
d68a6f3a 5829 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5830 return 1;
9ee6e8bb
PB
5831 }
5832 break;
600b828c
PM
5833 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5834 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5835 if (rm & 1) {
5836 return 1;
5837 }
a50f5b91 5838 TCGV_UNUSED(tmp2);
9ee6e8bb 5839 for (pass = 0; pass < 2; pass++) {
ad69471c 5840 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5841 tmp = tcg_temp_new_i32();
600b828c
PM
5842 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5843 tmp, cpu_V0);
ad69471c
PB
5844 if (pass == 0) {
5845 tmp2 = tmp;
5846 } else {
5847 neon_store_reg(rd, 0, tmp2);
5848 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5849 }
9ee6e8bb
PB
5850 }
5851 break;
600b828c 5852 case NEON_2RM_VSHLL:
fc2a9b37 5853 if (q || (rd & 1)) {
9ee6e8bb 5854 return 1;
600b828c 5855 }
ad69471c
PB
5856 tmp = neon_load_reg(rm, 0);
5857 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5858 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5859 if (pass == 1)
5860 tmp = tmp2;
5861 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5862 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5863 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5864 }
5865 break;
600b828c 5866 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5867 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5868 q || (rm & 1)) {
5869 return 1;
5870 }
7d1b0095
PM
5871 tmp = tcg_temp_new_i32();
5872 tmp2 = tcg_temp_new_i32();
60011498 5873 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5874 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5875 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5876 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5877 tcg_gen_shli_i32(tmp2, tmp2, 16);
5878 tcg_gen_or_i32(tmp2, tmp2, tmp);
5879 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5880 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5881 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5882 neon_store_reg(rd, 0, tmp2);
7d1b0095 5883 tmp2 = tcg_temp_new_i32();
2d981da7 5884 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5885 tcg_gen_shli_i32(tmp2, tmp2, 16);
5886 tcg_gen_or_i32(tmp2, tmp2, tmp);
5887 neon_store_reg(rd, 1, tmp2);
7d1b0095 5888 tcg_temp_free_i32(tmp);
60011498 5889 break;
600b828c 5890 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5891 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5892 q || (rd & 1)) {
5893 return 1;
5894 }
7d1b0095 5895 tmp3 = tcg_temp_new_i32();
60011498
PB
5896 tmp = neon_load_reg(rm, 0);
5897 tmp2 = neon_load_reg(rm, 1);
5898 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5899 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5900 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5901 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5902 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5903 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5904 tcg_temp_free_i32(tmp);
60011498 5905 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5906 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5907 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5908 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5909 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5910 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5911 tcg_temp_free_i32(tmp2);
5912 tcg_temp_free_i32(tmp3);
60011498 5913 break;
9ee6e8bb
PB
5914 default:
5915 elementwise:
5916 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5917 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5918 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5919 neon_reg_offset(rm, pass));
dd8fbd78 5920 TCGV_UNUSED(tmp);
9ee6e8bb 5921 } else {
dd8fbd78 5922 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5923 }
5924 switch (op) {
600b828c 5925 case NEON_2RM_VREV32:
9ee6e8bb 5926 switch (size) {
dd8fbd78
FN
5927 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5928 case 1: gen_swap_half(tmp); break;
600b828c 5929 default: abort();
9ee6e8bb
PB
5930 }
5931 break;
600b828c 5932 case NEON_2RM_VREV16:
dd8fbd78 5933 gen_rev16(tmp);
9ee6e8bb 5934 break;
600b828c 5935 case NEON_2RM_VCLS:
9ee6e8bb 5936 switch (size) {
dd8fbd78
FN
5937 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5938 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5939 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5940 default: abort();
9ee6e8bb
PB
5941 }
5942 break;
600b828c 5943 case NEON_2RM_VCLZ:
9ee6e8bb 5944 switch (size) {
dd8fbd78
FN
5945 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5946 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5947 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5948 default: abort();
9ee6e8bb
PB
5949 }
5950 break;
600b828c 5951 case NEON_2RM_VCNT:
dd8fbd78 5952 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5953 break;
600b828c 5954 case NEON_2RM_VMVN:
dd8fbd78 5955 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 5956 break;
600b828c 5957 case NEON_2RM_VQABS:
9ee6e8bb 5958 switch (size) {
02da0b2d
PM
5959 case 0:
5960 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
5961 break;
5962 case 1:
5963 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
5964 break;
5965 case 2:
5966 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
5967 break;
600b828c 5968 default: abort();
9ee6e8bb
PB
5969 }
5970 break;
600b828c 5971 case NEON_2RM_VQNEG:
9ee6e8bb 5972 switch (size) {
02da0b2d
PM
5973 case 0:
5974 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
5975 break;
5976 case 1:
5977 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
5978 break;
5979 case 2:
5980 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
5981 break;
600b828c 5982 default: abort();
9ee6e8bb
PB
5983 }
5984 break;
600b828c 5985 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 5986 tmp2 = tcg_const_i32(0);
9ee6e8bb 5987 switch(size) {
dd8fbd78
FN
5988 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5989 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5990 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 5991 default: abort();
9ee6e8bb 5992 }
dd8fbd78 5993 tcg_temp_free(tmp2);
600b828c 5994 if (op == NEON_2RM_VCLE0) {
dd8fbd78 5995 tcg_gen_not_i32(tmp, tmp);
600b828c 5996 }
9ee6e8bb 5997 break;
600b828c 5998 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 5999 tmp2 = tcg_const_i32(0);
9ee6e8bb 6000 switch(size) {
dd8fbd78
FN
6001 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6002 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6003 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6004 default: abort();
9ee6e8bb 6005 }
dd8fbd78 6006 tcg_temp_free(tmp2);
600b828c 6007 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6008 tcg_gen_not_i32(tmp, tmp);
600b828c 6009 }
9ee6e8bb 6010 break;
600b828c 6011 case NEON_2RM_VCEQ0:
dd8fbd78 6012 tmp2 = tcg_const_i32(0);
9ee6e8bb 6013 switch(size) {
dd8fbd78
FN
6014 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6015 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6016 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6017 default: abort();
9ee6e8bb 6018 }
dd8fbd78 6019 tcg_temp_free(tmp2);
9ee6e8bb 6020 break;
600b828c 6021 case NEON_2RM_VABS:
9ee6e8bb 6022 switch(size) {
dd8fbd78
FN
6023 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6024 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6025 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6026 default: abort();
9ee6e8bb
PB
6027 }
6028 break;
600b828c 6029 case NEON_2RM_VNEG:
dd8fbd78
FN
6030 tmp2 = tcg_const_i32(0);
6031 gen_neon_rsb(size, tmp, tmp2);
6032 tcg_temp_free(tmp2);
9ee6e8bb 6033 break;
600b828c 6034 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6035 {
6036 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6037 tmp2 = tcg_const_i32(0);
aa47cfdd 6038 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6039 tcg_temp_free(tmp2);
aa47cfdd 6040 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6041 break;
aa47cfdd 6042 }
600b828c 6043 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6044 {
6045 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6046 tmp2 = tcg_const_i32(0);
aa47cfdd 6047 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6048 tcg_temp_free(tmp2);
aa47cfdd 6049 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6050 break;
aa47cfdd 6051 }
600b828c 6052 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6053 {
6054 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6055 tmp2 = tcg_const_i32(0);
aa47cfdd 6056 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6057 tcg_temp_free(tmp2);
aa47cfdd 6058 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6059 break;
aa47cfdd 6060 }
600b828c 6061 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6062 {
6063 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6064 tmp2 = tcg_const_i32(0);
aa47cfdd 6065 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6066 tcg_temp_free(tmp2);
aa47cfdd 6067 tcg_temp_free_ptr(fpstatus);
0e326109 6068 break;
aa47cfdd 6069 }
600b828c 6070 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6071 {
6072 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6073 tmp2 = tcg_const_i32(0);
aa47cfdd 6074 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6075 tcg_temp_free(tmp2);
aa47cfdd 6076 tcg_temp_free_ptr(fpstatus);
0e326109 6077 break;
aa47cfdd 6078 }
600b828c 6079 case NEON_2RM_VABS_F:
4373f3ce 6080 gen_vfp_abs(0);
9ee6e8bb 6081 break;
600b828c 6082 case NEON_2RM_VNEG_F:
4373f3ce 6083 gen_vfp_neg(0);
9ee6e8bb 6084 break;
600b828c 6085 case NEON_2RM_VSWP:
dd8fbd78
FN
6086 tmp2 = neon_load_reg(rd, pass);
6087 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6088 break;
600b828c 6089 case NEON_2RM_VTRN:
dd8fbd78 6090 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6091 switch (size) {
dd8fbd78
FN
6092 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6093 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6094 default: abort();
9ee6e8bb 6095 }
dd8fbd78 6096 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6097 break;
600b828c 6098 case NEON_2RM_VRECPE:
dd8fbd78 6099 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6100 break;
600b828c 6101 case NEON_2RM_VRSQRTE:
dd8fbd78 6102 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6103 break;
600b828c 6104 case NEON_2RM_VRECPE_F:
4373f3ce 6105 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6106 break;
600b828c 6107 case NEON_2RM_VRSQRTE_F:
4373f3ce 6108 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6109 break;
600b828c 6110 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6111 gen_vfp_sito(0, 1);
9ee6e8bb 6112 break;
600b828c 6113 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6114 gen_vfp_uito(0, 1);
9ee6e8bb 6115 break;
600b828c 6116 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6117 gen_vfp_tosiz(0, 1);
9ee6e8bb 6118 break;
600b828c 6119 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6120 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6121 break;
6122 default:
600b828c
PM
6123 /* Reserved op values were caught by the
6124 * neon_2rm_sizes[] check earlier.
6125 */
6126 abort();
9ee6e8bb 6127 }
600b828c 6128 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6129 tcg_gen_st_f32(cpu_F0s, cpu_env,
6130 neon_reg_offset(rd, pass));
9ee6e8bb 6131 } else {
dd8fbd78 6132 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6133 }
6134 }
6135 break;
6136 }
6137 } else if ((insn & (1 << 10)) == 0) {
6138 /* VTBL, VTBX. */
56907d77
PM
6139 int n = ((insn >> 8) & 3) + 1;
6140 if ((rn + n) > 32) {
6141 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6142 * helper function running off the end of the register file.
6143 */
6144 return 1;
6145 }
6146 n <<= 3;
9ee6e8bb 6147 if (insn & (1 << 6)) {
8f8e3aa4 6148 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6149 } else {
7d1b0095 6150 tmp = tcg_temp_new_i32();
8f8e3aa4 6151 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6152 }
8f8e3aa4 6153 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6154 tmp4 = tcg_const_i32(rn);
6155 tmp5 = tcg_const_i32(n);
9ef39277 6156 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6157 tcg_temp_free_i32(tmp);
9ee6e8bb 6158 if (insn & (1 << 6)) {
8f8e3aa4 6159 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6160 } else {
7d1b0095 6161 tmp = tcg_temp_new_i32();
8f8e3aa4 6162 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6163 }
8f8e3aa4 6164 tmp3 = neon_load_reg(rm, 1);
9ef39277 6165 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6166 tcg_temp_free_i32(tmp5);
6167 tcg_temp_free_i32(tmp4);
8f8e3aa4 6168 neon_store_reg(rd, 0, tmp2);
3018f259 6169 neon_store_reg(rd, 1, tmp3);
7d1b0095 6170 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6171 } else if ((insn & 0x380) == 0) {
6172 /* VDUP */
133da6aa
JR
6173 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6174 return 1;
6175 }
9ee6e8bb 6176 if (insn & (1 << 19)) {
dd8fbd78 6177 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6178 } else {
dd8fbd78 6179 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6180 }
6181 if (insn & (1 << 16)) {
dd8fbd78 6182 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6183 } else if (insn & (1 << 17)) {
6184 if ((insn >> 18) & 1)
dd8fbd78 6185 gen_neon_dup_high16(tmp);
9ee6e8bb 6186 else
dd8fbd78 6187 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6188 }
6189 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6190 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6191 tcg_gen_mov_i32(tmp2, tmp);
6192 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6193 }
7d1b0095 6194 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6195 } else {
6196 return 1;
6197 }
6198 }
6199 }
6200 return 0;
6201}
6202
0ecb72a5 6203static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6204{
4b6a83fb
PM
6205 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6206 const ARMCPRegInfo *ri;
6207 ARMCPU *cpu = arm_env_get_cpu(env);
9ee6e8bb
PB
6208
6209 cpnum = (insn >> 8) & 0xf;
6210 if (arm_feature(env, ARM_FEATURE_XSCALE)
6211 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6212 return 1;
6213
4b6a83fb 6214 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6215 switch (cpnum) {
6216 case 0:
6217 case 1:
6218 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6219 return disas_iwmmxt_insn(env, s, insn);
6220 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6221 return disas_dsp_insn(env, s, insn);
6222 }
6223 return 1;
6224 case 10:
6225 case 11:
6226 return disas_vfp_insn (env, s, insn);
4b6a83fb
PM
6227 default:
6228 break;
6229 }
6230
6231 /* Otherwise treat as a generic register access */
6232 is64 = (insn & (1 << 25)) == 0;
6233 if (!is64 && ((insn & (1 << 4)) == 0)) {
6234 /* cdp */
6235 return 1;
6236 }
6237
6238 crm = insn & 0xf;
6239 if (is64) {
6240 crn = 0;
6241 opc1 = (insn >> 4) & 0xf;
6242 opc2 = 0;
6243 rt2 = (insn >> 16) & 0xf;
6244 } else {
6245 crn = (insn >> 16) & 0xf;
6246 opc1 = (insn >> 21) & 7;
6247 opc2 = (insn >> 5) & 7;
6248 rt2 = 0;
6249 }
6250 isread = (insn >> 20) & 1;
6251 rt = (insn >> 12) & 0xf;
6252
6253 ri = get_arm_cp_reginfo(cpu,
6254 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6255 if (ri) {
6256 /* Check access permissions */
6257 if (!cp_access_ok(env, ri, isread)) {
6258 return 1;
6259 }
6260
6261 /* Handle special cases first */
6262 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6263 case ARM_CP_NOP:
6264 return 0;
6265 case ARM_CP_WFI:
6266 if (isread) {
6267 return 1;
6268 }
6269 gen_set_pc_im(s->pc);
6270 s->is_jmp = DISAS_WFI;
2bee5105 6271 return 0;
4b6a83fb
PM
6272 default:
6273 break;
6274 }
6275
6276 if (isread) {
6277 /* Read */
6278 if (is64) {
6279 TCGv_i64 tmp64;
6280 TCGv_i32 tmp;
6281 if (ri->type & ARM_CP_CONST) {
6282 tmp64 = tcg_const_i64(ri->resetvalue);
6283 } else if (ri->readfn) {
6284 TCGv_ptr tmpptr;
6285 gen_set_pc_im(s->pc);
6286 tmp64 = tcg_temp_new_i64();
6287 tmpptr = tcg_const_ptr(ri);
6288 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6289 tcg_temp_free_ptr(tmpptr);
6290 } else {
6291 tmp64 = tcg_temp_new_i64();
6292 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6293 }
6294 tmp = tcg_temp_new_i32();
6295 tcg_gen_trunc_i64_i32(tmp, tmp64);
6296 store_reg(s, rt, tmp);
6297 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6298 tmp = tcg_temp_new_i32();
4b6a83fb 6299 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6300 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6301 store_reg(s, rt2, tmp);
6302 } else {
6303 TCGv tmp;
6304 if (ri->type & ARM_CP_CONST) {
6305 tmp = tcg_const_i32(ri->resetvalue);
6306 } else if (ri->readfn) {
6307 TCGv_ptr tmpptr;
6308 gen_set_pc_im(s->pc);
6309 tmp = tcg_temp_new_i32();
6310 tmpptr = tcg_const_ptr(ri);
6311 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6312 tcg_temp_free_ptr(tmpptr);
6313 } else {
6314 tmp = load_cpu_offset(ri->fieldoffset);
6315 }
6316 if (rt == 15) {
6317 /* Destination register of r15 for 32 bit loads sets
6318 * the condition codes from the high 4 bits of the value
6319 */
6320 gen_set_nzcv(tmp);
6321 tcg_temp_free_i32(tmp);
6322 } else {
6323 store_reg(s, rt, tmp);
6324 }
6325 }
6326 } else {
6327 /* Write */
6328 if (ri->type & ARM_CP_CONST) {
6329 /* If not forbidden by access permissions, treat as WI */
6330 return 0;
6331 }
6332
6333 if (is64) {
6334 TCGv tmplo, tmphi;
6335 TCGv_i64 tmp64 = tcg_temp_new_i64();
6336 tmplo = load_reg(s, rt);
6337 tmphi = load_reg(s, rt2);
6338 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6339 tcg_temp_free_i32(tmplo);
6340 tcg_temp_free_i32(tmphi);
6341 if (ri->writefn) {
6342 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6343 gen_set_pc_im(s->pc);
6344 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6345 tcg_temp_free_ptr(tmpptr);
6346 } else {
6347 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6348 }
6349 tcg_temp_free_i64(tmp64);
6350 } else {
6351 if (ri->writefn) {
6352 TCGv tmp;
6353 TCGv_ptr tmpptr;
6354 gen_set_pc_im(s->pc);
6355 tmp = load_reg(s, rt);
6356 tmpptr = tcg_const_ptr(ri);
6357 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6358 tcg_temp_free_ptr(tmpptr);
6359 tcg_temp_free_i32(tmp);
6360 } else {
6361 TCGv tmp = load_reg(s, rt);
6362 store_cpu_offset(tmp, ri->fieldoffset);
6363 }
6364 }
6365 /* We default to ending the TB on a coprocessor register write,
6366 * but allow this to be suppressed by the register definition
6367 * (usually only necessary to work around guest bugs).
6368 */
6369 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6370 gen_lookup_tb(s);
6371 }
6372 }
6373 return 0;
6374 }
6375
4a9a539f 6376 return 1;
9ee6e8bb
PB
6377}
6378
5e3f878a
PB
6379
6380/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6381static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6382{
6383 TCGv tmp;
7d1b0095 6384 tmp = tcg_temp_new_i32();
5e3f878a
PB
6385 tcg_gen_trunc_i64_i32(tmp, val);
6386 store_reg(s, rlow, tmp);
7d1b0095 6387 tmp = tcg_temp_new_i32();
5e3f878a
PB
6388 tcg_gen_shri_i64(val, val, 32);
6389 tcg_gen_trunc_i64_i32(tmp, val);
6390 store_reg(s, rhigh, tmp);
6391}
6392
6393/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6394static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6395{
a7812ae4 6396 TCGv_i64 tmp;
5e3f878a
PB
6397 TCGv tmp2;
6398
36aa55dc 6399 /* Load value and extend to 64 bits. */
a7812ae4 6400 tmp = tcg_temp_new_i64();
5e3f878a
PB
6401 tmp2 = load_reg(s, rlow);
6402 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6403 tcg_temp_free_i32(tmp2);
5e3f878a 6404 tcg_gen_add_i64(val, val, tmp);
b75263d6 6405 tcg_temp_free_i64(tmp);
5e3f878a
PB
6406}
6407
6408/* load and add a 64-bit value from a register pair. */
a7812ae4 6409static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6410{
a7812ae4 6411 TCGv_i64 tmp;
36aa55dc
PB
6412 TCGv tmpl;
6413 TCGv tmph;
5e3f878a
PB
6414
6415 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6416 tmpl = load_reg(s, rlow);
6417 tmph = load_reg(s, rhigh);
a7812ae4 6418 tmp = tcg_temp_new_i64();
36aa55dc 6419 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6420 tcg_temp_free_i32(tmpl);
6421 tcg_temp_free_i32(tmph);
5e3f878a 6422 tcg_gen_add_i64(val, val, tmp);
b75263d6 6423 tcg_temp_free_i64(tmp);
5e3f878a
PB
6424}
6425
6426/* Set N and Z flags from a 64-bit value. */
a7812ae4 6427static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6428{
7d1b0095 6429 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6430 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6431 gen_logic_CC(tmp);
7d1b0095 6432 tcg_temp_free_i32(tmp);
5e3f878a
PB
6433}
6434
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */

/* Emit code for LDREX{B,H,,D}: load rt (and rt2 when size == 3) from
 * addr, recording the address and loaded value(s) in cpu_exclusive_*
 * for the later store-exclusive check.
 * size is log2(bytes): 0 = byte, 1 = halfword, 2 = word, 3 = doubleword.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        /* Doubleword is handled as two 32-bit loads. */
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Remember the loaded value for the store-exclusive comparison. */
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        /* Second word of an LDREXD: goes to rt2, tracked in
         * cpu_exclusive_high. */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    /* Mark the monitored address last, once the loads have succeeded. */
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
6475
/* CLREX: clear the local exclusive monitor by invalidating the
 * recorded exclusive address (-1 never matches a real address).
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6480
#ifdef CONFIG_USER_ONLY
/* STREX{B,H,,D} in user-mode emulation: the compare-and-store cannot be
 * done atomically from generated code, so record the operands in
 * cpu_exclusive_test/info and raise EXCP_STREX for out-of-line handling.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
/* STREX{B,H,,D} in system emulation: succeed (rd = 0) only if addr
 * matches the monitored address and the memory still holds the value
 * seen by the matching LDREX; otherwise set rd = 1.
 * size is log2(bytes); size == 3 handles the doubleword pair rt:rt2.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    /* Re-load the current memory contents for the value comparison. */
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* Doubleword: also check the second word against
         * cpu_exclusive_high. */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    /* All checks passed: perform the store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the exclusive monitor is cleared. */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
6559
0ecb72a5 6560static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6561{
6562 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6563 TCGv tmp;
3670669c 6564 TCGv tmp2;
6ddbc6e4 6565 TCGv tmp3;
b0109805 6566 TCGv addr;
a7812ae4 6567 TCGv_i64 tmp64;
9ee6e8bb 6568
d31dd73e 6569 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6570 s->pc += 4;
6571
6572 /* M variants do not implement ARM mode. */
6573 if (IS_M(env))
6574 goto illegal_op;
6575 cond = insn >> 28;
6576 if (cond == 0xf){
be5e7a76
DES
6577 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6578 * choose to UNDEF. In ARMv5 and above the space is used
6579 * for miscellaneous unconditional instructions.
6580 */
6581 ARCH(5);
6582
9ee6e8bb
PB
6583 /* Unconditional instructions. */
6584 if (((insn >> 25) & 7) == 1) {
6585 /* NEON Data processing. */
6586 if (!arm_feature(env, ARM_FEATURE_NEON))
6587 goto illegal_op;
6588
6589 if (disas_neon_data_insn(env, s, insn))
6590 goto illegal_op;
6591 return;
6592 }
6593 if ((insn & 0x0f100000) == 0x04000000) {
6594 /* NEON load/store. */
6595 if (!arm_feature(env, ARM_FEATURE_NEON))
6596 goto illegal_op;
6597
6598 if (disas_neon_ls_insn(env, s, insn))
6599 goto illegal_op;
6600 return;
6601 }
3d185e5d
PM
6602 if (((insn & 0x0f30f000) == 0x0510f000) ||
6603 ((insn & 0x0f30f010) == 0x0710f000)) {
6604 if ((insn & (1 << 22)) == 0) {
6605 /* PLDW; v7MP */
6606 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6607 goto illegal_op;
6608 }
6609 }
6610 /* Otherwise PLD; v5TE+ */
be5e7a76 6611 ARCH(5TE);
3d185e5d
PM
6612 return;
6613 }
6614 if (((insn & 0x0f70f000) == 0x0450f000) ||
6615 ((insn & 0x0f70f010) == 0x0650f000)) {
6616 ARCH(7);
6617 return; /* PLI; V7 */
6618 }
6619 if (((insn & 0x0f700000) == 0x04100000) ||
6620 ((insn & 0x0f700010) == 0x06100000)) {
6621 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6622 goto illegal_op;
6623 }
6624 return; /* v7MP: Unallocated memory hint: must NOP */
6625 }
6626
6627 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6628 ARCH(6);
6629 /* setend */
10962fd5
PM
6630 if (((insn >> 9) & 1) != s->bswap_code) {
6631 /* Dynamic endianness switching not implemented. */
9ee6e8bb
PB
6632 goto illegal_op;
6633 }
6634 return;
6635 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6636 switch ((insn >> 4) & 0xf) {
6637 case 1: /* clrex */
6638 ARCH(6K);
426f5abc 6639 gen_clrex(s);
9ee6e8bb
PB
6640 return;
6641 case 4: /* dsb */
6642 case 5: /* dmb */
6643 case 6: /* isb */
6644 ARCH(7);
6645 /* We don't emulate caches so these are a no-op. */
6646 return;
6647 default:
6648 goto illegal_op;
6649 }
6650 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6651 /* srs */
c67b6b71 6652 int32_t offset;
9ee6e8bb
PB
6653 if (IS_USER(s))
6654 goto illegal_op;
6655 ARCH(6);
6656 op1 = (insn & 0x1f);
7d1b0095 6657 addr = tcg_temp_new_i32();
39ea3d4e
PM
6658 tmp = tcg_const_i32(op1);
6659 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6660 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6661 i = (insn >> 23) & 3;
6662 switch (i) {
6663 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6664 case 1: offset = 0; break; /* IA */
6665 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6666 case 3: offset = 4; break; /* IB */
6667 default: abort();
6668 }
6669 if (offset)
b0109805
PB
6670 tcg_gen_addi_i32(addr, addr, offset);
6671 tmp = load_reg(s, 14);
6672 gen_st32(tmp, addr, 0);
c67b6b71 6673 tmp = load_cpu_field(spsr);
b0109805
PB
6674 tcg_gen_addi_i32(addr, addr, 4);
6675 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6676 if (insn & (1 << 21)) {
6677 /* Base writeback. */
6678 switch (i) {
6679 case 0: offset = -8; break;
c67b6b71
FN
6680 case 1: offset = 4; break;
6681 case 2: offset = -4; break;
9ee6e8bb
PB
6682 case 3: offset = 0; break;
6683 default: abort();
6684 }
6685 if (offset)
c67b6b71 6686 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6687 tmp = tcg_const_i32(op1);
6688 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6689 tcg_temp_free_i32(tmp);
7d1b0095 6690 tcg_temp_free_i32(addr);
b0109805 6691 } else {
7d1b0095 6692 tcg_temp_free_i32(addr);
9ee6e8bb 6693 }
a990f58f 6694 return;
ea825eee 6695 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6696 /* rfe */
c67b6b71 6697 int32_t offset;
9ee6e8bb
PB
6698 if (IS_USER(s))
6699 goto illegal_op;
6700 ARCH(6);
6701 rn = (insn >> 16) & 0xf;
b0109805 6702 addr = load_reg(s, rn);
9ee6e8bb
PB
6703 i = (insn >> 23) & 3;
6704 switch (i) {
b0109805 6705 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6706 case 1: offset = 0; break; /* IA */
6707 case 2: offset = -8; break; /* DB */
b0109805 6708 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6709 default: abort();
6710 }
6711 if (offset)
b0109805
PB
6712 tcg_gen_addi_i32(addr, addr, offset);
6713 /* Load PC into tmp and CPSR into tmp2. */
6714 tmp = gen_ld32(addr, 0);
6715 tcg_gen_addi_i32(addr, addr, 4);
6716 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6717 if (insn & (1 << 21)) {
6718 /* Base writeback. */
6719 switch (i) {
b0109805 6720 case 0: offset = -8; break;
c67b6b71
FN
6721 case 1: offset = 4; break;
6722 case 2: offset = -4; break;
b0109805 6723 case 3: offset = 0; break;
9ee6e8bb
PB
6724 default: abort();
6725 }
6726 if (offset)
b0109805
PB
6727 tcg_gen_addi_i32(addr, addr, offset);
6728 store_reg(s, rn, addr);
6729 } else {
7d1b0095 6730 tcg_temp_free_i32(addr);
9ee6e8bb 6731 }
b0109805 6732 gen_rfe(s, tmp, tmp2);
c67b6b71 6733 return;
9ee6e8bb
PB
6734 } else if ((insn & 0x0e000000) == 0x0a000000) {
6735 /* branch link and change to thumb (blx <offset>) */
6736 int32_t offset;
6737
6738 val = (uint32_t)s->pc;
7d1b0095 6739 tmp = tcg_temp_new_i32();
d9ba4830
PB
6740 tcg_gen_movi_i32(tmp, val);
6741 store_reg(s, 14, tmp);
9ee6e8bb
PB
6742 /* Sign-extend the 24-bit offset */
6743 offset = (((int32_t)insn) << 8) >> 8;
6744 /* offset * 4 + bit24 * 2 + (thumb bit) */
6745 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6746 /* pipeline offset */
6747 val += 4;
be5e7a76 6748 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6749 gen_bx_im(s, val);
9ee6e8bb
PB
6750 return;
6751 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6752 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6753 /* iWMMXt register transfer. */
6754 if (env->cp15.c15_cpar & (1 << 1))
6755 if (!disas_iwmmxt_insn(env, s, insn))
6756 return;
6757 }
6758 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6759 /* Coprocessor double register transfer. */
be5e7a76 6760 ARCH(5TE);
9ee6e8bb
PB
6761 } else if ((insn & 0x0f000010) == 0x0e000010) {
6762 /* Additional coprocessor register transfer. */
7997d92f 6763 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6764 uint32_t mask;
6765 uint32_t val;
6766 /* cps (privileged) */
6767 if (IS_USER(s))
6768 return;
6769 mask = val = 0;
6770 if (insn & (1 << 19)) {
6771 if (insn & (1 << 8))
6772 mask |= CPSR_A;
6773 if (insn & (1 << 7))
6774 mask |= CPSR_I;
6775 if (insn & (1 << 6))
6776 mask |= CPSR_F;
6777 if (insn & (1 << 18))
6778 val |= mask;
6779 }
7997d92f 6780 if (insn & (1 << 17)) {
9ee6e8bb
PB
6781 mask |= CPSR_M;
6782 val |= (insn & 0x1f);
6783 }
6784 if (mask) {
2fbac54b 6785 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6786 }
6787 return;
6788 }
6789 goto illegal_op;
6790 }
6791 if (cond != 0xe) {
6792 /* if not always execute, we generate a conditional jump to
6793 next instruction */
6794 s->condlabel = gen_new_label();
d9ba4830 6795 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6796 s->condjmp = 1;
6797 }
6798 if ((insn & 0x0f900000) == 0x03000000) {
6799 if ((insn & (1 << 21)) == 0) {
6800 ARCH(6T2);
6801 rd = (insn >> 12) & 0xf;
6802 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6803 if ((insn & (1 << 22)) == 0) {
6804 /* MOVW */
7d1b0095 6805 tmp = tcg_temp_new_i32();
5e3f878a 6806 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6807 } else {
6808 /* MOVT */
5e3f878a 6809 tmp = load_reg(s, rd);
86831435 6810 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6811 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6812 }
5e3f878a 6813 store_reg(s, rd, tmp);
9ee6e8bb
PB
6814 } else {
6815 if (((insn >> 12) & 0xf) != 0xf)
6816 goto illegal_op;
6817 if (((insn >> 16) & 0xf) == 0) {
6818 gen_nop_hint(s, insn & 0xff);
6819 } else {
6820 /* CPSR = immediate */
6821 val = insn & 0xff;
6822 shift = ((insn >> 8) & 0xf) * 2;
6823 if (shift)
6824 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6825 i = ((insn & (1 << 22)) != 0);
2fbac54b 6826 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6827 goto illegal_op;
6828 }
6829 }
6830 } else if ((insn & 0x0f900000) == 0x01000000
6831 && (insn & 0x00000090) != 0x00000090) {
6832 /* miscellaneous instructions */
6833 op1 = (insn >> 21) & 3;
6834 sh = (insn >> 4) & 0xf;
6835 rm = insn & 0xf;
6836 switch (sh) {
6837 case 0x0: /* move program status register */
6838 if (op1 & 1) {
6839 /* PSR = reg */
2fbac54b 6840 tmp = load_reg(s, rm);
9ee6e8bb 6841 i = ((op1 & 2) != 0);
2fbac54b 6842 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6843 goto illegal_op;
6844 } else {
6845 /* reg = PSR */
6846 rd = (insn >> 12) & 0xf;
6847 if (op1 & 2) {
6848 if (IS_USER(s))
6849 goto illegal_op;
d9ba4830 6850 tmp = load_cpu_field(spsr);
9ee6e8bb 6851 } else {
7d1b0095 6852 tmp = tcg_temp_new_i32();
9ef39277 6853 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6854 }
d9ba4830 6855 store_reg(s, rd, tmp);
9ee6e8bb
PB
6856 }
6857 break;
6858 case 0x1:
6859 if (op1 == 1) {
6860 /* branch/exchange thumb (bx). */
be5e7a76 6861 ARCH(4T);
d9ba4830
PB
6862 tmp = load_reg(s, rm);
6863 gen_bx(s, tmp);
9ee6e8bb
PB
6864 } else if (op1 == 3) {
6865 /* clz */
be5e7a76 6866 ARCH(5);
9ee6e8bb 6867 rd = (insn >> 12) & 0xf;
1497c961
PB
6868 tmp = load_reg(s, rm);
6869 gen_helper_clz(tmp, tmp);
6870 store_reg(s, rd, tmp);
9ee6e8bb
PB
6871 } else {
6872 goto illegal_op;
6873 }
6874 break;
6875 case 0x2:
6876 if (op1 == 1) {
6877 ARCH(5J); /* bxj */
6878 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6879 tmp = load_reg(s, rm);
6880 gen_bx(s, tmp);
9ee6e8bb
PB
6881 } else {
6882 goto illegal_op;
6883 }
6884 break;
6885 case 0x3:
6886 if (op1 != 1)
6887 goto illegal_op;
6888
be5e7a76 6889 ARCH(5);
9ee6e8bb 6890 /* branch link/exchange thumb (blx) */
d9ba4830 6891 tmp = load_reg(s, rm);
7d1b0095 6892 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6893 tcg_gen_movi_i32(tmp2, s->pc);
6894 store_reg(s, 14, tmp2);
6895 gen_bx(s, tmp);
9ee6e8bb
PB
6896 break;
6897 case 0x5: /* saturating add/subtract */
be5e7a76 6898 ARCH(5TE);
9ee6e8bb
PB
6899 rd = (insn >> 12) & 0xf;
6900 rn = (insn >> 16) & 0xf;
b40d0353 6901 tmp = load_reg(s, rm);
5e3f878a 6902 tmp2 = load_reg(s, rn);
9ee6e8bb 6903 if (op1 & 2)
9ef39277 6904 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 6905 if (op1 & 1)
9ef39277 6906 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6907 else
9ef39277 6908 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 6909 tcg_temp_free_i32(tmp2);
5e3f878a 6910 store_reg(s, rd, tmp);
9ee6e8bb 6911 break;
49e14940
AL
6912 case 7:
6913 /* SMC instruction (op1 == 3)
6914 and undefined instructions (op1 == 0 || op1 == 2)
6915 will trap */
6916 if (op1 != 1) {
6917 goto illegal_op;
6918 }
6919 /* bkpt */
be5e7a76 6920 ARCH(5);
bc4a0de0 6921 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6922 break;
6923 case 0x8: /* signed multiply */
6924 case 0xa:
6925 case 0xc:
6926 case 0xe:
be5e7a76 6927 ARCH(5TE);
9ee6e8bb
PB
6928 rs = (insn >> 8) & 0xf;
6929 rn = (insn >> 12) & 0xf;
6930 rd = (insn >> 16) & 0xf;
6931 if (op1 == 1) {
6932 /* (32 * 16) >> 16 */
5e3f878a
PB
6933 tmp = load_reg(s, rm);
6934 tmp2 = load_reg(s, rs);
9ee6e8bb 6935 if (sh & 4)
5e3f878a 6936 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6937 else
5e3f878a 6938 gen_sxth(tmp2);
a7812ae4
PB
6939 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6940 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6941 tmp = tcg_temp_new_i32();
a7812ae4 6942 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6943 tcg_temp_free_i64(tmp64);
9ee6e8bb 6944 if ((sh & 2) == 0) {
5e3f878a 6945 tmp2 = load_reg(s, rn);
9ef39277 6946 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6947 tcg_temp_free_i32(tmp2);
9ee6e8bb 6948 }
5e3f878a 6949 store_reg(s, rd, tmp);
9ee6e8bb
PB
6950 } else {
6951 /* 16 * 16 */
5e3f878a
PB
6952 tmp = load_reg(s, rm);
6953 tmp2 = load_reg(s, rs);
6954 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6955 tcg_temp_free_i32(tmp2);
9ee6e8bb 6956 if (op1 == 2) {
a7812ae4
PB
6957 tmp64 = tcg_temp_new_i64();
6958 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6959 tcg_temp_free_i32(tmp);
a7812ae4
PB
6960 gen_addq(s, tmp64, rn, rd);
6961 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6962 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6963 } else {
6964 if (op1 == 0) {
5e3f878a 6965 tmp2 = load_reg(s, rn);
9ef39277 6966 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6967 tcg_temp_free_i32(tmp2);
9ee6e8bb 6968 }
5e3f878a 6969 store_reg(s, rd, tmp);
9ee6e8bb
PB
6970 }
6971 }
6972 break;
6973 default:
6974 goto illegal_op;
6975 }
6976 } else if (((insn & 0x0e000000) == 0 &&
6977 (insn & 0x00000090) != 0x90) ||
6978 ((insn & 0x0e000000) == (1 << 25))) {
6979 int set_cc, logic_cc, shiftop;
6980
6981 op1 = (insn >> 21) & 0xf;
6982 set_cc = (insn >> 20) & 1;
6983 logic_cc = table_logic_cc[op1] & set_cc;
6984
6985 /* data processing instruction */
6986 if (insn & (1 << 25)) {
6987 /* immediate operand */
6988 val = insn & 0xff;
6989 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6990 if (shift) {
9ee6e8bb 6991 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6992 }
7d1b0095 6993 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6994 tcg_gen_movi_i32(tmp2, val);
6995 if (logic_cc && shift) {
6996 gen_set_CF_bit31(tmp2);
6997 }
9ee6e8bb
PB
6998 } else {
6999 /* register */
7000 rm = (insn) & 0xf;
e9bb4aa9 7001 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7002 shiftop = (insn >> 5) & 3;
7003 if (!(insn & (1 << 4))) {
7004 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7005 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7006 } else {
7007 rs = (insn >> 8) & 0xf;
8984bd2e 7008 tmp = load_reg(s, rs);
e9bb4aa9 7009 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7010 }
7011 }
7012 if (op1 != 0x0f && op1 != 0x0d) {
7013 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7014 tmp = load_reg(s, rn);
7015 } else {
7016 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7017 }
7018 rd = (insn >> 12) & 0xf;
7019 switch(op1) {
7020 case 0x00:
e9bb4aa9
JR
7021 tcg_gen_and_i32(tmp, tmp, tmp2);
7022 if (logic_cc) {
7023 gen_logic_CC(tmp);
7024 }
21aeb343 7025 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7026 break;
7027 case 0x01:
e9bb4aa9
JR
7028 tcg_gen_xor_i32(tmp, tmp, tmp2);
7029 if (logic_cc) {
7030 gen_logic_CC(tmp);
7031 }
21aeb343 7032 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7033 break;
7034 case 0x02:
7035 if (set_cc && rd == 15) {
7036 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7037 if (IS_USER(s)) {
9ee6e8bb 7038 goto illegal_op;
e9bb4aa9 7039 }
72485ec4 7040 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7041 gen_exception_return(s, tmp);
9ee6e8bb 7042 } else {
e9bb4aa9 7043 if (set_cc) {
72485ec4 7044 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7045 } else {
7046 tcg_gen_sub_i32(tmp, tmp, tmp2);
7047 }
21aeb343 7048 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7049 }
7050 break;
7051 case 0x03:
e9bb4aa9 7052 if (set_cc) {
72485ec4 7053 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7054 } else {
7055 tcg_gen_sub_i32(tmp, tmp2, tmp);
7056 }
21aeb343 7057 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7058 break;
7059 case 0x04:
e9bb4aa9 7060 if (set_cc) {
72485ec4 7061 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7062 } else {
7063 tcg_gen_add_i32(tmp, tmp, tmp2);
7064 }
21aeb343 7065 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7066 break;
7067 case 0x05:
e9bb4aa9 7068 if (set_cc) {
9ef39277 7069 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
e9bb4aa9
JR
7070 } else {
7071 gen_add_carry(tmp, tmp, tmp2);
7072 }
21aeb343 7073 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7074 break;
7075 case 0x06:
e9bb4aa9 7076 if (set_cc) {
9ef39277 7077 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
e9bb4aa9
JR
7078 } else {
7079 gen_sub_carry(tmp, tmp, tmp2);
7080 }
21aeb343 7081 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7082 break;
7083 case 0x07:
e9bb4aa9 7084 if (set_cc) {
9ef39277 7085 gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
e9bb4aa9
JR
7086 } else {
7087 gen_sub_carry(tmp, tmp2, tmp);
7088 }
21aeb343 7089 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7090 break;
7091 case 0x08:
7092 if (set_cc) {
e9bb4aa9
JR
7093 tcg_gen_and_i32(tmp, tmp, tmp2);
7094 gen_logic_CC(tmp);
9ee6e8bb 7095 }
7d1b0095 7096 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7097 break;
7098 case 0x09:
7099 if (set_cc) {
e9bb4aa9
JR
7100 tcg_gen_xor_i32(tmp, tmp, tmp2);
7101 gen_logic_CC(tmp);
9ee6e8bb 7102 }
7d1b0095 7103 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7104 break;
7105 case 0x0a:
7106 if (set_cc) {
72485ec4 7107 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7108 }
7d1b0095 7109 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7110 break;
7111 case 0x0b:
7112 if (set_cc) {
72485ec4 7113 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7114 }
7d1b0095 7115 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7116 break;
7117 case 0x0c:
e9bb4aa9
JR
7118 tcg_gen_or_i32(tmp, tmp, tmp2);
7119 if (logic_cc) {
7120 gen_logic_CC(tmp);
7121 }
21aeb343 7122 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7123 break;
7124 case 0x0d:
7125 if (logic_cc && rd == 15) {
7126 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7127 if (IS_USER(s)) {
9ee6e8bb 7128 goto illegal_op;
e9bb4aa9
JR
7129 }
7130 gen_exception_return(s, tmp2);
9ee6e8bb 7131 } else {
e9bb4aa9
JR
7132 if (logic_cc) {
7133 gen_logic_CC(tmp2);
7134 }
21aeb343 7135 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7136 }
7137 break;
7138 case 0x0e:
f669df27 7139 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7140 if (logic_cc) {
7141 gen_logic_CC(tmp);
7142 }
21aeb343 7143 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7144 break;
7145 default:
7146 case 0x0f:
e9bb4aa9
JR
7147 tcg_gen_not_i32(tmp2, tmp2);
7148 if (logic_cc) {
7149 gen_logic_CC(tmp2);
7150 }
21aeb343 7151 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7152 break;
7153 }
e9bb4aa9 7154 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7155 tcg_temp_free_i32(tmp2);
e9bb4aa9 7156 }
9ee6e8bb
PB
7157 } else {
7158 /* other instructions */
7159 op1 = (insn >> 24) & 0xf;
7160 switch(op1) {
7161 case 0x0:
7162 case 0x1:
7163 /* multiplies, extra load/stores */
7164 sh = (insn >> 5) & 3;
7165 if (sh == 0) {
7166 if (op1 == 0x0) {
7167 rd = (insn >> 16) & 0xf;
7168 rn = (insn >> 12) & 0xf;
7169 rs = (insn >> 8) & 0xf;
7170 rm = (insn) & 0xf;
7171 op1 = (insn >> 20) & 0xf;
7172 switch (op1) {
7173 case 0: case 1: case 2: case 3: case 6:
7174 /* 32 bit mul */
5e3f878a
PB
7175 tmp = load_reg(s, rs);
7176 tmp2 = load_reg(s, rm);
7177 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7178 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7179 if (insn & (1 << 22)) {
7180 /* Subtract (mls) */
7181 ARCH(6T2);
5e3f878a
PB
7182 tmp2 = load_reg(s, rn);
7183 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7184 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7185 } else if (insn & (1 << 21)) {
7186 /* Add */
5e3f878a
PB
7187 tmp2 = load_reg(s, rn);
7188 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7189 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7190 }
7191 if (insn & (1 << 20))
5e3f878a
PB
7192 gen_logic_CC(tmp);
7193 store_reg(s, rd, tmp);
9ee6e8bb 7194 break;
8aac08b1
AJ
7195 case 4:
7196 /* 64 bit mul double accumulate (UMAAL) */
7197 ARCH(6);
7198 tmp = load_reg(s, rs);
7199 tmp2 = load_reg(s, rm);
7200 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7201 gen_addq_lo(s, tmp64, rn);
7202 gen_addq_lo(s, tmp64, rd);
7203 gen_storeq_reg(s, rn, rd, tmp64);
7204 tcg_temp_free_i64(tmp64);
7205 break;
7206 case 8: case 9: case 10: case 11:
7207 case 12: case 13: case 14: case 15:
7208 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7209 tmp = load_reg(s, rs);
7210 tmp2 = load_reg(s, rm);
8aac08b1 7211 if (insn & (1 << 22)) {
a7812ae4 7212 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7213 } else {
a7812ae4 7214 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7215 }
7216 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7217 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7218 }
8aac08b1 7219 if (insn & (1 << 20)) {
a7812ae4 7220 gen_logicq_cc(tmp64);
8aac08b1 7221 }
a7812ae4 7222 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7223 tcg_temp_free_i64(tmp64);
9ee6e8bb 7224 break;
8aac08b1
AJ
7225 default:
7226 goto illegal_op;
9ee6e8bb
PB
7227 }
7228 } else {
7229 rn = (insn >> 16) & 0xf;
7230 rd = (insn >> 12) & 0xf;
7231 if (insn & (1 << 23)) {
7232 /* load/store exclusive */
86753403
PB
7233 op1 = (insn >> 21) & 0x3;
7234 if (op1)
a47f43d2 7235 ARCH(6K);
86753403
PB
7236 else
7237 ARCH(6);
3174f8e9 7238 addr = tcg_temp_local_new_i32();
98a46317 7239 load_reg_var(s, addr, rn);
9ee6e8bb 7240 if (insn & (1 << 20)) {
86753403
PB
7241 switch (op1) {
7242 case 0: /* ldrex */
426f5abc 7243 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7244 break;
7245 case 1: /* ldrexd */
426f5abc 7246 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7247 break;
7248 case 2: /* ldrexb */
426f5abc 7249 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7250 break;
7251 case 3: /* ldrexh */
426f5abc 7252 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7253 break;
7254 default:
7255 abort();
7256 }
9ee6e8bb
PB
7257 } else {
7258 rm = insn & 0xf;
86753403
PB
7259 switch (op1) {
7260 case 0: /* strex */
426f5abc 7261 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7262 break;
7263 case 1: /* strexd */
502e64fe 7264 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7265 break;
7266 case 2: /* strexb */
426f5abc 7267 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7268 break;
7269 case 3: /* strexh */
426f5abc 7270 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7271 break;
7272 default:
7273 abort();
7274 }
9ee6e8bb 7275 }
3174f8e9 7276 tcg_temp_free(addr);
9ee6e8bb
PB
7277 } else {
7278 /* SWP instruction */
7279 rm = (insn) & 0xf;
7280
8984bd2e
PB
7281 /* ??? This is not really atomic. However we know
7282 we never have multiple CPUs running in parallel,
7283 so it is good enough. */
7284 addr = load_reg(s, rn);
7285 tmp = load_reg(s, rm);
9ee6e8bb 7286 if (insn & (1 << 22)) {
8984bd2e
PB
7287 tmp2 = gen_ld8u(addr, IS_USER(s));
7288 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7289 } else {
8984bd2e
PB
7290 tmp2 = gen_ld32(addr, IS_USER(s));
7291 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7292 }
7d1b0095 7293 tcg_temp_free_i32(addr);
8984bd2e 7294 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7295 }
7296 }
7297 } else {
7298 int address_offset;
7299 int load;
7300 /* Misc load/store */
7301 rn = (insn >> 16) & 0xf;
7302 rd = (insn >> 12) & 0xf;
b0109805 7303 addr = load_reg(s, rn);
9ee6e8bb 7304 if (insn & (1 << 24))
b0109805 7305 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7306 address_offset = 0;
7307 if (insn & (1 << 20)) {
7308 /* load */
7309 switch(sh) {
7310 case 1:
b0109805 7311 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7312 break;
7313 case 2:
b0109805 7314 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7315 break;
7316 default:
7317 case 3:
b0109805 7318 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7319 break;
7320 }
7321 load = 1;
7322 } else if (sh & 2) {
be5e7a76 7323 ARCH(5TE);
9ee6e8bb
PB
7324 /* doubleword */
7325 if (sh & 1) {
7326 /* store */
b0109805
PB
7327 tmp = load_reg(s, rd);
7328 gen_st32(tmp, addr, IS_USER(s));
7329 tcg_gen_addi_i32(addr, addr, 4);
7330 tmp = load_reg(s, rd + 1);
7331 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7332 load = 0;
7333 } else {
7334 /* load */
b0109805
PB
7335 tmp = gen_ld32(addr, IS_USER(s));
7336 store_reg(s, rd, tmp);
7337 tcg_gen_addi_i32(addr, addr, 4);
7338 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7339 rd++;
7340 load = 1;
7341 }
7342 address_offset = -4;
7343 } else {
7344 /* store */
b0109805
PB
7345 tmp = load_reg(s, rd);
7346 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7347 load = 0;
7348 }
7349 /* Perform base writeback before the loaded value to
7350 ensure correct behavior with overlapping index registers.
7351 ldrd with base writeback is is undefined if the
7352 destination and index registers overlap. */
7353 if (!(insn & (1 << 24))) {
b0109805
PB
7354 gen_add_datah_offset(s, insn, address_offset, addr);
7355 store_reg(s, rn, addr);
9ee6e8bb
PB
7356 } else if (insn & (1 << 21)) {
7357 if (address_offset)
b0109805
PB
7358 tcg_gen_addi_i32(addr, addr, address_offset);
7359 store_reg(s, rn, addr);
7360 } else {
7d1b0095 7361 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7362 }
7363 if (load) {
7364 /* Complete the load. */
b0109805 7365 store_reg(s, rd, tmp);
9ee6e8bb
PB
7366 }
7367 }
7368 break;
7369 case 0x4:
7370 case 0x5:
7371 goto do_ldst;
7372 case 0x6:
7373 case 0x7:
7374 if (insn & (1 << 4)) {
7375 ARCH(6);
7376 /* Armv6 Media instructions. */
7377 rm = insn & 0xf;
7378 rn = (insn >> 16) & 0xf;
2c0262af 7379 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7380 rs = (insn >> 8) & 0xf;
7381 switch ((insn >> 23) & 3) {
7382 case 0: /* Parallel add/subtract. */
7383 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7384 tmp = load_reg(s, rn);
7385 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7386 sh = (insn >> 5) & 7;
7387 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7388 goto illegal_op;
6ddbc6e4 7389 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7390 tcg_temp_free_i32(tmp2);
6ddbc6e4 7391 store_reg(s, rd, tmp);
9ee6e8bb
PB
7392 break;
7393 case 1:
7394 if ((insn & 0x00700020) == 0) {
6c95676b 7395 /* Halfword pack. */
3670669c
PB
7396 tmp = load_reg(s, rn);
7397 tmp2 = load_reg(s, rm);
9ee6e8bb 7398 shift = (insn >> 7) & 0x1f;
3670669c
PB
7399 if (insn & (1 << 6)) {
7400 /* pkhtb */
22478e79
AZ
7401 if (shift == 0)
7402 shift = 31;
7403 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7404 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7405 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7406 } else {
7407 /* pkhbt */
22478e79
AZ
7408 if (shift)
7409 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7410 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7411 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7412 }
7413 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7414 tcg_temp_free_i32(tmp2);
3670669c 7415 store_reg(s, rd, tmp);
9ee6e8bb
PB
7416 } else if ((insn & 0x00200020) == 0x00200000) {
7417 /* [us]sat */
6ddbc6e4 7418 tmp = load_reg(s, rm);
9ee6e8bb
PB
7419 shift = (insn >> 7) & 0x1f;
7420 if (insn & (1 << 6)) {
7421 if (shift == 0)
7422 shift = 31;
6ddbc6e4 7423 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7424 } else {
6ddbc6e4 7425 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7426 }
7427 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7428 tmp2 = tcg_const_i32(sh);
7429 if (insn & (1 << 22))
9ef39277 7430 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7431 else
9ef39277 7432 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7433 tcg_temp_free_i32(tmp2);
6ddbc6e4 7434 store_reg(s, rd, tmp);
9ee6e8bb
PB
7435 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7436 /* [us]sat16 */
6ddbc6e4 7437 tmp = load_reg(s, rm);
9ee6e8bb 7438 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7439 tmp2 = tcg_const_i32(sh);
7440 if (insn & (1 << 22))
9ef39277 7441 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7442 else
9ef39277 7443 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7444 tcg_temp_free_i32(tmp2);
6ddbc6e4 7445 store_reg(s, rd, tmp);
9ee6e8bb
PB
7446 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7447 /* Select bytes. */
6ddbc6e4
PB
7448 tmp = load_reg(s, rn);
7449 tmp2 = load_reg(s, rm);
7d1b0095 7450 tmp3 = tcg_temp_new_i32();
0ecb72a5 7451 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7452 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7453 tcg_temp_free_i32(tmp3);
7454 tcg_temp_free_i32(tmp2);
6ddbc6e4 7455 store_reg(s, rd, tmp);
9ee6e8bb 7456 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7457 tmp = load_reg(s, rm);
9ee6e8bb 7458 shift = (insn >> 10) & 3;
1301f322 7459 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7460 rotate, a shift is sufficient. */
7461 if (shift != 0)
f669df27 7462 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7463 op1 = (insn >> 20) & 7;
7464 switch (op1) {
5e3f878a
PB
7465 case 0: gen_sxtb16(tmp); break;
7466 case 2: gen_sxtb(tmp); break;
7467 case 3: gen_sxth(tmp); break;
7468 case 4: gen_uxtb16(tmp); break;
7469 case 6: gen_uxtb(tmp); break;
7470 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7471 default: goto illegal_op;
7472 }
7473 if (rn != 15) {
5e3f878a 7474 tmp2 = load_reg(s, rn);
9ee6e8bb 7475 if ((op1 & 3) == 0) {
5e3f878a 7476 gen_add16(tmp, tmp2);
9ee6e8bb 7477 } else {
5e3f878a 7478 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7479 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7480 }
7481 }
6c95676b 7482 store_reg(s, rd, tmp);
9ee6e8bb
PB
7483 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7484 /* rev */
b0109805 7485 tmp = load_reg(s, rm);
9ee6e8bb
PB
7486 if (insn & (1 << 22)) {
7487 if (insn & (1 << 7)) {
b0109805 7488 gen_revsh(tmp);
9ee6e8bb
PB
7489 } else {
7490 ARCH(6T2);
b0109805 7491 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7492 }
7493 } else {
7494 if (insn & (1 << 7))
b0109805 7495 gen_rev16(tmp);
9ee6e8bb 7496 else
66896cb8 7497 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7498 }
b0109805 7499 store_reg(s, rd, tmp);
9ee6e8bb
PB
7500 } else {
7501 goto illegal_op;
7502 }
7503 break;
7504 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7505 switch ((insn >> 20) & 0x7) {
7506 case 5:
7507 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7508 /* op2 not 00x or 11x : UNDEF */
7509 goto illegal_op;
7510 }
838fa72d
AJ
7511 /* Signed multiply most significant [accumulate].
7512 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7513 tmp = load_reg(s, rm);
7514 tmp2 = load_reg(s, rs);
a7812ae4 7515 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7516
955a7dd5 7517 if (rd != 15) {
838fa72d 7518 tmp = load_reg(s, rd);
9ee6e8bb 7519 if (insn & (1 << 6)) {
838fa72d 7520 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7521 } else {
838fa72d 7522 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7523 }
7524 }
838fa72d
AJ
7525 if (insn & (1 << 5)) {
7526 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7527 }
7528 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7529 tmp = tcg_temp_new_i32();
838fa72d
AJ
7530 tcg_gen_trunc_i64_i32(tmp, tmp64);
7531 tcg_temp_free_i64(tmp64);
955a7dd5 7532 store_reg(s, rn, tmp);
41e9564d
PM
7533 break;
7534 case 0:
7535 case 4:
7536 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7537 if (insn & (1 << 7)) {
7538 goto illegal_op;
7539 }
7540 tmp = load_reg(s, rm);
7541 tmp2 = load_reg(s, rs);
9ee6e8bb 7542 if (insn & (1 << 5))
5e3f878a
PB
7543 gen_swap_half(tmp2);
7544 gen_smul_dual(tmp, tmp2);
5e3f878a 7545 if (insn & (1 << 6)) {
e1d177b9 7546 /* This subtraction cannot overflow. */
5e3f878a
PB
7547 tcg_gen_sub_i32(tmp, tmp, tmp2);
7548 } else {
e1d177b9
PM
7549 /* This addition cannot overflow 32 bits;
7550 * however it may overflow considered as a signed
7551 * operation, in which case we must set the Q flag.
7552 */
9ef39277 7553 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7554 }
7d1b0095 7555 tcg_temp_free_i32(tmp2);
9ee6e8bb 7556 if (insn & (1 << 22)) {
5e3f878a 7557 /* smlald, smlsld */
a7812ae4
PB
7558 tmp64 = tcg_temp_new_i64();
7559 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7560 tcg_temp_free_i32(tmp);
a7812ae4
PB
7561 gen_addq(s, tmp64, rd, rn);
7562 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7563 tcg_temp_free_i64(tmp64);
9ee6e8bb 7564 } else {
5e3f878a 7565 /* smuad, smusd, smlad, smlsd */
22478e79 7566 if (rd != 15)
9ee6e8bb 7567 {
22478e79 7568 tmp2 = load_reg(s, rd);
9ef39277 7569 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7570 tcg_temp_free_i32(tmp2);
9ee6e8bb 7571 }
22478e79 7572 store_reg(s, rn, tmp);
9ee6e8bb 7573 }
41e9564d 7574 break;
b8b8ea05
PM
7575 case 1:
7576 case 3:
7577 /* SDIV, UDIV */
7578 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7579 goto illegal_op;
7580 }
7581 if (((insn >> 5) & 7) || (rd != 15)) {
7582 goto illegal_op;
7583 }
7584 tmp = load_reg(s, rm);
7585 tmp2 = load_reg(s, rs);
7586 if (insn & (1 << 21)) {
7587 gen_helper_udiv(tmp, tmp, tmp2);
7588 } else {
7589 gen_helper_sdiv(tmp, tmp, tmp2);
7590 }
7591 tcg_temp_free_i32(tmp2);
7592 store_reg(s, rn, tmp);
7593 break;
41e9564d
PM
7594 default:
7595 goto illegal_op;
9ee6e8bb
PB
7596 }
7597 break;
7598 case 3:
7599 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7600 switch (op1) {
7601 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7602 ARCH(6);
7603 tmp = load_reg(s, rm);
7604 tmp2 = load_reg(s, rs);
7605 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7606 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7607 if (rd != 15) {
7608 tmp2 = load_reg(s, rd);
6ddbc6e4 7609 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7610 tcg_temp_free_i32(tmp2);
9ee6e8bb 7611 }
ded9d295 7612 store_reg(s, rn, tmp);
9ee6e8bb
PB
7613 break;
7614 case 0x20: case 0x24: case 0x28: case 0x2c:
7615 /* Bitfield insert/clear. */
7616 ARCH(6T2);
7617 shift = (insn >> 7) & 0x1f;
7618 i = (insn >> 16) & 0x1f;
7619 i = i + 1 - shift;
7620 if (rm == 15) {
7d1b0095 7621 tmp = tcg_temp_new_i32();
5e3f878a 7622 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7623 } else {
5e3f878a 7624 tmp = load_reg(s, rm);
9ee6e8bb
PB
7625 }
7626 if (i != 32) {
5e3f878a 7627 tmp2 = load_reg(s, rd);
8f8e3aa4 7628 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7629 tcg_temp_free_i32(tmp2);
9ee6e8bb 7630 }
5e3f878a 7631 store_reg(s, rd, tmp);
9ee6e8bb
PB
7632 break;
7633 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7634 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7635 ARCH(6T2);
5e3f878a 7636 tmp = load_reg(s, rm);
9ee6e8bb
PB
7637 shift = (insn >> 7) & 0x1f;
7638 i = ((insn >> 16) & 0x1f) + 1;
7639 if (shift + i > 32)
7640 goto illegal_op;
7641 if (i < 32) {
7642 if (op1 & 0x20) {
5e3f878a 7643 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7644 } else {
5e3f878a 7645 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7646 }
7647 }
5e3f878a 7648 store_reg(s, rd, tmp);
9ee6e8bb
PB
7649 break;
7650 default:
7651 goto illegal_op;
7652 }
7653 break;
7654 }
7655 break;
7656 }
7657 do_ldst:
7658 /* Check for undefined extension instructions
7659 * per the ARM Bible IE:
7660 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7661 */
7662 sh = (0xf << 20) | (0xf << 4);
7663 if (op1 == 0x7 && ((insn & sh) == sh))
7664 {
7665 goto illegal_op;
7666 }
7667 /* load/store byte/word */
7668 rn = (insn >> 16) & 0xf;
7669 rd = (insn >> 12) & 0xf;
b0109805 7670 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7671 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7672 if (insn & (1 << 24))
b0109805 7673 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7674 if (insn & (1 << 20)) {
7675 /* load */
9ee6e8bb 7676 if (insn & (1 << 22)) {
b0109805 7677 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7678 } else {
b0109805 7679 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7680 }
9ee6e8bb
PB
7681 } else {
7682 /* store */
b0109805 7683 tmp = load_reg(s, rd);
9ee6e8bb 7684 if (insn & (1 << 22))
b0109805 7685 gen_st8(tmp, tmp2, i);
9ee6e8bb 7686 else
b0109805 7687 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7688 }
7689 if (!(insn & (1 << 24))) {
b0109805
PB
7690 gen_add_data_offset(s, insn, tmp2);
7691 store_reg(s, rn, tmp2);
7692 } else if (insn & (1 << 21)) {
7693 store_reg(s, rn, tmp2);
7694 } else {
7d1b0095 7695 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7696 }
7697 if (insn & (1 << 20)) {
7698 /* Complete the load. */
be5e7a76 7699 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7700 }
7701 break;
7702 case 0x08:
7703 case 0x09:
7704 {
7705 int j, n, user, loaded_base;
b0109805 7706 TCGv loaded_var;
9ee6e8bb
PB
7707 /* load/store multiple words */
7708 /* XXX: store correct base if write back */
7709 user = 0;
7710 if (insn & (1 << 22)) {
7711 if (IS_USER(s))
7712 goto illegal_op; /* only usable in supervisor mode */
7713
7714 if ((insn & (1 << 15)) == 0)
7715 user = 1;
7716 }
7717 rn = (insn >> 16) & 0xf;
b0109805 7718 addr = load_reg(s, rn);
9ee6e8bb
PB
7719
7720 /* compute total size */
7721 loaded_base = 0;
a50f5b91 7722 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7723 n = 0;
7724 for(i=0;i<16;i++) {
7725 if (insn & (1 << i))
7726 n++;
7727 }
7728 /* XXX: test invalid n == 0 case ? */
7729 if (insn & (1 << 23)) {
7730 if (insn & (1 << 24)) {
7731 /* pre increment */
b0109805 7732 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7733 } else {
7734 /* post increment */
7735 }
7736 } else {
7737 if (insn & (1 << 24)) {
7738 /* pre decrement */
b0109805 7739 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7740 } else {
7741 /* post decrement */
7742 if (n != 1)
b0109805 7743 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7744 }
7745 }
7746 j = 0;
7747 for(i=0;i<16;i++) {
7748 if (insn & (1 << i)) {
7749 if (insn & (1 << 20)) {
7750 /* load */
b0109805 7751 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7752 if (user) {
b75263d6 7753 tmp2 = tcg_const_i32(i);
1ce94f81 7754 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7755 tcg_temp_free_i32(tmp2);
7d1b0095 7756 tcg_temp_free_i32(tmp);
9ee6e8bb 7757 } else if (i == rn) {
b0109805 7758 loaded_var = tmp;
9ee6e8bb
PB
7759 loaded_base = 1;
7760 } else {
be5e7a76 7761 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7762 }
7763 } else {
7764 /* store */
7765 if (i == 15) {
7766 /* special case: r15 = PC + 8 */
7767 val = (long)s->pc + 4;
7d1b0095 7768 tmp = tcg_temp_new_i32();
b0109805 7769 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7770 } else if (user) {
7d1b0095 7771 tmp = tcg_temp_new_i32();
b75263d6 7772 tmp2 = tcg_const_i32(i);
9ef39277 7773 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7774 tcg_temp_free_i32(tmp2);
9ee6e8bb 7775 } else {
b0109805 7776 tmp = load_reg(s, i);
9ee6e8bb 7777 }
b0109805 7778 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7779 }
7780 j++;
7781 /* no need to add after the last transfer */
7782 if (j != n)
b0109805 7783 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7784 }
7785 }
7786 if (insn & (1 << 21)) {
7787 /* write back */
7788 if (insn & (1 << 23)) {
7789 if (insn & (1 << 24)) {
7790 /* pre increment */
7791 } else {
7792 /* post increment */
b0109805 7793 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7794 }
7795 } else {
7796 if (insn & (1 << 24)) {
7797 /* pre decrement */
7798 if (n != 1)
b0109805 7799 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7800 } else {
7801 /* post decrement */
b0109805 7802 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7803 }
7804 }
b0109805
PB
7805 store_reg(s, rn, addr);
7806 } else {
7d1b0095 7807 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7808 }
7809 if (loaded_base) {
b0109805 7810 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7811 }
7812 if ((insn & (1 << 22)) && !user) {
7813 /* Restore CPSR from SPSR. */
d9ba4830
PB
7814 tmp = load_cpu_field(spsr);
7815 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7816 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7817 s->is_jmp = DISAS_UPDATE;
7818 }
7819 }
7820 break;
7821 case 0xa:
7822 case 0xb:
7823 {
7824 int32_t offset;
7825
7826 /* branch (and link) */
7827 val = (int32_t)s->pc;
7828 if (insn & (1 << 24)) {
7d1b0095 7829 tmp = tcg_temp_new_i32();
5e3f878a
PB
7830 tcg_gen_movi_i32(tmp, val);
7831 store_reg(s, 14, tmp);
9ee6e8bb
PB
7832 }
7833 offset = (((int32_t)insn << 8) >> 8);
7834 val += (offset << 2) + 4;
7835 gen_jmp(s, val);
7836 }
7837 break;
7838 case 0xc:
7839 case 0xd:
7840 case 0xe:
7841 /* Coprocessor. */
7842 if (disas_coproc_insn(env, s, insn))
7843 goto illegal_op;
7844 break;
7845 case 0xf:
7846 /* swi */
5e3f878a 7847 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7848 s->is_jmp = DISAS_SWI;
7849 break;
7850 default:
7851 illegal_op:
bc4a0de0 7852 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7853 break;
7854 }
7855 }
7856}
7857
/* Predicate: opcodes 0..7 in the Thumb-2 data-processing encoding are
   the logical operations (which take their carry from the shifter);
   8 and above are the arithmetic ones.  */
static int
thumb2_logic_op(int op)
{
    return op <= 7;
}
7864
7865/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7866 then set condition code flags based on the result of the operation.
7867 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7868 to the high bit of T1.
7869 Returns zero if the opcode is valid. */
7870
7871static int
396e467c 7872gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7873{
7874 int logic_cc;
7875
7876 logic_cc = 0;
7877 switch (op) {
7878 case 0: /* and */
396e467c 7879 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7880 logic_cc = conds;
7881 break;
7882 case 1: /* bic */
f669df27 7883 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7884 logic_cc = conds;
7885 break;
7886 case 2: /* orr */
396e467c 7887 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7888 logic_cc = conds;
7889 break;
7890 case 3: /* orn */
29501f1b 7891 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7892 logic_cc = conds;
7893 break;
7894 case 4: /* eor */
396e467c 7895 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7896 logic_cc = conds;
7897 break;
7898 case 8: /* add */
7899 if (conds)
72485ec4 7900 gen_add_CC(t0, t0, t1);
9ee6e8bb 7901 else
396e467c 7902 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7903 break;
7904 case 10: /* adc */
7905 if (conds)
9ef39277 7906 gen_helper_adc_cc(t0, cpu_env, t0, t1);
9ee6e8bb 7907 else
396e467c 7908 gen_adc(t0, t1);
9ee6e8bb
PB
7909 break;
7910 case 11: /* sbc */
7911 if (conds)
9ef39277 7912 gen_helper_sbc_cc(t0, cpu_env, t0, t1);
9ee6e8bb 7913 else
396e467c 7914 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7915 break;
7916 case 13: /* sub */
7917 if (conds)
72485ec4 7918 gen_sub_CC(t0, t0, t1);
9ee6e8bb 7919 else
396e467c 7920 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7921 break;
7922 case 14: /* rsb */
7923 if (conds)
72485ec4 7924 gen_sub_CC(t0, t1, t0);
9ee6e8bb 7925 else
396e467c 7926 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7927 break;
7928 default: /* 5, 6, 7, 9, 12, 15. */
7929 return 1;
7930 }
7931 if (logic_cc) {
396e467c 7932 gen_logic_CC(t0);
9ee6e8bb 7933 if (shifter_out)
396e467c 7934 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7935 }
7936 return 0;
7937}
7938
7939/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7940 is not legal. */
0ecb72a5 7941static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 7942{
b0109805 7943 uint32_t insn, imm, shift, offset;
9ee6e8bb 7944 uint32_t rd, rn, rm, rs;
b26eefb6 7945 TCGv tmp;
6ddbc6e4
PB
7946 TCGv tmp2;
7947 TCGv tmp3;
b0109805 7948 TCGv addr;
a7812ae4 7949 TCGv_i64 tmp64;
9ee6e8bb
PB
7950 int op;
7951 int shiftop;
7952 int conds;
7953 int logic_cc;
7954
7955 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7956 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7957 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7958 16-bit instructions to get correct prefetch abort behavior. */
7959 insn = insn_hw1;
7960 if ((insn & (1 << 12)) == 0) {
be5e7a76 7961 ARCH(5);
9ee6e8bb
PB
7962 /* Second half of blx. */
7963 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7964 tmp = load_reg(s, 14);
7965 tcg_gen_addi_i32(tmp, tmp, offset);
7966 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7967
7d1b0095 7968 tmp2 = tcg_temp_new_i32();
b0109805 7969 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7970 store_reg(s, 14, tmp2);
7971 gen_bx(s, tmp);
9ee6e8bb
PB
7972 return 0;
7973 }
7974 if (insn & (1 << 11)) {
7975 /* Second half of bl. */
7976 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7977 tmp = load_reg(s, 14);
6a0d8a1d 7978 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7979
7d1b0095 7980 tmp2 = tcg_temp_new_i32();
b0109805 7981 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7982 store_reg(s, 14, tmp2);
7983 gen_bx(s, tmp);
9ee6e8bb
PB
7984 return 0;
7985 }
7986 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7987 /* Instruction spans a page boundary. Implement it as two
7988 16-bit instructions in case the second half causes an
7989 prefetch abort. */
7990 offset = ((int32_t)insn << 21) >> 9;
396e467c 7991 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7992 return 0;
7993 }
7994 /* Fall through to 32-bit decode. */
7995 }
7996
d31dd73e 7997 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
7998 s->pc += 2;
7999 insn |= (uint32_t)insn_hw1 << 16;
8000
8001 if ((insn & 0xf800e800) != 0xf000e800) {
8002 ARCH(6T2);
8003 }
8004
8005 rn = (insn >> 16) & 0xf;
8006 rs = (insn >> 12) & 0xf;
8007 rd = (insn >> 8) & 0xf;
8008 rm = insn & 0xf;
8009 switch ((insn >> 25) & 0xf) {
8010 case 0: case 1: case 2: case 3:
8011 /* 16-bit instructions. Should never happen. */
8012 abort();
8013 case 4:
8014 if (insn & (1 << 22)) {
8015 /* Other load/store, table branch. */
8016 if (insn & 0x01200000) {
8017 /* Load/store doubleword. */
8018 if (rn == 15) {
7d1b0095 8019 addr = tcg_temp_new_i32();
b0109805 8020 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8021 } else {
b0109805 8022 addr = load_reg(s, rn);
9ee6e8bb
PB
8023 }
8024 offset = (insn & 0xff) * 4;
8025 if ((insn & (1 << 23)) == 0)
8026 offset = -offset;
8027 if (insn & (1 << 24)) {
b0109805 8028 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8029 offset = 0;
8030 }
8031 if (insn & (1 << 20)) {
8032 /* ldrd */
b0109805
PB
8033 tmp = gen_ld32(addr, IS_USER(s));
8034 store_reg(s, rs, tmp);
8035 tcg_gen_addi_i32(addr, addr, 4);
8036 tmp = gen_ld32(addr, IS_USER(s));
8037 store_reg(s, rd, tmp);
9ee6e8bb
PB
8038 } else {
8039 /* strd */
b0109805
PB
8040 tmp = load_reg(s, rs);
8041 gen_st32(tmp, addr, IS_USER(s));
8042 tcg_gen_addi_i32(addr, addr, 4);
8043 tmp = load_reg(s, rd);
8044 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8045 }
8046 if (insn & (1 << 21)) {
8047 /* Base writeback. */
8048 if (rn == 15)
8049 goto illegal_op;
b0109805
PB
8050 tcg_gen_addi_i32(addr, addr, offset - 4);
8051 store_reg(s, rn, addr);
8052 } else {
7d1b0095 8053 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8054 }
8055 } else if ((insn & (1 << 23)) == 0) {
8056 /* Load/store exclusive word. */
3174f8e9 8057 addr = tcg_temp_local_new();
98a46317 8058 load_reg_var(s, addr, rn);
426f5abc 8059 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8060 if (insn & (1 << 20)) {
426f5abc 8061 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8062 } else {
426f5abc 8063 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8064 }
3174f8e9 8065 tcg_temp_free(addr);
9ee6e8bb
PB
8066 } else if ((insn & (1 << 6)) == 0) {
8067 /* Table Branch. */
8068 if (rn == 15) {
7d1b0095 8069 addr = tcg_temp_new_i32();
b0109805 8070 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8071 } else {
b0109805 8072 addr = load_reg(s, rn);
9ee6e8bb 8073 }
b26eefb6 8074 tmp = load_reg(s, rm);
b0109805 8075 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8076 if (insn & (1 << 4)) {
8077 /* tbh */
b0109805 8078 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8079 tcg_temp_free_i32(tmp);
b0109805 8080 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8081 } else { /* tbb */
7d1b0095 8082 tcg_temp_free_i32(tmp);
b0109805 8083 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8084 }
7d1b0095 8085 tcg_temp_free_i32(addr);
b0109805
PB
8086 tcg_gen_shli_i32(tmp, tmp, 1);
8087 tcg_gen_addi_i32(tmp, tmp, s->pc);
8088 store_reg(s, 15, tmp);
9ee6e8bb
PB
8089 } else {
8090 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8091 ARCH(7);
9ee6e8bb 8092 op = (insn >> 4) & 0x3;
426f5abc
PB
8093 if (op == 2) {
8094 goto illegal_op;
8095 }
3174f8e9 8096 addr = tcg_temp_local_new();
98a46317 8097 load_reg_var(s, addr, rn);
9ee6e8bb 8098 if (insn & (1 << 20)) {
426f5abc 8099 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8100 } else {
426f5abc 8101 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8102 }
3174f8e9 8103 tcg_temp_free(addr);
9ee6e8bb
PB
8104 }
8105 } else {
8106 /* Load/store multiple, RFE, SRS. */
8107 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8108 /* Not available in user mode. */
b0109805 8109 if (IS_USER(s))
9ee6e8bb
PB
8110 goto illegal_op;
8111 if (insn & (1 << 20)) {
8112 /* rfe */
b0109805
PB
8113 addr = load_reg(s, rn);
8114 if ((insn & (1 << 24)) == 0)
8115 tcg_gen_addi_i32(addr, addr, -8);
8116 /* Load PC into tmp and CPSR into tmp2. */
8117 tmp = gen_ld32(addr, 0);
8118 tcg_gen_addi_i32(addr, addr, 4);
8119 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8120 if (insn & (1 << 21)) {
8121 /* Base writeback. */
b0109805
PB
8122 if (insn & (1 << 24)) {
8123 tcg_gen_addi_i32(addr, addr, 4);
8124 } else {
8125 tcg_gen_addi_i32(addr, addr, -4);
8126 }
8127 store_reg(s, rn, addr);
8128 } else {
7d1b0095 8129 tcg_temp_free_i32(addr);
9ee6e8bb 8130 }
b0109805 8131 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8132 } else {
8133 /* srs */
8134 op = (insn & 0x1f);
7d1b0095 8135 addr = tcg_temp_new_i32();
39ea3d4e
PM
8136 tmp = tcg_const_i32(op);
8137 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8138 tcg_temp_free_i32(tmp);
9ee6e8bb 8139 if ((insn & (1 << 24)) == 0) {
b0109805 8140 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8141 }
b0109805
PB
8142 tmp = load_reg(s, 14);
8143 gen_st32(tmp, addr, 0);
8144 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8145 tmp = tcg_temp_new_i32();
9ef39277 8146 gen_helper_cpsr_read(tmp, cpu_env);
b0109805 8147 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8148 if (insn & (1 << 21)) {
8149 if ((insn & (1 << 24)) == 0) {
b0109805 8150 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8151 } else {
b0109805 8152 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8153 }
39ea3d4e
PM
8154 tmp = tcg_const_i32(op);
8155 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8156 tcg_temp_free_i32(tmp);
b0109805 8157 } else {
7d1b0095 8158 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8159 }
8160 }
8161 } else {
5856d44e
YO
8162 int i, loaded_base = 0;
8163 TCGv loaded_var;
9ee6e8bb 8164 /* Load/store multiple. */
b0109805 8165 addr = load_reg(s, rn);
9ee6e8bb
PB
8166 offset = 0;
8167 for (i = 0; i < 16; i++) {
8168 if (insn & (1 << i))
8169 offset += 4;
8170 }
8171 if (insn & (1 << 24)) {
b0109805 8172 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8173 }
8174
5856d44e 8175 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8176 for (i = 0; i < 16; i++) {
8177 if ((insn & (1 << i)) == 0)
8178 continue;
8179 if (insn & (1 << 20)) {
8180 /* Load. */
b0109805 8181 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8182 if (i == 15) {
b0109805 8183 gen_bx(s, tmp);
5856d44e
YO
8184 } else if (i == rn) {
8185 loaded_var = tmp;
8186 loaded_base = 1;
9ee6e8bb 8187 } else {
b0109805 8188 store_reg(s, i, tmp);
9ee6e8bb
PB
8189 }
8190 } else {
8191 /* Store. */
b0109805
PB
8192 tmp = load_reg(s, i);
8193 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8194 }
b0109805 8195 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8196 }
5856d44e
YO
8197 if (loaded_base) {
8198 store_reg(s, rn, loaded_var);
8199 }
9ee6e8bb
PB
8200 if (insn & (1 << 21)) {
8201 /* Base register writeback. */
8202 if (insn & (1 << 24)) {
b0109805 8203 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8204 }
8205 /* Fault if writeback register is in register list. */
8206 if (insn & (1 << rn))
8207 goto illegal_op;
b0109805
PB
8208 store_reg(s, rn, addr);
8209 } else {
7d1b0095 8210 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8211 }
8212 }
8213 }
8214 break;
2af9ab77
JB
8215 case 5:
8216
9ee6e8bb 8217 op = (insn >> 21) & 0xf;
2af9ab77
JB
8218 if (op == 6) {
8219 /* Halfword pack. */
8220 tmp = load_reg(s, rn);
8221 tmp2 = load_reg(s, rm);
8222 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8223 if (insn & (1 << 5)) {
8224 /* pkhtb */
8225 if (shift == 0)
8226 shift = 31;
8227 tcg_gen_sari_i32(tmp2, tmp2, shift);
8228 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8229 tcg_gen_ext16u_i32(tmp2, tmp2);
8230 } else {
8231 /* pkhbt */
8232 if (shift)
8233 tcg_gen_shli_i32(tmp2, tmp2, shift);
8234 tcg_gen_ext16u_i32(tmp, tmp);
8235 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8236 }
8237 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8238 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8239 store_reg(s, rd, tmp);
8240 } else {
2af9ab77
JB
8241 /* Data processing register constant shift. */
8242 if (rn == 15) {
7d1b0095 8243 tmp = tcg_temp_new_i32();
2af9ab77
JB
8244 tcg_gen_movi_i32(tmp, 0);
8245 } else {
8246 tmp = load_reg(s, rn);
8247 }
8248 tmp2 = load_reg(s, rm);
8249
8250 shiftop = (insn >> 4) & 3;
8251 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8252 conds = (insn & (1 << 20)) != 0;
8253 logic_cc = (conds && thumb2_logic_op(op));
8254 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8255 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8256 goto illegal_op;
7d1b0095 8257 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8258 if (rd != 15) {
8259 store_reg(s, rd, tmp);
8260 } else {
7d1b0095 8261 tcg_temp_free_i32(tmp);
2af9ab77 8262 }
3174f8e9 8263 }
9ee6e8bb
PB
8264 break;
8265 case 13: /* Misc data processing. */
8266 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8267 if (op < 4 && (insn & 0xf000) != 0xf000)
8268 goto illegal_op;
8269 switch (op) {
8270 case 0: /* Register controlled shift. */
8984bd2e
PB
8271 tmp = load_reg(s, rn);
8272 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8273 if ((insn & 0x70) != 0)
8274 goto illegal_op;
8275 op = (insn >> 21) & 3;
8984bd2e
PB
8276 logic_cc = (insn & (1 << 20)) != 0;
8277 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8278 if (logic_cc)
8279 gen_logic_CC(tmp);
21aeb343 8280 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8281 break;
8282 case 1: /* Sign/zero extend. */
5e3f878a 8283 tmp = load_reg(s, rm);
9ee6e8bb 8284 shift = (insn >> 4) & 3;
1301f322 8285 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8286 rotate, a shift is sufficient. */
8287 if (shift != 0)
f669df27 8288 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8289 op = (insn >> 20) & 7;
8290 switch (op) {
5e3f878a
PB
8291 case 0: gen_sxth(tmp); break;
8292 case 1: gen_uxth(tmp); break;
8293 case 2: gen_sxtb16(tmp); break;
8294 case 3: gen_uxtb16(tmp); break;
8295 case 4: gen_sxtb(tmp); break;
8296 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8297 default: goto illegal_op;
8298 }
8299 if (rn != 15) {
5e3f878a 8300 tmp2 = load_reg(s, rn);
9ee6e8bb 8301 if ((op >> 1) == 1) {
5e3f878a 8302 gen_add16(tmp, tmp2);
9ee6e8bb 8303 } else {
5e3f878a 8304 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8305 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8306 }
8307 }
5e3f878a 8308 store_reg(s, rd, tmp);
9ee6e8bb
PB
8309 break;
8310 case 2: /* SIMD add/subtract. */
8311 op = (insn >> 20) & 7;
8312 shift = (insn >> 4) & 7;
8313 if ((op & 3) == 3 || (shift & 3) == 3)
8314 goto illegal_op;
6ddbc6e4
PB
8315 tmp = load_reg(s, rn);
8316 tmp2 = load_reg(s, rm);
8317 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8318 tcg_temp_free_i32(tmp2);
6ddbc6e4 8319 store_reg(s, rd, tmp);
9ee6e8bb
PB
8320 break;
8321 case 3: /* Other data processing. */
8322 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8323 if (op < 4) {
8324 /* Saturating add/subtract. */
d9ba4830
PB
8325 tmp = load_reg(s, rn);
8326 tmp2 = load_reg(s, rm);
9ee6e8bb 8327 if (op & 1)
9ef39277 8328 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8329 if (op & 2)
9ef39277 8330 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8331 else
9ef39277 8332 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8333 tcg_temp_free_i32(tmp2);
9ee6e8bb 8334 } else {
d9ba4830 8335 tmp = load_reg(s, rn);
9ee6e8bb
PB
8336 switch (op) {
8337 case 0x0a: /* rbit */
d9ba4830 8338 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8339 break;
8340 case 0x08: /* rev */
66896cb8 8341 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8342 break;
8343 case 0x09: /* rev16 */
d9ba4830 8344 gen_rev16(tmp);
9ee6e8bb
PB
8345 break;
8346 case 0x0b: /* revsh */
d9ba4830 8347 gen_revsh(tmp);
9ee6e8bb
PB
8348 break;
8349 case 0x10: /* sel */
d9ba4830 8350 tmp2 = load_reg(s, rm);
7d1b0095 8351 tmp3 = tcg_temp_new_i32();
0ecb72a5 8352 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8353 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8354 tcg_temp_free_i32(tmp3);
8355 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8356 break;
8357 case 0x18: /* clz */
d9ba4830 8358 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8359 break;
8360 default:
8361 goto illegal_op;
8362 }
8363 }
d9ba4830 8364 store_reg(s, rd, tmp);
9ee6e8bb
PB
8365 break;
8366 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8367 op = (insn >> 4) & 0xf;
d9ba4830
PB
8368 tmp = load_reg(s, rn);
8369 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8370 switch ((insn >> 20) & 7) {
8371 case 0: /* 32 x 32 -> 32 */
d9ba4830 8372 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8373 tcg_temp_free_i32(tmp2);
9ee6e8bb 8374 if (rs != 15) {
d9ba4830 8375 tmp2 = load_reg(s, rs);
9ee6e8bb 8376 if (op)
d9ba4830 8377 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8378 else
d9ba4830 8379 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8380 tcg_temp_free_i32(tmp2);
9ee6e8bb 8381 }
9ee6e8bb
PB
8382 break;
8383 case 1: /* 16 x 16 -> 32 */
d9ba4830 8384 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8385 tcg_temp_free_i32(tmp2);
9ee6e8bb 8386 if (rs != 15) {
d9ba4830 8387 tmp2 = load_reg(s, rs);
9ef39277 8388 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8389 tcg_temp_free_i32(tmp2);
9ee6e8bb 8390 }
9ee6e8bb
PB
8391 break;
8392 case 2: /* Dual multiply add. */
8393 case 4: /* Dual multiply subtract. */
8394 if (op)
d9ba4830
PB
8395 gen_swap_half(tmp2);
8396 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8397 if (insn & (1 << 22)) {
e1d177b9 8398 /* This subtraction cannot overflow. */
d9ba4830 8399 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8400 } else {
e1d177b9
PM
8401 /* This addition cannot overflow 32 bits;
8402 * however it may overflow considered as a signed
8403 * operation, in which case we must set the Q flag.
8404 */
9ef39277 8405 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8406 }
7d1b0095 8407 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8408 if (rs != 15)
8409 {
d9ba4830 8410 tmp2 = load_reg(s, rs);
9ef39277 8411 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8412 tcg_temp_free_i32(tmp2);
9ee6e8bb 8413 }
9ee6e8bb
PB
8414 break;
8415 case 3: /* 32 * 16 -> 32msb */
8416 if (op)
d9ba4830 8417 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8418 else
d9ba4830 8419 gen_sxth(tmp2);
a7812ae4
PB
8420 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8421 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8422 tmp = tcg_temp_new_i32();
a7812ae4 8423 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8424 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8425 if (rs != 15)
8426 {
d9ba4830 8427 tmp2 = load_reg(s, rs);
9ef39277 8428 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8429 tcg_temp_free_i32(tmp2);
9ee6e8bb 8430 }
9ee6e8bb 8431 break;
838fa72d
AJ
8432 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8433 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8434 if (rs != 15) {
838fa72d
AJ
8435 tmp = load_reg(s, rs);
8436 if (insn & (1 << 20)) {
8437 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8438 } else {
838fa72d 8439 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8440 }
2c0262af 8441 }
838fa72d
AJ
8442 if (insn & (1 << 4)) {
8443 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8444 }
8445 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8446 tmp = tcg_temp_new_i32();
838fa72d
AJ
8447 tcg_gen_trunc_i64_i32(tmp, tmp64);
8448 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8449 break;
8450 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8451 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8452 tcg_temp_free_i32(tmp2);
9ee6e8bb 8453 if (rs != 15) {
d9ba4830
PB
8454 tmp2 = load_reg(s, rs);
8455 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8456 tcg_temp_free_i32(tmp2);
5fd46862 8457 }
9ee6e8bb 8458 break;
2c0262af 8459 }
d9ba4830 8460 store_reg(s, rd, tmp);
2c0262af 8461 break;
9ee6e8bb
PB
8462 case 6: case 7: /* 64-bit multiply, Divide. */
8463 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8464 tmp = load_reg(s, rn);
8465 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8466 if ((op & 0x50) == 0x10) {
8467 /* sdiv, udiv */
47789990 8468 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8469 goto illegal_op;
47789990 8470 }
9ee6e8bb 8471 if (op & 0x20)
5e3f878a 8472 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8473 else
5e3f878a 8474 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8475 tcg_temp_free_i32(tmp2);
5e3f878a 8476 store_reg(s, rd, tmp);
9ee6e8bb
PB
8477 } else if ((op & 0xe) == 0xc) {
8478 /* Dual multiply accumulate long. */
8479 if (op & 1)
5e3f878a
PB
8480 gen_swap_half(tmp2);
8481 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8482 if (op & 0x10) {
5e3f878a 8483 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8484 } else {
5e3f878a 8485 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8486 }
7d1b0095 8487 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8488 /* BUGFIX */
8489 tmp64 = tcg_temp_new_i64();
8490 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8491 tcg_temp_free_i32(tmp);
a7812ae4
PB
8492 gen_addq(s, tmp64, rs, rd);
8493 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8494 tcg_temp_free_i64(tmp64);
2c0262af 8495 } else {
9ee6e8bb
PB
8496 if (op & 0x20) {
8497 /* Unsigned 64-bit multiply */
a7812ae4 8498 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8499 } else {
9ee6e8bb
PB
8500 if (op & 8) {
8501 /* smlalxy */
5e3f878a 8502 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8503 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8504 tmp64 = tcg_temp_new_i64();
8505 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8506 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8507 } else {
8508 /* Signed 64-bit multiply */
a7812ae4 8509 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8510 }
b5ff1b31 8511 }
9ee6e8bb
PB
8512 if (op & 4) {
8513 /* umaal */
a7812ae4
PB
8514 gen_addq_lo(s, tmp64, rs);
8515 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8516 } else if (op & 0x40) {
8517 /* 64-bit accumulate. */
a7812ae4 8518 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8519 }
a7812ae4 8520 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8521 tcg_temp_free_i64(tmp64);
5fd46862 8522 }
2c0262af 8523 break;
9ee6e8bb
PB
8524 }
8525 break;
8526 case 6: case 7: case 14: case 15:
8527 /* Coprocessor. */
8528 if (((insn >> 24) & 3) == 3) {
8529 /* Translate into the equivalent ARM encoding. */
f06053e3 8530 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8531 if (disas_neon_data_insn(env, s, insn))
8532 goto illegal_op;
8533 } else {
8534 if (insn & (1 << 28))
8535 goto illegal_op;
8536 if (disas_coproc_insn (env, s, insn))
8537 goto illegal_op;
8538 }
8539 break;
8540 case 8: case 9: case 10: case 11:
8541 if (insn & (1 << 15)) {
8542 /* Branches, misc control. */
8543 if (insn & 0x5000) {
8544 /* Unconditional branch. */
8545 /* signextend(hw1[10:0]) -> offset[:12]. */
8546 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8547 /* hw1[10:0] -> offset[11:1]. */
8548 offset |= (insn & 0x7ff) << 1;
8549 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8550 offset[24:22] already have the same value because of the
8551 sign extension above. */
8552 offset ^= ((~insn) & (1 << 13)) << 10;
8553 offset ^= ((~insn) & (1 << 11)) << 11;
8554
9ee6e8bb
PB
8555 if (insn & (1 << 14)) {
8556 /* Branch and link. */
3174f8e9 8557 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8558 }
3b46e624 8559
b0109805 8560 offset += s->pc;
9ee6e8bb
PB
8561 if (insn & (1 << 12)) {
8562 /* b/bl */
b0109805 8563 gen_jmp(s, offset);
9ee6e8bb
PB
8564 } else {
8565 /* blx */
b0109805 8566 offset &= ~(uint32_t)2;
be5e7a76 8567 /* thumb2 bx, no need to check */
b0109805 8568 gen_bx_im(s, offset);
2c0262af 8569 }
9ee6e8bb
PB
8570 } else if (((insn >> 23) & 7) == 7) {
8571 /* Misc control */
8572 if (insn & (1 << 13))
8573 goto illegal_op;
8574
8575 if (insn & (1 << 26)) {
8576 /* Secure monitor call (v6Z) */
8577 goto illegal_op; /* not implemented. */
2c0262af 8578 } else {
9ee6e8bb
PB
8579 op = (insn >> 20) & 7;
8580 switch (op) {
8581 case 0: /* msr cpsr. */
8582 if (IS_M(env)) {
8984bd2e
PB
8583 tmp = load_reg(s, rn);
8584 addr = tcg_const_i32(insn & 0xff);
8585 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8586 tcg_temp_free_i32(addr);
7d1b0095 8587 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8588 gen_lookup_tb(s);
8589 break;
8590 }
8591 /* fall through */
8592 case 1: /* msr spsr. */
8593 if (IS_M(env))
8594 goto illegal_op;
2fbac54b
FN
8595 tmp = load_reg(s, rn);
8596 if (gen_set_psr(s,
9ee6e8bb 8597 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8598 op == 1, tmp))
9ee6e8bb
PB
8599 goto illegal_op;
8600 break;
8601 case 2: /* cps, nop-hint. */
8602 if (((insn >> 8) & 7) == 0) {
8603 gen_nop_hint(s, insn & 0xff);
8604 }
8605 /* Implemented as NOP in user mode. */
8606 if (IS_USER(s))
8607 break;
8608 offset = 0;
8609 imm = 0;
8610 if (insn & (1 << 10)) {
8611 if (insn & (1 << 7))
8612 offset |= CPSR_A;
8613 if (insn & (1 << 6))
8614 offset |= CPSR_I;
8615 if (insn & (1 << 5))
8616 offset |= CPSR_F;
8617 if (insn & (1 << 9))
8618 imm = CPSR_A | CPSR_I | CPSR_F;
8619 }
8620 if (insn & (1 << 8)) {
8621 offset |= 0x1f;
8622 imm |= (insn & 0x1f);
8623 }
8624 if (offset) {
2fbac54b 8625 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8626 }
8627 break;
8628 case 3: /* Special control operations. */
426f5abc 8629 ARCH(7);
9ee6e8bb
PB
8630 op = (insn >> 4) & 0xf;
8631 switch (op) {
8632 case 2: /* clrex */
426f5abc 8633 gen_clrex(s);
9ee6e8bb
PB
8634 break;
8635 case 4: /* dsb */
8636 case 5: /* dmb */
8637 case 6: /* isb */
8638 /* These execute as NOPs. */
9ee6e8bb
PB
8639 break;
8640 default:
8641 goto illegal_op;
8642 }
8643 break;
8644 case 4: /* bxj */
8645 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8646 tmp = load_reg(s, rn);
8647 gen_bx(s, tmp);
9ee6e8bb
PB
8648 break;
8649 case 5: /* Exception return. */
b8b45b68
RV
8650 if (IS_USER(s)) {
8651 goto illegal_op;
8652 }
8653 if (rn != 14 || rd != 15) {
8654 goto illegal_op;
8655 }
8656 tmp = load_reg(s, rn);
8657 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8658 gen_exception_return(s, tmp);
8659 break;
9ee6e8bb 8660 case 6: /* mrs cpsr. */
7d1b0095 8661 tmp = tcg_temp_new_i32();
9ee6e8bb 8662 if (IS_M(env)) {
8984bd2e
PB
8663 addr = tcg_const_i32(insn & 0xff);
8664 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8665 tcg_temp_free_i32(addr);
9ee6e8bb 8666 } else {
9ef39277 8667 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8668 }
8984bd2e 8669 store_reg(s, rd, tmp);
9ee6e8bb
PB
8670 break;
8671 case 7: /* mrs spsr. */
8672 /* Not accessible in user mode. */
8673 if (IS_USER(s) || IS_M(env))
8674 goto illegal_op;
d9ba4830
PB
8675 tmp = load_cpu_field(spsr);
8676 store_reg(s, rd, tmp);
9ee6e8bb 8677 break;
2c0262af
FB
8678 }
8679 }
9ee6e8bb
PB
8680 } else {
8681 /* Conditional branch. */
8682 op = (insn >> 22) & 0xf;
8683 /* Generate a conditional jump to next instruction. */
8684 s->condlabel = gen_new_label();
d9ba4830 8685 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8686 s->condjmp = 1;
8687
8688 /* offset[11:1] = insn[10:0] */
8689 offset = (insn & 0x7ff) << 1;
8690 /* offset[17:12] = insn[21:16]. */
8691 offset |= (insn & 0x003f0000) >> 4;
8692 /* offset[31:20] = insn[26]. */
8693 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8694 /* offset[18] = insn[13]. */
8695 offset |= (insn & (1 << 13)) << 5;
8696 /* offset[19] = insn[11]. */
8697 offset |= (insn & (1 << 11)) << 8;
8698
8699 /* jump to the offset */
b0109805 8700 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8701 }
8702 } else {
8703 /* Data processing immediate. */
8704 if (insn & (1 << 25)) {
8705 if (insn & (1 << 24)) {
8706 if (insn & (1 << 20))
8707 goto illegal_op;
8708 /* Bitfield/Saturate. */
8709 op = (insn >> 21) & 7;
8710 imm = insn & 0x1f;
8711 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8712 if (rn == 15) {
7d1b0095 8713 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8714 tcg_gen_movi_i32(tmp, 0);
8715 } else {
8716 tmp = load_reg(s, rn);
8717 }
9ee6e8bb
PB
8718 switch (op) {
8719 case 2: /* Signed bitfield extract. */
8720 imm++;
8721 if (shift + imm > 32)
8722 goto illegal_op;
8723 if (imm < 32)
6ddbc6e4 8724 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8725 break;
8726 case 6: /* Unsigned bitfield extract. */
8727 imm++;
8728 if (shift + imm > 32)
8729 goto illegal_op;
8730 if (imm < 32)
6ddbc6e4 8731 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8732 break;
8733 case 3: /* Bitfield insert/clear. */
8734 if (imm < shift)
8735 goto illegal_op;
8736 imm = imm + 1 - shift;
8737 if (imm != 32) {
6ddbc6e4 8738 tmp2 = load_reg(s, rd);
8f8e3aa4 8739 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8740 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8741 }
8742 break;
8743 case 7:
8744 goto illegal_op;
8745 default: /* Saturate. */
9ee6e8bb
PB
8746 if (shift) {
8747 if (op & 1)
6ddbc6e4 8748 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8749 else
6ddbc6e4 8750 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8751 }
6ddbc6e4 8752 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8753 if (op & 4) {
8754 /* Unsigned. */
9ee6e8bb 8755 if ((op & 1) && shift == 0)
9ef39277 8756 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8757 else
9ef39277 8758 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8759 } else {
9ee6e8bb 8760 /* Signed. */
9ee6e8bb 8761 if ((op & 1) && shift == 0)
9ef39277 8762 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8763 else
9ef39277 8764 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 8765 }
b75263d6 8766 tcg_temp_free_i32(tmp2);
9ee6e8bb 8767 break;
2c0262af 8768 }
6ddbc6e4 8769 store_reg(s, rd, tmp);
9ee6e8bb
PB
8770 } else {
8771 imm = ((insn & 0x04000000) >> 15)
8772 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8773 if (insn & (1 << 22)) {
8774 /* 16-bit immediate. */
8775 imm |= (insn >> 4) & 0xf000;
8776 if (insn & (1 << 23)) {
8777 /* movt */
5e3f878a 8778 tmp = load_reg(s, rd);
86831435 8779 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8780 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8781 } else {
9ee6e8bb 8782 /* movw */
7d1b0095 8783 tmp = tcg_temp_new_i32();
5e3f878a 8784 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8785 }
8786 } else {
9ee6e8bb
PB
8787 /* Add/sub 12-bit immediate. */
8788 if (rn == 15) {
b0109805 8789 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8790 if (insn & (1 << 23))
b0109805 8791 offset -= imm;
9ee6e8bb 8792 else
b0109805 8793 offset += imm;
7d1b0095 8794 tmp = tcg_temp_new_i32();
5e3f878a 8795 tcg_gen_movi_i32(tmp, offset);
2c0262af 8796 } else {
5e3f878a 8797 tmp = load_reg(s, rn);
9ee6e8bb 8798 if (insn & (1 << 23))
5e3f878a 8799 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8800 else
5e3f878a 8801 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8802 }
9ee6e8bb 8803 }
5e3f878a 8804 store_reg(s, rd, tmp);
191abaa2 8805 }
9ee6e8bb
PB
8806 } else {
8807 int shifter_out = 0;
8808 /* modified 12-bit immediate. */
8809 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8810 imm = (insn & 0xff);
8811 switch (shift) {
8812 case 0: /* XY */
8813 /* Nothing to do. */
8814 break;
8815 case 1: /* 00XY00XY */
8816 imm |= imm << 16;
8817 break;
8818 case 2: /* XY00XY00 */
8819 imm |= imm << 16;
8820 imm <<= 8;
8821 break;
8822 case 3: /* XYXYXYXY */
8823 imm |= imm << 16;
8824 imm |= imm << 8;
8825 break;
8826 default: /* Rotated constant. */
8827 shift = (shift << 1) | (imm >> 7);
8828 imm |= 0x80;
8829 imm = imm << (32 - shift);
8830 shifter_out = 1;
8831 break;
b5ff1b31 8832 }
7d1b0095 8833 tmp2 = tcg_temp_new_i32();
3174f8e9 8834 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8835 rn = (insn >> 16) & 0xf;
3174f8e9 8836 if (rn == 15) {
7d1b0095 8837 tmp = tcg_temp_new_i32();
3174f8e9
FN
8838 tcg_gen_movi_i32(tmp, 0);
8839 } else {
8840 tmp = load_reg(s, rn);
8841 }
9ee6e8bb
PB
8842 op = (insn >> 21) & 0xf;
8843 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8844 shifter_out, tmp, tmp2))
9ee6e8bb 8845 goto illegal_op;
7d1b0095 8846 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8847 rd = (insn >> 8) & 0xf;
8848 if (rd != 15) {
3174f8e9
FN
8849 store_reg(s, rd, tmp);
8850 } else {
7d1b0095 8851 tcg_temp_free_i32(tmp);
2c0262af 8852 }
2c0262af 8853 }
9ee6e8bb
PB
8854 }
8855 break;
8856 case 12: /* Load/store single data item. */
8857 {
8858 int postinc = 0;
8859 int writeback = 0;
b0109805 8860 int user;
9ee6e8bb
PB
8861 if ((insn & 0x01100000) == 0x01000000) {
8862 if (disas_neon_ls_insn(env, s, insn))
c1713132 8863 goto illegal_op;
9ee6e8bb
PB
8864 break;
8865 }
a2fdc890
PM
8866 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8867 if (rs == 15) {
8868 if (!(insn & (1 << 20))) {
8869 goto illegal_op;
8870 }
8871 if (op != 2) {
8872 /* Byte or halfword load space with dest == r15 : memory hints.
8873 * Catch them early so we don't emit pointless addressing code.
8874 * This space is a mix of:
8875 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8876 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8877 * cores)
8878 * unallocated hints, which must be treated as NOPs
8879 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8880 * which is easiest for the decoding logic
8881 * Some space which must UNDEF
8882 */
8883 int op1 = (insn >> 23) & 3;
8884 int op2 = (insn >> 6) & 0x3f;
8885 if (op & 2) {
8886 goto illegal_op;
8887 }
8888 if (rn == 15) {
02afbf64
PM
8889 /* UNPREDICTABLE, unallocated hint or
8890 * PLD/PLDW/PLI (literal)
8891 */
a2fdc890
PM
8892 return 0;
8893 }
8894 if (op1 & 1) {
02afbf64 8895 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8896 }
8897 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 8898 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8899 }
8900 /* UNDEF space, or an UNPREDICTABLE */
8901 return 1;
8902 }
8903 }
b0109805 8904 user = IS_USER(s);
9ee6e8bb 8905 if (rn == 15) {
7d1b0095 8906 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8907 /* PC relative. */
8908 /* s->pc has already been incremented by 4. */
8909 imm = s->pc & 0xfffffffc;
8910 if (insn & (1 << 23))
8911 imm += insn & 0xfff;
8912 else
8913 imm -= insn & 0xfff;
b0109805 8914 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8915 } else {
b0109805 8916 addr = load_reg(s, rn);
9ee6e8bb
PB
8917 if (insn & (1 << 23)) {
8918 /* Positive offset. */
8919 imm = insn & 0xfff;
b0109805 8920 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8921 } else {
9ee6e8bb 8922 imm = insn & 0xff;
2a0308c5
PM
8923 switch ((insn >> 8) & 0xf) {
8924 case 0x0: /* Shifted Register. */
9ee6e8bb 8925 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8926 if (shift > 3) {
8927 tcg_temp_free_i32(addr);
18c9b560 8928 goto illegal_op;
2a0308c5 8929 }
b26eefb6 8930 tmp = load_reg(s, rm);
9ee6e8bb 8931 if (shift)
b26eefb6 8932 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8933 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8934 tcg_temp_free_i32(tmp);
9ee6e8bb 8935 break;
2a0308c5 8936 case 0xc: /* Negative offset. */
b0109805 8937 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8938 break;
2a0308c5 8939 case 0xe: /* User privilege. */
b0109805
PB
8940 tcg_gen_addi_i32(addr, addr, imm);
8941 user = 1;
9ee6e8bb 8942 break;
2a0308c5 8943 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8944 imm = -imm;
8945 /* Fall through. */
2a0308c5 8946 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8947 postinc = 1;
8948 writeback = 1;
8949 break;
2a0308c5 8950 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8951 imm = -imm;
8952 /* Fall through. */
2a0308c5 8953 case 0xf: /* Pre-increment. */
b0109805 8954 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8955 writeback = 1;
8956 break;
8957 default:
2a0308c5 8958 tcg_temp_free_i32(addr);
b7bcbe95 8959 goto illegal_op;
9ee6e8bb
PB
8960 }
8961 }
8962 }
9ee6e8bb
PB
8963 if (insn & (1 << 20)) {
8964 /* Load. */
a2fdc890
PM
8965 switch (op) {
8966 case 0: tmp = gen_ld8u(addr, user); break;
8967 case 4: tmp = gen_ld8s(addr, user); break;
8968 case 1: tmp = gen_ld16u(addr, user); break;
8969 case 5: tmp = gen_ld16s(addr, user); break;
8970 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8971 default:
8972 tcg_temp_free_i32(addr);
8973 goto illegal_op;
a2fdc890
PM
8974 }
8975 if (rs == 15) {
8976 gen_bx(s, tmp);
9ee6e8bb 8977 } else {
a2fdc890 8978 store_reg(s, rs, tmp);
9ee6e8bb
PB
8979 }
8980 } else {
8981 /* Store. */
b0109805 8982 tmp = load_reg(s, rs);
9ee6e8bb 8983 switch (op) {
b0109805
PB
8984 case 0: gen_st8(tmp, addr, user); break;
8985 case 1: gen_st16(tmp, addr, user); break;
8986 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8987 default:
8988 tcg_temp_free_i32(addr);
8989 goto illegal_op;
b7bcbe95 8990 }
2c0262af 8991 }
9ee6e8bb 8992 if (postinc)
b0109805
PB
8993 tcg_gen_addi_i32(addr, addr, imm);
8994 if (writeback) {
8995 store_reg(s, rn, addr);
8996 } else {
7d1b0095 8997 tcg_temp_free_i32(addr);
b0109805 8998 }
9ee6e8bb
PB
8999 }
9000 break;
9001 default:
9002 goto illegal_op;
2c0262af 9003 }
9ee6e8bb
PB
9004 return 0;
9005illegal_op:
9006 return 1;
2c0262af
FB
9007}
9008
0ecb72a5 9009static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9010{
9011 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9012 int32_t offset;
9013 int i;
b26eefb6 9014 TCGv tmp;
d9ba4830 9015 TCGv tmp2;
b0109805 9016 TCGv addr;
99c475ab 9017
9ee6e8bb
PB
9018 if (s->condexec_mask) {
9019 cond = s->condexec_cond;
bedd2912
JB
9020 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9021 s->condlabel = gen_new_label();
9022 gen_test_cc(cond ^ 1, s->condlabel);
9023 s->condjmp = 1;
9024 }
9ee6e8bb
PB
9025 }
9026
d31dd73e 9027 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9028 s->pc += 2;
b5ff1b31 9029
99c475ab
FB
9030 switch (insn >> 12) {
9031 case 0: case 1:
396e467c 9032
99c475ab
FB
9033 rd = insn & 7;
9034 op = (insn >> 11) & 3;
9035 if (op == 3) {
9036 /* add/subtract */
9037 rn = (insn >> 3) & 7;
396e467c 9038 tmp = load_reg(s, rn);
99c475ab
FB
9039 if (insn & (1 << 10)) {
9040 /* immediate */
7d1b0095 9041 tmp2 = tcg_temp_new_i32();
396e467c 9042 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9043 } else {
9044 /* reg */
9045 rm = (insn >> 6) & 7;
396e467c 9046 tmp2 = load_reg(s, rm);
99c475ab 9047 }
9ee6e8bb
PB
9048 if (insn & (1 << 9)) {
9049 if (s->condexec_mask)
396e467c 9050 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9051 else
72485ec4 9052 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9053 } else {
9054 if (s->condexec_mask)
396e467c 9055 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9056 else
72485ec4 9057 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9058 }
7d1b0095 9059 tcg_temp_free_i32(tmp2);
396e467c 9060 store_reg(s, rd, tmp);
99c475ab
FB
9061 } else {
9062 /* shift immediate */
9063 rm = (insn >> 3) & 7;
9064 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9065 tmp = load_reg(s, rm);
9066 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9067 if (!s->condexec_mask)
9068 gen_logic_CC(tmp);
9069 store_reg(s, rd, tmp);
99c475ab
FB
9070 }
9071 break;
9072 case 2: case 3:
9073 /* arithmetic large immediate */
9074 op = (insn >> 11) & 3;
9075 rd = (insn >> 8) & 0x7;
396e467c 9076 if (op == 0) { /* mov */
7d1b0095 9077 tmp = tcg_temp_new_i32();
396e467c 9078 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9079 if (!s->condexec_mask)
396e467c
FN
9080 gen_logic_CC(tmp);
9081 store_reg(s, rd, tmp);
9082 } else {
9083 tmp = load_reg(s, rd);
7d1b0095 9084 tmp2 = tcg_temp_new_i32();
396e467c
FN
9085 tcg_gen_movi_i32(tmp2, insn & 0xff);
9086 switch (op) {
9087 case 1: /* cmp */
72485ec4 9088 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9089 tcg_temp_free_i32(tmp);
9090 tcg_temp_free_i32(tmp2);
396e467c
FN
9091 break;
9092 case 2: /* add */
9093 if (s->condexec_mask)
9094 tcg_gen_add_i32(tmp, tmp, tmp2);
9095 else
72485ec4 9096 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9097 tcg_temp_free_i32(tmp2);
396e467c
FN
9098 store_reg(s, rd, tmp);
9099 break;
9100 case 3: /* sub */
9101 if (s->condexec_mask)
9102 tcg_gen_sub_i32(tmp, tmp, tmp2);
9103 else
72485ec4 9104 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9105 tcg_temp_free_i32(tmp2);
396e467c
FN
9106 store_reg(s, rd, tmp);
9107 break;
9108 }
99c475ab 9109 }
99c475ab
FB
9110 break;
9111 case 4:
9112 if (insn & (1 << 11)) {
9113 rd = (insn >> 8) & 7;
5899f386
FB
9114 /* load pc-relative. Bit 1 of PC is ignored. */
9115 val = s->pc + 2 + ((insn & 0xff) * 4);
9116 val &= ~(uint32_t)2;
7d1b0095 9117 addr = tcg_temp_new_i32();
b0109805
PB
9118 tcg_gen_movi_i32(addr, val);
9119 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9120 tcg_temp_free_i32(addr);
b0109805 9121 store_reg(s, rd, tmp);
99c475ab
FB
9122 break;
9123 }
9124 if (insn & (1 << 10)) {
9125 /* data processing extended or blx */
9126 rd = (insn & 7) | ((insn >> 4) & 8);
9127 rm = (insn >> 3) & 0xf;
9128 op = (insn >> 8) & 3;
9129 switch (op) {
9130 case 0: /* add */
396e467c
FN
9131 tmp = load_reg(s, rd);
9132 tmp2 = load_reg(s, rm);
9133 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9134 tcg_temp_free_i32(tmp2);
396e467c 9135 store_reg(s, rd, tmp);
99c475ab
FB
9136 break;
9137 case 1: /* cmp */
396e467c
FN
9138 tmp = load_reg(s, rd);
9139 tmp2 = load_reg(s, rm);
72485ec4 9140 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9141 tcg_temp_free_i32(tmp2);
9142 tcg_temp_free_i32(tmp);
99c475ab
FB
9143 break;
9144 case 2: /* mov/cpy */
396e467c
FN
9145 tmp = load_reg(s, rm);
9146 store_reg(s, rd, tmp);
99c475ab
FB
9147 break;
9148 case 3:/* branch [and link] exchange thumb register */
b0109805 9149 tmp = load_reg(s, rm);
99c475ab 9150 if (insn & (1 << 7)) {
be5e7a76 9151 ARCH(5);
99c475ab 9152 val = (uint32_t)s->pc | 1;
7d1b0095 9153 tmp2 = tcg_temp_new_i32();
b0109805
PB
9154 tcg_gen_movi_i32(tmp2, val);
9155 store_reg(s, 14, tmp2);
99c475ab 9156 }
be5e7a76 9157 /* already thumb, no need to check */
d9ba4830 9158 gen_bx(s, tmp);
99c475ab
FB
9159 break;
9160 }
9161 break;
9162 }
9163
9164 /* data processing register */
9165 rd = insn & 7;
9166 rm = (insn >> 3) & 7;
9167 op = (insn >> 6) & 0xf;
9168 if (op == 2 || op == 3 || op == 4 || op == 7) {
9169 /* the shift/rotate ops want the operands backwards */
9170 val = rm;
9171 rm = rd;
9172 rd = val;
9173 val = 1;
9174 } else {
9175 val = 0;
9176 }
9177
396e467c 9178 if (op == 9) { /* neg */
7d1b0095 9179 tmp = tcg_temp_new_i32();
396e467c
FN
9180 tcg_gen_movi_i32(tmp, 0);
9181 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9182 tmp = load_reg(s, rd);
9183 } else {
9184 TCGV_UNUSED(tmp);
9185 }
99c475ab 9186
396e467c 9187 tmp2 = load_reg(s, rm);
5899f386 9188 switch (op) {
99c475ab 9189 case 0x0: /* and */
396e467c 9190 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9191 if (!s->condexec_mask)
396e467c 9192 gen_logic_CC(tmp);
99c475ab
FB
9193 break;
9194 case 0x1: /* eor */
396e467c 9195 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9196 if (!s->condexec_mask)
396e467c 9197 gen_logic_CC(tmp);
99c475ab
FB
9198 break;
9199 case 0x2: /* lsl */
9ee6e8bb 9200 if (s->condexec_mask) {
365af80e 9201 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9202 } else {
9ef39277 9203 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9204 gen_logic_CC(tmp2);
9ee6e8bb 9205 }
99c475ab
FB
9206 break;
9207 case 0x3: /* lsr */
9ee6e8bb 9208 if (s->condexec_mask) {
365af80e 9209 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9210 } else {
9ef39277 9211 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9212 gen_logic_CC(tmp2);
9ee6e8bb 9213 }
99c475ab
FB
9214 break;
9215 case 0x4: /* asr */
9ee6e8bb 9216 if (s->condexec_mask) {
365af80e 9217 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9218 } else {
9ef39277 9219 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9220 gen_logic_CC(tmp2);
9ee6e8bb 9221 }
99c475ab
FB
9222 break;
9223 case 0x5: /* adc */
9ee6e8bb 9224 if (s->condexec_mask)
396e467c 9225 gen_adc(tmp, tmp2);
9ee6e8bb 9226 else
9ef39277 9227 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
99c475ab
FB
9228 break;
9229 case 0x6: /* sbc */
9ee6e8bb 9230 if (s->condexec_mask)
396e467c 9231 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9232 else
9ef39277 9233 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
99c475ab
FB
9234 break;
9235 case 0x7: /* ror */
9ee6e8bb 9236 if (s->condexec_mask) {
f669df27
AJ
9237 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9238 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9239 } else {
9ef39277 9240 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9241 gen_logic_CC(tmp2);
9ee6e8bb 9242 }
99c475ab
FB
9243 break;
9244 case 0x8: /* tst */
396e467c
FN
9245 tcg_gen_and_i32(tmp, tmp, tmp2);
9246 gen_logic_CC(tmp);
99c475ab 9247 rd = 16;
5899f386 9248 break;
99c475ab 9249 case 0x9: /* neg */
9ee6e8bb 9250 if (s->condexec_mask)
396e467c 9251 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9252 else
72485ec4 9253 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9254 break;
9255 case 0xa: /* cmp */
72485ec4 9256 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9257 rd = 16;
9258 break;
9259 case 0xb: /* cmn */
72485ec4 9260 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9261 rd = 16;
9262 break;
9263 case 0xc: /* orr */
396e467c 9264 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9265 if (!s->condexec_mask)
396e467c 9266 gen_logic_CC(tmp);
99c475ab
FB
9267 break;
9268 case 0xd: /* mul */
7b2919a0 9269 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9270 if (!s->condexec_mask)
396e467c 9271 gen_logic_CC(tmp);
99c475ab
FB
9272 break;
9273 case 0xe: /* bic */
f669df27 9274 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9275 if (!s->condexec_mask)
396e467c 9276 gen_logic_CC(tmp);
99c475ab
FB
9277 break;
9278 case 0xf: /* mvn */
396e467c 9279 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9280 if (!s->condexec_mask)
396e467c 9281 gen_logic_CC(tmp2);
99c475ab 9282 val = 1;
5899f386 9283 rm = rd;
99c475ab
FB
9284 break;
9285 }
9286 if (rd != 16) {
396e467c
FN
9287 if (val) {
9288 store_reg(s, rm, tmp2);
9289 if (op != 0xf)
7d1b0095 9290 tcg_temp_free_i32(tmp);
396e467c
FN
9291 } else {
9292 store_reg(s, rd, tmp);
7d1b0095 9293 tcg_temp_free_i32(tmp2);
396e467c
FN
9294 }
9295 } else {
7d1b0095
PM
9296 tcg_temp_free_i32(tmp);
9297 tcg_temp_free_i32(tmp2);
99c475ab
FB
9298 }
9299 break;
9300
9301 case 5:
9302 /* load/store register offset. */
9303 rd = insn & 7;
9304 rn = (insn >> 3) & 7;
9305 rm = (insn >> 6) & 7;
9306 op = (insn >> 9) & 7;
b0109805 9307 addr = load_reg(s, rn);
b26eefb6 9308 tmp = load_reg(s, rm);
b0109805 9309 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9310 tcg_temp_free_i32(tmp);
99c475ab
FB
9311
9312 if (op < 3) /* store */
b0109805 9313 tmp = load_reg(s, rd);
99c475ab
FB
9314
9315 switch (op) {
9316 case 0: /* str */
b0109805 9317 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9318 break;
9319 case 1: /* strh */
b0109805 9320 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9321 break;
9322 case 2: /* strb */
b0109805 9323 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9324 break;
9325 case 3: /* ldrsb */
b0109805 9326 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9327 break;
9328 case 4: /* ldr */
b0109805 9329 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9330 break;
9331 case 5: /* ldrh */
b0109805 9332 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9333 break;
9334 case 6: /* ldrb */
b0109805 9335 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9336 break;
9337 case 7: /* ldrsh */
b0109805 9338 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9339 break;
9340 }
9341 if (op >= 3) /* load */
b0109805 9342 store_reg(s, rd, tmp);
7d1b0095 9343 tcg_temp_free_i32(addr);
99c475ab
FB
9344 break;
9345
9346 case 6:
9347 /* load/store word immediate offset */
9348 rd = insn & 7;
9349 rn = (insn >> 3) & 7;
b0109805 9350 addr = load_reg(s, rn);
99c475ab 9351 val = (insn >> 4) & 0x7c;
b0109805 9352 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9353
9354 if (insn & (1 << 11)) {
9355 /* load */
b0109805
PB
9356 tmp = gen_ld32(addr, IS_USER(s));
9357 store_reg(s, rd, tmp);
99c475ab
FB
9358 } else {
9359 /* store */
b0109805
PB
9360 tmp = load_reg(s, rd);
9361 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9362 }
7d1b0095 9363 tcg_temp_free_i32(addr);
99c475ab
FB
9364 break;
9365
9366 case 7:
9367 /* load/store byte immediate offset */
9368 rd = insn & 7;
9369 rn = (insn >> 3) & 7;
b0109805 9370 addr = load_reg(s, rn);
99c475ab 9371 val = (insn >> 6) & 0x1f;
b0109805 9372 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9373
9374 if (insn & (1 << 11)) {
9375 /* load */
b0109805
PB
9376 tmp = gen_ld8u(addr, IS_USER(s));
9377 store_reg(s, rd, tmp);
99c475ab
FB
9378 } else {
9379 /* store */
b0109805
PB
9380 tmp = load_reg(s, rd);
9381 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9382 }
7d1b0095 9383 tcg_temp_free_i32(addr);
99c475ab
FB
9384 break;
9385
9386 case 8:
9387 /* load/store halfword immediate offset */
9388 rd = insn & 7;
9389 rn = (insn >> 3) & 7;
b0109805 9390 addr = load_reg(s, rn);
99c475ab 9391 val = (insn >> 5) & 0x3e;
b0109805 9392 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9393
9394 if (insn & (1 << 11)) {
9395 /* load */
b0109805
PB
9396 tmp = gen_ld16u(addr, IS_USER(s));
9397 store_reg(s, rd, tmp);
99c475ab
FB
9398 } else {
9399 /* store */
b0109805
PB
9400 tmp = load_reg(s, rd);
9401 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9402 }
7d1b0095 9403 tcg_temp_free_i32(addr);
99c475ab
FB
9404 break;
9405
9406 case 9:
9407 /* load/store from stack */
9408 rd = (insn >> 8) & 7;
b0109805 9409 addr = load_reg(s, 13);
99c475ab 9410 val = (insn & 0xff) * 4;
b0109805 9411 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9412
9413 if (insn & (1 << 11)) {
9414 /* load */
b0109805
PB
9415 tmp = gen_ld32(addr, IS_USER(s));
9416 store_reg(s, rd, tmp);
99c475ab
FB
9417 } else {
9418 /* store */
b0109805
PB
9419 tmp = load_reg(s, rd);
9420 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9421 }
7d1b0095 9422 tcg_temp_free_i32(addr);
99c475ab
FB
9423 break;
9424
9425 case 10:
9426 /* add to high reg */
9427 rd = (insn >> 8) & 7;
5899f386
FB
9428 if (insn & (1 << 11)) {
9429 /* SP */
5e3f878a 9430 tmp = load_reg(s, 13);
5899f386
FB
9431 } else {
9432 /* PC. bit 1 is ignored. */
7d1b0095 9433 tmp = tcg_temp_new_i32();
5e3f878a 9434 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9435 }
99c475ab 9436 val = (insn & 0xff) * 4;
5e3f878a
PB
9437 tcg_gen_addi_i32(tmp, tmp, val);
9438 store_reg(s, rd, tmp);
99c475ab
FB
9439 break;
9440
9441 case 11:
9442 /* misc */
9443 op = (insn >> 8) & 0xf;
9444 switch (op) {
9445 case 0:
9446 /* adjust stack pointer */
b26eefb6 9447 tmp = load_reg(s, 13);
99c475ab
FB
9448 val = (insn & 0x7f) * 4;
9449 if (insn & (1 << 7))
6a0d8a1d 9450 val = -(int32_t)val;
b26eefb6
PB
9451 tcg_gen_addi_i32(tmp, tmp, val);
9452 store_reg(s, 13, tmp);
99c475ab
FB
9453 break;
9454
9ee6e8bb
PB
9455 case 2: /* sign/zero extend. */
9456 ARCH(6);
9457 rd = insn & 7;
9458 rm = (insn >> 3) & 7;
b0109805 9459 tmp = load_reg(s, rm);
9ee6e8bb 9460 switch ((insn >> 6) & 3) {
b0109805
PB
9461 case 0: gen_sxth(tmp); break;
9462 case 1: gen_sxtb(tmp); break;
9463 case 2: gen_uxth(tmp); break;
9464 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9465 }
b0109805 9466 store_reg(s, rd, tmp);
9ee6e8bb 9467 break;
99c475ab
FB
9468 case 4: case 5: case 0xc: case 0xd:
9469 /* push/pop */
b0109805 9470 addr = load_reg(s, 13);
5899f386
FB
9471 if (insn & (1 << 8))
9472 offset = 4;
99c475ab 9473 else
5899f386
FB
9474 offset = 0;
9475 for (i = 0; i < 8; i++) {
9476 if (insn & (1 << i))
9477 offset += 4;
9478 }
9479 if ((insn & (1 << 11)) == 0) {
b0109805 9480 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9481 }
99c475ab
FB
9482 for (i = 0; i < 8; i++) {
9483 if (insn & (1 << i)) {
9484 if (insn & (1 << 11)) {
9485 /* pop */
b0109805
PB
9486 tmp = gen_ld32(addr, IS_USER(s));
9487 store_reg(s, i, tmp);
99c475ab
FB
9488 } else {
9489 /* push */
b0109805
PB
9490 tmp = load_reg(s, i);
9491 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9492 }
5899f386 9493 /* advance to the next address. */
b0109805 9494 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9495 }
9496 }
a50f5b91 9497 TCGV_UNUSED(tmp);
99c475ab
FB
9498 if (insn & (1 << 8)) {
9499 if (insn & (1 << 11)) {
9500 /* pop pc */
b0109805 9501 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9502 /* don't set the pc until the rest of the instruction
9503 has completed */
9504 } else {
9505 /* push lr */
b0109805
PB
9506 tmp = load_reg(s, 14);
9507 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9508 }
b0109805 9509 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9510 }
5899f386 9511 if ((insn & (1 << 11)) == 0) {
b0109805 9512 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9513 }
99c475ab 9514 /* write back the new stack pointer */
b0109805 9515 store_reg(s, 13, addr);
99c475ab 9516 /* set the new PC value */
be5e7a76
DES
9517 if ((insn & 0x0900) == 0x0900) {
9518 store_reg_from_load(env, s, 15, tmp);
9519 }
99c475ab
FB
9520 break;
9521
9ee6e8bb
PB
9522 case 1: case 3: case 9: case 11: /* czb */
9523 rm = insn & 7;
d9ba4830 9524 tmp = load_reg(s, rm);
9ee6e8bb
PB
9525 s->condlabel = gen_new_label();
9526 s->condjmp = 1;
9527 if (insn & (1 << 11))
cb63669a 9528 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9529 else
cb63669a 9530 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9531 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9532 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9533 val = (uint32_t)s->pc + 2;
9534 val += offset;
9535 gen_jmp(s, val);
9536 break;
9537
9538 case 15: /* IT, nop-hint. */
9539 if ((insn & 0xf) == 0) {
9540 gen_nop_hint(s, (insn >> 4) & 0xf);
9541 break;
9542 }
9543 /* If Then. */
9544 s->condexec_cond = (insn >> 4) & 0xe;
9545 s->condexec_mask = insn & 0x1f;
9546 /* No actual code generated for this insn, just setup state. */
9547 break;
9548
06c949e6 9549 case 0xe: /* bkpt */
be5e7a76 9550 ARCH(5);
bc4a0de0 9551 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9552 break;
9553
9ee6e8bb
PB
9554 case 0xa: /* rev */
9555 ARCH(6);
9556 rn = (insn >> 3) & 0x7;
9557 rd = insn & 0x7;
b0109805 9558 tmp = load_reg(s, rn);
9ee6e8bb 9559 switch ((insn >> 6) & 3) {
66896cb8 9560 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9561 case 1: gen_rev16(tmp); break;
9562 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9563 default: goto illegal_op;
9564 }
b0109805 9565 store_reg(s, rd, tmp);
9ee6e8bb
PB
9566 break;
9567
d9e028c1
PM
9568 case 6:
9569 switch ((insn >> 5) & 7) {
9570 case 2:
9571 /* setend */
9572 ARCH(6);
10962fd5
PM
9573 if (((insn >> 3) & 1) != s->bswap_code) {
9574 /* Dynamic endianness switching not implemented. */
d9e028c1
PM
9575 goto illegal_op;
9576 }
9ee6e8bb 9577 break;
d9e028c1
PM
9578 case 3:
9579 /* cps */
9580 ARCH(6);
9581 if (IS_USER(s)) {
9582 break;
8984bd2e 9583 }
d9e028c1
PM
9584 if (IS_M(env)) {
9585 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9586 /* FAULTMASK */
9587 if (insn & 1) {
9588 addr = tcg_const_i32(19);
9589 gen_helper_v7m_msr(cpu_env, addr, tmp);
9590 tcg_temp_free_i32(addr);
9591 }
9592 /* PRIMASK */
9593 if (insn & 2) {
9594 addr = tcg_const_i32(16);
9595 gen_helper_v7m_msr(cpu_env, addr, tmp);
9596 tcg_temp_free_i32(addr);
9597 }
9598 tcg_temp_free_i32(tmp);
9599 gen_lookup_tb(s);
9600 } else {
9601 if (insn & (1 << 4)) {
9602 shift = CPSR_A | CPSR_I | CPSR_F;
9603 } else {
9604 shift = 0;
9605 }
9606 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9607 }
d9e028c1
PM
9608 break;
9609 default:
9610 goto undef;
9ee6e8bb
PB
9611 }
9612 break;
9613
99c475ab
FB
9614 default:
9615 goto undef;
9616 }
9617 break;
9618
9619 case 12:
a7d3970d 9620 {
99c475ab 9621 /* load/store multiple */
a7d3970d
PM
9622 TCGv loaded_var;
9623 TCGV_UNUSED(loaded_var);
99c475ab 9624 rn = (insn >> 8) & 0x7;
b0109805 9625 addr = load_reg(s, rn);
99c475ab
FB
9626 for (i = 0; i < 8; i++) {
9627 if (insn & (1 << i)) {
99c475ab
FB
9628 if (insn & (1 << 11)) {
9629 /* load */
b0109805 9630 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9631 if (i == rn) {
9632 loaded_var = tmp;
9633 } else {
9634 store_reg(s, i, tmp);
9635 }
99c475ab
FB
9636 } else {
9637 /* store */
b0109805
PB
9638 tmp = load_reg(s, i);
9639 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9640 }
5899f386 9641 /* advance to the next address */
b0109805 9642 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9643 }
9644 }
b0109805 9645 if ((insn & (1 << rn)) == 0) {
a7d3970d 9646 /* base reg not in list: base register writeback */
b0109805
PB
9647 store_reg(s, rn, addr);
9648 } else {
a7d3970d
PM
9649 /* base reg in list: if load, complete it now */
9650 if (insn & (1 << 11)) {
9651 store_reg(s, rn, loaded_var);
9652 }
7d1b0095 9653 tcg_temp_free_i32(addr);
b0109805 9654 }
99c475ab 9655 break;
a7d3970d 9656 }
99c475ab
FB
9657 case 13:
9658 /* conditional branch or swi */
9659 cond = (insn >> 8) & 0xf;
9660 if (cond == 0xe)
9661 goto undef;
9662
9663 if (cond == 0xf) {
9664 /* swi */
422ebf69 9665 gen_set_pc_im(s->pc);
9ee6e8bb 9666 s->is_jmp = DISAS_SWI;
99c475ab
FB
9667 break;
9668 }
9669 /* generate a conditional jump to next instruction */
e50e6a20 9670 s->condlabel = gen_new_label();
d9ba4830 9671 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9672 s->condjmp = 1;
99c475ab
FB
9673
9674 /* jump to the offset */
5899f386 9675 val = (uint32_t)s->pc + 2;
99c475ab 9676 offset = ((int32_t)insn << 24) >> 24;
5899f386 9677 val += offset << 1;
8aaca4c0 9678 gen_jmp(s, val);
99c475ab
FB
9679 break;
9680
9681 case 14:
358bf29e 9682 if (insn & (1 << 11)) {
9ee6e8bb
PB
9683 if (disas_thumb2_insn(env, s, insn))
9684 goto undef32;
358bf29e
PB
9685 break;
9686 }
9ee6e8bb 9687 /* unconditional branch */
99c475ab
FB
9688 val = (uint32_t)s->pc;
9689 offset = ((int32_t)insn << 21) >> 21;
9690 val += (offset << 1) + 2;
8aaca4c0 9691 gen_jmp(s, val);
99c475ab
FB
9692 break;
9693
9694 case 15:
9ee6e8bb 9695 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9696 goto undef32;
9ee6e8bb 9697 break;
99c475ab
FB
9698 }
9699 return;
9ee6e8bb 9700undef32:
bc4a0de0 9701 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9702 return;
9703illegal_op:
99c475ab 9704undef:
bc4a0de0 9705 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9706}
9707
2c0262af
FB
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUARMState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    /* Seed the per-TB disassembly context from the TB flags: ISA mode,
     * code endianness, IT-block state, privilege, and VFP/Neon config. */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    /* Scratch TCG temporaries shared by the VFP/Neon decode helpers. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* M-profile magic exception-return addresses.  We always get
               here via a jump, so know we are not in a conditional
               execution block. */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            /* Record the guest PC / condexec / icount for this op index so
               restore_state_to_opc() can map host state back to guest state. */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine: shift the mask and fold
               the next "then/else" bit into the low bit of the condition. */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns ++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code. */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        /* Emit the not-taken path of a trailing conditional instruction:
           fall through to the next sequential TB. */
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        /* Pad the search tables out to the final op count. */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
9985
0ecb72a5 9986void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 9987{
2cfc5f17 9988 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9989}
9990
0ecb72a5 9991void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 9992{
2cfc5f17 9993 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9994}
9995
b5ff1b31
FB
/* Names of the CPU modes, indexed by the low 4 bits of the PSR mode field
   (cpu_dump_state uses psr & 0xf).  "???" marks encodings with no name
   in this table.  The array of pointers is itself const so the whole
   table can live in read-only storage. */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 10000
0ecb72a5 10001void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10002 int flags)
2c0262af
FB
10003{
10004 int i;
b5ff1b31 10005 uint32_t psr;
2c0262af
FB
10006
10007 for(i=0;i<16;i++) {
7fe48483 10008 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10009 if ((i % 4) == 3)
7fe48483 10010 cpu_fprintf(f, "\n");
2c0262af 10011 else
7fe48483 10012 cpu_fprintf(f, " ");
2c0262af 10013 }
b5ff1b31 10014 psr = cpsr_read(env);
687fa640
TS
10015 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10016 psr,
b5ff1b31
FB
10017 psr & (1 << 31) ? 'N' : '-',
10018 psr & (1 << 30) ? 'Z' : '-',
10019 psr & (1 << 29) ? 'C' : '-',
10020 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10021 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10022 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10023
f2617cfc
PM
10024 if (flags & CPU_DUMP_FPU) {
10025 int numvfpregs = 0;
10026 if (arm_feature(env, ARM_FEATURE_VFP)) {
10027 numvfpregs += 16;
10028 }
10029 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10030 numvfpregs += 16;
10031 }
10032 for (i = 0; i < numvfpregs; i++) {
10033 uint64_t v = float64_val(env->vfp.regs[i]);
10034 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10035 i * 2, (uint32_t)v,
10036 i * 2 + 1, (uint32_t)(v >> 32),
10037 i, v);
10038 }
10039 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10040 }
2c0262af 10041}
a6b025d3 10042
0ecb72a5 10043void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
10044{
10045 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 10046 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10047}