/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_feature(env, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i64 cpu_exclusive_addr;
static TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

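/* Dual halfword signed multiply: on return, a holds the product of the
   two low halfwords and b the product of the two high halfwords. */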
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

#define GEN_SHIFT(name)                                           \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)  \
{                                                                 \
    TCGv_i32 tmp1, tmp2, tmp3;                                    \
    tmp1 = tcg_temp_new_i32();                                    \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                             \
    tmp2 = tcg_const_i32(0);                                      \
    tmp3 = tcg_const_i32(0x1f);                                   \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3);                                      \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                           \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                       \
    tcg_temp_free_i32(tmp2);                                      \
    tcg_temp_free_i32(tmp1);                                      \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

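/* ASR by register: shift amounts of 32 or more are clamped to 31 so the
   result is filled with copies of the sign bit. */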
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

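/* Absolute value: dest = (src > 0) ? src : -src, computed branch-free
   with a movcond. */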
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * generate a conditional branch based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_gen_test_cc(int cc, int label)
{
    TCGv_i32 tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}

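/* For each ARM data-processing opcode, nonzero if its flag-setting form
   uses the logical rule (N and Z from the result, C from the shifter)
   rather than the arithmetic carry/overflow computation. */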
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    tcg_gen_qemu_st_i32(val, addr, index, OPC); \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else

#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
    tcg_temp_free(addr64); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
    tcg_temp_free(addr64); \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)

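/* Typical use: gen_aa32_ld32u(tmp, addr, IS_USER(s)) emits a guest word
   load at the MMU index for the current privilege level, regardless of
   whether the core is a 32-bit or 64-bit implementation (see gen_vfp_ld
   below for an example). */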
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

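/* Return a pointer to the float_status to use: the standard FPSCR status
   for Neon operations, the normal VFP fp_status otherwise. */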
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round)                                      \
static inline void gen_vfp_##name(int dp, int shift, int neon)        \
{                                                                     \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                        \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift,  \
                                        statusptr);                   \
    } else {                                                          \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift,  \
                                        statusptr);                   \
    }                                                                 \
    tcg_temp_free_i32(tmp_shift);                                     \
    tcg_temp_free_ptr(statusptr);                                     \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
    }
}

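/* Byte offset of a VFP register within CPUARMState: double-precision
   registers map directly, while a single-precision register is the lower
   or upper word of the containing double depending on bit 0 of its
   number. */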
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

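/* Compute the address for an iwMMXt load/store (offset, pre-indexed or
   post-indexed, with optional base writeback) into dest; returns nonzero
   for an invalid addressing mode. */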
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

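/* Fetch the shift amount for an iwMMXt shift instruction, either from one
   of the wCGR control registers or from the low word of a wRn register,
   masked with the caller-supplied mask; returns nonzero if the encoding
   is invalid. */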
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

a1c7273b 1442/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1443 (ie. an undefined instruction). */
0ecb72a5 1444static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1445{
1446 int rd, wrd;
1447 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1448 TCGv_i32 addr;
1449 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1450
1451 if ((insn & 0x0e000e00) == 0x0c000000) {
1452 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1453 wrd = insn & 0xf;
1454 rdlo = (insn >> 12) & 0xf;
1455 rdhi = (insn >> 16) & 0xf;
1456 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1457 iwmmxt_load_reg(cpu_V0, wrd);
1458 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1459 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1460 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1461 } else { /* TMCRR */
da6b5335
FN
1462 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1463 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1464 gen_op_iwmmxt_set_mup();
1465 }
1466 return 0;
1467 }
1468
1469 wrd = (insn >> 12) & 0xf;
7d1b0095 1470 addr = tcg_temp_new_i32();
da6b5335 1471 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1472 tcg_temp_free_i32(addr);
18c9b560 1473 return 1;
da6b5335 1474 }
18c9b560
AZ
1475 if (insn & ARM_CP_RW_BIT) {
1476 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1477 tmp = tcg_temp_new_i32();
08307563 1478 gen_aa32_ld32u(tmp, addr, IS_USER(s));
da6b5335 1479 iwmmxt_store_creg(wrd, tmp);
18c9b560 1480 } else {
e677137d
PB
1481 i = 1;
1482 if (insn & (1 << 8)) {
1483 if (insn & (1 << 22)) { /* WLDRD */
08307563 1484 gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1485 i = 0;
1486 } else { /* WLDRW wRd */
29531141 1487 tmp = tcg_temp_new_i32();
08307563 1488 gen_aa32_ld32u(tmp, addr, IS_USER(s));
e677137d
PB
1489 }
1490 } else {
29531141 1491 tmp = tcg_temp_new_i32();
e677137d 1492 if (insn & (1 << 22)) { /* WLDRH */
08307563 1493 gen_aa32_ld16u(tmp, addr, IS_USER(s));
e677137d 1494 } else { /* WLDRB */
08307563 1495 gen_aa32_ld8u(tmp, addr, IS_USER(s));
e677137d
PB
1496 }
1497 }
1498 if (i) {
1499 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1500 tcg_temp_free_i32(tmp);
e677137d 1501 }
18c9b560
AZ
1502 gen_op_iwmmxt_movq_wRn_M0(wrd);
1503 }
1504 } else {
1505 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1506 tmp = iwmmxt_load_creg(wrd);
08307563 1507 gen_aa32_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1508 } else {
1509 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1510 tmp = tcg_temp_new_i32();
e677137d
PB
1511 if (insn & (1 << 8)) {
1512 if (insn & (1 << 22)) { /* WSTRD */
08307563 1513 gen_aa32_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1514 } else { /* WSTRW wRd */
1515 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1516 gen_aa32_st32(tmp, addr, IS_USER(s));
e677137d
PB
1517 }
1518 } else {
1519 if (insn & (1 << 22)) { /* WSTRH */
1520 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1521 gen_aa32_st16(tmp, addr, IS_USER(s));
e677137d
PB
1522 } else { /* WSTRB */
1523 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1524 gen_aa32_st8(tmp, addr, IS_USER(s));
e677137d
PB
1525 }
1526 }
18c9b560 1527 }
29531141 1528 tcg_temp_free_i32(tmp);
18c9b560 1529 }
7d1b0095 1530 tcg_temp_free_i32(addr);
18c9b560
AZ
1531 return 0;
1532 }
1533
1534 if ((insn & 0x0f000000) != 0x0e000000)
1535 return 1;
1536
1537 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1538 case 0x000: /* WOR */
1539 wrd = (insn >> 12) & 0xf;
1540 rd0 = (insn >> 0) & 0xf;
1541 rd1 = (insn >> 16) & 0xf;
1542 gen_op_iwmmxt_movq_M0_wRn(rd0);
1543 gen_op_iwmmxt_orq_M0_wRn(rd1);
1544 gen_op_iwmmxt_setpsr_nz();
1545 gen_op_iwmmxt_movq_wRn_M0(wrd);
1546 gen_op_iwmmxt_set_mup();
1547 gen_op_iwmmxt_set_cup();
1548 break;
1549 case 0x011: /* TMCR */
1550 if (insn & 0xf)
1551 return 1;
1552 rd = (insn >> 12) & 0xf;
1553 wrd = (insn >> 16) & 0xf;
1554 switch (wrd) {
1555 case ARM_IWMMXT_wCID:
1556 case ARM_IWMMXT_wCASF:
1557 break;
1558 case ARM_IWMMXT_wCon:
1559 gen_op_iwmmxt_set_cup();
1560 /* Fall through. */
1561 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1562 tmp = iwmmxt_load_creg(wrd);
1563 tmp2 = load_reg(s, rd);
f669df27 1564 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1565 tcg_temp_free_i32(tmp2);
da6b5335 1566 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1567 break;
1568 case ARM_IWMMXT_wCGR0:
1569 case ARM_IWMMXT_wCGR1:
1570 case ARM_IWMMXT_wCGR2:
1571 case ARM_IWMMXT_wCGR3:
1572 gen_op_iwmmxt_set_cup();
da6b5335
FN
1573 tmp = load_reg(s, rd);
1574 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1575 break;
1576 default:
1577 return 1;
1578 }
1579 break;
1580 case 0x100: /* WXOR */
1581 wrd = (insn >> 12) & 0xf;
1582 rd0 = (insn >> 0) & 0xf;
1583 rd1 = (insn >> 16) & 0xf;
1584 gen_op_iwmmxt_movq_M0_wRn(rd0);
1585 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1586 gen_op_iwmmxt_setpsr_nz();
1587 gen_op_iwmmxt_movq_wRn_M0(wrd);
1588 gen_op_iwmmxt_set_mup();
1589 gen_op_iwmmxt_set_cup();
1590 break;
1591 case 0x111: /* TMRC */
1592 if (insn & 0xf)
1593 return 1;
1594 rd = (insn >> 12) & 0xf;
1595 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1596 tmp = iwmmxt_load_creg(wrd);
1597 store_reg(s, rd, tmp);
18c9b560
AZ
1598 break;
1599 case 0x300: /* WANDN */
1600 wrd = (insn >> 12) & 0xf;
1601 rd0 = (insn >> 0) & 0xf;
1602 rd1 = (insn >> 16) & 0xf;
1603 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1604 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1605 gen_op_iwmmxt_andq_M0_wRn(rd1);
1606 gen_op_iwmmxt_setpsr_nz();
1607 gen_op_iwmmxt_movq_wRn_M0(wrd);
1608 gen_op_iwmmxt_set_mup();
1609 gen_op_iwmmxt_set_cup();
1610 break;
1611 case 0x200: /* WAND */
1612 wrd = (insn >> 12) & 0xf;
1613 rd0 = (insn >> 0) & 0xf;
1614 rd1 = (insn >> 16) & 0xf;
1615 gen_op_iwmmxt_movq_M0_wRn(rd0);
1616 gen_op_iwmmxt_andq_M0_wRn(rd1);
1617 gen_op_iwmmxt_setpsr_nz();
1618 gen_op_iwmmxt_movq_wRn_M0(wrd);
1619 gen_op_iwmmxt_set_mup();
1620 gen_op_iwmmxt_set_cup();
1621 break;
1622 case 0x810: case 0xa10: /* WMADD */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 0) & 0xf;
1625 rd1 = (insn >> 16) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 if (insn & (1 << 21))
1628 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1629 else
1630 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1631 gen_op_iwmmxt_movq_wRn_M0(wrd);
1632 gen_op_iwmmxt_set_mup();
1633 break;
1634 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 16) & 0xf;
1637 rd1 = (insn >> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 switch ((insn >> 22) & 3) {
1640 case 0:
1641 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1642 break;
1643 case 1:
1644 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1645 break;
1646 case 2:
1647 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1648 break;
1649 case 3:
1650 return 1;
1651 }
1652 gen_op_iwmmxt_movq_wRn_M0(wrd);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1655 break;
1656 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1657 wrd = (insn >> 12) & 0xf;
1658 rd0 = (insn >> 16) & 0xf;
1659 rd1 = (insn >> 0) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0);
1661 switch ((insn >> 22) & 3) {
1662 case 0:
1663 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1664 break;
1665 case 1:
1666 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1667 break;
1668 case 2:
1669 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1670 break;
1671 case 3:
1672 return 1;
1673 }
1674 gen_op_iwmmxt_movq_wRn_M0(wrd);
1675 gen_op_iwmmxt_set_mup();
1676 gen_op_iwmmxt_set_cup();
1677 break;
1678 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1679 wrd = (insn >> 12) & 0xf;
1680 rd0 = (insn >> 16) & 0xf;
1681 rd1 = (insn >> 0) & 0xf;
1682 gen_op_iwmmxt_movq_M0_wRn(rd0);
1683 if (insn & (1 << 22))
1684 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1685 else
1686 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1687 if (!(insn & (1 << 20)))
1688 gen_op_iwmmxt_addl_M0_wRn(wrd);
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 break;
1692 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1697 if (insn & (1 << 21)) {
1698 if (insn & (1 << 20))
1699 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1700 else
1701 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1702 } else {
1703 if (insn & (1 << 20))
1704 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1705 else
1706 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1707 }
18c9b560
AZ
1708 gen_op_iwmmxt_movq_wRn_M0(wrd);
1709 gen_op_iwmmxt_set_mup();
1710 break;
1711 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1712 wrd = (insn >> 12) & 0xf;
1713 rd0 = (insn >> 16) & 0xf;
1714 rd1 = (insn >> 0) & 0xf;
1715 gen_op_iwmmxt_movq_M0_wRn(rd0);
1716 if (insn & (1 << 21))
1717 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1718 else
1719 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1720 if (!(insn & (1 << 20))) {
e677137d
PB
1721 iwmmxt_load_reg(cpu_V1, wrd);
1722 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1723 }
1724 gen_op_iwmmxt_movq_wRn_M0(wrd);
1725 gen_op_iwmmxt_set_mup();
1726 break;
1727 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1728 wrd = (insn >> 12) & 0xf;
1729 rd0 = (insn >> 16) & 0xf;
1730 rd1 = (insn >> 0) & 0xf;
1731 gen_op_iwmmxt_movq_M0_wRn(rd0);
1732 switch ((insn >> 22) & 3) {
1733 case 0:
1734 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1735 break;
1736 case 1:
1737 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1738 break;
1739 case 2:
1740 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1741 break;
1742 case 3:
1743 return 1;
1744 }
1745 gen_op_iwmmxt_movq_wRn_M0(wrd);
1746 gen_op_iwmmxt_set_mup();
1747 gen_op_iwmmxt_set_cup();
1748 break;
1749 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1750 wrd = (insn >> 12) & 0xf;
1751 rd0 = (insn >> 16) & 0xf;
1752 rd1 = (insn >> 0) & 0xf;
1753 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1754 if (insn & (1 << 22)) {
1755 if (insn & (1 << 20))
1756 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1757 else
1758 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1759 } else {
1760 if (insn & (1 << 20))
1761 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1762 else
1763 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1764 }
18c9b560
AZ
1765 gen_op_iwmmxt_movq_wRn_M0(wrd);
1766 gen_op_iwmmxt_set_mup();
1767 gen_op_iwmmxt_set_cup();
1768 break;
1769 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1770 wrd = (insn >> 12) & 0xf;
1771 rd0 = (insn >> 16) & 0xf;
1772 rd1 = (insn >> 0) & 0xf;
1773 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1774 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1775 tcg_gen_andi_i32(tmp, tmp, 7);
1776 iwmmxt_load_reg(cpu_V1, rd1);
1777 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1778 tcg_temp_free_i32(tmp);
18c9b560
AZ
1779 gen_op_iwmmxt_movq_wRn_M0(wrd);
1780 gen_op_iwmmxt_set_mup();
1781 break;
1782 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1783 if (((insn >> 6) & 3) == 3)
1784 return 1;
18c9b560
AZ
1785 rd = (insn >> 12) & 0xf;
1786 wrd = (insn >> 16) & 0xf;
da6b5335 1787 tmp = load_reg(s, rd);
18c9b560
AZ
1788 gen_op_iwmmxt_movq_M0_wRn(wrd);
1789 switch ((insn >> 6) & 3) {
1790 case 0:
da6b5335
FN
1791 tmp2 = tcg_const_i32(0xff);
1792 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1793 break;
1794 case 1:
da6b5335
FN
1795 tmp2 = tcg_const_i32(0xffff);
1796 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1797 break;
1798 case 2:
da6b5335
FN
1799 tmp2 = tcg_const_i32(0xffffffff);
1800 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1801 break;
da6b5335 1802 default:
39d5492a
PM
1803 TCGV_UNUSED_I32(tmp2);
1804 TCGV_UNUSED_I32(tmp3);
18c9b560 1805 }
da6b5335 1806 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1807 tcg_temp_free_i32(tmp3);
1808 tcg_temp_free_i32(tmp2);
7d1b0095 1809 tcg_temp_free_i32(tmp);
18c9b560
AZ
1810 gen_op_iwmmxt_movq_wRn_M0(wrd);
1811 gen_op_iwmmxt_set_mup();
1812 break;
1813 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1814 rd = (insn >> 12) & 0xf;
1815 wrd = (insn >> 16) & 0xf;
da6b5335 1816 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1817 return 1;
1818 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1819 tmp = tcg_temp_new_i32();
18c9b560
AZ
1820 switch ((insn >> 22) & 3) {
1821 case 0:
da6b5335
FN
1822 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1823 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1824 if (insn & 8) {
1825 tcg_gen_ext8s_i32(tmp, tmp);
1826 } else {
1827 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1828 }
1829 break;
1830 case 1:
da6b5335
FN
1831 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1832 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1833 if (insn & 8) {
1834 tcg_gen_ext16s_i32(tmp, tmp);
1835 } else {
1836 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1837 }
1838 break;
1839 case 2:
da6b5335
FN
1840 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1841 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1842 break;
18c9b560 1843 }
da6b5335 1844 store_reg(s, rd, tmp);
18c9b560
AZ
1845 break;
1846 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1847 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1848 return 1;
da6b5335 1849 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1850 switch ((insn >> 22) & 3) {
1851 case 0:
da6b5335 1852 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1853 break;
1854 case 1:
da6b5335 1855 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1856 break;
1857 case 2:
da6b5335 1858 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1859 break;
18c9b560 1860 }
da6b5335
FN
1861 tcg_gen_shli_i32(tmp, tmp, 28);
1862 gen_set_nzcv(tmp);
7d1b0095 1863 tcg_temp_free_i32(tmp);
18c9b560
AZ
1864 break;
1865 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1866 if (((insn >> 6) & 3) == 3)
1867 return 1;
18c9b560
AZ
1868 rd = (insn >> 12) & 0xf;
1869 wrd = (insn >> 16) & 0xf;
da6b5335 1870 tmp = load_reg(s, rd);
18c9b560
AZ
1871 switch ((insn >> 6) & 3) {
1872 case 0:
da6b5335 1873 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1874 break;
1875 case 1:
da6b5335 1876 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1877 break;
1878 case 2:
da6b5335 1879 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1880 break;
18c9b560 1881 }
7d1b0095 1882 tcg_temp_free_i32(tmp);
18c9b560
AZ
1883 gen_op_iwmmxt_movq_wRn_M0(wrd);
1884 gen_op_iwmmxt_set_mup();
1885 break;
1886 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1887 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1888 return 1;
da6b5335 1889 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1890 tmp2 = tcg_temp_new_i32();
da6b5335 1891 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1892 switch ((insn >> 22) & 3) {
1893 case 0:
1894 for (i = 0; i < 7; i ++) {
da6b5335
FN
1895 tcg_gen_shli_i32(tmp2, tmp2, 4);
1896 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1897 }
1898 break;
1899 case 1:
1900 for (i = 0; i < 3; i ++) {
da6b5335
FN
1901 tcg_gen_shli_i32(tmp2, tmp2, 8);
1902 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1903 }
1904 break;
1905 case 2:
da6b5335
FN
1906 tcg_gen_shli_i32(tmp2, tmp2, 16);
1907 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1908 break;
18c9b560 1909 }
da6b5335 1910 gen_set_nzcv(tmp);
7d1b0095
PM
1911 tcg_temp_free_i32(tmp2);
1912 tcg_temp_free_i32(tmp);
18c9b560
AZ
1913 break;
1914 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1915 wrd = (insn >> 12) & 0xf;
1916 rd0 = (insn >> 16) & 0xf;
1917 gen_op_iwmmxt_movq_M0_wRn(rd0);
1918 switch ((insn >> 22) & 3) {
1919 case 0:
e677137d 1920 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1921 break;
1922 case 1:
e677137d 1923 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1924 break;
1925 case 2:
e677137d 1926 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1927 break;
1928 case 3:
1929 return 1;
1930 }
1931 gen_op_iwmmxt_movq_wRn_M0(wrd);
1932 gen_op_iwmmxt_set_mup();
1933 break;
1934 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1935 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1936 return 1;
da6b5335 1937 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1938 tmp2 = tcg_temp_new_i32();
da6b5335 1939 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1940 switch ((insn >> 22) & 3) {
1941 case 0:
1942 for (i = 0; i < 7; i ++) {
da6b5335
FN
1943 tcg_gen_shli_i32(tmp2, tmp2, 4);
1944 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1945 }
1946 break;
1947 case 1:
1948 for (i = 0; i < 3; i ++) {
da6b5335
FN
1949 tcg_gen_shli_i32(tmp2, tmp2, 8);
1950 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1951 }
1952 break;
1953 case 2:
da6b5335
FN
1954 tcg_gen_shli_i32(tmp2, tmp2, 16);
1955 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1956 break;
18c9b560 1957 }
da6b5335 1958 gen_set_nzcv(tmp);
7d1b0095
PM
1959 tcg_temp_free_i32(tmp2);
1960 tcg_temp_free_i32(tmp);
18c9b560
AZ
1961 break;
1962 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1963 rd = (insn >> 12) & 0xf;
1964 rd0 = (insn >> 16) & 0xf;
da6b5335 1965 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1966 return 1;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1968 tmp = tcg_temp_new_i32();
18c9b560
AZ
1969 switch ((insn >> 22) & 3) {
1970 case 0:
da6b5335 1971 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1972 break;
1973 case 1:
da6b5335 1974 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1975 break;
1976 case 2:
da6b5335 1977 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1978 break;
18c9b560 1979 }
da6b5335 1980 store_reg(s, rd, tmp);
18c9b560
AZ
1981 break;
1982 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1983 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1984 wrd = (insn >> 12) & 0xf;
1985 rd0 = (insn >> 16) & 0xf;
1986 rd1 = (insn >> 0) & 0xf;
1987 gen_op_iwmmxt_movq_M0_wRn(rd0);
1988 switch ((insn >> 22) & 3) {
1989 case 0:
1990 if (insn & (1 << 21))
1991 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1992 else
1993 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1994 break;
1995 case 1:
1996 if (insn & (1 << 21))
1997 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1998 else
1999 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2000 break;
2001 case 2:
2002 if (insn & (1 << 21))
2003 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2004 else
2005 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2006 break;
2007 case 3:
2008 return 1;
2009 }
2010 gen_op_iwmmxt_movq_wRn_M0(wrd);
2011 gen_op_iwmmxt_set_mup();
2012 gen_op_iwmmxt_set_cup();
2013 break;
2014 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2015 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2016 wrd = (insn >> 12) & 0xf;
2017 rd0 = (insn >> 16) & 0xf;
2018 gen_op_iwmmxt_movq_M0_wRn(rd0);
2019 switch ((insn >> 22) & 3) {
2020 case 0:
2021 if (insn & (1 << 21))
2022 gen_op_iwmmxt_unpacklsb_M0();
2023 else
2024 gen_op_iwmmxt_unpacklub_M0();
2025 break;
2026 case 1:
2027 if (insn & (1 << 21))
2028 gen_op_iwmmxt_unpacklsw_M0();
2029 else
2030 gen_op_iwmmxt_unpackluw_M0();
2031 break;
2032 case 2:
2033 if (insn & (1 << 21))
2034 gen_op_iwmmxt_unpacklsl_M0();
2035 else
2036 gen_op_iwmmxt_unpacklul_M0();
2037 break;
2038 case 3:
2039 return 1;
2040 }
2041 gen_op_iwmmxt_movq_wRn_M0(wrd);
2042 gen_op_iwmmxt_set_mup();
2043 gen_op_iwmmxt_set_cup();
2044 break;
2045 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2046 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2047 wrd = (insn >> 12) & 0xf;
2048 rd0 = (insn >> 16) & 0xf;
2049 gen_op_iwmmxt_movq_M0_wRn(rd0);
2050 switch ((insn >> 22) & 3) {
2051 case 0:
2052 if (insn & (1 << 21))
2053 gen_op_iwmmxt_unpackhsb_M0();
2054 else
2055 gen_op_iwmmxt_unpackhub_M0();
2056 break;
2057 case 1:
2058 if (insn & (1 << 21))
2059 gen_op_iwmmxt_unpackhsw_M0();
2060 else
2061 gen_op_iwmmxt_unpackhuw_M0();
2062 break;
2063 case 2:
2064 if (insn & (1 << 21))
2065 gen_op_iwmmxt_unpackhsl_M0();
2066 else
2067 gen_op_iwmmxt_unpackhul_M0();
2068 break;
2069 case 3:
2070 return 1;
2071 }
2072 gen_op_iwmmxt_movq_wRn_M0(wrd);
2073 gen_op_iwmmxt_set_mup();
2074 gen_op_iwmmxt_set_cup();
2075 break;
2076 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2077 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2078 if (((insn >> 22) & 3) == 0)
2079 return 1;
18c9b560
AZ
2080 wrd = (insn >> 12) & 0xf;
2081 rd0 = (insn >> 16) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2083 tmp = tcg_temp_new_i32();
da6b5335 2084 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2085 tcg_temp_free_i32(tmp);
18c9b560 2086 return 1;
da6b5335 2087 }
18c9b560 2088 switch ((insn >> 22) & 3) {
18c9b560 2089 case 1:
477955bd 2090 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2091 break;
2092 case 2:
477955bd 2093 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2094 break;
2095 case 3:
477955bd 2096 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2097 break;
2098 }
7d1b0095 2099 tcg_temp_free_i32(tmp);
18c9b560
AZ
2100 gen_op_iwmmxt_movq_wRn_M0(wrd);
2101 gen_op_iwmmxt_set_mup();
2102 gen_op_iwmmxt_set_cup();
2103 break;
2104 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2105 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2106 if (((insn >> 22) & 3) == 0)
2107 return 1;
18c9b560
AZ
2108 wrd = (insn >> 12) & 0xf;
2109 rd0 = (insn >> 16) & 0xf;
2110 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2111 tmp = tcg_temp_new_i32();
da6b5335 2112 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2113 tcg_temp_free_i32(tmp);
18c9b560 2114 return 1;
da6b5335 2115 }
18c9b560 2116 switch ((insn >> 22) & 3) {
18c9b560 2117 case 1:
477955bd 2118 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2119 break;
2120 case 2:
477955bd 2121 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2122 break;
2123 case 3:
477955bd 2124 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2125 break;
2126 }
7d1b0095 2127 tcg_temp_free_i32(tmp);
18c9b560
AZ
2128 gen_op_iwmmxt_movq_wRn_M0(wrd);
2129 gen_op_iwmmxt_set_mup();
2130 gen_op_iwmmxt_set_cup();
2131 break;
2132 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2133 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2134 if (((insn >> 22) & 3) == 0)
2135 return 1;
18c9b560
AZ
2136 wrd = (insn >> 12) & 0xf;
2137 rd0 = (insn >> 16) & 0xf;
2138 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2139 tmp = tcg_temp_new_i32();
da6b5335 2140 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2141 tcg_temp_free_i32(tmp);
18c9b560 2142 return 1;
da6b5335 2143 }
18c9b560 2144 switch ((insn >> 22) & 3) {
18c9b560 2145 case 1:
477955bd 2146 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2147 break;
2148 case 2:
477955bd 2149 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2150 break;
2151 case 3:
477955bd 2152 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2153 break;
2154 }
7d1b0095 2155 tcg_temp_free_i32(tmp);
18c9b560
AZ
2156 gen_op_iwmmxt_movq_wRn_M0(wrd);
2157 gen_op_iwmmxt_set_mup();
2158 gen_op_iwmmxt_set_cup();
2159 break;
2160 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2161 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2162 if (((insn >> 22) & 3) == 0)
2163 return 1;
18c9b560
AZ
2164 wrd = (insn >> 12) & 0xf;
2165 rd0 = (insn >> 16) & 0xf;
2166 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2167 tmp = tcg_temp_new_i32();
18c9b560 2168 switch ((insn >> 22) & 3) {
18c9b560 2169 case 1:
da6b5335 2170 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2171 tcg_temp_free_i32(tmp);
18c9b560 2172 return 1;
da6b5335 2173 }
477955bd 2174 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2175 break;
2176 case 2:
da6b5335 2177 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2178 tcg_temp_free_i32(tmp);
18c9b560 2179 return 1;
da6b5335 2180 }
477955bd 2181 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2182 break;
2183 case 3:
da6b5335 2184 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2185 tcg_temp_free_i32(tmp);
18c9b560 2186 return 1;
da6b5335 2187 }
477955bd 2188 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2189 break;
2190 }
7d1b0095 2191 tcg_temp_free_i32(tmp);
18c9b560
AZ
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 gen_op_iwmmxt_set_cup();
2195 break;
2196 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2197 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 rd1 = (insn >> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0);
2202 switch ((insn >> 22) & 3) {
2203 case 0:
2204 if (insn & (1 << 21))
2205 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2206 else
2207 gen_op_iwmmxt_minub_M0_wRn(rd1);
2208 break;
2209 case 1:
2210 if (insn & (1 << 21))
2211 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2212 else
2213 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2214 break;
2215 case 2:
2216 if (insn & (1 << 21))
2217 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2218 else
2219 gen_op_iwmmxt_minul_M0_wRn(rd1);
2220 break;
2221 case 3:
2222 return 1;
2223 }
2224 gen_op_iwmmxt_movq_wRn_M0(wrd);
2225 gen_op_iwmmxt_set_mup();
2226 break;
2227 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2228 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2229 wrd = (insn >> 12) & 0xf;
2230 rd0 = (insn >> 16) & 0xf;
2231 rd1 = (insn >> 0) & 0xf;
2232 gen_op_iwmmxt_movq_M0_wRn(rd0);
2233 switch ((insn >> 22) & 3) {
2234 case 0:
2235 if (insn & (1 << 21))
2236 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2237 else
2238 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2239 break;
2240 case 1:
2241 if (insn & (1 << 21))
2242 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2243 else
2244 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2245 break;
2246 case 2:
2247 if (insn & (1 << 21))
2248 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2249 else
2250 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2251 break;
2252 case 3:
2253 return 1;
2254 }
2255 gen_op_iwmmxt_movq_wRn_M0(wrd);
2256 gen_op_iwmmxt_set_mup();
2257 break;
2258 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2259 case 0x402: case 0x502: case 0x602: case 0x702:
2260 wrd = (insn >> 12) & 0xf;
2261 rd0 = (insn >> 16) & 0xf;
2262 rd1 = (insn >> 0) & 0xf;
2263 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2264 tmp = tcg_const_i32((insn >> 20) & 3);
2265 iwmmxt_load_reg(cpu_V1, rd1);
2266 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2267 tcg_temp_free_i32(tmp);
18c9b560
AZ
2268 gen_op_iwmmxt_movq_wRn_M0(wrd);
2269 gen_op_iwmmxt_set_mup();
2270 break;
2271 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2272 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2273 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2274 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2275 wrd = (insn >> 12) & 0xf;
2276 rd0 = (insn >> 16) & 0xf;
2277 rd1 = (insn >> 0) & 0xf;
2278 gen_op_iwmmxt_movq_M0_wRn(rd0);
2279 switch ((insn >> 20) & 0xf) {
2280 case 0x0:
2281 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2282 break;
2283 case 0x1:
2284 gen_op_iwmmxt_subub_M0_wRn(rd1);
2285 break;
2286 case 0x3:
2287 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2288 break;
2289 case 0x4:
2290 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2291 break;
2292 case 0x5:
2293 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2294 break;
2295 case 0x7:
2296 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2297 break;
2298 case 0x8:
2299 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2300 break;
2301 case 0x9:
2302 gen_op_iwmmxt_subul_M0_wRn(rd1);
2303 break;
2304 case 0xb:
2305 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2306 break;
2307 default:
2308 return 1;
2309 }
2310 gen_op_iwmmxt_movq_wRn_M0(wrd);
2311 gen_op_iwmmxt_set_mup();
2312 gen_op_iwmmxt_set_cup();
2313 break;
2314 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2315 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2316 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2317 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2318 wrd = (insn >> 12) & 0xf;
2319 rd0 = (insn >> 16) & 0xf;
2320 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2321 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2322 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2323 tcg_temp_free_i32(tmp);
18c9b560
AZ
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2327 break;
2328 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2329 case 0x418: case 0x518: case 0x618: case 0x718:
2330 case 0x818: case 0x918: case 0xa18: case 0xb18:
2331 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2332 wrd = (insn >> 12) & 0xf;
2333 rd0 = (insn >> 16) & 0xf;
2334 rd1 = (insn >> 0) & 0xf;
2335 gen_op_iwmmxt_movq_M0_wRn(rd0);
2336 switch ((insn >> 20) & 0xf) {
2337 case 0x0:
2338 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2339 break;
2340 case 0x1:
2341 gen_op_iwmmxt_addub_M0_wRn(rd1);
2342 break;
2343 case 0x3:
2344 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2345 break;
2346 case 0x4:
2347 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2348 break;
2349 case 0x5:
2350 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2351 break;
2352 case 0x7:
2353 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2354 break;
2355 case 0x8:
2356 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2357 break;
2358 case 0x9:
2359 gen_op_iwmmxt_addul_M0_wRn(rd1);
2360 break;
2361 case 0xb:
2362 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2363 break;
2364 default:
2365 return 1;
2366 }
2367 gen_op_iwmmxt_movq_wRn_M0(wrd);
2368 gen_op_iwmmxt_set_mup();
2369 gen_op_iwmmxt_set_cup();
2370 break;
2371 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2372 case 0x408: case 0x508: case 0x608: case 0x708:
2373 case 0x808: case 0x908: case 0xa08: case 0xb08:
2374 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2375 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2376 return 1;
18c9b560
AZ
2377 wrd = (insn >> 12) & 0xf;
2378 rd0 = (insn >> 16) & 0xf;
2379 rd1 = (insn >> 0) & 0xf;
2380 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2381 switch ((insn >> 22) & 3) {
18c9b560
AZ
2382 case 1:
2383 if (insn & (1 << 21))
2384 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2385 else
2386 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2387 break;
2388 case 2:
2389 if (insn & (1 << 21))
2390 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2391 else
2392 gen_op_iwmmxt_packul_M0_wRn(rd1);
2393 break;
2394 case 3:
2395 if (insn & (1 << 21))
2396 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2397 else
2398 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2399 break;
2400 }
2401 gen_op_iwmmxt_movq_wRn_M0(wrd);
2402 gen_op_iwmmxt_set_mup();
2403 gen_op_iwmmxt_set_cup();
2404 break;
2405 case 0x201: case 0x203: case 0x205: case 0x207:
2406 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2407 case 0x211: case 0x213: case 0x215: case 0x217:
2408 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2409 wrd = (insn >> 5) & 0xf;
2410 rd0 = (insn >> 12) & 0xf;
2411 rd1 = (insn >> 0) & 0xf;
2412 if (rd0 == 0xf || rd1 == 0xf)
2413 return 1;
2414 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2415 tmp = load_reg(s, rd0);
2416 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2417 switch ((insn >> 16) & 0xf) {
2418 case 0x0: /* TMIA */
da6b5335 2419 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2420 break;
2421 case 0x8: /* TMIAPH */
da6b5335 2422 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2423 break;
2424 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2425 if (insn & (1 << 16))
da6b5335 2426 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2427 if (insn & (1 << 17))
da6b5335
FN
2428 tcg_gen_shri_i32(tmp2, tmp2, 16);
2429 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2430 break;
2431 default:
7d1b0095
PM
2432 tcg_temp_free_i32(tmp2);
2433 tcg_temp_free_i32(tmp);
18c9b560
AZ
2434 return 1;
2435 }
7d1b0095
PM
2436 tcg_temp_free_i32(tmp2);
2437 tcg_temp_free_i32(tmp);
18c9b560
AZ
2438 gen_op_iwmmxt_movq_wRn_M0(wrd);
2439 gen_op_iwmmxt_set_mup();
2440 break;
2441 default:
2442 return 1;
2443 }
2444
2445 return 0;
2446}
2447
a1c7273b 2448/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2449 (i.e. an undefined instruction). */
0ecb72a5 2450static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2451{
2452 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2453 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2454
2455 if ((insn & 0x0ff00f10) == 0x0e200010) {
2456 /* Multiply with Internal Accumulate Format */
2457 rd0 = (insn >> 12) & 0xf;
2458 rd1 = insn & 0xf;
2459 acc = (insn >> 5) & 7;
2460
2461 if (acc != 0)
2462 return 1;
2463
3a554c0f
FN
2464 tmp = load_reg(s, rd0);
2465 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2466 switch ((insn >> 16) & 0xf) {
2467 case 0x0: /* MIA */
3a554c0f 2468 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2469 break;
2470 case 0x8: /* MIAPH */
3a554c0f 2471 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2472 break;
2473 case 0xc: /* MIABB */
2474 case 0xd: /* MIABT */
2475 case 0xe: /* MIATB */
2476 case 0xf: /* MIATT */
18c9b560 2477 if (insn & (1 << 16))
3a554c0f 2478 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2479 if (insn & (1 << 17))
3a554c0f
FN
2480 tcg_gen_shri_i32(tmp2, tmp2, 16);
2481 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2482 break;
2483 default:
2484 return 1;
2485 }
7d1b0095
PM
2486 tcg_temp_free_i32(tmp2);
2487 tcg_temp_free_i32(tmp);
18c9b560
AZ
2488
2489 gen_op_iwmmxt_movq_wRn_M0(acc);
2490 return 0;
2491 }
2492
2493 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2494 /* Internal Accumulator Access Format */
2495 rdhi = (insn >> 16) & 0xf;
2496 rdlo = (insn >> 12) & 0xf;
2497 acc = insn & 7;
2498
2499 if (acc != 0)
2500 return 1;
2501
2502 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2503 iwmmxt_load_reg(cpu_V0, acc);
2504 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2505 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2506 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2507 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2508 } else { /* MAR */
3a554c0f
FN
2509 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2510 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2511 }
2512 return 0;
2513 }
2514
2515 return 1;
2516}
2517
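/* Editor's note (illustrative, not part of the original source): the MRA
 * path above splits the 40-bit XScale accumulator acc0 across two core
 * registers.  As a worked example, if acc0 holds 0x12_3456789a, RdLo
 * receives 0x3456789a and RdHi receives 0x12, because only bits [39:32]
 * survive the final mask of (1 << (40 - 32)) - 1 == 0xff.  MAR simply
 * concatenates RdHi:RdLo back into a 64-bit TCG value and stores it.
 */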
9ee6e8bb
PB
2518#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2519#define VFP_SREG(insn, bigbit, smallbit) \
2520 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2521#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2522 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2523 reg = (((insn) >> (bigbit)) & 0x0f) \
2524 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2525 } else { \
2526 if (insn & (1 << (smallbit))) \
2527 return 1; \
2528 reg = ((insn) >> (bigbit)) & 0x0f; \
2529 }} while (0)
2530
2531#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2532#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2533#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2534#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2535#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2536#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2537
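/* Editor's note (illustrative sketch, not part of the original source):
 * the macros above rebuild a register index from a split encoding.  For
 * the D field, bits [15:12] supply the upper four bits of a single-
 * precision index and bit 22 the lowest bit, so bits [15:12] == 4 with
 * bit 22 set decodes to s9; with VFP3, VFP_DREG_D instead treats bit 22
 * as bit 4 of a double-precision index, giving d20.  Pre-VFP3 cores only
 * have d0-d15, so a set "small" bit makes VFP_DREG bail out with UNDEF.
 */
#if 0   /* Plain C restatement of VFP_SREG_D, for illustration only. */
static int vfp_sreg_d_example(uint32_t insn)
{
    /* bits [15:12] shifted into positions [4:1], bit 22 as bit 0 */
    return ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
}
#endif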
4373f3ce 2538/* Move between integer and VFP cores. */
39d5492a 2539static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2540{
39d5492a 2541 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2542 tcg_gen_mov_i32(tmp, cpu_F0s);
2543 return tmp;
2544}
2545
39d5492a 2546static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2547{
2548 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2549 tcg_temp_free_i32(tmp);
4373f3ce
PB
2550}
2551
39d5492a 2552static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2553{
39d5492a 2554 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2555 if (shift)
2556 tcg_gen_shri_i32(var, var, shift);
86831435 2557 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2558 tcg_gen_shli_i32(tmp, var, 8);
2559 tcg_gen_or_i32(var, var, tmp);
2560 tcg_gen_shli_i32(tmp, var, 16);
2561 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2562 tcg_temp_free_i32(tmp);
ad69471c
PB
2563}
2564
39d5492a 2565static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2566{
39d5492a 2567 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2568 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2569 tcg_gen_shli_i32(tmp, var, 16);
2570 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2571 tcg_temp_free_i32(tmp);
ad69471c
PB
2572}
2573
39d5492a 2574static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2575{
39d5492a 2576 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2577 tcg_gen_andi_i32(var, var, 0xffff0000);
2578 tcg_gen_shri_i32(tmp, var, 16);
2579 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2580 tcg_temp_free_i32(tmp);
ad69471c
PB
2581}
2582
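/* Editor's note (illustrative, not part of the original source): the three
 * helpers above splat one element across a 32-bit value, e.g. 0x000000ab
 * becomes 0xabababab for gen_neon_dup_u8, and 0x0000abcd (or 0xabcd0000
 * for the high variant) becomes 0xabcdabcd for the 16-bit forms.
 */
#if 0   /* Plain C sketch of the byte replication done with TCG ops above. */
static uint32_t neon_dup_u8_example(uint32_t var, int shift)
{
    var = (var >> shift) & 0xff;    /* select and zero-extend the byte */
    var |= var << 8;                /* 0x000000ab -> 0x0000abab        */
    var |= var << 16;               /* 0x0000abab -> 0xabababab        */
    return var;
}
#endif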
39d5492a 2583static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2584{
2585 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2586 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2587 switch (size) {
2588 case 0:
08307563 2589 gen_aa32_ld8u(tmp, addr, IS_USER(s));
8e18cde3
PM
2590 gen_neon_dup_u8(tmp, 0);
2591 break;
2592 case 1:
08307563 2593 gen_aa32_ld16u(tmp, addr, IS_USER(s));
8e18cde3
PM
2594 gen_neon_dup_low16(tmp);
2595 break;
2596 case 2:
08307563 2597 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8e18cde3
PM
2598 break;
2599 default: /* Avoid compiler warnings. */
2600 abort();
2601 }
2602 return tmp;
2603}
2604
04731fb5
WN
2605static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2606 uint32_t dp)
2607{
2608 uint32_t cc = extract32(insn, 20, 2);
2609
2610 if (dp) {
2611 TCGv_i64 frn, frm, dest;
2612 TCGv_i64 tmp, zero, zf, nf, vf;
2613
2614 zero = tcg_const_i64(0);
2615
2616 frn = tcg_temp_new_i64();
2617 frm = tcg_temp_new_i64();
2618 dest = tcg_temp_new_i64();
2619
2620 zf = tcg_temp_new_i64();
2621 nf = tcg_temp_new_i64();
2622 vf = tcg_temp_new_i64();
2623
2624 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2625 tcg_gen_ext_i32_i64(nf, cpu_NF);
2626 tcg_gen_ext_i32_i64(vf, cpu_VF);
2627
2628 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2629 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2630 switch (cc) {
2631 case 0: /* eq: Z */
2632 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2633 frn, frm);
2634 break;
2635 case 1: /* vs: V */
2636 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2637 frn, frm);
2638 break;
2639 case 2: /* ge: N == V -> N ^ V == 0 */
2640 tmp = tcg_temp_new_i64();
2641 tcg_gen_xor_i64(tmp, vf, nf);
2642 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2643 frn, frm);
2644 tcg_temp_free_i64(tmp);
2645 break;
2646 case 3: /* gt: !Z && N == V */
2647 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2648 frn, frm);
2649 tmp = tcg_temp_new_i64();
2650 tcg_gen_xor_i64(tmp, vf, nf);
2651 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2652 dest, frm);
2653 tcg_temp_free_i64(tmp);
2654 break;
2655 }
2656 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2657 tcg_temp_free_i64(frn);
2658 tcg_temp_free_i64(frm);
2659 tcg_temp_free_i64(dest);
2660
2661 tcg_temp_free_i64(zf);
2662 tcg_temp_free_i64(nf);
2663 tcg_temp_free_i64(vf);
2664
2665 tcg_temp_free_i64(zero);
2666 } else {
2667 TCGv_i32 frn, frm, dest;
2668 TCGv_i32 tmp, zero;
2669
2670 zero = tcg_const_i32(0);
2671
2672 frn = tcg_temp_new_i32();
2673 frm = tcg_temp_new_i32();
2674 dest = tcg_temp_new_i32();
2675 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2676 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2677 switch (cc) {
2678 case 0: /* eq: Z */
2679 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2680 frn, frm);
2681 break;
2682 case 1: /* vs: V */
2683 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2684 frn, frm);
2685 break;
2686 case 2: /* ge: N == V -> N ^ V == 0 */
2687 tmp = tcg_temp_new_i32();
2688 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2689 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2690 frn, frm);
2691 tcg_temp_free_i32(tmp);
2692 break;
2693 case 3: /* gt: !Z && N == V */
2694 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2695 frn, frm);
2696 tmp = tcg_temp_new_i32();
2697 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2698 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2699 dest, frm);
2700 tcg_temp_free_i32(tmp);
2701 break;
2702 }
2703 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2704 tcg_temp_free_i32(frn);
2705 tcg_temp_free_i32(frm);
2706 tcg_temp_free_i32(dest);
2707
2708 tcg_temp_free_i32(zero);
2709 }
2710
2711 return 0;
2712}
2713
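/* Editor's note (illustrative, not part of the original source): the
 * comparisons above rely on how this file stores the flags: cpu_ZF is
 * zero exactly when Z is set, while cpu_NF and cpu_VF carry N and V in
 * their sign bits.  "ge" therefore reduces to a signed (VF ^ NF) >= 0
 * test, and "gt" is built from two movconds: pick frn only if Z is
 * clear, then overwrite the result with frm unless N == V.
 */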
40cfacdd
WN
2714static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2715 uint32_t rm, uint32_t dp)
2716{
2717 uint32_t vmin = extract32(insn, 6, 1);
2718 TCGv_ptr fpst = get_fpstatus_ptr(0);
2719
2720 if (dp) {
2721 TCGv_i64 frn, frm, dest;
2722
2723 frn = tcg_temp_new_i64();
2724 frm = tcg_temp_new_i64();
2725 dest = tcg_temp_new_i64();
2726
2727 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2728 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2729 if (vmin) {
f71a2ae5 2730 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2731 } else {
f71a2ae5 2732 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2733 }
2734 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2735 tcg_temp_free_i64(frn);
2736 tcg_temp_free_i64(frm);
2737 tcg_temp_free_i64(dest);
2738 } else {
2739 TCGv_i32 frn, frm, dest;
2740
2741 frn = tcg_temp_new_i32();
2742 frm = tcg_temp_new_i32();
2743 dest = tcg_temp_new_i32();
2744
2745 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2746 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2747 if (vmin) {
f71a2ae5 2748 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2749 } else {
f71a2ae5 2750 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2751 }
2752 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2753 tcg_temp_free_i32(frn);
2754 tcg_temp_free_i32(frm);
2755 tcg_temp_free_i32(dest);
2756 }
2757
2758 tcg_temp_free_ptr(fpst);
2759 return 0;
2760}
2761
7655f39b
WN
2762static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2763 int rounding)
2764{
2765 TCGv_ptr fpst = get_fpstatus_ptr(0);
2766 TCGv_i32 tcg_rmode;
2767
2768 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2769 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2770
2771 if (dp) {
2772 TCGv_i64 tcg_op;
2773 TCGv_i64 tcg_res;
2774 tcg_op = tcg_temp_new_i64();
2775 tcg_res = tcg_temp_new_i64();
2776 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2777 gen_helper_rintd(tcg_res, tcg_op, fpst);
2778 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2779 tcg_temp_free_i64(tcg_op);
2780 tcg_temp_free_i64(tcg_res);
2781 } else {
2782 TCGv_i32 tcg_op;
2783 TCGv_i32 tcg_res;
2784 tcg_op = tcg_temp_new_i32();
2785 tcg_res = tcg_temp_new_i32();
2786 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2787 gen_helper_rints(tcg_res, tcg_op, fpst);
2788 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2789 tcg_temp_free_i32(tcg_op);
2790 tcg_temp_free_i32(tcg_res);
2791 }
2792
2793 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2794 tcg_temp_free_i32(tcg_rmode);
2795
2796 tcg_temp_free_ptr(fpst);
2797 return 0;
2798}
2799
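/* Editor's note (assumption, not stated in the original source): the two
 * gen_helper_set_rmode() calls in handle_vrint() appear to form a
 * save/restore pair: the helper seems to return the previous FP rounding
 * mode into tcg_rmode, so the second call puts the original mode back
 * once the rint has been emitted with the requested rounding.
 */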
2800
2801/* Table for converting the most common AArch32 encoding of
2802 * rounding mode to arm_fprounding order (which matches the
2803 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
2804 */
2805static const uint8_t fp_decode_rm[] = {
2806 FPROUNDING_TIEAWAY,
2807 FPROUNDING_TIEEVEN,
2808 FPROUNDING_POSINF,
2809 FPROUNDING_NEGINF,
2810};
2811
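/* Editor's note (illustrative, not part of the original source): the VRINT
 * decode below indexes this table with extract32(insn, 16, 2), so an RM
 * field of 0b10 selects FPROUNDING_POSINF, i.e. the VRINTP (round towards
 * plus infinity) variant, while 0b00 selects FPROUNDING_TIEAWAY for VRINTA.
 */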
04731fb5
WN
2812static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2813{
2814 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
2815
2816 if (!arm_feature(env, ARM_FEATURE_V8)) {
2817 return 1;
2818 }
2819
2820 if (dp) {
2821 VFP_DREG_D(rd, insn);
2822 VFP_DREG_N(rn, insn);
2823 VFP_DREG_M(rm, insn);
2824 } else {
2825 rd = VFP_SREG_D(insn);
2826 rn = VFP_SREG_N(insn);
2827 rm = VFP_SREG_M(insn);
2828 }
2829
2830 if ((insn & 0x0f800e50) == 0x0e000a00) {
2831 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
2832 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
2833 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
2834 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
2835 /* VRINTA, VRINTN, VRINTP, VRINTM */
2836 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2837 return handle_vrint(insn, rd, rm, dp, rounding);
04731fb5
WN
2838 }
2839 return 1;
2840}
2841
a1c7273b 2842/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2843 (i.e. an undefined instruction). */
0ecb72a5 2844static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2845{
2846 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2847 int dp, veclen;
39d5492a
PM
2848 TCGv_i32 addr;
2849 TCGv_i32 tmp;
2850 TCGv_i32 tmp2;
b7bcbe95 2851
40f137e1
PB
2852 if (!arm_feature(env, ARM_FEATURE_VFP))
2853 return 1;
2854
5df8bac1 2855 if (!s->vfp_enabled) {
9ee6e8bb 2856 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2857 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2858 return 1;
2859 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2860 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2861 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2862 return 1;
2863 }
6a57f3eb
WN
2864
2865 if (extract32(insn, 28, 4) == 0xf) {
2866 /* Encodings with T=1 (Thumb) or unconditional (ARM):
2867 * only used in v8 and above.
2868 */
04731fb5 2869 return disas_vfp_v8_insn(env, s, insn);
6a57f3eb
WN
2870 }
2871
b7bcbe95
FB
2872 dp = ((insn & 0xf00) == 0xb00);
2873 switch ((insn >> 24) & 0xf) {
2874 case 0xe:
2875 if (insn & (1 << 4)) {
2876 /* single register transfer */
b7bcbe95
FB
2877 rd = (insn >> 12) & 0xf;
2878 if (dp) {
9ee6e8bb
PB
2879 int size;
2880 int pass;
2881
2882 VFP_DREG_N(rn, insn);
2883 if (insn & 0xf)
b7bcbe95 2884 return 1;
9ee6e8bb
PB
2885 if (insn & 0x00c00060
2886 && !arm_feature(env, ARM_FEATURE_NEON))
2887 return 1;
2888
2889 pass = (insn >> 21) & 1;
2890 if (insn & (1 << 22)) {
2891 size = 0;
2892 offset = ((insn >> 5) & 3) * 8;
2893 } else if (insn & (1 << 5)) {
2894 size = 1;
2895 offset = (insn & (1 << 6)) ? 16 : 0;
2896 } else {
2897 size = 2;
2898 offset = 0;
2899 }
18c9b560 2900 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2901 /* vfp->arm */
ad69471c 2902 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2903 switch (size) {
2904 case 0:
9ee6e8bb 2905 if (offset)
ad69471c 2906 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2907 if (insn & (1 << 23))
ad69471c 2908 gen_uxtb(tmp);
9ee6e8bb 2909 else
ad69471c 2910 gen_sxtb(tmp);
9ee6e8bb
PB
2911 break;
2912 case 1:
9ee6e8bb
PB
2913 if (insn & (1 << 23)) {
2914 if (offset) {
ad69471c 2915 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2916 } else {
ad69471c 2917 gen_uxth(tmp);
9ee6e8bb
PB
2918 }
2919 } else {
2920 if (offset) {
ad69471c 2921 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2922 } else {
ad69471c 2923 gen_sxth(tmp);
9ee6e8bb
PB
2924 }
2925 }
2926 break;
2927 case 2:
9ee6e8bb
PB
2928 break;
2929 }
ad69471c 2930 store_reg(s, rd, tmp);
b7bcbe95
FB
2931 } else {
2932 /* arm->vfp */
ad69471c 2933 tmp = load_reg(s, rd);
9ee6e8bb
PB
2934 if (insn & (1 << 23)) {
2935 /* VDUP */
2936 if (size == 0) {
ad69471c 2937 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2938 } else if (size == 1) {
ad69471c 2939 gen_neon_dup_low16(tmp);
9ee6e8bb 2940 }
cbbccffc 2941 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2942 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2943 tcg_gen_mov_i32(tmp2, tmp);
2944 neon_store_reg(rn, n, tmp2);
2945 }
2946 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2947 } else {
2948 /* VMOV */
2949 switch (size) {
2950 case 0:
ad69471c 2951 tmp2 = neon_load_reg(rn, pass);
d593c48e 2952 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2953 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2954 break;
2955 case 1:
ad69471c 2956 tmp2 = neon_load_reg(rn, pass);
d593c48e 2957 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2958 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2959 break;
2960 case 2:
9ee6e8bb
PB
2961 break;
2962 }
ad69471c 2963 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2964 }
b7bcbe95 2965 }
9ee6e8bb
PB
2966 } else { /* !dp */
2967 if ((insn & 0x6f) != 0x00)
2968 return 1;
2969 rn = VFP_SREG_N(insn);
18c9b560 2970 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2971 /* vfp->arm */
2972 if (insn & (1 << 21)) {
2973 /* system register */
40f137e1 2974 rn >>= 1;
9ee6e8bb 2975
b7bcbe95 2976 switch (rn) {
40f137e1 2977 case ARM_VFP_FPSID:
4373f3ce 2978 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2979 VFP3 restricts all id registers to privileged
2980 accesses. */
2981 if (IS_USER(s)
2982 && arm_feature(env, ARM_FEATURE_VFP3))
2983 return 1;
4373f3ce 2984 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2985 break;
40f137e1 2986 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2987 if (IS_USER(s))
2988 return 1;
4373f3ce 2989 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2990 break;
40f137e1
PB
2991 case ARM_VFP_FPINST:
2992 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2993 /* Not present in VFP3. */
2994 if (IS_USER(s)
2995 || arm_feature(env, ARM_FEATURE_VFP3))
2996 return 1;
4373f3ce 2997 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2998 break;
40f137e1 2999 case ARM_VFP_FPSCR:
601d70b9 3000 if (rd == 15) {
4373f3ce
PB
3001 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3002 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3003 } else {
7d1b0095 3004 tmp = tcg_temp_new_i32();
4373f3ce
PB
3005 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3006 }
b7bcbe95 3007 break;
9ee6e8bb
PB
3008 case ARM_VFP_MVFR0:
3009 case ARM_VFP_MVFR1:
3010 if (IS_USER(s)
06ed5d66 3011 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 3012 return 1;
4373f3ce 3013 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3014 break;
b7bcbe95
FB
3015 default:
3016 return 1;
3017 }
3018 } else {
3019 gen_mov_F0_vreg(0, rn);
4373f3ce 3020 tmp = gen_vfp_mrs();
b7bcbe95
FB
3021 }
3022 if (rd == 15) {
b5ff1b31 3023 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3024 gen_set_nzcv(tmp);
7d1b0095 3025 tcg_temp_free_i32(tmp);
4373f3ce
PB
3026 } else {
3027 store_reg(s, rd, tmp);
3028 }
b7bcbe95
FB
3029 } else {
3030 /* arm->vfp */
b7bcbe95 3031 if (insn & (1 << 21)) {
40f137e1 3032 rn >>= 1;
b7bcbe95
FB
3033 /* system register */
3034 switch (rn) {
40f137e1 3035 case ARM_VFP_FPSID:
9ee6e8bb
PB
3036 case ARM_VFP_MVFR0:
3037 case ARM_VFP_MVFR1:
b7bcbe95
FB
3038 /* Writes are ignored. */
3039 break;
40f137e1 3040 case ARM_VFP_FPSCR:
e4c1cfa5 3041 tmp = load_reg(s, rd);
4373f3ce 3042 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3043 tcg_temp_free_i32(tmp);
b5ff1b31 3044 gen_lookup_tb(s);
b7bcbe95 3045 break;
40f137e1 3046 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3047 if (IS_USER(s))
3048 return 1;
71b3c3de
JR
3049 /* TODO: VFP subarchitecture support.
3050 * For now, only keep the EN bit. */
e4c1cfa5 3051 tmp = load_reg(s, rd);
71b3c3de 3052 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3053 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3054 gen_lookup_tb(s);
3055 break;
3056 case ARM_VFP_FPINST:
3057 case ARM_VFP_FPINST2:
e4c1cfa5 3058 tmp = load_reg(s, rd);
4373f3ce 3059 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3060 break;
b7bcbe95
FB
3061 default:
3062 return 1;
3063 }
3064 } else {
e4c1cfa5 3065 tmp = load_reg(s, rd);
4373f3ce 3066 gen_vfp_msr(tmp);
b7bcbe95
FB
3067 gen_mov_vreg_F0(0, rn);
3068 }
3069 }
3070 }
3071 } else {
3072 /* data processing */
3073 /* The opcode is in bits 23, 21, 20 and 6. */
3074 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3075 if (dp) {
3076 if (op == 15) {
3077 /* rn is opcode */
3078 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3079 } else {
3080 /* rn is register number */
9ee6e8bb 3081 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3082 }
3083
04595bf6 3084 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 3085 /* Integer or single precision destination. */
9ee6e8bb 3086 rd = VFP_SREG_D(insn);
b7bcbe95 3087 } else {
9ee6e8bb 3088 VFP_DREG_D(rd, insn);
b7bcbe95 3089 }
04595bf6
PM
3090 if (op == 15 &&
3091 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
3092 /* VCVT from int is always from S reg regardless of dp bit.
3093 * VCVT with immediate frac_bits has the same format as SREG_M
3094 */
3095 rm = VFP_SREG_M(insn);
b7bcbe95 3096 } else {
9ee6e8bb 3097 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3098 }
3099 } else {
9ee6e8bb 3100 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3101 if (op == 15 && rn == 15) {
3102 /* Double precision destination. */
9ee6e8bb
PB
3103 VFP_DREG_D(rd, insn);
3104 } else {
3105 rd = VFP_SREG_D(insn);
3106 }
04595bf6
PM
3107 /* NB that we implicitly rely on the encoding for the frac_bits
3108 * in VCVT of fixed to float being the same as that of an SREG_M
3109 */
9ee6e8bb 3110 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3111 }
3112
69d1fc22 3113 veclen = s->vec_len;
b7bcbe95
FB
3114 if (op == 15 && rn > 3)
3115 veclen = 0;
3116
3117 /* Shut up compiler warnings. */
3118 delta_m = 0;
3119 delta_d = 0;
3120 bank_mask = 0;
3b46e624 3121
b7bcbe95
FB
3122 if (veclen > 0) {
3123 if (dp)
3124 bank_mask = 0xc;
3125 else
3126 bank_mask = 0x18;
3127
3128 /* Figure out what type of vector operation this is. */
3129 if ((rd & bank_mask) == 0) {
3130 /* scalar */
3131 veclen = 0;
3132 } else {
3133 if (dp)
69d1fc22 3134 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3135 else
69d1fc22 3136 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3137
3138 if ((rm & bank_mask) == 0) {
3139 /* mixed scalar/vector */
3140 delta_m = 0;
3141 } else {
3142 /* vector */
3143 delta_m = delta_d;
3144 }
3145 }
3146 }
3147
3148 /* Load the initial operands. */
3149 if (op == 15) {
3150 switch (rn) {
3151 case 16:
3152 case 17:
3153 /* Integer source */
3154 gen_mov_F0_vreg(0, rm);
3155 break;
3156 case 8:
3157 case 9:
3158 /* Compare */
3159 gen_mov_F0_vreg(dp, rd);
3160 gen_mov_F1_vreg(dp, rm);
3161 break;
3162 case 10:
3163 case 11:
3164 /* Compare with zero */
3165 gen_mov_F0_vreg(dp, rd);
3166 gen_vfp_F1_ld0(dp);
3167 break;
9ee6e8bb
PB
3168 case 20:
3169 case 21:
3170 case 22:
3171 case 23:
644ad806
PB
3172 case 28:
3173 case 29:
3174 case 30:
3175 case 31:
9ee6e8bb
PB
3176 /* Source and destination the same. */
3177 gen_mov_F0_vreg(dp, rd);
3178 break;
6e0c0ed1
PM
3179 case 4:
3180 case 5:
3181 case 6:
3182 case 7:
3183 /* VCVTB, VCVTT: only present with the halfprec extension,
3184 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
3185 */
3186 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
3187 return 1;
3188 }
3189 /* Otherwise fall through */
b7bcbe95
FB
3190 default:
3191 /* One source operand. */
3192 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3193 break;
b7bcbe95
FB
3194 }
3195 } else {
3196 /* Two source operands. */
3197 gen_mov_F0_vreg(dp, rn);
3198 gen_mov_F1_vreg(dp, rm);
3199 }
3200
3201 for (;;) {
3202 /* Perform the calculation. */
3203 switch (op) {
605a6aed
PM
3204 case 0: /* VMLA: fd + (fn * fm) */
3205 /* Note that order of inputs to the add matters for NaNs */
3206 gen_vfp_F1_mul(dp);
3207 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3208 gen_vfp_add(dp);
3209 break;
605a6aed 3210 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3211 gen_vfp_mul(dp);
605a6aed
PM
3212 gen_vfp_F1_neg(dp);
3213 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3214 gen_vfp_add(dp);
3215 break;
605a6aed
PM
3216 case 2: /* VNMLS: -fd + (fn * fm) */
3217 /* Note that it isn't valid to replace (-A + B) with (B - A)
3218 * or similar plausible-looking simplifications
3219 * because this will give wrong results for NaNs.
3220 */
3221 gen_vfp_F1_mul(dp);
3222 gen_mov_F0_vreg(dp, rd);
3223 gen_vfp_neg(dp);
3224 gen_vfp_add(dp);
b7bcbe95 3225 break;
605a6aed 3226 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3227 gen_vfp_mul(dp);
605a6aed
PM
3228 gen_vfp_F1_neg(dp);
3229 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3230 gen_vfp_neg(dp);
605a6aed 3231 gen_vfp_add(dp);
b7bcbe95
FB
3232 break;
3233 case 4: /* mul: fn * fm */
3234 gen_vfp_mul(dp);
3235 break;
3236 case 5: /* nmul: -(fn * fm) */
3237 gen_vfp_mul(dp);
3238 gen_vfp_neg(dp);
3239 break;
3240 case 6: /* add: fn + fm */
3241 gen_vfp_add(dp);
3242 break;
3243 case 7: /* sub: fn - fm */
3244 gen_vfp_sub(dp);
3245 break;
3246 case 8: /* div: fn / fm */
3247 gen_vfp_div(dp);
3248 break;
da97f52c
PM
3249 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3250 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3251 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3252 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3253 /* These are fused multiply-add, and must be done as one
3254 * floating point operation with no rounding between the
3255 * multiplication and addition steps.
3256 * NB that doing the negations here as separate steps is
3257 * correct: an input NaN should come out with its sign bit
3258 * flipped if it is a negated input.
3259 */
3260 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3261 return 1;
3262 }
3263 if (dp) {
3264 TCGv_ptr fpst;
3265 TCGv_i64 frd;
3266 if (op & 1) {
3267 /* VFNMS, VFMS */
3268 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3269 }
3270 frd = tcg_temp_new_i64();
3271 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3272 if (op & 2) {
3273 /* VFNMA, VFNMS */
3274 gen_helper_vfp_negd(frd, frd);
3275 }
3276 fpst = get_fpstatus_ptr(0);
3277 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3278 cpu_F1d, frd, fpst);
3279 tcg_temp_free_ptr(fpst);
3280 tcg_temp_free_i64(frd);
3281 } else {
3282 TCGv_ptr fpst;
3283 TCGv_i32 frd;
3284 if (op & 1) {
3285 /* VFNMS, VFMS */
3286 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3287 }
3288 frd = tcg_temp_new_i32();
3289 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3290 if (op & 2) {
3291 gen_helper_vfp_negs(frd, frd);
3292 }
3293 fpst = get_fpstatus_ptr(0);
3294 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3295 cpu_F1s, frd, fpst);
3296 tcg_temp_free_ptr(fpst);
3297 tcg_temp_free_i32(frd);
3298 }
3299 break;
9ee6e8bb
PB
3300 case 14: /* fconst */
3301 if (!arm_feature(env, ARM_FEATURE_VFP3))
3302 return 1;
3303
3304 n = (insn << 12) & 0x80000000;
3305 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3306 if (dp) {
3307 if (i & 0x40)
3308 i |= 0x3f80;
3309 else
3310 i |= 0x4000;
3311 n |= i << 16;
4373f3ce 3312 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3313 } else {
3314 if (i & 0x40)
3315 i |= 0x780;
3316 else
3317 i |= 0x800;
3318 n |= i << 19;
5b340b51 3319 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3320 }
9ee6e8bb 3321 break;
b7bcbe95
FB
3322 case 15: /* extension space */
3323 switch (rn) {
3324 case 0: /* cpy */
3325 /* no-op */
3326 break;
3327 case 1: /* abs */
3328 gen_vfp_abs(dp);
3329 break;
3330 case 2: /* neg */
3331 gen_vfp_neg(dp);
3332 break;
3333 case 3: /* sqrt */
3334 gen_vfp_sqrt(dp);
3335 break;
60011498 3336 case 4: /* vcvtb.f32.f16 */
60011498
PB
3337 tmp = gen_vfp_mrs();
3338 tcg_gen_ext16u_i32(tmp, tmp);
3339 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3340 tcg_temp_free_i32(tmp);
60011498
PB
3341 break;
3342 case 5: /* vcvtt.f32.f16 */
60011498
PB
3343 tmp = gen_vfp_mrs();
3344 tcg_gen_shri_i32(tmp, tmp, 16);
3345 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3346 tcg_temp_free_i32(tmp);
60011498
PB
3347 break;
3348 case 6: /* vcvtb.f16.f32 */
7d1b0095 3349 tmp = tcg_temp_new_i32();
60011498
PB
3350 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3351 gen_mov_F0_vreg(0, rd);
3352 tmp2 = gen_vfp_mrs();
3353 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3354 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3355 tcg_temp_free_i32(tmp2);
60011498
PB
3356 gen_vfp_msr(tmp);
3357 break;
3358 case 7: /* vcvtt.f16.f32 */
7d1b0095 3359 tmp = tcg_temp_new_i32();
60011498
PB
3360 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3361 tcg_gen_shli_i32(tmp, tmp, 16);
3362 gen_mov_F0_vreg(0, rd);
3363 tmp2 = gen_vfp_mrs();
3364 tcg_gen_ext16u_i32(tmp2, tmp2);
3365 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3366 tcg_temp_free_i32(tmp2);
60011498
PB
3367 gen_vfp_msr(tmp);
3368 break;
b7bcbe95
FB
3369 case 8: /* cmp */
3370 gen_vfp_cmp(dp);
3371 break;
3372 case 9: /* cmpe */
3373 gen_vfp_cmpe(dp);
3374 break;
3375 case 10: /* cmpz */
3376 gen_vfp_cmp(dp);
3377 break;
3378 case 11: /* cmpez */
3379 gen_vfp_F1_ld0(dp);
3380 gen_vfp_cmpe(dp);
3381 break;
664c6733
WN
3382 case 12: /* vrintr */
3383 {
3384 TCGv_ptr fpst = get_fpstatus_ptr(0);
3385 if (dp) {
3386 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3387 } else {
3388 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3389 }
3390 tcg_temp_free_ptr(fpst);
3391 break;
3392 }
a290c62a
WN
3393 case 13: /* vrintz */
3394 {
3395 TCGv_ptr fpst = get_fpstatus_ptr(0);
3396 TCGv_i32 tcg_rmode;
3397 tcg_rmode = tcg_const_i32(float_round_to_zero);
3398 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3399 if (dp) {
3400 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3401 } else {
3402 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3403 }
3404 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3405 tcg_temp_free_i32(tcg_rmode);
3406 tcg_temp_free_ptr(fpst);
3407 break;
3408 }
4e82bc01
WN
3409 case 14: /* vrintx */
3410 {
3411 TCGv_ptr fpst = get_fpstatus_ptr(0);
3412 if (dp) {
3413 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3414 } else {
3415 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3416 }
3417 tcg_temp_free_ptr(fpst);
3418 break;
3419 }
b7bcbe95
FB
3420 case 15: /* single<->double conversion */
3421 if (dp)
4373f3ce 3422 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3423 else
4373f3ce 3424 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3425 break;
3426 case 16: /* fuito */
5500b06c 3427 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3428 break;
3429 case 17: /* fsito */
5500b06c 3430 gen_vfp_sito(dp, 0);
b7bcbe95 3431 break;
9ee6e8bb
PB
3432 case 20: /* fshto */
3433 if (!arm_feature(env, ARM_FEATURE_VFP3))
3434 return 1;
5500b06c 3435 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3436 break;
3437 case 21: /* fslto */
3438 if (!arm_feature(env, ARM_FEATURE_VFP3))
3439 return 1;
5500b06c 3440 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3441 break;
3442 case 22: /* fuhto */
3443 if (!arm_feature(env, ARM_FEATURE_VFP3))
3444 return 1;
5500b06c 3445 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3446 break;
3447 case 23: /* fulto */
3448 if (!arm_feature(env, ARM_FEATURE_VFP3))
3449 return 1;
5500b06c 3450 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3451 break;
b7bcbe95 3452 case 24: /* ftoui */
5500b06c 3453 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3454 break;
3455 case 25: /* ftouiz */
5500b06c 3456 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3457 break;
3458 case 26: /* ftosi */
5500b06c 3459 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3460 break;
3461 case 27: /* ftosiz */
5500b06c 3462 gen_vfp_tosiz(dp, 0);
b7bcbe95 3463 break;
9ee6e8bb
PB
3464 case 28: /* ftosh */
3465 if (!arm_feature(env, ARM_FEATURE_VFP3))
3466 return 1;
5500b06c 3467 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3468 break;
3469 case 29: /* ftosl */
3470 if (!arm_feature(env, ARM_FEATURE_VFP3))
3471 return 1;
5500b06c 3472 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3473 break;
3474 case 30: /* ftouh */
3475 if (!arm_feature(env, ARM_FEATURE_VFP3))
3476 return 1;
5500b06c 3477 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3478 break;
3479 case 31: /* ftoul */
3480 if (!arm_feature(env, ARM_FEATURE_VFP3))
3481 return 1;
5500b06c 3482 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3483 break;
b7bcbe95 3484 default: /* undefined */
b7bcbe95
FB
3485 return 1;
3486 }
3487 break;
3488 default: /* undefined */
b7bcbe95
FB
3489 return 1;
3490 }
3491
3492 /* Write back the result. */
3493 if (op == 15 && (rn >= 8 && rn <= 11))
3494 ; /* Comparison, do nothing. */
04595bf6
PM
3495 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3496 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3497 gen_mov_vreg_F0(0, rd);
3498 else if (op == 15 && rn == 15)
3499 /* conversion */
3500 gen_mov_vreg_F0(!dp, rd);
3501 else
3502 gen_mov_vreg_F0(dp, rd);
3503
3504 /* break out of the loop if we have finished */
3505 if (veclen == 0)
3506 break;
3507
3508 if (op == 15 && delta_m == 0) {
3509 /* single source one-many */
3510 while (veclen--) {
3511 rd = ((rd + delta_d) & (bank_mask - 1))
3512 | (rd & bank_mask);
3513 gen_mov_vreg_F0(dp, rd);
3514 }
3515 break;
3516 }
3517 /* Setup the next operands. */
3518 veclen--;
3519 rd = ((rd + delta_d) & (bank_mask - 1))
3520 | (rd & bank_mask);
3521
3522 if (op == 15) {
3523 /* One source operand. */
3524 rm = ((rm + delta_m) & (bank_mask - 1))
3525 | (rm & bank_mask);
3526 gen_mov_F0_vreg(dp, rm);
3527 } else {
3528 /* Two source operands. */
3529 rn = ((rn + delta_d) & (bank_mask - 1))
3530 | (rn & bank_mask);
3531 gen_mov_F0_vreg(dp, rn);
3532 if (delta_m) {
3533 rm = ((rm + delta_m) & (bank_mask - 1))
3534 | (rm & bank_mask);
3535 gen_mov_F1_vreg(dp, rm);
3536 }
3537 }
3538 }
3539 }
3540 break;
3541 case 0xc:
3542 case 0xd:
8387da81 3543 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3544 /* two-register transfer */
3545 rn = (insn >> 16) & 0xf;
3546 rd = (insn >> 12) & 0xf;
3547 if (dp) {
9ee6e8bb
PB
3548 VFP_DREG_M(rm, insn);
3549 } else {
3550 rm = VFP_SREG_M(insn);
3551 }
b7bcbe95 3552
18c9b560 3553 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3554 /* vfp->arm */
3555 if (dp) {
4373f3ce
PB
3556 gen_mov_F0_vreg(0, rm * 2);
3557 tmp = gen_vfp_mrs();
3558 store_reg(s, rd, tmp);
3559 gen_mov_F0_vreg(0, rm * 2 + 1);
3560 tmp = gen_vfp_mrs();
3561 store_reg(s, rn, tmp);
b7bcbe95
FB
3562 } else {
3563 gen_mov_F0_vreg(0, rm);
4373f3ce 3564 tmp = gen_vfp_mrs();
8387da81 3565 store_reg(s, rd, tmp);
b7bcbe95 3566 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3567 tmp = gen_vfp_mrs();
8387da81 3568 store_reg(s, rn, tmp);
b7bcbe95
FB
3569 }
3570 } else {
3571 /* arm->vfp */
3572 if (dp) {
4373f3ce
PB
3573 tmp = load_reg(s, rd);
3574 gen_vfp_msr(tmp);
3575 gen_mov_vreg_F0(0, rm * 2);
3576 tmp = load_reg(s, rn);
3577 gen_vfp_msr(tmp);
3578 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3579 } else {
8387da81 3580 tmp = load_reg(s, rd);
4373f3ce 3581 gen_vfp_msr(tmp);
b7bcbe95 3582 gen_mov_vreg_F0(0, rm);
8387da81 3583 tmp = load_reg(s, rn);
4373f3ce 3584 gen_vfp_msr(tmp);
b7bcbe95
FB
3585 gen_mov_vreg_F0(0, rm + 1);
3586 }
3587 }
3588 } else {
3589 /* Load/store */
3590 rn = (insn >> 16) & 0xf;
3591 if (dp)
9ee6e8bb 3592 VFP_DREG_D(rd, insn);
b7bcbe95 3593 else
9ee6e8bb 3594 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3595 if ((insn & 0x01200000) == 0x01000000) {
3596 /* Single load/store */
3597 offset = (insn & 0xff) << 2;
3598 if ((insn & (1 << 23)) == 0)
3599 offset = -offset;
934814f1
PM
3600 if (s->thumb && rn == 15) {
3601 /* This is actually UNPREDICTABLE */
3602 addr = tcg_temp_new_i32();
3603 tcg_gen_movi_i32(addr, s->pc & ~2);
3604 } else {
3605 addr = load_reg(s, rn);
3606 }
312eea9f 3607 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3608 if (insn & (1 << 20)) {
312eea9f 3609 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3610 gen_mov_vreg_F0(dp, rd);
3611 } else {
3612 gen_mov_F0_vreg(dp, rd);
312eea9f 3613 gen_vfp_st(s, dp, addr);
b7bcbe95 3614 }
7d1b0095 3615 tcg_temp_free_i32(addr);
b7bcbe95
FB
3616 } else {
3617 /* load/store multiple */
934814f1 3618 int w = insn & (1 << 21);
b7bcbe95
FB
3619 if (dp)
3620 n = (insn >> 1) & 0x7f;
3621 else
3622 n = insn & 0xff;
3623
934814f1
PM
3624 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3625 /* P == U , W == 1 => UNDEF */
3626 return 1;
3627 }
3628 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3629 /* UNPREDICTABLE cases for bad immediates: we choose to
3630 * UNDEF to avoid generating huge numbers of TCG ops
3631 */
3632 return 1;
3633 }
3634 if (rn == 15 && w) {
3635 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3636 return 1;
3637 }
3638
3639 if (s->thumb && rn == 15) {
3640 /* This is actually UNPREDICTABLE */
3641 addr = tcg_temp_new_i32();
3642 tcg_gen_movi_i32(addr, s->pc & ~2);
3643 } else {
3644 addr = load_reg(s, rn);
3645 }
b7bcbe95 3646 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3647 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3648
3649 if (dp)
3650 offset = 8;
3651 else
3652 offset = 4;
3653 for (i = 0; i < n; i++) {
18c9b560 3654 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3655 /* load */
312eea9f 3656 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3657 gen_mov_vreg_F0(dp, rd + i);
3658 } else {
3659 /* store */
3660 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3661 gen_vfp_st(s, dp, addr);
b7bcbe95 3662 }
312eea9f 3663 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3664 }
934814f1 3665 if (w) {
b7bcbe95
FB
3666 /* writeback */
3667 if (insn & (1 << 24))
3668 offset = -offset * n;
3669 else if (dp && (insn & 1))
3670 offset = 4;
3671 else
3672 offset = 0;
3673
3674 if (offset != 0)
312eea9f
FN
3675 tcg_gen_addi_i32(addr, addr, offset);
3676 store_reg(s, rn, addr);
3677 } else {
7d1b0095 3678 tcg_temp_free_i32(addr);
b7bcbe95
FB
3679 }
3680 }
3681 }
3682 break;
3683 default:
3684 /* Should never happen. */
3685 return 1;
3686 }
3687 return 0;
3688}
3689
0a2461fa 3690static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3691{
6e256c93
FB
3692 TranslationBlock *tb;
3693
3694 tb = s->tb;
3695 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3696 tcg_gen_goto_tb(n);
eaed129d 3697 gen_set_pc_im(s, dest);
8cfd0495 3698 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3699 } else {
eaed129d 3700 gen_set_pc_im(s, dest);
57fec1fe 3701 tcg_gen_exit_tb(0);
6e256c93 3702 }
c53be334
FB
3703}
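/* Note on gen_goto_tb(): the direct tcg_gen_goto_tb()/tcg_gen_exit_tb(tb + n)
 * chaining is only used when the destination lies in the same guest page as
 * the start of the current TB; otherwise we just update the PC and return to
 * the main loop with tcg_gen_exit_tb(0) so the next TB is found through the
 * normal lookup path. */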
3704
8aaca4c0
FB
3705static inline void gen_jmp (DisasContext *s, uint32_t dest)
3706{
551bd27f 3707 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3708 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3709 if (s->thumb)
d9ba4830
PB
3710 dest |= 1;
3711 gen_bx_im(s, dest);
8aaca4c0 3712 } else {
6e256c93 3713 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3714 s->is_jmp = DISAS_TB_JUMP;
3715 }
3716}
3717
39d5492a 3718static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3719{
ee097184 3720 if (x)
d9ba4830 3721 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3722 else
d9ba4830 3723 gen_sxth(t0);
ee097184 3724 if (y)
d9ba4830 3725 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3726 else
d9ba4830
PB
3727 gen_sxth(t1);
3728 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3729}
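/* gen_mulxy() implements the halfword-select 16x16->32 multiplies (SMULxy and
 * friends): x and y choose the top (arithmetic shift right by 16) or bottom
 * (sign-extended) halfword of t0 and t1 respectively before the 32-bit
 * multiply. */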
3730
3731/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3732static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3733 uint32_t mask;
3734
3735 mask = 0;
3736 if (flags & (1 << 0))
3737 mask |= 0xff;
3738 if (flags & (1 << 1))
3739 mask |= 0xff00;
3740 if (flags & (1 << 2))
3741 mask |= 0xff0000;
3742 if (flags & (1 << 3))
3743 mask |= 0xff000000;
9ee6e8bb 3744
2ae23e75 3745 /* Mask out undefined bits. */
9ee6e8bb 3746 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3747 if (!arm_feature(env, ARM_FEATURE_V4T))
3748 mask &= ~CPSR_T;
3749 if (!arm_feature(env, ARM_FEATURE_V5))
3750        mask &= ~CPSR_Q; /* V5TE in reality */
9ee6e8bb 3751 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3752 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3753 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3754 mask &= ~CPSR_IT;
9ee6e8bb 3755 /* Mask out execution state bits. */
2ae23e75 3756 if (!spsr)
e160c51c 3757 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3758 /* Mask out privileged bits. */
3759 if (IS_USER(s))
9ee6e8bb 3760 mask &= CPSR_USER;
b5ff1b31
FB
3761 return mask;
3762}
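/* Example: an MSR with only the c and f fields selected (flags bits 0 and 3)
 * starts from a raw mask of 0xff0000ff; the feature tests then clear bits the
 * core does not implement, execution-state bits are dropped for CPSR
 * (non-SPSR) writes, and user mode is further restricted to CPSR_USER. */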
3763
2fbac54b 3764/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3765static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3766{
39d5492a 3767 TCGv_i32 tmp;
b5ff1b31
FB
3768 if (spsr) {
3769 /* ??? This is also undefined in system mode. */
3770 if (IS_USER(s))
3771 return 1;
d9ba4830
PB
3772
3773 tmp = load_cpu_field(spsr);
3774 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3775 tcg_gen_andi_i32(t0, t0, mask);
3776 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3777 store_cpu_field(tmp, spsr);
b5ff1b31 3778 } else {
2fbac54b 3779 gen_set_cpsr(t0, mask);
b5ff1b31 3780 }
7d1b0095 3781 tcg_temp_free_i32(t0);
b5ff1b31
FB
3782 gen_lookup_tb(s);
3783 return 0;
3784}
3785
2fbac54b
FN
3786/* Returns nonzero if access to the PSR is not permitted. */
3787static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3788{
39d5492a 3789 TCGv_i32 tmp;
7d1b0095 3790 tmp = tcg_temp_new_i32();
2fbac54b
FN
3791 tcg_gen_movi_i32(tmp, val);
3792 return gen_set_psr(s, mask, spsr, tmp);
3793}
3794
e9bb4aa9 3795/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3796static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3797{
39d5492a 3798 TCGv_i32 tmp;
e9bb4aa9 3799 store_reg(s, 15, pc);
d9ba4830
PB
3800 tmp = load_cpu_field(spsr);
3801 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3802 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3803 s->is_jmp = DISAS_UPDATE;
3804}
3805
b0109805 3806/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3807static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3808{
b0109805 3809 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3810 tcg_temp_free_i32(cpsr);
b0109805 3811 store_reg(s, 15, pc);
9ee6e8bb
PB
3812 s->is_jmp = DISAS_UPDATE;
3813}
3b46e624 3814
9ee6e8bb
PB
3815static inline void
3816gen_set_condexec (DisasContext *s)
3817{
3818 if (s->condexec_mask) {
8f01245e 3819 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
39d5492a 3820 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 3821 tcg_gen_movi_i32(tmp, val);
d9ba4830 3822 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3823 }
3824}
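/* gen_set_condexec() re-packs the IT-block state tracked at translation time
 * (condexec_cond/condexec_mask) into the condexec_bits CPU field, so the saved
 * state is correct if the instruction being translated raises an exception. */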
3b46e624 3825
bc4a0de0
PM
3826static void gen_exception_insn(DisasContext *s, int offset, int excp)
3827{
3828 gen_set_condexec(s);
eaed129d 3829 gen_set_pc_im(s, s->pc - offset);
bc4a0de0
PM
3830 gen_exception(excp);
3831 s->is_jmp = DISAS_JUMP;
3832}
3833
9ee6e8bb
PB
3834static void gen_nop_hint(DisasContext *s, int val)
3835{
3836 switch (val) {
3837 case 3: /* wfi */
eaed129d 3838 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
3839 s->is_jmp = DISAS_WFI;
3840 break;
3841 case 2: /* wfe */
3842 case 4: /* sev */
12b10571
MR
3843 case 5: /* sevl */
3844 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3845 default: /* nop */
3846 break;
3847 }
3848}
99c475ab 3849
ad69471c 3850#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3851
39d5492a 3852static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3853{
3854 switch (size) {
dd8fbd78
FN
3855 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3856 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3857 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3858 default: abort();
9ee6e8bb 3859 }
9ee6e8bb
PB
3860}
3861
39d5492a 3862static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3863{
3864 switch (size) {
dd8fbd78
FN
3865 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3866 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3867 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3868 default: return;
3869 }
3870}
3871
3872/* 32-bit pairwise ops end up the same as the elementwise versions. */
3873#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3874#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3875#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3876#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
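/* For 32-bit elements the pairwise pass already feeds the two elements of each
 * pair to the helper as its two operands, so the per-call operation reduces to
 * an ordinary max/min; hence the aliases above. */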
3877
ad69471c
PB
3878#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3879 switch ((size << 1) | u) { \
3880 case 0: \
dd8fbd78 3881 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3882 break; \
3883 case 1: \
dd8fbd78 3884 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3885 break; \
3886 case 2: \
dd8fbd78 3887 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3888 break; \
3889 case 3: \
dd8fbd78 3890 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3891 break; \
3892 case 4: \
dd8fbd78 3893 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3894 break; \
3895 case 5: \
dd8fbd78 3896 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3897 break; \
3898 default: return 1; \
3899 }} while (0)
9ee6e8bb
PB
3900
3901#define GEN_NEON_INTEGER_OP(name) do { \
3902 switch ((size << 1) | u) { \
ad69471c 3903 case 0: \
dd8fbd78 3904 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3905 break; \
3906 case 1: \
dd8fbd78 3907 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3908 break; \
3909 case 2: \
dd8fbd78 3910 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3911 break; \
3912 case 3: \
dd8fbd78 3913 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3914 break; \
3915 case 4: \
dd8fbd78 3916 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3917 break; \
3918 case 5: \
dd8fbd78 3919 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3920 break; \
9ee6e8bb
PB
3921 default: return 1; \
3922 }} while (0)
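/* Both macros dispatch on (size << 1) | u to pick the signed or unsigned
 * 8/16/32-bit helper operating on tmp and tmp2; 64-bit elements (size == 3)
 * are handled on a separate path, so the default arm makes the enclosing
 * function return 1 and the insn UNDEFs. */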
3923
39d5492a 3924static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 3925{
39d5492a 3926 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
3927 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3928 return tmp;
9ee6e8bb
PB
3929}
3930
39d5492a 3931static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 3932{
dd8fbd78 3933 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3934 tcg_temp_free_i32(var);
9ee6e8bb
PB
3935}
3936
39d5492a 3937static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 3938{
39d5492a 3939 TCGv_i32 tmp;
9ee6e8bb 3940 if (size == 1) {
0fad6efc
PM
3941 tmp = neon_load_reg(reg & 7, reg >> 4);
3942 if (reg & 8) {
dd8fbd78 3943 gen_neon_dup_high16(tmp);
0fad6efc
PM
3944 } else {
3945 gen_neon_dup_low16(tmp);
dd8fbd78 3946 }
0fad6efc
PM
3947 } else {
3948 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3949 }
dd8fbd78 3950 return tmp;
9ee6e8bb
PB
3951}
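/* neon_get_scalar() fetches a scalar operand for the "by scalar" forms: the
 * low bits of reg select the D register and the upper bits the element within
 * it. A 16-bit scalar is duplicated into both halves of the returned 32-bit
 * temp (gen_neon_dup_high16/low16); a 32-bit scalar is returned unchanged. */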
3952
02acedf9 3953static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3954{
39d5492a 3955 TCGv_i32 tmp, tmp2;
600b828c 3956 if (!q && size == 2) {
02acedf9
PM
3957 return 1;
3958 }
3959 tmp = tcg_const_i32(rd);
3960 tmp2 = tcg_const_i32(rm);
3961 if (q) {
3962 switch (size) {
3963 case 0:
02da0b2d 3964 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3965 break;
3966 case 1:
02da0b2d 3967 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3968 break;
3969 case 2:
02da0b2d 3970 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3971 break;
3972 default:
3973 abort();
3974 }
3975 } else {
3976 switch (size) {
3977 case 0:
02da0b2d 3978 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3979 break;
3980 case 1:
02da0b2d 3981 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3982 break;
3983 default:
3984 abort();
3985 }
3986 }
3987 tcg_temp_free_i32(tmp);
3988 tcg_temp_free_i32(tmp2);
3989 return 0;
19457615
FN
3990}
3991
d68a6f3a 3992static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 3993{
39d5492a 3994 TCGv_i32 tmp, tmp2;
600b828c 3995 if (!q && size == 2) {
d68a6f3a
PM
3996 return 1;
3997 }
3998 tmp = tcg_const_i32(rd);
3999 tmp2 = tcg_const_i32(rm);
4000 if (q) {
4001 switch (size) {
4002 case 0:
02da0b2d 4003 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4004 break;
4005 case 1:
02da0b2d 4006 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4007 break;
4008 case 2:
02da0b2d 4009 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4010 break;
4011 default:
4012 abort();
4013 }
4014 } else {
4015 switch (size) {
4016 case 0:
02da0b2d 4017 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4018 break;
4019 case 1:
02da0b2d 4020 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4021 break;
4022 default:
4023 abort();
4024 }
4025 }
4026 tcg_temp_free_i32(tmp);
4027 tcg_temp_free_i32(tmp2);
4028 return 0;
19457615
FN
4029}
4030
39d5492a 4031static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4032{
39d5492a 4033 TCGv_i32 rd, tmp;
19457615 4034
7d1b0095
PM
4035 rd = tcg_temp_new_i32();
4036 tmp = tcg_temp_new_i32();
19457615
FN
4037
4038 tcg_gen_shli_i32(rd, t0, 8);
4039 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4040 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4041 tcg_gen_or_i32(rd, rd, tmp);
4042
4043 tcg_gen_shri_i32(t1, t1, 8);
4044 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4045 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4046 tcg_gen_or_i32(t1, t1, tmp);
4047 tcg_gen_mov_i32(t0, rd);
4048
7d1b0095
PM
4049 tcg_temp_free_i32(tmp);
4050 tcg_temp_free_i32(rd);
19457615
FN
4051}
4052
39d5492a 4053static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4054{
39d5492a 4055 TCGv_i32 rd, tmp;
19457615 4056
7d1b0095
PM
4057 rd = tcg_temp_new_i32();
4058 tmp = tcg_temp_new_i32();
19457615
FN
4059
4060 tcg_gen_shli_i32(rd, t0, 16);
4061 tcg_gen_andi_i32(tmp, t1, 0xffff);
4062 tcg_gen_or_i32(rd, rd, tmp);
4063 tcg_gen_shri_i32(t1, t1, 16);
4064 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4065 tcg_gen_or_i32(t1, t1, tmp);
4066 tcg_gen_mov_i32(t0, rd);
4067
7d1b0095
PM
4068 tcg_temp_free_i32(tmp);
4069 tcg_temp_free_i32(rd);
19457615
FN
4070}
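/* gen_neon_trn_u8()/gen_neon_trn_u16() perform the VTRN element transposition
 * on one 32-bit chunk of each operand, exchanging the interleaved byte or
 * halfword lanes purely with shifts and masks. */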
4071
4072
9ee6e8bb
PB
4073static struct {
4074 int nregs;
4075 int interleave;
4076 int spacing;
4077} neon_ls_element_type[11] = {
4078 {4, 4, 1},
4079 {4, 4, 2},
4080 {4, 1, 1},
4081 {4, 2, 1},
4082 {3, 3, 1},
4083 {3, 3, 2},
4084 {3, 1, 1},
4085 {1, 1, 1},
4086 {2, 2, 1},
4087 {2, 2, 2},
4088 {2, 1, 1}
4089};
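/* Indexed by the op field (bits [11:8]) of the "load/store all elements"
 * encodings: nregs is the number of D registers in the list, interleave the
 * factor used to compute the per-element address stride, and spacing the
 * distance between successive D registers (1 = consecutive, 2 = every other
 * register). */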
4090
4091/* Translate a NEON load/store element instruction. Return nonzero if the
4092 instruction is invalid. */
0ecb72a5 4093static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4094{
4095 int rd, rn, rm;
4096 int op;
4097 int nregs;
4098 int interleave;
84496233 4099 int spacing;
9ee6e8bb
PB
4100 int stride;
4101 int size;
4102 int reg;
4103 int pass;
4104 int load;
4105 int shift;
9ee6e8bb 4106 int n;
39d5492a
PM
4107 TCGv_i32 addr;
4108 TCGv_i32 tmp;
4109 TCGv_i32 tmp2;
84496233 4110 TCGv_i64 tmp64;
9ee6e8bb 4111
5df8bac1 4112 if (!s->vfp_enabled)
9ee6e8bb
PB
4113 return 1;
4114 VFP_DREG_D(rd, insn);
4115 rn = (insn >> 16) & 0xf;
4116 rm = insn & 0xf;
4117 load = (insn & (1 << 21)) != 0;
4118 if ((insn & (1 << 23)) == 0) {
4119 /* Load store all elements. */
4120 op = (insn >> 8) & 0xf;
4121 size = (insn >> 6) & 3;
84496233 4122 if (op > 10)
9ee6e8bb 4123 return 1;
f2dd89d0
PM
4124 /* Catch UNDEF cases for bad values of align field */
4125 switch (op & 0xc) {
4126 case 4:
4127 if (((insn >> 5) & 1) == 1) {
4128 return 1;
4129 }
4130 break;
4131 case 8:
4132 if (((insn >> 4) & 3) == 3) {
4133 return 1;
4134 }
4135 break;
4136 default:
4137 break;
4138 }
9ee6e8bb
PB
4139 nregs = neon_ls_element_type[op].nregs;
4140 interleave = neon_ls_element_type[op].interleave;
84496233
JR
4141 spacing = neon_ls_element_type[op].spacing;
4142 if (size == 3 && (interleave | spacing) != 1)
4143 return 1;
e318a60b 4144 addr = tcg_temp_new_i32();
dcc65026 4145 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4146 stride = (1 << size) * interleave;
4147 for (reg = 0; reg < nregs; reg++) {
4148 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4149 load_reg_var(s, addr, rn);
4150 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4151 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4152 load_reg_var(s, addr, rn);
4153 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4154 }
84496233 4155 if (size == 3) {
8ed1237d 4156 tmp64 = tcg_temp_new_i64();
84496233 4157 if (load) {
08307563 4158 gen_aa32_ld64(tmp64, addr, IS_USER(s));
84496233 4159 neon_store_reg64(tmp64, rd);
84496233 4160 } else {
84496233 4161 neon_load_reg64(tmp64, rd);
08307563 4162 gen_aa32_st64(tmp64, addr, IS_USER(s));
84496233 4163 }
8ed1237d 4164 tcg_temp_free_i64(tmp64);
84496233
JR
4165 tcg_gen_addi_i32(addr, addr, stride);
4166 } else {
4167 for (pass = 0; pass < 2; pass++) {
4168 if (size == 2) {
4169 if (load) {
58ab8e96 4170 tmp = tcg_temp_new_i32();
08307563 4171 gen_aa32_ld32u(tmp, addr, IS_USER(s));
84496233
JR
4172 neon_store_reg(rd, pass, tmp);
4173 } else {
4174 tmp = neon_load_reg(rd, pass);
08307563 4175 gen_aa32_st32(tmp, addr, IS_USER(s));
58ab8e96 4176 tcg_temp_free_i32(tmp);
84496233 4177 }
1b2b1e54 4178 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
4179 } else if (size == 1) {
4180 if (load) {
58ab8e96 4181 tmp = tcg_temp_new_i32();
08307563 4182 gen_aa32_ld16u(tmp, addr, IS_USER(s));
84496233 4183 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 4184 tmp2 = tcg_temp_new_i32();
08307563 4185 gen_aa32_ld16u(tmp2, addr, IS_USER(s));
84496233 4186 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
4187 tcg_gen_shli_i32(tmp2, tmp2, 16);
4188 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4189 tcg_temp_free_i32(tmp2);
84496233
JR
4190 neon_store_reg(rd, pass, tmp);
4191 } else {
4192 tmp = neon_load_reg(rd, pass);
7d1b0095 4193 tmp2 = tcg_temp_new_i32();
84496233 4194 tcg_gen_shri_i32(tmp2, tmp, 16);
08307563 4195 gen_aa32_st16(tmp, addr, IS_USER(s));
58ab8e96 4196 tcg_temp_free_i32(tmp);
84496233 4197 tcg_gen_addi_i32(addr, addr, stride);
08307563 4198 gen_aa32_st16(tmp2, addr, IS_USER(s));
58ab8e96 4199 tcg_temp_free_i32(tmp2);
1b2b1e54 4200 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 4201 }
84496233
JR
4202 } else /* size == 0 */ {
4203 if (load) {
39d5492a 4204 TCGV_UNUSED_I32(tmp2);
84496233 4205 for (n = 0; n < 4; n++) {
58ab8e96 4206 tmp = tcg_temp_new_i32();
08307563 4207 gen_aa32_ld8u(tmp, addr, IS_USER(s));
84496233
JR
4208 tcg_gen_addi_i32(addr, addr, stride);
4209 if (n == 0) {
4210 tmp2 = tmp;
4211 } else {
41ba8341
PB
4212 tcg_gen_shli_i32(tmp, tmp, n * 8);
4213 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 4214 tcg_temp_free_i32(tmp);
84496233 4215 }
9ee6e8bb 4216 }
84496233
JR
4217 neon_store_reg(rd, pass, tmp2);
4218 } else {
4219 tmp2 = neon_load_reg(rd, pass);
4220 for (n = 0; n < 4; n++) {
7d1b0095 4221 tmp = tcg_temp_new_i32();
84496233
JR
4222 if (n == 0) {
4223 tcg_gen_mov_i32(tmp, tmp2);
4224 } else {
4225 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4226 }
08307563 4227 gen_aa32_st8(tmp, addr, IS_USER(s));
58ab8e96 4228 tcg_temp_free_i32(tmp);
84496233
JR
4229 tcg_gen_addi_i32(addr, addr, stride);
4230 }
7d1b0095 4231 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
4232 }
4233 }
4234 }
4235 }
84496233 4236 rd += spacing;
9ee6e8bb 4237 }
e318a60b 4238 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4239 stride = nregs * 8;
4240 } else {
4241 size = (insn >> 10) & 3;
4242 if (size == 3) {
4243 /* Load single element to all lanes. */
8e18cde3
PM
4244 int a = (insn >> 4) & 1;
4245 if (!load) {
9ee6e8bb 4246 return 1;
8e18cde3 4247 }
9ee6e8bb
PB
4248 size = (insn >> 6) & 3;
4249 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
4250
4251 if (size == 3) {
4252 if (nregs != 4 || a == 0) {
9ee6e8bb 4253 return 1;
99c475ab 4254 }
8e18cde3
PM
4255                 /* For VLD4 size == 3, a == 1 means 32 bits at 16-byte alignment */
4256 size = 2;
4257 }
4258 if (nregs == 1 && a == 1 && size == 0) {
4259 return 1;
4260 }
4261 if (nregs == 3 && a == 1) {
4262 return 1;
4263 }
e318a60b 4264 addr = tcg_temp_new_i32();
8e18cde3
PM
4265 load_reg_var(s, addr, rn);
4266 if (nregs == 1) {
4267 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4268 tmp = gen_load_and_replicate(s, addr, size);
4269 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4270 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4271 if (insn & (1 << 5)) {
4272 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4273 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4274 }
4275 tcg_temp_free_i32(tmp);
4276 } else {
4277 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4278 stride = (insn & (1 << 5)) ? 2 : 1;
4279 for (reg = 0; reg < nregs; reg++) {
4280 tmp = gen_load_and_replicate(s, addr, size);
4281 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4282 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4283 tcg_temp_free_i32(tmp);
4284 tcg_gen_addi_i32(addr, addr, 1 << size);
4285 rd += stride;
4286 }
9ee6e8bb 4287 }
e318a60b 4288 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4289 stride = (1 << size) * nregs;
4290 } else {
4291 /* Single element. */
93262b16 4292 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
4293 pass = (insn >> 7) & 1;
4294 switch (size) {
4295 case 0:
4296 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
4297 stride = 1;
4298 break;
4299 case 1:
4300 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4301 stride = (insn & (1 << 5)) ? 2 : 1;
4302 break;
4303 case 2:
4304 shift = 0;
9ee6e8bb
PB
4305 stride = (insn & (1 << 6)) ? 2 : 1;
4306 break;
4307 default:
4308 abort();
4309 }
4310 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4311 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4312 switch (nregs) {
4313 case 1:
4314 if (((idx & (1 << size)) != 0) ||
4315 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4316 return 1;
4317 }
4318 break;
4319 case 3:
4320 if ((idx & 1) != 0) {
4321 return 1;
4322 }
4323 /* fall through */
4324 case 2:
4325 if (size == 2 && (idx & 2) != 0) {
4326 return 1;
4327 }
4328 break;
4329 case 4:
4330 if ((size == 2) && ((idx & 3) == 3)) {
4331 return 1;
4332 }
4333 break;
4334 default:
4335 abort();
4336 }
4337 if ((rd + stride * (nregs - 1)) > 31) {
4338 /* Attempts to write off the end of the register file
4339 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4340 * the neon_load_reg() would write off the end of the array.
4341 */
4342 return 1;
4343 }
e318a60b 4344 addr = tcg_temp_new_i32();
dcc65026 4345 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4346 for (reg = 0; reg < nregs; reg++) {
4347 if (load) {
58ab8e96 4348 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
4349 switch (size) {
4350 case 0:
08307563 4351 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4352 break;
4353 case 1:
08307563 4354 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4355 break;
4356 case 2:
08307563 4357 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 4358 break;
a50f5b91
PB
4359 default: /* Avoid compiler warnings. */
4360 abort();
9ee6e8bb
PB
4361 }
4362 if (size != 2) {
8f8e3aa4 4363 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
4364 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4365 shift, size ? 16 : 8);
7d1b0095 4366 tcg_temp_free_i32(tmp2);
9ee6e8bb 4367 }
8f8e3aa4 4368 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4369 } else { /* Store */
8f8e3aa4
PB
4370 tmp = neon_load_reg(rd, pass);
4371 if (shift)
4372 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4373 switch (size) {
4374 case 0:
08307563 4375 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4376 break;
4377 case 1:
08307563 4378 gen_aa32_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4379 break;
4380 case 2:
08307563 4381 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4382 break;
99c475ab 4383 }
58ab8e96 4384 tcg_temp_free_i32(tmp);
99c475ab 4385 }
9ee6e8bb 4386 rd += stride;
1b2b1e54 4387 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4388 }
e318a60b 4389 tcg_temp_free_i32(addr);
9ee6e8bb 4390 stride = nregs * (1 << size);
99c475ab 4391 }
9ee6e8bb
PB
4392 }
4393 if (rm != 15) {
39d5492a 4394 TCGv_i32 base;
b26eefb6
PB
4395
4396 base = load_reg(s, rn);
9ee6e8bb 4397 if (rm == 13) {
b26eefb6 4398 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4399 } else {
39d5492a 4400 TCGv_i32 index;
b26eefb6
PB
4401 index = load_reg(s, rm);
4402 tcg_gen_add_i32(base, base, index);
7d1b0095 4403 tcg_temp_free_i32(index);
9ee6e8bb 4404 }
b26eefb6 4405 store_reg(s, rn, base);
9ee6e8bb
PB
4406 }
4407 return 0;
4408}
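/* Post-indexed writeback for the element load/stores above: rm == 15 means no
 * writeback, rm == 13 adds the total transfer size (stride) to rn, and any
 * other rm adds that register's value to rn. */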
3b46e624 4409
8f8e3aa4 4410/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4411static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4412{
4413 tcg_gen_and_i32(t, t, c);
f669df27 4414 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4415 tcg_gen_or_i32(dest, t, f);
4416}
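/* i.e. dest = (t & c) | (f & ~c), a per-bit select; VBSL, VBIT and VBIF are
 * all implemented by calling this with the operands permuted appropriately. */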
4417
39d5492a 4418static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4419{
4420 switch (size) {
4421 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4422 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4423 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4424 default: abort();
4425 }
4426}
4427
39d5492a 4428static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4429{
4430 switch (size) {
02da0b2d
PM
4431 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4432 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4433 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4434 default: abort();
4435 }
4436}
4437
39d5492a 4438static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4439{
4440 switch (size) {
02da0b2d
PM
4441 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4442 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4443 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4444 default: abort();
4445 }
4446}
4447
39d5492a 4448static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4449{
4450 switch (size) {
02da0b2d
PM
4451 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4452 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4453 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4454 default: abort();
4455 }
4456}
4457
39d5492a 4458static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4459 int q, int u)
4460{
4461 if (q) {
4462 if (u) {
4463 switch (size) {
4464 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4465 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4466 default: abort();
4467 }
4468 } else {
4469 switch (size) {
4470 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4471 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4472 default: abort();
4473 }
4474 }
4475 } else {
4476 if (u) {
4477 switch (size) {
b408a9b0
CL
4478 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4479 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4480 default: abort();
4481 }
4482 } else {
4483 switch (size) {
4484 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4485 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4486 default: abort();
4487 }
4488 }
4489 }
4490}
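/* Selects among the 16/32-bit shift helpers: q picks the rounding (rshl)
 * variants and u the unsigned ones. The narrowing shift insns pass a negative
 * per-element shift amount here to get the required right shift. */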
4491
39d5492a 4492static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4493{
4494 if (u) {
4495 switch (size) {
4496 case 0: gen_helper_neon_widen_u8(dest, src); break;
4497 case 1: gen_helper_neon_widen_u16(dest, src); break;
4498 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4499 default: abort();
4500 }
4501 } else {
4502 switch (size) {
4503 case 0: gen_helper_neon_widen_s8(dest, src); break;
4504 case 1: gen_helper_neon_widen_s16(dest, src); break;
4505 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4506 default: abort();
4507 }
4508 }
7d1b0095 4509 tcg_temp_free_i32(src);
ad69471c
PB
4510}
4511
4512static inline void gen_neon_addl(int size)
4513{
4514 switch (size) {
4515 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4516 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4517 case 2: tcg_gen_add_i64(CPU_V001); break;
4518 default: abort();
4519 }
4520}
4521
4522static inline void gen_neon_subl(int size)
4523{
4524 switch (size) {
4525 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4526 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4527 case 2: tcg_gen_sub_i64(CPU_V001); break;
4528 default: abort();
4529 }
4530}
4531
a7812ae4 4532static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4533{
4534 switch (size) {
4535 case 0: gen_helper_neon_negl_u16(var, var); break;
4536 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4537 case 2:
4538 tcg_gen_neg_i64(var, var);
4539 break;
ad69471c
PB
4540 default: abort();
4541 }
4542}
4543
a7812ae4 4544static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4545{
4546 switch (size) {
02da0b2d
PM
4547 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4548 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4549 default: abort();
4550 }
4551}
4552
39d5492a
PM
4553static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4554 int size, int u)
ad69471c 4555{
a7812ae4 4556 TCGv_i64 tmp;
ad69471c
PB
4557
4558 switch ((size << 1) | u) {
4559 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4560 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4561 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4562 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4563 case 4:
4564 tmp = gen_muls_i64_i32(a, b);
4565 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4566 tcg_temp_free_i64(tmp);
ad69471c
PB
4567 break;
4568 case 5:
4569 tmp = gen_mulu_i64_i32(a, b);
4570 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4571 tcg_temp_free_i64(tmp);
ad69471c
PB
4572 break;
4573 default: abort();
4574 }
c6067f04
CL
4575
4576 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4577 Don't forget to clean them now. */
4578 if (size < 2) {
7d1b0095
PM
4579 tcg_temp_free_i32(a);
4580 tcg_temp_free_i32(b);
c6067f04 4581 }
ad69471c
PB
4582}
4583
39d5492a
PM
4584static void gen_neon_narrow_op(int op, int u, int size,
4585 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4586{
4587 if (op) {
4588 if (u) {
4589 gen_neon_unarrow_sats(size, dest, src);
4590 } else {
4591 gen_neon_narrow(size, dest, src);
4592 }
4593 } else {
4594 if (u) {
4595 gen_neon_narrow_satu(size, dest, src);
4596 } else {
4597 gen_neon_narrow_sats(size, dest, src);
4598 }
4599 }
4600}
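/* Maps the (op, u) bits of the narrowing insns onto the four narrow flavours
 * above: with op set we get the plain narrow, or the signed-input/unsigned-
 * result saturating narrow when u is also set; with op clear we get the
 * unsigned or signed saturating narrow depending on u. */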
4601
62698be3
PM
4602/* Symbolic constants for op fields for Neon 3-register same-length.
4603 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4604 * table A7-9.
4605 */
4606#define NEON_3R_VHADD 0
4607#define NEON_3R_VQADD 1
4608#define NEON_3R_VRHADD 2
4609#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4610#define NEON_3R_VHSUB 4
4611#define NEON_3R_VQSUB 5
4612#define NEON_3R_VCGT 6
4613#define NEON_3R_VCGE 7
4614#define NEON_3R_VSHL 8
4615#define NEON_3R_VQSHL 9
4616#define NEON_3R_VRSHL 10
4617#define NEON_3R_VQRSHL 11
4618#define NEON_3R_VMAX 12
4619#define NEON_3R_VMIN 13
4620#define NEON_3R_VABD 14
4621#define NEON_3R_VABA 15
4622#define NEON_3R_VADD_VSUB 16
4623#define NEON_3R_VTST_VCEQ 17
4624#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4625#define NEON_3R_VMUL 19
4626#define NEON_3R_VPMAX 20
4627#define NEON_3R_VPMIN 21
4628#define NEON_3R_VQDMULH_VQRDMULH 22
4629#define NEON_3R_VPADD 23
da97f52c 4630#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
4631#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4632#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4633#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4634#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4635#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
505935fc 4636#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
62698be3
PM
4637
4638static const uint8_t neon_3r_sizes[] = {
4639 [NEON_3R_VHADD] = 0x7,
4640 [NEON_3R_VQADD] = 0xf,
4641 [NEON_3R_VRHADD] = 0x7,
4642 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4643 [NEON_3R_VHSUB] = 0x7,
4644 [NEON_3R_VQSUB] = 0xf,
4645 [NEON_3R_VCGT] = 0x7,
4646 [NEON_3R_VCGE] = 0x7,
4647 [NEON_3R_VSHL] = 0xf,
4648 [NEON_3R_VQSHL] = 0xf,
4649 [NEON_3R_VRSHL] = 0xf,
4650 [NEON_3R_VQRSHL] = 0xf,
4651 [NEON_3R_VMAX] = 0x7,
4652 [NEON_3R_VMIN] = 0x7,
4653 [NEON_3R_VABD] = 0x7,
4654 [NEON_3R_VABA] = 0x7,
4655 [NEON_3R_VADD_VSUB] = 0xf,
4656 [NEON_3R_VTST_VCEQ] = 0x7,
4657 [NEON_3R_VML] = 0x7,
4658 [NEON_3R_VMUL] = 0x7,
4659 [NEON_3R_VPMAX] = 0x7,
4660 [NEON_3R_VPMIN] = 0x7,
4661 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4662 [NEON_3R_VPADD] = 0x7,
da97f52c 4663 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4664 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4665 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4666 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4667 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4668 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
505935fc 4669 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4670};
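/* Each entry is a bitmask of the element sizes accepted by that op (bit n set
 * => size n is valid): 0x7 allows the 8/16/32-bit sizes, 0xf additionally
 * allows size 3 (the separate 64-bit path), and 0x5 is used where size bit 1
 * encodes part of the operation, leaving only sizes 0 and 2 legal. */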
4671
600b828c
PM
4672/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4673 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4674 * table A7-13.
4675 */
4676#define NEON_2RM_VREV64 0
4677#define NEON_2RM_VREV32 1
4678#define NEON_2RM_VREV16 2
4679#define NEON_2RM_VPADDL 4
4680#define NEON_2RM_VPADDL_U 5
9d935509
AB
4681#define NEON_2RM_AESE 6 /* Includes AESD */
4682#define NEON_2RM_AESMC 7 /* Includes AESIMC */
600b828c
PM
4683#define NEON_2RM_VCLS 8
4684#define NEON_2RM_VCLZ 9
4685#define NEON_2RM_VCNT 10
4686#define NEON_2RM_VMVN 11
4687#define NEON_2RM_VPADAL 12
4688#define NEON_2RM_VPADAL_U 13
4689#define NEON_2RM_VQABS 14
4690#define NEON_2RM_VQNEG 15
4691#define NEON_2RM_VCGT0 16
4692#define NEON_2RM_VCGE0 17
4693#define NEON_2RM_VCEQ0 18
4694#define NEON_2RM_VCLE0 19
4695#define NEON_2RM_VCLT0 20
4696#define NEON_2RM_VABS 22
4697#define NEON_2RM_VNEG 23
4698#define NEON_2RM_VCGT0_F 24
4699#define NEON_2RM_VCGE0_F 25
4700#define NEON_2RM_VCEQ0_F 26
4701#define NEON_2RM_VCLE0_F 27
4702#define NEON_2RM_VCLT0_F 28
4703#define NEON_2RM_VABS_F 30
4704#define NEON_2RM_VNEG_F 31
4705#define NEON_2RM_VSWP 32
4706#define NEON_2RM_VTRN 33
4707#define NEON_2RM_VUZP 34
4708#define NEON_2RM_VZIP 35
4709#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4710#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4711#define NEON_2RM_VSHLL 38
2ce70625 4712#define NEON_2RM_VRINTX 41
600b828c
PM
4713#define NEON_2RM_VCVT_F16_F32 44
4714#define NEON_2RM_VCVT_F32_F16 46
4715#define NEON_2RM_VRECPE 56
4716#define NEON_2RM_VRSQRTE 57
4717#define NEON_2RM_VRECPE_F 58
4718#define NEON_2RM_VRSQRTE_F 59
4719#define NEON_2RM_VCVT_FS 60
4720#define NEON_2RM_VCVT_FU 61
4721#define NEON_2RM_VCVT_SF 62
4722#define NEON_2RM_VCVT_UF 63
4723
4724static int neon_2rm_is_float_op(int op)
4725{
4726 /* Return true if this neon 2reg-misc op is float-to-float */
4727 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
2ce70625 4728 op == NEON_2RM_VRINTX || op >= NEON_2RM_VRECPE_F);
600b828c
PM
4729}
4730
4731/* Each entry in this array has bit n set if the insn allows
4732 * size value n (otherwise it will UNDEF). Since unallocated
4733 * op values will have no bits set they always UNDEF.
4734 */
4735static const uint8_t neon_2rm_sizes[] = {
4736 [NEON_2RM_VREV64] = 0x7,
4737 [NEON_2RM_VREV32] = 0x3,
4738 [NEON_2RM_VREV16] = 0x1,
4739 [NEON_2RM_VPADDL] = 0x7,
4740 [NEON_2RM_VPADDL_U] = 0x7,
9d935509
AB
4741 [NEON_2RM_AESE] = 0x1,
4742 [NEON_2RM_AESMC] = 0x1,
600b828c
PM
4743 [NEON_2RM_VCLS] = 0x7,
4744 [NEON_2RM_VCLZ] = 0x7,
4745 [NEON_2RM_VCNT] = 0x1,
4746 [NEON_2RM_VMVN] = 0x1,
4747 [NEON_2RM_VPADAL] = 0x7,
4748 [NEON_2RM_VPADAL_U] = 0x7,
4749 [NEON_2RM_VQABS] = 0x7,
4750 [NEON_2RM_VQNEG] = 0x7,
4751 [NEON_2RM_VCGT0] = 0x7,
4752 [NEON_2RM_VCGE0] = 0x7,
4753 [NEON_2RM_VCEQ0] = 0x7,
4754 [NEON_2RM_VCLE0] = 0x7,
4755 [NEON_2RM_VCLT0] = 0x7,
4756 [NEON_2RM_VABS] = 0x7,
4757 [NEON_2RM_VNEG] = 0x7,
4758 [NEON_2RM_VCGT0_F] = 0x4,
4759 [NEON_2RM_VCGE0_F] = 0x4,
4760 [NEON_2RM_VCEQ0_F] = 0x4,
4761 [NEON_2RM_VCLE0_F] = 0x4,
4762 [NEON_2RM_VCLT0_F] = 0x4,
4763 [NEON_2RM_VABS_F] = 0x4,
4764 [NEON_2RM_VNEG_F] = 0x4,
4765 [NEON_2RM_VSWP] = 0x1,
4766 [NEON_2RM_VTRN] = 0x7,
4767 [NEON_2RM_VUZP] = 0x7,
4768 [NEON_2RM_VZIP] = 0x7,
4769 [NEON_2RM_VMOVN] = 0x7,
4770 [NEON_2RM_VQMOVN] = 0x7,
4771 [NEON_2RM_VSHLL] = 0x7,
2ce70625 4772 [NEON_2RM_VRINTX] = 0x4,
600b828c
PM
4773 [NEON_2RM_VCVT_F16_F32] = 0x2,
4774 [NEON_2RM_VCVT_F32_F16] = 0x2,
4775 [NEON_2RM_VRECPE] = 0x4,
4776 [NEON_2RM_VRSQRTE] = 0x4,
4777 [NEON_2RM_VRECPE_F] = 0x4,
4778 [NEON_2RM_VRSQRTE_F] = 0x4,
4779 [NEON_2RM_VCVT_FS] = 0x4,
4780 [NEON_2RM_VCVT_FU] = 0x4,
4781 [NEON_2RM_VCVT_SF] = 0x4,
4782 [NEON_2RM_VCVT_UF] = 0x4,
4783};
4784
9ee6e8bb
PB
4785/* Translate a NEON data processing instruction. Return nonzero if the
4786 instruction is invalid.
ad69471c
PB
4787 We process data in a mixture of 32-bit and 64-bit chunks.
4788 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4789
0ecb72a5 4790static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4791{
4792 int op;
4793 int q;
4794 int rd, rn, rm;
4795 int size;
4796 int shift;
4797 int pass;
4798 int count;
4799 int pairwise;
4800 int u;
ca9a32e4 4801 uint32_t imm, mask;
39d5492a 4802 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4803 TCGv_i64 tmp64;
9ee6e8bb 4804
5df8bac1 4805 if (!s->vfp_enabled)
9ee6e8bb
PB
4806 return 1;
4807 q = (insn & (1 << 6)) != 0;
4808 u = (insn >> 24) & 1;
4809 VFP_DREG_D(rd, insn);
4810 VFP_DREG_N(rn, insn);
4811 VFP_DREG_M(rm, insn);
4812 size = (insn >> 20) & 3;
4813 if ((insn & (1 << 23)) == 0) {
4814 /* Three register same length. */
4815 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4816 /* Catch invalid op and bad size combinations: UNDEF */
4817 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4818 return 1;
4819 }
25f84f79
PM
4820 /* All insns of this form UNDEF for either this condition or the
4821 * superset of cases "Q==1"; we catch the latter later.
4822 */
4823 if (q && ((rd | rn | rm) & 1)) {
4824 return 1;
4825 }
62698be3
PM
4826 if (size == 3 && op != NEON_3R_LOGIC) {
4827 /* 64-bit element instructions. */
9ee6e8bb 4828 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4829 neon_load_reg64(cpu_V0, rn + pass);
4830 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4831 switch (op) {
62698be3 4832 case NEON_3R_VQADD:
9ee6e8bb 4833 if (u) {
02da0b2d
PM
4834 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4835 cpu_V0, cpu_V1);
2c0262af 4836 } else {
02da0b2d
PM
4837 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4838 cpu_V0, cpu_V1);
2c0262af 4839 }
9ee6e8bb 4840 break;
62698be3 4841 case NEON_3R_VQSUB:
9ee6e8bb 4842 if (u) {
02da0b2d
PM
4843 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4844 cpu_V0, cpu_V1);
ad69471c 4845 } else {
02da0b2d
PM
4846 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4847 cpu_V0, cpu_V1);
ad69471c
PB
4848 }
4849 break;
62698be3 4850 case NEON_3R_VSHL:
ad69471c
PB
4851 if (u) {
4852 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4853 } else {
4854 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4855 }
4856 break;
62698be3 4857 case NEON_3R_VQSHL:
ad69471c 4858 if (u) {
02da0b2d
PM
4859 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4860 cpu_V1, cpu_V0);
ad69471c 4861 } else {
02da0b2d
PM
4862 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4863 cpu_V1, cpu_V0);
ad69471c
PB
4864 }
4865 break;
62698be3 4866 case NEON_3R_VRSHL:
ad69471c
PB
4867 if (u) {
4868 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4869 } else {
ad69471c
PB
4870 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4871 }
4872 break;
62698be3 4873 case NEON_3R_VQRSHL:
ad69471c 4874 if (u) {
02da0b2d
PM
4875 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4876 cpu_V1, cpu_V0);
ad69471c 4877 } else {
02da0b2d
PM
4878 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4879 cpu_V1, cpu_V0);
1e8d4eec 4880 }
9ee6e8bb 4881 break;
62698be3 4882 case NEON_3R_VADD_VSUB:
9ee6e8bb 4883 if (u) {
ad69471c 4884 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4885 } else {
ad69471c 4886 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4887 }
4888 break;
4889 default:
4890 abort();
2c0262af 4891 }
ad69471c 4892 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4893 }
9ee6e8bb 4894 return 0;
2c0262af 4895 }
25f84f79 4896 pairwise = 0;
9ee6e8bb 4897 switch (op) {
62698be3
PM
4898 case NEON_3R_VSHL:
4899 case NEON_3R_VQSHL:
4900 case NEON_3R_VRSHL:
4901 case NEON_3R_VQRSHL:
9ee6e8bb 4902 {
ad69471c
PB
4903 int rtmp;
4904 /* Shift instruction operands are reversed. */
4905 rtmp = rn;
9ee6e8bb 4906 rn = rm;
ad69471c 4907 rm = rtmp;
9ee6e8bb 4908 }
2c0262af 4909 break;
25f84f79
PM
4910 case NEON_3R_VPADD:
4911 if (u) {
4912 return 1;
4913 }
4914 /* Fall through */
62698be3
PM
4915 case NEON_3R_VPMAX:
4916 case NEON_3R_VPMIN:
9ee6e8bb 4917 pairwise = 1;
2c0262af 4918 break;
25f84f79
PM
4919 case NEON_3R_FLOAT_ARITH:
4920 pairwise = (u && size < 2); /* if VPADD (float) */
4921 break;
4922 case NEON_3R_FLOAT_MINMAX:
4923 pairwise = u; /* if VPMIN/VPMAX (float) */
4924 break;
4925 case NEON_3R_FLOAT_CMP:
4926 if (!u && size) {
4927 /* no encoding for U=0 C=1x */
4928 return 1;
4929 }
4930 break;
4931 case NEON_3R_FLOAT_ACMP:
4932 if (!u) {
4933 return 1;
4934 }
4935 break;
505935fc
WN
4936 case NEON_3R_FLOAT_MISC:
4937 /* VMAXNM/VMINNM in ARMv8 */
4938 if (u && !arm_feature(env, ARM_FEATURE_V8)) {
25f84f79
PM
4939 return 1;
4940 }
2c0262af 4941 break;
25f84f79
PM
4942 case NEON_3R_VMUL:
4943 if (u && (size != 0)) {
4944 /* UNDEF on invalid size for polynomial subcase */
4945 return 1;
4946 }
2c0262af 4947 break;
da97f52c
PM
4948 case NEON_3R_VFM:
4949 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4950 return 1;
4951 }
4952 break;
9ee6e8bb 4953 default:
2c0262af 4954 break;
9ee6e8bb 4955 }
dd8fbd78 4956
25f84f79
PM
4957 if (pairwise && q) {
4958 /* All the pairwise insns UNDEF if Q is set */
4959 return 1;
4960 }
4961
9ee6e8bb
PB
4962 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4963
4964 if (pairwise) {
4965 /* Pairwise. */
a5a14945
JR
4966 if (pass < 1) {
4967 tmp = neon_load_reg(rn, 0);
4968 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4969 } else {
a5a14945
JR
4970 tmp = neon_load_reg(rm, 0);
4971 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4972 }
4973 } else {
4974 /* Elementwise. */
dd8fbd78
FN
4975 tmp = neon_load_reg(rn, pass);
4976 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4977 }
4978 switch (op) {
62698be3 4979 case NEON_3R_VHADD:
9ee6e8bb
PB
4980 GEN_NEON_INTEGER_OP(hadd);
4981 break;
62698be3 4982 case NEON_3R_VQADD:
02da0b2d 4983 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4984 break;
62698be3 4985 case NEON_3R_VRHADD:
9ee6e8bb 4986 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4987 break;
62698be3 4988 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4989 switch ((u << 2) | size) {
4990 case 0: /* VAND */
dd8fbd78 4991 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4992 break;
4993 case 1: /* BIC */
f669df27 4994 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4995 break;
4996 case 2: /* VORR */
dd8fbd78 4997 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4998 break;
4999 case 3: /* VORN */
f669df27 5000 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5001 break;
5002 case 4: /* VEOR */
dd8fbd78 5003 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5004 break;
5005 case 5: /* VBSL */
dd8fbd78
FN
5006 tmp3 = neon_load_reg(rd, pass);
5007 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5008 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5009 break;
5010 case 6: /* VBIT */
dd8fbd78
FN
5011 tmp3 = neon_load_reg(rd, pass);
5012 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5013 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5014 break;
5015 case 7: /* VBIF */
dd8fbd78
FN
5016 tmp3 = neon_load_reg(rd, pass);
5017 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5018 tcg_temp_free_i32(tmp3);
9ee6e8bb 5019 break;
2c0262af
FB
5020 }
5021 break;
62698be3 5022 case NEON_3R_VHSUB:
9ee6e8bb
PB
5023 GEN_NEON_INTEGER_OP(hsub);
5024 break;
62698be3 5025 case NEON_3R_VQSUB:
02da0b2d 5026 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5027 break;
62698be3 5028 case NEON_3R_VCGT:
9ee6e8bb
PB
5029 GEN_NEON_INTEGER_OP(cgt);
5030 break;
62698be3 5031 case NEON_3R_VCGE:
9ee6e8bb
PB
5032 GEN_NEON_INTEGER_OP(cge);
5033 break;
62698be3 5034 case NEON_3R_VSHL:
ad69471c 5035 GEN_NEON_INTEGER_OP(shl);
2c0262af 5036 break;
62698be3 5037 case NEON_3R_VQSHL:
02da0b2d 5038 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5039 break;
62698be3 5040 case NEON_3R_VRSHL:
ad69471c 5041 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5042 break;
62698be3 5043 case NEON_3R_VQRSHL:
02da0b2d 5044 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5045 break;
62698be3 5046 case NEON_3R_VMAX:
9ee6e8bb
PB
5047 GEN_NEON_INTEGER_OP(max);
5048 break;
62698be3 5049 case NEON_3R_VMIN:
9ee6e8bb
PB
5050 GEN_NEON_INTEGER_OP(min);
5051 break;
62698be3 5052 case NEON_3R_VABD:
9ee6e8bb
PB
5053 GEN_NEON_INTEGER_OP(abd);
5054 break;
62698be3 5055 case NEON_3R_VABA:
9ee6e8bb 5056 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5057 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5058 tmp2 = neon_load_reg(rd, pass);
5059 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5060 break;
62698be3 5061 case NEON_3R_VADD_VSUB:
9ee6e8bb 5062 if (!u) { /* VADD */
62698be3 5063 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5064 } else { /* VSUB */
5065 switch (size) {
dd8fbd78
FN
5066 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5067 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5068 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5069 default: abort();
9ee6e8bb
PB
5070 }
5071 }
5072 break;
62698be3 5073 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5074 if (!u) { /* VTST */
5075 switch (size) {
dd8fbd78
FN
5076 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5077 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5078 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5079 default: abort();
9ee6e8bb
PB
5080 }
5081 } else { /* VCEQ */
5082 switch (size) {
dd8fbd78
FN
5083 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5084 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5085 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5086 default: abort();
9ee6e8bb
PB
5087 }
5088 }
5089 break;
62698be3 5090        case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 5091 switch (size) {
dd8fbd78
FN
5092 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5093 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5094 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5095 default: abort();
9ee6e8bb 5096 }
7d1b0095 5097 tcg_temp_free_i32(tmp2);
dd8fbd78 5098 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5099 if (u) { /* VMLS */
dd8fbd78 5100 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5101 } else { /* VMLA */
dd8fbd78 5102 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5103 }
5104 break;
62698be3 5105 case NEON_3R_VMUL:
9ee6e8bb 5106 if (u) { /* polynomial */
dd8fbd78 5107 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5108 } else { /* Integer */
5109 switch (size) {
dd8fbd78
FN
5110 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5111 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5112 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5113 default: abort();
9ee6e8bb
PB
5114 }
5115 }
5116 break;
62698be3 5117 case NEON_3R_VPMAX:
9ee6e8bb
PB
5118 GEN_NEON_INTEGER_OP(pmax);
5119 break;
62698be3 5120 case NEON_3R_VPMIN:
9ee6e8bb
PB
5121 GEN_NEON_INTEGER_OP(pmin);
5122 break;
62698be3 5123 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5124 if (!u) { /* VQDMULH */
5125 switch (size) {
02da0b2d
PM
5126 case 1:
5127 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5128 break;
5129 case 2:
5130 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5131 break;
62698be3 5132 default: abort();
9ee6e8bb 5133 }
62698be3 5134 } else { /* VQRDMULH */
9ee6e8bb 5135 switch (size) {
02da0b2d
PM
5136 case 1:
5137 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5138 break;
5139 case 2:
5140 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5141 break;
62698be3 5142 default: abort();
9ee6e8bb
PB
5143 }
5144 }
5145 break;
62698be3 5146 case NEON_3R_VPADD:
9ee6e8bb 5147 switch (size) {
dd8fbd78
FN
5148 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5149 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5150 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5151 default: abort();
9ee6e8bb
PB
5152 }
5153 break;
62698be3 5154 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5155 {
5156 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5157 switch ((u << 2) | size) {
5158 case 0: /* VADD */
aa47cfdd
PM
5159 case 4: /* VPADD */
5160 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5161 break;
5162 case 2: /* VSUB */
aa47cfdd 5163 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5164 break;
5165 case 6: /* VABD */
aa47cfdd 5166 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5167 break;
5168 default:
62698be3 5169 abort();
9ee6e8bb 5170 }
aa47cfdd 5171 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5172 break;
aa47cfdd 5173 }
62698be3 5174 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5175 {
5176 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5177 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5178 if (!u) {
7d1b0095 5179 tcg_temp_free_i32(tmp2);
dd8fbd78 5180 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5181 if (size == 0) {
aa47cfdd 5182 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5183 } else {
aa47cfdd 5184 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5185 }
5186 }
aa47cfdd 5187 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5188 break;
aa47cfdd 5189 }
62698be3 5190 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5191 {
5192 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5193 if (!u) {
aa47cfdd 5194 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5195 } else {
aa47cfdd
PM
5196 if (size == 0) {
5197 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5198 } else {
5199 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5200 }
b5ff1b31 5201 }
aa47cfdd 5202 tcg_temp_free_ptr(fpstatus);
2c0262af 5203 break;
aa47cfdd 5204 }
62698be3 5205 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5206 {
5207 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5208 if (size == 0) {
5209 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5210 } else {
5211 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5212 }
5213 tcg_temp_free_ptr(fpstatus);
2c0262af 5214 break;
aa47cfdd 5215 }
62698be3 5216 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5217 {
5218 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5219 if (size == 0) {
f71a2ae5 5220 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5221 } else {
f71a2ae5 5222 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5223 }
5224 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5225 break;
aa47cfdd 5226 }
505935fc
WN
5227 case NEON_3R_FLOAT_MISC:
5228 if (u) {
5229 /* VMAXNM/VMINNM */
5230 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5231 if (size == 0) {
f71a2ae5 5232 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5233 } else {
f71a2ae5 5234 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5235 }
5236 tcg_temp_free_ptr(fpstatus);
5237 } else {
5238 if (size == 0) {
5239 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5240 } else {
5241 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5242 }
5243 }
2c0262af 5244 break;
da97f52c
PM
5245 case NEON_3R_VFM:
5246 {
5247 /* VFMA, VFMS: fused multiply-add */
5248 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5249 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5250 if (size) {
5251 /* VFMS */
5252 gen_helper_vfp_negs(tmp, tmp);
5253 }
5254 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5255 tcg_temp_free_i32(tmp3);
5256 tcg_temp_free_ptr(fpstatus);
5257 break;
5258 }
9ee6e8bb
PB
5259 default:
5260 abort();
2c0262af 5261 }
7d1b0095 5262 tcg_temp_free_i32(tmp2);
dd8fbd78 5263
9ee6e8bb
PB
5264 /* Save the result. For elementwise operations we can put it
5265 straight into the destination register. For pairwise operations
5266 we have to be careful to avoid clobbering the source operands. */
5267 if (pairwise && rd == rm) {
dd8fbd78 5268 neon_store_scratch(pass, tmp);
9ee6e8bb 5269 } else {
dd8fbd78 5270 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5271 }
5272
5273 } /* for pass */
5274 if (pairwise && rd == rm) {
5275 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5276 tmp = neon_load_scratch(pass);
5277 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5278 }
5279 }
ad69471c 5280 /* End of 3 register same size operations. */
9ee6e8bb
PB
5281 } else if (insn & (1 << 4)) {
5282 if ((insn & 0x00380080) != 0) {
5283 /* Two registers and shift. */
5284 op = (insn >> 8) & 0xf;
5285 if (insn & (1 << 7)) {
cc13115b
PM
5286 /* 64-bit shift. */
5287 if (op > 7) {
5288 return 1;
5289 }
9ee6e8bb
PB
5290 size = 3;
5291 } else {
5292 size = 2;
5293 while ((insn & (1 << (size + 19))) == 0)
5294 size--;
5295 }
5296 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5297 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5298 by immediate using the variable shift operations. */
5299 if (op < 8) {
5300 /* Shift by immediate:
5301 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5302 if (q && ((rd | rm) & 1)) {
5303 return 1;
5304 }
5305 if (!u && (op == 4 || op == 6)) {
5306 return 1;
5307 }
9ee6e8bb
PB
5308 /* Right shifts are encoded as N - shift, where N is the
5309 element size in bits. */
5310 if (op <= 4)
5311 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5312 if (size == 3) {
5313 count = q + 1;
5314 } else {
5315 count = q ? 4: 2;
5316 }
5317 switch (size) {
5318 case 0:
5319 imm = (uint8_t) shift;
5320 imm |= imm << 8;
5321 imm |= imm << 16;
5322 break;
5323 case 1:
5324 imm = (uint16_t) shift;
5325 imm |= imm << 16;
5326 break;
5327 case 2:
5328 case 3:
5329 imm = shift;
5330 break;
5331 default:
5332 abort();
5333 }
5334
5335 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5336 if (size == 3) {
5337 neon_load_reg64(cpu_V0, rm + pass);
5338 tcg_gen_movi_i64(cpu_V1, imm);
5339 switch (op) {
5340 case 0: /* VSHR */
5341 case 1: /* VSRA */
5342 if (u)
5343 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5344 else
ad69471c 5345 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5346 break;
ad69471c
PB
5347 case 2: /* VRSHR */
5348 case 3: /* VRSRA */
5349 if (u)
5350 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5351 else
ad69471c 5352 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5353 break;
ad69471c 5354 case 4: /* VSRI */
ad69471c
PB
5355 case 5: /* VSHL, VSLI */
5356 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5357 break;
0322b26e 5358 case 6: /* VQSHLU */
02da0b2d
PM
5359 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5360 cpu_V0, cpu_V1);
ad69471c 5361 break;
0322b26e
PM
5362 case 7: /* VQSHL */
5363 if (u) {
02da0b2d 5364 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5365 cpu_V0, cpu_V1);
5366 } else {
02da0b2d 5367 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5368 cpu_V0, cpu_V1);
5369 }
9ee6e8bb 5370 break;
9ee6e8bb 5371 }
ad69471c
PB
5372 if (op == 1 || op == 3) {
5373 /* Accumulate. */
5371cb81 5374 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5375 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5376 } else if (op == 4 || (op == 5 && u)) {
5377 /* Insert */
923e6509
CL
5378 neon_load_reg64(cpu_V1, rd + pass);
5379 uint64_t mask;
5380 if (shift < -63 || shift > 63) {
5381 mask = 0;
5382 } else {
5383 if (op == 4) {
5384 mask = 0xffffffffffffffffull >> -shift;
5385 } else {
5386 mask = 0xffffffffffffffffull << shift;
5387 }
5388 }
5389 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5390 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5391 }
5392 neon_store_reg64(cpu_V0, rd + pass);
5393 } else { /* size < 3 */
5394 /* Operands in T0 and T1. */
dd8fbd78 5395 tmp = neon_load_reg(rm, pass);
7d1b0095 5396 tmp2 = tcg_temp_new_i32();
dd8fbd78 5397 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5398 switch (op) {
5399 case 0: /* VSHR */
5400 case 1: /* VSRA */
5401 GEN_NEON_INTEGER_OP(shl);
5402 break;
5403 case 2: /* VRSHR */
5404 case 3: /* VRSRA */
5405 GEN_NEON_INTEGER_OP(rshl);
5406 break;
5407 case 4: /* VSRI */
ad69471c
PB
5408 case 5: /* VSHL, VSLI */
5409 switch (size) {
dd8fbd78
FN
5410 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5411 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5412 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5413 default: abort();
ad69471c
PB
5414 }
5415 break;
0322b26e 5416 case 6: /* VQSHLU */
ad69471c 5417 switch (size) {
0322b26e 5418 case 0:
02da0b2d
PM
5419 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5420 tmp, tmp2);
0322b26e
PM
5421 break;
5422 case 1:
02da0b2d
PM
5423 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5424 tmp, tmp2);
0322b26e
PM
5425 break;
5426 case 2:
02da0b2d
PM
5427 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5428 tmp, tmp2);
0322b26e
PM
5429 break;
5430 default:
cc13115b 5431 abort();
ad69471c
PB
5432 }
5433 break;
0322b26e 5434 case 7: /* VQSHL */
02da0b2d 5435 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5436 break;
ad69471c 5437 }
7d1b0095 5438 tcg_temp_free_i32(tmp2);
ad69471c
PB
5439
5440 if (op == 1 || op == 3) {
5441 /* Accumulate. */
dd8fbd78 5442 tmp2 = neon_load_reg(rd, pass);
5371cb81 5443 gen_neon_add(size, tmp, tmp2);
7d1b0095 5444 tcg_temp_free_i32(tmp2);
ad69471c
PB
5445 } else if (op == 4 || (op == 5 && u)) {
5446 /* Insert */
5447 switch (size) {
5448 case 0:
5449 if (op == 4)
ca9a32e4 5450 mask = 0xff >> -shift;
ad69471c 5451 else
ca9a32e4
JR
5452 mask = (uint8_t)(0xff << shift);
5453 mask |= mask << 8;
5454 mask |= mask << 16;
ad69471c
PB
5455 break;
5456 case 1:
5457 if (op == 4)
ca9a32e4 5458 mask = 0xffff >> -shift;
ad69471c 5459 else
ca9a32e4
JR
5460 mask = (uint16_t)(0xffff << shift);
5461 mask |= mask << 16;
ad69471c
PB
5462 break;
5463 case 2:
ca9a32e4
JR
5464 if (shift < -31 || shift > 31) {
5465 mask = 0;
5466 } else {
5467 if (op == 4)
5468 mask = 0xffffffffu >> -shift;
5469 else
5470 mask = 0xffffffffu << shift;
5471 }
ad69471c
PB
5472 break;
5473 default:
5474 abort();
5475 }
dd8fbd78 5476 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5477 tcg_gen_andi_i32(tmp, tmp, mask);
5478 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5479 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5480 tcg_temp_free_i32(tmp2);
ad69471c 5481 }
dd8fbd78 5482 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5483 }
5484 } /* for pass */
5485 } else if (op < 10) {
ad69471c 5486 /* Shift by immediate and narrow:
9ee6e8bb 5487 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5488 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5489 if (rm & 1) {
5490 return 1;
5491 }
9ee6e8bb
PB
5492 shift = shift - (1 << (size + 3));
5493 size++;
92cdfaeb 5494 if (size == 3) {
a7812ae4 5495 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5496 neon_load_reg64(cpu_V0, rm);
5497 neon_load_reg64(cpu_V1, rm + 1);
5498 for (pass = 0; pass < 2; pass++) {
5499 TCGv_i64 in;
5500 if (pass == 0) {
5501 in = cpu_V0;
5502 } else {
5503 in = cpu_V1;
5504 }
ad69471c 5505 if (q) {
0b36f4cd 5506 if (input_unsigned) {
92cdfaeb 5507 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5508 } else {
92cdfaeb 5509 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5510 }
ad69471c 5511 } else {
0b36f4cd 5512 if (input_unsigned) {
92cdfaeb 5513 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5514 } else {
92cdfaeb 5515 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5516 }
ad69471c 5517 }
7d1b0095 5518 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5519 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5520 neon_store_reg(rd, pass, tmp);
5521 } /* for pass */
5522 tcg_temp_free_i64(tmp64);
5523 } else {
5524 if (size == 1) {
5525 imm = (uint16_t)shift;
5526 imm |= imm << 16;
2c0262af 5527 } else {
92cdfaeb
PM
5528 /* size == 2 */
5529 imm = (uint32_t)shift;
5530 }
5531 tmp2 = tcg_const_i32(imm);
5532 tmp4 = neon_load_reg(rm + 1, 0);
5533 tmp5 = neon_load_reg(rm + 1, 1);
5534 for (pass = 0; pass < 2; pass++) {
5535 if (pass == 0) {
5536 tmp = neon_load_reg(rm, 0);
5537 } else {
5538 tmp = tmp4;
5539 }
0b36f4cd
CL
5540 gen_neon_shift_narrow(size, tmp, tmp2, q,
5541 input_unsigned);
92cdfaeb
PM
5542 if (pass == 0) {
5543 tmp3 = neon_load_reg(rm, 1);
5544 } else {
5545 tmp3 = tmp5;
5546 }
0b36f4cd
CL
5547 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5548 input_unsigned);
36aa55dc 5549 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5550 tcg_temp_free_i32(tmp);
5551 tcg_temp_free_i32(tmp3);
5552 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5553 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5554 neon_store_reg(rd, pass, tmp);
5555 } /* for pass */
c6067f04 5556 tcg_temp_free_i32(tmp2);
b75263d6 5557 }
9ee6e8bb 5558 } else if (op == 10) {
cc13115b
PM
5559 /* VSHLL, VMOVL */
5560 if (q || (rd & 1)) {
9ee6e8bb 5561 return 1;
cc13115b 5562 }
ad69471c
PB
5563 tmp = neon_load_reg(rm, 0);
5564 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5565 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5566 if (pass == 1)
5567 tmp = tmp2;
5568
5569 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5570
9ee6e8bb
PB
5571 if (shift != 0) {
5572 /* The shift is less than the width of the source
ad69471c
PB
5573 type, so we can just shift the whole register. */
5574 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5575 /* Widen the result of shift: we need to clear
5576 * the potential overflow bits resulting from
5577 * left bits of the narrow input appearing as
 5578 * right bits of the left neighbour narrow
5579 * input. */
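/* Worked example (numbers chosen for illustration): VSHLL.S8 #3 has
 * size == 0, so imm == 0xff >> 5 == 0x07 and imm64 ==
 * 0x0007000700070007; the andi with ~imm64 clears the low three bits
 * of every 16-bit lane, which would otherwise hold bits shifted in
 * from the lane below.
 */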
ad69471c
PB
5580 if (size < 2 || !u) {
5581 uint64_t imm64;
5582 if (size == 0) {
5583 imm = (0xffu >> (8 - shift));
5584 imm |= imm << 16;
acdf01ef 5585 } else if (size == 1) {
ad69471c 5586 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5587 } else {
5588 /* size == 2 */
5589 imm = 0xffffffff >> (32 - shift);
5590 }
5591 if (size < 2) {
5592 imm64 = imm | (((uint64_t)imm) << 32);
5593 } else {
5594 imm64 = imm;
9ee6e8bb 5595 }
acdf01ef 5596 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5597 }
5598 }
ad69471c 5599 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5600 }
f73534a5 5601 } else if (op >= 14) {
9ee6e8bb 5602 /* VCVT fixed-point. */
cc13115b
PM
5603 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5604 return 1;
5605 }
f73534a5
PM
5606 /* We have already masked out the must-be-1 top bit of imm6,
5607 * hence this 32-shift where the ARM ARM has 64-imm6.
5608 */
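/* Example (imm6 value assumed for illustration): imm6 == 0b111100
 * encodes 64 - 60 == 4 fraction bits; with the top bit already masked
 * off, shift == 28 here and 32 - shift recovers the same 4.
 */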
5609 shift = 32 - shift;
9ee6e8bb 5610 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5611 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5612 if (!(op & 1)) {
9ee6e8bb 5613 if (u)
5500b06c 5614 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5615 else
5500b06c 5616 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5617 } else {
5618 if (u)
5500b06c 5619 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5620 else
5500b06c 5621 gen_vfp_tosl(0, shift, 1);
2c0262af 5622 }
4373f3ce 5623 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5624 }
5625 } else {
9ee6e8bb
PB
5626 return 1;
5627 }
5628 } else { /* (insn & 0x00380080) == 0 */
5629 int invert;
7d80fee5
PM
5630 if (q && (rd & 1)) {
5631 return 1;
5632 }
9ee6e8bb
PB
5633
5634 op = (insn >> 8) & 0xf;
5635 /* One register and immediate. */
5636 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5637 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
 5638 /* Note that op = 2,3,4,5,6,7,10,11,12,13 with imm == 0 is UNPREDICTABLE.
 5639 * We choose not to special-case this and will behave as if a
5640 * valid constant encoding of 0 had been given.
5641 */
9ee6e8bb
PB
5642 switch (op) {
5643 case 0: case 1:
5644 /* no-op */
5645 break;
5646 case 2: case 3:
5647 imm <<= 8;
5648 break;
5649 case 4: case 5:
5650 imm <<= 16;
5651 break;
5652 case 6: case 7:
5653 imm <<= 24;
5654 break;
5655 case 8: case 9:
5656 imm |= imm << 16;
5657 break;
5658 case 10: case 11:
5659 imm = (imm << 8) | (imm << 24);
5660 break;
5661 case 12:
8e31209e 5662 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5663 break;
5664 case 13:
5665 imm = (imm << 16) | 0xffff;
5666 break;
5667 case 14:
5668 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5669 if (invert)
5670 imm = ~imm;
5671 break;
5672 case 15:
7d80fee5
PM
5673 if (invert) {
5674 return 1;
5675 }
9ee6e8bb
PB
5676 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5677 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5678 break;
5679 }
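/* Expansion examples (imm == 0xab chosen for illustration): op 8/9
 * gives 0x00ab00ab, op 10/11 gives 0xab00ab00 and op 12 gives
 * 0x0000abff; op 14 with invert set is the per-byte mask form that is
 * expanded specially in the loop below.
 */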
5680 if (invert)
5681 imm = ~imm;
5682
9ee6e8bb
PB
5683 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5684 if (op & 1 && op < 12) {
ad69471c 5685 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5686 if (invert) {
5687 /* The immediate value has already been inverted, so
5688 BIC becomes AND. */
ad69471c 5689 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5690 } else {
ad69471c 5691 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5692 }
9ee6e8bb 5693 } else {
ad69471c 5694 /* VMOV, VMVN. */
7d1b0095 5695 tmp = tcg_temp_new_i32();
9ee6e8bb 5696 if (op == 14 && invert) {
a5a14945 5697 int n;
ad69471c
PB
5698 uint32_t val;
5699 val = 0;
9ee6e8bb
PB
5700 for (n = 0; n < 4; n++) {
5701 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5702 val |= 0xff << (n * 8);
9ee6e8bb 5703 }
ad69471c
PB
5704 tcg_gen_movi_i32(tmp, val);
5705 } else {
5706 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5707 }
9ee6e8bb 5708 }
ad69471c 5709 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5710 }
5711 }
e4b3861d 5712 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5713 if (size != 3) {
5714 op = (insn >> 8) & 0xf;
5715 if ((insn & (1 << 6)) == 0) {
5716 /* Three registers of different lengths. */
5717 int src1_wide;
5718 int src2_wide;
5719 int prewiden;
695272dc
PM
5720 /* undefreq: bit 0 : UNDEF if size != 0
5721 * bit 1 : UNDEF if size == 0
5722 * bit 2 : UNDEF if U == 1
5723 * Note that [1:0] set implies 'always UNDEF'
5724 */
5725 int undefreq;
5726 /* prewiden, src1_wide, src2_wide, undefreq */
5727 static const int neon_3reg_wide[16][4] = {
5728 {1, 0, 0, 0}, /* VADDL */
5729 {1, 1, 0, 0}, /* VADDW */
5730 {1, 0, 0, 0}, /* VSUBL */
5731 {1, 1, 0, 0}, /* VSUBW */
5732 {0, 1, 1, 0}, /* VADDHN */
5733 {0, 0, 0, 0}, /* VABAL */
5734 {0, 1, 1, 0}, /* VSUBHN */
5735 {0, 0, 0, 0}, /* VABDL */
5736 {0, 0, 0, 0}, /* VMLAL */
5737 {0, 0, 0, 6}, /* VQDMLAL */
5738 {0, 0, 0, 0}, /* VMLSL */
5739 {0, 0, 0, 6}, /* VQDMLSL */
5740 {0, 0, 0, 0}, /* Integer VMULL */
5741 {0, 0, 0, 2}, /* VQDMULL */
5742 {0, 0, 0, 5}, /* Polynomial VMULL */
5743 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5744 };
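/* Reading the table, one example: the VQDMLAL row {0, 0, 0, 6} sets
 * undefreq bits 1 and 2, i.e. the encoding is UNDEF when size == 0 or
 * when U == 1, which the checks below enforce.
 */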
5745
5746 prewiden = neon_3reg_wide[op][0];
5747 src1_wide = neon_3reg_wide[op][1];
5748 src2_wide = neon_3reg_wide[op][2];
695272dc 5749 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5750
695272dc
PM
5751 if (((undefreq & 1) && (size != 0)) ||
5752 ((undefreq & 2) && (size == 0)) ||
5753 ((undefreq & 4) && u)) {
5754 return 1;
5755 }
5756 if ((src1_wide && (rn & 1)) ||
5757 (src2_wide && (rm & 1)) ||
5758 (!src2_wide && (rd & 1))) {
ad69471c 5759 return 1;
695272dc 5760 }
ad69471c 5761
9ee6e8bb
PB
5762 /* Avoid overlapping operands. Wide source operands are
5763 always aligned so will never overlap with wide
5764 destinations in problematic ways. */
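/* Concretely: the destination spans two 64-bit passes, so pass 0 can
 * overwrite the half of a narrow source that pass 1 still needs; that
 * high half is parked in scratch slot 2 here and re-read below when
 * pass == 1 and rd == rn (or rd == rm).
 */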
8f8e3aa4 5765 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5766 tmp = neon_load_reg(rm, 1);
5767 neon_store_scratch(2, tmp);
8f8e3aa4 5768 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5769 tmp = neon_load_reg(rn, 1);
5770 neon_store_scratch(2, tmp);
9ee6e8bb 5771 }
39d5492a 5772 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5773 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5774 if (src1_wide) {
5775 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5776 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5777 } else {
ad69471c 5778 if (pass == 1 && rd == rn) {
dd8fbd78 5779 tmp = neon_load_scratch(2);
9ee6e8bb 5780 } else {
ad69471c
PB
5781 tmp = neon_load_reg(rn, pass);
5782 }
5783 if (prewiden) {
5784 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5785 }
5786 }
ad69471c
PB
5787 if (src2_wide) {
5788 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5789 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5790 } else {
ad69471c 5791 if (pass == 1 && rd == rm) {
dd8fbd78 5792 tmp2 = neon_load_scratch(2);
9ee6e8bb 5793 } else {
ad69471c
PB
5794 tmp2 = neon_load_reg(rm, pass);
5795 }
5796 if (prewiden) {
5797 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5798 }
9ee6e8bb
PB
5799 }
5800 switch (op) {
5801 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5802 gen_neon_addl(size);
9ee6e8bb 5803 break;
79b0e534 5804 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5805 gen_neon_subl(size);
9ee6e8bb
PB
5806 break;
5807 case 5: case 7: /* VABAL, VABDL */
5808 switch ((size << 1) | u) {
ad69471c
PB
5809 case 0:
5810 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5811 break;
5812 case 1:
5813 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5814 break;
5815 case 2:
5816 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5817 break;
5818 case 3:
5819 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5820 break;
5821 case 4:
5822 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5823 break;
5824 case 5:
5825 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5826 break;
9ee6e8bb
PB
5827 default: abort();
5828 }
7d1b0095
PM
5829 tcg_temp_free_i32(tmp2);
5830 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5831 break;
5832 case 8: case 9: case 10: case 11: case 12: case 13:
5833 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5834 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5835 break;
5836 case 14: /* Polynomial VMULL */
e5ca24cb 5837 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5838 tcg_temp_free_i32(tmp2);
5839 tcg_temp_free_i32(tmp);
e5ca24cb 5840 break;
695272dc
PM
5841 default: /* 15 is RESERVED: caught earlier */
5842 abort();
9ee6e8bb 5843 }
ebcd88ce
PM
5844 if (op == 13) {
5845 /* VQDMULL */
5846 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5847 neon_store_reg64(cpu_V0, rd + pass);
5848 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5849 /* Accumulate. */
ebcd88ce 5850 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5851 switch (op) {
4dc064e6
PM
5852 case 10: /* VMLSL */
5853 gen_neon_negl(cpu_V0, size);
5854 /* Fall through */
5855 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5856 gen_neon_addl(size);
9ee6e8bb
PB
5857 break;
5858 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5859 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5860 if (op == 11) {
5861 gen_neon_negl(cpu_V0, size);
5862 }
ad69471c
PB
5863 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5864 break;
9ee6e8bb
PB
5865 default:
5866 abort();
5867 }
ad69471c 5868 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5869 } else if (op == 4 || op == 6) {
5870 /* Narrowing operation. */
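/* VADDHN/VSUBHN keep the high half of each wide result; the rounding
 * forms (VRADDHN/VRSUBHN, selected by u) first add half an LSB of the
 * kept part, e.g. 1u << 31 in the 64->32 case below, before
 * truncating.
 */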
7d1b0095 5871 tmp = tcg_temp_new_i32();
79b0e534 5872 if (!u) {
9ee6e8bb 5873 switch (size) {
ad69471c
PB
5874 case 0:
5875 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5876 break;
5877 case 1:
5878 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5879 break;
5880 case 2:
5881 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5882 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5883 break;
9ee6e8bb
PB
5884 default: abort();
5885 }
5886 } else {
5887 switch (size) {
ad69471c
PB
5888 case 0:
5889 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5890 break;
5891 case 1:
5892 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5893 break;
5894 case 2:
5895 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5896 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5897 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5898 break;
9ee6e8bb
PB
5899 default: abort();
5900 }
5901 }
ad69471c
PB
5902 if (pass == 0) {
5903 tmp3 = tmp;
5904 } else {
5905 neon_store_reg(rd, 0, tmp3);
5906 neon_store_reg(rd, 1, tmp);
5907 }
9ee6e8bb
PB
5908 } else {
5909 /* Write back the result. */
ad69471c 5910 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5911 }
5912 }
5913 } else {
3e3326df
PM
5914 /* Two registers and a scalar. NB that for ops of this form
5915 * the ARM ARM labels bit 24 as Q, but it is in our variable
5916 * 'u', not 'q'.
5917 */
5918 if (size == 0) {
5919 return 1;
5920 }
9ee6e8bb 5921 switch (op) {
9ee6e8bb 5922 case 1: /* Float VMLA scalar */
9ee6e8bb 5923 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5924 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5925 if (size == 1) {
5926 return 1;
5927 }
5928 /* fall through */
5929 case 0: /* Integer VMLA scalar */
5930 case 4: /* Integer VMLS scalar */
5931 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5932 case 12: /* VQDMULH scalar */
5933 case 13: /* VQRDMULH scalar */
3e3326df
PM
5934 if (u && ((rd | rn) & 1)) {
5935 return 1;
5936 }
dd8fbd78
FN
5937 tmp = neon_get_scalar(size, rm);
5938 neon_store_scratch(0, tmp);
9ee6e8bb 5939 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5940 tmp = neon_load_scratch(0);
5941 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5942 if (op == 12) {
5943 if (size == 1) {
02da0b2d 5944 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5945 } else {
02da0b2d 5946 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5947 }
5948 } else if (op == 13) {
5949 if (size == 1) {
02da0b2d 5950 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5951 } else {
02da0b2d 5952 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5953 }
5954 } else if (op & 1) {
aa47cfdd
PM
5955 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5956 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5957 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5958 } else {
5959 switch (size) {
dd8fbd78
FN
5960 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5961 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5962 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5963 default: abort();
9ee6e8bb
PB
5964 }
5965 }
7d1b0095 5966 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5967 if (op < 8) {
5968 /* Accumulate. */
dd8fbd78 5969 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5970 switch (op) {
5971 case 0:
dd8fbd78 5972 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5973 break;
5974 case 1:
aa47cfdd
PM
5975 {
5976 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5977 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5978 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5979 break;
aa47cfdd 5980 }
9ee6e8bb 5981 case 4:
dd8fbd78 5982 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5983 break;
5984 case 5:
aa47cfdd
PM
5985 {
5986 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5987 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5988 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5989 break;
aa47cfdd 5990 }
9ee6e8bb
PB
5991 default:
5992 abort();
5993 }
7d1b0095 5994 tcg_temp_free_i32(tmp2);
9ee6e8bb 5995 }
dd8fbd78 5996 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5997 }
5998 break;
9ee6e8bb 5999 case 3: /* VQDMLAL scalar */
9ee6e8bb 6000 case 7: /* VQDMLSL scalar */
9ee6e8bb 6001 case 11: /* VQDMULL scalar */
3e3326df 6002 if (u == 1) {
ad69471c 6003 return 1;
3e3326df
PM
6004 }
6005 /* fall through */
 6006 case 2: /* VMLAL scalar */
6007 case 6: /* VMLSL scalar */
6008 case 10: /* VMULL scalar */
6009 if (rd & 1) {
6010 return 1;
6011 }
dd8fbd78 6012 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6013 /* We need a copy of tmp2 because gen_neon_mull
6014 * deletes it during pass 0. */
7d1b0095 6015 tmp4 = tcg_temp_new_i32();
c6067f04 6016 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6017 tmp3 = neon_load_reg(rn, 1);
ad69471c 6018
9ee6e8bb 6019 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6020 if (pass == 0) {
6021 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6022 } else {
dd8fbd78 6023 tmp = tmp3;
c6067f04 6024 tmp2 = tmp4;
9ee6e8bb 6025 }
ad69471c 6026 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6027 if (op != 11) {
6028 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6029 }
9ee6e8bb 6030 switch (op) {
4dc064e6
PM
6031 case 6:
6032 gen_neon_negl(cpu_V0, size);
6033 /* Fall through */
6034 case 2:
ad69471c 6035 gen_neon_addl(size);
9ee6e8bb
PB
6036 break;
6037 case 3: case 7:
ad69471c 6038 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6039 if (op == 7) {
6040 gen_neon_negl(cpu_V0, size);
6041 }
ad69471c 6042 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6043 break;
6044 case 10:
6045 /* no-op */
6046 break;
6047 case 11:
ad69471c 6048 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6049 break;
6050 default:
6051 abort();
6052 }
ad69471c 6053 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6054 }
dd8fbd78 6055
dd8fbd78 6056
9ee6e8bb
PB
6057 break;
6058 default: /* 14 and 15 are RESERVED */
6059 return 1;
6060 }
6061 }
6062 } else { /* size == 3 */
6063 if (!u) {
6064 /* Extract. */
9ee6e8bb 6065 imm = (insn >> 8) & 0xf;
ad69471c
PB
6066
6067 if (imm > 7 && !q)
6068 return 1;
6069
52579ea1
PM
6070 if (q && ((rd | rn | rm) & 1)) {
6071 return 1;
6072 }
6073
ad69471c
PB
6074 if (imm == 0) {
6075 neon_load_reg64(cpu_V0, rn);
6076 if (q) {
6077 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6078 }
ad69471c
PB
6079 } else if (imm == 8) {
6080 neon_load_reg64(cpu_V0, rn + 1);
6081 if (q) {
6082 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6083 }
ad69471c 6084 } else if (q) {
a7812ae4 6085 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6086 if (imm < 8) {
6087 neon_load_reg64(cpu_V0, rn);
a7812ae4 6088 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6089 } else {
6090 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6091 neon_load_reg64(tmp64, rm);
ad69471c
PB
6092 }
6093 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6094 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6095 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6096 if (imm < 8) {
6097 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6098 } else {
ad69471c
PB
6099 neon_load_reg64(cpu_V1, rm + 1);
6100 imm -= 8;
9ee6e8bb 6101 }
ad69471c 6102 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6103 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6104 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6105 tcg_temp_free_i64(tmp64);
ad69471c 6106 } else {
a7812ae4 6107 /* BUGFIX */
ad69471c 6108 neon_load_reg64(cpu_V0, rn);
a7812ae4 6109 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6110 neon_load_reg64(cpu_V1, rm);
a7812ae4 6111 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6112 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6113 }
6114 neon_store_reg64(cpu_V0, rd);
6115 if (q) {
6116 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6117 }
6118 } else if ((insn & (1 << 11)) == 0) {
6119 /* Two register misc. */
6120 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6121 size = (insn >> 18) & 3;
600b828c
PM
6122 /* UNDEF for unknown op values and bad op-size combinations */
6123 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6124 return 1;
6125 }
fc2a9b37
PM
6126 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6127 q && ((rm | rd) & 1)) {
6128 return 1;
6129 }
9ee6e8bb 6130 switch (op) {
600b828c 6131 case NEON_2RM_VREV64:
9ee6e8bb 6132 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6133 tmp = neon_load_reg(rm, pass * 2);
6134 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6135 switch (size) {
dd8fbd78
FN
6136 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6137 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6138 case 2: /* no-op */ break;
6139 default: abort();
6140 }
dd8fbd78 6141 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6142 if (size == 2) {
dd8fbd78 6143 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6144 } else {
9ee6e8bb 6145 switch (size) {
dd8fbd78
FN
6146 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6147 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6148 default: abort();
6149 }
dd8fbd78 6150 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6151 }
6152 }
6153 break;
600b828c
PM
6154 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6155 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6156 for (pass = 0; pass < q + 1; pass++) {
6157 tmp = neon_load_reg(rm, pass * 2);
6158 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6159 tmp = neon_load_reg(rm, pass * 2 + 1);
6160 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6161 switch (size) {
6162 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6163 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6164 case 2: tcg_gen_add_i64(CPU_V001); break;
6165 default: abort();
6166 }
600b828c 6167 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6168 /* Accumulate. */
ad69471c
PB
6169 neon_load_reg64(cpu_V1, rd + pass);
6170 gen_neon_addl(size);
9ee6e8bb 6171 }
ad69471c 6172 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6173 }
6174 break;
600b828c 6175 case NEON_2RM_VTRN:
9ee6e8bb 6176 if (size == 2) {
a5a14945 6177 int n;
9ee6e8bb 6178 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6179 tmp = neon_load_reg(rm, n);
6180 tmp2 = neon_load_reg(rd, n + 1);
6181 neon_store_reg(rm, n, tmp2);
6182 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6183 }
6184 } else {
6185 goto elementwise;
6186 }
6187 break;
600b828c 6188 case NEON_2RM_VUZP:
02acedf9 6189 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6190 return 1;
9ee6e8bb
PB
6191 }
6192 break;
600b828c 6193 case NEON_2RM_VZIP:
d68a6f3a 6194 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6195 return 1;
9ee6e8bb
PB
6196 }
6197 break;
600b828c
PM
6198 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6199 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6200 if (rm & 1) {
6201 return 1;
6202 }
39d5492a 6203 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6204 for (pass = 0; pass < 2; pass++) {
ad69471c 6205 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6206 tmp = tcg_temp_new_i32();
600b828c
PM
6207 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6208 tmp, cpu_V0);
ad69471c
PB
6209 if (pass == 0) {
6210 tmp2 = tmp;
6211 } else {
6212 neon_store_reg(rd, 0, tmp2);
6213 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6214 }
9ee6e8bb
PB
6215 }
6216 break;
600b828c 6217 case NEON_2RM_VSHLL:
fc2a9b37 6218 if (q || (rd & 1)) {
9ee6e8bb 6219 return 1;
600b828c 6220 }
ad69471c
PB
6221 tmp = neon_load_reg(rm, 0);
6222 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6223 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6224 if (pass == 1)
6225 tmp = tmp2;
6226 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6227 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6228 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6229 }
6230 break;
600b828c 6231 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
6232 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6233 q || (rm & 1)) {
6234 return 1;
6235 }
7d1b0095
PM
6236 tmp = tcg_temp_new_i32();
6237 tmp2 = tcg_temp_new_i32();
60011498 6238 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6239 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6240 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6241 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6242 tcg_gen_shli_i32(tmp2, tmp2, 16);
6243 tcg_gen_or_i32(tmp2, tmp2, tmp);
6244 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6245 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6246 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6247 neon_store_reg(rd, 0, tmp2);
7d1b0095 6248 tmp2 = tcg_temp_new_i32();
2d981da7 6249 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6250 tcg_gen_shli_i32(tmp2, tmp2, 16);
6251 tcg_gen_or_i32(tmp2, tmp2, tmp);
6252 neon_store_reg(rd, 1, tmp2);
7d1b0095 6253 tcg_temp_free_i32(tmp);
60011498 6254 break;
600b828c 6255 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6256 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6257 q || (rd & 1)) {
6258 return 1;
6259 }
7d1b0095 6260 tmp3 = tcg_temp_new_i32();
60011498
PB
6261 tmp = neon_load_reg(rm, 0);
6262 tmp2 = neon_load_reg(rm, 1);
6263 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6264 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6265 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6266 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6267 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6268 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6269 tcg_temp_free_i32(tmp);
60011498 6270 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6271 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6272 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6273 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6274 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6275 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6276 tcg_temp_free_i32(tmp2);
6277 tcg_temp_free_i32(tmp3);
60011498 6278 break;
9d935509
AB
6279 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6280 if (!arm_feature(env, ARM_FEATURE_V8_AES)
6281 || ((rm | rd) & 1)) {
6282 return 1;
6283 }
6284 tmp = tcg_const_i32(rd);
6285 tmp2 = tcg_const_i32(rm);
6286
6287 /* Bit 6 is the lowest opcode bit; it distinguishes between
6288 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6289 */
6290 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6291
6292 if (op == NEON_2RM_AESE) {
6293 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6294 } else {
6295 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6296 }
6297 tcg_temp_free_i32(tmp);
6298 tcg_temp_free_i32(tmp2);
6299 tcg_temp_free_i32(tmp3);
6300 break;
9ee6e8bb
PB
6301 default:
6302 elementwise:
6303 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6304 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6305 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6306 neon_reg_offset(rm, pass));
39d5492a 6307 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6308 } else {
dd8fbd78 6309 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6310 }
6311 switch (op) {
600b828c 6312 case NEON_2RM_VREV32:
9ee6e8bb 6313 switch (size) {
dd8fbd78
FN
6314 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6315 case 1: gen_swap_half(tmp); break;
600b828c 6316 default: abort();
9ee6e8bb
PB
6317 }
6318 break;
600b828c 6319 case NEON_2RM_VREV16:
dd8fbd78 6320 gen_rev16(tmp);
9ee6e8bb 6321 break;
600b828c 6322 case NEON_2RM_VCLS:
9ee6e8bb 6323 switch (size) {
dd8fbd78
FN
6324 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6325 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6326 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6327 default: abort();
9ee6e8bb
PB
6328 }
6329 break;
600b828c 6330 case NEON_2RM_VCLZ:
9ee6e8bb 6331 switch (size) {
dd8fbd78
FN
6332 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6333 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6334 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6335 default: abort();
9ee6e8bb
PB
6336 }
6337 break;
600b828c 6338 case NEON_2RM_VCNT:
dd8fbd78 6339 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6340 break;
600b828c 6341 case NEON_2RM_VMVN:
dd8fbd78 6342 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6343 break;
600b828c 6344 case NEON_2RM_VQABS:
9ee6e8bb 6345 switch (size) {
02da0b2d
PM
6346 case 0:
6347 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6348 break;
6349 case 1:
6350 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6351 break;
6352 case 2:
6353 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6354 break;
600b828c 6355 default: abort();
9ee6e8bb
PB
6356 }
6357 break;
600b828c 6358 case NEON_2RM_VQNEG:
9ee6e8bb 6359 switch (size) {
02da0b2d
PM
6360 case 0:
6361 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6362 break;
6363 case 1:
6364 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6365 break;
6366 case 2:
6367 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6368 break;
600b828c 6369 default: abort();
9ee6e8bb
PB
6370 }
6371 break;
600b828c 6372 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6373 tmp2 = tcg_const_i32(0);
9ee6e8bb 6374 switch(size) {
dd8fbd78
FN
6375 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6376 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6377 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6378 default: abort();
9ee6e8bb 6379 }
39d5492a 6380 tcg_temp_free_i32(tmp2);
600b828c 6381 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6382 tcg_gen_not_i32(tmp, tmp);
600b828c 6383 }
9ee6e8bb 6384 break;
600b828c 6385 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6386 tmp2 = tcg_const_i32(0);
9ee6e8bb 6387 switch(size) {
dd8fbd78
FN
6388 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6389 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6390 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6391 default: abort();
9ee6e8bb 6392 }
39d5492a 6393 tcg_temp_free_i32(tmp2);
600b828c 6394 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6395 tcg_gen_not_i32(tmp, tmp);
600b828c 6396 }
9ee6e8bb 6397 break;
600b828c 6398 case NEON_2RM_VCEQ0:
dd8fbd78 6399 tmp2 = tcg_const_i32(0);
9ee6e8bb 6400 switch(size) {
dd8fbd78
FN
6401 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6402 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6403 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6404 default: abort();
9ee6e8bb 6405 }
39d5492a 6406 tcg_temp_free_i32(tmp2);
9ee6e8bb 6407 break;
600b828c 6408 case NEON_2RM_VABS:
9ee6e8bb 6409 switch(size) {
dd8fbd78
FN
6410 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6411 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6412 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6413 default: abort();
9ee6e8bb
PB
6414 }
6415 break;
600b828c 6416 case NEON_2RM_VNEG:
dd8fbd78
FN
6417 tmp2 = tcg_const_i32(0);
6418 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6419 tcg_temp_free_i32(tmp2);
9ee6e8bb 6420 break;
600b828c 6421 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6422 {
6423 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6424 tmp2 = tcg_const_i32(0);
aa47cfdd 6425 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6426 tcg_temp_free_i32(tmp2);
aa47cfdd 6427 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6428 break;
aa47cfdd 6429 }
600b828c 6430 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6431 {
6432 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6433 tmp2 = tcg_const_i32(0);
aa47cfdd 6434 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6435 tcg_temp_free_i32(tmp2);
aa47cfdd 6436 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6437 break;
aa47cfdd 6438 }
600b828c 6439 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6440 {
6441 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6442 tmp2 = tcg_const_i32(0);
aa47cfdd 6443 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6444 tcg_temp_free_i32(tmp2);
aa47cfdd 6445 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6446 break;
aa47cfdd 6447 }
600b828c 6448 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6449 {
6450 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6451 tmp2 = tcg_const_i32(0);
aa47cfdd 6452 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6453 tcg_temp_free_i32(tmp2);
aa47cfdd 6454 tcg_temp_free_ptr(fpstatus);
0e326109 6455 break;
aa47cfdd 6456 }
600b828c 6457 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6458 {
6459 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6460 tmp2 = tcg_const_i32(0);
aa47cfdd 6461 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6462 tcg_temp_free_i32(tmp2);
aa47cfdd 6463 tcg_temp_free_ptr(fpstatus);
0e326109 6464 break;
aa47cfdd 6465 }
600b828c 6466 case NEON_2RM_VABS_F:
4373f3ce 6467 gen_vfp_abs(0);
9ee6e8bb 6468 break;
600b828c 6469 case NEON_2RM_VNEG_F:
4373f3ce 6470 gen_vfp_neg(0);
9ee6e8bb 6471 break;
600b828c 6472 case NEON_2RM_VSWP:
dd8fbd78
FN
6473 tmp2 = neon_load_reg(rd, pass);
6474 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6475 break;
600b828c 6476 case NEON_2RM_VTRN:
dd8fbd78 6477 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6478 switch (size) {
dd8fbd78
FN
6479 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6480 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6481 default: abort();
9ee6e8bb 6482 }
dd8fbd78 6483 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6484 break;
2ce70625
WN
6485 case NEON_2RM_VRINTX:
6486 {
6487 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6488 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6489 tcg_temp_free_ptr(fpstatus);
6490 break;
6491 }
600b828c 6492 case NEON_2RM_VRECPE:
dd8fbd78 6493 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6494 break;
600b828c 6495 case NEON_2RM_VRSQRTE:
dd8fbd78 6496 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6497 break;
600b828c 6498 case NEON_2RM_VRECPE_F:
4373f3ce 6499 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6500 break;
600b828c 6501 case NEON_2RM_VRSQRTE_F:
4373f3ce 6502 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6503 break;
600b828c 6504 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6505 gen_vfp_sito(0, 1);
9ee6e8bb 6506 break;
600b828c 6507 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6508 gen_vfp_uito(0, 1);
9ee6e8bb 6509 break;
600b828c 6510 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6511 gen_vfp_tosiz(0, 1);
9ee6e8bb 6512 break;
600b828c 6513 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6514 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6515 break;
6516 default:
600b828c
PM
6517 /* Reserved op values were caught by the
6518 * neon_2rm_sizes[] check earlier.
6519 */
6520 abort();
9ee6e8bb 6521 }
600b828c 6522 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6523 tcg_gen_st_f32(cpu_F0s, cpu_env,
6524 neon_reg_offset(rd, pass));
9ee6e8bb 6525 } else {
dd8fbd78 6526 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6527 }
6528 }
6529 break;
6530 }
6531 } else if ((insn & (1 << 10)) == 0) {
6532 /* VTBL, VTBX. */
56907d77
PM
6533 int n = ((insn >> 8) & 3) + 1;
6534 if ((rn + n) > 32) {
6535 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6536 * helper function running off the end of the register file.
6537 */
6538 return 1;
6539 }
6540 n <<= 3;
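/* Example: a three-register VTBL/VTBX list has len field 2, so n == 3
 * before the shift and 24 table bytes afterwards; the check above
 * ensures the list rn..rn+2 stays inside the register file.
 */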
9ee6e8bb 6541 if (insn & (1 << 6)) {
8f8e3aa4 6542 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6543 } else {
7d1b0095 6544 tmp = tcg_temp_new_i32();
8f8e3aa4 6545 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6546 }
8f8e3aa4 6547 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6548 tmp4 = tcg_const_i32(rn);
6549 tmp5 = tcg_const_i32(n);
9ef39277 6550 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6551 tcg_temp_free_i32(tmp);
9ee6e8bb 6552 if (insn & (1 << 6)) {
8f8e3aa4 6553 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6554 } else {
7d1b0095 6555 tmp = tcg_temp_new_i32();
8f8e3aa4 6556 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6557 }
8f8e3aa4 6558 tmp3 = neon_load_reg(rm, 1);
9ef39277 6559 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6560 tcg_temp_free_i32(tmp5);
6561 tcg_temp_free_i32(tmp4);
8f8e3aa4 6562 neon_store_reg(rd, 0, tmp2);
3018f259 6563 neon_store_reg(rd, 1, tmp3);
7d1b0095 6564 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6565 } else if ((insn & 0x380) == 0) {
6566 /* VDUP */
133da6aa
JR
6567 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6568 return 1;
6569 }
9ee6e8bb 6570 if (insn & (1 << 19)) {
dd8fbd78 6571 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6572 } else {
dd8fbd78 6573 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6574 }
6575 if (insn & (1 << 16)) {
dd8fbd78 6576 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6577 } else if (insn & (1 << 17)) {
6578 if ((insn >> 18) & 1)
dd8fbd78 6579 gen_neon_dup_high16(tmp);
9ee6e8bb 6580 else
dd8fbd78 6581 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6582 }
6583 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6584 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6585 tcg_gen_mov_i32(tmp2, tmp);
6586 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6587 }
7d1b0095 6588 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6589 } else {
6590 return 1;
6591 }
6592 }
6593 }
6594 return 0;
6595}
6596
0ecb72a5 6597static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6598{
4b6a83fb
PM
6599 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6600 const ARMCPRegInfo *ri;
9ee6e8bb
PB
6601
6602 cpnum = (insn >> 8) & 0xf;
6603 if (arm_feature(env, ARM_FEATURE_XSCALE)
6604 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6605 return 1;
6606
4b6a83fb 6607 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6608 switch (cpnum) {
6609 case 0:
6610 case 1:
6611 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6612 return disas_iwmmxt_insn(env, s, insn);
6613 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6614 return disas_dsp_insn(env, s, insn);
6615 }
6616 return 1;
4b6a83fb
PM
6617 default:
6618 break;
6619 }
6620
6621 /* Otherwise treat as a generic register access */
6622 is64 = (insn & (1 << 25)) == 0;
6623 if (!is64 && ((insn & (1 << 4)) == 0)) {
6624 /* cdp */
6625 return 1;
6626 }
6627
6628 crm = insn & 0xf;
6629 if (is64) {
6630 crn = 0;
6631 opc1 = (insn >> 4) & 0xf;
6632 opc2 = 0;
6633 rt2 = (insn >> 16) & 0xf;
6634 } else {
6635 crn = (insn >> 16) & 0xf;
6636 opc1 = (insn >> 21) & 7;
6637 opc2 = (insn >> 5) & 7;
6638 rt2 = 0;
6639 }
6640 isread = (insn >> 20) & 1;
6641 rt = (insn >> 12) & 0xf;
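/* Decode example (instruction chosen for illustration): MRC p15, 0,
 * r0, c1, c0, 0 (the SCTLR read) arrives here with cpnum == 15,
 * is64 == 0, crn == 1, crm == 0, opc1 == 0, opc2 == 0, isread == 1 and
 * rt == 0, and is then looked up via ENCODE_CP_REG() below.
 */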
6642
60322b39 6643 ri = get_arm_cp_reginfo(s->cp_regs,
4b6a83fb
PM
6644 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6645 if (ri) {
6646 /* Check access permissions */
60322b39 6647 if (!cp_access_ok(s->current_pl, ri, isread)) {
4b6a83fb
PM
6648 return 1;
6649 }
6650
6651 /* Handle special cases first */
6652 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6653 case ARM_CP_NOP:
6654 return 0;
6655 case ARM_CP_WFI:
6656 if (isread) {
6657 return 1;
6658 }
eaed129d 6659 gen_set_pc_im(s, s->pc);
4b6a83fb 6660 s->is_jmp = DISAS_WFI;
2bee5105 6661 return 0;
4b6a83fb
PM
6662 default:
6663 break;
6664 }
6665
2452731c
PM
6666 if (use_icount && (ri->type & ARM_CP_IO)) {
6667 gen_io_start();
6668 }
6669
4b6a83fb
PM
6670 if (isread) {
6671 /* Read */
6672 if (is64) {
6673 TCGv_i64 tmp64;
6674 TCGv_i32 tmp;
6675 if (ri->type & ARM_CP_CONST) {
6676 tmp64 = tcg_const_i64(ri->resetvalue);
6677 } else if (ri->readfn) {
6678 TCGv_ptr tmpptr;
eaed129d 6679 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6680 tmp64 = tcg_temp_new_i64();
6681 tmpptr = tcg_const_ptr(ri);
6682 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6683 tcg_temp_free_ptr(tmpptr);
6684 } else {
6685 tmp64 = tcg_temp_new_i64();
6686 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6687 }
6688 tmp = tcg_temp_new_i32();
6689 tcg_gen_trunc_i64_i32(tmp, tmp64);
6690 store_reg(s, rt, tmp);
6691 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6692 tmp = tcg_temp_new_i32();
4b6a83fb 6693 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6694 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6695 store_reg(s, rt2, tmp);
6696 } else {
39d5492a 6697 TCGv_i32 tmp;
4b6a83fb
PM
6698 if (ri->type & ARM_CP_CONST) {
6699 tmp = tcg_const_i32(ri->resetvalue);
6700 } else if (ri->readfn) {
6701 TCGv_ptr tmpptr;
eaed129d 6702 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6703 tmp = tcg_temp_new_i32();
6704 tmpptr = tcg_const_ptr(ri);
6705 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6706 tcg_temp_free_ptr(tmpptr);
6707 } else {
6708 tmp = load_cpu_offset(ri->fieldoffset);
6709 }
6710 if (rt == 15) {
6711 /* Destination register of r15 for 32 bit loads sets
6712 * the condition codes from the high 4 bits of the value
6713 */
6714 gen_set_nzcv(tmp);
6715 tcg_temp_free_i32(tmp);
6716 } else {
6717 store_reg(s, rt, tmp);
6718 }
6719 }
6720 } else {
6721 /* Write */
6722 if (ri->type & ARM_CP_CONST) {
6723 /* If not forbidden by access permissions, treat as WI */
6724 return 0;
6725 }
6726
6727 if (is64) {
39d5492a 6728 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
6729 TCGv_i64 tmp64 = tcg_temp_new_i64();
6730 tmplo = load_reg(s, rt);
6731 tmphi = load_reg(s, rt2);
6732 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6733 tcg_temp_free_i32(tmplo);
6734 tcg_temp_free_i32(tmphi);
6735 if (ri->writefn) {
6736 TCGv_ptr tmpptr = tcg_const_ptr(ri);
eaed129d 6737 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6738 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6739 tcg_temp_free_ptr(tmpptr);
6740 } else {
6741 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6742 }
6743 tcg_temp_free_i64(tmp64);
6744 } else {
6745 if (ri->writefn) {
39d5492a 6746 TCGv_i32 tmp;
4b6a83fb 6747 TCGv_ptr tmpptr;
eaed129d 6748 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6749 tmp = load_reg(s, rt);
6750 tmpptr = tcg_const_ptr(ri);
6751 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6752 tcg_temp_free_ptr(tmpptr);
6753 tcg_temp_free_i32(tmp);
6754 } else {
39d5492a 6755 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
6756 store_cpu_offset(tmp, ri->fieldoffset);
6757 }
6758 }
2452731c
PM
6759 }
6760
6761 if (use_icount && (ri->type & ARM_CP_IO)) {
6762 /* I/O operations must end the TB here (whether read or write) */
6763 gen_io_end();
6764 gen_lookup_tb(s);
6765 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
6766 /* We default to ending the TB on a coprocessor register write,
6767 * but allow this to be suppressed by the register definition
6768 * (usually only necessary to work around guest bugs).
6769 */
2452731c 6770 gen_lookup_tb(s);
4b6a83fb 6771 }
2452731c 6772
4b6a83fb
PM
6773 return 0;
6774 }
6775
4a9a539f 6776 return 1;
9ee6e8bb
PB
6777}
6778
5e3f878a
PB
6779
6780/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6781static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 6782{
39d5492a 6783 TCGv_i32 tmp;
7d1b0095 6784 tmp = tcg_temp_new_i32();
5e3f878a
PB
6785 tcg_gen_trunc_i64_i32(tmp, val);
6786 store_reg(s, rlow, tmp);
7d1b0095 6787 tmp = tcg_temp_new_i32();
5e3f878a
PB
6788 tcg_gen_shri_i64(val, val, 32);
6789 tcg_gen_trunc_i64_i32(tmp, val);
6790 store_reg(s, rhigh, tmp);
6791}
6792
6793/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6794static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6795{
a7812ae4 6796 TCGv_i64 tmp;
39d5492a 6797 TCGv_i32 tmp2;
5e3f878a 6798
36aa55dc 6799 /* Load value and extend to 64 bits. */
a7812ae4 6800 tmp = tcg_temp_new_i64();
5e3f878a
PB
6801 tmp2 = load_reg(s, rlow);
6802 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6803 tcg_temp_free_i32(tmp2);
5e3f878a 6804 tcg_gen_add_i64(val, val, tmp);
b75263d6 6805 tcg_temp_free_i64(tmp);
5e3f878a
PB
6806}
6807
6808/* load and add a 64-bit value from a register pair. */
a7812ae4 6809static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6810{
a7812ae4 6811 TCGv_i64 tmp;
39d5492a
PM
6812 TCGv_i32 tmpl;
6813 TCGv_i32 tmph;
5e3f878a
PB
6814
6815 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6816 tmpl = load_reg(s, rlow);
6817 tmph = load_reg(s, rhigh);
a7812ae4 6818 tmp = tcg_temp_new_i64();
36aa55dc 6819 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6820 tcg_temp_free_i32(tmpl);
6821 tcg_temp_free_i32(tmph);
5e3f878a 6822 tcg_gen_add_i64(val, val, tmp);
b75263d6 6823 tcg_temp_free_i64(tmp);
5e3f878a
PB
6824}
6825
c9f10124 6826/* Set N and Z flags from hi|lo. */
39d5492a 6827static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 6828{
c9f10124
RH
6829 tcg_gen_mov_i32(cpu_NF, hi);
6830 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
6831}
6832
426f5abc
PB
6833/* Load/Store exclusive instructions are implemented by remembering
6834 the value/address loaded, and seeing if these are the same
b90372ad 6835 when the store is performed. This should be sufficient to implement
426f5abc
PB
6836 the architecturally mandated semantics, and avoids having to monitor
6837 regular stores.
6838
6839 In system emulation mode only one CPU will be running at once, so
6840 this sequence is effectively atomic. In user emulation mode we
6841 throw an exception and handle the atomic operation elsewhere. */
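/* In other words, gen_load_exclusive() records the address in
   cpu_exclusive_addr and the loaded data in cpu_exclusive_val;
   gen_store_exclusive() (in its non-user-only variant) re-reads memory,
   compares both against the recorded values, and only performs the
   store (writing 0 to Rd) if they still match, otherwise it writes 1
   (see the pseudocode comment in that function). */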
6842static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 6843 TCGv_i32 addr, int size)
426f5abc 6844{
94ee24e7 6845 TCGv_i32 tmp = tcg_temp_new_i32();
426f5abc
PB
6846
6847 switch (size) {
6848 case 0:
08307563 6849 gen_aa32_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6850 break;
6851 case 1:
08307563 6852 gen_aa32_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6853 break;
6854 case 2:
6855 case 3:
08307563 6856 gen_aa32_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6857 break;
6858 default:
6859 abort();
6860 }
03d05e2d 6861
426f5abc 6862 if (size == 3) {
39d5492a 6863 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d
PM
6864 TCGv_i32 tmp3 = tcg_temp_new_i32();
6865
2c9adbda 6866 tcg_gen_addi_i32(tmp2, addr, 4);
03d05e2d 6867 gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
7d1b0095 6868 tcg_temp_free_i32(tmp2);
03d05e2d
PM
6869 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
6870 store_reg(s, rt2, tmp3);
6871 } else {
6872 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 6873 }
03d05e2d
PM
6874
6875 store_reg(s, rt, tmp);
6876 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
6877}
6878
6879static void gen_clrex(DisasContext *s)
6880{
03d05e2d 6881 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
6882}
6883
6884#ifdef CONFIG_USER_ONLY
6885static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6886 TCGv_i32 addr, int size)
426f5abc 6887{
03d05e2d 6888 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
426f5abc
PB
6889 tcg_gen_movi_i32(cpu_exclusive_info,
6890 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6891 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6892}
6893#else
6894static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6895 TCGv_i32 addr, int size)
426f5abc 6896{
39d5492a 6897 TCGv_i32 tmp;
03d05e2d 6898 TCGv_i64 val64, extaddr;
426f5abc
PB
6899 int done_label;
6900 int fail_label;
6901
6902 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6903 [addr] = {Rt};
6904 {Rd} = 0;
6905 } else {
6906 {Rd} = 1;
6907 } */
6908 fail_label = gen_new_label();
6909 done_label = gen_new_label();
03d05e2d
PM
6910 extaddr = tcg_temp_new_i64();
6911 tcg_gen_extu_i32_i64(extaddr, addr);
6912 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
6913 tcg_temp_free_i64(extaddr);
6914
94ee24e7 6915 tmp = tcg_temp_new_i32();
426f5abc
PB
6916 switch (size) {
6917 case 0:
08307563 6918 gen_aa32_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6919 break;
6920 case 1:
08307563 6921 gen_aa32_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6922 break;
6923 case 2:
6924 case 3:
08307563 6925 gen_aa32_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6926 break;
6927 default:
6928 abort();
6929 }
03d05e2d
PM
6930
6931 val64 = tcg_temp_new_i64();
426f5abc 6932 if (size == 3) {
39d5492a 6933 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d 6934 TCGv_i32 tmp3 = tcg_temp_new_i32();
426f5abc 6935 tcg_gen_addi_i32(tmp2, addr, 4);
03d05e2d 6936 gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
7d1b0095 6937 tcg_temp_free_i32(tmp2);
03d05e2d
PM
6938 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
6939 tcg_temp_free_i32(tmp3);
6940 } else {
6941 tcg_gen_extu_i32_i64(val64, tmp);
426f5abc 6942 }
03d05e2d
PM
6943 tcg_temp_free_i32(tmp);
6944
6945 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
6946 tcg_temp_free_i64(val64);
6947
426f5abc
PB
6948 tmp = load_reg(s, rt);
6949 switch (size) {
6950 case 0:
08307563 6951 gen_aa32_st8(tmp, addr, IS_USER(s));
426f5abc
PB
6952 break;
6953 case 1:
08307563 6954 gen_aa32_st16(tmp, addr, IS_USER(s));
426f5abc
PB
6955 break;
6956 case 2:
6957 case 3:
08307563 6958 gen_aa32_st32(tmp, addr, IS_USER(s));
426f5abc
PB
6959 break;
6960 default:
6961 abort();
6962 }
94ee24e7 6963 tcg_temp_free_i32(tmp);
426f5abc
PB
6964 if (size == 3) {
6965 tcg_gen_addi_i32(addr, addr, 4);
6966 tmp = load_reg(s, rt2);
08307563 6967 gen_aa32_st32(tmp, addr, IS_USER(s));
94ee24e7 6968 tcg_temp_free_i32(tmp);
426f5abc
PB
6969 }
6970 tcg_gen_movi_i32(cpu_R[rd], 0);
6971 tcg_gen_br(done_label);
6972 gen_set_label(fail_label);
6973 tcg_gen_movi_i32(cpu_R[rd], 1);
6974 gen_set_label(done_label);
03d05e2d 6975 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
6976}
6977#endif
6978
81465888
PM
6979/* gen_srs:
6980 * @env: CPUARMState
6981 * @s: DisasContext
6982 * @mode: mode field from insn (which stack to store to)
6983 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
6984 * @writeback: true if writeback bit set
6985 *
6986 * Generate code for the SRS (Store Return State) insn.
6987 */
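/* Illustration of the two offset switches below, taking DB (amode == 2)
 * as the example: LR is stored at SP-8 and the SPSR at SP-4, and with
 * writeback the banked SP is left pointing at SP-8.
 */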
6988static void gen_srs(DisasContext *s,
6989 uint32_t mode, uint32_t amode, bool writeback)
6990{
6991 int32_t offset;
6992 TCGv_i32 addr = tcg_temp_new_i32();
6993 TCGv_i32 tmp = tcg_const_i32(mode);
6994 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6995 tcg_temp_free_i32(tmp);
6996 switch (amode) {
6997 case 0: /* DA */
6998 offset = -4;
6999 break;
7000 case 1: /* IA */
7001 offset = 0;
7002 break;
7003 case 2: /* DB */
7004 offset = -8;
7005 break;
7006 case 3: /* IB */
7007 offset = 4;
7008 break;
7009 default:
7010 abort();
7011 }
7012 tcg_gen_addi_i32(addr, addr, offset);
7013 tmp = load_reg(s, 14);
08307563 7014 gen_aa32_st32(tmp, addr, 0);
5a839c0d 7015 tcg_temp_free_i32(tmp);
81465888
PM
7016 tmp = load_cpu_field(spsr);
7017 tcg_gen_addi_i32(addr, addr, 4);
08307563 7018 gen_aa32_st32(tmp, addr, 0);
5a839c0d 7019 tcg_temp_free_i32(tmp);
81465888
PM
7020 if (writeback) {
7021 switch (amode) {
7022 case 0:
7023 offset = -8;
7024 break;
7025 case 1:
7026 offset = 4;
7027 break;
7028 case 2:
7029 offset = -4;
7030 break;
7031 case 3:
7032 offset = 0;
7033 break;
7034 default:
7035 abort();
7036 }
7037 tcg_gen_addi_i32(addr, addr, offset);
7038 tmp = tcg_const_i32(mode);
7039 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7040 tcg_temp_free_i32(tmp);
7041 }
7042 tcg_temp_free_i32(addr);
7043}
7044
0ecb72a5 7045static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
7046{
7047 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7048 TCGv_i32 tmp;
7049 TCGv_i32 tmp2;
7050 TCGv_i32 tmp3;
7051 TCGv_i32 addr;
a7812ae4 7052 TCGv_i64 tmp64;
9ee6e8bb 7053
d31dd73e 7054 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
7055 s->pc += 4;
7056
7057 /* M variants do not implement ARM mode. */
7058 if (IS_M(env))
7059 goto illegal_op;
7060 cond = insn >> 28;
7061 if (cond == 0xf){
be5e7a76
DES
7062 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7063 * choose to UNDEF. In ARMv5 and above the space is used
7064 * for miscellaneous unconditional instructions.
7065 */
7066 ARCH(5);
7067
9ee6e8bb
PB
7068 /* Unconditional instructions. */
7069 if (((insn >> 25) & 7) == 1) {
7070 /* NEON Data processing. */
7071 if (!arm_feature(env, ARM_FEATURE_NEON))
7072 goto illegal_op;
7073
7074 if (disas_neon_data_insn(env, s, insn))
7075 goto illegal_op;
7076 return;
7077 }
7078 if ((insn & 0x0f100000) == 0x04000000) {
7079 /* NEON load/store. */
7080 if (!arm_feature(env, ARM_FEATURE_NEON))
7081 goto illegal_op;
7082
7083 if (disas_neon_ls_insn(env, s, insn))
7084 goto illegal_op;
7085 return;
7086 }
6a57f3eb
WN
7087 if ((insn & 0x0f000e10) == 0x0e000a00) {
7088 /* VFP. */
7089 if (disas_vfp_insn(env, s, insn)) {
7090 goto illegal_op;
7091 }
7092 return;
7093 }
3d185e5d
PM
7094 if (((insn & 0x0f30f000) == 0x0510f000) ||
7095 ((insn & 0x0f30f010) == 0x0710f000)) {
7096 if ((insn & (1 << 22)) == 0) {
7097 /* PLDW; v7MP */
7098 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7099 goto illegal_op;
7100 }
7101 }
7102 /* Otherwise PLD; v5TE+ */
be5e7a76 7103 ARCH(5TE);
3d185e5d
PM
7104 return;
7105 }
7106 if (((insn & 0x0f70f000) == 0x0450f000) ||
7107 ((insn & 0x0f70f010) == 0x0650f000)) {
7108 ARCH(7);
7109 return; /* PLI; V7 */
7110 }
7111 if (((insn & 0x0f700000) == 0x04100000) ||
7112 ((insn & 0x0f700010) == 0x06100000)) {
7113 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7114 goto illegal_op;
7115 }
7116 return; /* v7MP: Unallocated memory hint: must NOP */
7117 }
7118
7119 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7120 ARCH(6);
7121 /* setend */
10962fd5
PM
7122 if (((insn >> 9) & 1) != s->bswap_code) {
7123 /* Dynamic endianness switching not implemented. */
e0c270d9 7124 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7125 goto illegal_op;
7126 }
7127 return;
7128 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7129 switch ((insn >> 4) & 0xf) {
7130 case 1: /* clrex */
7131 ARCH(6K);
426f5abc 7132 gen_clrex(s);
9ee6e8bb
PB
7133 return;
7134 case 4: /* dsb */
7135 case 5: /* dmb */
7136 case 6: /* isb */
7137 ARCH(7);
7138 /* We don't emulate caches so these are a no-op. */
7139 return;
7140 default:
7141 goto illegal_op;
7142 }
7143 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7144 /* srs */
81465888 7145 if (IS_USER(s)) {
9ee6e8bb 7146 goto illegal_op;
9ee6e8bb 7147 }
81465888
PM
7148 ARCH(6);
7149 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7150 return;
ea825eee 7151 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7152 /* rfe */
c67b6b71 7153 int32_t offset;
9ee6e8bb
PB
7154 if (IS_USER(s))
7155 goto illegal_op;
7156 ARCH(6);
7157 rn = (insn >> 16) & 0xf;
b0109805 7158 addr = load_reg(s, rn);
9ee6e8bb
PB
7159 i = (insn >> 23) & 3;
7160 switch (i) {
b0109805 7161 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7162 case 1: offset = 0; break; /* IA */
7163 case 2: offset = -8; break; /* DB */
b0109805 7164 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7165 default: abort();
7166 }
7167 if (offset)
b0109805
PB
7168 tcg_gen_addi_i32(addr, addr, offset);
7169 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7170 tmp = tcg_temp_new_i32();
08307563 7171 gen_aa32_ld32u(tmp, addr, 0);
b0109805 7172 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7173 tmp2 = tcg_temp_new_i32();
08307563 7174 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
7175 if (insn & (1 << 21)) {
7176 /* Base writeback. */
7177 switch (i) {
b0109805 7178 case 0: offset = -8; break;
c67b6b71
FN
7179 case 1: offset = 4; break;
7180 case 2: offset = -4; break;
b0109805 7181 case 3: offset = 0; break;
9ee6e8bb
PB
7182 default: abort();
7183 }
7184 if (offset)
b0109805
PB
7185 tcg_gen_addi_i32(addr, addr, offset);
7186 store_reg(s, rn, addr);
7187 } else {
7d1b0095 7188 tcg_temp_free_i32(addr);
9ee6e8bb 7189 }
b0109805 7190 gen_rfe(s, tmp, tmp2);
c67b6b71 7191 return;
9ee6e8bb
PB
7192 } else if ((insn & 0x0e000000) == 0x0a000000) {
7193 /* branch link and change to thumb (blx <offset>) */
7194 int32_t offset;
7195
7196 val = (uint32_t)s->pc;
7d1b0095 7197 tmp = tcg_temp_new_i32();
d9ba4830
PB
7198 tcg_gen_movi_i32(tmp, val);
7199 store_reg(s, 14, tmp);
9ee6e8bb
PB
7200 /* Sign-extend the 24-bit offset */
7201 offset = (((int32_t)insn) << 8) >> 8;
7202 /* offset * 4 + bit24 * 2 + (thumb bit) */
7203 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7204 /* pipeline offset */
7205 val += 4;
be5e7a76 7206 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7207 gen_bx_im(s, val);
9ee6e8bb
PB
7208 return;
7209 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7210 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7211 /* iWMMXt register transfer. */
7212 if (env->cp15.c15_cpar & (1 << 1))
7213 if (!disas_iwmmxt_insn(env, s, insn))
7214 return;
7215 }
7216 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7217 /* Coprocessor double register transfer. */
be5e7a76 7218 ARCH(5TE);
9ee6e8bb
PB
7219 } else if ((insn & 0x0f000010) == 0x0e000010) {
7220 /* Additional coprocessor register transfer. */
7997d92f 7221 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7222 uint32_t mask;
7223 uint32_t val;
7224 /* cps (privileged) */
7225 if (IS_USER(s))
7226 return;
7227 mask = val = 0;
7228 if (insn & (1 << 19)) {
7229 if (insn & (1 << 8))
7230 mask |= CPSR_A;
7231 if (insn & (1 << 7))
7232 mask |= CPSR_I;
7233 if (insn & (1 << 6))
7234 mask |= CPSR_F;
7235 if (insn & (1 << 18))
7236 val |= mask;
7237 }
7997d92f 7238 if (insn & (1 << 17)) {
9ee6e8bb
PB
7239 mask |= CPSR_M;
7240 val |= (insn & 0x1f);
7241 }
7242 if (mask) {
2fbac54b 7243 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7244 }
7245 return;
7246 }
7247 goto illegal_op;
7248 }
7249 if (cond != 0xe) {
7250 /* if not executing unconditionally, generate a conditional jump to
7251 the next instruction */
7252 s->condlabel = gen_new_label();
39fb730a 7253 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7254 s->condjmp = 1;
7255 }
7256 if ((insn & 0x0f900000) == 0x03000000) {
7257 if ((insn & (1 << 21)) == 0) {
7258 ARCH(6T2);
7259 rd = (insn >> 12) & 0xf;
7260 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
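/* MOVW zero-extends a 16-bit immediate into rd; MOVT keeps the low
 * halfword and replaces the top one, so e.g.
 *     movw rd, #0x5678 ; movt rd, #0x1234
 * leaves rd == 0x12345678 without needing a literal pool load.
 */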
7261 if ((insn & (1 << 22)) == 0) {
7262 /* MOVW */
7d1b0095 7263 tmp = tcg_temp_new_i32();
5e3f878a 7264 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7265 } else {
7266 /* MOVT */
5e3f878a 7267 tmp = load_reg(s, rd);
86831435 7268 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7269 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7270 }
5e3f878a 7271 store_reg(s, rd, tmp);
9ee6e8bb
PB
7272 } else {
7273 if (((insn >> 12) & 0xf) != 0xf)
7274 goto illegal_op;
7275 if (((insn >> 16) & 0xf) == 0) {
7276 gen_nop_hint(s, insn & 0xff);
7277 } else {
7278 /* CPSR = immediate */
7279 val = insn & 0xff;
7280 shift = ((insn >> 8) & 0xf) * 2;
7281 if (shift)
7282 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7283 i = ((insn & (1 << 22)) != 0);
2fbac54b 7284 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
7285 goto illegal_op;
7286 }
7287 }
7288 } else if ((insn & 0x0f900000) == 0x01000000
7289 && (insn & 0x00000090) != 0x00000090) {
7290 /* miscellaneous instructions */
7291 op1 = (insn >> 21) & 3;
7292 sh = (insn >> 4) & 0xf;
7293 rm = insn & 0xf;
7294 switch (sh) {
7295 case 0x0: /* move program status register */
7296 if (op1 & 1) {
7297 /* PSR = reg */
2fbac54b 7298 tmp = load_reg(s, rm);
9ee6e8bb 7299 i = ((op1 & 2) != 0);
2fbac54b 7300 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7301 goto illegal_op;
7302 } else {
7303 /* reg = PSR */
7304 rd = (insn >> 12) & 0xf;
7305 if (op1 & 2) {
7306 if (IS_USER(s))
7307 goto illegal_op;
d9ba4830 7308 tmp = load_cpu_field(spsr);
9ee6e8bb 7309 } else {
7d1b0095 7310 tmp = tcg_temp_new_i32();
9ef39277 7311 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7312 }
d9ba4830 7313 store_reg(s, rd, tmp);
9ee6e8bb
PB
7314 }
7315 break;
7316 case 0x1:
7317 if (op1 == 1) {
7318 /* branch/exchange thumb (bx). */
be5e7a76 7319 ARCH(4T);
d9ba4830
PB
7320 tmp = load_reg(s, rm);
7321 gen_bx(s, tmp);
9ee6e8bb
PB
7322 } else if (op1 == 3) {
7323 /* clz */
be5e7a76 7324 ARCH(5);
9ee6e8bb 7325 rd = (insn >> 12) & 0xf;
1497c961
PB
7326 tmp = load_reg(s, rm);
7327 gen_helper_clz(tmp, tmp);
7328 store_reg(s, rd, tmp);
9ee6e8bb
PB
7329 } else {
7330 goto illegal_op;
7331 }
7332 break;
7333 case 0x2:
7334 if (op1 == 1) {
7335 ARCH(5J); /* bxj */
7336 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7337 tmp = load_reg(s, rm);
7338 gen_bx(s, tmp);
9ee6e8bb
PB
7339 } else {
7340 goto illegal_op;
7341 }
7342 break;
7343 case 0x3:
7344 if (op1 != 1)
7345 goto illegal_op;
7346
be5e7a76 7347 ARCH(5);
9ee6e8bb 7348 /* branch link/exchange thumb (blx) */
d9ba4830 7349 tmp = load_reg(s, rm);
7d1b0095 7350 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7351 tcg_gen_movi_i32(tmp2, s->pc);
7352 store_reg(s, 14, tmp2);
7353 gen_bx(s, tmp);
9ee6e8bb
PB
7354 break;
7355 case 0x5: /* saturating add/subtract */
be5e7a76 7356 ARCH(5TE);
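/* QADD/QSUB/QDADD/QDSUB: bit 1 of op1 selects the doubling forms, which
 * saturate 2*Rn first; bit 0 selects subtract rather than add.  Each
 * helper sets the Q flag if its step saturates.
 */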
9ee6e8bb
PB
7357 rd = (insn >> 12) & 0xf;
7358 rn = (insn >> 16) & 0xf;
b40d0353 7359 tmp = load_reg(s, rm);
5e3f878a 7360 tmp2 = load_reg(s, rn);
9ee6e8bb 7361 if (op1 & 2)
9ef39277 7362 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7363 if (op1 & 1)
9ef39277 7364 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7365 else
9ef39277 7366 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7367 tcg_temp_free_i32(tmp2);
5e3f878a 7368 store_reg(s, rd, tmp);
9ee6e8bb 7369 break;
49e14940
AL
7370 case 7:
7371 /* SMC instruction (op1 == 3)
7372 and undefined instructions (op1 == 0 || op1 == 2)
7373 will trap */
7374 if (op1 != 1) {
7375 goto illegal_op;
7376 }
7377 /* bkpt */
be5e7a76 7378 ARCH(5);
bc4a0de0 7379 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
7380 break;
7381 case 0x8: /* signed multiply */
7382 case 0xa:
7383 case 0xc:
7384 case 0xe:
be5e7a76 7385 ARCH(5TE);
9ee6e8bb
PB
7386 rs = (insn >> 8) & 0xf;
7387 rn = (insn >> 12) & 0xf;
7388 rd = (insn >> 16) & 0xf;
7389 if (op1 == 1) {
7390 /* (32 * 16) >> 16 */
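/* SMULW<y>/SMLAW<y>: the 32x16 product is at most 48 bits wide, so
 * shifting the 64-bit result right by 16 keeps its significant top 32
 * bits.  The accumulate form (bit 1 of sh clear) adds Rn via
 * gen_helper_add_setq so that signed overflow sets the Q flag.
 */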
5e3f878a
PB
7391 tmp = load_reg(s, rm);
7392 tmp2 = load_reg(s, rs);
9ee6e8bb 7393 if (sh & 4)
5e3f878a 7394 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7395 else
5e3f878a 7396 gen_sxth(tmp2);
a7812ae4
PB
7397 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7398 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7399 tmp = tcg_temp_new_i32();
a7812ae4 7400 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7401 tcg_temp_free_i64(tmp64);
9ee6e8bb 7402 if ((sh & 2) == 0) {
5e3f878a 7403 tmp2 = load_reg(s, rn);
9ef39277 7404 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7405 tcg_temp_free_i32(tmp2);
9ee6e8bb 7406 }
5e3f878a 7407 store_reg(s, rd, tmp);
9ee6e8bb
PB
7408 } else {
7409 /* 16 * 16 */
5e3f878a
PB
7410 tmp = load_reg(s, rm);
7411 tmp2 = load_reg(s, rs);
7412 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7413 tcg_temp_free_i32(tmp2);
9ee6e8bb 7414 if (op1 == 2) {
a7812ae4
PB
7415 tmp64 = tcg_temp_new_i64();
7416 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7417 tcg_temp_free_i32(tmp);
a7812ae4
PB
7418 gen_addq(s, tmp64, rn, rd);
7419 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7420 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7421 } else {
7422 if (op1 == 0) {
5e3f878a 7423 tmp2 = load_reg(s, rn);
9ef39277 7424 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7425 tcg_temp_free_i32(tmp2);
9ee6e8bb 7426 }
5e3f878a 7427 store_reg(s, rd, tmp);
9ee6e8bb
PB
7428 }
7429 }
7430 break;
7431 default:
7432 goto illegal_op;
7433 }
7434 } else if (((insn & 0x0e000000) == 0 &&
7435 (insn & 0x00000090) != 0x90) ||
7436 ((insn & 0x0e000000) == (1 << 25))) {
7437 int set_cc, logic_cc, shiftop;
7438
7439 op1 = (insn >> 21) & 0xf;
7440 set_cc = (insn >> 20) & 1;
7441 logic_cc = table_logic_cc[op1] & set_cc;
7442
7443 /* data processing instruction */
7444 if (insn & (1 << 25)) {
7445 /* immediate operand */
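/* ARM "modified immediate": an 8-bit constant rotated right by twice
 * the 4-bit rotate field.  For flag-setting logical ops with a non-zero
 * rotation, the shifter carry-out is bit 31 of the rotated constant,
 * which gen_set_CF_bit31 below copies into CF.
 */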
7446 val = insn & 0xff;
7447 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7448 if (shift) {
9ee6e8bb 7449 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7450 }
7d1b0095 7451 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7452 tcg_gen_movi_i32(tmp2, val);
7453 if (logic_cc && shift) {
7454 gen_set_CF_bit31(tmp2);
7455 }
9ee6e8bb
PB
7456 } else {
7457 /* register */
7458 rm = (insn) & 0xf;
e9bb4aa9 7459 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7460 shiftop = (insn >> 5) & 3;
7461 if (!(insn & (1 << 4))) {
7462 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7463 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7464 } else {
7465 rs = (insn >> 8) & 0xf;
8984bd2e 7466 tmp = load_reg(s, rs);
e9bb4aa9 7467 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7468 }
7469 }
7470 if (op1 != 0x0f && op1 != 0x0d) {
7471 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7472 tmp = load_reg(s, rn);
7473 } else {
39d5492a 7474 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7475 }
7476 rd = (insn >> 12) & 0xf;
7477 switch(op1) {
7478 case 0x00:
e9bb4aa9
JR
7479 tcg_gen_and_i32(tmp, tmp, tmp2);
7480 if (logic_cc) {
7481 gen_logic_CC(tmp);
7482 }
21aeb343 7483 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7484 break;
7485 case 0x01:
e9bb4aa9
JR
7486 tcg_gen_xor_i32(tmp, tmp, tmp2);
7487 if (logic_cc) {
7488 gen_logic_CC(tmp);
7489 }
21aeb343 7490 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7491 break;
7492 case 0x02:
7493 if (set_cc && rd == 15) {
7494 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7495 if (IS_USER(s)) {
9ee6e8bb 7496 goto illegal_op;
e9bb4aa9 7497 }
72485ec4 7498 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7499 gen_exception_return(s, tmp);
9ee6e8bb 7500 } else {
e9bb4aa9 7501 if (set_cc) {
72485ec4 7502 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7503 } else {
7504 tcg_gen_sub_i32(tmp, tmp, tmp2);
7505 }
21aeb343 7506 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7507 }
7508 break;
7509 case 0x03:
e9bb4aa9 7510 if (set_cc) {
72485ec4 7511 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7512 } else {
7513 tcg_gen_sub_i32(tmp, tmp2, tmp);
7514 }
21aeb343 7515 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7516 break;
7517 case 0x04:
e9bb4aa9 7518 if (set_cc) {
72485ec4 7519 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7520 } else {
7521 tcg_gen_add_i32(tmp, tmp, tmp2);
7522 }
21aeb343 7523 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7524 break;
7525 case 0x05:
e9bb4aa9 7526 if (set_cc) {
49b4c31e 7527 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7528 } else {
7529 gen_add_carry(tmp, tmp, tmp2);
7530 }
21aeb343 7531 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7532 break;
7533 case 0x06:
e9bb4aa9 7534 if (set_cc) {
2de68a49 7535 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7536 } else {
7537 gen_sub_carry(tmp, tmp, tmp2);
7538 }
21aeb343 7539 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7540 break;
7541 case 0x07:
e9bb4aa9 7542 if (set_cc) {
2de68a49 7543 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7544 } else {
7545 gen_sub_carry(tmp, tmp2, tmp);
7546 }
21aeb343 7547 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7548 break;
7549 case 0x08:
7550 if (set_cc) {
e9bb4aa9
JR
7551 tcg_gen_and_i32(tmp, tmp, tmp2);
7552 gen_logic_CC(tmp);
9ee6e8bb 7553 }
7d1b0095 7554 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7555 break;
7556 case 0x09:
7557 if (set_cc) {
e9bb4aa9
JR
7558 tcg_gen_xor_i32(tmp, tmp, tmp2);
7559 gen_logic_CC(tmp);
9ee6e8bb 7560 }
7d1b0095 7561 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7562 break;
7563 case 0x0a:
7564 if (set_cc) {
72485ec4 7565 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7566 }
7d1b0095 7567 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7568 break;
7569 case 0x0b:
7570 if (set_cc) {
72485ec4 7571 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7572 }
7d1b0095 7573 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7574 break;
7575 case 0x0c:
e9bb4aa9
JR
7576 tcg_gen_or_i32(tmp, tmp, tmp2);
7577 if (logic_cc) {
7578 gen_logic_CC(tmp);
7579 }
21aeb343 7580 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7581 break;
7582 case 0x0d:
7583 if (logic_cc && rd == 15) {
7584 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7585 if (IS_USER(s)) {
9ee6e8bb 7586 goto illegal_op;
e9bb4aa9
JR
7587 }
7588 gen_exception_return(s, tmp2);
9ee6e8bb 7589 } else {
e9bb4aa9
JR
7590 if (logic_cc) {
7591 gen_logic_CC(tmp2);
7592 }
21aeb343 7593 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7594 }
7595 break;
7596 case 0x0e:
f669df27 7597 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7598 if (logic_cc) {
7599 gen_logic_CC(tmp);
7600 }
21aeb343 7601 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7602 break;
7603 default:
7604 case 0x0f:
e9bb4aa9
JR
7605 tcg_gen_not_i32(tmp2, tmp2);
7606 if (logic_cc) {
7607 gen_logic_CC(tmp2);
7608 }
21aeb343 7609 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7610 break;
7611 }
e9bb4aa9 7612 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7613 tcg_temp_free_i32(tmp2);
e9bb4aa9 7614 }
9ee6e8bb
PB
7615 } else {
7616 /* other instructions */
7617 op1 = (insn >> 24) & 0xf;
7618 switch(op1) {
7619 case 0x0:
7620 case 0x1:
7621 /* multiplies, extra load/stores */
7622 sh = (insn >> 5) & 3;
7623 if (sh == 0) {
7624 if (op1 == 0x0) {
7625 rd = (insn >> 16) & 0xf;
7626 rn = (insn >> 12) & 0xf;
7627 rs = (insn >> 8) & 0xf;
7628 rm = (insn) & 0xf;
7629 op1 = (insn >> 20) & 0xf;
7630 switch (op1) {
7631 case 0: case 1: case 2: case 3: case 6:
7632 /* 32 bit mul */
5e3f878a
PB
7633 tmp = load_reg(s, rs);
7634 tmp2 = load_reg(s, rm);
7635 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7636 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7637 if (insn & (1 << 22)) {
7638 /* Subtract (mls) */
7639 ARCH(6T2);
5e3f878a
PB
7640 tmp2 = load_reg(s, rn);
7641 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7642 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7643 } else if (insn & (1 << 21)) {
7644 /* Add */
5e3f878a
PB
7645 tmp2 = load_reg(s, rn);
7646 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7647 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7648 }
7649 if (insn & (1 << 20))
5e3f878a
PB
7650 gen_logic_CC(tmp);
7651 store_reg(s, rd, tmp);
9ee6e8bb 7652 break;
8aac08b1
AJ
7653 case 4:
7654 /* 64 bit mul double accumulate (UMAAL) */
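/* UMAAL: the 64-bit product of rm and rs plus the two independent
 * 32-bit accumulators held in rn and rd.  The sum cannot overflow
 * 64 bits, and no flags are affected.
 */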
7655 ARCH(6);
7656 tmp = load_reg(s, rs);
7657 tmp2 = load_reg(s, rm);
7658 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7659 gen_addq_lo(s, tmp64, rn);
7660 gen_addq_lo(s, tmp64, rd);
7661 gen_storeq_reg(s, rn, rd, tmp64);
7662 tcg_temp_free_i64(tmp64);
7663 break;
7664 case 8: case 9: case 10: case 11:
7665 case 12: case 13: case 14: case 15:
7666 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7667 tmp = load_reg(s, rs);
7668 tmp2 = load_reg(s, rm);
8aac08b1 7669 if (insn & (1 << 22)) {
c9f10124 7670 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7671 } else {
c9f10124 7672 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7673 }
7674 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7675 TCGv_i32 al = load_reg(s, rn);
7676 TCGv_i32 ah = load_reg(s, rd);
c9f10124 7677 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
7678 tcg_temp_free_i32(al);
7679 tcg_temp_free_i32(ah);
9ee6e8bb 7680 }
8aac08b1 7681 if (insn & (1 << 20)) {
c9f10124 7682 gen_logicq_cc(tmp, tmp2);
8aac08b1 7683 }
c9f10124
RH
7684 store_reg(s, rn, tmp);
7685 store_reg(s, rd, tmp2);
9ee6e8bb 7686 break;
8aac08b1
AJ
7687 default:
7688 goto illegal_op;
9ee6e8bb
PB
7689 }
7690 } else {
7691 rn = (insn >> 16) & 0xf;
7692 rd = (insn >> 12) & 0xf;
7693 if (insn & (1 << 23)) {
7694 /* load/store exclusive */
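/* Broadly, gen_load_exclusive records the monitored address and the
 * loaded value, and gen_store_exclusive performs the store (writing 0
 * to rd) only if both still match, writing 1 to rd otherwise; the
 * user-mode build defers that check to an exception handler.  This is
 * a sufficient model of the exclusive monitor for a single emulated CPU.
 */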
2359bf80 7695 int op2 = (insn >> 8) & 3;
86753403 7696 op1 = (insn >> 21) & 0x3;
2359bf80
MR
7697
7698 switch (op2) {
7699 case 0: /* lda/stl */
7700 if (op1 == 1) {
7701 goto illegal_op;
7702 }
7703 ARCH(8);
7704 break;
7705 case 1: /* reserved */
7706 goto illegal_op;
7707 case 2: /* ldaex/stlex */
7708 ARCH(8);
7709 break;
7710 case 3: /* ldrex/strex */
7711 if (op1) {
7712 ARCH(6K);
7713 } else {
7714 ARCH(6);
7715 }
7716 break;
7717 }
7718
3174f8e9 7719 addr = tcg_temp_local_new_i32();
98a46317 7720 load_reg_var(s, addr, rn);
2359bf80
MR
7721
7722 /* Since the emulation does not have barriers,
7723 the acquire/release semantics need no special
7724 handling */
7725 if (op2 == 0) {
7726 if (insn & (1 << 20)) {
7727 tmp = tcg_temp_new_i32();
7728 switch (op1) {
7729 case 0: /* lda */
08307563 7730 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
7731 break;
7732 case 2: /* ldab */
08307563 7733 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
7734 break;
7735 case 3: /* ldah */
08307563 7736 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
7737 break;
7738 default:
7739 abort();
7740 }
7741 store_reg(s, rd, tmp);
7742 } else {
7743 rm = insn & 0xf;
7744 tmp = load_reg(s, rm);
7745 switch (op1) {
7746 case 0: /* stl */
08307563 7747 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
7748 break;
7749 case 2: /* stlb */
08307563 7750 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
7751 break;
7752 case 3: /* stlh */
08307563 7753 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
7754 break;
7755 default:
7756 abort();
7757 }
7758 tcg_temp_free_i32(tmp);
7759 }
7760 } else if (insn & (1 << 20)) {
86753403
PB
7761 switch (op1) {
7762 case 0: /* ldrex */
426f5abc 7763 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7764 break;
7765 case 1: /* ldrexd */
426f5abc 7766 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7767 break;
7768 case 2: /* ldrexb */
426f5abc 7769 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7770 break;
7771 case 3: /* ldrexh */
426f5abc 7772 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7773 break;
7774 default:
7775 abort();
7776 }
9ee6e8bb
PB
7777 } else {
7778 rm = insn & 0xf;
86753403
PB
7779 switch (op1) {
7780 case 0: /* strex */
426f5abc 7781 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7782 break;
7783 case 1: /* strexd */
502e64fe 7784 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7785 break;
7786 case 2: /* strexb */
426f5abc 7787 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7788 break;
7789 case 3: /* strexh */
426f5abc 7790 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7791 break;
7792 default:
7793 abort();
7794 }
9ee6e8bb 7795 }
39d5492a 7796 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7797 } else {
7798 /* SWP instruction */
7799 rm = (insn) & 0xf;
7800
8984bd2e
PB
7801 /* ??? This is not really atomic. However we know
7802 we never have multiple CPUs running in parallel,
7803 so it is good enough. */
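/* SWP{B} exchanges a register with memory: the old memory contents end
 * up in rd and rm is stored to the same address, implemented here as an
 * ordinary load followed by a store.
 */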
7804 addr = load_reg(s, rn);
7805 tmp = load_reg(s, rm);
5a839c0d 7806 tmp2 = tcg_temp_new_i32();
9ee6e8bb 7807 if (insn & (1 << 22)) {
08307563
PM
7808 gen_aa32_ld8u(tmp2, addr, IS_USER(s));
7809 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7810 } else {
08307563
PM
7811 gen_aa32_ld32u(tmp2, addr, IS_USER(s));
7812 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7813 }
5a839c0d 7814 tcg_temp_free_i32(tmp);
7d1b0095 7815 tcg_temp_free_i32(addr);
8984bd2e 7816 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7817 }
7818 }
7819 } else {
7820 int address_offset;
7821 int load;
7822 /* Misc load/store */
7823 rn = (insn >> 16) & 0xf;
7824 rd = (insn >> 12) & 0xf;
b0109805 7825 addr = load_reg(s, rn);
9ee6e8bb 7826 if (insn & (1 << 24))
b0109805 7827 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7828 address_offset = 0;
7829 if (insn & (1 << 20)) {
7830 /* load */
5a839c0d 7831 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
7832 switch(sh) {
7833 case 1:
08307563 7834 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7835 break;
7836 case 2:
08307563 7837 gen_aa32_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7838 break;
7839 default:
7840 case 3:
08307563 7841 gen_aa32_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7842 break;
7843 }
7844 load = 1;
7845 } else if (sh & 2) {
be5e7a76 7846 ARCH(5TE);
9ee6e8bb
PB
7847 /* doubleword */
7848 if (sh & 1) {
7849 /* store */
b0109805 7850 tmp = load_reg(s, rd);
08307563 7851 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7852 tcg_temp_free_i32(tmp);
b0109805
PB
7853 tcg_gen_addi_i32(addr, addr, 4);
7854 tmp = load_reg(s, rd + 1);
08307563 7855 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7856 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7857 load = 0;
7858 } else {
7859 /* load */
5a839c0d 7860 tmp = tcg_temp_new_i32();
08307563 7861 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
7862 store_reg(s, rd, tmp);
7863 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7864 tmp = tcg_temp_new_i32();
08307563 7865 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7866 rd++;
7867 load = 1;
7868 }
7869 address_offset = -4;
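/* addr was advanced by 4 to reach the second word, so the -4 makes the
 * post-index/writeback arithmetic below see the original base address.
 */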
7870 } else {
7871 /* store */
b0109805 7872 tmp = load_reg(s, rd);
08307563 7873 gen_aa32_st16(tmp, addr, IS_USER(s));
5a839c0d 7874 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7875 load = 0;
7876 }
7877 /* Perform base writeback before the loaded value to
7878 ensure correct behavior with overlapping index registers.
7879 ldrd with base writeback is undefined if the
7880 destination and index registers overlap. */
7881 if (!(insn & (1 << 24))) {
b0109805
PB
7882 gen_add_datah_offset(s, insn, address_offset, addr);
7883 store_reg(s, rn, addr);
9ee6e8bb
PB
7884 } else if (insn & (1 << 21)) {
7885 if (address_offset)
b0109805
PB
7886 tcg_gen_addi_i32(addr, addr, address_offset);
7887 store_reg(s, rn, addr);
7888 } else {
7d1b0095 7889 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7890 }
7891 if (load) {
7892 /* Complete the load. */
b0109805 7893 store_reg(s, rd, tmp);
9ee6e8bb
PB
7894 }
7895 }
7896 break;
7897 case 0x4:
7898 case 0x5:
7899 goto do_ldst;
7900 case 0x6:
7901 case 0x7:
7902 if (insn & (1 << 4)) {
7903 ARCH(6);
7904 /* ARMv6 media instructions. */
7905 rm = insn & 0xf;
7906 rn = (insn >> 16) & 0xf;
2c0262af 7907 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7908 rs = (insn >> 8) & 0xf;
7909 switch ((insn >> 23) & 3) {
7910 case 0: /* Parallel add/subtract. */
7911 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7912 tmp = load_reg(s, rn);
7913 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7914 sh = (insn >> 5) & 7;
7915 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7916 goto illegal_op;
6ddbc6e4 7917 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7918 tcg_temp_free_i32(tmp2);
6ddbc6e4 7919 store_reg(s, rd, tmp);
9ee6e8bb
PB
7920 break;
7921 case 1:
7922 if ((insn & 0x00700020) == 0) {
6c95676b 7923 /* Halfword pack. */
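/* PKHBT combines the bottom halfword of Rn with the top halfword of
 * Rm LSL #imm; PKHTB combines the top halfword of Rn with the bottom
 * halfword of Rm ASR #imm, where an immediate of 0 encodes ASR #32
 * (shifting by 31 yields the same low 16 bits).
 */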
3670669c
PB
7924 tmp = load_reg(s, rn);
7925 tmp2 = load_reg(s, rm);
9ee6e8bb 7926 shift = (insn >> 7) & 0x1f;
3670669c
PB
7927 if (insn & (1 << 6)) {
7928 /* pkhtb */
22478e79
AZ
7929 if (shift == 0)
7930 shift = 31;
7931 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7932 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7933 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7934 } else {
7935 /* pkhbt */
22478e79
AZ
7936 if (shift)
7937 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7938 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7939 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7940 }
7941 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7942 tcg_temp_free_i32(tmp2);
3670669c 7943 store_reg(s, rd, tmp);
9ee6e8bb
PB
7944 } else if ((insn & 0x00200020) == 0x00200000) {
7945 /* [us]sat */
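/* SSAT/USAT: the operand is first shifted (LSL #imm, or ASR where an
 * immediate of 0 means #32), then saturated to the signed/unsigned
 * width given by the sat_imm field in bits [20:16]; the helpers set
 * the Q flag if saturation occurred.
 */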
6ddbc6e4 7946 tmp = load_reg(s, rm);
9ee6e8bb
PB
7947 shift = (insn >> 7) & 0x1f;
7948 if (insn & (1 << 6)) {
7949 if (shift == 0)
7950 shift = 31;
6ddbc6e4 7951 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7952 } else {
6ddbc6e4 7953 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7954 }
7955 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7956 tmp2 = tcg_const_i32(sh);
7957 if (insn & (1 << 22))
9ef39277 7958 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7959 else
9ef39277 7960 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7961 tcg_temp_free_i32(tmp2);
6ddbc6e4 7962 store_reg(s, rd, tmp);
9ee6e8bb
PB
7963 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7964 /* [us]sat16 */
6ddbc6e4 7965 tmp = load_reg(s, rm);
9ee6e8bb 7966 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7967 tmp2 = tcg_const_i32(sh);
7968 if (insn & (1 << 22))
9ef39277 7969 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7970 else
9ef39277 7971 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7972 tcg_temp_free_i32(tmp2);
6ddbc6e4 7973 store_reg(s, rd, tmp);
9ee6e8bb
PB
7974 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7975 /* Select bytes. */
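/* SEL picks each byte of the result from Rn or Rm according to the
 * GE[3:0] flags left behind by the parallel add/subtract instructions,
 * which is why the GE field is loaded from CPUARMState here.
 */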
6ddbc6e4
PB
7976 tmp = load_reg(s, rn);
7977 tmp2 = load_reg(s, rm);
7d1b0095 7978 tmp3 = tcg_temp_new_i32();
0ecb72a5 7979 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7980 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7981 tcg_temp_free_i32(tmp3);
7982 tcg_temp_free_i32(tmp2);
6ddbc6e4 7983 store_reg(s, rd, tmp);
9ee6e8bb 7984 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7985 tmp = load_reg(s, rm);
9ee6e8bb 7986 shift = (insn >> 10) & 3;
1301f322 7987 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7988 rotate; a shift is sufficient. */
7989 if (shift != 0)
f669df27 7990 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7991 op1 = (insn >> 20) & 7;
7992 switch (op1) {
5e3f878a
PB
7993 case 0: gen_sxtb16(tmp); break;
7994 case 2: gen_sxtb(tmp); break;
7995 case 3: gen_sxth(tmp); break;
7996 case 4: gen_uxtb16(tmp); break;
7997 case 6: gen_uxtb(tmp); break;
7998 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7999 default: goto illegal_op;
8000 }
8001 if (rn != 15) {
5e3f878a 8002 tmp2 = load_reg(s, rn);
9ee6e8bb 8003 if ((op1 & 3) == 0) {
5e3f878a 8004 gen_add16(tmp, tmp2);
9ee6e8bb 8005 } else {
5e3f878a 8006 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8007 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8008 }
8009 }
6c95676b 8010 store_reg(s, rd, tmp);
9ee6e8bb
PB
8011 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8012 /* rev */
b0109805 8013 tmp = load_reg(s, rm);
9ee6e8bb
PB
8014 if (insn & (1 << 22)) {
8015 if (insn & (1 << 7)) {
b0109805 8016 gen_revsh(tmp);
9ee6e8bb
PB
8017 } else {
8018 ARCH(6T2);
b0109805 8019 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8020 }
8021 } else {
8022 if (insn & (1 << 7))
b0109805 8023 gen_rev16(tmp);
9ee6e8bb 8024 else
66896cb8 8025 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8026 }
b0109805 8027 store_reg(s, rd, tmp);
9ee6e8bb
PB
8028 } else {
8029 goto illegal_op;
8030 }
8031 break;
8032 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8033 switch ((insn >> 20) & 0x7) {
8034 case 5:
8035 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8036 /* op2 not 00x or 11x : UNDEF */
8037 goto illegal_op;
8038 }
838fa72d
AJ
8039 /* Signed multiply most significant [accumulate].
8040 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8041 tmp = load_reg(s, rm);
8042 tmp2 = load_reg(s, rs);
a7812ae4 8043 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8044
955a7dd5 8045 if (rd != 15) {
838fa72d 8046 tmp = load_reg(s, rd);
9ee6e8bb 8047 if (insn & (1 << 6)) {
838fa72d 8048 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8049 } else {
838fa72d 8050 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8051 }
8052 }
838fa72d
AJ
8053 if (insn & (1 << 5)) {
8054 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8055 }
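/* Bit 5 selects the rounding forms (SMMULR/SMMLAR/SMMLSR): adding
 * 0x80000000 before discarding the low 32 bits rounds the 64-bit
 * value to nearest instead of truncating.
 */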
8056 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8057 tmp = tcg_temp_new_i32();
838fa72d
AJ
8058 tcg_gen_trunc_i64_i32(tmp, tmp64);
8059 tcg_temp_free_i64(tmp64);
955a7dd5 8060 store_reg(s, rn, tmp);
41e9564d
PM
8061 break;
8062 case 0:
8063 case 4:
8064 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8065 if (insn & (1 << 7)) {
8066 goto illegal_op;
8067 }
8068 tmp = load_reg(s, rm);
8069 tmp2 = load_reg(s, rs);
9ee6e8bb 8070 if (insn & (1 << 5))
5e3f878a
PB
8071 gen_swap_half(tmp2);
8072 gen_smul_dual(tmp, tmp2);
5e3f878a 8073 if (insn & (1 << 6)) {
e1d177b9 8074 /* This subtraction cannot overflow. */
5e3f878a
PB
8075 tcg_gen_sub_i32(tmp, tmp, tmp2);
8076 } else {
e1d177b9
PM
8077 /* This addition cannot overflow 32 bits;
8078 * however it may overflow considered as a signed
8079 * operation, in which case we must set the Q flag.
8080 */
9ef39277 8081 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 8082 }
7d1b0095 8083 tcg_temp_free_i32(tmp2);
9ee6e8bb 8084 if (insn & (1 << 22)) {
5e3f878a 8085 /* smlald, smlsld */
a7812ae4
PB
8086 tmp64 = tcg_temp_new_i64();
8087 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8088 tcg_temp_free_i32(tmp);
a7812ae4
PB
8089 gen_addq(s, tmp64, rd, rn);
8090 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8091 tcg_temp_free_i64(tmp64);
9ee6e8bb 8092 } else {
5e3f878a 8093 /* smuad, smusd, smlad, smlsd */
22478e79 8094 if (rd != 15)
9ee6e8bb 8095 {
22478e79 8096 tmp2 = load_reg(s, rd);
9ef39277 8097 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8098 tcg_temp_free_i32(tmp2);
9ee6e8bb 8099 }
22478e79 8100 store_reg(s, rn, tmp);
9ee6e8bb 8101 }
41e9564d 8102 break;
b8b8ea05
PM
8103 case 1:
8104 case 3:
8105 /* SDIV, UDIV */
8106 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
8107 goto illegal_op;
8108 }
8109 if (((insn >> 5) & 7) || (rd != 15)) {
8110 goto illegal_op;
8111 }
8112 tmp = load_reg(s, rm);
8113 tmp2 = load_reg(s, rs);
8114 if (insn & (1 << 21)) {
8115 gen_helper_udiv(tmp, tmp, tmp2);
8116 } else {
8117 gen_helper_sdiv(tmp, tmp, tmp2);
8118 }
8119 tcg_temp_free_i32(tmp2);
8120 store_reg(s, rn, tmp);
8121 break;
41e9564d
PM
8122 default:
8123 goto illegal_op;
9ee6e8bb
PB
8124 }
8125 break;
8126 case 3:
8127 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8128 switch (op1) {
8129 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8130 ARCH(6);
8131 tmp = load_reg(s, rm);
8132 tmp2 = load_reg(s, rs);
8133 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8134 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8135 if (rd != 15) {
8136 tmp2 = load_reg(s, rd);
6ddbc6e4 8137 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8138 tcg_temp_free_i32(tmp2);
9ee6e8bb 8139 }
ded9d295 8140 store_reg(s, rn, tmp);
9ee6e8bb
PB
8141 break;
8142 case 0x20: case 0x24: case 0x28: case 0x2c:
8143 /* Bitfield insert/clear. */
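/* BFI/BFC: lsb comes from bits [11:7] and msb from bits [20:16], so
 * the field width is msb - lsb + 1.  rm == 15 encodes BFC, which
 * deposits zeros; otherwise the low bits of Rm are deposited into Rd
 * starting at bit lsb.
 */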
8144 ARCH(6T2);
8145 shift = (insn >> 7) & 0x1f;
8146 i = (insn >> 16) & 0x1f;
8147 i = i + 1 - shift;
8148 if (rm == 15) {
7d1b0095 8149 tmp = tcg_temp_new_i32();
5e3f878a 8150 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8151 } else {
5e3f878a 8152 tmp = load_reg(s, rm);
9ee6e8bb
PB
8153 }
8154 if (i != 32) {
5e3f878a 8155 tmp2 = load_reg(s, rd);
d593c48e 8156 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8157 tcg_temp_free_i32(tmp2);
9ee6e8bb 8158 }
5e3f878a 8159 store_reg(s, rd, tmp);
9ee6e8bb
PB
8160 break;
8161 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8162 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8163 ARCH(6T2);
5e3f878a 8164 tmp = load_reg(s, rm);
9ee6e8bb
PB
8165 shift = (insn >> 7) & 0x1f;
8166 i = ((insn >> 16) & 0x1f) + 1;
8167 if (shift + i > 32)
8168 goto illegal_op;
8169 if (i < 32) {
8170 if (op1 & 0x20) {
5e3f878a 8171 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8172 } else {
5e3f878a 8173 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8174 }
8175 }
5e3f878a 8176 store_reg(s, rd, tmp);
9ee6e8bb
PB
8177 break;
8178 default:
8179 goto illegal_op;
8180 }
8181 break;
8182 }
8183 break;
8184 }
8185 do_ldst:
8186 /* Check for undefined extension instructions
8187 * per the ARM Bible, i.e.:
8188 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8189 */
8190 sh = (0xf << 20) | (0xf << 4);
8191 if (op1 == 0x7 && ((insn & sh) == sh))
8192 {
8193 goto illegal_op;
8194 }
8195 /* load/store byte/word */
8196 rn = (insn >> 16) & 0xf;
8197 rd = (insn >> 12) & 0xf;
b0109805 8198 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
8199 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
8200 if (insn & (1 << 24))
b0109805 8201 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8202 if (insn & (1 << 20)) {
8203 /* load */
5a839c0d 8204 tmp = tcg_temp_new_i32();
9ee6e8bb 8205 if (insn & (1 << 22)) {
08307563 8206 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8207 } else {
08307563 8208 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8209 }
9ee6e8bb
PB
8210 } else {
8211 /* store */
b0109805 8212 tmp = load_reg(s, rd);
5a839c0d 8213 if (insn & (1 << 22)) {
08307563 8214 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8215 } else {
08307563 8216 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8217 }
8218 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8219 }
8220 if (!(insn & (1 << 24))) {
b0109805
PB
8221 gen_add_data_offset(s, insn, tmp2);
8222 store_reg(s, rn, tmp2);
8223 } else if (insn & (1 << 21)) {
8224 store_reg(s, rn, tmp2);
8225 } else {
7d1b0095 8226 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8227 }
8228 if (insn & (1 << 20)) {
8229 /* Complete the load. */
be5e7a76 8230 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
8231 }
8232 break;
8233 case 0x08:
8234 case 0x09:
8235 {
8236 int j, n, user, loaded_base;
39d5492a 8237 TCGv_i32 loaded_var;
9ee6e8bb
PB
8238 /* load/store multiple words */
8239 /* XXX: store correct base if write back */
8240 user = 0;
8241 if (insn & (1 << 22)) {
8242 if (IS_USER(s))
8243 goto illegal_op; /* only usable in supervisor mode */
8244
8245 if ((insn & (1 << 15)) == 0)
8246 user = 1;
8247 }
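/* With the S bit set and r15 absent from the list, the user-bank
 * registers are transferred via the get/set_user_reg helpers; with
 * r15 present on a load, CPSR is additionally restored from SPSR
 * after the transfer (exception return).
 */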
8248 rn = (insn >> 16) & 0xf;
b0109805 8249 addr = load_reg(s, rn);
9ee6e8bb
PB
8250
8251 /* compute total size */
8252 loaded_base = 0;
39d5492a 8253 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8254 n = 0;
8255 for(i=0;i<16;i++) {
8256 if (insn & (1 << i))
8257 n++;
8258 }
8259 /* XXX: test invalid n == 0 case ? */
8260 if (insn & (1 << 23)) {
8261 if (insn & (1 << 24)) {
8262 /* pre increment */
b0109805 8263 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8264 } else {
8265 /* post increment */
8266 }
8267 } else {
8268 if (insn & (1 << 24)) {
8269 /* pre decrement */
b0109805 8270 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8271 } else {
8272 /* post decrement */
8273 if (n != 1)
b0109805 8274 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8275 }
8276 }
8277 j = 0;
8278 for(i=0;i<16;i++) {
8279 if (insn & (1 << i)) {
8280 if (insn & (1 << 20)) {
8281 /* load */
5a839c0d 8282 tmp = tcg_temp_new_i32();
08307563 8283 gen_aa32_ld32u(tmp, addr, IS_USER(s));
be5e7a76 8284 if (user) {
b75263d6 8285 tmp2 = tcg_const_i32(i);
1ce94f81 8286 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8287 tcg_temp_free_i32(tmp2);
7d1b0095 8288 tcg_temp_free_i32(tmp);
9ee6e8bb 8289 } else if (i == rn) {
b0109805 8290 loaded_var = tmp;
9ee6e8bb
PB
8291 loaded_base = 1;
8292 } else {
be5e7a76 8293 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
8294 }
8295 } else {
8296 /* store */
8297 if (i == 15) {
8298 /* special case: r15 = PC + 8 */
8299 val = (long)s->pc + 4;
7d1b0095 8300 tmp = tcg_temp_new_i32();
b0109805 8301 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8302 } else if (user) {
7d1b0095 8303 tmp = tcg_temp_new_i32();
b75263d6 8304 tmp2 = tcg_const_i32(i);
9ef39277 8305 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8306 tcg_temp_free_i32(tmp2);
9ee6e8bb 8307 } else {
b0109805 8308 tmp = load_reg(s, i);
9ee6e8bb 8309 }
08307563 8310 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 8311 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8312 }
8313 j++;
8314 /* no need to add after the last transfer */
8315 if (j != n)
b0109805 8316 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8317 }
8318 }
8319 if (insn & (1 << 21)) {
8320 /* write back */
8321 if (insn & (1 << 23)) {
8322 if (insn & (1 << 24)) {
8323 /* pre increment */
8324 } else {
8325 /* post increment */
b0109805 8326 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8327 }
8328 } else {
8329 if (insn & (1 << 24)) {
8330 /* pre decrement */
8331 if (n != 1)
b0109805 8332 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8333 } else {
8334 /* post decrement */
b0109805 8335 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8336 }
8337 }
b0109805
PB
8338 store_reg(s, rn, addr);
8339 } else {
7d1b0095 8340 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8341 }
8342 if (loaded_base) {
b0109805 8343 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8344 }
8345 if ((insn & (1 << 22)) && !user) {
8346 /* Restore CPSR from SPSR. */
d9ba4830
PB
8347 tmp = load_cpu_field(spsr);
8348 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 8349 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8350 s->is_jmp = DISAS_UPDATE;
8351 }
8352 }
8353 break;
8354 case 0xa:
8355 case 0xb:
8356 {
8357 int32_t offset;
8358
8359 /* branch (and link) */
8360 val = (int32_t)s->pc;
8361 if (insn & (1 << 24)) {
7d1b0095 8362 tmp = tcg_temp_new_i32();
5e3f878a
PB
8363 tcg_gen_movi_i32(tmp, val);
8364 store_reg(s, 14, tmp);
9ee6e8bb 8365 }
534df156
PM
8366 offset = sextract32(insn << 2, 0, 26);
8367 val += offset + 4;
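/* sextract32(insn << 2, 0, 26) scales the 24-bit immediate by 4 and
 * sign-extends it in one step; the extra +4 on top of s->pc yields the
 * architectural base of this insn's address + 8.
 */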
9ee6e8bb
PB
8368 gen_jmp(s, val);
8369 }
8370 break;
8371 case 0xc:
8372 case 0xd:
8373 case 0xe:
6a57f3eb
WN
8374 if (((insn >> 8) & 0xe) == 10) {
8375 /* VFP. */
8376 if (disas_vfp_insn(env, s, insn)) {
8377 goto illegal_op;
8378 }
8379 } else if (disas_coproc_insn(env, s, insn)) {
8380 /* Coprocessor. */
9ee6e8bb 8381 goto illegal_op;
6a57f3eb 8382 }
9ee6e8bb
PB
8383 break;
8384 case 0xf:
8385 /* swi */
eaed129d 8386 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
8387 s->is_jmp = DISAS_SWI;
8388 break;
8389 default:
8390 illegal_op:
bc4a0de0 8391 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
8392 break;
8393 }
8394 }
8395}
8396
8397/* Return true if this is a Thumb-2 logical op. */
8398static int
8399thumb2_logic_op(int op)
8400{
8401 return (op < 8);
8402}
8403
8404/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8405 then set condition code flags based on the result of the operation.
8406 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8407 to the high bit of T1.
8408 Returns zero if the opcode is valid. */
8409
8410static int
39d5492a
PM
8411gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8412 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8413{
8414 int logic_cc;
8415
8416 logic_cc = 0;
8417 switch (op) {
8418 case 0: /* and */
396e467c 8419 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8420 logic_cc = conds;
8421 break;
8422 case 1: /* bic */
f669df27 8423 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8424 logic_cc = conds;
8425 break;
8426 case 2: /* orr */
396e467c 8427 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8428 logic_cc = conds;
8429 break;
8430 case 3: /* orn */
29501f1b 8431 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8432 logic_cc = conds;
8433 break;
8434 case 4: /* eor */
396e467c 8435 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8436 logic_cc = conds;
8437 break;
8438 case 8: /* add */
8439 if (conds)
72485ec4 8440 gen_add_CC(t0, t0, t1);
9ee6e8bb 8441 else
396e467c 8442 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8443 break;
8444 case 10: /* adc */
8445 if (conds)
49b4c31e 8446 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8447 else
396e467c 8448 gen_adc(t0, t1);
9ee6e8bb
PB
8449 break;
8450 case 11: /* sbc */
2de68a49
RH
8451 if (conds) {
8452 gen_sbc_CC(t0, t0, t1);
8453 } else {
396e467c 8454 gen_sub_carry(t0, t0, t1);
2de68a49 8455 }
9ee6e8bb
PB
8456 break;
8457 case 13: /* sub */
8458 if (conds)
72485ec4 8459 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8460 else
396e467c 8461 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8462 break;
8463 case 14: /* rsb */
8464 if (conds)
72485ec4 8465 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8466 else
396e467c 8467 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8468 break;
8469 default: /* 5, 6, 7, 9, 12, 15. */
8470 return 1;
8471 }
8472 if (logic_cc) {
396e467c 8473 gen_logic_CC(t0);
9ee6e8bb 8474 if (shifter_out)
396e467c 8475 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8476 }
8477 return 0;
8478}
8479
8480/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8481 is not legal. */
0ecb72a5 8482static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8483{
b0109805 8484 uint32_t insn, imm, shift, offset;
9ee6e8bb 8485 uint32_t rd, rn, rm, rs;
39d5492a
PM
8486 TCGv_i32 tmp;
8487 TCGv_i32 tmp2;
8488 TCGv_i32 tmp3;
8489 TCGv_i32 addr;
a7812ae4 8490 TCGv_i64 tmp64;
9ee6e8bb
PB
8491 int op;
8492 int shiftop;
8493 int conds;
8494 int logic_cc;
8495
8496 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8497 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8498 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8499 16-bit instructions to get correct prefetch abort behavior. */
8500 insn = insn_hw1;
8501 if ((insn & (1 << 12)) == 0) {
be5e7a76 8502 ARCH(5);
9ee6e8bb
PB
8503 /* Second half of blx. */
8504 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8505 tmp = load_reg(s, 14);
8506 tcg_gen_addi_i32(tmp, tmp, offset);
8507 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8508
7d1b0095 8509 tmp2 = tcg_temp_new_i32();
b0109805 8510 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8511 store_reg(s, 14, tmp2);
8512 gen_bx(s, tmp);
9ee6e8bb
PB
8513 return 0;
8514 }
8515 if (insn & (1 << 11)) {
8516 /* Second half of bl. */
8517 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8518 tmp = load_reg(s, 14);
6a0d8a1d 8519 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8520
7d1b0095 8521 tmp2 = tcg_temp_new_i32();
b0109805 8522 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8523 store_reg(s, 14, tmp2);
8524 gen_bx(s, tmp);
9ee6e8bb
PB
8525 return 0;
8526 }
8527 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8528 /* Instruction spans a page boundary. Implement it as two
8529 16-bit instructions in case the second half causes a
8530 prefetch abort. */
8531 offset = ((int32_t)insn << 21) >> 9;
396e467c 8532 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8533 return 0;
8534 }
8535 /* Fall through to 32-bit decode. */
8536 }
8537
d31dd73e 8538 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8539 s->pc += 2;
8540 insn |= (uint32_t)insn_hw1 << 16;
8541
8542 if ((insn & 0xf800e800) != 0xf000e800) {
8543 ARCH(6T2);
8544 }
8545
8546 rn = (insn >> 16) & 0xf;
8547 rs = (insn >> 12) & 0xf;
8548 rd = (insn >> 8) & 0xf;
8549 rm = insn & 0xf;
8550 switch ((insn >> 25) & 0xf) {
8551 case 0: case 1: case 2: case 3:
8552 /* 16-bit instructions. Should never happen. */
8553 abort();
8554 case 4:
8555 if (insn & (1 << 22)) {
8556 /* Other load/store, table branch. */
8557 if (insn & 0x01200000) {
8558 /* Load/store doubleword. */
8559 if (rn == 15) {
7d1b0095 8560 addr = tcg_temp_new_i32();
b0109805 8561 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8562 } else {
b0109805 8563 addr = load_reg(s, rn);
9ee6e8bb
PB
8564 }
8565 offset = (insn & 0xff) * 4;
8566 if ((insn & (1 << 23)) == 0)
8567 offset = -offset;
8568 if (insn & (1 << 24)) {
b0109805 8569 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8570 offset = 0;
8571 }
8572 if (insn & (1 << 20)) {
8573 /* ldrd */
e2592fad 8574 tmp = tcg_temp_new_i32();
08307563 8575 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8576 store_reg(s, rs, tmp);
8577 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8578 tmp = tcg_temp_new_i32();
08307563 8579 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 8580 store_reg(s, rd, tmp);
9ee6e8bb
PB
8581 } else {
8582 /* strd */
b0109805 8583 tmp = load_reg(s, rs);
08307563 8584 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8585 tcg_temp_free_i32(tmp);
b0109805
PB
8586 tcg_gen_addi_i32(addr, addr, 4);
8587 tmp = load_reg(s, rd);
08307563 8588 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8589 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8590 }
8591 if (insn & (1 << 21)) {
8592 /* Base writeback. */
8593 if (rn == 15)
8594 goto illegal_op;
b0109805
PB
8595 tcg_gen_addi_i32(addr, addr, offset - 4);
8596 store_reg(s, rn, addr);
8597 } else {
7d1b0095 8598 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8599 }
8600 } else if ((insn & (1 << 23)) == 0) {
8601 /* Load/store exclusive word. */
39d5492a 8602 addr = tcg_temp_local_new_i32();
98a46317 8603 load_reg_var(s, addr, rn);
426f5abc 8604 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8605 if (insn & (1 << 20)) {
426f5abc 8606 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8607 } else {
426f5abc 8608 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8609 }
39d5492a 8610 tcg_temp_free_i32(addr);
2359bf80 8611 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
8612 /* Table Branch. */
8613 if (rn == 15) {
7d1b0095 8614 addr = tcg_temp_new_i32();
b0109805 8615 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8616 } else {
b0109805 8617 addr = load_reg(s, rn);
9ee6e8bb 8618 }
b26eefb6 8619 tmp = load_reg(s, rm);
b0109805 8620 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8621 if (insn & (1 << 4)) {
8622 /* tbh */
b0109805 8623 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8624 tcg_temp_free_i32(tmp);
e2592fad 8625 tmp = tcg_temp_new_i32();
08307563 8626 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb 8627 } else { /* tbb */
7d1b0095 8628 tcg_temp_free_i32(tmp);
e2592fad 8629 tmp = tcg_temp_new_i32();
08307563 8630 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb 8631 }
7d1b0095 8632 tcg_temp_free_i32(addr);
b0109805
PB
8633 tcg_gen_shli_i32(tmp, tmp, 1);
8634 tcg_gen_addi_i32(tmp, tmp, s->pc);
8635 store_reg(s, 15, tmp);
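/* TBB/TBH: the unsigned byte/halfword fetched at Rn + Rm(*2) counts
 * halfwords, so it is doubled and added to the PC, giving a
 * forward-only branch table.
 */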
9ee6e8bb 8636 } else {
2359bf80 8637 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 8638 op = (insn >> 4) & 0x3;
2359bf80
MR
8639 switch (op2) {
8640 case 0:
426f5abc 8641 goto illegal_op;
2359bf80
MR
8642 case 1:
8643 /* Load/store exclusive byte/halfword/doubleword */
8644 if (op == 2) {
8645 goto illegal_op;
8646 }
8647 ARCH(7);
8648 break;
8649 case 2:
8650 /* Load-acquire/store-release */
8651 if (op == 3) {
8652 goto illegal_op;
8653 }
8654 /* Fall through */
8655 case 3:
8656 /* Load-acquire/store-release exclusive */
8657 ARCH(8);
8658 break;
426f5abc 8659 }
39d5492a 8660 addr = tcg_temp_local_new_i32();
98a46317 8661 load_reg_var(s, addr, rn);
2359bf80
MR
8662 if (!(op2 & 1)) {
8663 if (insn & (1 << 20)) {
8664 tmp = tcg_temp_new_i32();
8665 switch (op) {
8666 case 0: /* ldab */
08307563 8667 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
8668 break;
8669 case 1: /* ldah */
08307563 8670 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
8671 break;
8672 case 2: /* lda */
08307563 8673 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
8674 break;
8675 default:
8676 abort();
8677 }
8678 store_reg(s, rs, tmp);
8679 } else {
8680 tmp = load_reg(s, rs);
8681 switch (op) {
8682 case 0: /* stlb */
08307563 8683 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
8684 break;
8685 case 1: /* stlh */
08307563 8686 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
8687 break;
8688 case 2: /* stl */
08307563 8689 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
8690 break;
8691 default:
8692 abort();
8693 }
8694 tcg_temp_free_i32(tmp);
8695 }
8696 } else if (insn & (1 << 20)) {
426f5abc 8697 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8698 } else {
426f5abc 8699 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8700 }
39d5492a 8701 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8702 }
8703 } else {
8704 /* Load/store multiple, RFE, SRS. */
8705 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
8706 /* RFE, SRS: not available in user mode or on M profile */
8707 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 8708 goto illegal_op;
00115976 8709 }
9ee6e8bb
PB
8710 if (insn & (1 << 20)) {
8711 /* rfe */
b0109805
PB
8712 addr = load_reg(s, rn);
8713 if ((insn & (1 << 24)) == 0)
8714 tcg_gen_addi_i32(addr, addr, -8);
8715 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 8716 tmp = tcg_temp_new_i32();
08307563 8717 gen_aa32_ld32u(tmp, addr, 0);
b0109805 8718 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8719 tmp2 = tcg_temp_new_i32();
08307563 8720 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
8721 if (insn & (1 << 21)) {
8722 /* Base writeback. */
b0109805
PB
8723 if (insn & (1 << 24)) {
8724 tcg_gen_addi_i32(addr, addr, 4);
8725 } else {
8726 tcg_gen_addi_i32(addr, addr, -4);
8727 }
8728 store_reg(s, rn, addr);
8729 } else {
7d1b0095 8730 tcg_temp_free_i32(addr);
9ee6e8bb 8731 }
b0109805 8732 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8733 } else {
8734 /* srs */
81465888
PM
8735 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8736 insn & (1 << 21));
9ee6e8bb
PB
8737 }
8738 } else {
5856d44e 8739 int i, loaded_base = 0;
39d5492a 8740 TCGv_i32 loaded_var;
9ee6e8bb 8741 /* Load/store multiple. */
b0109805 8742 addr = load_reg(s, rn);
9ee6e8bb
PB
8743 offset = 0;
8744 for (i = 0; i < 16; i++) {
8745 if (insn & (1 << i))
8746 offset += 4;
8747 }
8748 if (insn & (1 << 24)) {
b0109805 8749 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8750 }
8751
39d5492a 8752 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8753 for (i = 0; i < 16; i++) {
8754 if ((insn & (1 << i)) == 0)
8755 continue;
8756 if (insn & (1 << 20)) {
8757 /* Load. */
e2592fad 8758 tmp = tcg_temp_new_i32();
08307563 8759 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 8760 if (i == 15) {
b0109805 8761 gen_bx(s, tmp);
5856d44e
YO
8762 } else if (i == rn) {
8763 loaded_var = tmp;
8764 loaded_base = 1;
9ee6e8bb 8765 } else {
b0109805 8766 store_reg(s, i, tmp);
9ee6e8bb
PB
8767 }
8768 } else {
8769 /* Store. */
b0109805 8770 tmp = load_reg(s, i);
08307563 8771 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8772 tcg_temp_free_i32(tmp);
9ee6e8bb 8773 }
b0109805 8774 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8775 }
5856d44e
YO
8776 if (loaded_base) {
8777 store_reg(s, rn, loaded_var);
8778 }
9ee6e8bb
PB
8779 if (insn & (1 << 21)) {
8780 /* Base register writeback. */
8781 if (insn & (1 << 24)) {
b0109805 8782 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8783 }
8784 /* Fault if writeback register is in register list. */
8785 if (insn & (1 << rn))
8786 goto illegal_op;
b0109805
PB
8787 store_reg(s, rn, addr);
8788 } else {
7d1b0095 8789 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8790 }
8791 }
8792 }
8793 break;
2af9ab77
JB
8794 case 5:
8795
9ee6e8bb 8796 op = (insn >> 21) & 0xf;
2af9ab77
JB
8797 if (op == 6) {
8798 /* Halfword pack. */
8799 tmp = load_reg(s, rn);
8800 tmp2 = load_reg(s, rm);
8801 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8802 if (insn & (1 << 5)) {
8803 /* pkhtb */
8804 if (shift == 0)
8805 shift = 31;
8806 tcg_gen_sari_i32(tmp2, tmp2, shift);
8807 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8808 tcg_gen_ext16u_i32(tmp2, tmp2);
8809 } else {
8810 /* pkhbt */
8811 if (shift)
8812 tcg_gen_shli_i32(tmp2, tmp2, shift);
8813 tcg_gen_ext16u_i32(tmp, tmp);
8814 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8815 }
8816 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8817 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8818 store_reg(s, rd, tmp);
8819 } else {
2af9ab77
JB
8820 /* Data processing register constant shift. */
8821 if (rn == 15) {
7d1b0095 8822 tmp = tcg_temp_new_i32();
2af9ab77
JB
8823 tcg_gen_movi_i32(tmp, 0);
8824 } else {
8825 tmp = load_reg(s, rn);
8826 }
8827 tmp2 = load_reg(s, rm);
8828
8829 shiftop = (insn >> 4) & 3;
8830 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8831 conds = (insn & (1 << 20)) != 0;
8832 logic_cc = (conds && thumb2_logic_op(op));
8833 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8834 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8835 goto illegal_op;
7d1b0095 8836 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8837 if (rd != 15) {
8838 store_reg(s, rd, tmp);
8839 } else {
7d1b0095 8840 tcg_temp_free_i32(tmp);
2af9ab77 8841 }
3174f8e9 8842 }
9ee6e8bb
PB
8843 break;
8844 case 13: /* Misc data processing. */
8845 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8846 if (op < 4 && (insn & 0xf000) != 0xf000)
8847 goto illegal_op;
8848 switch (op) {
8849 case 0: /* Register controlled shift. */
8984bd2e
PB
8850 tmp = load_reg(s, rn);
8851 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8852 if ((insn & 0x70) != 0)
8853 goto illegal_op;
8854 op = (insn >> 21) & 3;
8984bd2e
PB
8855 logic_cc = (insn & (1 << 20)) != 0;
8856 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8857 if (logic_cc)
8858 gen_logic_CC(tmp);
21aeb343 8859 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8860 break;
8861 case 1: /* Sign/zero extend. */
5e3f878a 8862 tmp = load_reg(s, rm);
9ee6e8bb 8863 shift = (insn >> 4) & 3;
1301f322 8864 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8865 rotate; a shift is sufficient. */
8866 if (shift != 0)
f669df27 8867 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8868 op = (insn >> 20) & 7;
8869 switch (op) {
5e3f878a
PB
8870 case 0: gen_sxth(tmp); break;
8871 case 1: gen_uxth(tmp); break;
8872 case 2: gen_sxtb16(tmp); break;
8873 case 3: gen_uxtb16(tmp); break;
8874 case 4: gen_sxtb(tmp); break;
8875 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8876 default: goto illegal_op;
8877 }
8878 if (rn != 15) {
5e3f878a 8879 tmp2 = load_reg(s, rn);
9ee6e8bb 8880 if ((op >> 1) == 1) {
5e3f878a 8881 gen_add16(tmp, tmp2);
9ee6e8bb 8882 } else {
5e3f878a 8883 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8884 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8885 }
8886 }
5e3f878a 8887 store_reg(s, rd, tmp);
9ee6e8bb
PB
8888 break;
8889 case 2: /* SIMD add/subtract. */
8890 op = (insn >> 20) & 7;
8891 shift = (insn >> 4) & 7;
8892 if ((op & 3) == 3 || (shift & 3) == 3)
8893 goto illegal_op;
6ddbc6e4
PB
8894 tmp = load_reg(s, rn);
8895 tmp2 = load_reg(s, rm);
8896 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8897 tcg_temp_free_i32(tmp2);
6ddbc6e4 8898 store_reg(s, rd, tmp);
9ee6e8bb
PB
8899 break;
8900 case 3: /* Other data processing. */
8901 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8902 if (op < 4) {
8903 /* Saturating add/subtract. */
d9ba4830
PB
8904 tmp = load_reg(s, rn);
8905 tmp2 = load_reg(s, rm);
9ee6e8bb 8906 if (op & 1)
9ef39277 8907 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8908 if (op & 2)
9ef39277 8909 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8910 else
9ef39277 8911 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8912 tcg_temp_free_i32(tmp2);
9ee6e8bb 8913 } else {
d9ba4830 8914 tmp = load_reg(s, rn);
9ee6e8bb
PB
8915 switch (op) {
8916 case 0x0a: /* rbit */
d9ba4830 8917 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8918 break;
8919 case 0x08: /* rev */
66896cb8 8920 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8921 break;
8922 case 0x09: /* rev16 */
d9ba4830 8923 gen_rev16(tmp);
9ee6e8bb
PB
8924 break;
8925 case 0x0b: /* revsh */
d9ba4830 8926 gen_revsh(tmp);
9ee6e8bb
PB
8927 break;
8928 case 0x10: /* sel */
d9ba4830 8929 tmp2 = load_reg(s, rm);
7d1b0095 8930 tmp3 = tcg_temp_new_i32();
0ecb72a5 8931 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8932 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8933 tcg_temp_free_i32(tmp3);
8934 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8935 break;
8936 case 0x18: /* clz */
d9ba4830 8937 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8938 break;
8939 default:
8940 goto illegal_op;
8941 }
8942 }
d9ba4830 8943 store_reg(s, rd, tmp);
9ee6e8bb
PB
8944 break;
8945 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8946 op = (insn >> 4) & 0xf;
d9ba4830
PB
8947 tmp = load_reg(s, rn);
8948 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8949 switch ((insn >> 20) & 7) {
8950 case 0: /* 32 x 32 -> 32 */
d9ba4830 8951 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8952 tcg_temp_free_i32(tmp2);
9ee6e8bb 8953 if (rs != 15) {
d9ba4830 8954 tmp2 = load_reg(s, rs);
9ee6e8bb 8955 if (op)
d9ba4830 8956 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8957 else
d9ba4830 8958 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8959 tcg_temp_free_i32(tmp2);
9ee6e8bb 8960 }
9ee6e8bb
PB
8961 break;
8962 case 1: /* 16 x 16 -> 32 */
d9ba4830 8963 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8964 tcg_temp_free_i32(tmp2);
9ee6e8bb 8965 if (rs != 15) {
d9ba4830 8966 tmp2 = load_reg(s, rs);
9ef39277 8967 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8968 tcg_temp_free_i32(tmp2);
9ee6e8bb 8969 }
9ee6e8bb
PB
8970 break;
8971 case 2: /* Dual multiply add. */
8972 case 4: /* Dual multiply subtract. */
8973 if (op)
d9ba4830
PB
8974 gen_swap_half(tmp2);
8975 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8976 if (insn & (1 << 22)) {
e1d177b9 8977 /* This subtraction cannot overflow. */
d9ba4830 8978 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8979 } else {
e1d177b9
PM
8980 /* This addition cannot overflow 32 bits;
8981 * however it may overflow considered as a signed
8982 * operation, in which case we must set the Q flag.
8983 */
9ef39277 8984 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8985 }
7d1b0095 8986 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8987 if (rs != 15)
8988 {
d9ba4830 8989 tmp2 = load_reg(s, rs);
9ef39277 8990 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8991 tcg_temp_free_i32(tmp2);
9ee6e8bb 8992 }
9ee6e8bb
PB
8993 break;
8994 case 3: /* 32 * 16 -> 32msb */
8995 if (op)
d9ba4830 8996 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8997 else
d9ba4830 8998 gen_sxth(tmp2);
a7812ae4
PB
8999 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9000 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9001 tmp = tcg_temp_new_i32();
a7812ae4 9002 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 9003 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9004 if (rs != 15)
9005 {
d9ba4830 9006 tmp2 = load_reg(s, rs);
9ef39277 9007 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9008 tcg_temp_free_i32(tmp2);
9ee6e8bb 9009 }
9ee6e8bb 9010 break;
838fa72d
AJ
9011 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9012 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9013 if (rs != 15) {
838fa72d
AJ
9014 tmp = load_reg(s, rs);
9015 if (insn & (1 << 20)) {
9016 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9017 } else {
838fa72d 9018 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9019 }
2c0262af 9020 }
838fa72d
AJ
9021 if (insn & (1 << 4)) {
9022 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9023 }
9024 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9025 tmp = tcg_temp_new_i32();
838fa72d
AJ
9026 tcg_gen_trunc_i64_i32(tmp, tmp64);
9027 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9028 break;
9029 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9030 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9031 tcg_temp_free_i32(tmp2);
9ee6e8bb 9032 if (rs != 15) {
d9ba4830
PB
9033 tmp2 = load_reg(s, rs);
9034 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9035 tcg_temp_free_i32(tmp2);
5fd46862 9036 }
9ee6e8bb 9037 break;
2c0262af 9038 }
d9ba4830 9039 store_reg(s, rd, tmp);
2c0262af 9040 break;
9ee6e8bb
PB
9041 case 6: case 7: /* 64-bit multiply, Divide. */
9042 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9043 tmp = load_reg(s, rn);
9044 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9045 if ((op & 0x50) == 0x10) {
9046 /* sdiv, udiv */
47789990 9047 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9048 goto illegal_op;
47789990 9049 }
9ee6e8bb 9050 if (op & 0x20)
5e3f878a 9051 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9052 else
5e3f878a 9053 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9054 tcg_temp_free_i32(tmp2);
5e3f878a 9055 store_reg(s, rd, tmp);
9ee6e8bb
PB
9056 } else if ((op & 0xe) == 0xc) {
9057 /* Dual multiply accumulate long. */
9058 if (op & 1)
5e3f878a
PB
9059 gen_swap_half(tmp2);
9060 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9061 if (op & 0x10) {
5e3f878a 9062 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9063 } else {
5e3f878a 9064 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9065 }
7d1b0095 9066 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9067 /* BUGFIX */
9068 tmp64 = tcg_temp_new_i64();
9069 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9070 tcg_temp_free_i32(tmp);
a7812ae4
PB
9071 gen_addq(s, tmp64, rs, rd);
9072 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9073 tcg_temp_free_i64(tmp64);
2c0262af 9074 } else {
9ee6e8bb
PB
9075 if (op & 0x20) {
9076 /* Unsigned 64-bit multiply */
a7812ae4 9077 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9078 } else {
9ee6e8bb
PB
9079 if (op & 8) {
9080 /* smlalxy */
5e3f878a 9081 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9082 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9083 tmp64 = tcg_temp_new_i64();
9084 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9085 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9086 } else {
9087 /* Signed 64-bit multiply */
a7812ae4 9088 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9089 }
b5ff1b31 9090 }
9ee6e8bb
PB
9091 if (op & 4) {
9092 /* umaal */
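                    /* UMAAL: RdHi:RdLo = Rn * Rm + RdHi + RdLo.  Adding
                     * both 32-bit accumulators into the 64-bit product
                     * cannot overflow 64 bits.
                     */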
a7812ae4
PB
9093 gen_addq_lo(s, tmp64, rs);
9094 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9095 } else if (op & 0x40) {
9096 /* 64-bit accumulate. */
a7812ae4 9097 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9098 }
a7812ae4 9099 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9100 tcg_temp_free_i64(tmp64);
5fd46862 9101 }
2c0262af 9102 break;
9ee6e8bb
PB
9103 }
9104 break;
9105 case 6: case 7: case 14: case 15:
9106 /* Coprocessor. */
9107 if (((insn >> 24) & 3) == 3) {
9108 /* Translate into the equivalent ARM encoding. */
f06053e3 9109 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
9110 if (disas_neon_data_insn(env, s, insn))
9111 goto illegal_op;
6a57f3eb
WN
9112 } else if (((insn >> 8) & 0xe) == 10) {
9113 if (disas_vfp_insn(env, s, insn)) {
9114 goto illegal_op;
9115 }
9ee6e8bb
PB
9116 } else {
9117 if (insn & (1 << 28))
9118 goto illegal_op;
9119 if (disas_coproc_insn (env, s, insn))
9120 goto illegal_op;
9121 }
9122 break;
9123 case 8: case 9: case 10: case 11:
9124 if (insn & (1 << 15)) {
9125 /* Branches, misc control. */
9126 if (insn & 0x5000) {
9127 /* Unconditional branch. */
9128 /* signextend(hw1[10:0]) -> offset[31:12]. */
9129 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9130 /* hw1[10:0] -> offset[11:1]. */
9131 offset |= (insn & 0x7ff) << 1;
9132 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9133 offset[24:22] already have the same value because of the
9134 sign extension above. */
9135 offset ^= ((~insn) & (1 << 13)) << 10;
9136 offset ^= ((~insn) & (1 << 11)) << 11;
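                /* The net effect is imm32 = SignExtend(S:I1:I2:imm10:imm11:'0')
                 * as in the T32 B/BL/BLX encodings, with I1 = NOT(J1 EOR S)
                 * and I2 = NOT(J2 EOR S); the two XORs above compute I1 and
                 * I2 in place on top of the sign-extended value.
                 */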
9137
9ee6e8bb
PB
9138 if (insn & (1 << 14)) {
9139 /* Branch and link. */
3174f8e9 9140 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9141 }
3b46e624 9142
b0109805 9143 offset += s->pc;
9ee6e8bb
PB
9144 if (insn & (1 << 12)) {
9145 /* b/bl */
b0109805 9146 gen_jmp(s, offset);
9ee6e8bb
PB
9147 } else {
9148 /* blx */
b0109805 9149 offset &= ~(uint32_t)2;
be5e7a76 9150 /* thumb2 bx, no need to check */
b0109805 9151 gen_bx_im(s, offset);
2c0262af 9152 }
9ee6e8bb
PB
9153 } else if (((insn >> 23) & 7) == 7) {
9154 /* Misc control */
9155 if (insn & (1 << 13))
9156 goto illegal_op;
9157
9158 if (insn & (1 << 26)) {
9159 /* Secure monitor call (v6Z) */
e0c270d9
SW
9160 qemu_log_mask(LOG_UNIMP,
9161 "arm: unimplemented secure monitor call\n");
9ee6e8bb 9162 goto illegal_op; /* not implemented. */
2c0262af 9163 } else {
9ee6e8bb
PB
9164 op = (insn >> 20) & 7;
9165 switch (op) {
9166 case 0: /* msr cpsr. */
9167 if (IS_M(env)) {
8984bd2e
PB
9168 tmp = load_reg(s, rn);
9169 addr = tcg_const_i32(insn & 0xff);
9170 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9171 tcg_temp_free_i32(addr);
7d1b0095 9172 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9173 gen_lookup_tb(s);
9174 break;
9175 }
9176 /* fall through */
9177 case 1: /* msr spsr. */
9178 if (IS_M(env))
9179 goto illegal_op;
2fbac54b
FN
9180 tmp = load_reg(s, rn);
9181 if (gen_set_psr(s,
9ee6e8bb 9182 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 9183 op == 1, tmp))
9ee6e8bb
PB
9184 goto illegal_op;
9185 break;
9186 case 2: /* cps, nop-hint. */
9187 if (((insn >> 8) & 7) == 0) {
9188 gen_nop_hint(s, insn & 0xff);
9189 }
9190 /* Implemented as NOP in user mode. */
9191 if (IS_USER(s))
9192 break;
9193 offset = 0;
9194 imm = 0;
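                    /* Decode CPS: 'offset' accumulates the mask of CPSR bits
                     * to change and 'imm' their new values.  imod<1> (bit 10)
                     * selects which of A/I/F (bits [7:5]) are affected,
                     * imod<0> (bit 9) means "disable" (set the masks to 1),
                     * and M (bit 8) additionally changes the mode field.
                     */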
9195 if (insn & (1 << 10)) {
9196 if (insn & (1 << 7))
9197 offset |= CPSR_A;
9198 if (insn & (1 << 6))
9199 offset |= CPSR_I;
9200 if (insn & (1 << 5))
9201 offset |= CPSR_F;
9202 if (insn & (1 << 9))
9203 imm = CPSR_A | CPSR_I | CPSR_F;
9204 }
9205 if (insn & (1 << 8)) {
9206 offset |= 0x1f;
9207 imm |= (insn & 0x1f);
9208 }
9209 if (offset) {
2fbac54b 9210 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
9211 }
9212 break;
9213 case 3: /* Special control operations. */
426f5abc 9214 ARCH(7);
9ee6e8bb
PB
9215 op = (insn >> 4) & 0xf;
9216 switch (op) {
9217 case 2: /* clrex */
426f5abc 9218 gen_clrex(s);
9ee6e8bb
PB
9219 break;
9220 case 4: /* dsb */
9221 case 5: /* dmb */
9222 case 6: /* isb */
9223 /* These execute as NOPs. */
9ee6e8bb
PB
9224 break;
9225 default:
9226 goto illegal_op;
9227 }
9228 break;
9229 case 4: /* bxj */
9230 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9231 tmp = load_reg(s, rn);
9232 gen_bx(s, tmp);
9ee6e8bb
PB
9233 break;
9234 case 5: /* Exception return. */
b8b45b68
RV
9235 if (IS_USER(s)) {
9236 goto illegal_op;
9237 }
9238 if (rn != 14 || rd != 15) {
9239 goto illegal_op;
9240 }
9241 tmp = load_reg(s, rn);
9242 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9243 gen_exception_return(s, tmp);
9244 break;
9ee6e8bb 9245 case 6: /* mrs cpsr. */
7d1b0095 9246 tmp = tcg_temp_new_i32();
9ee6e8bb 9247 if (IS_M(env)) {
8984bd2e
PB
9248 addr = tcg_const_i32(insn & 0xff);
9249 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 9250 tcg_temp_free_i32(addr);
9ee6e8bb 9251 } else {
9ef39277 9252 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9253 }
8984bd2e 9254 store_reg(s, rd, tmp);
9ee6e8bb
PB
9255 break;
9256 case 7: /* mrs spsr. */
9257 /* Not accessible in user mode. */
9258 if (IS_USER(s) || IS_M(env))
9259 goto illegal_op;
d9ba4830
PB
9260 tmp = load_cpu_field(spsr);
9261 store_reg(s, rd, tmp);
9ee6e8bb 9262 break;
2c0262af
FB
9263 }
9264 }
9ee6e8bb
PB
9265 } else {
9266 /* Conditional branch. */
9267 op = (insn >> 22) & 0xf;
9268 /* Generate a conditional jump to next instruction. */
9269 s->condlabel = gen_new_label();
39fb730a 9270 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
9271 s->condjmp = 1;
9272
9273 /* offset[11:1] = insn[10:0] */
9274 offset = (insn & 0x7ff) << 1;
9275 /* offset[17:12] = insn[21:16]. */
9276 offset |= (insn & 0x003f0000) >> 4;
9277 /* offset[31:20] = insn[26]. */
9278 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9279 /* offset[18] = insn[13]. */
9280 offset |= (insn & (1 << 13)) << 5;
9281 /* offset[19] = insn[11]. */
9282 offset |= (insn & (1 << 11)) << 8;
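                /* i.e. imm32 = SignExtend(S:J2:J1:imm6:imm11:'0') as in the
                 * T3 conditional branch encoding; here J1/J2 are used
                 * directly, without the XOR-with-S trick of B/BL above.
                 */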
9283
9284 /* jump to the offset */
b0109805 9285 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
9286 }
9287 } else {
9288 /* Data processing immediate. */
9289 if (insn & (1 << 25)) {
9290 if (insn & (1 << 24)) {
9291 if (insn & (1 << 20))
9292 goto illegal_op;
9293 /* Bitfield/Saturate. */
9294 op = (insn >> 21) & 7;
9295 imm = insn & 0x1f;
9296 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
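                /* shift is imm3:imm2 (the lsb / shift amount).  imm is
                 * widthm1 for SBFX/UBFX, msb for BFI/BFC, and the
                 * saturate-to bit position for SSAT/USAT, depending on op.
                 */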
6ddbc6e4 9297 if (rn == 15) {
7d1b0095 9298 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
9299 tcg_gen_movi_i32(tmp, 0);
9300 } else {
9301 tmp = load_reg(s, rn);
9302 }
9ee6e8bb
PB
9303 switch (op) {
9304 case 2: /* Signed bitfield extract. */
9305 imm++;
9306 if (shift + imm > 32)
9307 goto illegal_op;
9308 if (imm < 32)
6ddbc6e4 9309 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
9310 break;
9311 case 6: /* Unsigned bitfield extract. */
9312 imm++;
9313 if (shift + imm > 32)
9314 goto illegal_op;
9315 if (imm < 32)
6ddbc6e4 9316 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
9317 break;
9318 case 3: /* Bitfield insert/clear. */
9319 if (imm < shift)
9320 goto illegal_op;
9321 imm = imm + 1 - shift;
9322 if (imm != 32) {
6ddbc6e4 9323 tmp2 = load_reg(s, rd);
d593c48e 9324 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 9325 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9326 }
9327 break;
9328 case 7:
9329 goto illegal_op;
9330 default: /* Saturate. */
9ee6e8bb
PB
9331 if (shift) {
9332 if (op & 1)
6ddbc6e4 9333 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9334 else
6ddbc6e4 9335 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 9336 }
6ddbc6e4 9337 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
9338 if (op & 4) {
9339 /* Unsigned. */
9ee6e8bb 9340 if ((op & 1) && shift == 0)
9ef39277 9341 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9342 else
9ef39277 9343 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 9344 } else {
9ee6e8bb 9345 /* Signed. */
9ee6e8bb 9346 if ((op & 1) && shift == 0)
9ef39277 9347 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9348 else
9ef39277 9349 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 9350 }
b75263d6 9351 tcg_temp_free_i32(tmp2);
9ee6e8bb 9352 break;
2c0262af 9353 }
6ddbc6e4 9354 store_reg(s, rd, tmp);
9ee6e8bb
PB
9355 } else {
9356 imm = ((insn & 0x04000000) >> 15)
9357 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9358 if (insn & (1 << 22)) {
9359 /* 16-bit immediate. */
9360 imm |= (insn >> 4) & 0xf000;
9361 if (insn & (1 << 23)) {
9362 /* movt */
5e3f878a 9363 tmp = load_reg(s, rd);
86831435 9364 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9365 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9366 } else {
9ee6e8bb 9367 /* movw */
7d1b0095 9368 tmp = tcg_temp_new_i32();
5e3f878a 9369 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9370 }
9371 } else {
9ee6e8bb
PB
9372 /* Add/sub 12-bit immediate. */
9373 if (rn == 15) {
b0109805 9374 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9375 if (insn & (1 << 23))
b0109805 9376 offset -= imm;
9ee6e8bb 9377 else
b0109805 9378 offset += imm;
7d1b0095 9379 tmp = tcg_temp_new_i32();
5e3f878a 9380 tcg_gen_movi_i32(tmp, offset);
2c0262af 9381 } else {
5e3f878a 9382 tmp = load_reg(s, rn);
9ee6e8bb 9383 if (insn & (1 << 23))
5e3f878a 9384 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 9385 else
5e3f878a 9386 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 9387 }
9ee6e8bb 9388 }
5e3f878a 9389 store_reg(s, rd, tmp);
191abaa2 9390 }
9ee6e8bb
PB
9391 } else {
9392 int shifter_out = 0;
9393 /* modified 12-bit immediate. */
9394 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9395 imm = (insn & 0xff);
9396 switch (shift) {
9397 case 0: /* XY */
9398 /* Nothing to do. */
9399 break;
9400 case 1: /* 00XY00XY */
9401 imm |= imm << 16;
9402 break;
9403 case 2: /* XY00XY00 */
9404 imm |= imm << 16;
9405 imm <<= 8;
9406 break;
9407 case 3: /* XYXYXYXY */
9408 imm |= imm << 16;
9409 imm |= imm << 8;
9410 break;
9411 default: /* Rotated constant. */
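                    /* i:imm3:a form a 5-bit rotation of at least 8; the
                     * constant is the 8-bit immediate with bit 7 forced to 1,
                     * rotated right by that amount.  Because the value fits
                     * in 8 bits and the rotation is >= 8, the left shift by
                     * (32 - shift) below is equivalent to the rotate.
                     */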
9412 shift = (shift << 1) | (imm >> 7);
9413 imm |= 0x80;
9414 imm = imm << (32 - shift);
9415 shifter_out = 1;
9416 break;
b5ff1b31 9417 }
7d1b0095 9418 tmp2 = tcg_temp_new_i32();
3174f8e9 9419 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9420 rn = (insn >> 16) & 0xf;
3174f8e9 9421 if (rn == 15) {
7d1b0095 9422 tmp = tcg_temp_new_i32();
3174f8e9
FN
9423 tcg_gen_movi_i32(tmp, 0);
9424 } else {
9425 tmp = load_reg(s, rn);
9426 }
9ee6e8bb
PB
9427 op = (insn >> 21) & 0xf;
9428 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9429 shifter_out, tmp, tmp2))
9ee6e8bb 9430 goto illegal_op;
7d1b0095 9431 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9432 rd = (insn >> 8) & 0xf;
9433 if (rd != 15) {
3174f8e9
FN
9434 store_reg(s, rd, tmp);
9435 } else {
7d1b0095 9436 tcg_temp_free_i32(tmp);
2c0262af 9437 }
2c0262af 9438 }
9ee6e8bb
PB
9439 }
9440 break;
9441 case 12: /* Load/store single data item. */
9442 {
9443 int postinc = 0;
9444 int writeback = 0;
b0109805 9445 int user;
9ee6e8bb
PB
9446 if ((insn & 0x01100000) == 0x01000000) {
9447 if (disas_neon_ls_insn(env, s, insn))
c1713132 9448 goto illegal_op;
9ee6e8bb
PB
9449 break;
9450 }
a2fdc890
PM
9451 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9452 if (rs == 15) {
9453 if (!(insn & (1 << 20))) {
9454 goto illegal_op;
9455 }
9456 if (op != 2) {
9457 /* Byte or halfword load space with dest == r15: memory hints.
9458 * Catch them early so we don't emit pointless addressing code.
9459 * This space is a mix of:
9460 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9461 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9462 * cores)
9463 * unallocated hints, which must be treated as NOPs
9464 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9465 * which is easiest for the decoding logic
9466 * Some space which must UNDEF
9467 */
9468 int op1 = (insn >> 23) & 3;
9469 int op2 = (insn >> 6) & 0x3f;
9470 if (op & 2) {
9471 goto illegal_op;
9472 }
9473 if (rn == 15) {
02afbf64
PM
9474 /* UNPREDICTABLE, unallocated hint or
9475 * PLD/PLDW/PLI (literal)
9476 */
a2fdc890
PM
9477 return 0;
9478 }
9479 if (op1 & 1) {
02afbf64 9480 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9481 }
9482 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9483 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9484 }
9485 /* UNDEF space, or an UNPREDICTABLE */
9486 return 1;
9487 }
9488 }
b0109805 9489 user = IS_USER(s);
9ee6e8bb 9490 if (rn == 15) {
7d1b0095 9491 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9492 /* PC relative. */
9493 /* s->pc has already been incremented by 4. */
9494 imm = s->pc & 0xfffffffc;
9495 if (insn & (1 << 23))
9496 imm += insn & 0xfff;
9497 else
9498 imm -= insn & 0xfff;
b0109805 9499 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9500 } else {
b0109805 9501 addr = load_reg(s, rn);
9ee6e8bb
PB
9502 if (insn & (1 << 23)) {
9503 /* Positive offset. */
9504 imm = insn & 0xfff;
b0109805 9505 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9506 } else {
9ee6e8bb 9507 imm = insn & 0xff;
2a0308c5
PM
9508 switch ((insn >> 8) & 0xf) {
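                    /* For the immediate forms, bits [11:8] of the second
                     * halfword are 1:P:U:W: 0x9/0xb post-index down/up,
                     * 0xc negative offset, 0xd/0xf pre-index down/up, and
                     * 0xe the unprivileged (LDRT/STRT) form.  0x0 selects
                     * the register-offset form.
                     */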
9509 case 0x0: /* Shifted Register. */
9ee6e8bb 9510 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9511 if (shift > 3) {
9512 tcg_temp_free_i32(addr);
18c9b560 9513 goto illegal_op;
2a0308c5 9514 }
b26eefb6 9515 tmp = load_reg(s, rm);
9ee6e8bb 9516 if (shift)
b26eefb6 9517 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9518 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9519 tcg_temp_free_i32(tmp);
9ee6e8bb 9520 break;
2a0308c5 9521 case 0xc: /* Negative offset. */
b0109805 9522 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9523 break;
2a0308c5 9524 case 0xe: /* User privilege. */
b0109805
PB
9525 tcg_gen_addi_i32(addr, addr, imm);
9526 user = 1;
9ee6e8bb 9527 break;
2a0308c5 9528 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9529 imm = -imm;
9530 /* Fall through. */
2a0308c5 9531 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9532 postinc = 1;
9533 writeback = 1;
9534 break;
2a0308c5 9535 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9536 imm = -imm;
9537 /* Fall through. */
2a0308c5 9538 case 0xf: /* Pre-increment. */
b0109805 9539 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9540 writeback = 1;
9541 break;
9542 default:
2a0308c5 9543 tcg_temp_free_i32(addr);
b7bcbe95 9544 goto illegal_op;
9ee6e8bb
PB
9545 }
9546 }
9547 }
9ee6e8bb
PB
9548 if (insn & (1 << 20)) {
9549 /* Load. */
5a839c0d 9550 tmp = tcg_temp_new_i32();
a2fdc890 9551 switch (op) {
5a839c0d 9552 case 0:
08307563 9553 gen_aa32_ld8u(tmp, addr, user);
5a839c0d
PM
9554 break;
9555 case 4:
08307563 9556 gen_aa32_ld8s(tmp, addr, user);
5a839c0d
PM
9557 break;
9558 case 1:
08307563 9559 gen_aa32_ld16u(tmp, addr, user);
5a839c0d
PM
9560 break;
9561 case 5:
08307563 9562 gen_aa32_ld16s(tmp, addr, user);
5a839c0d
PM
9563 break;
9564 case 2:
08307563 9565 gen_aa32_ld32u(tmp, addr, user);
5a839c0d 9566 break;
2a0308c5 9567 default:
5a839c0d 9568 tcg_temp_free_i32(tmp);
2a0308c5
PM
9569 tcg_temp_free_i32(addr);
9570 goto illegal_op;
a2fdc890
PM
9571 }
9572 if (rs == 15) {
9573 gen_bx(s, tmp);
9ee6e8bb 9574 } else {
a2fdc890 9575 store_reg(s, rs, tmp);
9ee6e8bb
PB
9576 }
9577 } else {
9578 /* Store. */
b0109805 9579 tmp = load_reg(s, rs);
9ee6e8bb 9580 switch (op) {
5a839c0d 9581 case 0:
08307563 9582 gen_aa32_st8(tmp, addr, user);
5a839c0d
PM
9583 break;
9584 case 1:
08307563 9585 gen_aa32_st16(tmp, addr, user);
5a839c0d
PM
9586 break;
9587 case 2:
08307563 9588 gen_aa32_st32(tmp, addr, user);
5a839c0d 9589 break;
2a0308c5 9590 default:
5a839c0d 9591 tcg_temp_free_i32(tmp);
2a0308c5
PM
9592 tcg_temp_free_i32(addr);
9593 goto illegal_op;
b7bcbe95 9594 }
5a839c0d 9595 tcg_temp_free_i32(tmp);
2c0262af 9596 }
9ee6e8bb 9597 if (postinc)
b0109805
PB
9598 tcg_gen_addi_i32(addr, addr, imm);
9599 if (writeback) {
9600 store_reg(s, rn, addr);
9601 } else {
7d1b0095 9602 tcg_temp_free_i32(addr);
b0109805 9603 }
9ee6e8bb
PB
9604 }
9605 break;
9606 default:
9607 goto illegal_op;
2c0262af 9608 }
9ee6e8bb
PB
9609 return 0;
9610illegal_op:
9611 return 1;
2c0262af
FB
9612}
9613
0ecb72a5 9614static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9615{
9616 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9617 int32_t offset;
9618 int i;
39d5492a
PM
9619 TCGv_i32 tmp;
9620 TCGv_i32 tmp2;
9621 TCGv_i32 addr;
99c475ab 9622
9ee6e8bb
PB
9623 if (s->condexec_mask) {
9624 cond = s->condexec_cond;
bedd2912
JB
9625 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9626 s->condlabel = gen_new_label();
39fb730a 9627 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
9628 s->condjmp = 1;
9629 }
9ee6e8bb
PB
9630 }
9631
d31dd73e 9632 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9633 s->pc += 2;
b5ff1b31 9634
99c475ab
FB
9635 switch (insn >> 12) {
9636 case 0: case 1:
396e467c 9637
99c475ab
FB
9638 rd = insn & 7;
9639 op = (insn >> 11) & 3;
9640 if (op == 3) {
9641 /* add/subtract */
9642 rn = (insn >> 3) & 7;
396e467c 9643 tmp = load_reg(s, rn);
99c475ab
FB
9644 if (insn & (1 << 10)) {
9645 /* immediate */
7d1b0095 9646 tmp2 = tcg_temp_new_i32();
396e467c 9647 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9648 } else {
9649 /* reg */
9650 rm = (insn >> 6) & 7;
396e467c 9651 tmp2 = load_reg(s, rm);
99c475ab 9652 }
9ee6e8bb
PB
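            /* Inside an IT block (condexec_mask != 0) these 16-bit ADD/SUB
             * encodings do not set the flags, hence the plain add/sub;
             * outside an IT block they are ADDS/SUBS.
             */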
9653 if (insn & (1 << 9)) {
9654 if (s->condexec_mask)
396e467c 9655 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9656 else
72485ec4 9657 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9658 } else {
9659 if (s->condexec_mask)
396e467c 9660 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9661 else
72485ec4 9662 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9663 }
7d1b0095 9664 tcg_temp_free_i32(tmp2);
396e467c 9665 store_reg(s, rd, tmp);
99c475ab
FB
9666 } else {
9667 /* shift immediate */
9668 rm = (insn >> 3) & 7;
9669 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9670 tmp = load_reg(s, rm);
9671 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9672 if (!s->condexec_mask)
9673 gen_logic_CC(tmp);
9674 store_reg(s, rd, tmp);
99c475ab
FB
9675 }
9676 break;
9677 case 2: case 3:
9678 /* arithmetic large immediate */
9679 op = (insn >> 11) & 3;
9680 rd = (insn >> 8) & 0x7;
396e467c 9681 if (op == 0) { /* mov */
7d1b0095 9682 tmp = tcg_temp_new_i32();
396e467c 9683 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9684 if (!s->condexec_mask)
396e467c
FN
9685 gen_logic_CC(tmp);
9686 store_reg(s, rd, tmp);
9687 } else {
9688 tmp = load_reg(s, rd);
7d1b0095 9689 tmp2 = tcg_temp_new_i32();
396e467c
FN
9690 tcg_gen_movi_i32(tmp2, insn & 0xff);
9691 switch (op) {
9692 case 1: /* cmp */
72485ec4 9693 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9694 tcg_temp_free_i32(tmp);
9695 tcg_temp_free_i32(tmp2);
396e467c
FN
9696 break;
9697 case 2: /* add */
9698 if (s->condexec_mask)
9699 tcg_gen_add_i32(tmp, tmp, tmp2);
9700 else
72485ec4 9701 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9702 tcg_temp_free_i32(tmp2);
396e467c
FN
9703 store_reg(s, rd, tmp);
9704 break;
9705 case 3: /* sub */
9706 if (s->condexec_mask)
9707 tcg_gen_sub_i32(tmp, tmp, tmp2);
9708 else
72485ec4 9709 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9710 tcg_temp_free_i32(tmp2);
396e467c
FN
9711 store_reg(s, rd, tmp);
9712 break;
9713 }
99c475ab 9714 }
99c475ab
FB
9715 break;
9716 case 4:
9717 if (insn & (1 << 11)) {
9718 rd = (insn >> 8) & 7;
5899f386
FB
9719 /* load pc-relative. Bit 1 of PC is ignored. */
9720 val = s->pc + 2 + ((insn & 0xff) * 4);
9721 val &= ~(uint32_t)2;
7d1b0095 9722 addr = tcg_temp_new_i32();
b0109805 9723 tcg_gen_movi_i32(addr, val);
c40c8556 9724 tmp = tcg_temp_new_i32();
08307563 9725 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7d1b0095 9726 tcg_temp_free_i32(addr);
b0109805 9727 store_reg(s, rd, tmp);
99c475ab
FB
9728 break;
9729 }
9730 if (insn & (1 << 10)) {
9731 /* data processing extended or blx */
9732 rd = (insn & 7) | ((insn >> 4) & 8);
9733 rm = (insn >> 3) & 0xf;
9734 op = (insn >> 8) & 3;
9735 switch (op) {
9736 case 0: /* add */
396e467c
FN
9737 tmp = load_reg(s, rd);
9738 tmp2 = load_reg(s, rm);
9739 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9740 tcg_temp_free_i32(tmp2);
396e467c 9741 store_reg(s, rd, tmp);
99c475ab
FB
9742 break;
9743 case 1: /* cmp */
396e467c
FN
9744 tmp = load_reg(s, rd);
9745 tmp2 = load_reg(s, rm);
72485ec4 9746 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9747 tcg_temp_free_i32(tmp2);
9748 tcg_temp_free_i32(tmp);
99c475ab
FB
9749 break;
9750 case 2: /* mov/cpy */
396e467c
FN
9751 tmp = load_reg(s, rm);
9752 store_reg(s, rd, tmp);
99c475ab
FB
9753 break;
9754 case 3:/* branch [and link] exchange thumb register */
b0109805 9755 tmp = load_reg(s, rm);
99c475ab 9756 if (insn & (1 << 7)) {
be5e7a76 9757 ARCH(5);
99c475ab 9758 val = (uint32_t)s->pc | 1;
7d1b0095 9759 tmp2 = tcg_temp_new_i32();
b0109805
PB
9760 tcg_gen_movi_i32(tmp2, val);
9761 store_reg(s, 14, tmp2);
99c475ab 9762 }
be5e7a76 9763 /* already thumb, no need to check */
d9ba4830 9764 gen_bx(s, tmp);
99c475ab
FB
9765 break;
9766 }
9767 break;
9768 }
9769
9770 /* data processing register */
9771 rd = insn & 7;
9772 rm = (insn >> 3) & 7;
9773 op = (insn >> 6) & 0xf;
9774 if (op == 2 || op == 3 || op == 4 || op == 7) {
9775 /* the shift/rotate ops take their operands the other way round:
 * swap rd and rm so that tmp ends up as the shift amount (Rs) and
 * tmp2 as the value to shift; the result is then stored via rm */
9776 val = rm;
9777 rm = rd;
9778 rd = val;
9779 val = 1;
9780 } else {
9781 val = 0;
9782 }
9783
396e467c 9784 if (op == 9) { /* neg */
7d1b0095 9785 tmp = tcg_temp_new_i32();
396e467c
FN
9786 tcg_gen_movi_i32(tmp, 0);
9787 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9788 tmp = load_reg(s, rd);
9789 } else {
39d5492a 9790 TCGV_UNUSED_I32(tmp);
396e467c 9791 }
99c475ab 9792
396e467c 9793 tmp2 = load_reg(s, rm);
5899f386 9794 switch (op) {
99c475ab 9795 case 0x0: /* and */
396e467c 9796 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9797 if (!s->condexec_mask)
396e467c 9798 gen_logic_CC(tmp);
99c475ab
FB
9799 break;
9800 case 0x1: /* eor */
396e467c 9801 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9802 if (!s->condexec_mask)
396e467c 9803 gen_logic_CC(tmp);
99c475ab
FB
9804 break;
9805 case 0x2: /* lsl */
9ee6e8bb 9806 if (s->condexec_mask) {
365af80e 9807 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9808 } else {
9ef39277 9809 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9810 gen_logic_CC(tmp2);
9ee6e8bb 9811 }
99c475ab
FB
9812 break;
9813 case 0x3: /* lsr */
9ee6e8bb 9814 if (s->condexec_mask) {
365af80e 9815 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9816 } else {
9ef39277 9817 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9818 gen_logic_CC(tmp2);
9ee6e8bb 9819 }
99c475ab
FB
9820 break;
9821 case 0x4: /* asr */
9ee6e8bb 9822 if (s->condexec_mask) {
365af80e 9823 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9824 } else {
9ef39277 9825 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9826 gen_logic_CC(tmp2);
9ee6e8bb 9827 }
99c475ab
FB
9828 break;
9829 case 0x5: /* adc */
49b4c31e 9830 if (s->condexec_mask) {
396e467c 9831 gen_adc(tmp, tmp2);
49b4c31e
RH
9832 } else {
9833 gen_adc_CC(tmp, tmp, tmp2);
9834 }
99c475ab
FB
9835 break;
9836 case 0x6: /* sbc */
2de68a49 9837 if (s->condexec_mask) {
396e467c 9838 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
9839 } else {
9840 gen_sbc_CC(tmp, tmp, tmp2);
9841 }
99c475ab
FB
9842 break;
9843 case 0x7: /* ror */
9ee6e8bb 9844 if (s->condexec_mask) {
f669df27
AJ
9845 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9846 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9847 } else {
9ef39277 9848 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9849 gen_logic_CC(tmp2);
9ee6e8bb 9850 }
99c475ab
FB
9851 break;
9852 case 0x8: /* tst */
396e467c
FN
9853 tcg_gen_and_i32(tmp, tmp, tmp2);
9854 gen_logic_CC(tmp);
99c475ab 9855 rd = 16;
5899f386 9856 break;
99c475ab 9857 case 0x9: /* neg */
9ee6e8bb 9858 if (s->condexec_mask)
396e467c 9859 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9860 else
72485ec4 9861 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9862 break;
9863 case 0xa: /* cmp */
72485ec4 9864 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9865 rd = 16;
9866 break;
9867 case 0xb: /* cmn */
72485ec4 9868 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9869 rd = 16;
9870 break;
9871 case 0xc: /* orr */
396e467c 9872 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9873 if (!s->condexec_mask)
396e467c 9874 gen_logic_CC(tmp);
99c475ab
FB
9875 break;
9876 case 0xd: /* mul */
7b2919a0 9877 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9878 if (!s->condexec_mask)
396e467c 9879 gen_logic_CC(tmp);
99c475ab
FB
9880 break;
9881 case 0xe: /* bic */
f669df27 9882 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9883 if (!s->condexec_mask)
396e467c 9884 gen_logic_CC(tmp);
99c475ab
FB
9885 break;
9886 case 0xf: /* mvn */
396e467c 9887 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9888 if (!s->condexec_mask)
396e467c 9889 gen_logic_CC(tmp2);
99c475ab 9890 val = 1;
5899f386 9891 rm = rd;
99c475ab
FB
9892 break;
9893 }
9894 if (rd != 16) {
396e467c
FN
9895 if (val) {
9896 store_reg(s, rm, tmp2);
9897 if (op != 0xf)
7d1b0095 9898 tcg_temp_free_i32(tmp);
396e467c
FN
9899 } else {
9900 store_reg(s, rd, tmp);
7d1b0095 9901 tcg_temp_free_i32(tmp2);
396e467c
FN
9902 }
9903 } else {
7d1b0095
PM
9904 tcg_temp_free_i32(tmp);
9905 tcg_temp_free_i32(tmp2);
99c475ab
FB
9906 }
9907 break;
9908
9909 case 5:
9910 /* load/store register offset. */
9911 rd = insn & 7;
9912 rn = (insn >> 3) & 7;
9913 rm = (insn >> 6) & 7;
9914 op = (insn >> 9) & 7;
b0109805 9915 addr = load_reg(s, rn);
b26eefb6 9916 tmp = load_reg(s, rm);
b0109805 9917 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9918 tcg_temp_free_i32(tmp);
99c475ab 9919
c40c8556 9920 if (op < 3) { /* store */
b0109805 9921 tmp = load_reg(s, rd);
c40c8556
PM
9922 } else {
9923 tmp = tcg_temp_new_i32();
9924 }
99c475ab
FB
9925
9926 switch (op) {
9927 case 0: /* str */
08307563 9928 gen_aa32_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9929 break;
9930 case 1: /* strh */
08307563 9931 gen_aa32_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9932 break;
9933 case 2: /* strb */
08307563 9934 gen_aa32_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9935 break;
9936 case 3: /* ldrsb */
08307563 9937 gen_aa32_ld8s(tmp, addr, IS_USER(s));
99c475ab
FB
9938 break;
9939 case 4: /* ldr */
08307563 9940 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9941 break;
9942 case 5: /* ldrh */
08307563 9943 gen_aa32_ld16u(tmp, addr, IS_USER(s));
99c475ab
FB
9944 break;
9945 case 6: /* ldrb */
08307563 9946 gen_aa32_ld8u(tmp, addr, IS_USER(s));
99c475ab
FB
9947 break;
9948 case 7: /* ldrsh */
08307563 9949 gen_aa32_ld16s(tmp, addr, IS_USER(s));
99c475ab
FB
9950 break;
9951 }
c40c8556 9952 if (op >= 3) { /* load */
b0109805 9953 store_reg(s, rd, tmp);
c40c8556
PM
9954 } else {
9955 tcg_temp_free_i32(tmp);
9956 }
7d1b0095 9957 tcg_temp_free_i32(addr);
99c475ab
FB
9958 break;
9959
9960 case 6:
9961 /* load/store word immediate offset */
9962 rd = insn & 7;
9963 rn = (insn >> 3) & 7;
b0109805 9964 addr = load_reg(s, rn);
99c475ab 9965 val = (insn >> 4) & 0x7c;
b0109805 9966 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9967
9968 if (insn & (1 << 11)) {
9969 /* load */
c40c8556 9970 tmp = tcg_temp_new_i32();
08307563 9971 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 9972 store_reg(s, rd, tmp);
99c475ab
FB
9973 } else {
9974 /* store */
b0109805 9975 tmp = load_reg(s, rd);
08307563 9976 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9977 tcg_temp_free_i32(tmp);
99c475ab 9978 }
7d1b0095 9979 tcg_temp_free_i32(addr);
99c475ab
FB
9980 break;
9981
9982 case 7:
9983 /* load/store byte immediate offset */
9984 rd = insn & 7;
9985 rn = (insn >> 3) & 7;
b0109805 9986 addr = load_reg(s, rn);
99c475ab 9987 val = (insn >> 6) & 0x1f;
b0109805 9988 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9989
9990 if (insn & (1 << 11)) {
9991 /* load */
c40c8556 9992 tmp = tcg_temp_new_i32();
08307563 9993 gen_aa32_ld8u(tmp, addr, IS_USER(s));
b0109805 9994 store_reg(s, rd, tmp);
99c475ab
FB
9995 } else {
9996 /* store */
b0109805 9997 tmp = load_reg(s, rd);
08307563 9998 gen_aa32_st8(tmp, addr, IS_USER(s));
c40c8556 9999 tcg_temp_free_i32(tmp);
99c475ab 10000 }
7d1b0095 10001 tcg_temp_free_i32(addr);
99c475ab
FB
10002 break;
10003
10004 case 8:
10005 /* load/store halfword immediate offset */
10006 rd = insn & 7;
10007 rn = (insn >> 3) & 7;
b0109805 10008 addr = load_reg(s, rn);
99c475ab 10009 val = (insn >> 5) & 0x3e;
b0109805 10010 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10011
10012 if (insn & (1 << 11)) {
10013 /* load */
c40c8556 10014 tmp = tcg_temp_new_i32();
08307563 10015 gen_aa32_ld16u(tmp, addr, IS_USER(s));
b0109805 10016 store_reg(s, rd, tmp);
99c475ab
FB
10017 } else {
10018 /* store */
b0109805 10019 tmp = load_reg(s, rd);
08307563 10020 gen_aa32_st16(tmp, addr, IS_USER(s));
c40c8556 10021 tcg_temp_free_i32(tmp);
99c475ab 10022 }
7d1b0095 10023 tcg_temp_free_i32(addr);
99c475ab
FB
10024 break;
10025
10026 case 9:
10027 /* load/store from stack */
10028 rd = (insn >> 8) & 7;
b0109805 10029 addr = load_reg(s, 13);
99c475ab 10030 val = (insn & 0xff) * 4;
b0109805 10031 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10032
10033 if (insn & (1 << 11)) {
10034 /* load */
c40c8556 10035 tmp = tcg_temp_new_i32();
08307563 10036 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10037 store_reg(s, rd, tmp);
99c475ab
FB
10038 } else {
10039 /* store */
b0109805 10040 tmp = load_reg(s, rd);
08307563 10041 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10042 tcg_temp_free_i32(tmp);
99c475ab 10043 }
7d1b0095 10044 tcg_temp_free_i32(addr);
99c475ab
FB
10045 break;
10046
10047 case 10:
10048 /* add to high reg */
10049 rd = (insn >> 8) & 7;
5899f386
FB
10050 if (insn & (1 << 11)) {
10051 /* SP */
5e3f878a 10052 tmp = load_reg(s, 13);
5899f386
FB
10053 } else {
10054 /* PC. bit 1 is ignored. */
7d1b0095 10055 tmp = tcg_temp_new_i32();
5e3f878a 10056 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 10057 }
99c475ab 10058 val = (insn & 0xff) * 4;
5e3f878a
PB
10059 tcg_gen_addi_i32(tmp, tmp, val);
10060 store_reg(s, rd, tmp);
99c475ab
FB
10061 break;
10062
10063 case 11:
10064 /* misc */
10065 op = (insn >> 8) & 0xf;
10066 switch (op) {
10067 case 0:
10068 /* adjust stack pointer */
b26eefb6 10069 tmp = load_reg(s, 13);
99c475ab
FB
10070 val = (insn & 0x7f) * 4;
10071 if (insn & (1 << 7))
6a0d8a1d 10072 val = -(int32_t)val;
b26eefb6
PB
10073 tcg_gen_addi_i32(tmp, tmp, val);
10074 store_reg(s, 13, tmp);
99c475ab
FB
10075 break;
10076
9ee6e8bb
PB
10077 case 2: /* sign/zero extend. */
10078 ARCH(6);
10079 rd = insn & 7;
10080 rm = (insn >> 3) & 7;
b0109805 10081 tmp = load_reg(s, rm);
9ee6e8bb 10082 switch ((insn >> 6) & 3) {
b0109805
PB
10083 case 0: gen_sxth(tmp); break;
10084 case 1: gen_sxtb(tmp); break;
10085 case 2: gen_uxth(tmp); break;
10086 case 3: gen_uxtb(tmp); break;
9ee6e8bb 10087 }
b0109805 10088 store_reg(s, rd, tmp);
9ee6e8bb 10089 break;
99c475ab
FB
10090 case 4: case 5: case 0xc: case 0xd:
10091 /* push/pop */
b0109805 10092 addr = load_reg(s, 13);
5899f386
FB
10093 if (insn & (1 << 8))
10094 offset = 4;
99c475ab 10095 else
5899f386
FB
10096 offset = 0;
10097 for (i = 0; i < 8; i++) {
10098 if (insn & (1 << i))
10099 offset += 4;
10100 }
10101 if ((insn & (1 << 11)) == 0) {
b0109805 10102 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10103 }
99c475ab
FB
10104 for (i = 0; i < 8; i++) {
10105 if (insn & (1 << i)) {
10106 if (insn & (1 << 11)) {
10107 /* pop */
c40c8556 10108 tmp = tcg_temp_new_i32();
08307563 10109 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10110 store_reg(s, i, tmp);
99c475ab
FB
10111 } else {
10112 /* push */
b0109805 10113 tmp = load_reg(s, i);
08307563 10114 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10115 tcg_temp_free_i32(tmp);
99c475ab 10116 }
5899f386 10117 /* advance to the next address. */
b0109805 10118 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10119 }
10120 }
39d5492a 10121 TCGV_UNUSED_I32(tmp);
99c475ab
FB
10122 if (insn & (1 << 8)) {
10123 if (insn & (1 << 11)) {
10124 /* pop pc */
c40c8556 10125 tmp = tcg_temp_new_i32();
08307563 10126 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
10127 /* don't set the pc until the rest of the instruction
10128 has completed */
10129 } else {
10130 /* push lr */
b0109805 10131 tmp = load_reg(s, 14);
08307563 10132 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10133 tcg_temp_free_i32(tmp);
99c475ab 10134 }
b0109805 10135 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 10136 }
5899f386 10137 if ((insn & (1 << 11)) == 0) {
b0109805 10138 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10139 }
99c475ab 10140 /* write back the new stack pointer */
b0109805 10141 store_reg(s, 13, addr);
99c475ab 10142 /* set the new PC value */
be5e7a76
DES
10143 if ((insn & 0x0900) == 0x0900) {
10144 store_reg_from_load(env, s, 15, tmp);
10145 }
99c475ab
FB
10146 break;
10147
9ee6e8bb
PB
10148 case 1: case 3: case 9: case 11: /* cbz/cbnz (compare and branch on zero) */
10149 rm = insn & 7;
d9ba4830 10150 tmp = load_reg(s, rm);
9ee6e8bb
PB
10151 s->condlabel = gen_new_label();
10152 s->condjmp = 1;
10153 if (insn & (1 << 11))
cb63669a 10154 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 10155 else
cb63669a 10156 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 10157 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10158 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
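            /* offset = i:imm5:'0' (i is bit 9, imm5 is bits [7:3]);
             * CBZ/CBNZ offsets are zero-extended, so they only branch
             * forwards.
             */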
10159 val = (uint32_t)s->pc + 2;
10160 val += offset;
10161 gen_jmp(s, val);
10162 break;
10163
10164 case 15: /* IT, nop-hint. */
10165 if ((insn & 0xf) == 0) {
10166 gen_nop_hint(s, (insn >> 4) & 0xf);
10167 break;
10168 }
10169 /* If Then. */
10170 s->condexec_cond = (insn >> 4) & 0xe;
10171 s->condexec_mask = insn & 0x1f;
10172 /* No actual code generated for this insn, just setup state. */
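            /* condexec_mask holds firstcond<0> followed by the 4-bit mask.
             * The main translation loop shifts it left after each insn while
             * it is non-zero; the bit shifted out of the top supplies the low
             * bit of the condition for the next insn, and the IT block ends
             * when the mask reaches zero.
             */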
10173 break;
10174
06c949e6 10175 case 0xe: /* bkpt */
be5e7a76 10176 ARCH(5);
bc4a0de0 10177 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
10178 break;
10179
9ee6e8bb
PB
10180 case 0xa: /* rev */
10181 ARCH(6);
10182 rn = (insn >> 3) & 0x7;
10183 rd = insn & 0x7;
b0109805 10184 tmp = load_reg(s, rn);
9ee6e8bb 10185 switch ((insn >> 6) & 3) {
66896cb8 10186 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
10187 case 1: gen_rev16(tmp); break;
10188 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
10189 default: goto illegal_op;
10190 }
b0109805 10191 store_reg(s, rd, tmp);
9ee6e8bb
PB
10192 break;
10193
d9e028c1
PM
10194 case 6:
10195 switch ((insn >> 5) & 7) {
10196 case 2:
10197 /* setend */
10198 ARCH(6);
10962fd5
PM
10199 if (((insn >> 3) & 1) != s->bswap_code) {
10200 /* Dynamic endianness switching not implemented. */
e0c270d9 10201 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
10202 goto illegal_op;
10203 }
9ee6e8bb 10204 break;
d9e028c1
PM
10205 case 3:
10206 /* cps */
10207 ARCH(6);
10208 if (IS_USER(s)) {
10209 break;
8984bd2e 10210 }
d9e028c1
PM
10211 if (IS_M(env)) {
10212 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10213 /* FAULTMASK */
10214 if (insn & 1) {
10215 addr = tcg_const_i32(19);
10216 gen_helper_v7m_msr(cpu_env, addr, tmp);
10217 tcg_temp_free_i32(addr);
10218 }
10219 /* PRIMASK */
10220 if (insn & 2) {
10221 addr = tcg_const_i32(16);
10222 gen_helper_v7m_msr(cpu_env, addr, tmp);
10223 tcg_temp_free_i32(addr);
10224 }
10225 tcg_temp_free_i32(tmp);
10226 gen_lookup_tb(s);
10227 } else {
10228 if (insn & (1 << 4)) {
10229 shift = CPSR_A | CPSR_I | CPSR_F;
10230 } else {
10231 shift = 0;
10232 }
10233 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 10234 }
d9e028c1
PM
10235 break;
10236 default:
10237 goto undef;
9ee6e8bb
PB
10238 }
10239 break;
10240
99c475ab
FB
10241 default:
10242 goto undef;
10243 }
10244 break;
10245
10246 case 12:
a7d3970d 10247 {
99c475ab 10248 /* load/store multiple */
39d5492a
PM
10249 TCGv_i32 loaded_var;
10250 TCGV_UNUSED_I32(loaded_var);
99c475ab 10251 rn = (insn >> 8) & 0x7;
b0109805 10252 addr = load_reg(s, rn);
99c475ab
FB
10253 for (i = 0; i < 8; i++) {
10254 if (insn & (1 << i)) {
99c475ab
FB
10255 if (insn & (1 << 11)) {
10256 /* load */
c40c8556 10257 tmp = tcg_temp_new_i32();
08307563 10258 gen_aa32_ld32u(tmp, addr, IS_USER(s));
a7d3970d
PM
10259 if (i == rn) {
10260 loaded_var = tmp;
10261 } else {
10262 store_reg(s, i, tmp);
10263 }
99c475ab
FB
10264 } else {
10265 /* store */
b0109805 10266 tmp = load_reg(s, i);
08307563 10267 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10268 tcg_temp_free_i32(tmp);
99c475ab 10269 }
5899f386 10270 /* advance to the next address */
b0109805 10271 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10272 }
10273 }
b0109805 10274 if ((insn & (1 << rn)) == 0) {
a7d3970d 10275 /* base reg not in list: base register writeback */
b0109805
PB
10276 store_reg(s, rn, addr);
10277 } else {
a7d3970d
PM
10278 /* base reg in list: if load, complete it now */
10279 if (insn & (1 << 11)) {
10280 store_reg(s, rn, loaded_var);
10281 }
7d1b0095 10282 tcg_temp_free_i32(addr);
b0109805 10283 }
99c475ab 10284 break;
a7d3970d 10285 }
99c475ab
FB
10286 case 13:
10287 /* conditional branch or swi */
10288 cond = (insn >> 8) & 0xf;
10289 if (cond == 0xe)
10290 goto undef;
10291
10292 if (cond == 0xf) {
10293 /* swi */
eaed129d 10294 gen_set_pc_im(s, s->pc);
9ee6e8bb 10295 s->is_jmp = DISAS_SWI;
99c475ab
FB
10296 break;
10297 }
10298 /* generate a conditional jump to next instruction */
e50e6a20 10299 s->condlabel = gen_new_label();
39fb730a 10300 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 10301 s->condjmp = 1;
99c475ab
FB
10302
10303 /* jump to the offset */
5899f386 10304 val = (uint32_t)s->pc + 2;
99c475ab 10305 offset = ((int32_t)insn << 24) >> 24;
5899f386 10306 val += offset << 1;
8aaca4c0 10307 gen_jmp(s, val);
99c475ab
FB
10308 break;
10309
10310 case 14:
358bf29e 10311 if (insn & (1 << 11)) {
9ee6e8bb
PB
10312 if (disas_thumb2_insn(env, s, insn))
10313 goto undef32;
358bf29e
PB
10314 break;
10315 }
9ee6e8bb 10316 /* unconditional branch */
99c475ab
FB
10317 val = (uint32_t)s->pc;
10318 offset = ((int32_t)insn << 21) >> 21;
10319 val += (offset << 1) + 2;
8aaca4c0 10320 gen_jmp(s, val);
99c475ab
FB
10321 break;
10322
10323 case 15:
9ee6e8bb 10324 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 10325 goto undef32;
9ee6e8bb 10326 break;
99c475ab
FB
10327 }
10328 return;
9ee6e8bb 10329undef32:
bc4a0de0 10330 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
10331 return;
10332illegal_op:
99c475ab 10333undef:
bc4a0de0 10334 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
10335}
10336
2c0262af
FB
10337/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10338 basic block 'tb'. If search_pc is TRUE, also generate PC
10339 information for each intermediate instruction. */
5639c3f2 10340static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 10341 TranslationBlock *tb,
5639c3f2 10342 bool search_pc)
2c0262af 10343{
ed2803da 10344 CPUState *cs = CPU(cpu);
5639c3f2 10345 CPUARMState *env = &cpu->env;
2c0262af 10346 DisasContext dc1, *dc = &dc1;
a1d1bb31 10347 CPUBreakpoint *bp;
2c0262af
FB
10348 uint16_t *gen_opc_end;
10349 int j, lj;
0fa85d43 10350 target_ulong pc_start;
0a2461fa 10351 target_ulong next_page_start;
2e70f6ef
PB
10352 int num_insns;
10353 int max_insns;
3b46e624 10354
2c0262af 10355 /* generate intermediate code */
40f860cd
PM
10356
10357 /* The A64 decoder has its own top level loop, because it doesn't need
10358 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
10359 */
10360 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
10361 gen_intermediate_code_internal_a64(cpu, tb, search_pc);
10362 return;
10363 }
10364
0fa85d43 10365 pc_start = tb->pc;
3b46e624 10366
2c0262af
FB
10367 dc->tb = tb;
10368
92414b31 10369 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
10370
10371 dc->is_jmp = DISAS_NEXT;
10372 dc->pc = pc_start;
ed2803da 10373 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 10374 dc->condjmp = 0;
3926cc84 10375
40f860cd
PM
10376 dc->aarch64 = 0;
10377 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
10378 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
10379 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10380 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
3926cc84 10381#if !defined(CONFIG_USER_ONLY)
40f860cd 10382 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
3926cc84 10383#endif
40f860cd
PM
10384 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
10385 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10386 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
60322b39
PM
10387 dc->cp_regs = cpu->cp_regs;
10388 dc->current_pl = arm_current_pl(env);
40f860cd 10389
a7812ae4
PB
10390 cpu_F0s = tcg_temp_new_i32();
10391 cpu_F1s = tcg_temp_new_i32();
10392 cpu_F0d = tcg_temp_new_i64();
10393 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
10394 cpu_V0 = cpu_F0d;
10395 cpu_V1 = cpu_F1d;
e677137d 10396 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 10397 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 10398 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 10399 lj = -1;
2e70f6ef
PB
10400 num_insns = 0;
10401 max_insns = tb->cflags & CF_COUNT_MASK;
10402 if (max_insns == 0)
10403 max_insns = CF_COUNT_MASK;
10404
806f352d 10405 gen_tb_start();
e12ce78d 10406
3849902c
PM
10407 tcg_clear_temp_count();
10408
e12ce78d
PM
10409 /* A note on handling of the condexec (IT) bits:
10410 *
10411 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 10412 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 10413 * (1) if the condexec bits are not already zero then we write
0ecb72a5 10414 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
10415 * to do it at the end of the block. (For example if we don't do this
10416 * it's hard to identify whether we can safely skip writing condexec
10417 * at the end of the TB, which we definitely want to do for the case
10418 * where a TB doesn't do anything with the IT state at all.)
10419 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 10420 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
10421 * This is done both for leaving the TB at the end, and for leaving
10422 * it because of an exception we know will happen, which is done in
10423 * gen_exception_insn(). The latter is necessary because we need to
10424 * leave the TB with the PC/IT state just prior to execution of the
10425 * instruction which caused the exception.
10426 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 10427 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
10428 * This is handled in the same way as restoration of the
10429 * PC in these situations: we will be called again with search_pc=1
10430 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
10431 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10432 * this to restore the condexec bits.
e12ce78d
PM
10433 *
10434 * Note that there are no instructions which can read the condexec
10435 * bits, and none which can write non-static values to them, so
0ecb72a5 10436 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
10437 * middle of a TB.
10438 */
10439
9ee6e8bb
PB
10440 /* Reset the conditional execution bits immediately. This avoids
10441 complications trying to do it at the end of the block. */
98eac7ca 10442 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 10443 {
39d5492a 10444 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 10445 tcg_gen_movi_i32(tmp, 0);
d9ba4830 10446 store_cpu_field(tmp, condexec_bits);
8f01245e 10447 }
2c0262af 10448 do {
fbb4a2e3
PB
10449#ifdef CONFIG_USER_ONLY
10450 /* Intercept jump to the magic kernel page. */
40f860cd 10451 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
10452 /* We always get here via a jump, so know we are not in a
10453 conditional execution block. */
10454 gen_exception(EXCP_KERNEL_TRAP);
10455 dc->is_jmp = DISAS_UPDATE;
10456 break;
10457 }
10458#else
9ee6e8bb
PB
10459 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10460 /* We always get here via a jump, so know we are not in a
10461 conditional execution block. */
d9ba4830 10462 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
10463 dc->is_jmp = DISAS_UPDATE;
10464 break;
9ee6e8bb
PB
10465 }
10466#endif
10467
72cf2d4f
BS
10468 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
10469 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 10470 if (bp->pc == dc->pc) {
bc4a0de0 10471 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
10472 /* Advance PC so that clearing the breakpoint will
10473 invalidate this TB. */
10474 dc->pc += 2;
10475 goto done_generating;
1fddef4b
FB
10476 }
10477 }
10478 }
2c0262af 10479 if (search_pc) {
92414b31 10480 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
10481 if (lj < j) {
10482 lj++;
10483 while (lj < j)
ab1103de 10484 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 10485 }
25983cad 10486 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 10487 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 10488 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 10489 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 10490 }
e50e6a20 10491
2e70f6ef
PB
10492 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10493 gen_io_start();
10494
fdefe51c 10495 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
10496 tcg_gen_debug_insn_start(dc->pc);
10497 }
10498
40f860cd 10499 if (dc->thumb) {
9ee6e8bb
PB
10500 disas_thumb_insn(env, dc);
10501 if (dc->condexec_mask) {
10502 dc->condexec_cond = (dc->condexec_cond & 0xe)
10503 | ((dc->condexec_mask >> 4) & 1);
10504 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10505 if (dc->condexec_mask == 0) {
10506 dc->condexec_cond = 0;
10507 }
10508 }
10509 } else {
10510 disas_arm_insn(env, dc);
10511 }
e50e6a20
FB
10512
10513 if (dc->condjmp && !dc->is_jmp) {
10514 gen_set_label(dc->condlabel);
10515 dc->condjmp = 0;
10516 }
3849902c
PM
10517
10518 if (tcg_check_temp_count()) {
0a2461fa
AG
10519 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
10520 dc->pc);
3849902c
PM
10521 }
10522
aaf2d97d 10523 /* Translation stops when a conditional branch is encountered.
e50e6a20 10524 * Otherwise the subsequent code could get translated several times.
b5ff1b31 10525 * Also stop translation when a page boundary is reached. This
bf20dc07 10526 * ensures prefetch aborts occur at the right place. */
2e70f6ef 10527 num_insns ++;
efd7f486 10528 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 10529 !cs->singlestep_enabled &&
1b530a6d 10530 !singlestep &&
2e70f6ef
PB
10531 dc->pc < next_page_start &&
10532 num_insns < max_insns);
10533
10534 if (tb->cflags & CF_LAST_IO) {
10535 if (dc->condjmp) {
10536 /* FIXME: This can theoretically happen with self-modifying
10537 code. */
10538 cpu_abort(env, "IO on conditional branch instruction");
10539 }
10540 gen_io_end();
10541 }
9ee6e8bb 10542
b5ff1b31 10543 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
10544 instruction was a conditional branch or trap, and the PC has
10545 already been written. */
ed2803da 10546 if (unlikely(cs->singlestep_enabled)) {
8aaca4c0 10547 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 10548 if (dc->condjmp) {
9ee6e8bb
PB
10549 gen_set_condexec(dc);
10550 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 10551 gen_exception(EXCP_SWI);
9ee6e8bb 10552 } else {
d9ba4830 10553 gen_exception(EXCP_DEBUG);
9ee6e8bb 10554 }
e50e6a20
FB
10555 gen_set_label(dc->condlabel);
10556 }
10557 if (dc->condjmp || !dc->is_jmp) {
eaed129d 10558 gen_set_pc_im(dc, dc->pc);
e50e6a20 10559 dc->condjmp = 0;
8aaca4c0 10560 }
9ee6e8bb
PB
10561 gen_set_condexec(dc);
10562 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 10563 gen_exception(EXCP_SWI);
9ee6e8bb
PB
10564 } else {
10565 /* FIXME: Single stepping a WFI insn will not halt
10566 the CPU. */
d9ba4830 10567 gen_exception(EXCP_DEBUG);
9ee6e8bb 10568 }
8aaca4c0 10569 } else {
9ee6e8bb
PB
10570 /* While branches must always occur at the end of an IT block,
10571 there are a few other things that can cause us to terminate
65626741 10572 the TB in the middle of an IT block:
9ee6e8bb
PB
10573 - Exception generating instructions (bkpt, swi, undefined).
10574 - Page boundaries.
10575 - Hardware watchpoints.
10576 Hardware breakpoints have already been handled and skip this code.
10577 */
10578 gen_set_condexec(dc);
8aaca4c0 10579 switch(dc->is_jmp) {
8aaca4c0 10580 case DISAS_NEXT:
6e256c93 10581 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10582 break;
10583 default:
10584 case DISAS_JUMP:
10585 case DISAS_UPDATE:
10586 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10587 tcg_gen_exit_tb(0);
8aaca4c0
FB
10588 break;
10589 case DISAS_TB_JUMP:
10590 /* nothing more to generate */
10591 break;
9ee6e8bb 10592 case DISAS_WFI:
1ce94f81 10593 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
10594 break;
10595 case DISAS_SWI:
d9ba4830 10596 gen_exception(EXCP_SWI);
9ee6e8bb 10597 break;
8aaca4c0 10598 }
e50e6a20
FB
10599 if (dc->condjmp) {
10600 gen_set_label(dc->condlabel);
9ee6e8bb 10601 gen_set_condexec(dc);
6e256c93 10602 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10603 dc->condjmp = 0;
10604 }
2c0262af 10605 }
2e70f6ef 10606
9ee6e8bb 10607done_generating:
806f352d 10608 gen_tb_end(tb, num_insns);
efd7f486 10609 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
10610
10611#ifdef DEBUG_DISAS
8fec2b8c 10612 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10613 qemu_log("----------------\n");
10614 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 10615 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 10616 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10617 qemu_log("\n");
2c0262af
FB
10618 }
10619#endif
b5ff1b31 10620 if (search_pc) {
92414b31 10621 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
10622 lj++;
10623 while (lj <= j)
ab1103de 10624 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 10625 } else {
2c0262af 10626 tb->size = dc->pc - pc_start;
2e70f6ef 10627 tb->icount = num_insns;
b5ff1b31 10628 }
2c0262af
FB
10629}
10630
0ecb72a5 10631void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10632{
5639c3f2 10633 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
10634}
10635
0ecb72a5 10636void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10637{
5639c3f2 10638 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
10639}
10640
b5ff1b31
FB
10641static const char *cpu_mode_names[16] = {
10642 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10643 "???", "???", "???", "und", "???", "???", "???", "sys"
10644};
9ee6e8bb 10645
878096ee
AF
10646void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
10647 int flags)
2c0262af 10648{
878096ee
AF
10649 ARMCPU *cpu = ARM_CPU(cs);
10650 CPUARMState *env = &cpu->env;
2c0262af 10651 int i;
b5ff1b31 10652 uint32_t psr;
2c0262af
FB
10653
10654 for(i=0;i<16;i++) {
7fe48483 10655 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10656 if ((i % 4) == 3)
7fe48483 10657 cpu_fprintf(f, "\n");
2c0262af 10658 else
7fe48483 10659 cpu_fprintf(f, " ");
2c0262af 10660 }
b5ff1b31 10661 psr = cpsr_read(env);
687fa640
TS
10662 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10663 psr,
b5ff1b31
FB
10664 psr & (1 << 31) ? 'N' : '-',
10665 psr & (1 << 30) ? 'Z' : '-',
10666 psr & (1 << 29) ? 'C' : '-',
10667 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10668 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10669 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10670
f2617cfc
PM
10671 if (flags & CPU_DUMP_FPU) {
10672 int numvfpregs = 0;
10673 if (arm_feature(env, ARM_FEATURE_VFP)) {
10674 numvfpregs += 16;
10675 }
10676 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10677 numvfpregs += 16;
10678 }
10679 for (i = 0; i < numvfpregs; i++) {
10680 uint64_t v = float64_val(env->vfp.regs[i]);
10681 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10682 i * 2, (uint32_t)v,
10683 i * 2 + 1, (uint32_t)(v >> 32),
10684 i, v);
10685 }
10686 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10687 }
2c0262af 10688}
a6b025d3 10689
0ecb72a5 10690void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10691{
3926cc84
AG
10692 if (is_a64(env)) {
10693 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 10694 env->condexec_bits = 0;
3926cc84
AG
10695 } else {
10696 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 10697 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
3926cc84 10698 }
d2856f1a 10699}