]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
target-arm: Stop underdecoding ARM946 PRBS registers
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
1de7afc9 30#include "qemu/log.h"
534df156 31#include "qemu/bitops.h"
1497c961 32
7b59220e 33#include "helper.h"
1497c961 34#define GEN_HELPER 1
7b59220e 35#include "helper.h"
2c0262af 36
be5e7a76
DES
37#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39/* currently all emulated v5 cores are also v5TE, so don't bother */
40#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
41#define ENABLE_ARCH_5J 0
42#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
81e69fb0 46#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
b5ff1b31 47
86753403 48#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 49
f570c61e 50#include "translate.h"
e12ce78d
PM
51static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
52
b5ff1b31
FB
53#if defined(CONFIG_USER_ONLY)
54#define IS_USER(s) 1
55#else
56#define IS_USER(s) (s->user)
57#endif
58
3407ad0e 59TCGv_ptr cpu_env;
ad69471c 60/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 61static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 62static TCGv_i32 cpu_R[16];
66c374de 63static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
03d05e2d
PM
64static TCGv_i64 cpu_exclusive_addr;
65static TCGv_i64 cpu_exclusive_val;
426f5abc 66#ifdef CONFIG_USER_ONLY
03d05e2d 67static TCGv_i64 cpu_exclusive_test;
426f5abc
PB
68static TCGv_i32 cpu_exclusive_info;
69#endif
ad69471c 70
b26eefb6 71/* FIXME: These should be removed. */
39d5492a 72static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 73static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 74
022c62cb 75#include "exec/gen-icount.h"
2e70f6ef 76
155c3eac
FN
77static const char *regnames[] =
78 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
79 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
80
b26eefb6
PB
81/* initialize TCG globals. */
82void arm_translate_init(void)
83{
155c3eac
FN
84 int i;
85
a7812ae4
PB
86 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
87
155c3eac
FN
88 for (i = 0; i < 16; i++) {
89 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 90 offsetof(CPUARMState, regs[i]),
155c3eac
FN
91 regnames[i]);
92 }
66c374de
AJ
93 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
97
03d05e2d 98 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
03d05e2d 100 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 102#ifdef CONFIG_USER_ONLY
03d05e2d 103 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 104 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 105 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 106 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 107#endif
155c3eac 108
14ade10f 109 a64_translate_init();
b26eefb6
PB
110}
111
39d5492a 112static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 113{
39d5492a 114 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
115 tcg_gen_ld_i32(tmp, cpu_env, offset);
116 return tmp;
117}
118
0ecb72a5 119#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 120
39d5492a 121static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
122{
123 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 124 tcg_temp_free_i32(var);
d9ba4830
PB
125}
126
127#define store_cpu_field(var, name) \
0ecb72a5 128 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 129
b26eefb6 130/* Set a variable to the value of a CPU register. */
39d5492a 131static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
132{
133 if (reg == 15) {
134 uint32_t addr;
b90372ad 135 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
136 if (s->thumb)
137 addr = (long)s->pc + 2;
138 else
139 addr = (long)s->pc + 4;
140 tcg_gen_movi_i32(var, addr);
141 } else {
155c3eac 142 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
143 }
144}
145
146/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 147static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 148{
39d5492a 149 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
150 load_reg_var(s, tmp, reg);
151 return tmp;
152}
153
154/* Set a CPU register. The source must be a temporary and will be
155 marked as dead. */
39d5492a 156static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
157{
158 if (reg == 15) {
159 tcg_gen_andi_i32(var, var, ~1);
160 s->is_jmp = DISAS_JUMP;
161 }
155c3eac 162 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 163 tcg_temp_free_i32(var);
b26eefb6
PB
164}
165
b26eefb6 166/* Value extensions. */
86831435
PB
167#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
168#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
169#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
170#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
171
1497c961
PB
172#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
173#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 174
b26eefb6 175
39d5492a 176static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 177{
39d5492a 178 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 179 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
180 tcg_temp_free_i32(tmp_mask);
181}
d9ba4830
PB
182/* Set NZCV flags from the high 4 bits of var. */
183#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
184
185static void gen_exception(int excp)
186{
39d5492a 187 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830 188 tcg_gen_movi_i32(tmp, excp);
1ce94f81 189 gen_helper_exception(cpu_env, tmp);
7d1b0095 190 tcg_temp_free_i32(tmp);
d9ba4830
PB
191}
192
39d5492a 193static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 194{
39d5492a
PM
195 TCGv_i32 tmp1 = tcg_temp_new_i32();
196 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
197 tcg_gen_ext16s_i32(tmp1, a);
198 tcg_gen_ext16s_i32(tmp2, b);
3670669c 199 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 200 tcg_temp_free_i32(tmp2);
3670669c
PB
201 tcg_gen_sari_i32(a, a, 16);
202 tcg_gen_sari_i32(b, b, 16);
203 tcg_gen_mul_i32(b, b, a);
204 tcg_gen_mov_i32(a, tmp1);
7d1b0095 205 tcg_temp_free_i32(tmp1);
3670669c
PB
206}
207
208/* Byteswap each halfword. */
39d5492a 209static void gen_rev16(TCGv_i32 var)
3670669c 210{
39d5492a 211 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
212 tcg_gen_shri_i32(tmp, var, 8);
213 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
214 tcg_gen_shli_i32(var, var, 8);
215 tcg_gen_andi_i32(var, var, 0xff00ff00);
216 tcg_gen_or_i32(var, var, tmp);
7d1b0095 217 tcg_temp_free_i32(tmp);
3670669c
PB
218}
219
220/* Byteswap low halfword and sign extend. */
39d5492a 221static void gen_revsh(TCGv_i32 var)
3670669c 222{
1a855029
AJ
223 tcg_gen_ext16u_i32(var, var);
224 tcg_gen_bswap16_i32(var, var);
225 tcg_gen_ext16s_i32(var, var);
3670669c
PB
226}
227
228/* Unsigned bitfield extract. */
39d5492a 229static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
230{
231 if (shift)
232 tcg_gen_shri_i32(var, var, shift);
233 tcg_gen_andi_i32(var, var, mask);
234}
235
236/* Signed bitfield extract. */
39d5492a 237static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
238{
239 uint32_t signbit;
240
241 if (shift)
242 tcg_gen_sari_i32(var, var, shift);
243 if (shift + width < 32) {
244 signbit = 1u << (width - 1);
245 tcg_gen_andi_i32(var, var, (1u << width) - 1);
246 tcg_gen_xori_i32(var, var, signbit);
247 tcg_gen_subi_i32(var, var, signbit);
248 }
249}
250
838fa72d 251/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 252static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 253{
838fa72d
AJ
254 TCGv_i64 tmp64 = tcg_temp_new_i64();
255
256 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 257 tcg_temp_free_i32(b);
838fa72d
AJ
258 tcg_gen_shli_i64(tmp64, tmp64, 32);
259 tcg_gen_add_i64(a, tmp64, a);
260
261 tcg_temp_free_i64(tmp64);
262 return a;
263}
264
265/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 266static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
267{
268 TCGv_i64 tmp64 = tcg_temp_new_i64();
269
270 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 271 tcg_temp_free_i32(b);
838fa72d
AJ
272 tcg_gen_shli_i64(tmp64, tmp64, 32);
273 tcg_gen_sub_i64(a, tmp64, a);
274
275 tcg_temp_free_i64(tmp64);
276 return a;
3670669c
PB
277}
278
5e3f878a 279/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 280static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 281{
39d5492a
PM
282 TCGv_i32 lo = tcg_temp_new_i32();
283 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 284 TCGv_i64 ret;
5e3f878a 285
831d7fe8 286 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 287 tcg_temp_free_i32(a);
7d1b0095 288 tcg_temp_free_i32(b);
831d7fe8
RH
289
290 ret = tcg_temp_new_i64();
291 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
292 tcg_temp_free_i32(lo);
293 tcg_temp_free_i32(hi);
831d7fe8
RH
294
295 return ret;
5e3f878a
PB
296}
297
39d5492a 298static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 299{
39d5492a
PM
300 TCGv_i32 lo = tcg_temp_new_i32();
301 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 302 TCGv_i64 ret;
5e3f878a 303
831d7fe8 304 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 305 tcg_temp_free_i32(a);
7d1b0095 306 tcg_temp_free_i32(b);
831d7fe8
RH
307
308 ret = tcg_temp_new_i64();
309 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
310 tcg_temp_free_i32(lo);
311 tcg_temp_free_i32(hi);
831d7fe8
RH
312
313 return ret;
5e3f878a
PB
314}
315
8f01245e 316/* Swap low and high halfwords. */
39d5492a 317static void gen_swap_half(TCGv_i32 var)
8f01245e 318{
39d5492a 319 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
320 tcg_gen_shri_i32(tmp, var, 16);
321 tcg_gen_shli_i32(var, var, 16);
322 tcg_gen_or_i32(var, var, tmp);
7d1b0095 323 tcg_temp_free_i32(tmp);
8f01245e
PB
324}
325
b26eefb6
PB
326/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
327 tmp = (t0 ^ t1) & 0x8000;
328 t0 &= ~0x8000;
329 t1 &= ~0x8000;
330 t0 = (t0 + t1) ^ tmp;
331 */
332
39d5492a 333static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 334{
39d5492a 335 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
336 tcg_gen_xor_i32(tmp, t0, t1);
337 tcg_gen_andi_i32(tmp, tmp, 0x8000);
338 tcg_gen_andi_i32(t0, t0, ~0x8000);
339 tcg_gen_andi_i32(t1, t1, ~0x8000);
340 tcg_gen_add_i32(t0, t0, t1);
341 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
342 tcg_temp_free_i32(tmp);
343 tcg_temp_free_i32(t1);
b26eefb6
PB
344}
345
346/* Set CF to the top bit of var. */
39d5492a 347static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 348{
66c374de 349 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
350}
351
352/* Set N and Z flags from var. */
39d5492a 353static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 354{
66c374de
AJ
355 tcg_gen_mov_i32(cpu_NF, var);
356 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
357}
358
359/* T0 += T1 + CF. */
39d5492a 360static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 361{
396e467c 362 tcg_gen_add_i32(t0, t0, t1);
66c374de 363 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
364}
365
e9bb4aa9 366/* dest = T0 + T1 + CF. */
39d5492a 367static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 368{
e9bb4aa9 369 tcg_gen_add_i32(dest, t0, t1);
66c374de 370 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
371}
372
3670669c 373/* dest = T0 - T1 + CF - 1. */
39d5492a 374static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 375{
3670669c 376 tcg_gen_sub_i32(dest, t0, t1);
66c374de 377 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 378 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
379}
380
72485ec4 381/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 382static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 383{
39d5492a 384 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
385 tcg_gen_movi_i32(tmp, 0);
386 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 387 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 388 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
389 tcg_gen_xor_i32(tmp, t0, t1);
390 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
391 tcg_temp_free_i32(tmp);
392 tcg_gen_mov_i32(dest, cpu_NF);
393}
394
49b4c31e 395/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 396static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 397{
39d5492a 398 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
399 if (TCG_TARGET_HAS_add2_i32) {
400 tcg_gen_movi_i32(tmp, 0);
401 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 402 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
403 } else {
404 TCGv_i64 q0 = tcg_temp_new_i64();
405 TCGv_i64 q1 = tcg_temp_new_i64();
406 tcg_gen_extu_i32_i64(q0, t0);
407 tcg_gen_extu_i32_i64(q1, t1);
408 tcg_gen_add_i64(q0, q0, q1);
409 tcg_gen_extu_i32_i64(q1, cpu_CF);
410 tcg_gen_add_i64(q0, q0, q1);
411 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
412 tcg_temp_free_i64(q0);
413 tcg_temp_free_i64(q1);
414 }
415 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
416 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
417 tcg_gen_xor_i32(tmp, t0, t1);
418 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
419 tcg_temp_free_i32(tmp);
420 tcg_gen_mov_i32(dest, cpu_NF);
421}
422
72485ec4 423/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 424static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 425{
39d5492a 426 TCGv_i32 tmp;
72485ec4
AJ
427 tcg_gen_sub_i32(cpu_NF, t0, t1);
428 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
429 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
430 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
431 tmp = tcg_temp_new_i32();
432 tcg_gen_xor_i32(tmp, t0, t1);
433 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
434 tcg_temp_free_i32(tmp);
435 tcg_gen_mov_i32(dest, cpu_NF);
436}
437
e77f0832 438/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 439static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 440{
39d5492a 441 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
442 tcg_gen_not_i32(tmp, t1);
443 gen_adc_CC(dest, t0, tmp);
39d5492a 444 tcg_temp_free_i32(tmp);
2de68a49
RH
445}
446
365af80e 447#define GEN_SHIFT(name) \
39d5492a 448static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 449{ \
39d5492a 450 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
451 tmp1 = tcg_temp_new_i32(); \
452 tcg_gen_andi_i32(tmp1, t1, 0xff); \
453 tmp2 = tcg_const_i32(0); \
454 tmp3 = tcg_const_i32(0x1f); \
455 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
456 tcg_temp_free_i32(tmp3); \
457 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
458 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
459 tcg_temp_free_i32(tmp2); \
460 tcg_temp_free_i32(tmp1); \
461}
462GEN_SHIFT(shl)
463GEN_SHIFT(shr)
464#undef GEN_SHIFT
465
39d5492a 466static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 467{
39d5492a 468 TCGv_i32 tmp1, tmp2;
365af80e
AJ
469 tmp1 = tcg_temp_new_i32();
470 tcg_gen_andi_i32(tmp1, t1, 0xff);
471 tmp2 = tcg_const_i32(0x1f);
472 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
473 tcg_temp_free_i32(tmp2);
474 tcg_gen_sar_i32(dest, t0, tmp1);
475 tcg_temp_free_i32(tmp1);
476}
477
39d5492a 478static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 479{
39d5492a
PM
480 TCGv_i32 c0 = tcg_const_i32(0);
481 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
482 tcg_gen_neg_i32(tmp, src);
483 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
484 tcg_temp_free_i32(c0);
485 tcg_temp_free_i32(tmp);
486}
ad69471c 487
39d5492a 488static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 489{
9a119ff6 490 if (shift == 0) {
66c374de 491 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 492 } else {
66c374de
AJ
493 tcg_gen_shri_i32(cpu_CF, var, shift);
494 if (shift != 31) {
495 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
496 }
9a119ff6 497 }
9a119ff6 498}
b26eefb6 499
9a119ff6 500/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
501static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
502 int shift, int flags)
9a119ff6
PB
503{
504 switch (shiftop) {
505 case 0: /* LSL */
506 if (shift != 0) {
507 if (flags)
508 shifter_out_im(var, 32 - shift);
509 tcg_gen_shli_i32(var, var, shift);
510 }
511 break;
512 case 1: /* LSR */
513 if (shift == 0) {
514 if (flags) {
66c374de 515 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
516 }
517 tcg_gen_movi_i32(var, 0);
518 } else {
519 if (flags)
520 shifter_out_im(var, shift - 1);
521 tcg_gen_shri_i32(var, var, shift);
522 }
523 break;
524 case 2: /* ASR */
525 if (shift == 0)
526 shift = 32;
527 if (flags)
528 shifter_out_im(var, shift - 1);
529 if (shift == 32)
530 shift = 31;
531 tcg_gen_sari_i32(var, var, shift);
532 break;
533 case 3: /* ROR/RRX */
534 if (shift != 0) {
535 if (flags)
536 shifter_out_im(var, shift - 1);
f669df27 537 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 538 } else {
39d5492a 539 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 540 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
541 if (flags)
542 shifter_out_im(var, 0);
543 tcg_gen_shri_i32(var, var, 1);
b26eefb6 544 tcg_gen_or_i32(var, var, tmp);
7d1b0095 545 tcg_temp_free_i32(tmp);
b26eefb6
PB
546 }
547 }
548};
549
39d5492a
PM
550static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
551 TCGv_i32 shift, int flags)
8984bd2e
PB
552{
553 if (flags) {
554 switch (shiftop) {
9ef39277
BS
555 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
556 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
557 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
558 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
559 }
560 } else {
561 switch (shiftop) {
365af80e
AJ
562 case 0:
563 gen_shl(var, var, shift);
564 break;
565 case 1:
566 gen_shr(var, var, shift);
567 break;
568 case 2:
569 gen_sar(var, var, shift);
570 break;
f669df27
AJ
571 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
572 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
573 }
574 }
7d1b0095 575 tcg_temp_free_i32(shift);
8984bd2e
PB
576}
577
6ddbc6e4
PB
578#define PAS_OP(pfx) \
579 switch (op2) { \
580 case 0: gen_pas_helper(glue(pfx,add16)); break; \
581 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
582 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
583 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
584 case 4: gen_pas_helper(glue(pfx,add8)); break; \
585 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
586 }
39d5492a 587static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 588{
a7812ae4 589 TCGv_ptr tmp;
6ddbc6e4
PB
590
591 switch (op1) {
592#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
593 case 1:
a7812ae4 594 tmp = tcg_temp_new_ptr();
0ecb72a5 595 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 596 PAS_OP(s)
b75263d6 597 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
598 break;
599 case 5:
a7812ae4 600 tmp = tcg_temp_new_ptr();
0ecb72a5 601 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 602 PAS_OP(u)
b75263d6 603 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
604 break;
605#undef gen_pas_helper
606#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
607 case 2:
608 PAS_OP(q);
609 break;
610 case 3:
611 PAS_OP(sh);
612 break;
613 case 6:
614 PAS_OP(uq);
615 break;
616 case 7:
617 PAS_OP(uh);
618 break;
619#undef gen_pas_helper
620 }
621}
9ee6e8bb
PB
622#undef PAS_OP
623
6ddbc6e4
PB
624/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
625#define PAS_OP(pfx) \
ed89a2f1 626 switch (op1) { \
6ddbc6e4
PB
627 case 0: gen_pas_helper(glue(pfx,add8)); break; \
628 case 1: gen_pas_helper(glue(pfx,add16)); break; \
629 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
630 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
631 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
632 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
633 }
39d5492a 634static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 635{
a7812ae4 636 TCGv_ptr tmp;
6ddbc6e4 637
ed89a2f1 638 switch (op2) {
6ddbc6e4
PB
639#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
640 case 0:
a7812ae4 641 tmp = tcg_temp_new_ptr();
0ecb72a5 642 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 643 PAS_OP(s)
b75263d6 644 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
645 break;
646 case 4:
a7812ae4 647 tmp = tcg_temp_new_ptr();
0ecb72a5 648 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 649 PAS_OP(u)
b75263d6 650 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
651 break;
652#undef gen_pas_helper
653#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
654 case 1:
655 PAS_OP(q);
656 break;
657 case 2:
658 PAS_OP(sh);
659 break;
660 case 5:
661 PAS_OP(uq);
662 break;
663 case 6:
664 PAS_OP(uh);
665 break;
666#undef gen_pas_helper
667 }
668}
9ee6e8bb
PB
669#undef PAS_OP
670
39fb730a
AG
671/*
672 * generate a conditional branch based on ARM condition code cc.
673 * This is common between ARM and Aarch64 targets.
674 */
675void arm_gen_test_cc(int cc, int label)
d9ba4830 676{
39d5492a 677 TCGv_i32 tmp;
d9ba4830
PB
678 int inv;
679
d9ba4830
PB
680 switch (cc) {
681 case 0: /* eq: Z */
66c374de 682 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
683 break;
684 case 1: /* ne: !Z */
66c374de 685 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
686 break;
687 case 2: /* cs: C */
66c374de 688 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
689 break;
690 case 3: /* cc: !C */
66c374de 691 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
692 break;
693 case 4: /* mi: N */
66c374de 694 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
695 break;
696 case 5: /* pl: !N */
66c374de 697 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
698 break;
699 case 6: /* vs: V */
66c374de 700 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
701 break;
702 case 7: /* vc: !V */
66c374de 703 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
704 break;
705 case 8: /* hi: C && !Z */
706 inv = gen_new_label();
66c374de
AJ
707 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
708 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
709 gen_set_label(inv);
710 break;
711 case 9: /* ls: !C || Z */
66c374de
AJ
712 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
713 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
714 break;
715 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
716 tmp = tcg_temp_new_i32();
717 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 718 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 719 tcg_temp_free_i32(tmp);
d9ba4830
PB
720 break;
721 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
722 tmp = tcg_temp_new_i32();
723 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 724 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 725 tcg_temp_free_i32(tmp);
d9ba4830
PB
726 break;
727 case 12: /* gt: !Z && N == V */
728 inv = gen_new_label();
66c374de
AJ
729 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
730 tmp = tcg_temp_new_i32();
731 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 732 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 733 tcg_temp_free_i32(tmp);
d9ba4830
PB
734 gen_set_label(inv);
735 break;
736 case 13: /* le: Z || N != V */
66c374de
AJ
737 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
738 tmp = tcg_temp_new_i32();
739 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 740 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 741 tcg_temp_free_i32(tmp);
d9ba4830
PB
742 break;
743 default:
744 fprintf(stderr, "Bad condition code 0x%x\n", cc);
745 abort();
746 }
d9ba4830 747}
2c0262af 748
b1d8e52e 749static const uint8_t table_logic_cc[16] = {
2c0262af
FB
750 1, /* and */
751 1, /* xor */
752 0, /* sub */
753 0, /* rsb */
754 0, /* add */
755 0, /* adc */
756 0, /* sbc */
757 0, /* rsc */
758 1, /* andl */
759 1, /* xorl */
760 0, /* cmp */
761 0, /* cmn */
762 1, /* orr */
763 1, /* mov */
764 1, /* bic */
765 1, /* mvn */
766};
3b46e624 767
d9ba4830
PB
768/* Set PC and Thumb state from an immediate address. */
769static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 770{
39d5492a 771 TCGv_i32 tmp;
99c475ab 772
b26eefb6 773 s->is_jmp = DISAS_UPDATE;
d9ba4830 774 if (s->thumb != (addr & 1)) {
7d1b0095 775 tmp = tcg_temp_new_i32();
d9ba4830 776 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 777 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 778 tcg_temp_free_i32(tmp);
d9ba4830 779 }
155c3eac 780 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
781}
782
783/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 784static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 785{
d9ba4830 786 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
787 tcg_gen_andi_i32(cpu_R[15], var, ~1);
788 tcg_gen_andi_i32(var, var, 1);
789 store_cpu_field(var, thumb);
d9ba4830
PB
790}
791
21aeb343
JR
792/* Variant of store_reg which uses branch&exchange logic when storing
793 to r15 in ARM architecture v7 and above. The source must be a temporary
794 and will be marked as dead. */
0ecb72a5 795static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 796 int reg, TCGv_i32 var)
21aeb343
JR
797{
798 if (reg == 15 && ENABLE_ARCH_7) {
799 gen_bx(s, var);
800 } else {
801 store_reg(s, reg, var);
802 }
803}
804
be5e7a76
DES
805/* Variant of store_reg which uses branch&exchange logic when storing
806 * to r15 in ARM architecture v5T and above. This is used for storing
807 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
808 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 809static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 810 int reg, TCGv_i32 var)
be5e7a76
DES
811{
812 if (reg == 15 && ENABLE_ARCH_5) {
813 gen_bx(s, var);
814 } else {
815 store_reg(s, reg, var);
816 }
817}
818
08307563
PM
819/* Abstractions of "generate code to do a guest load/store for
820 * AArch32", where a vaddr is always 32 bits (and is zero
821 * extended if we're a 64 bit core) and data is also
822 * 32 bits unless specifically doing a 64 bit access.
823 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 824 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
825 */
826#if TARGET_LONG_BITS == 32
827
09f78135
RH
828#define DO_GEN_LD(SUFF, OPC) \
829static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 830{ \
09f78135 831 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
08307563
PM
832}
833
09f78135
RH
834#define DO_GEN_ST(SUFF, OPC) \
835static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 836{ \
09f78135 837 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
08307563
PM
838}
839
840static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
841{
09f78135 842 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
08307563
PM
843}
844
845static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
846{
09f78135 847 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
08307563
PM
848}
849
850#else
851
09f78135
RH
852#define DO_GEN_LD(SUFF, OPC) \
853static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
854{ \
855 TCGv addr64 = tcg_temp_new(); \
08307563 856 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 857 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
08307563 858 tcg_temp_free(addr64); \
08307563
PM
859}
860
09f78135
RH
861#define DO_GEN_ST(SUFF, OPC) \
862static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
863{ \
864 TCGv addr64 = tcg_temp_new(); \
08307563 865 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 866 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
08307563 867 tcg_temp_free(addr64); \
08307563
PM
868}
869
870static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
871{
872 TCGv addr64 = tcg_temp_new();
873 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 874 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
08307563
PM
875 tcg_temp_free(addr64);
876}
877
878static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
879{
880 TCGv addr64 = tcg_temp_new();
881 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 882 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
08307563
PM
883 tcg_temp_free(addr64);
884}
885
886#endif
887
09f78135
RH
888DO_GEN_LD(8s, MO_SB)
889DO_GEN_LD(8u, MO_UB)
890DO_GEN_LD(16s, MO_TESW)
891DO_GEN_LD(16u, MO_TEUW)
892DO_GEN_LD(32u, MO_TEUL)
893DO_GEN_ST(8, MO_UB)
894DO_GEN_ST(16, MO_TEUW)
895DO_GEN_ST(32, MO_TEUL)
08307563 896
eaed129d 897static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 898{
40f860cd 899 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
900}
901
b5ff1b31
FB
902/* Force a TB lookup after an instruction that changes the CPU state. */
903static inline void gen_lookup_tb(DisasContext *s)
904{
a6445c52 905 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
906 s->is_jmp = DISAS_UPDATE;
907}
908
b0109805 909static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 910 TCGv_i32 var)
2c0262af 911{
1e8d4eec 912 int val, rm, shift, shiftop;
39d5492a 913 TCGv_i32 offset;
2c0262af
FB
914
915 if (!(insn & (1 << 25))) {
916 /* immediate */
917 val = insn & 0xfff;
918 if (!(insn & (1 << 23)))
919 val = -val;
537730b9 920 if (val != 0)
b0109805 921 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
922 } else {
923 /* shift/register */
924 rm = (insn) & 0xf;
925 shift = (insn >> 7) & 0x1f;
1e8d4eec 926 shiftop = (insn >> 5) & 3;
b26eefb6 927 offset = load_reg(s, rm);
9a119ff6 928 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 929 if (!(insn & (1 << 23)))
b0109805 930 tcg_gen_sub_i32(var, var, offset);
2c0262af 931 else
b0109805 932 tcg_gen_add_i32(var, var, offset);
7d1b0095 933 tcg_temp_free_i32(offset);
2c0262af
FB
934 }
935}
936
191f9a93 937static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 938 int extra, TCGv_i32 var)
2c0262af
FB
939{
940 int val, rm;
39d5492a 941 TCGv_i32 offset;
3b46e624 942
2c0262af
FB
943 if (insn & (1 << 22)) {
944 /* immediate */
945 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
946 if (!(insn & (1 << 23)))
947 val = -val;
18acad92 948 val += extra;
537730b9 949 if (val != 0)
b0109805 950 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
951 } else {
952 /* register */
191f9a93 953 if (extra)
b0109805 954 tcg_gen_addi_i32(var, var, extra);
2c0262af 955 rm = (insn) & 0xf;
b26eefb6 956 offset = load_reg(s, rm);
2c0262af 957 if (!(insn & (1 << 23)))
b0109805 958 tcg_gen_sub_i32(var, var, offset);
2c0262af 959 else
b0109805 960 tcg_gen_add_i32(var, var, offset);
7d1b0095 961 tcg_temp_free_i32(offset);
2c0262af
FB
962 }
963}
964
5aaebd13
PM
965static TCGv_ptr get_fpstatus_ptr(int neon)
966{
967 TCGv_ptr statusptr = tcg_temp_new_ptr();
968 int offset;
969 if (neon) {
0ecb72a5 970 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 971 } else {
0ecb72a5 972 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
973 }
974 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
975 return statusptr;
976}
977
4373f3ce
PB
/* Emit a two-operand VFP arithmetic op: F0 = F0 <op> F1, using the
 * vfp.fp_status float_status (get_fpstatus_ptr(0)). */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
996
605a6aed
PM
/* F1 = F0 * F1, using vfp.fp_status. */
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
1008
/* F1 = -F0 (double when dp is set, single otherwise). */
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1018
4373f3ce
PB
1019static inline void gen_vfp_abs(int dp)
1020{
1021 if (dp)
1022 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1023 else
1024 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1025}
1026
1027static inline void gen_vfp_neg(int dp)
1028{
1029 if (dp)
1030 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1031 else
1032 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1033}
1034
1035static inline void gen_vfp_sqrt(int dp)
1036{
1037 if (dp)
1038 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1039 else
1040 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1041}
1042
1043static inline void gen_vfp_cmp(int dp)
1044{
1045 if (dp)
1046 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1047 else
1048 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1049}
1050
1051static inline void gen_vfp_cmpe(int dp)
1052{
1053 if (dp)
1054 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1055 else
1056 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1057}
1058
1059static inline void gen_vfp_F1_ld0(int dp)
1060{
1061 if (dp)
5b340b51 1062 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1063 else
5b340b51 1064 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1065}
1066
5500b06c
PM
/* Integer-to-float conversions.  The 32-bit integer source is always
 * read from F0s; the result goes to F0d (dp) or F0s.  NEON selects the
 * standard float_status. */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1082
5500b06c
PM
/* Float-to-integer conversions.  The source is F0d (dp) or F0s; the
 * 32-bit integer result always lands in F0s.  NEON selects the
 * standard float_status. */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1100
/* Fixed-point <-> float conversions on F0 with SHIFT fractional bits.
 * ROUND is pasted onto the helper name to select the rounding variant
 * (e.g. _round_to_zero for the to-fixed directions, empty otherwise). */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1125
/* Load F0 from guest memory at ADDR: 64-bit into F0d when dp is set,
 * else 32-bit into F0s. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
    }
}
1134
/* Store F0 to guest memory at ADDR: 64-bit from F0d when dp is set,
 * else 32-bit from F0s. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
    }
}
1143
8e96005d
FB
1144static inline long
1145vfp_reg_offset (int dp, int reg)
1146{
1147 if (dp)
1148 return offsetof(CPUARMState, vfp.regs[reg]);
1149 else if (reg & 1) {
1150 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1151 + offsetof(CPU_DoubleU, l.upper);
1152 } else {
1153 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1154 + offsetof(CPU_DoubleU, l.lower);
1155 }
1156}
9ee6e8bb
PB
1157
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each 64-bit NEON register is two single-precision slots. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1167
/* Return a new 32-bit temp holding piece PASS of NEON register REG;
 * the caller owns (and must free) the temp. */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1174
/* Store VAR into piece PASS of NEON register REG; VAR is consumed
 * (freed) by this call. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1180
/* Load the whole 64-bit NEON/VFP double register REG into VAR. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1185
/* Store VAR into the whole 64-bit NEON/VFP double register REG. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1190
4373f3ce
PB
/* TCG has no distinct float load/store ops; alias the float names to
 * the integer ops of the same width. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1195
b7bcbe95
FB
/* Load VFP register REG into F0 (double when dp, else single). */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1203
/* Load VFP register REG into F1 (double when dp, else single). */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}
1211
/* Store F0 into VFP register REG (double when dp, else single). */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1219
18c9b560
AZ
1220#define ARM_CP_RW_BIT (1 << 20)
1221
/* Load iwMMXt 64-bit data register REG into VAR. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Store VAR into iwMMXt 64-bit data register REG. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Return a new temp holding iwMMXt control register REG (caller frees). */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store VAR into iwMMXt control register REG; VAR is consumed (freed). */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

/* wRn = M0 (the iwMMXt working register). */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

/* M0 = wRn. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* M0 |= wRn. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* M0 &= wRn. */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* M0 ^= wRn. */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1272
/* Emit M0 = helper(M0, wRn) for a two-operand iwMMXt helper. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (presumably because
 * it reads/updates CPU state such as flags -- see the helper bodies). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate an env-taking op for byte/word/long element widths. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand env-taking op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1354
e677137d
PB
/* Set bit 1 of wCon -- per the function name, the MUP ("main registers
 * updated") flag. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set bit 0 of wCon -- per the function name, the CUP ("control
 * registers updated") flag. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1370
/* Recompute the N/Z condition bits in wCASF from the value in M0
 * (via the setpsr_nz helper). */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1377
/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1384
39d5492a
PM
/* Compute the effective address for an iwMMXt load/store into DEST,
 * handling pre/post indexing and base-register writeback as encoded in
 * INSN.  Returns nonzero for an unhandled addressing-mode encoding. */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;            /* base register */
    tmp = load_reg(s, rd);

    /* 8-bit immediate offset, scaled by 4 when bit 8 is set */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);      /* writeback */
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        /* P=0, W=0 with U clear: rejected as invalid.
         * NOTE(review): tmp appears to leak on this path -- verify
         * whether the temp should be freed before returning. */
        return 1;
    return 0;
}
1419
/* Fetch the shift amount for an iwMMXt shift instruction into DEST,
 * masked with MASK.  Bit 8 of INSN selects a wCGR control register
 * source (only wCGR0..wCGR3 are accepted); otherwise the low 32 bits
 * of data register wRd are used.  Returns nonzero on a bad encoding. */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        /* take only the low 32 bits of the 64-bit data register */
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1441
a1c7273b 1442/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1443 (ie. an undefined instruction). */
0ecb72a5 1444static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1445{
1446 int rd, wrd;
1447 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1448 TCGv_i32 addr;
1449 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1450
1451 if ((insn & 0x0e000e00) == 0x0c000000) {
1452 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1453 wrd = insn & 0xf;
1454 rdlo = (insn >> 12) & 0xf;
1455 rdhi = (insn >> 16) & 0xf;
1456 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1457 iwmmxt_load_reg(cpu_V0, wrd);
1458 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1459 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1460 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1461 } else { /* TMCRR */
da6b5335
FN
1462 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1463 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1464 gen_op_iwmmxt_set_mup();
1465 }
1466 return 0;
1467 }
1468
1469 wrd = (insn >> 12) & 0xf;
7d1b0095 1470 addr = tcg_temp_new_i32();
da6b5335 1471 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1472 tcg_temp_free_i32(addr);
18c9b560 1473 return 1;
da6b5335 1474 }
18c9b560
AZ
1475 if (insn & ARM_CP_RW_BIT) {
1476 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1477 tmp = tcg_temp_new_i32();
08307563 1478 gen_aa32_ld32u(tmp, addr, IS_USER(s));
da6b5335 1479 iwmmxt_store_creg(wrd, tmp);
18c9b560 1480 } else {
e677137d
PB
1481 i = 1;
1482 if (insn & (1 << 8)) {
1483 if (insn & (1 << 22)) { /* WLDRD */
08307563 1484 gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1485 i = 0;
1486 } else { /* WLDRW wRd */
29531141 1487 tmp = tcg_temp_new_i32();
08307563 1488 gen_aa32_ld32u(tmp, addr, IS_USER(s));
e677137d
PB
1489 }
1490 } else {
29531141 1491 tmp = tcg_temp_new_i32();
e677137d 1492 if (insn & (1 << 22)) { /* WLDRH */
08307563 1493 gen_aa32_ld16u(tmp, addr, IS_USER(s));
e677137d 1494 } else { /* WLDRB */
08307563 1495 gen_aa32_ld8u(tmp, addr, IS_USER(s));
e677137d
PB
1496 }
1497 }
1498 if (i) {
1499 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1500 tcg_temp_free_i32(tmp);
e677137d 1501 }
18c9b560
AZ
1502 gen_op_iwmmxt_movq_wRn_M0(wrd);
1503 }
1504 } else {
1505 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1506 tmp = iwmmxt_load_creg(wrd);
08307563 1507 gen_aa32_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1508 } else {
1509 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1510 tmp = tcg_temp_new_i32();
e677137d
PB
1511 if (insn & (1 << 8)) {
1512 if (insn & (1 << 22)) { /* WSTRD */
08307563 1513 gen_aa32_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1514 } else { /* WSTRW wRd */
1515 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1516 gen_aa32_st32(tmp, addr, IS_USER(s));
e677137d
PB
1517 }
1518 } else {
1519 if (insn & (1 << 22)) { /* WSTRH */
1520 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1521 gen_aa32_st16(tmp, addr, IS_USER(s));
e677137d
PB
1522 } else { /* WSTRB */
1523 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1524 gen_aa32_st8(tmp, addr, IS_USER(s));
e677137d
PB
1525 }
1526 }
18c9b560 1527 }
29531141 1528 tcg_temp_free_i32(tmp);
18c9b560 1529 }
7d1b0095 1530 tcg_temp_free_i32(addr);
18c9b560
AZ
1531 return 0;
1532 }
1533
1534 if ((insn & 0x0f000000) != 0x0e000000)
1535 return 1;
1536
1537 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1538 case 0x000: /* WOR */
1539 wrd = (insn >> 12) & 0xf;
1540 rd0 = (insn >> 0) & 0xf;
1541 rd1 = (insn >> 16) & 0xf;
1542 gen_op_iwmmxt_movq_M0_wRn(rd0);
1543 gen_op_iwmmxt_orq_M0_wRn(rd1);
1544 gen_op_iwmmxt_setpsr_nz();
1545 gen_op_iwmmxt_movq_wRn_M0(wrd);
1546 gen_op_iwmmxt_set_mup();
1547 gen_op_iwmmxt_set_cup();
1548 break;
1549 case 0x011: /* TMCR */
1550 if (insn & 0xf)
1551 return 1;
1552 rd = (insn >> 12) & 0xf;
1553 wrd = (insn >> 16) & 0xf;
1554 switch (wrd) {
1555 case ARM_IWMMXT_wCID:
1556 case ARM_IWMMXT_wCASF:
1557 break;
1558 case ARM_IWMMXT_wCon:
1559 gen_op_iwmmxt_set_cup();
1560 /* Fall through. */
1561 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1562 tmp = iwmmxt_load_creg(wrd);
1563 tmp2 = load_reg(s, rd);
f669df27 1564 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1565 tcg_temp_free_i32(tmp2);
da6b5335 1566 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1567 break;
1568 case ARM_IWMMXT_wCGR0:
1569 case ARM_IWMMXT_wCGR1:
1570 case ARM_IWMMXT_wCGR2:
1571 case ARM_IWMMXT_wCGR3:
1572 gen_op_iwmmxt_set_cup();
da6b5335
FN
1573 tmp = load_reg(s, rd);
1574 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1575 break;
1576 default:
1577 return 1;
1578 }
1579 break;
1580 case 0x100: /* WXOR */
1581 wrd = (insn >> 12) & 0xf;
1582 rd0 = (insn >> 0) & 0xf;
1583 rd1 = (insn >> 16) & 0xf;
1584 gen_op_iwmmxt_movq_M0_wRn(rd0);
1585 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1586 gen_op_iwmmxt_setpsr_nz();
1587 gen_op_iwmmxt_movq_wRn_M0(wrd);
1588 gen_op_iwmmxt_set_mup();
1589 gen_op_iwmmxt_set_cup();
1590 break;
1591 case 0x111: /* TMRC */
1592 if (insn & 0xf)
1593 return 1;
1594 rd = (insn >> 12) & 0xf;
1595 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1596 tmp = iwmmxt_load_creg(wrd);
1597 store_reg(s, rd, tmp);
18c9b560
AZ
1598 break;
1599 case 0x300: /* WANDN */
1600 wrd = (insn >> 12) & 0xf;
1601 rd0 = (insn >> 0) & 0xf;
1602 rd1 = (insn >> 16) & 0xf;
1603 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1604 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1605 gen_op_iwmmxt_andq_M0_wRn(rd1);
1606 gen_op_iwmmxt_setpsr_nz();
1607 gen_op_iwmmxt_movq_wRn_M0(wrd);
1608 gen_op_iwmmxt_set_mup();
1609 gen_op_iwmmxt_set_cup();
1610 break;
1611 case 0x200: /* WAND */
1612 wrd = (insn >> 12) & 0xf;
1613 rd0 = (insn >> 0) & 0xf;
1614 rd1 = (insn >> 16) & 0xf;
1615 gen_op_iwmmxt_movq_M0_wRn(rd0);
1616 gen_op_iwmmxt_andq_M0_wRn(rd1);
1617 gen_op_iwmmxt_setpsr_nz();
1618 gen_op_iwmmxt_movq_wRn_M0(wrd);
1619 gen_op_iwmmxt_set_mup();
1620 gen_op_iwmmxt_set_cup();
1621 break;
1622 case 0x810: case 0xa10: /* WMADD */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 0) & 0xf;
1625 rd1 = (insn >> 16) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 if (insn & (1 << 21))
1628 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1629 else
1630 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1631 gen_op_iwmmxt_movq_wRn_M0(wrd);
1632 gen_op_iwmmxt_set_mup();
1633 break;
1634 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 16) & 0xf;
1637 rd1 = (insn >> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 switch ((insn >> 22) & 3) {
1640 case 0:
1641 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1642 break;
1643 case 1:
1644 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1645 break;
1646 case 2:
1647 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1648 break;
1649 case 3:
1650 return 1;
1651 }
1652 gen_op_iwmmxt_movq_wRn_M0(wrd);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1655 break;
1656 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1657 wrd = (insn >> 12) & 0xf;
1658 rd0 = (insn >> 16) & 0xf;
1659 rd1 = (insn >> 0) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0);
1661 switch ((insn >> 22) & 3) {
1662 case 0:
1663 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1664 break;
1665 case 1:
1666 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1667 break;
1668 case 2:
1669 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1670 break;
1671 case 3:
1672 return 1;
1673 }
1674 gen_op_iwmmxt_movq_wRn_M0(wrd);
1675 gen_op_iwmmxt_set_mup();
1676 gen_op_iwmmxt_set_cup();
1677 break;
1678 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1679 wrd = (insn >> 12) & 0xf;
1680 rd0 = (insn >> 16) & 0xf;
1681 rd1 = (insn >> 0) & 0xf;
1682 gen_op_iwmmxt_movq_M0_wRn(rd0);
1683 if (insn & (1 << 22))
1684 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1685 else
1686 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1687 if (!(insn & (1 << 20)))
1688 gen_op_iwmmxt_addl_M0_wRn(wrd);
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 break;
1692 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1697 if (insn & (1 << 21)) {
1698 if (insn & (1 << 20))
1699 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1700 else
1701 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1702 } else {
1703 if (insn & (1 << 20))
1704 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1705 else
1706 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1707 }
18c9b560
AZ
1708 gen_op_iwmmxt_movq_wRn_M0(wrd);
1709 gen_op_iwmmxt_set_mup();
1710 break;
1711 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1712 wrd = (insn >> 12) & 0xf;
1713 rd0 = (insn >> 16) & 0xf;
1714 rd1 = (insn >> 0) & 0xf;
1715 gen_op_iwmmxt_movq_M0_wRn(rd0);
1716 if (insn & (1 << 21))
1717 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1718 else
1719 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1720 if (!(insn & (1 << 20))) {
e677137d
PB
1721 iwmmxt_load_reg(cpu_V1, wrd);
1722 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1723 }
1724 gen_op_iwmmxt_movq_wRn_M0(wrd);
1725 gen_op_iwmmxt_set_mup();
1726 break;
1727 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1728 wrd = (insn >> 12) & 0xf;
1729 rd0 = (insn >> 16) & 0xf;
1730 rd1 = (insn >> 0) & 0xf;
1731 gen_op_iwmmxt_movq_M0_wRn(rd0);
1732 switch ((insn >> 22) & 3) {
1733 case 0:
1734 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1735 break;
1736 case 1:
1737 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1738 break;
1739 case 2:
1740 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1741 break;
1742 case 3:
1743 return 1;
1744 }
1745 gen_op_iwmmxt_movq_wRn_M0(wrd);
1746 gen_op_iwmmxt_set_mup();
1747 gen_op_iwmmxt_set_cup();
1748 break;
1749 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1750 wrd = (insn >> 12) & 0xf;
1751 rd0 = (insn >> 16) & 0xf;
1752 rd1 = (insn >> 0) & 0xf;
1753 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1754 if (insn & (1 << 22)) {
1755 if (insn & (1 << 20))
1756 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1757 else
1758 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1759 } else {
1760 if (insn & (1 << 20))
1761 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1762 else
1763 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1764 }
18c9b560
AZ
1765 gen_op_iwmmxt_movq_wRn_M0(wrd);
1766 gen_op_iwmmxt_set_mup();
1767 gen_op_iwmmxt_set_cup();
1768 break;
1769 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1770 wrd = (insn >> 12) & 0xf;
1771 rd0 = (insn >> 16) & 0xf;
1772 rd1 = (insn >> 0) & 0xf;
1773 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1774 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1775 tcg_gen_andi_i32(tmp, tmp, 7);
1776 iwmmxt_load_reg(cpu_V1, rd1);
1777 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1778 tcg_temp_free_i32(tmp);
18c9b560
AZ
1779 gen_op_iwmmxt_movq_wRn_M0(wrd);
1780 gen_op_iwmmxt_set_mup();
1781 break;
1782 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1783 if (((insn >> 6) & 3) == 3)
1784 return 1;
18c9b560
AZ
1785 rd = (insn >> 12) & 0xf;
1786 wrd = (insn >> 16) & 0xf;
da6b5335 1787 tmp = load_reg(s, rd);
18c9b560
AZ
1788 gen_op_iwmmxt_movq_M0_wRn(wrd);
1789 switch ((insn >> 6) & 3) {
1790 case 0:
da6b5335
FN
1791 tmp2 = tcg_const_i32(0xff);
1792 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1793 break;
1794 case 1:
da6b5335
FN
1795 tmp2 = tcg_const_i32(0xffff);
1796 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1797 break;
1798 case 2:
da6b5335
FN
1799 tmp2 = tcg_const_i32(0xffffffff);
1800 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1801 break;
da6b5335 1802 default:
39d5492a
PM
1803 TCGV_UNUSED_I32(tmp2);
1804 TCGV_UNUSED_I32(tmp3);
18c9b560 1805 }
da6b5335 1806 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1807 tcg_temp_free_i32(tmp3);
1808 tcg_temp_free_i32(tmp2);
7d1b0095 1809 tcg_temp_free_i32(tmp);
18c9b560
AZ
1810 gen_op_iwmmxt_movq_wRn_M0(wrd);
1811 gen_op_iwmmxt_set_mup();
1812 break;
1813 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1814 rd = (insn >> 12) & 0xf;
1815 wrd = (insn >> 16) & 0xf;
da6b5335 1816 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1817 return 1;
1818 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1819 tmp = tcg_temp_new_i32();
18c9b560
AZ
1820 switch ((insn >> 22) & 3) {
1821 case 0:
da6b5335
FN
1822 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1823 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1824 if (insn & 8) {
1825 tcg_gen_ext8s_i32(tmp, tmp);
1826 } else {
1827 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1828 }
1829 break;
1830 case 1:
da6b5335
FN
1831 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1832 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1833 if (insn & 8) {
1834 tcg_gen_ext16s_i32(tmp, tmp);
1835 } else {
1836 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1837 }
1838 break;
1839 case 2:
da6b5335
FN
1840 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1841 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1842 break;
18c9b560 1843 }
da6b5335 1844 store_reg(s, rd, tmp);
18c9b560
AZ
1845 break;
1846 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1847 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1848 return 1;
da6b5335 1849 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1850 switch ((insn >> 22) & 3) {
1851 case 0:
da6b5335 1852 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1853 break;
1854 case 1:
da6b5335 1855 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1856 break;
1857 case 2:
da6b5335 1858 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1859 break;
18c9b560 1860 }
da6b5335
FN
1861 tcg_gen_shli_i32(tmp, tmp, 28);
1862 gen_set_nzcv(tmp);
7d1b0095 1863 tcg_temp_free_i32(tmp);
18c9b560
AZ
1864 break;
1865 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1866 if (((insn >> 6) & 3) == 3)
1867 return 1;
18c9b560
AZ
1868 rd = (insn >> 12) & 0xf;
1869 wrd = (insn >> 16) & 0xf;
da6b5335 1870 tmp = load_reg(s, rd);
18c9b560
AZ
1871 switch ((insn >> 6) & 3) {
1872 case 0:
da6b5335 1873 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1874 break;
1875 case 1:
da6b5335 1876 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1877 break;
1878 case 2:
da6b5335 1879 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1880 break;
18c9b560 1881 }
7d1b0095 1882 tcg_temp_free_i32(tmp);
18c9b560
AZ
1883 gen_op_iwmmxt_movq_wRn_M0(wrd);
1884 gen_op_iwmmxt_set_mup();
1885 break;
1886 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1887 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1888 return 1;
da6b5335 1889 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1890 tmp2 = tcg_temp_new_i32();
da6b5335 1891 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1892 switch ((insn >> 22) & 3) {
1893 case 0:
1894 for (i = 0; i < 7; i ++) {
da6b5335
FN
1895 tcg_gen_shli_i32(tmp2, tmp2, 4);
1896 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1897 }
1898 break;
1899 case 1:
1900 for (i = 0; i < 3; i ++) {
da6b5335
FN
1901 tcg_gen_shli_i32(tmp2, tmp2, 8);
1902 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1903 }
1904 break;
1905 case 2:
da6b5335
FN
1906 tcg_gen_shli_i32(tmp2, tmp2, 16);
1907 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1908 break;
18c9b560 1909 }
da6b5335 1910 gen_set_nzcv(tmp);
7d1b0095
PM
1911 tcg_temp_free_i32(tmp2);
1912 tcg_temp_free_i32(tmp);
18c9b560
AZ
1913 break;
1914 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1915 wrd = (insn >> 12) & 0xf;
1916 rd0 = (insn >> 16) & 0xf;
1917 gen_op_iwmmxt_movq_M0_wRn(rd0);
1918 switch ((insn >> 22) & 3) {
1919 case 0:
e677137d 1920 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1921 break;
1922 case 1:
e677137d 1923 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1924 break;
1925 case 2:
e677137d 1926 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1927 break;
1928 case 3:
1929 return 1;
1930 }
1931 gen_op_iwmmxt_movq_wRn_M0(wrd);
1932 gen_op_iwmmxt_set_mup();
1933 break;
1934 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1935 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1936 return 1;
da6b5335 1937 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1938 tmp2 = tcg_temp_new_i32();
da6b5335 1939 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1940 switch ((insn >> 22) & 3) {
1941 case 0:
1942 for (i = 0; i < 7; i ++) {
da6b5335
FN
1943 tcg_gen_shli_i32(tmp2, tmp2, 4);
1944 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1945 }
1946 break;
1947 case 1:
1948 for (i = 0; i < 3; i ++) {
da6b5335
FN
1949 tcg_gen_shli_i32(tmp2, tmp2, 8);
1950 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1951 }
1952 break;
1953 case 2:
da6b5335
FN
1954 tcg_gen_shli_i32(tmp2, tmp2, 16);
1955 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1956 break;
18c9b560 1957 }
da6b5335 1958 gen_set_nzcv(tmp);
7d1b0095
PM
1959 tcg_temp_free_i32(tmp2);
1960 tcg_temp_free_i32(tmp);
18c9b560
AZ
1961 break;
1962 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1963 rd = (insn >> 12) & 0xf;
1964 rd0 = (insn >> 16) & 0xf;
da6b5335 1965 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1966 return 1;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1968 tmp = tcg_temp_new_i32();
18c9b560
AZ
1969 switch ((insn >> 22) & 3) {
1970 case 0:
da6b5335 1971 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1972 break;
1973 case 1:
da6b5335 1974 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1975 break;
1976 case 2:
da6b5335 1977 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1978 break;
18c9b560 1979 }
da6b5335 1980 store_reg(s, rd, tmp);
18c9b560
AZ
1981 break;
1982 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1983 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1984 wrd = (insn >> 12) & 0xf;
1985 rd0 = (insn >> 16) & 0xf;
1986 rd1 = (insn >> 0) & 0xf;
1987 gen_op_iwmmxt_movq_M0_wRn(rd0);
1988 switch ((insn >> 22) & 3) {
1989 case 0:
1990 if (insn & (1 << 21))
1991 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1992 else
1993 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1994 break;
1995 case 1:
1996 if (insn & (1 << 21))
1997 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1998 else
1999 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2000 break;
2001 case 2:
2002 if (insn & (1 << 21))
2003 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2004 else
2005 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2006 break;
2007 case 3:
2008 return 1;
2009 }
2010 gen_op_iwmmxt_movq_wRn_M0(wrd);
2011 gen_op_iwmmxt_set_mup();
2012 gen_op_iwmmxt_set_cup();
2013 break;
2014 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2015 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2016 wrd = (insn >> 12) & 0xf;
2017 rd0 = (insn >> 16) & 0xf;
2018 gen_op_iwmmxt_movq_M0_wRn(rd0);
2019 switch ((insn >> 22) & 3) {
2020 case 0:
2021 if (insn & (1 << 21))
2022 gen_op_iwmmxt_unpacklsb_M0();
2023 else
2024 gen_op_iwmmxt_unpacklub_M0();
2025 break;
2026 case 1:
2027 if (insn & (1 << 21))
2028 gen_op_iwmmxt_unpacklsw_M0();
2029 else
2030 gen_op_iwmmxt_unpackluw_M0();
2031 break;
2032 case 2:
2033 if (insn & (1 << 21))
2034 gen_op_iwmmxt_unpacklsl_M0();
2035 else
2036 gen_op_iwmmxt_unpacklul_M0();
2037 break;
2038 case 3:
2039 return 1;
2040 }
2041 gen_op_iwmmxt_movq_wRn_M0(wrd);
2042 gen_op_iwmmxt_set_mup();
2043 gen_op_iwmmxt_set_cup();
2044 break;
2045 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2046 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2047 wrd = (insn >> 12) & 0xf;
2048 rd0 = (insn >> 16) & 0xf;
2049 gen_op_iwmmxt_movq_M0_wRn(rd0);
2050 switch ((insn >> 22) & 3) {
2051 case 0:
2052 if (insn & (1 << 21))
2053 gen_op_iwmmxt_unpackhsb_M0();
2054 else
2055 gen_op_iwmmxt_unpackhub_M0();
2056 break;
2057 case 1:
2058 if (insn & (1 << 21))
2059 gen_op_iwmmxt_unpackhsw_M0();
2060 else
2061 gen_op_iwmmxt_unpackhuw_M0();
2062 break;
2063 case 2:
2064 if (insn & (1 << 21))
2065 gen_op_iwmmxt_unpackhsl_M0();
2066 else
2067 gen_op_iwmmxt_unpackhul_M0();
2068 break;
2069 case 3:
2070 return 1;
2071 }
2072 gen_op_iwmmxt_movq_wRn_M0(wrd);
2073 gen_op_iwmmxt_set_mup();
2074 gen_op_iwmmxt_set_cup();
2075 break;
2076 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2077 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2078 if (((insn >> 22) & 3) == 0)
2079 return 1;
18c9b560
AZ
2080 wrd = (insn >> 12) & 0xf;
2081 rd0 = (insn >> 16) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2083 tmp = tcg_temp_new_i32();
da6b5335 2084 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2085 tcg_temp_free_i32(tmp);
18c9b560 2086 return 1;
da6b5335 2087 }
18c9b560 2088 switch ((insn >> 22) & 3) {
18c9b560 2089 case 1:
477955bd 2090 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2091 break;
2092 case 2:
477955bd 2093 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2094 break;
2095 case 3:
477955bd 2096 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2097 break;
2098 }
7d1b0095 2099 tcg_temp_free_i32(tmp);
18c9b560
AZ
2100 gen_op_iwmmxt_movq_wRn_M0(wrd);
2101 gen_op_iwmmxt_set_mup();
2102 gen_op_iwmmxt_set_cup();
2103 break;
2104 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2105 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2106 if (((insn >> 22) & 3) == 0)
2107 return 1;
18c9b560
AZ
2108 wrd = (insn >> 12) & 0xf;
2109 rd0 = (insn >> 16) & 0xf;
2110 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2111 tmp = tcg_temp_new_i32();
da6b5335 2112 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2113 tcg_temp_free_i32(tmp);
18c9b560 2114 return 1;
da6b5335 2115 }
18c9b560 2116 switch ((insn >> 22) & 3) {
18c9b560 2117 case 1:
477955bd 2118 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2119 break;
2120 case 2:
477955bd 2121 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2122 break;
2123 case 3:
477955bd 2124 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2125 break;
2126 }
7d1b0095 2127 tcg_temp_free_i32(tmp);
18c9b560
AZ
2128 gen_op_iwmmxt_movq_wRn_M0(wrd);
2129 gen_op_iwmmxt_set_mup();
2130 gen_op_iwmmxt_set_cup();
2131 break;
2132 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2133 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2134 if (((insn >> 22) & 3) == 0)
2135 return 1;
18c9b560
AZ
2136 wrd = (insn >> 12) & 0xf;
2137 rd0 = (insn >> 16) & 0xf;
2138 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2139 tmp = tcg_temp_new_i32();
da6b5335 2140 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2141 tcg_temp_free_i32(tmp);
18c9b560 2142 return 1;
da6b5335 2143 }
18c9b560 2144 switch ((insn >> 22) & 3) {
18c9b560 2145 case 1:
477955bd 2146 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2147 break;
2148 case 2:
477955bd 2149 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2150 break;
2151 case 3:
477955bd 2152 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2153 break;
2154 }
7d1b0095 2155 tcg_temp_free_i32(tmp);
18c9b560
AZ
2156 gen_op_iwmmxt_movq_wRn_M0(wrd);
2157 gen_op_iwmmxt_set_mup();
2158 gen_op_iwmmxt_set_cup();
2159 break;
2160 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2161 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2162 if (((insn >> 22) & 3) == 0)
2163 return 1;
18c9b560
AZ
2164 wrd = (insn >> 12) & 0xf;
2165 rd0 = (insn >> 16) & 0xf;
2166 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2167 tmp = tcg_temp_new_i32();
18c9b560 2168 switch ((insn >> 22) & 3) {
18c9b560 2169 case 1:
da6b5335 2170 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2171 tcg_temp_free_i32(tmp);
18c9b560 2172 return 1;
da6b5335 2173 }
477955bd 2174 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2175 break;
2176 case 2:
da6b5335 2177 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2178 tcg_temp_free_i32(tmp);
18c9b560 2179 return 1;
da6b5335 2180 }
477955bd 2181 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2182 break;
2183 case 3:
da6b5335 2184 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2185 tcg_temp_free_i32(tmp);
18c9b560 2186 return 1;
da6b5335 2187 }
477955bd 2188 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2189 break;
2190 }
7d1b0095 2191 tcg_temp_free_i32(tmp);
18c9b560
AZ
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 gen_op_iwmmxt_set_cup();
2195 break;
2196 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2197 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 rd1 = (insn >> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0);
2202 switch ((insn >> 22) & 3) {
2203 case 0:
2204 if (insn & (1 << 21))
2205 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2206 else
2207 gen_op_iwmmxt_minub_M0_wRn(rd1);
2208 break;
2209 case 1:
2210 if (insn & (1 << 21))
2211 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2212 else
2213 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2214 break;
2215 case 2:
2216 if (insn & (1 << 21))
2217 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2218 else
2219 gen_op_iwmmxt_minul_M0_wRn(rd1);
2220 break;
2221 case 3:
2222 return 1;
2223 }
2224 gen_op_iwmmxt_movq_wRn_M0(wrd);
2225 gen_op_iwmmxt_set_mup();
2226 break;
2227 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2228 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2229 wrd = (insn >> 12) & 0xf;
2230 rd0 = (insn >> 16) & 0xf;
2231 rd1 = (insn >> 0) & 0xf;
2232 gen_op_iwmmxt_movq_M0_wRn(rd0);
2233 switch ((insn >> 22) & 3) {
2234 case 0:
2235 if (insn & (1 << 21))
2236 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2237 else
2238 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2239 break;
2240 case 1:
2241 if (insn & (1 << 21))
2242 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2243 else
2244 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2245 break;
2246 case 2:
2247 if (insn & (1 << 21))
2248 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2249 else
2250 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2251 break;
2252 case 3:
2253 return 1;
2254 }
2255 gen_op_iwmmxt_movq_wRn_M0(wrd);
2256 gen_op_iwmmxt_set_mup();
2257 break;
2258 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2259 case 0x402: case 0x502: case 0x602: case 0x702:
2260 wrd = (insn >> 12) & 0xf;
2261 rd0 = (insn >> 16) & 0xf;
2262 rd1 = (insn >> 0) & 0xf;
2263 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2264 tmp = tcg_const_i32((insn >> 20) & 3);
2265 iwmmxt_load_reg(cpu_V1, rd1);
2266 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2267 tcg_temp_free_i32(tmp);
18c9b560
AZ
2268 gen_op_iwmmxt_movq_wRn_M0(wrd);
2269 gen_op_iwmmxt_set_mup();
2270 break;
2271 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2272 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2273 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2274 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2275 wrd = (insn >> 12) & 0xf;
2276 rd0 = (insn >> 16) & 0xf;
2277 rd1 = (insn >> 0) & 0xf;
2278 gen_op_iwmmxt_movq_M0_wRn(rd0);
2279 switch ((insn >> 20) & 0xf) {
2280 case 0x0:
2281 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2282 break;
2283 case 0x1:
2284 gen_op_iwmmxt_subub_M0_wRn(rd1);
2285 break;
2286 case 0x3:
2287 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2288 break;
2289 case 0x4:
2290 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2291 break;
2292 case 0x5:
2293 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2294 break;
2295 case 0x7:
2296 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2297 break;
2298 case 0x8:
2299 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2300 break;
2301 case 0x9:
2302 gen_op_iwmmxt_subul_M0_wRn(rd1);
2303 break;
2304 case 0xb:
2305 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2306 break;
2307 default:
2308 return 1;
2309 }
2310 gen_op_iwmmxt_movq_wRn_M0(wrd);
2311 gen_op_iwmmxt_set_mup();
2312 gen_op_iwmmxt_set_cup();
2313 break;
2314 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2315 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2316 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2317 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2318 wrd = (insn >> 12) & 0xf;
2319 rd0 = (insn >> 16) & 0xf;
2320 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2321 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2322 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2323 tcg_temp_free_i32(tmp);
18c9b560
AZ
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2327 break;
2328 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2329 case 0x418: case 0x518: case 0x618: case 0x718:
2330 case 0x818: case 0x918: case 0xa18: case 0xb18:
2331 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2332 wrd = (insn >> 12) & 0xf;
2333 rd0 = (insn >> 16) & 0xf;
2334 rd1 = (insn >> 0) & 0xf;
2335 gen_op_iwmmxt_movq_M0_wRn(rd0);
2336 switch ((insn >> 20) & 0xf) {
2337 case 0x0:
2338 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2339 break;
2340 case 0x1:
2341 gen_op_iwmmxt_addub_M0_wRn(rd1);
2342 break;
2343 case 0x3:
2344 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2345 break;
2346 case 0x4:
2347 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2348 break;
2349 case 0x5:
2350 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2351 break;
2352 case 0x7:
2353 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2354 break;
2355 case 0x8:
2356 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2357 break;
2358 case 0x9:
2359 gen_op_iwmmxt_addul_M0_wRn(rd1);
2360 break;
2361 case 0xb:
2362 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2363 break;
2364 default:
2365 return 1;
2366 }
2367 gen_op_iwmmxt_movq_wRn_M0(wrd);
2368 gen_op_iwmmxt_set_mup();
2369 gen_op_iwmmxt_set_cup();
2370 break;
2371 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2372 case 0x408: case 0x508: case 0x608: case 0x708:
2373 case 0x808: case 0x908: case 0xa08: case 0xb08:
2374 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2375 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2376 return 1;
18c9b560
AZ
2377 wrd = (insn >> 12) & 0xf;
2378 rd0 = (insn >> 16) & 0xf;
2379 rd1 = (insn >> 0) & 0xf;
2380 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2381 switch ((insn >> 22) & 3) {
18c9b560
AZ
2382 case 1:
2383 if (insn & (1 << 21))
2384 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2385 else
2386 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2387 break;
2388 case 2:
2389 if (insn & (1 << 21))
2390 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2391 else
2392 gen_op_iwmmxt_packul_M0_wRn(rd1);
2393 break;
2394 case 3:
2395 if (insn & (1 << 21))
2396 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2397 else
2398 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2399 break;
2400 }
2401 gen_op_iwmmxt_movq_wRn_M0(wrd);
2402 gen_op_iwmmxt_set_mup();
2403 gen_op_iwmmxt_set_cup();
2404 break;
2405 case 0x201: case 0x203: case 0x205: case 0x207:
2406 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2407 case 0x211: case 0x213: case 0x215: case 0x217:
2408 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2409 wrd = (insn >> 5) & 0xf;
2410 rd0 = (insn >> 12) & 0xf;
2411 rd1 = (insn >> 0) & 0xf;
2412 if (rd0 == 0xf || rd1 == 0xf)
2413 return 1;
2414 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2415 tmp = load_reg(s, rd0);
2416 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2417 switch ((insn >> 16) & 0xf) {
2418 case 0x0: /* TMIA */
da6b5335 2419 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2420 break;
2421 case 0x8: /* TMIAPH */
da6b5335 2422 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2423 break;
2424 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2425 if (insn & (1 << 16))
da6b5335 2426 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2427 if (insn & (1 << 17))
da6b5335
FN
2428 tcg_gen_shri_i32(tmp2, tmp2, 16);
2429 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2430 break;
2431 default:
7d1b0095
PM
2432 tcg_temp_free_i32(tmp2);
2433 tcg_temp_free_i32(tmp);
18c9b560
AZ
2434 return 1;
2435 }
7d1b0095
PM
2436 tcg_temp_free_i32(tmp2);
2437 tcg_temp_free_i32(tmp);
18c9b560
AZ
2438 gen_op_iwmmxt_movq_wRn_M0(wrd);
2439 gen_op_iwmmxt_set_mup();
2440 break;
2441 default:
2442 return 1;
2443 }
2444
2445 return 0;
2446}
2447
a1c7273b 2448/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2449 (ie. an undefined instruction). */
0ecb72a5 2450static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2451{
2452 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2453 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2454
2455 if ((insn & 0x0ff00f10) == 0x0e200010) {
2456 /* Multiply with Internal Accumulate Format */
2457 rd0 = (insn >> 12) & 0xf;
2458 rd1 = insn & 0xf;
2459 acc = (insn >> 5) & 7;
2460
2461 if (acc != 0)
2462 return 1;
2463
3a554c0f
FN
2464 tmp = load_reg(s, rd0);
2465 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2466 switch ((insn >> 16) & 0xf) {
2467 case 0x0: /* MIA */
3a554c0f 2468 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2469 break;
2470 case 0x8: /* MIAPH */
3a554c0f 2471 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2472 break;
2473 case 0xc: /* MIABB */
2474 case 0xd: /* MIABT */
2475 case 0xe: /* MIATB */
2476 case 0xf: /* MIATT */
18c9b560 2477 if (insn & (1 << 16))
3a554c0f 2478 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2479 if (insn & (1 << 17))
3a554c0f
FN
2480 tcg_gen_shri_i32(tmp2, tmp2, 16);
2481 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2482 break;
2483 default:
2484 return 1;
2485 }
7d1b0095
PM
2486 tcg_temp_free_i32(tmp2);
2487 tcg_temp_free_i32(tmp);
18c9b560
AZ
2488
2489 gen_op_iwmmxt_movq_wRn_M0(acc);
2490 return 0;
2491 }
2492
2493 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2494 /* Internal Accumulator Access Format */
2495 rdhi = (insn >> 16) & 0xf;
2496 rdlo = (insn >> 12) & 0xf;
2497 acc = insn & 7;
2498
2499 if (acc != 0)
2500 return 1;
2501
2502 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2503 iwmmxt_load_reg(cpu_V0, acc);
2504 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2505 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2506 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2507 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2508 } else { /* MAR */
3a554c0f
FN
2509 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2510 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2511 }
2512 return 0;
2513 }
2514
2515 return 1;
2516}
2517
9ee6e8bb
PB
/* Shift right by n, where n may be negative (then shift left by -n).
   Used so VFP_SREG can handle a "bigbit" field at bit position 0.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: the 4-bit field at
   'bigbit' supplies bits [4:1] and the bit at 'smallbit' bit 0.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into 'reg'.  On VFP3 the
   bit at 'smallbit' is the high (fifth) register bit; pre-VFP3 cores
   only have 16 D registers, so that bit must be zero or the insn is
   UNDEFINED (note: expands to 'return 1' in the enclosing function).  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Destination (D), first source (N) and second source (M) fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2537
4373f3ce 2538/* Move between integer and VFP cores. */
39d5492a 2539static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2540{
39d5492a 2541 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2542 tcg_gen_mov_i32(tmp, cpu_F0s);
2543 return tmp;
2544}
2545
39d5492a 2546static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2547{
2548 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2549 tcg_temp_free_i32(tmp);
4373f3ce
PB
2550}
2551
39d5492a 2552static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2553{
39d5492a 2554 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2555 if (shift)
2556 tcg_gen_shri_i32(var, var, shift);
86831435 2557 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2558 tcg_gen_shli_i32(tmp, var, 8);
2559 tcg_gen_or_i32(var, var, tmp);
2560 tcg_gen_shli_i32(tmp, var, 16);
2561 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2562 tcg_temp_free_i32(tmp);
ad69471c
PB
2563}
2564
39d5492a 2565static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2566{
39d5492a 2567 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2568 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2569 tcg_gen_shli_i32(tmp, var, 16);
2570 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2571 tcg_temp_free_i32(tmp);
ad69471c
PB
2572}
2573
39d5492a 2574static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2575{
39d5492a 2576 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2577 tcg_gen_andi_i32(var, var, 0xffff0000);
2578 tcg_gen_shri_i32(tmp, var, 16);
2579 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2580 tcg_temp_free_i32(tmp);
ad69471c
PB
2581}
2582
/* Load one Neon element of log2-size 'size' (0 = byte, 1 = halfword,
   2 = word) from 'addr' and replicate it across a 32 bit TCG reg.
   The caller owns the returned temporary. */
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        /* A word already fills the register: no duplication needed. */
        gen_aa32_ld32u(tmp, addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings. */
        abort();
    }
    return tmp;
}
2604
04731fb5
WN
/* Emit code for a VSEL instruction: write rn or rm to rd depending on
 * the current NZCV flags.  cc encodes the tested condition (eq, vs, ge
 * or gt); dp selects double precision.  Always returns 0 since every
 * reachable encoding is handled.
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* ZF is only compared against zero, so zero-extension is fine;
         * NF and VF are tested via their sign, so they must be
         * sign-extended to keep the sign bit meaningful at 64 bits.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            /* V is held in the sign bit of VF, hence the LT test. */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* First select on !Z, then override with frm when N != V. */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        /* Single precision: the 32-bit flag registers are used directly. */
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2713
40cfacdd
WN
2714static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2715 uint32_t rm, uint32_t dp)
2716{
2717 uint32_t vmin = extract32(insn, 6, 1);
2718 TCGv_ptr fpst = get_fpstatus_ptr(0);
2719
2720 if (dp) {
2721 TCGv_i64 frn, frm, dest;
2722
2723 frn = tcg_temp_new_i64();
2724 frm = tcg_temp_new_i64();
2725 dest = tcg_temp_new_i64();
2726
2727 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2728 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2729 if (vmin) {
f71a2ae5 2730 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2731 } else {
f71a2ae5 2732 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2733 }
2734 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2735 tcg_temp_free_i64(frn);
2736 tcg_temp_free_i64(frm);
2737 tcg_temp_free_i64(dest);
2738 } else {
2739 TCGv_i32 frn, frm, dest;
2740
2741 frn = tcg_temp_new_i32();
2742 frm = tcg_temp_new_i32();
2743 dest = tcg_temp_new_i32();
2744
2745 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2746 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2747 if (vmin) {
f71a2ae5 2748 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2749 } else {
f71a2ae5 2750 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2751 }
2752 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2753 tcg_temp_free_i32(frn);
2754 tcg_temp_free_i32(frm);
2755 tcg_temp_free_i32(dest);
2756 }
2757
2758 tcg_temp_free_ptr(fpst);
2759 return 0;
2760}
2761
7655f39b
WN
/* Emit code for VRINT{A,N,P,M}: round rm to an integral value in
 * floating-point format using the explicitly encoded 'rounding' mode
 * (an FPROUNDING_* value), storing the result in rd.  Always
 * returns 0.
 */
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    /* Install the requested rounding mode; the helper writes the
     * previous mode back into tcg_rmode so the second call below
     * restores it.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    /* Restore the previous rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
2799
c9975a83
WN
/* Emit code for VCVT{A,N,P,M}: convert the FP value in rm to a 32 bit
 * signed or unsigned integer (bit 7 selects signed) using the
 * explicitly encoded 'rounding' mode, storing the result in the
 * single-precision register rd.  Always returns 0.
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Shift of 0: the to-fixed-point helpers are used as plain
     * FP-to-integer conversions.
     */
    tcg_shift = tcg_const_i32(0);

    /* Install the requested rounding mode; the helper writes the old
     * mode into tcg_rmode, so the second call below restores it.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* Only the low 32 bits of the conversion result are kept. */
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the previous rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
2857
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,    /* RM = 0b00 */
    FPROUNDING_TIEEVEN,    /* RM = 0b01 */
    FPROUNDING_POSINF,     /* RM = 0b10 */
    FPROUNDING_NEGINF,     /* RM = 0b11 */
};
2868
04731fb5
WN
/* Decode the VFP instructions new in ARMv8 (VSEL, VMINNM/VMAXNM,
 * VRINT*, VCVT* with explicit rounding mode) and dispatch to the
 * appropriate handler.  Returns nonzero for an undefined instruction.
 */
static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        return 1;
    }

    /* Extract register numbers; the D/S variants differ in where the
     * fifth register bit lives (and pre-VFP3 checks inside VFP_DREG
     * may return 1 directly).
     */
    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
2902
a1c7273b 2903/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2904 (ie. an undefined instruction). */
0ecb72a5 2905static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2906{
2907 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2908 int dp, veclen;
39d5492a
PM
2909 TCGv_i32 addr;
2910 TCGv_i32 tmp;
2911 TCGv_i32 tmp2;
b7bcbe95 2912
40f137e1
PB
2913 if (!arm_feature(env, ARM_FEATURE_VFP))
2914 return 1;
2915
5df8bac1 2916 if (!s->vfp_enabled) {
9ee6e8bb 2917 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2918 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2919 return 1;
2920 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2921 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2922 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2923 return 1;
2924 }
6a57f3eb
WN
2925
2926 if (extract32(insn, 28, 4) == 0xf) {
2927 /* Encodings with T=1 (Thumb) or unconditional (ARM):
2928 * only used in v8 and above.
2929 */
04731fb5 2930 return disas_vfp_v8_insn(env, s, insn);
6a57f3eb
WN
2931 }
2932
b7bcbe95
FB
2933 dp = ((insn & 0xf00) == 0xb00);
2934 switch ((insn >> 24) & 0xf) {
2935 case 0xe:
2936 if (insn & (1 << 4)) {
2937 /* single register transfer */
b7bcbe95
FB
2938 rd = (insn >> 12) & 0xf;
2939 if (dp) {
9ee6e8bb
PB
2940 int size;
2941 int pass;
2942
2943 VFP_DREG_N(rn, insn);
2944 if (insn & 0xf)
b7bcbe95 2945 return 1;
9ee6e8bb
PB
2946 if (insn & 0x00c00060
2947 && !arm_feature(env, ARM_FEATURE_NEON))
2948 return 1;
2949
2950 pass = (insn >> 21) & 1;
2951 if (insn & (1 << 22)) {
2952 size = 0;
2953 offset = ((insn >> 5) & 3) * 8;
2954 } else if (insn & (1 << 5)) {
2955 size = 1;
2956 offset = (insn & (1 << 6)) ? 16 : 0;
2957 } else {
2958 size = 2;
2959 offset = 0;
2960 }
18c9b560 2961 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2962 /* vfp->arm */
ad69471c 2963 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2964 switch (size) {
2965 case 0:
9ee6e8bb 2966 if (offset)
ad69471c 2967 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2968 if (insn & (1 << 23))
ad69471c 2969 gen_uxtb(tmp);
9ee6e8bb 2970 else
ad69471c 2971 gen_sxtb(tmp);
9ee6e8bb
PB
2972 break;
2973 case 1:
9ee6e8bb
PB
2974 if (insn & (1 << 23)) {
2975 if (offset) {
ad69471c 2976 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2977 } else {
ad69471c 2978 gen_uxth(tmp);
9ee6e8bb
PB
2979 }
2980 } else {
2981 if (offset) {
ad69471c 2982 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2983 } else {
ad69471c 2984 gen_sxth(tmp);
9ee6e8bb
PB
2985 }
2986 }
2987 break;
2988 case 2:
9ee6e8bb
PB
2989 break;
2990 }
ad69471c 2991 store_reg(s, rd, tmp);
b7bcbe95
FB
2992 } else {
2993 /* arm->vfp */
ad69471c 2994 tmp = load_reg(s, rd);
9ee6e8bb
PB
2995 if (insn & (1 << 23)) {
2996 /* VDUP */
2997 if (size == 0) {
ad69471c 2998 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2999 } else if (size == 1) {
ad69471c 3000 gen_neon_dup_low16(tmp);
9ee6e8bb 3001 }
cbbccffc 3002 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3003 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3004 tcg_gen_mov_i32(tmp2, tmp);
3005 neon_store_reg(rn, n, tmp2);
3006 }
3007 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3008 } else {
3009 /* VMOV */
3010 switch (size) {
3011 case 0:
ad69471c 3012 tmp2 = neon_load_reg(rn, pass);
d593c48e 3013 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3014 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3015 break;
3016 case 1:
ad69471c 3017 tmp2 = neon_load_reg(rn, pass);
d593c48e 3018 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3019 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3020 break;
3021 case 2:
9ee6e8bb
PB
3022 break;
3023 }
ad69471c 3024 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3025 }
b7bcbe95 3026 }
9ee6e8bb
PB
3027 } else { /* !dp */
3028 if ((insn & 0x6f) != 0x00)
3029 return 1;
3030 rn = VFP_SREG_N(insn);
18c9b560 3031 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3032 /* vfp->arm */
3033 if (insn & (1 << 21)) {
3034 /* system register */
40f137e1 3035 rn >>= 1;
9ee6e8bb 3036
b7bcbe95 3037 switch (rn) {
40f137e1 3038 case ARM_VFP_FPSID:
4373f3ce 3039 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3040 VFP3 restricts all id registers to privileged
3041 accesses. */
3042 if (IS_USER(s)
3043 && arm_feature(env, ARM_FEATURE_VFP3))
3044 return 1;
4373f3ce 3045 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3046 break;
40f137e1 3047 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3048 if (IS_USER(s))
3049 return 1;
4373f3ce 3050 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3051 break;
40f137e1
PB
3052 case ARM_VFP_FPINST:
3053 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3054 /* Not present in VFP3. */
3055 if (IS_USER(s)
3056 || arm_feature(env, ARM_FEATURE_VFP3))
3057 return 1;
4373f3ce 3058 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3059 break;
40f137e1 3060 case ARM_VFP_FPSCR:
601d70b9 3061 if (rd == 15) {
4373f3ce
PB
3062 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3063 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3064 } else {
7d1b0095 3065 tmp = tcg_temp_new_i32();
4373f3ce
PB
3066 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3067 }
b7bcbe95 3068 break;
9ee6e8bb
PB
3069 case ARM_VFP_MVFR0:
3070 case ARM_VFP_MVFR1:
3071 if (IS_USER(s)
06ed5d66 3072 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 3073 return 1;
4373f3ce 3074 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3075 break;
b7bcbe95
FB
3076 default:
3077 return 1;
3078 }
3079 } else {
3080 gen_mov_F0_vreg(0, rn);
4373f3ce 3081 tmp = gen_vfp_mrs();
b7bcbe95
FB
3082 }
3083 if (rd == 15) {
b5ff1b31 3084 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3085 gen_set_nzcv(tmp);
7d1b0095 3086 tcg_temp_free_i32(tmp);
4373f3ce
PB
3087 } else {
3088 store_reg(s, rd, tmp);
3089 }
b7bcbe95
FB
3090 } else {
3091 /* arm->vfp */
b7bcbe95 3092 if (insn & (1 << 21)) {
40f137e1 3093 rn >>= 1;
b7bcbe95
FB
3094 /* system register */
3095 switch (rn) {
40f137e1 3096 case ARM_VFP_FPSID:
9ee6e8bb
PB
3097 case ARM_VFP_MVFR0:
3098 case ARM_VFP_MVFR1:
b7bcbe95
FB
3099 /* Writes are ignored. */
3100 break;
40f137e1 3101 case ARM_VFP_FPSCR:
e4c1cfa5 3102 tmp = load_reg(s, rd);
4373f3ce 3103 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3104 tcg_temp_free_i32(tmp);
b5ff1b31 3105 gen_lookup_tb(s);
b7bcbe95 3106 break;
40f137e1 3107 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3108 if (IS_USER(s))
3109 return 1;
71b3c3de
JR
3110 /* TODO: VFP subarchitecture support.
3111 * For now, keep the EN bit only */
e4c1cfa5 3112 tmp = load_reg(s, rd);
71b3c3de 3113 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3114 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3115 gen_lookup_tb(s);
3116 break;
3117 case ARM_VFP_FPINST:
3118 case ARM_VFP_FPINST2:
e4c1cfa5 3119 tmp = load_reg(s, rd);
4373f3ce 3120 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3121 break;
b7bcbe95
FB
3122 default:
3123 return 1;
3124 }
3125 } else {
e4c1cfa5 3126 tmp = load_reg(s, rd);
4373f3ce 3127 gen_vfp_msr(tmp);
b7bcbe95
FB
3128 gen_mov_vreg_F0(0, rn);
3129 }
3130 }
3131 }
3132 } else {
3133 /* data processing */
3134 /* The opcode is in bits 23, 21, 20 and 6. */
3135 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3136 if (dp) {
3137 if (op == 15) {
3138 /* rn is opcode */
3139 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3140 } else {
3141 /* rn is register number */
9ee6e8bb 3142 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3143 }
3144
239c20c7
WN
3145 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3146 ((rn & 0x1e) == 0x6))) {
3147 /* Integer or single/half precision destination. */
9ee6e8bb 3148 rd = VFP_SREG_D(insn);
b7bcbe95 3149 } else {
9ee6e8bb 3150 VFP_DREG_D(rd, insn);
b7bcbe95 3151 }
04595bf6 3152 if (op == 15 &&
239c20c7
WN
3153 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3154 ((rn & 0x1e) == 0x4))) {
3155 /* VCVT from int or half precision is always from S reg
3156 * regardless of dp bit. VCVT with immediate frac_bits
3157 * has same format as SREG_M.
04595bf6
PM
3158 */
3159 rm = VFP_SREG_M(insn);
b7bcbe95 3160 } else {
9ee6e8bb 3161 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3162 }
3163 } else {
9ee6e8bb 3164 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3165 if (op == 15 && rn == 15) {
3166 /* Double precision destination. */
9ee6e8bb
PB
3167 VFP_DREG_D(rd, insn);
3168 } else {
3169 rd = VFP_SREG_D(insn);
3170 }
04595bf6
PM
3171 /* NB that we implicitly rely on the encoding for the frac_bits
3172 * in VCVT of fixed to float being the same as that of an SREG_M
3173 */
9ee6e8bb 3174 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3175 }
3176
69d1fc22 3177 veclen = s->vec_len;
b7bcbe95
FB
3178 if (op == 15 && rn > 3)
3179 veclen = 0;
3180
3181 /* Shut up compiler warnings. */
3182 delta_m = 0;
3183 delta_d = 0;
3184 bank_mask = 0;
3b46e624 3185
b7bcbe95
FB
3186 if (veclen > 0) {
3187 if (dp)
3188 bank_mask = 0xc;
3189 else
3190 bank_mask = 0x18;
3191
3192 /* Figure out what type of vector operation this is. */
3193 if ((rd & bank_mask) == 0) {
3194 /* scalar */
3195 veclen = 0;
3196 } else {
3197 if (dp)
69d1fc22 3198 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3199 else
69d1fc22 3200 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3201
3202 if ((rm & bank_mask) == 0) {
3203 /* mixed scalar/vector */
3204 delta_m = 0;
3205 } else {
3206 /* vector */
3207 delta_m = delta_d;
3208 }
3209 }
3210 }
3211
3212 /* Load the initial operands. */
3213 if (op == 15) {
3214 switch (rn) {
3215 case 16:
3216 case 17:
3217 /* Integer source */
3218 gen_mov_F0_vreg(0, rm);
3219 break;
3220 case 8:
3221 case 9:
3222 /* Compare */
3223 gen_mov_F0_vreg(dp, rd);
3224 gen_mov_F1_vreg(dp, rm);
3225 break;
3226 case 10:
3227 case 11:
3228 /* Compare with zero */
3229 gen_mov_F0_vreg(dp, rd);
3230 gen_vfp_F1_ld0(dp);
3231 break;
9ee6e8bb
PB
3232 case 20:
3233 case 21:
3234 case 22:
3235 case 23:
644ad806
PB
3236 case 28:
3237 case 29:
3238 case 30:
3239 case 31:
9ee6e8bb
PB
3240 /* Source and destination the same. */
3241 gen_mov_F0_vreg(dp, rd);
3242 break;
6e0c0ed1
PM
3243 case 4:
3244 case 5:
3245 case 6:
3246 case 7:
239c20c7
WN
3247 /* VCVTB, VCVTT: only present with the halfprec extension
3248 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3249 * (we choose to UNDEF)
6e0c0ed1 3250 */
239c20c7
WN
3251 if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
3252 !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3253 return 1;
3254 }
239c20c7
WN
3255 if (!extract32(rn, 1, 1)) {
3256 /* Half precision source. */
3257 gen_mov_F0_vreg(0, rm);
3258 break;
3259 }
6e0c0ed1 3260 /* Otherwise fall through */
b7bcbe95
FB
3261 default:
3262 /* One source operand. */
3263 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3264 break;
b7bcbe95
FB
3265 }
3266 } else {
3267 /* Two source operands. */
3268 gen_mov_F0_vreg(dp, rn);
3269 gen_mov_F1_vreg(dp, rm);
3270 }
3271
3272 for (;;) {
3273 /* Perform the calculation. */
3274 switch (op) {
605a6aed
PM
3275 case 0: /* VMLA: fd + (fn * fm) */
3276 /* Note that order of inputs to the add matters for NaNs */
3277 gen_vfp_F1_mul(dp);
3278 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3279 gen_vfp_add(dp);
3280 break;
605a6aed 3281 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3282 gen_vfp_mul(dp);
605a6aed
PM
3283 gen_vfp_F1_neg(dp);
3284 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3285 gen_vfp_add(dp);
3286 break;
605a6aed
PM
3287 case 2: /* VNMLS: -fd + (fn * fm) */
3288 /* Note that it isn't valid to replace (-A + B) with (B - A)
3289 * or similar plausible looking simplifications
3290 * because this will give wrong results for NaNs.
3291 */
3292 gen_vfp_F1_mul(dp);
3293 gen_mov_F0_vreg(dp, rd);
3294 gen_vfp_neg(dp);
3295 gen_vfp_add(dp);
b7bcbe95 3296 break;
605a6aed 3297 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3298 gen_vfp_mul(dp);
605a6aed
PM
3299 gen_vfp_F1_neg(dp);
3300 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3301 gen_vfp_neg(dp);
605a6aed 3302 gen_vfp_add(dp);
b7bcbe95
FB
3303 break;
3304 case 4: /* mul: fn * fm */
3305 gen_vfp_mul(dp);
3306 break;
3307 case 5: /* nmul: -(fn * fm) */
3308 gen_vfp_mul(dp);
3309 gen_vfp_neg(dp);
3310 break;
3311 case 6: /* add: fn + fm */
3312 gen_vfp_add(dp);
3313 break;
3314 case 7: /* sub: fn - fm */
3315 gen_vfp_sub(dp);
3316 break;
3317 case 8: /* div: fn / fm */
3318 gen_vfp_div(dp);
3319 break;
da97f52c
PM
3320 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3321 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3322 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3323 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3324 /* These are fused multiply-add, and must be done as one
3325 * floating point operation with no rounding between the
3326 * multiplication and addition steps.
3327 * NB that doing the negations here as separate steps is
3328 * correct : an input NaN should come out with its sign bit
3329 * flipped if it is a negated-input.
3330 */
3331 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3332 return 1;
3333 }
3334 if (dp) {
3335 TCGv_ptr fpst;
3336 TCGv_i64 frd;
3337 if (op & 1) {
3338 /* VFNMS, VFMS */
3339 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3340 }
3341 frd = tcg_temp_new_i64();
3342 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3343 if (op & 2) {
3344 /* VFNMA, VFNMS */
3345 gen_helper_vfp_negd(frd, frd);
3346 }
3347 fpst = get_fpstatus_ptr(0);
3348 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3349 cpu_F1d, frd, fpst);
3350 tcg_temp_free_ptr(fpst);
3351 tcg_temp_free_i64(frd);
3352 } else {
3353 TCGv_ptr fpst;
3354 TCGv_i32 frd;
3355 if (op & 1) {
3356 /* VFNMS, VFMS */
3357 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3358 }
3359 frd = tcg_temp_new_i32();
3360 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3361 if (op & 2) {
3362 gen_helper_vfp_negs(frd, frd);
3363 }
3364 fpst = get_fpstatus_ptr(0);
3365 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3366 cpu_F1s, frd, fpst);
3367 tcg_temp_free_ptr(fpst);
3368 tcg_temp_free_i32(frd);
3369 }
3370 break;
9ee6e8bb
PB
3371 case 14: /* fconst */
3372 if (!arm_feature(env, ARM_FEATURE_VFP3))
3373 return 1;
3374
3375 n = (insn << 12) & 0x80000000;
3376 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3377 if (dp) {
3378 if (i & 0x40)
3379 i |= 0x3f80;
3380 else
3381 i |= 0x4000;
3382 n |= i << 16;
4373f3ce 3383 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3384 } else {
3385 if (i & 0x40)
3386 i |= 0x780;
3387 else
3388 i |= 0x800;
3389 n |= i << 19;
5b340b51 3390 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3391 }
9ee6e8bb 3392 break;
b7bcbe95
FB
3393 case 15: /* extension space */
3394 switch (rn) {
3395 case 0: /* cpy */
3396 /* no-op */
3397 break;
3398 case 1: /* abs */
3399 gen_vfp_abs(dp);
3400 break;
3401 case 2: /* neg */
3402 gen_vfp_neg(dp);
3403 break;
3404 case 3: /* sqrt */
3405 gen_vfp_sqrt(dp);
3406 break;
239c20c7 3407 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3408 tmp = gen_vfp_mrs();
3409 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3410 if (dp) {
3411 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3412 cpu_env);
3413 } else {
3414 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3415 cpu_env);
3416 }
7d1b0095 3417 tcg_temp_free_i32(tmp);
60011498 3418 break;
239c20c7 3419 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3420 tmp = gen_vfp_mrs();
3421 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3422 if (dp) {
3423 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3424 cpu_env);
3425 } else {
3426 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3427 cpu_env);
3428 }
7d1b0095 3429 tcg_temp_free_i32(tmp);
60011498 3430 break;
239c20c7 3431 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3432 tmp = tcg_temp_new_i32();
239c20c7
WN
3433 if (dp) {
3434 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3435 cpu_env);
3436 } else {
3437 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3438 cpu_env);
3439 }
60011498
PB
3440 gen_mov_F0_vreg(0, rd);
3441 tmp2 = gen_vfp_mrs();
3442 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3443 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3444 tcg_temp_free_i32(tmp2);
60011498
PB
3445 gen_vfp_msr(tmp);
3446 break;
239c20c7 3447 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3448 tmp = tcg_temp_new_i32();
239c20c7
WN
3449 if (dp) {
3450 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3451 cpu_env);
3452 } else {
3453 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3454 cpu_env);
3455 }
60011498
PB
3456 tcg_gen_shli_i32(tmp, tmp, 16);
3457 gen_mov_F0_vreg(0, rd);
3458 tmp2 = gen_vfp_mrs();
3459 tcg_gen_ext16u_i32(tmp2, tmp2);
3460 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3461 tcg_temp_free_i32(tmp2);
60011498
PB
3462 gen_vfp_msr(tmp);
3463 break;
b7bcbe95
FB
3464 case 8: /* cmp */
3465 gen_vfp_cmp(dp);
3466 break;
3467 case 9: /* cmpe */
3468 gen_vfp_cmpe(dp);
3469 break;
3470 case 10: /* cmpz */
3471 gen_vfp_cmp(dp);
3472 break;
3473 case 11: /* cmpez */
3474 gen_vfp_F1_ld0(dp);
3475 gen_vfp_cmpe(dp);
3476 break;
664c6733
WN
3477 case 12: /* vrintr */
3478 {
3479 TCGv_ptr fpst = get_fpstatus_ptr(0);
3480 if (dp) {
3481 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3482 } else {
3483 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3484 }
3485 tcg_temp_free_ptr(fpst);
3486 break;
3487 }
a290c62a
WN
3488 case 13: /* vrintz */
3489 {
3490 TCGv_ptr fpst = get_fpstatus_ptr(0);
3491 TCGv_i32 tcg_rmode;
3492 tcg_rmode = tcg_const_i32(float_round_to_zero);
3493 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3494 if (dp) {
3495 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3496 } else {
3497 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3498 }
3499 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3500 tcg_temp_free_i32(tcg_rmode);
3501 tcg_temp_free_ptr(fpst);
3502 break;
3503 }
4e82bc01
WN
3504 case 14: /* vrintx */
3505 {
3506 TCGv_ptr fpst = get_fpstatus_ptr(0);
3507 if (dp) {
3508 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3509 } else {
3510 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3511 }
3512 tcg_temp_free_ptr(fpst);
3513 break;
3514 }
b7bcbe95
FB
3515 case 15: /* single<->double conversion */
3516 if (dp)
4373f3ce 3517 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3518 else
4373f3ce 3519 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3520 break;
3521 case 16: /* fuito */
5500b06c 3522 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3523 break;
3524 case 17: /* fsito */
5500b06c 3525 gen_vfp_sito(dp, 0);
b7bcbe95 3526 break;
9ee6e8bb
PB
3527 case 20: /* fshto */
3528 if (!arm_feature(env, ARM_FEATURE_VFP3))
3529 return 1;
5500b06c 3530 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3531 break;
3532 case 21: /* fslto */
3533 if (!arm_feature(env, ARM_FEATURE_VFP3))
3534 return 1;
5500b06c 3535 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3536 break;
3537 case 22: /* fuhto */
3538 if (!arm_feature(env, ARM_FEATURE_VFP3))
3539 return 1;
5500b06c 3540 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3541 break;
3542 case 23: /* fulto */
3543 if (!arm_feature(env, ARM_FEATURE_VFP3))
3544 return 1;
5500b06c 3545 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3546 break;
b7bcbe95 3547 case 24: /* ftoui */
5500b06c 3548 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3549 break;
3550 case 25: /* ftouiz */
5500b06c 3551 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3552 break;
3553 case 26: /* ftosi */
5500b06c 3554 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3555 break;
3556 case 27: /* ftosiz */
5500b06c 3557 gen_vfp_tosiz(dp, 0);
b7bcbe95 3558 break;
9ee6e8bb
PB
3559 case 28: /* ftosh */
3560 if (!arm_feature(env, ARM_FEATURE_VFP3))
3561 return 1;
5500b06c 3562 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3563 break;
3564 case 29: /* ftosl */
3565 if (!arm_feature(env, ARM_FEATURE_VFP3))
3566 return 1;
5500b06c 3567 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3568 break;
3569 case 30: /* ftouh */
3570 if (!arm_feature(env, ARM_FEATURE_VFP3))
3571 return 1;
5500b06c 3572 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3573 break;
3574 case 31: /* ftoul */
3575 if (!arm_feature(env, ARM_FEATURE_VFP3))
3576 return 1;
5500b06c 3577 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3578 break;
b7bcbe95 3579 default: /* undefined */
b7bcbe95
FB
3580 return 1;
3581 }
3582 break;
3583 default: /* undefined */
b7bcbe95
FB
3584 return 1;
3585 }
3586
3587 /* Write back the result. */
239c20c7
WN
3588 if (op == 15 && (rn >= 8 && rn <= 11)) {
3589 /* Comparison, do nothing. */
3590 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3591 (rn & 0x1e) == 0x6)) {
3592 /* VCVT double to int: always integer result.
3593 * VCVT double to half precision is always a single
3594 * precision result.
3595 */
b7bcbe95 3596 gen_mov_vreg_F0(0, rd);
239c20c7 3597 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3598 /* conversion */
3599 gen_mov_vreg_F0(!dp, rd);
239c20c7 3600 } else {
b7bcbe95 3601 gen_mov_vreg_F0(dp, rd);
239c20c7 3602 }
b7bcbe95
FB
3603
3604 /* break out of the loop if we have finished */
3605 if (veclen == 0)
3606 break;
3607
3608 if (op == 15 && delta_m == 0) {
3609 /* single source one-many */
3610 while (veclen--) {
3611 rd = ((rd + delta_d) & (bank_mask - 1))
3612 | (rd & bank_mask);
3613 gen_mov_vreg_F0(dp, rd);
3614 }
3615 break;
3616 }
3617 /* Setup the next operands. */
3618 veclen--;
3619 rd = ((rd + delta_d) & (bank_mask - 1))
3620 | (rd & bank_mask);
3621
3622 if (op == 15) {
3623 /* One source operand. */
3624 rm = ((rm + delta_m) & (bank_mask - 1))
3625 | (rm & bank_mask);
3626 gen_mov_F0_vreg(dp, rm);
3627 } else {
3628 /* Two source operands. */
3629 rn = ((rn + delta_d) & (bank_mask - 1))
3630 | (rn & bank_mask);
3631 gen_mov_F0_vreg(dp, rn);
3632 if (delta_m) {
3633 rm = ((rm + delta_m) & (bank_mask - 1))
3634 | (rm & bank_mask);
3635 gen_mov_F1_vreg(dp, rm);
3636 }
3637 }
3638 }
3639 }
3640 break;
3641 case 0xc:
3642 case 0xd:
8387da81 3643 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3644 /* two-register transfer */
3645 rn = (insn >> 16) & 0xf;
3646 rd = (insn >> 12) & 0xf;
3647 if (dp) {
9ee6e8bb
PB
3648 VFP_DREG_M(rm, insn);
3649 } else {
3650 rm = VFP_SREG_M(insn);
3651 }
b7bcbe95 3652
18c9b560 3653 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3654 /* vfp->arm */
3655 if (dp) {
4373f3ce
PB
3656 gen_mov_F0_vreg(0, rm * 2);
3657 tmp = gen_vfp_mrs();
3658 store_reg(s, rd, tmp);
3659 gen_mov_F0_vreg(0, rm * 2 + 1);
3660 tmp = gen_vfp_mrs();
3661 store_reg(s, rn, tmp);
b7bcbe95
FB
3662 } else {
3663 gen_mov_F0_vreg(0, rm);
4373f3ce 3664 tmp = gen_vfp_mrs();
8387da81 3665 store_reg(s, rd, tmp);
b7bcbe95 3666 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3667 tmp = gen_vfp_mrs();
8387da81 3668 store_reg(s, rn, tmp);
b7bcbe95
FB
3669 }
3670 } else {
3671 /* arm->vfp */
3672 if (dp) {
4373f3ce
PB
3673 tmp = load_reg(s, rd);
3674 gen_vfp_msr(tmp);
3675 gen_mov_vreg_F0(0, rm * 2);
3676 tmp = load_reg(s, rn);
3677 gen_vfp_msr(tmp);
3678 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3679 } else {
8387da81 3680 tmp = load_reg(s, rd);
4373f3ce 3681 gen_vfp_msr(tmp);
b7bcbe95 3682 gen_mov_vreg_F0(0, rm);
8387da81 3683 tmp = load_reg(s, rn);
4373f3ce 3684 gen_vfp_msr(tmp);
b7bcbe95
FB
3685 gen_mov_vreg_F0(0, rm + 1);
3686 }
3687 }
3688 } else {
3689 /* Load/store */
3690 rn = (insn >> 16) & 0xf;
3691 if (dp)
9ee6e8bb 3692 VFP_DREG_D(rd, insn);
b7bcbe95 3693 else
9ee6e8bb 3694 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3695 if ((insn & 0x01200000) == 0x01000000) {
3696 /* Single load/store */
3697 offset = (insn & 0xff) << 2;
3698 if ((insn & (1 << 23)) == 0)
3699 offset = -offset;
934814f1
PM
3700 if (s->thumb && rn == 15) {
3701 /* This is actually UNPREDICTABLE */
3702 addr = tcg_temp_new_i32();
3703 tcg_gen_movi_i32(addr, s->pc & ~2);
3704 } else {
3705 addr = load_reg(s, rn);
3706 }
312eea9f 3707 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3708 if (insn & (1 << 20)) {
312eea9f 3709 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3710 gen_mov_vreg_F0(dp, rd);
3711 } else {
3712 gen_mov_F0_vreg(dp, rd);
312eea9f 3713 gen_vfp_st(s, dp, addr);
b7bcbe95 3714 }
7d1b0095 3715 tcg_temp_free_i32(addr);
b7bcbe95
FB
3716 } else {
3717 /* load/store multiple */
934814f1 3718 int w = insn & (1 << 21);
b7bcbe95
FB
3719 if (dp)
3720 n = (insn >> 1) & 0x7f;
3721 else
3722 n = insn & 0xff;
3723
934814f1
PM
3724 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3725 /* P == U , W == 1 => UNDEF */
3726 return 1;
3727 }
3728 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3729 /* UNPREDICTABLE cases for bad immediates: we choose to
3730 * UNDEF to avoid generating huge numbers of TCG ops
3731 */
3732 return 1;
3733 }
3734 if (rn == 15 && w) {
3735 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3736 return 1;
3737 }
3738
3739 if (s->thumb && rn == 15) {
3740 /* This is actually UNPREDICTABLE */
3741 addr = tcg_temp_new_i32();
3742 tcg_gen_movi_i32(addr, s->pc & ~2);
3743 } else {
3744 addr = load_reg(s, rn);
3745 }
b7bcbe95 3746 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3747 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3748
3749 if (dp)
3750 offset = 8;
3751 else
3752 offset = 4;
3753 for (i = 0; i < n; i++) {
18c9b560 3754 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3755 /* load */
312eea9f 3756 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3757 gen_mov_vreg_F0(dp, rd + i);
3758 } else {
3759 /* store */
3760 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3761 gen_vfp_st(s, dp, addr);
b7bcbe95 3762 }
312eea9f 3763 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3764 }
934814f1 3765 if (w) {
b7bcbe95
FB
3766 /* writeback */
3767 if (insn & (1 << 24))
3768 offset = -offset * n;
3769 else if (dp && (insn & 1))
3770 offset = 4;
3771 else
3772 offset = 0;
3773
3774 if (offset != 0)
312eea9f
FN
3775 tcg_gen_addi_i32(addr, addr, offset);
3776 store_reg(s, rn, addr);
3777 } else {
7d1b0095 3778 tcg_temp_free_i32(addr);
b7bcbe95
FB
3779 }
3780 }
3781 }
3782 break;
3783 default:
3784 /* Should never happen. */
3785 return 1;
3786 }
3787 return 0;
3788}
3789
0a2461fa 3790static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3791{
6e256c93
FB
3792 TranslationBlock *tb;
3793
3794 tb = s->tb;
3795 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3796 tcg_gen_goto_tb(n);
eaed129d 3797 gen_set_pc_im(s, dest);
8cfd0495 3798 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3799 } else {
eaed129d 3800 gen_set_pc_im(s, dest);
57fec1fe 3801 tcg_gen_exit_tb(0);
6e256c93 3802 }
c53be334
FB
3803}
3804
8aaca4c0
FB
3805static inline void gen_jmp (DisasContext *s, uint32_t dest)
3806{
551bd27f 3807 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3808 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3809 if (s->thumb)
d9ba4830
PB
3810 dest |= 1;
3811 gen_bx_im(s, dest);
8aaca4c0 3812 } else {
6e256c93 3813 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3814 s->is_jmp = DISAS_TB_JUMP;
3815 }
3816}
3817
39d5492a 3818static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3819{
ee097184 3820 if (x)
d9ba4830 3821 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3822 else
d9ba4830 3823 gen_sxth(t0);
ee097184 3824 if (y)
d9ba4830 3825 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3826 else
d9ba4830
PB
3827 gen_sxth(t1);
3828 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3829}
3830
3831/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3832static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3833 uint32_t mask;
3834
3835 mask = 0;
3836 if (flags & (1 << 0))
3837 mask |= 0xff;
3838 if (flags & (1 << 1))
3839 mask |= 0xff00;
3840 if (flags & (1 << 2))
3841 mask |= 0xff0000;
3842 if (flags & (1 << 3))
3843 mask |= 0xff000000;
9ee6e8bb 3844
2ae23e75 3845 /* Mask out undefined bits. */
9ee6e8bb 3846 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3847 if (!arm_feature(env, ARM_FEATURE_V4T))
3848 mask &= ~CPSR_T;
3849 if (!arm_feature(env, ARM_FEATURE_V5))
3850 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3851 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3852 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3853 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3854 mask &= ~CPSR_IT;
9ee6e8bb 3855 /* Mask out execution state bits. */
2ae23e75 3856 if (!spsr)
e160c51c 3857 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3858 /* Mask out privileged bits. */
3859 if (IS_USER(s))
9ee6e8bb 3860 mask &= CPSR_USER;
b5ff1b31
FB
3861 return mask;
3862}
3863
2fbac54b 3864/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3865static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3866{
39d5492a 3867 TCGv_i32 tmp;
b5ff1b31
FB
3868 if (spsr) {
3869 /* ??? This is also undefined in system mode. */
3870 if (IS_USER(s))
3871 return 1;
d9ba4830
PB
3872
3873 tmp = load_cpu_field(spsr);
3874 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3875 tcg_gen_andi_i32(t0, t0, mask);
3876 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3877 store_cpu_field(tmp, spsr);
b5ff1b31 3878 } else {
2fbac54b 3879 gen_set_cpsr(t0, mask);
b5ff1b31 3880 }
7d1b0095 3881 tcg_temp_free_i32(t0);
b5ff1b31
FB
3882 gen_lookup_tb(s);
3883 return 0;
3884}
3885
2fbac54b
FN
3886/* Returns nonzero if access to the PSR is not permitted. */
3887static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3888{
39d5492a 3889 TCGv_i32 tmp;
7d1b0095 3890 tmp = tcg_temp_new_i32();
2fbac54b
FN
3891 tcg_gen_movi_i32(tmp, val);
3892 return gen_set_psr(s, mask, spsr, tmp);
3893}
3894
e9bb4aa9 3895/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3896static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3897{
39d5492a 3898 TCGv_i32 tmp;
e9bb4aa9 3899 store_reg(s, 15, pc);
d9ba4830
PB
3900 tmp = load_cpu_field(spsr);
3901 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3902 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3903 s->is_jmp = DISAS_UPDATE;
3904}
3905
b0109805 3906/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3907static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3908{
b0109805 3909 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3910 tcg_temp_free_i32(cpsr);
b0109805 3911 store_reg(s, 15, pc);
9ee6e8bb
PB
3912 s->is_jmp = DISAS_UPDATE;
3913}
3b46e624 3914
9ee6e8bb
PB
3915static inline void
3916gen_set_condexec (DisasContext *s)
3917{
3918 if (s->condexec_mask) {
8f01245e 3919 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
39d5492a 3920 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 3921 tcg_gen_movi_i32(tmp, val);
d9ba4830 3922 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3923 }
3924}
3b46e624 3925
bc4a0de0
PM
3926static void gen_exception_insn(DisasContext *s, int offset, int excp)
3927{
3928 gen_set_condexec(s);
eaed129d 3929 gen_set_pc_im(s, s->pc - offset);
bc4a0de0
PM
3930 gen_exception(excp);
3931 s->is_jmp = DISAS_JUMP;
3932}
3933
9ee6e8bb
PB
3934static void gen_nop_hint(DisasContext *s, int val)
3935{
3936 switch (val) {
3937 case 3: /* wfi */
eaed129d 3938 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
3939 s->is_jmp = DISAS_WFI;
3940 break;
3941 case 2: /* wfe */
3942 case 4: /* sev */
12b10571
MR
3943 case 5: /* sevl */
3944 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3945 default: /* nop */
3946 break;
3947 }
3948}
99c475ab 3949
ad69471c 3950#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3951
39d5492a 3952static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3953{
3954 switch (size) {
dd8fbd78
FN
3955 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3956 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3957 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3958 default: abort();
9ee6e8bb 3959 }
9ee6e8bb
PB
3960}
3961
39d5492a 3962static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3963{
3964 switch (size) {
dd8fbd78
FN
3965 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3966 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3967 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3968 default: return;
3969 }
3970}
3971
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Dispatch an env-taking Neon integer helper on the (size, u) fields in
 * scope: tmp = helper(env, tmp, tmp2).  Executes "return 1" (UNDEF the
 * instruction) for an unsupported size/signedness combination, so this
 * may only be used inside a function returning int.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4023
39d5492a 4024static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4025{
39d5492a 4026 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4027 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4028 return tmp;
9ee6e8bb
PB
4029}
4030
39d5492a 4031static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4032{
dd8fbd78 4033 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4034 tcg_temp_free_i32(var);
9ee6e8bb
PB
4035}
4036
39d5492a 4037static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4038{
39d5492a 4039 TCGv_i32 tmp;
9ee6e8bb 4040 if (size == 1) {
0fad6efc
PM
4041 tmp = neon_load_reg(reg & 7, reg >> 4);
4042 if (reg & 8) {
dd8fbd78 4043 gen_neon_dup_high16(tmp);
0fad6efc
PM
4044 } else {
4045 gen_neon_dup_low16(tmp);
dd8fbd78 4046 }
0fad6efc
PM
4047 } else {
4048 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4049 }
dd8fbd78 4050 return tmp;
9ee6e8bb
PB
4051}
4052
02acedf9 4053static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4054{
39d5492a 4055 TCGv_i32 tmp, tmp2;
600b828c 4056 if (!q && size == 2) {
02acedf9
PM
4057 return 1;
4058 }
4059 tmp = tcg_const_i32(rd);
4060 tmp2 = tcg_const_i32(rm);
4061 if (q) {
4062 switch (size) {
4063 case 0:
02da0b2d 4064 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4065 break;
4066 case 1:
02da0b2d 4067 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4068 break;
4069 case 2:
02da0b2d 4070 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4071 break;
4072 default:
4073 abort();
4074 }
4075 } else {
4076 switch (size) {
4077 case 0:
02da0b2d 4078 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4079 break;
4080 case 1:
02da0b2d 4081 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4082 break;
4083 default:
4084 abort();
4085 }
4086 }
4087 tcg_temp_free_i32(tmp);
4088 tcg_temp_free_i32(tmp2);
4089 return 0;
19457615
FN
4090}
4091
d68a6f3a 4092static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4093{
39d5492a 4094 TCGv_i32 tmp, tmp2;
600b828c 4095 if (!q && size == 2) {
d68a6f3a
PM
4096 return 1;
4097 }
4098 tmp = tcg_const_i32(rd);
4099 tmp2 = tcg_const_i32(rm);
4100 if (q) {
4101 switch (size) {
4102 case 0:
02da0b2d 4103 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4104 break;
4105 case 1:
02da0b2d 4106 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4107 break;
4108 case 2:
02da0b2d 4109 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4110 break;
4111 default:
4112 abort();
4113 }
4114 } else {
4115 switch (size) {
4116 case 0:
02da0b2d 4117 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4118 break;
4119 case 1:
02da0b2d 4120 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4121 break;
4122 default:
4123 abort();
4124 }
4125 }
4126 tcg_temp_free_i32(tmp);
4127 tcg_temp_free_i32(tmp2);
4128 return 0;
19457615
FN
4129}
4130
39d5492a 4131static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4132{
39d5492a 4133 TCGv_i32 rd, tmp;
19457615 4134
7d1b0095
PM
4135 rd = tcg_temp_new_i32();
4136 tmp = tcg_temp_new_i32();
19457615
FN
4137
4138 tcg_gen_shli_i32(rd, t0, 8);
4139 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4140 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4141 tcg_gen_or_i32(rd, rd, tmp);
4142
4143 tcg_gen_shri_i32(t1, t1, 8);
4144 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4145 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4146 tcg_gen_or_i32(t1, t1, tmp);
4147 tcg_gen_mov_i32(t0, rd);
4148
7d1b0095
PM
4149 tcg_temp_free_i32(tmp);
4150 tcg_temp_free_i32(rd);
19457615
FN
4151}
4152
39d5492a 4153static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4154{
39d5492a 4155 TCGv_i32 rd, tmp;
19457615 4156
7d1b0095
PM
4157 rd = tcg_temp_new_i32();
4158 tmp = tcg_temp_new_i32();
19457615
FN
4159
4160 tcg_gen_shli_i32(rd, t0, 16);
4161 tcg_gen_andi_i32(tmp, t1, 0xffff);
4162 tcg_gen_or_i32(rd, rd, tmp);
4163 tcg_gen_shri_i32(t1, t1, 16);
4164 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4165 tcg_gen_or_i32(t1, t1, tmp);
4166 tcg_gen_mov_i32(t0, rd);
4167
7d1b0095
PM
4168 tcg_temp_free_i32(tmp);
4169 tcg_temp_free_i32(rd);
19457615
FN
4170}
4171
4172
/* Transfer parameters for the VLDn/VSTn "multiple structures" forms,
 * indexed by the op field (bits [11:8]) of the instruction.  The table
 * is read-only, so declare it const (it was previously mutable file-scope
 * data for no reason).
 */
static const struct {
    int nregs;      /* number of D registers transferred */
    int interleave; /* element interleave factor */
    int spacing;    /* D-register spacing (1: consecutive, 2: alternate) */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4190
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   Covers three encoding groups: "load/store multiple structures"
   (bit 23 clear), "single element to all lanes" (bit 23 set, size
   field == 3) and "single element to one lane" (bit 23 set, size < 3).
   In all cases an optional base-register writeback (rm != 15) is
   generated at the end.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;    /* base address register */
    rm = insn & 0xf;            /* index register / writeback selector */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        /* 64-bit elements are only valid without interleaving/spacing. */
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved accesses restart from the base address with a
             * per-register offset rather than running on linearly.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* One 64-bit access moves a whole D register.  */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, IS_USER(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Two 32-bit passes per D register.  */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Assemble/split two 16-bit elements per pass.  */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Assemble/split four 8-bit elements per pass.  */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, IS_USER(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded 8/16-bit element into the
                         * existing register contents at the lane offset.  */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means
     * post-increment by the transfer size, otherwise add register rm.  */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4509
8f8e3aa4 4510/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4511static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4512{
4513 tcg_gen_and_i32(t, t, c);
f669df27 4514 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4515 tcg_gen_or_i32(dest, t, f);
4516}
4517
39d5492a 4518static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4519{
4520 switch (size) {
4521 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4522 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4523 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4524 default: abort();
4525 }
4526}
4527
39d5492a 4528static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4529{
4530 switch (size) {
02da0b2d
PM
4531 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4532 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4533 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4534 default: abort();
4535 }
4536}
4537
39d5492a 4538static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4539{
4540 switch (size) {
02da0b2d
PM
4541 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4542 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4543 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4544 default: abort();
4545 }
4546}
4547
39d5492a 4548static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4549{
4550 switch (size) {
02da0b2d
PM
4551 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4552 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4553 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4554 default: abort();
4555 }
4556}
4557
39d5492a 4558static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4559 int q, int u)
4560{
4561 if (q) {
4562 if (u) {
4563 switch (size) {
4564 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4565 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4566 default: abort();
4567 }
4568 } else {
4569 switch (size) {
4570 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4571 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4572 default: abort();
4573 }
4574 }
4575 } else {
4576 if (u) {
4577 switch (size) {
b408a9b0
CL
4578 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4579 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4580 default: abort();
4581 }
4582 } else {
4583 switch (size) {
4584 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4585 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4586 default: abort();
4587 }
4588 }
4589 }
4590}
4591
39d5492a 4592static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4593{
4594 if (u) {
4595 switch (size) {
4596 case 0: gen_helper_neon_widen_u8(dest, src); break;
4597 case 1: gen_helper_neon_widen_u16(dest, src); break;
4598 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4599 default: abort();
4600 }
4601 } else {
4602 switch (size) {
4603 case 0: gen_helper_neon_widen_s8(dest, src); break;
4604 case 1: gen_helper_neon_widen_s16(dest, src); break;
4605 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4606 default: abort();
4607 }
4608 }
7d1b0095 4609 tcg_temp_free_i32(src);
ad69471c
PB
4610}
4611
4612static inline void gen_neon_addl(int size)
4613{
4614 switch (size) {
4615 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4616 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4617 case 2: tcg_gen_add_i64(CPU_V001); break;
4618 default: abort();
4619 }
4620}
4621
4622static inline void gen_neon_subl(int size)
4623{
4624 switch (size) {
4625 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4626 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4627 case 2: tcg_gen_sub_i64(CPU_V001); break;
4628 default: abort();
4629 }
4630}
4631
a7812ae4 4632static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4633{
4634 switch (size) {
4635 case 0: gen_helper_neon_negl_u16(var, var); break;
4636 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4637 case 2:
4638 tcg_gen_neg_i64(var, var);
4639 break;
ad69471c
PB
4640 default: abort();
4641 }
4642}
4643
a7812ae4 4644static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4645{
4646 switch (size) {
02da0b2d
PM
4647 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4648 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4649 default: abort();
4650 }
4651}
4652
39d5492a
PM
4653static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4654 int size, int u)
ad69471c 4655{
a7812ae4 4656 TCGv_i64 tmp;
ad69471c
PB
4657
4658 switch ((size << 1) | u) {
4659 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4660 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4661 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4662 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4663 case 4:
4664 tmp = gen_muls_i64_i32(a, b);
4665 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4666 tcg_temp_free_i64(tmp);
ad69471c
PB
4667 break;
4668 case 5:
4669 tmp = gen_mulu_i64_i32(a, b);
4670 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4671 tcg_temp_free_i64(tmp);
ad69471c
PB
4672 break;
4673 default: abort();
4674 }
c6067f04
CL
4675
4676 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4677 Don't forget to clean them now. */
4678 if (size < 2) {
7d1b0095
PM
4679 tcg_temp_free_i32(a);
4680 tcg_temp_free_i32(b);
c6067f04 4681 }
ad69471c
PB
4682}
4683
39d5492a
PM
4684static void gen_neon_narrow_op(int op, int u, int size,
4685 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4686{
4687 if (op) {
4688 if (u) {
4689 gen_neon_unarrow_sats(size, dest, src);
4690 } else {
4691 gen_neon_narrow(size, dest, src);
4692 }
4693 } else {
4694 if (u) {
4695 gen_neon_narrow_satu(size, dest, src);
4696 } else {
4697 gen_neon_narrow_sats(size, dest, src);
4698 }
4699 }
4700}
4701
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
/* op value 24 is not defined here: unallocated, UNDEFs via the size table */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Each entry has bit n set if the op allows size value n; an op absent
 * from the initializer list gets 0 and therefore always UNDEFs.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4771
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps (e.g. 3, 21, 29, 39) are unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4836
4837static int neon_2rm_is_float_op(int op)
4838{
4839 /* Return true if this neon 2reg-misc op is float-to-float */
4840 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 4841 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
4842 op == NEON_2RM_VRINTM ||
4843 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 4844 op >= NEON_2RM_VRECPE_F);
600b828c
PM
4845}
4846
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x7 = sizes 0-2; 0x4 = 32-bit only; 0x2/0x1 = 16-/8-bit only.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4913
9ee6e8bb
PB
4914/* Translate a NEON data processing instruction. Return nonzero if the
4915 instruction is invalid.
ad69471c
PB
4916 We process data in a mixture of 32-bit and 64-bit chunks.
4917 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4918
0ecb72a5 4919static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4920{
4921 int op;
4922 int q;
4923 int rd, rn, rm;
4924 int size;
4925 int shift;
4926 int pass;
4927 int count;
4928 int pairwise;
4929 int u;
ca9a32e4 4930 uint32_t imm, mask;
39d5492a 4931 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4932 TCGv_i64 tmp64;
9ee6e8bb 4933
5df8bac1 4934 if (!s->vfp_enabled)
9ee6e8bb
PB
4935 return 1;
4936 q = (insn & (1 << 6)) != 0;
4937 u = (insn >> 24) & 1;
4938 VFP_DREG_D(rd, insn);
4939 VFP_DREG_N(rn, insn);
4940 VFP_DREG_M(rm, insn);
4941 size = (insn >> 20) & 3;
4942 if ((insn & (1 << 23)) == 0) {
4943 /* Three register same length. */
4944 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4945 /* Catch invalid op and bad size combinations: UNDEF */
4946 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4947 return 1;
4948 }
25f84f79
PM
4949 /* All insns of this form UNDEF for either this condition or the
4950 * superset of cases "Q==1"; we catch the latter later.
4951 */
4952 if (q && ((rd | rn | rm) & 1)) {
4953 return 1;
4954 }
62698be3
PM
4955 if (size == 3 && op != NEON_3R_LOGIC) {
4956 /* 64-bit element instructions. */
9ee6e8bb 4957 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4958 neon_load_reg64(cpu_V0, rn + pass);
4959 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4960 switch (op) {
62698be3 4961 case NEON_3R_VQADD:
9ee6e8bb 4962 if (u) {
02da0b2d
PM
4963 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4964 cpu_V0, cpu_V1);
2c0262af 4965 } else {
02da0b2d
PM
4966 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4967 cpu_V0, cpu_V1);
2c0262af 4968 }
9ee6e8bb 4969 break;
62698be3 4970 case NEON_3R_VQSUB:
9ee6e8bb 4971 if (u) {
02da0b2d
PM
4972 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4973 cpu_V0, cpu_V1);
ad69471c 4974 } else {
02da0b2d
PM
4975 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4976 cpu_V0, cpu_V1);
ad69471c
PB
4977 }
4978 break;
62698be3 4979 case NEON_3R_VSHL:
ad69471c
PB
4980 if (u) {
4981 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4982 } else {
4983 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4984 }
4985 break;
62698be3 4986 case NEON_3R_VQSHL:
ad69471c 4987 if (u) {
02da0b2d
PM
4988 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4989 cpu_V1, cpu_V0);
ad69471c 4990 } else {
02da0b2d
PM
4991 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4992 cpu_V1, cpu_V0);
ad69471c
PB
4993 }
4994 break;
62698be3 4995 case NEON_3R_VRSHL:
ad69471c
PB
4996 if (u) {
4997 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4998 } else {
ad69471c
PB
4999 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5000 }
5001 break;
62698be3 5002 case NEON_3R_VQRSHL:
ad69471c 5003 if (u) {
02da0b2d
PM
5004 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5005 cpu_V1, cpu_V0);
ad69471c 5006 } else {
02da0b2d
PM
5007 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5008 cpu_V1, cpu_V0);
1e8d4eec 5009 }
9ee6e8bb 5010 break;
62698be3 5011 case NEON_3R_VADD_VSUB:
9ee6e8bb 5012 if (u) {
ad69471c 5013 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5014 } else {
ad69471c 5015 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5016 }
5017 break;
5018 default:
5019 abort();
2c0262af 5020 }
ad69471c 5021 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5022 }
9ee6e8bb 5023 return 0;
2c0262af 5024 }
25f84f79 5025 pairwise = 0;
9ee6e8bb 5026 switch (op) {
62698be3
PM
5027 case NEON_3R_VSHL:
5028 case NEON_3R_VQSHL:
5029 case NEON_3R_VRSHL:
5030 case NEON_3R_VQRSHL:
9ee6e8bb 5031 {
ad69471c
PB
5032 int rtmp;
5033 /* Shift instruction operands are reversed. */
5034 rtmp = rn;
9ee6e8bb 5035 rn = rm;
ad69471c 5036 rm = rtmp;
9ee6e8bb 5037 }
2c0262af 5038 break;
25f84f79
PM
5039 case NEON_3R_VPADD:
5040 if (u) {
5041 return 1;
5042 }
5043 /* Fall through */
62698be3
PM
5044 case NEON_3R_VPMAX:
5045 case NEON_3R_VPMIN:
9ee6e8bb 5046 pairwise = 1;
2c0262af 5047 break;
25f84f79
PM
5048 case NEON_3R_FLOAT_ARITH:
5049 pairwise = (u && size < 2); /* if VPADD (float) */
5050 break;
5051 case NEON_3R_FLOAT_MINMAX:
5052 pairwise = u; /* if VPMIN/VPMAX (float) */
5053 break;
5054 case NEON_3R_FLOAT_CMP:
5055 if (!u && size) {
5056 /* no encoding for U=0 C=1x */
5057 return 1;
5058 }
5059 break;
5060 case NEON_3R_FLOAT_ACMP:
5061 if (!u) {
5062 return 1;
5063 }
5064 break;
505935fc
WN
5065 case NEON_3R_FLOAT_MISC:
5066 /* VMAXNM/VMINNM in ARMv8 */
5067 if (u && !arm_feature(env, ARM_FEATURE_V8)) {
25f84f79
PM
5068 return 1;
5069 }
2c0262af 5070 break;
25f84f79
PM
5071 case NEON_3R_VMUL:
5072 if (u && (size != 0)) {
5073 /* UNDEF on invalid size for polynomial subcase */
5074 return 1;
5075 }
2c0262af 5076 break;
da97f52c
PM
5077 case NEON_3R_VFM:
5078 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
5079 return 1;
5080 }
5081 break;
9ee6e8bb 5082 default:
2c0262af 5083 break;
9ee6e8bb 5084 }
dd8fbd78 5085
25f84f79
PM
5086 if (pairwise && q) {
5087 /* All the pairwise insns UNDEF if Q is set */
5088 return 1;
5089 }
5090
9ee6e8bb
PB
5091 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5092
5093 if (pairwise) {
5094 /* Pairwise. */
a5a14945
JR
5095 if (pass < 1) {
5096 tmp = neon_load_reg(rn, 0);
5097 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5098 } else {
a5a14945
JR
5099 tmp = neon_load_reg(rm, 0);
5100 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5101 }
5102 } else {
5103 /* Elementwise. */
dd8fbd78
FN
5104 tmp = neon_load_reg(rn, pass);
5105 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5106 }
5107 switch (op) {
62698be3 5108 case NEON_3R_VHADD:
9ee6e8bb
PB
5109 GEN_NEON_INTEGER_OP(hadd);
5110 break;
62698be3 5111 case NEON_3R_VQADD:
02da0b2d 5112 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5113 break;
62698be3 5114 case NEON_3R_VRHADD:
9ee6e8bb 5115 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5116 break;
62698be3 5117 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5118 switch ((u << 2) | size) {
5119 case 0: /* VAND */
dd8fbd78 5120 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5121 break;
5122 case 1: /* BIC */
f669df27 5123 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5124 break;
5125 case 2: /* VORR */
dd8fbd78 5126 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5127 break;
5128 case 3: /* VORN */
f669df27 5129 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5130 break;
5131 case 4: /* VEOR */
dd8fbd78 5132 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5133 break;
5134 case 5: /* VBSL */
dd8fbd78
FN
5135 tmp3 = neon_load_reg(rd, pass);
5136 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5137 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5138 break;
5139 case 6: /* VBIT */
dd8fbd78
FN
5140 tmp3 = neon_load_reg(rd, pass);
5141 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5142 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5143 break;
5144 case 7: /* VBIF */
dd8fbd78
FN
5145 tmp3 = neon_load_reg(rd, pass);
5146 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5147 tcg_temp_free_i32(tmp3);
9ee6e8bb 5148 break;
2c0262af
FB
5149 }
5150 break;
62698be3 5151 case NEON_3R_VHSUB:
9ee6e8bb
PB
5152 GEN_NEON_INTEGER_OP(hsub);
5153 break;
62698be3 5154 case NEON_3R_VQSUB:
02da0b2d 5155 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5156 break;
62698be3 5157 case NEON_3R_VCGT:
9ee6e8bb
PB
5158 GEN_NEON_INTEGER_OP(cgt);
5159 break;
62698be3 5160 case NEON_3R_VCGE:
9ee6e8bb
PB
5161 GEN_NEON_INTEGER_OP(cge);
5162 break;
62698be3 5163 case NEON_3R_VSHL:
ad69471c 5164 GEN_NEON_INTEGER_OP(shl);
2c0262af 5165 break;
62698be3 5166 case NEON_3R_VQSHL:
02da0b2d 5167 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5168 break;
62698be3 5169 case NEON_3R_VRSHL:
ad69471c 5170 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5171 break;
62698be3 5172 case NEON_3R_VQRSHL:
02da0b2d 5173 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5174 break;
62698be3 5175 case NEON_3R_VMAX:
9ee6e8bb
PB
5176 GEN_NEON_INTEGER_OP(max);
5177 break;
62698be3 5178 case NEON_3R_VMIN:
9ee6e8bb
PB
5179 GEN_NEON_INTEGER_OP(min);
5180 break;
62698be3 5181 case NEON_3R_VABD:
9ee6e8bb
PB
5182 GEN_NEON_INTEGER_OP(abd);
5183 break;
62698be3 5184 case NEON_3R_VABA:
9ee6e8bb 5185 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5186 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5187 tmp2 = neon_load_reg(rd, pass);
5188 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5189 break;
62698be3 5190 case NEON_3R_VADD_VSUB:
9ee6e8bb 5191 if (!u) { /* VADD */
62698be3 5192 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5193 } else { /* VSUB */
5194 switch (size) {
dd8fbd78
FN
5195 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5196 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5197 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5198 default: abort();
9ee6e8bb
PB
5199 }
5200 }
5201 break;
62698be3 5202 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5203 if (!u) { /* VTST */
5204 switch (size) {
dd8fbd78
FN
5205 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5206 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5207 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5208 default: abort();
9ee6e8bb
PB
5209 }
5210 } else { /* VCEQ */
5211 switch (size) {
dd8fbd78
FN
5212 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5213 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5214 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5215 default: abort();
9ee6e8bb
PB
5216 }
5217 }
5218 break;
62698be3 5219 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5220 switch (size) {
dd8fbd78
FN
5221 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5222 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5223 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5224 default: abort();
9ee6e8bb 5225 }
7d1b0095 5226 tcg_temp_free_i32(tmp2);
dd8fbd78 5227 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5228 if (u) { /* VMLS */
dd8fbd78 5229 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5230 } else { /* VMLA */
dd8fbd78 5231 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5232 }
5233 break;
62698be3 5234 case NEON_3R_VMUL:
9ee6e8bb 5235 if (u) { /* polynomial */
dd8fbd78 5236 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5237 } else { /* Integer */
5238 switch (size) {
dd8fbd78
FN
5239 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5240 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5241 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5242 default: abort();
9ee6e8bb
PB
5243 }
5244 }
5245 break;
62698be3 5246 case NEON_3R_VPMAX:
9ee6e8bb
PB
5247 GEN_NEON_INTEGER_OP(pmax);
5248 break;
62698be3 5249 case NEON_3R_VPMIN:
9ee6e8bb
PB
5250 GEN_NEON_INTEGER_OP(pmin);
5251 break;
62698be3 5252 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5253 if (!u) { /* VQDMULH */
5254 switch (size) {
02da0b2d
PM
5255 case 1:
5256 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5257 break;
5258 case 2:
5259 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5260 break;
62698be3 5261 default: abort();
9ee6e8bb 5262 }
62698be3 5263 } else { /* VQRDMULH */
9ee6e8bb 5264 switch (size) {
02da0b2d
PM
5265 case 1:
5266 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5267 break;
5268 case 2:
5269 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5270 break;
62698be3 5271 default: abort();
9ee6e8bb
PB
5272 }
5273 }
5274 break;
62698be3 5275 case NEON_3R_VPADD:
9ee6e8bb 5276 switch (size) {
dd8fbd78
FN
5277 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5278 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5279 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5280 default: abort();
9ee6e8bb
PB
5281 }
5282 break;
62698be3 5283 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5284 {
5285 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5286 switch ((u << 2) | size) {
5287 case 0: /* VADD */
aa47cfdd
PM
5288 case 4: /* VPADD */
5289 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5290 break;
5291 case 2: /* VSUB */
aa47cfdd 5292 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5293 break;
5294 case 6: /* VABD */
aa47cfdd 5295 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5296 break;
5297 default:
62698be3 5298 abort();
9ee6e8bb 5299 }
aa47cfdd 5300 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5301 break;
aa47cfdd 5302 }
62698be3 5303 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5304 {
5305 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5306 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5307 if (!u) {
7d1b0095 5308 tcg_temp_free_i32(tmp2);
dd8fbd78 5309 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5310 if (size == 0) {
aa47cfdd 5311 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5312 } else {
aa47cfdd 5313 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5314 }
5315 }
aa47cfdd 5316 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5317 break;
aa47cfdd 5318 }
62698be3 5319 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5320 {
5321 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5322 if (!u) {
aa47cfdd 5323 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5324 } else {
aa47cfdd
PM
5325 if (size == 0) {
5326 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5327 } else {
5328 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5329 }
b5ff1b31 5330 }
aa47cfdd 5331 tcg_temp_free_ptr(fpstatus);
2c0262af 5332 break;
aa47cfdd 5333 }
62698be3 5334 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5335 {
5336 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5337 if (size == 0) {
5338 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5339 } else {
5340 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5341 }
5342 tcg_temp_free_ptr(fpstatus);
2c0262af 5343 break;
aa47cfdd 5344 }
62698be3 5345 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5346 {
5347 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5348 if (size == 0) {
f71a2ae5 5349 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5350 } else {
f71a2ae5 5351 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5352 }
5353 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5354 break;
aa47cfdd 5355 }
505935fc
WN
5356 case NEON_3R_FLOAT_MISC:
5357 if (u) {
5358 /* VMAXNM/VMINNM */
5359 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5360 if (size == 0) {
f71a2ae5 5361 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5362 } else {
f71a2ae5 5363 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5364 }
5365 tcg_temp_free_ptr(fpstatus);
5366 } else {
5367 if (size == 0) {
5368 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5369 } else {
5370 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5371 }
5372 }
2c0262af 5373 break;
da97f52c
PM
5374 case NEON_3R_VFM:
5375 {
5376 /* VFMA, VFMS: fused multiply-add */
5377 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5378 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5379 if (size) {
5380 /* VFMS */
5381 gen_helper_vfp_negs(tmp, tmp);
5382 }
5383 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5384 tcg_temp_free_i32(tmp3);
5385 tcg_temp_free_ptr(fpstatus);
5386 break;
5387 }
9ee6e8bb
PB
5388 default:
5389 abort();
2c0262af 5390 }
7d1b0095 5391 tcg_temp_free_i32(tmp2);
dd8fbd78 5392
9ee6e8bb
PB
5393 /* Save the result. For elementwise operations we can put it
5394 straight into the destination register. For pairwise operations
5395 we have to be careful to avoid clobbering the source operands. */
5396 if (pairwise && rd == rm) {
dd8fbd78 5397 neon_store_scratch(pass, tmp);
9ee6e8bb 5398 } else {
dd8fbd78 5399 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5400 }
5401
5402 } /* for pass */
5403 if (pairwise && rd == rm) {
5404 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5405 tmp = neon_load_scratch(pass);
5406 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5407 }
5408 }
ad69471c 5409 /* End of 3 register same size operations. */
9ee6e8bb
PB
5410 } else if (insn & (1 << 4)) {
5411 if ((insn & 0x00380080) != 0) {
5412 /* Two registers and shift. */
5413 op = (insn >> 8) & 0xf;
5414 if (insn & (1 << 7)) {
cc13115b
PM
5415 /* 64-bit shift. */
5416 if (op > 7) {
5417 return 1;
5418 }
9ee6e8bb
PB
5419 size = 3;
5420 } else {
5421 size = 2;
5422 while ((insn & (1 << (size + 19))) == 0)
5423 size--;
5424 }
5425 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5426 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5427 by immediate using the variable shift operations. */
5428 if (op < 8) {
5429 /* Shift by immediate:
5430 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5431 if (q && ((rd | rm) & 1)) {
5432 return 1;
5433 }
5434 if (!u && (op == 4 || op == 6)) {
5435 return 1;
5436 }
9ee6e8bb
PB
5437 /* Right shifts are encoded as N - shift, where N is the
5438 element size in bits. */
5439 if (op <= 4)
5440 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5441 if (size == 3) {
5442 count = q + 1;
5443 } else {
5444 count = q ? 4: 2;
5445 }
5446 switch (size) {
5447 case 0:
5448 imm = (uint8_t) shift;
5449 imm |= imm << 8;
5450 imm |= imm << 16;
5451 break;
5452 case 1:
5453 imm = (uint16_t) shift;
5454 imm |= imm << 16;
5455 break;
5456 case 2:
5457 case 3:
5458 imm = shift;
5459 break;
5460 default:
5461 abort();
5462 }
5463
5464 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5465 if (size == 3) {
5466 neon_load_reg64(cpu_V0, rm + pass);
5467 tcg_gen_movi_i64(cpu_V1, imm);
5468 switch (op) {
5469 case 0: /* VSHR */
5470 case 1: /* VSRA */
5471 if (u)
5472 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5473 else
ad69471c 5474 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5475 break;
ad69471c
PB
5476 case 2: /* VRSHR */
5477 case 3: /* VRSRA */
5478 if (u)
5479 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5480 else
ad69471c 5481 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5482 break;
ad69471c 5483 case 4: /* VSRI */
ad69471c
PB
5484 case 5: /* VSHL, VSLI */
5485 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5486 break;
0322b26e 5487 case 6: /* VQSHLU */
02da0b2d
PM
5488 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5489 cpu_V0, cpu_V1);
ad69471c 5490 break;
0322b26e
PM
5491 case 7: /* VQSHL */
5492 if (u) {
02da0b2d 5493 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5494 cpu_V0, cpu_V1);
5495 } else {
02da0b2d 5496 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5497 cpu_V0, cpu_V1);
5498 }
9ee6e8bb 5499 break;
9ee6e8bb 5500 }
ad69471c
PB
5501 if (op == 1 || op == 3) {
5502 /* Accumulate. */
5371cb81 5503 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5504 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5505 } else if (op == 4 || (op == 5 && u)) {
5506 /* Insert */
923e6509
CL
5507 neon_load_reg64(cpu_V1, rd + pass);
5508 uint64_t mask;
5509 if (shift < -63 || shift > 63) {
5510 mask = 0;
5511 } else {
5512 if (op == 4) {
5513 mask = 0xffffffffffffffffull >> -shift;
5514 } else {
5515 mask = 0xffffffffffffffffull << shift;
5516 }
5517 }
5518 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5519 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5520 }
5521 neon_store_reg64(cpu_V0, rd + pass);
5522 } else { /* size < 3 */
5523 /* Operands in T0 and T1. */
dd8fbd78 5524 tmp = neon_load_reg(rm, pass);
7d1b0095 5525 tmp2 = tcg_temp_new_i32();
dd8fbd78 5526 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5527 switch (op) {
5528 case 0: /* VSHR */
5529 case 1: /* VSRA */
5530 GEN_NEON_INTEGER_OP(shl);
5531 break;
5532 case 2: /* VRSHR */
5533 case 3: /* VRSRA */
5534 GEN_NEON_INTEGER_OP(rshl);
5535 break;
5536 case 4: /* VSRI */
ad69471c
PB
5537 case 5: /* VSHL, VSLI */
5538 switch (size) {
dd8fbd78
FN
5539 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5540 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5541 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5542 default: abort();
ad69471c
PB
5543 }
5544 break;
0322b26e 5545 case 6: /* VQSHLU */
ad69471c 5546 switch (size) {
0322b26e 5547 case 0:
02da0b2d
PM
5548 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5549 tmp, tmp2);
0322b26e
PM
5550 break;
5551 case 1:
02da0b2d
PM
5552 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5553 tmp, tmp2);
0322b26e
PM
5554 break;
5555 case 2:
02da0b2d
PM
5556 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5557 tmp, tmp2);
0322b26e
PM
5558 break;
5559 default:
cc13115b 5560 abort();
ad69471c
PB
5561 }
5562 break;
0322b26e 5563 case 7: /* VQSHL */
02da0b2d 5564 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5565 break;
ad69471c 5566 }
7d1b0095 5567 tcg_temp_free_i32(tmp2);
ad69471c
PB
5568
5569 if (op == 1 || op == 3) {
5570 /* Accumulate. */
dd8fbd78 5571 tmp2 = neon_load_reg(rd, pass);
5371cb81 5572 gen_neon_add(size, tmp, tmp2);
7d1b0095 5573 tcg_temp_free_i32(tmp2);
ad69471c
PB
5574 } else if (op == 4 || (op == 5 && u)) {
5575 /* Insert */
5576 switch (size) {
5577 case 0:
5578 if (op == 4)
ca9a32e4 5579 mask = 0xff >> -shift;
ad69471c 5580 else
ca9a32e4
JR
5581 mask = (uint8_t)(0xff << shift);
5582 mask |= mask << 8;
5583 mask |= mask << 16;
ad69471c
PB
5584 break;
5585 case 1:
5586 if (op == 4)
ca9a32e4 5587 mask = 0xffff >> -shift;
ad69471c 5588 else
ca9a32e4
JR
5589 mask = (uint16_t)(0xffff << shift);
5590 mask |= mask << 16;
ad69471c
PB
5591 break;
5592 case 2:
ca9a32e4
JR
5593 if (shift < -31 || shift > 31) {
5594 mask = 0;
5595 } else {
5596 if (op == 4)
5597 mask = 0xffffffffu >> -shift;
5598 else
5599 mask = 0xffffffffu << shift;
5600 }
ad69471c
PB
5601 break;
5602 default:
5603 abort();
5604 }
dd8fbd78 5605 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5606 tcg_gen_andi_i32(tmp, tmp, mask);
5607 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5608 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5609 tcg_temp_free_i32(tmp2);
ad69471c 5610 }
dd8fbd78 5611 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5612 }
5613 } /* for pass */
5614 } else if (op < 10) {
ad69471c 5615 /* Shift by immediate and narrow:
9ee6e8bb 5616 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5617 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5618 if (rm & 1) {
5619 return 1;
5620 }
9ee6e8bb
PB
5621 shift = shift - (1 << (size + 3));
5622 size++;
92cdfaeb 5623 if (size == 3) {
a7812ae4 5624 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5625 neon_load_reg64(cpu_V0, rm);
5626 neon_load_reg64(cpu_V1, rm + 1);
5627 for (pass = 0; pass < 2; pass++) {
5628 TCGv_i64 in;
5629 if (pass == 0) {
5630 in = cpu_V0;
5631 } else {
5632 in = cpu_V1;
5633 }
ad69471c 5634 if (q) {
0b36f4cd 5635 if (input_unsigned) {
92cdfaeb 5636 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5637 } else {
92cdfaeb 5638 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5639 }
ad69471c 5640 } else {
0b36f4cd 5641 if (input_unsigned) {
92cdfaeb 5642 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5643 } else {
92cdfaeb 5644 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5645 }
ad69471c 5646 }
7d1b0095 5647 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5648 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5649 neon_store_reg(rd, pass, tmp);
5650 } /* for pass */
5651 tcg_temp_free_i64(tmp64);
5652 } else {
5653 if (size == 1) {
5654 imm = (uint16_t)shift;
5655 imm |= imm << 16;
2c0262af 5656 } else {
92cdfaeb
PM
5657 /* size == 2 */
5658 imm = (uint32_t)shift;
5659 }
5660 tmp2 = tcg_const_i32(imm);
5661 tmp4 = neon_load_reg(rm + 1, 0);
5662 tmp5 = neon_load_reg(rm + 1, 1);
5663 for (pass = 0; pass < 2; pass++) {
5664 if (pass == 0) {
5665 tmp = neon_load_reg(rm, 0);
5666 } else {
5667 tmp = tmp4;
5668 }
0b36f4cd
CL
5669 gen_neon_shift_narrow(size, tmp, tmp2, q,
5670 input_unsigned);
92cdfaeb
PM
5671 if (pass == 0) {
5672 tmp3 = neon_load_reg(rm, 1);
5673 } else {
5674 tmp3 = tmp5;
5675 }
0b36f4cd
CL
5676 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5677 input_unsigned);
36aa55dc 5678 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5679 tcg_temp_free_i32(tmp);
5680 tcg_temp_free_i32(tmp3);
5681 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5682 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5683 neon_store_reg(rd, pass, tmp);
5684 } /* for pass */
c6067f04 5685 tcg_temp_free_i32(tmp2);
b75263d6 5686 }
9ee6e8bb 5687 } else if (op == 10) {
cc13115b
PM
5688 /* VSHLL, VMOVL */
5689 if (q || (rd & 1)) {
9ee6e8bb 5690 return 1;
cc13115b 5691 }
ad69471c
PB
5692 tmp = neon_load_reg(rm, 0);
5693 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5694 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5695 if (pass == 1)
5696 tmp = tmp2;
5697
5698 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5699
9ee6e8bb
PB
5700 if (shift != 0) {
5701 /* The shift is less than the width of the source
ad69471c
PB
5702 type, so we can just shift the whole register. */
5703 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5704 /* Widen the result of shift: we need to clear
5705 * the potential overflow bits resulting from
5706 * left bits of the narrow input appearing as
5707 * right bits of left the neighbour narrow
5708 * input. */
ad69471c
PB
5709 if (size < 2 || !u) {
5710 uint64_t imm64;
5711 if (size == 0) {
5712 imm = (0xffu >> (8 - shift));
5713 imm |= imm << 16;
acdf01ef 5714 } else if (size == 1) {
ad69471c 5715 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5716 } else {
5717 /* size == 2 */
5718 imm = 0xffffffff >> (32 - shift);
5719 }
5720 if (size < 2) {
5721 imm64 = imm | (((uint64_t)imm) << 32);
5722 } else {
5723 imm64 = imm;
9ee6e8bb 5724 }
acdf01ef 5725 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5726 }
5727 }
ad69471c 5728 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5729 }
f73534a5 5730 } else if (op >= 14) {
9ee6e8bb 5731 /* VCVT fixed-point. */
cc13115b
PM
5732 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5733 return 1;
5734 }
f73534a5
PM
5735 /* We have already masked out the must-be-1 top bit of imm6,
5736 * hence this 32-shift where the ARM ARM has 64-imm6.
5737 */
5738 shift = 32 - shift;
9ee6e8bb 5739 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5740 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5741 if (!(op & 1)) {
9ee6e8bb 5742 if (u)
5500b06c 5743 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5744 else
5500b06c 5745 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5746 } else {
5747 if (u)
5500b06c 5748 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5749 else
5500b06c 5750 gen_vfp_tosl(0, shift, 1);
2c0262af 5751 }
4373f3ce 5752 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5753 }
5754 } else {
9ee6e8bb
PB
5755 return 1;
5756 }
5757 } else { /* (insn & 0x00380080) == 0 */
5758 int invert;
7d80fee5
PM
5759 if (q && (rd & 1)) {
5760 return 1;
5761 }
9ee6e8bb
PB
5762
5763 op = (insn >> 8) & 0xf;
5764 /* One register and immediate. */
5765 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5766 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5767 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5768 * We choose to not special-case this and will behave as if a
5769 * valid constant encoding of 0 had been given.
5770 */
9ee6e8bb
PB
5771 switch (op) {
5772 case 0: case 1:
5773 /* no-op */
5774 break;
5775 case 2: case 3:
5776 imm <<= 8;
5777 break;
5778 case 4: case 5:
5779 imm <<= 16;
5780 break;
5781 case 6: case 7:
5782 imm <<= 24;
5783 break;
5784 case 8: case 9:
5785 imm |= imm << 16;
5786 break;
5787 case 10: case 11:
5788 imm = (imm << 8) | (imm << 24);
5789 break;
5790 case 12:
8e31209e 5791 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5792 break;
5793 case 13:
5794 imm = (imm << 16) | 0xffff;
5795 break;
5796 case 14:
5797 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5798 if (invert)
5799 imm = ~imm;
5800 break;
5801 case 15:
7d80fee5
PM
5802 if (invert) {
5803 return 1;
5804 }
9ee6e8bb
PB
5805 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5806 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5807 break;
5808 }
5809 if (invert)
5810 imm = ~imm;
5811
9ee6e8bb
PB
5812 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5813 if (op & 1 && op < 12) {
ad69471c 5814 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5815 if (invert) {
5816 /* The immediate value has already been inverted, so
5817 BIC becomes AND. */
ad69471c 5818 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5819 } else {
ad69471c 5820 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5821 }
9ee6e8bb 5822 } else {
ad69471c 5823 /* VMOV, VMVN. */
7d1b0095 5824 tmp = tcg_temp_new_i32();
9ee6e8bb 5825 if (op == 14 && invert) {
a5a14945 5826 int n;
ad69471c
PB
5827 uint32_t val;
5828 val = 0;
9ee6e8bb
PB
5829 for (n = 0; n < 4; n++) {
5830 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5831 val |= 0xff << (n * 8);
9ee6e8bb 5832 }
ad69471c
PB
5833 tcg_gen_movi_i32(tmp, val);
5834 } else {
5835 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5836 }
9ee6e8bb 5837 }
ad69471c 5838 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5839 }
5840 }
e4b3861d 5841 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5842 if (size != 3) {
5843 op = (insn >> 8) & 0xf;
5844 if ((insn & (1 << 6)) == 0) {
5845 /* Three registers of different lengths. */
5846 int src1_wide;
5847 int src2_wide;
5848 int prewiden;
695272dc
PM
5849 /* undefreq: bit 0 : UNDEF if size != 0
5850 * bit 1 : UNDEF if size == 0
5851 * bit 2 : UNDEF if U == 1
5852 * Note that [1:0] set implies 'always UNDEF'
5853 */
5854 int undefreq;
5855 /* prewiden, src1_wide, src2_wide, undefreq */
5856 static const int neon_3reg_wide[16][4] = {
5857 {1, 0, 0, 0}, /* VADDL */
5858 {1, 1, 0, 0}, /* VADDW */
5859 {1, 0, 0, 0}, /* VSUBL */
5860 {1, 1, 0, 0}, /* VSUBW */
5861 {0, 1, 1, 0}, /* VADDHN */
5862 {0, 0, 0, 0}, /* VABAL */
5863 {0, 1, 1, 0}, /* VSUBHN */
5864 {0, 0, 0, 0}, /* VABDL */
5865 {0, 0, 0, 0}, /* VMLAL */
5866 {0, 0, 0, 6}, /* VQDMLAL */
5867 {0, 0, 0, 0}, /* VMLSL */
5868 {0, 0, 0, 6}, /* VQDMLSL */
5869 {0, 0, 0, 0}, /* Integer VMULL */
5870 {0, 0, 0, 2}, /* VQDMULL */
5871 {0, 0, 0, 5}, /* Polynomial VMULL */
5872 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5873 };
5874
5875 prewiden = neon_3reg_wide[op][0];
5876 src1_wide = neon_3reg_wide[op][1];
5877 src2_wide = neon_3reg_wide[op][2];
695272dc 5878 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5879
695272dc
PM
5880 if (((undefreq & 1) && (size != 0)) ||
5881 ((undefreq & 2) && (size == 0)) ||
5882 ((undefreq & 4) && u)) {
5883 return 1;
5884 }
5885 if ((src1_wide && (rn & 1)) ||
5886 (src2_wide && (rm & 1)) ||
5887 (!src2_wide && (rd & 1))) {
ad69471c 5888 return 1;
695272dc 5889 }
ad69471c 5890
9ee6e8bb
PB
5891 /* Avoid overlapping operands. Wide source operands are
5892 always aligned so will never overlap with wide
5893 destinations in problematic ways. */
8f8e3aa4 5894 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5895 tmp = neon_load_reg(rm, 1);
5896 neon_store_scratch(2, tmp);
8f8e3aa4 5897 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5898 tmp = neon_load_reg(rn, 1);
5899 neon_store_scratch(2, tmp);
9ee6e8bb 5900 }
39d5492a 5901 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5902 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5903 if (src1_wide) {
5904 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5905 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5906 } else {
ad69471c 5907 if (pass == 1 && rd == rn) {
dd8fbd78 5908 tmp = neon_load_scratch(2);
9ee6e8bb 5909 } else {
ad69471c
PB
5910 tmp = neon_load_reg(rn, pass);
5911 }
5912 if (prewiden) {
5913 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5914 }
5915 }
ad69471c
PB
5916 if (src2_wide) {
5917 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5918 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5919 } else {
ad69471c 5920 if (pass == 1 && rd == rm) {
dd8fbd78 5921 tmp2 = neon_load_scratch(2);
9ee6e8bb 5922 } else {
ad69471c
PB
5923 tmp2 = neon_load_reg(rm, pass);
5924 }
5925 if (prewiden) {
5926 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5927 }
9ee6e8bb
PB
5928 }
5929 switch (op) {
5930 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5931 gen_neon_addl(size);
9ee6e8bb 5932 break;
79b0e534 5933 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5934 gen_neon_subl(size);
9ee6e8bb
PB
5935 break;
5936 case 5: case 7: /* VABAL, VABDL */
5937 switch ((size << 1) | u) {
ad69471c
PB
5938 case 0:
5939 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5940 break;
5941 case 1:
5942 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5943 break;
5944 case 2:
5945 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5946 break;
5947 case 3:
5948 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5949 break;
5950 case 4:
5951 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5952 break;
5953 case 5:
5954 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5955 break;
9ee6e8bb
PB
5956 default: abort();
5957 }
7d1b0095
PM
5958 tcg_temp_free_i32(tmp2);
5959 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5960 break;
5961 case 8: case 9: case 10: case 11: case 12: case 13:
5962 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5963 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5964 break;
5965 case 14: /* Polynomial VMULL */
e5ca24cb 5966 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5967 tcg_temp_free_i32(tmp2);
5968 tcg_temp_free_i32(tmp);
e5ca24cb 5969 break;
695272dc
PM
5970 default: /* 15 is RESERVED: caught earlier */
5971 abort();
9ee6e8bb 5972 }
ebcd88ce
PM
5973 if (op == 13) {
5974 /* VQDMULL */
5975 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5976 neon_store_reg64(cpu_V0, rd + pass);
5977 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5978 /* Accumulate. */
ebcd88ce 5979 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5980 switch (op) {
4dc064e6
PM
5981 case 10: /* VMLSL */
5982 gen_neon_negl(cpu_V0, size);
5983 /* Fall through */
5984 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5985 gen_neon_addl(size);
9ee6e8bb
PB
5986 break;
5987 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5988 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5989 if (op == 11) {
5990 gen_neon_negl(cpu_V0, size);
5991 }
ad69471c
PB
5992 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5993 break;
9ee6e8bb
PB
5994 default:
5995 abort();
5996 }
ad69471c 5997 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5998 } else if (op == 4 || op == 6) {
5999 /* Narrowing operation. */
7d1b0095 6000 tmp = tcg_temp_new_i32();
79b0e534 6001 if (!u) {
9ee6e8bb 6002 switch (size) {
ad69471c
PB
6003 case 0:
6004 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6005 break;
6006 case 1:
6007 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6008 break;
6009 case 2:
6010 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6011 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6012 break;
9ee6e8bb
PB
6013 default: abort();
6014 }
6015 } else {
6016 switch (size) {
ad69471c
PB
6017 case 0:
6018 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6019 break;
6020 case 1:
6021 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6022 break;
6023 case 2:
6024 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6025 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6026 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6027 break;
9ee6e8bb
PB
6028 default: abort();
6029 }
6030 }
ad69471c
PB
6031 if (pass == 0) {
6032 tmp3 = tmp;
6033 } else {
6034 neon_store_reg(rd, 0, tmp3);
6035 neon_store_reg(rd, 1, tmp);
6036 }
9ee6e8bb
PB
6037 } else {
6038 /* Write back the result. */
ad69471c 6039 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6040 }
6041 }
6042 } else {
3e3326df
PM
6043 /* Two registers and a scalar. NB that for ops of this form
6044 * the ARM ARM labels bit 24 as Q, but it is in our variable
6045 * 'u', not 'q'.
6046 */
6047 if (size == 0) {
6048 return 1;
6049 }
9ee6e8bb 6050 switch (op) {
9ee6e8bb 6051 case 1: /* Float VMLA scalar */
9ee6e8bb 6052 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6053 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6054 if (size == 1) {
6055 return 1;
6056 }
6057 /* fall through */
6058 case 0: /* Integer VMLA scalar */
6059 case 4: /* Integer VMLS scalar */
6060 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6061 case 12: /* VQDMULH scalar */
6062 case 13: /* VQRDMULH scalar */
3e3326df
PM
6063 if (u && ((rd | rn) & 1)) {
6064 return 1;
6065 }
dd8fbd78
FN
6066 tmp = neon_get_scalar(size, rm);
6067 neon_store_scratch(0, tmp);
9ee6e8bb 6068 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6069 tmp = neon_load_scratch(0);
6070 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6071 if (op == 12) {
6072 if (size == 1) {
02da0b2d 6073 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6074 } else {
02da0b2d 6075 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6076 }
6077 } else if (op == 13) {
6078 if (size == 1) {
02da0b2d 6079 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6080 } else {
02da0b2d 6081 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6082 }
6083 } else if (op & 1) {
aa47cfdd
PM
6084 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6085 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6086 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6087 } else {
6088 switch (size) {
dd8fbd78
FN
6089 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6090 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6091 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6092 default: abort();
9ee6e8bb
PB
6093 }
6094 }
7d1b0095 6095 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6096 if (op < 8) {
6097 /* Accumulate. */
dd8fbd78 6098 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6099 switch (op) {
6100 case 0:
dd8fbd78 6101 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6102 break;
6103 case 1:
aa47cfdd
PM
6104 {
6105 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6106 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6107 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6108 break;
aa47cfdd 6109 }
9ee6e8bb 6110 case 4:
dd8fbd78 6111 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6112 break;
6113 case 5:
aa47cfdd
PM
6114 {
6115 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6116 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6117 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6118 break;
aa47cfdd 6119 }
9ee6e8bb
PB
6120 default:
6121 abort();
6122 }
7d1b0095 6123 tcg_temp_free_i32(tmp2);
9ee6e8bb 6124 }
dd8fbd78 6125 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6126 }
6127 break;
9ee6e8bb 6128 case 3: /* VQDMLAL scalar */
9ee6e8bb 6129 case 7: /* VQDMLSL scalar */
9ee6e8bb 6130 case 11: /* VQDMULL scalar */
3e3326df 6131 if (u == 1) {
ad69471c 6132 return 1;
3e3326df
PM
6133 }
6134 /* fall through */
6135 case 2: /* VMLAL sclar */
6136 case 6: /* VMLSL scalar */
6137 case 10: /* VMULL scalar */
6138 if (rd & 1) {
6139 return 1;
6140 }
dd8fbd78 6141 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6142 /* We need a copy of tmp2 because gen_neon_mull
6143 * deletes it during pass 0. */
7d1b0095 6144 tmp4 = tcg_temp_new_i32();
c6067f04 6145 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6146 tmp3 = neon_load_reg(rn, 1);
ad69471c 6147
9ee6e8bb 6148 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6149 if (pass == 0) {
6150 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6151 } else {
dd8fbd78 6152 tmp = tmp3;
c6067f04 6153 tmp2 = tmp4;
9ee6e8bb 6154 }
ad69471c 6155 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6156 if (op != 11) {
6157 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6158 }
9ee6e8bb 6159 switch (op) {
4dc064e6
PM
6160 case 6:
6161 gen_neon_negl(cpu_V0, size);
6162 /* Fall through */
6163 case 2:
ad69471c 6164 gen_neon_addl(size);
9ee6e8bb
PB
6165 break;
6166 case 3: case 7:
ad69471c 6167 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6168 if (op == 7) {
6169 gen_neon_negl(cpu_V0, size);
6170 }
ad69471c 6171 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6172 break;
6173 case 10:
6174 /* no-op */
6175 break;
6176 case 11:
ad69471c 6177 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6178 break;
6179 default:
6180 abort();
6181 }
ad69471c 6182 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6183 }
dd8fbd78 6184
dd8fbd78 6185
9ee6e8bb
PB
6186 break;
6187 default: /* 14 and 15 are RESERVED */
6188 return 1;
6189 }
6190 }
6191 } else { /* size == 3 */
6192 if (!u) {
6193 /* Extract. */
9ee6e8bb 6194 imm = (insn >> 8) & 0xf;
ad69471c
PB
6195
6196 if (imm > 7 && !q)
6197 return 1;
6198
52579ea1
PM
6199 if (q && ((rd | rn | rm) & 1)) {
6200 return 1;
6201 }
6202
ad69471c
PB
6203 if (imm == 0) {
6204 neon_load_reg64(cpu_V0, rn);
6205 if (q) {
6206 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6207 }
ad69471c
PB
6208 } else if (imm == 8) {
6209 neon_load_reg64(cpu_V0, rn + 1);
6210 if (q) {
6211 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6212 }
ad69471c 6213 } else if (q) {
a7812ae4 6214 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6215 if (imm < 8) {
6216 neon_load_reg64(cpu_V0, rn);
a7812ae4 6217 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6218 } else {
6219 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6220 neon_load_reg64(tmp64, rm);
ad69471c
PB
6221 }
6222 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6223 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6224 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6225 if (imm < 8) {
6226 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6227 } else {
ad69471c
PB
6228 neon_load_reg64(cpu_V1, rm + 1);
6229 imm -= 8;
9ee6e8bb 6230 }
ad69471c 6231 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6232 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6233 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6234 tcg_temp_free_i64(tmp64);
ad69471c 6235 } else {
a7812ae4 6236 /* BUGFIX */
ad69471c 6237 neon_load_reg64(cpu_V0, rn);
a7812ae4 6238 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6239 neon_load_reg64(cpu_V1, rm);
a7812ae4 6240 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6241 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6242 }
6243 neon_store_reg64(cpu_V0, rd);
6244 if (q) {
6245 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6246 }
6247 } else if ((insn & (1 << 11)) == 0) {
6248 /* Two register misc. */
6249 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6250 size = (insn >> 18) & 3;
600b828c
PM
6251 /* UNDEF for unknown op values and bad op-size combinations */
6252 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6253 return 1;
6254 }
fc2a9b37
PM
6255 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6256 q && ((rm | rd) & 1)) {
6257 return 1;
6258 }
9ee6e8bb 6259 switch (op) {
600b828c 6260 case NEON_2RM_VREV64:
9ee6e8bb 6261 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6262 tmp = neon_load_reg(rm, pass * 2);
6263 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6264 switch (size) {
dd8fbd78
FN
6265 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6266 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6267 case 2: /* no-op */ break;
6268 default: abort();
6269 }
dd8fbd78 6270 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6271 if (size == 2) {
dd8fbd78 6272 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6273 } else {
9ee6e8bb 6274 switch (size) {
dd8fbd78
FN
6275 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6276 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6277 default: abort();
6278 }
dd8fbd78 6279 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6280 }
6281 }
6282 break;
600b828c
PM
6283 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6284 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6285 for (pass = 0; pass < q + 1; pass++) {
6286 tmp = neon_load_reg(rm, pass * 2);
6287 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6288 tmp = neon_load_reg(rm, pass * 2 + 1);
6289 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6290 switch (size) {
6291 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6292 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6293 case 2: tcg_gen_add_i64(CPU_V001); break;
6294 default: abort();
6295 }
600b828c 6296 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6297 /* Accumulate. */
ad69471c
PB
6298 neon_load_reg64(cpu_V1, rd + pass);
6299 gen_neon_addl(size);
9ee6e8bb 6300 }
ad69471c 6301 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6302 }
6303 break;
600b828c 6304 case NEON_2RM_VTRN:
9ee6e8bb 6305 if (size == 2) {
a5a14945 6306 int n;
9ee6e8bb 6307 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6308 tmp = neon_load_reg(rm, n);
6309 tmp2 = neon_load_reg(rd, n + 1);
6310 neon_store_reg(rm, n, tmp2);
6311 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6312 }
6313 } else {
6314 goto elementwise;
6315 }
6316 break;
600b828c 6317 case NEON_2RM_VUZP:
02acedf9 6318 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6319 return 1;
9ee6e8bb
PB
6320 }
6321 break;
600b828c 6322 case NEON_2RM_VZIP:
d68a6f3a 6323 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6324 return 1;
9ee6e8bb
PB
6325 }
6326 break;
600b828c
PM
6327 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6328 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6329 if (rm & 1) {
6330 return 1;
6331 }
39d5492a 6332 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6333 for (pass = 0; pass < 2; pass++) {
ad69471c 6334 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6335 tmp = tcg_temp_new_i32();
600b828c
PM
6336 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6337 tmp, cpu_V0);
ad69471c
PB
6338 if (pass == 0) {
6339 tmp2 = tmp;
6340 } else {
6341 neon_store_reg(rd, 0, tmp2);
6342 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6343 }
9ee6e8bb
PB
6344 }
6345 break;
600b828c 6346 case NEON_2RM_VSHLL:
fc2a9b37 6347 if (q || (rd & 1)) {
9ee6e8bb 6348 return 1;
600b828c 6349 }
ad69471c
PB
6350 tmp = neon_load_reg(rm, 0);
6351 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6352 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6353 if (pass == 1)
6354 tmp = tmp2;
6355 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6356 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6357 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6358 }
6359 break;
600b828c 6360 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
6361 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6362 q || (rm & 1)) {
6363 return 1;
6364 }
7d1b0095
PM
6365 tmp = tcg_temp_new_i32();
6366 tmp2 = tcg_temp_new_i32();
60011498 6367 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6368 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6369 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6370 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6371 tcg_gen_shli_i32(tmp2, tmp2, 16);
6372 tcg_gen_or_i32(tmp2, tmp2, tmp);
6373 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6374 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6375 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6376 neon_store_reg(rd, 0, tmp2);
7d1b0095 6377 tmp2 = tcg_temp_new_i32();
2d981da7 6378 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6379 tcg_gen_shli_i32(tmp2, tmp2, 16);
6380 tcg_gen_or_i32(tmp2, tmp2, tmp);
6381 neon_store_reg(rd, 1, tmp2);
7d1b0095 6382 tcg_temp_free_i32(tmp);
60011498 6383 break;
600b828c 6384 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6385 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6386 q || (rd & 1)) {
6387 return 1;
6388 }
7d1b0095 6389 tmp3 = tcg_temp_new_i32();
60011498
PB
6390 tmp = neon_load_reg(rm, 0);
6391 tmp2 = neon_load_reg(rm, 1);
6392 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6393 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6394 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6395 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6396 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6397 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6398 tcg_temp_free_i32(tmp);
60011498 6399 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6400 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6401 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6402 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6403 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6404 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6405 tcg_temp_free_i32(tmp2);
6406 tcg_temp_free_i32(tmp3);
60011498 6407 break;
9d935509
AB
6408 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6409 if (!arm_feature(env, ARM_FEATURE_V8_AES)
6410 || ((rm | rd) & 1)) {
6411 return 1;
6412 }
6413 tmp = tcg_const_i32(rd);
6414 tmp2 = tcg_const_i32(rm);
6415
6416 /* Bit 6 is the lowest opcode bit; it distinguishes between
6417 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6418 */
6419 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6420
6421 if (op == NEON_2RM_AESE) {
6422 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6423 } else {
6424 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6425 }
6426 tcg_temp_free_i32(tmp);
6427 tcg_temp_free_i32(tmp2);
6428 tcg_temp_free_i32(tmp3);
6429 break;
9ee6e8bb
PB
6430 default:
6431 elementwise:
6432 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6433 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6434 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6435 neon_reg_offset(rm, pass));
39d5492a 6436 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6437 } else {
dd8fbd78 6438 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6439 }
6440 switch (op) {
600b828c 6441 case NEON_2RM_VREV32:
9ee6e8bb 6442 switch (size) {
dd8fbd78
FN
6443 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6444 case 1: gen_swap_half(tmp); break;
600b828c 6445 default: abort();
9ee6e8bb
PB
6446 }
6447 break;
600b828c 6448 case NEON_2RM_VREV16:
dd8fbd78 6449 gen_rev16(tmp);
9ee6e8bb 6450 break;
600b828c 6451 case NEON_2RM_VCLS:
9ee6e8bb 6452 switch (size) {
dd8fbd78
FN
6453 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6454 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6455 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6456 default: abort();
9ee6e8bb
PB
6457 }
6458 break;
600b828c 6459 case NEON_2RM_VCLZ:
9ee6e8bb 6460 switch (size) {
dd8fbd78
FN
6461 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6462 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6463 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6464 default: abort();
9ee6e8bb
PB
6465 }
6466 break;
600b828c 6467 case NEON_2RM_VCNT:
dd8fbd78 6468 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6469 break;
600b828c 6470 case NEON_2RM_VMVN:
dd8fbd78 6471 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6472 break;
600b828c 6473 case NEON_2RM_VQABS:
9ee6e8bb 6474 switch (size) {
02da0b2d
PM
6475 case 0:
6476 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6477 break;
6478 case 1:
6479 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6480 break;
6481 case 2:
6482 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6483 break;
600b828c 6484 default: abort();
9ee6e8bb
PB
6485 }
6486 break;
600b828c 6487 case NEON_2RM_VQNEG:
9ee6e8bb 6488 switch (size) {
02da0b2d
PM
6489 case 0:
6490 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6491 break;
6492 case 1:
6493 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6494 break;
6495 case 2:
6496 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6497 break;
600b828c 6498 default: abort();
9ee6e8bb
PB
6499 }
6500 break;
600b828c 6501 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6502 tmp2 = tcg_const_i32(0);
9ee6e8bb 6503 switch(size) {
dd8fbd78
FN
6504 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6505 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6506 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6507 default: abort();
9ee6e8bb 6508 }
39d5492a 6509 tcg_temp_free_i32(tmp2);
600b828c 6510 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6511 tcg_gen_not_i32(tmp, tmp);
600b828c 6512 }
9ee6e8bb 6513 break;
600b828c 6514 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6515 tmp2 = tcg_const_i32(0);
9ee6e8bb 6516 switch(size) {
dd8fbd78
FN
6517 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6518 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6519 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6520 default: abort();
9ee6e8bb 6521 }
39d5492a 6522 tcg_temp_free_i32(tmp2);
600b828c 6523 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6524 tcg_gen_not_i32(tmp, tmp);
600b828c 6525 }
9ee6e8bb 6526 break;
600b828c 6527 case NEON_2RM_VCEQ0:
dd8fbd78 6528 tmp2 = tcg_const_i32(0);
9ee6e8bb 6529 switch(size) {
dd8fbd78
FN
6530 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6531 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6532 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6533 default: abort();
9ee6e8bb 6534 }
39d5492a 6535 tcg_temp_free_i32(tmp2);
9ee6e8bb 6536 break;
600b828c 6537 case NEON_2RM_VABS:
9ee6e8bb 6538 switch(size) {
dd8fbd78
FN
6539 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6540 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6541 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6542 default: abort();
9ee6e8bb
PB
6543 }
6544 break;
600b828c 6545 case NEON_2RM_VNEG:
dd8fbd78
FN
6546 tmp2 = tcg_const_i32(0);
6547 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6548 tcg_temp_free_i32(tmp2);
9ee6e8bb 6549 break;
600b828c 6550 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6551 {
6552 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6553 tmp2 = tcg_const_i32(0);
aa47cfdd 6554 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6555 tcg_temp_free_i32(tmp2);
aa47cfdd 6556 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6557 break;
aa47cfdd 6558 }
600b828c 6559 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6560 {
6561 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6562 tmp2 = tcg_const_i32(0);
aa47cfdd 6563 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6564 tcg_temp_free_i32(tmp2);
aa47cfdd 6565 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6566 break;
aa47cfdd 6567 }
600b828c 6568 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6569 {
6570 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6571 tmp2 = tcg_const_i32(0);
aa47cfdd 6572 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6573 tcg_temp_free_i32(tmp2);
aa47cfdd 6574 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6575 break;
aa47cfdd 6576 }
600b828c 6577 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6578 {
6579 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6580 tmp2 = tcg_const_i32(0);
aa47cfdd 6581 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6582 tcg_temp_free_i32(tmp2);
aa47cfdd 6583 tcg_temp_free_ptr(fpstatus);
0e326109 6584 break;
aa47cfdd 6585 }
600b828c 6586 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6587 {
6588 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6589 tmp2 = tcg_const_i32(0);
aa47cfdd 6590 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6591 tcg_temp_free_i32(tmp2);
aa47cfdd 6592 tcg_temp_free_ptr(fpstatus);
0e326109 6593 break;
aa47cfdd 6594 }
600b828c 6595 case NEON_2RM_VABS_F:
4373f3ce 6596 gen_vfp_abs(0);
9ee6e8bb 6597 break;
600b828c 6598 case NEON_2RM_VNEG_F:
4373f3ce 6599 gen_vfp_neg(0);
9ee6e8bb 6600 break;
600b828c 6601 case NEON_2RM_VSWP:
dd8fbd78
FN
6602 tmp2 = neon_load_reg(rd, pass);
6603 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6604 break;
600b828c 6605 case NEON_2RM_VTRN:
dd8fbd78 6606 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6607 switch (size) {
dd8fbd78
FN
6608 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6609 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6610 default: abort();
9ee6e8bb 6611 }
dd8fbd78 6612 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6613 break;
34f7b0a2
WN
6614 case NEON_2RM_VRINTN:
6615 case NEON_2RM_VRINTA:
6616 case NEON_2RM_VRINTM:
6617 case NEON_2RM_VRINTP:
6618 case NEON_2RM_VRINTZ:
6619 {
6620 TCGv_i32 tcg_rmode;
6621 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6622 int rmode;
6623
6624 if (op == NEON_2RM_VRINTZ) {
6625 rmode = FPROUNDING_ZERO;
6626 } else {
6627 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6628 }
6629
6630 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6631 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6632 cpu_env);
6633 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6634 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6635 cpu_env);
6636 tcg_temp_free_ptr(fpstatus);
6637 tcg_temp_free_i32(tcg_rmode);
6638 break;
6639 }
2ce70625
WN
6640 case NEON_2RM_VRINTX:
6641 {
6642 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6643 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6644 tcg_temp_free_ptr(fpstatus);
6645 break;
6646 }
901ad525
WN
6647 case NEON_2RM_VCVTAU:
6648 case NEON_2RM_VCVTAS:
6649 case NEON_2RM_VCVTNU:
6650 case NEON_2RM_VCVTNS:
6651 case NEON_2RM_VCVTPU:
6652 case NEON_2RM_VCVTPS:
6653 case NEON_2RM_VCVTMU:
6654 case NEON_2RM_VCVTMS:
6655 {
6656 bool is_signed = !extract32(insn, 7, 1);
6657 TCGv_ptr fpst = get_fpstatus_ptr(1);
6658 TCGv_i32 tcg_rmode, tcg_shift;
6659 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6660
6661 tcg_shift = tcg_const_i32(0);
6662 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6663 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6664 cpu_env);
6665
6666 if (is_signed) {
6667 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6668 tcg_shift, fpst);
6669 } else {
6670 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6671 tcg_shift, fpst);
6672 }
6673
6674 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6675 cpu_env);
6676 tcg_temp_free_i32(tcg_rmode);
6677 tcg_temp_free_i32(tcg_shift);
6678 tcg_temp_free_ptr(fpst);
6679 break;
6680 }
600b828c 6681 case NEON_2RM_VRECPE:
dd8fbd78 6682 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6683 break;
600b828c 6684 case NEON_2RM_VRSQRTE:
dd8fbd78 6685 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6686 break;
600b828c 6687 case NEON_2RM_VRECPE_F:
4373f3ce 6688 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6689 break;
600b828c 6690 case NEON_2RM_VRSQRTE_F:
4373f3ce 6691 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6692 break;
600b828c 6693 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6694 gen_vfp_sito(0, 1);
9ee6e8bb 6695 break;
600b828c 6696 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6697 gen_vfp_uito(0, 1);
9ee6e8bb 6698 break;
600b828c 6699 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6700 gen_vfp_tosiz(0, 1);
9ee6e8bb 6701 break;
600b828c 6702 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6703 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6704 break;
6705 default:
600b828c
PM
6706 /* Reserved op values were caught by the
6707 * neon_2rm_sizes[] check earlier.
6708 */
6709 abort();
9ee6e8bb 6710 }
600b828c 6711 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6712 tcg_gen_st_f32(cpu_F0s, cpu_env,
6713 neon_reg_offset(rd, pass));
9ee6e8bb 6714 } else {
dd8fbd78 6715 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6716 }
6717 }
6718 break;
6719 }
6720 } else if ((insn & (1 << 10)) == 0) {
6721 /* VTBL, VTBX. */
56907d77
PM
6722 int n = ((insn >> 8) & 3) + 1;
6723 if ((rn + n) > 32) {
6724 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6725 * helper function running off the end of the register file.
6726 */
6727 return 1;
6728 }
6729 n <<= 3;
9ee6e8bb 6730 if (insn & (1 << 6)) {
8f8e3aa4 6731 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6732 } else {
7d1b0095 6733 tmp = tcg_temp_new_i32();
8f8e3aa4 6734 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6735 }
8f8e3aa4 6736 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6737 tmp4 = tcg_const_i32(rn);
6738 tmp5 = tcg_const_i32(n);
9ef39277 6739 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6740 tcg_temp_free_i32(tmp);
9ee6e8bb 6741 if (insn & (1 << 6)) {
8f8e3aa4 6742 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6743 } else {
7d1b0095 6744 tmp = tcg_temp_new_i32();
8f8e3aa4 6745 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6746 }
8f8e3aa4 6747 tmp3 = neon_load_reg(rm, 1);
9ef39277 6748 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6749 tcg_temp_free_i32(tmp5);
6750 tcg_temp_free_i32(tmp4);
8f8e3aa4 6751 neon_store_reg(rd, 0, tmp2);
3018f259 6752 neon_store_reg(rd, 1, tmp3);
7d1b0095 6753 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6754 } else if ((insn & 0x380) == 0) {
6755 /* VDUP */
133da6aa
JR
6756 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6757 return 1;
6758 }
9ee6e8bb 6759 if (insn & (1 << 19)) {
dd8fbd78 6760 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6761 } else {
dd8fbd78 6762 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6763 }
6764 if (insn & (1 << 16)) {
dd8fbd78 6765 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6766 } else if (insn & (1 << 17)) {
6767 if ((insn >> 18) & 1)
dd8fbd78 6768 gen_neon_dup_high16(tmp);
9ee6e8bb 6769 else
dd8fbd78 6770 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6771 }
6772 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6773 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6774 tcg_gen_mov_i32(tmp2, tmp);
6775 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6776 }
7d1b0095 6777 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6778 } else {
6779 return 1;
6780 }
6781 }
6782 }
6783 return 0;
6784}
6785
0ecb72a5 6786static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6787{
4b6a83fb
PM
6788 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6789 const ARMCPRegInfo *ri;
9ee6e8bb
PB
6790
6791 cpnum = (insn >> 8) & 0xf;
6792 if (arm_feature(env, ARM_FEATURE_XSCALE)
6793 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6794 return 1;
6795
4b6a83fb 6796 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6797 switch (cpnum) {
6798 case 0:
6799 case 1:
6800 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6801 return disas_iwmmxt_insn(env, s, insn);
6802 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6803 return disas_dsp_insn(env, s, insn);
6804 }
6805 return 1;
4b6a83fb
PM
6806 default:
6807 break;
6808 }
6809
6810 /* Otherwise treat as a generic register access */
6811 is64 = (insn & (1 << 25)) == 0;
6812 if (!is64 && ((insn & (1 << 4)) == 0)) {
6813 /* cdp */
6814 return 1;
6815 }
6816
6817 crm = insn & 0xf;
6818 if (is64) {
6819 crn = 0;
6820 opc1 = (insn >> 4) & 0xf;
6821 opc2 = 0;
6822 rt2 = (insn >> 16) & 0xf;
6823 } else {
6824 crn = (insn >> 16) & 0xf;
6825 opc1 = (insn >> 21) & 7;
6826 opc2 = (insn >> 5) & 7;
6827 rt2 = 0;
6828 }
6829 isread = (insn >> 20) & 1;
6830 rt = (insn >> 12) & 0xf;
6831
60322b39 6832 ri = get_arm_cp_reginfo(s->cp_regs,
4b6a83fb
PM
6833 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6834 if (ri) {
6835 /* Check access permissions */
60322b39 6836 if (!cp_access_ok(s->current_pl, ri, isread)) {
4b6a83fb
PM
6837 return 1;
6838 }
6839
6840 /* Handle special cases first */
6841 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6842 case ARM_CP_NOP:
6843 return 0;
6844 case ARM_CP_WFI:
6845 if (isread) {
6846 return 1;
6847 }
eaed129d 6848 gen_set_pc_im(s, s->pc);
4b6a83fb 6849 s->is_jmp = DISAS_WFI;
2bee5105 6850 return 0;
4b6a83fb
PM
6851 default:
6852 break;
6853 }
6854
2452731c
PM
6855 if (use_icount && (ri->type & ARM_CP_IO)) {
6856 gen_io_start();
6857 }
6858
4b6a83fb
PM
6859 if (isread) {
6860 /* Read */
6861 if (is64) {
6862 TCGv_i64 tmp64;
6863 TCGv_i32 tmp;
6864 if (ri->type & ARM_CP_CONST) {
6865 tmp64 = tcg_const_i64(ri->resetvalue);
6866 } else if (ri->readfn) {
6867 TCGv_ptr tmpptr;
eaed129d 6868 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6869 tmp64 = tcg_temp_new_i64();
6870 tmpptr = tcg_const_ptr(ri);
6871 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6872 tcg_temp_free_ptr(tmpptr);
6873 } else {
6874 tmp64 = tcg_temp_new_i64();
6875 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6876 }
6877 tmp = tcg_temp_new_i32();
6878 tcg_gen_trunc_i64_i32(tmp, tmp64);
6879 store_reg(s, rt, tmp);
6880 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6881 tmp = tcg_temp_new_i32();
4b6a83fb 6882 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6883 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6884 store_reg(s, rt2, tmp);
6885 } else {
39d5492a 6886 TCGv_i32 tmp;
4b6a83fb
PM
6887 if (ri->type & ARM_CP_CONST) {
6888 tmp = tcg_const_i32(ri->resetvalue);
6889 } else if (ri->readfn) {
6890 TCGv_ptr tmpptr;
eaed129d 6891 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6892 tmp = tcg_temp_new_i32();
6893 tmpptr = tcg_const_ptr(ri);
6894 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6895 tcg_temp_free_ptr(tmpptr);
6896 } else {
6897 tmp = load_cpu_offset(ri->fieldoffset);
6898 }
6899 if (rt == 15) {
6900 /* Destination register of r15 for 32 bit loads sets
6901 * the condition codes from the high 4 bits of the value
6902 */
6903 gen_set_nzcv(tmp);
6904 tcg_temp_free_i32(tmp);
6905 } else {
6906 store_reg(s, rt, tmp);
6907 }
6908 }
6909 } else {
6910 /* Write */
6911 if (ri->type & ARM_CP_CONST) {
6912 /* If not forbidden by access permissions, treat as WI */
6913 return 0;
6914 }
6915
6916 if (is64) {
39d5492a 6917 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
6918 TCGv_i64 tmp64 = tcg_temp_new_i64();
6919 tmplo = load_reg(s, rt);
6920 tmphi = load_reg(s, rt2);
6921 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6922 tcg_temp_free_i32(tmplo);
6923 tcg_temp_free_i32(tmphi);
6924 if (ri->writefn) {
6925 TCGv_ptr tmpptr = tcg_const_ptr(ri);
eaed129d 6926 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6927 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6928 tcg_temp_free_ptr(tmpptr);
6929 } else {
6930 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6931 }
6932 tcg_temp_free_i64(tmp64);
6933 } else {
6934 if (ri->writefn) {
39d5492a 6935 TCGv_i32 tmp;
4b6a83fb 6936 TCGv_ptr tmpptr;
eaed129d 6937 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6938 tmp = load_reg(s, rt);
6939 tmpptr = tcg_const_ptr(ri);
6940 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6941 tcg_temp_free_ptr(tmpptr);
6942 tcg_temp_free_i32(tmp);
6943 } else {
39d5492a 6944 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
6945 store_cpu_offset(tmp, ri->fieldoffset);
6946 }
6947 }
2452731c
PM
6948 }
6949
6950 if (use_icount && (ri->type & ARM_CP_IO)) {
6951 /* I/O operations must end the TB here (whether read or write) */
6952 gen_io_end();
6953 gen_lookup_tb(s);
6954 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
6955 /* We default to ending the TB on a coprocessor register write,
6956 * but allow this to be suppressed by the register definition
6957 * (usually only necessary to work around guest bugs).
6958 */
2452731c 6959 gen_lookup_tb(s);
4b6a83fb 6960 }
2452731c 6961
4b6a83fb
PM
6962 return 0;
6963 }
6964
626187d8
PM
6965 /* Unknown register; this might be a guest error or a QEMU
6966 * unimplemented feature.
6967 */
6968 if (is64) {
6969 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
6970 "64 bit system register cp:%d opc1: %d crm:%d\n",
6971 isread ? "read" : "write", cpnum, opc1, crm);
6972 } else {
6973 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
6974 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
6975 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
6976 }
6977
4a9a539f 6978 return 1;
9ee6e8bb
PB
6979}
6980
5e3f878a
PB
6981
6982/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6983static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 6984{
39d5492a 6985 TCGv_i32 tmp;
7d1b0095 6986 tmp = tcg_temp_new_i32();
5e3f878a
PB
6987 tcg_gen_trunc_i64_i32(tmp, val);
6988 store_reg(s, rlow, tmp);
7d1b0095 6989 tmp = tcg_temp_new_i32();
5e3f878a
PB
6990 tcg_gen_shri_i64(val, val, 32);
6991 tcg_gen_trunc_i64_i32(tmp, val);
6992 store_reg(s, rhigh, tmp);
6993}
6994
6995/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6996static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6997{
a7812ae4 6998 TCGv_i64 tmp;
39d5492a 6999 TCGv_i32 tmp2;
5e3f878a 7000
36aa55dc 7001 /* Load value and extend to 64 bits. */
a7812ae4 7002 tmp = tcg_temp_new_i64();
5e3f878a
PB
7003 tmp2 = load_reg(s, rlow);
7004 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7005 tcg_temp_free_i32(tmp2);
5e3f878a 7006 tcg_gen_add_i64(val, val, tmp);
b75263d6 7007 tcg_temp_free_i64(tmp);
5e3f878a
PB
7008}
7009
7010/* load and add a 64-bit value from a register pair. */
a7812ae4 7011static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7012{
a7812ae4 7013 TCGv_i64 tmp;
39d5492a
PM
7014 TCGv_i32 tmpl;
7015 TCGv_i32 tmph;
5e3f878a
PB
7016
7017 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7018 tmpl = load_reg(s, rlow);
7019 tmph = load_reg(s, rhigh);
a7812ae4 7020 tmp = tcg_temp_new_i64();
36aa55dc 7021 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7022 tcg_temp_free_i32(tmpl);
7023 tcg_temp_free_i32(tmph);
5e3f878a 7024 tcg_gen_add_i64(val, val, tmp);
b75263d6 7025 tcg_temp_free_i64(tmp);
5e3f878a
PB
7026}
7027
c9f10124 7028/* Set N and Z flags from hi|lo. */
39d5492a 7029static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7030{
c9f10124
RH
7031 tcg_gen_mov_i32(cpu_NF, hi);
7032 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7033}
7034
426f5abc
PB
7035/* Load/Store exclusive instructions are implemented by remembering
7036 the value/address loaded, and seeing if these are the same
b90372ad 7037 when the store is performed. This should be sufficient to implement
426f5abc
PB
7038 the architecturally mandated semantics, and avoids having to monitor
7039 regular stores.
7040
7041 In system emulation mode only one CPU will be running at once, so
7042 this sequence is effectively atomic. In user emulation mode we
7043 throw an exception and handle the atomic operation elsewhere. */
/* Emit code for LDREX/LDREXB/LDREXH/LDREXD: load from addr into rt
 * (and rt2 for the doubleword case, size == 3), and record the loaded
 * value and address in cpu_exclusive_val / cpu_exclusive_addr so a
 * later store-exclusive can check them.
 * size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        /* Doubleword: load the second word from addr + 4 and keep the
         * combined 64-bit value as the exclusive monitor value.
         */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the monitor: remember the address this load was exclusive to */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7080
/* Emit code for CLREX: clear the local exclusive monitor.  -1 can never
 * match a valid exclusive address, so any subsequent store-exclusive
 * will fail.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7085
7086#ifdef CONFIG_USER_ONLY
/* User-mode emulation variant of store-exclusive: record the address and
 * the packed operands (size, rd, rt, rt2) and raise EXCP_STREX so the
 * operation can be completed atomically outside generated code.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    /* Pack the operand registers and access size for the exception handler */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
7095#else
7096static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7097 TCGv_i32 addr, int size)
426f5abc 7098{
39d5492a 7099 TCGv_i32 tmp;
03d05e2d 7100 TCGv_i64 val64, extaddr;
426f5abc
PB
7101 int done_label;
7102 int fail_label;
7103
7104 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7105 [addr] = {Rt};
7106 {Rd} = 0;
7107 } else {
7108 {Rd} = 1;
7109 } */
7110 fail_label = gen_new_label();
7111 done_label = gen_new_label();
03d05e2d
PM
7112 extaddr = tcg_temp_new_i64();
7113 tcg_gen_extu_i32_i64(extaddr, addr);
7114 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7115 tcg_temp_free_i64(extaddr);
7116
94ee24e7 7117 tmp = tcg_temp_new_i32();
426f5abc
PB
7118 switch (size) {
7119 case 0:
08307563 7120 gen_aa32_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
7121 break;
7122 case 1:
08307563 7123 gen_aa32_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
7124 break;
7125 case 2:
7126 case 3:
08307563 7127 gen_aa32_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
7128 break;
7129 default:
7130 abort();
7131 }
03d05e2d
PM
7132
7133 val64 = tcg_temp_new_i64();
426f5abc 7134 if (size == 3) {
39d5492a 7135 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d 7136 TCGv_i32 tmp3 = tcg_temp_new_i32();
426f5abc 7137 tcg_gen_addi_i32(tmp2, addr, 4);
03d05e2d 7138 gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
7d1b0095 7139 tcg_temp_free_i32(tmp2);
03d05e2d
PM
7140 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7141 tcg_temp_free_i32(tmp3);
7142 } else {
7143 tcg_gen_extu_i32_i64(val64, tmp);
426f5abc 7144 }
03d05e2d
PM
7145 tcg_temp_free_i32(tmp);
7146
7147 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7148 tcg_temp_free_i64(val64);
7149
426f5abc
PB
7150 tmp = load_reg(s, rt);
7151 switch (size) {
7152 case 0:
08307563 7153 gen_aa32_st8(tmp, addr, IS_USER(s));
426f5abc
PB
7154 break;
7155 case 1:
08307563 7156 gen_aa32_st16(tmp, addr, IS_USER(s));
426f5abc
PB
7157 break;
7158 case 2:
7159 case 3:
08307563 7160 gen_aa32_st32(tmp, addr, IS_USER(s));
426f5abc
PB
7161 break;
7162 default:
7163 abort();
7164 }
94ee24e7 7165 tcg_temp_free_i32(tmp);
426f5abc
PB
7166 if (size == 3) {
7167 tcg_gen_addi_i32(addr, addr, 4);
7168 tmp = load_reg(s, rt2);
08307563 7169 gen_aa32_st32(tmp, addr, IS_USER(s));
94ee24e7 7170 tcg_temp_free_i32(tmp);
426f5abc
PB
7171 }
7172 tcg_gen_movi_i32(cpu_R[rd], 0);
7173 tcg_gen_br(done_label);
7174 gen_set_label(fail_label);
7175 tcg_gen_movi_i32(cpu_R[rd], 1);
7176 gen_set_label(done_label);
03d05e2d 7177 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7178}
7179#endif
7180
81465888
PM
7181/* gen_srs:
7182 * @env: CPUARMState
7183 * @s: DisasContext
7184 * @mode: mode field from insn (which stack to store to)
7185 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7186 * @writeback: true if writeback bit set
7187 *
7188 * Generate code for the SRS (Store Return State) insn.
7189 */
7190static void gen_srs(DisasContext *s,
7191 uint32_t mode, uint32_t amode, bool writeback)
7192{
7193 int32_t offset;
7194 TCGv_i32 addr = tcg_temp_new_i32();
7195 TCGv_i32 tmp = tcg_const_i32(mode);
7196 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7197 tcg_temp_free_i32(tmp);
7198 switch (amode) {
7199 case 0: /* DA */
7200 offset = -4;
7201 break;
7202 case 1: /* IA */
7203 offset = 0;
7204 break;
7205 case 2: /* DB */
7206 offset = -8;
7207 break;
7208 case 3: /* IB */
7209 offset = 4;
7210 break;
7211 default:
7212 abort();
7213 }
7214 tcg_gen_addi_i32(addr, addr, offset);
7215 tmp = load_reg(s, 14);
08307563 7216 gen_aa32_st32(tmp, addr, 0);
5a839c0d 7217 tcg_temp_free_i32(tmp);
81465888
PM
7218 tmp = load_cpu_field(spsr);
7219 tcg_gen_addi_i32(addr, addr, 4);
08307563 7220 gen_aa32_st32(tmp, addr, 0);
5a839c0d 7221 tcg_temp_free_i32(tmp);
81465888
PM
7222 if (writeback) {
7223 switch (amode) {
7224 case 0:
7225 offset = -8;
7226 break;
7227 case 1:
7228 offset = 4;
7229 break;
7230 case 2:
7231 offset = -4;
7232 break;
7233 case 3:
7234 offset = 0;
7235 break;
7236 default:
7237 abort();
7238 }
7239 tcg_gen_addi_i32(addr, addr, offset);
7240 tmp = tcg_const_i32(mode);
7241 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7242 tcg_temp_free_i32(tmp);
7243 }
7244 tcg_temp_free_i32(addr);
7245}
7246
0ecb72a5 7247static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
7248{
7249 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7250 TCGv_i32 tmp;
7251 TCGv_i32 tmp2;
7252 TCGv_i32 tmp3;
7253 TCGv_i32 addr;
a7812ae4 7254 TCGv_i64 tmp64;
9ee6e8bb 7255
d31dd73e 7256 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
7257 s->pc += 4;
7258
7259 /* M variants do not implement ARM mode. */
7260 if (IS_M(env))
7261 goto illegal_op;
7262 cond = insn >> 28;
7263 if (cond == 0xf){
be5e7a76
DES
7264 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7265 * choose to UNDEF. In ARMv5 and above the space is used
7266 * for miscellaneous unconditional instructions.
7267 */
7268 ARCH(5);
7269
9ee6e8bb
PB
7270 /* Unconditional instructions. */
7271 if (((insn >> 25) & 7) == 1) {
7272 /* NEON Data processing. */
7273 if (!arm_feature(env, ARM_FEATURE_NEON))
7274 goto illegal_op;
7275
7276 if (disas_neon_data_insn(env, s, insn))
7277 goto illegal_op;
7278 return;
7279 }
7280 if ((insn & 0x0f100000) == 0x04000000) {
7281 /* NEON load/store. */
7282 if (!arm_feature(env, ARM_FEATURE_NEON))
7283 goto illegal_op;
7284
7285 if (disas_neon_ls_insn(env, s, insn))
7286 goto illegal_op;
7287 return;
7288 }
6a57f3eb
WN
7289 if ((insn & 0x0f000e10) == 0x0e000a00) {
7290 /* VFP. */
7291 if (disas_vfp_insn(env, s, insn)) {
7292 goto illegal_op;
7293 }
7294 return;
7295 }
3d185e5d
PM
7296 if (((insn & 0x0f30f000) == 0x0510f000) ||
7297 ((insn & 0x0f30f010) == 0x0710f000)) {
7298 if ((insn & (1 << 22)) == 0) {
7299 /* PLDW; v7MP */
7300 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7301 goto illegal_op;
7302 }
7303 }
7304 /* Otherwise PLD; v5TE+ */
be5e7a76 7305 ARCH(5TE);
3d185e5d
PM
7306 return;
7307 }
7308 if (((insn & 0x0f70f000) == 0x0450f000) ||
7309 ((insn & 0x0f70f010) == 0x0650f000)) {
7310 ARCH(7);
7311 return; /* PLI; V7 */
7312 }
7313 if (((insn & 0x0f700000) == 0x04100000) ||
7314 ((insn & 0x0f700010) == 0x06100000)) {
7315 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7316 goto illegal_op;
7317 }
7318 return; /* v7MP: Unallocated memory hint: must NOP */
7319 }
7320
7321 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7322 ARCH(6);
7323 /* setend */
10962fd5
PM
7324 if (((insn >> 9) & 1) != s->bswap_code) {
7325 /* Dynamic endianness switching not implemented. */
e0c270d9 7326 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7327 goto illegal_op;
7328 }
7329 return;
7330 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7331 switch ((insn >> 4) & 0xf) {
7332 case 1: /* clrex */
7333 ARCH(6K);
426f5abc 7334 gen_clrex(s);
9ee6e8bb
PB
7335 return;
7336 case 4: /* dsb */
7337 case 5: /* dmb */
7338 case 6: /* isb */
7339 ARCH(7);
7340 /* We don't emulate caches so these are a no-op. */
7341 return;
7342 default:
7343 goto illegal_op;
7344 }
7345 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7346 /* srs */
81465888 7347 if (IS_USER(s)) {
9ee6e8bb 7348 goto illegal_op;
9ee6e8bb 7349 }
81465888
PM
7350 ARCH(6);
7351 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7352 return;
ea825eee 7353 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7354 /* rfe */
c67b6b71 7355 int32_t offset;
9ee6e8bb
PB
7356 if (IS_USER(s))
7357 goto illegal_op;
7358 ARCH(6);
7359 rn = (insn >> 16) & 0xf;
b0109805 7360 addr = load_reg(s, rn);
9ee6e8bb
PB
7361 i = (insn >> 23) & 3;
7362 switch (i) {
b0109805 7363 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7364 case 1: offset = 0; break; /* IA */
7365 case 2: offset = -8; break; /* DB */
b0109805 7366 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7367 default: abort();
7368 }
7369 if (offset)
b0109805
PB
7370 tcg_gen_addi_i32(addr, addr, offset);
7371 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7372 tmp = tcg_temp_new_i32();
08307563 7373 gen_aa32_ld32u(tmp, addr, 0);
b0109805 7374 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7375 tmp2 = tcg_temp_new_i32();
08307563 7376 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
7377 if (insn & (1 << 21)) {
7378 /* Base writeback. */
7379 switch (i) {
b0109805 7380 case 0: offset = -8; break;
c67b6b71
FN
7381 case 1: offset = 4; break;
7382 case 2: offset = -4; break;
b0109805 7383 case 3: offset = 0; break;
9ee6e8bb
PB
7384 default: abort();
7385 }
7386 if (offset)
b0109805
PB
7387 tcg_gen_addi_i32(addr, addr, offset);
7388 store_reg(s, rn, addr);
7389 } else {
7d1b0095 7390 tcg_temp_free_i32(addr);
9ee6e8bb 7391 }
b0109805 7392 gen_rfe(s, tmp, tmp2);
c67b6b71 7393 return;
9ee6e8bb
PB
7394 } else if ((insn & 0x0e000000) == 0x0a000000) {
7395 /* branch link and change to thumb (blx <offset>) */
7396 int32_t offset;
7397
7398 val = (uint32_t)s->pc;
7d1b0095 7399 tmp = tcg_temp_new_i32();
d9ba4830
PB
7400 tcg_gen_movi_i32(tmp, val);
7401 store_reg(s, 14, tmp);
9ee6e8bb
PB
7402 /* Sign-extend the 24-bit offset */
7403 offset = (((int32_t)insn) << 8) >> 8;
7404 /* offset * 4 + bit24 * 2 + (thumb bit) */
7405 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7406 /* pipeline offset */
7407 val += 4;
be5e7a76 7408 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7409 gen_bx_im(s, val);
9ee6e8bb
PB
7410 return;
7411 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7412 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7413 /* iWMMXt register transfer. */
7414 if (env->cp15.c15_cpar & (1 << 1))
7415 if (!disas_iwmmxt_insn(env, s, insn))
7416 return;
7417 }
7418 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7419 /* Coprocessor double register transfer. */
be5e7a76 7420 ARCH(5TE);
9ee6e8bb
PB
7421 } else if ((insn & 0x0f000010) == 0x0e000010) {
7422 /* Additional coprocessor register transfer. */
7997d92f 7423 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7424 uint32_t mask;
7425 uint32_t val;
7426 /* cps (privileged) */
7427 if (IS_USER(s))
7428 return;
7429 mask = val = 0;
7430 if (insn & (1 << 19)) {
7431 if (insn & (1 << 8))
7432 mask |= CPSR_A;
7433 if (insn & (1 << 7))
7434 mask |= CPSR_I;
7435 if (insn & (1 << 6))
7436 mask |= CPSR_F;
7437 if (insn & (1 << 18))
7438 val |= mask;
7439 }
7997d92f 7440 if (insn & (1 << 17)) {
9ee6e8bb
PB
7441 mask |= CPSR_M;
7442 val |= (insn & 0x1f);
7443 }
7444 if (mask) {
2fbac54b 7445 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7446 }
7447 return;
7448 }
7449 goto illegal_op;
7450 }
7451 if (cond != 0xe) {
7452 /* if not always execute, we generate a conditional jump to
7453 next instruction */
7454 s->condlabel = gen_new_label();
39fb730a 7455 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7456 s->condjmp = 1;
7457 }
7458 if ((insn & 0x0f900000) == 0x03000000) {
7459 if ((insn & (1 << 21)) == 0) {
7460 ARCH(6T2);
7461 rd = (insn >> 12) & 0xf;
7462 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7463 if ((insn & (1 << 22)) == 0) {
7464 /* MOVW */
7d1b0095 7465 tmp = tcg_temp_new_i32();
5e3f878a 7466 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7467 } else {
7468 /* MOVT */
5e3f878a 7469 tmp = load_reg(s, rd);
86831435 7470 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7471 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7472 }
5e3f878a 7473 store_reg(s, rd, tmp);
9ee6e8bb
PB
7474 } else {
7475 if (((insn >> 12) & 0xf) != 0xf)
7476 goto illegal_op;
7477 if (((insn >> 16) & 0xf) == 0) {
7478 gen_nop_hint(s, insn & 0xff);
7479 } else {
7480 /* CPSR = immediate */
7481 val = insn & 0xff;
7482 shift = ((insn >> 8) & 0xf) * 2;
7483 if (shift)
7484 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7485 i = ((insn & (1 << 22)) != 0);
2fbac54b 7486 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
7487 goto illegal_op;
7488 }
7489 }
7490 } else if ((insn & 0x0f900000) == 0x01000000
7491 && (insn & 0x00000090) != 0x00000090) {
7492 /* miscellaneous instructions */
7493 op1 = (insn >> 21) & 3;
7494 sh = (insn >> 4) & 0xf;
7495 rm = insn & 0xf;
7496 switch (sh) {
7497 case 0x0: /* move program status register */
7498 if (op1 & 1) {
7499 /* PSR = reg */
2fbac54b 7500 tmp = load_reg(s, rm);
9ee6e8bb 7501 i = ((op1 & 2) != 0);
2fbac54b 7502 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7503 goto illegal_op;
7504 } else {
7505 /* reg = PSR */
7506 rd = (insn >> 12) & 0xf;
7507 if (op1 & 2) {
7508 if (IS_USER(s))
7509 goto illegal_op;
d9ba4830 7510 tmp = load_cpu_field(spsr);
9ee6e8bb 7511 } else {
7d1b0095 7512 tmp = tcg_temp_new_i32();
9ef39277 7513 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7514 }
d9ba4830 7515 store_reg(s, rd, tmp);
9ee6e8bb
PB
7516 }
7517 break;
7518 case 0x1:
7519 if (op1 == 1) {
7520 /* branch/exchange thumb (bx). */
be5e7a76 7521 ARCH(4T);
d9ba4830
PB
7522 tmp = load_reg(s, rm);
7523 gen_bx(s, tmp);
9ee6e8bb
PB
7524 } else if (op1 == 3) {
7525 /* clz */
be5e7a76 7526 ARCH(5);
9ee6e8bb 7527 rd = (insn >> 12) & 0xf;
1497c961
PB
7528 tmp = load_reg(s, rm);
7529 gen_helper_clz(tmp, tmp);
7530 store_reg(s, rd, tmp);
9ee6e8bb
PB
7531 } else {
7532 goto illegal_op;
7533 }
7534 break;
7535 case 0x2:
7536 if (op1 == 1) {
7537 ARCH(5J); /* bxj */
7538 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7539 tmp = load_reg(s, rm);
7540 gen_bx(s, tmp);
9ee6e8bb
PB
7541 } else {
7542 goto illegal_op;
7543 }
7544 break;
7545 case 0x3:
7546 if (op1 != 1)
7547 goto illegal_op;
7548
be5e7a76 7549 ARCH(5);
9ee6e8bb 7550 /* branch link/exchange thumb (blx) */
d9ba4830 7551 tmp = load_reg(s, rm);
7d1b0095 7552 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7553 tcg_gen_movi_i32(tmp2, s->pc);
7554 store_reg(s, 14, tmp2);
7555 gen_bx(s, tmp);
9ee6e8bb
PB
7556 break;
7557 case 0x5: /* saturating add/subtract */
be5e7a76 7558 ARCH(5TE);
9ee6e8bb
PB
7559 rd = (insn >> 12) & 0xf;
7560 rn = (insn >> 16) & 0xf;
b40d0353 7561 tmp = load_reg(s, rm);
5e3f878a 7562 tmp2 = load_reg(s, rn);
9ee6e8bb 7563 if (op1 & 2)
9ef39277 7564 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7565 if (op1 & 1)
9ef39277 7566 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7567 else
9ef39277 7568 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7569 tcg_temp_free_i32(tmp2);
5e3f878a 7570 store_reg(s, rd, tmp);
9ee6e8bb 7571 break;
49e14940
AL
7572 case 7:
7573 /* SMC instruction (op1 == 3)
7574 and undefined instructions (op1 == 0 || op1 == 2)
7575 will trap */
7576 if (op1 != 1) {
7577 goto illegal_op;
7578 }
7579 /* bkpt */
be5e7a76 7580 ARCH(5);
bc4a0de0 7581 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
7582 break;
7583 case 0x8: /* signed multiply */
7584 case 0xa:
7585 case 0xc:
7586 case 0xe:
be5e7a76 7587 ARCH(5TE);
9ee6e8bb
PB
7588 rs = (insn >> 8) & 0xf;
7589 rn = (insn >> 12) & 0xf;
7590 rd = (insn >> 16) & 0xf;
7591 if (op1 == 1) {
7592 /* (32 * 16) >> 16 */
5e3f878a
PB
7593 tmp = load_reg(s, rm);
7594 tmp2 = load_reg(s, rs);
9ee6e8bb 7595 if (sh & 4)
5e3f878a 7596 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7597 else
5e3f878a 7598 gen_sxth(tmp2);
a7812ae4
PB
7599 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7600 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7601 tmp = tcg_temp_new_i32();
a7812ae4 7602 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7603 tcg_temp_free_i64(tmp64);
9ee6e8bb 7604 if ((sh & 2) == 0) {
5e3f878a 7605 tmp2 = load_reg(s, rn);
9ef39277 7606 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7607 tcg_temp_free_i32(tmp2);
9ee6e8bb 7608 }
5e3f878a 7609 store_reg(s, rd, tmp);
9ee6e8bb
PB
7610 } else {
7611 /* 16 * 16 */
5e3f878a
PB
7612 tmp = load_reg(s, rm);
7613 tmp2 = load_reg(s, rs);
7614 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7615 tcg_temp_free_i32(tmp2);
9ee6e8bb 7616 if (op1 == 2) {
a7812ae4
PB
7617 tmp64 = tcg_temp_new_i64();
7618 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7619 tcg_temp_free_i32(tmp);
a7812ae4
PB
7620 gen_addq(s, tmp64, rn, rd);
7621 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7622 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7623 } else {
7624 if (op1 == 0) {
5e3f878a 7625 tmp2 = load_reg(s, rn);
9ef39277 7626 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7627 tcg_temp_free_i32(tmp2);
9ee6e8bb 7628 }
5e3f878a 7629 store_reg(s, rd, tmp);
9ee6e8bb
PB
7630 }
7631 }
7632 break;
7633 default:
7634 goto illegal_op;
7635 }
7636 } else if (((insn & 0x0e000000) == 0 &&
7637 (insn & 0x00000090) != 0x90) ||
7638 ((insn & 0x0e000000) == (1 << 25))) {
7639 int set_cc, logic_cc, shiftop;
7640
7641 op1 = (insn >> 21) & 0xf;
7642 set_cc = (insn >> 20) & 1;
7643 logic_cc = table_logic_cc[op1] & set_cc;
7644
7645 /* data processing instruction */
7646 if (insn & (1 << 25)) {
7647 /* immediate operand */
7648 val = insn & 0xff;
7649 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7650 if (shift) {
9ee6e8bb 7651 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7652 }
7d1b0095 7653 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7654 tcg_gen_movi_i32(tmp2, val);
7655 if (logic_cc && shift) {
7656 gen_set_CF_bit31(tmp2);
7657 }
9ee6e8bb
PB
7658 } else {
7659 /* register */
7660 rm = (insn) & 0xf;
e9bb4aa9 7661 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7662 shiftop = (insn >> 5) & 3;
7663 if (!(insn & (1 << 4))) {
7664 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7665 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7666 } else {
7667 rs = (insn >> 8) & 0xf;
8984bd2e 7668 tmp = load_reg(s, rs);
e9bb4aa9 7669 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7670 }
7671 }
7672 if (op1 != 0x0f && op1 != 0x0d) {
7673 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7674 tmp = load_reg(s, rn);
7675 } else {
39d5492a 7676 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7677 }
7678 rd = (insn >> 12) & 0xf;
7679 switch(op1) {
7680 case 0x00:
e9bb4aa9
JR
7681 tcg_gen_and_i32(tmp, tmp, tmp2);
7682 if (logic_cc) {
7683 gen_logic_CC(tmp);
7684 }
21aeb343 7685 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7686 break;
7687 case 0x01:
e9bb4aa9
JR
7688 tcg_gen_xor_i32(tmp, tmp, tmp2);
7689 if (logic_cc) {
7690 gen_logic_CC(tmp);
7691 }
21aeb343 7692 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7693 break;
7694 case 0x02:
7695 if (set_cc && rd == 15) {
7696 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7697 if (IS_USER(s)) {
9ee6e8bb 7698 goto illegal_op;
e9bb4aa9 7699 }
72485ec4 7700 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7701 gen_exception_return(s, tmp);
9ee6e8bb 7702 } else {
e9bb4aa9 7703 if (set_cc) {
72485ec4 7704 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7705 } else {
7706 tcg_gen_sub_i32(tmp, tmp, tmp2);
7707 }
21aeb343 7708 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7709 }
7710 break;
7711 case 0x03:
e9bb4aa9 7712 if (set_cc) {
72485ec4 7713 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7714 } else {
7715 tcg_gen_sub_i32(tmp, tmp2, tmp);
7716 }
21aeb343 7717 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7718 break;
7719 case 0x04:
e9bb4aa9 7720 if (set_cc) {
72485ec4 7721 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7722 } else {
7723 tcg_gen_add_i32(tmp, tmp, tmp2);
7724 }
21aeb343 7725 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7726 break;
7727 case 0x05:
e9bb4aa9 7728 if (set_cc) {
49b4c31e 7729 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7730 } else {
7731 gen_add_carry(tmp, tmp, tmp2);
7732 }
21aeb343 7733 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7734 break;
7735 case 0x06:
e9bb4aa9 7736 if (set_cc) {
2de68a49 7737 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7738 } else {
7739 gen_sub_carry(tmp, tmp, tmp2);
7740 }
21aeb343 7741 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7742 break;
7743 case 0x07:
e9bb4aa9 7744 if (set_cc) {
2de68a49 7745 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7746 } else {
7747 gen_sub_carry(tmp, tmp2, tmp);
7748 }
21aeb343 7749 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7750 break;
7751 case 0x08:
7752 if (set_cc) {
e9bb4aa9
JR
7753 tcg_gen_and_i32(tmp, tmp, tmp2);
7754 gen_logic_CC(tmp);
9ee6e8bb 7755 }
7d1b0095 7756 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7757 break;
7758 case 0x09:
7759 if (set_cc) {
e9bb4aa9
JR
7760 tcg_gen_xor_i32(tmp, tmp, tmp2);
7761 gen_logic_CC(tmp);
9ee6e8bb 7762 }
7d1b0095 7763 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7764 break;
7765 case 0x0a:
7766 if (set_cc) {
72485ec4 7767 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7768 }
7d1b0095 7769 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7770 break;
7771 case 0x0b:
7772 if (set_cc) {
72485ec4 7773 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7774 }
7d1b0095 7775 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7776 break;
7777 case 0x0c:
e9bb4aa9
JR
7778 tcg_gen_or_i32(tmp, tmp, tmp2);
7779 if (logic_cc) {
7780 gen_logic_CC(tmp);
7781 }
21aeb343 7782 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7783 break;
7784 case 0x0d:
7785 if (logic_cc && rd == 15) {
7786 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7787 if (IS_USER(s)) {
9ee6e8bb 7788 goto illegal_op;
e9bb4aa9
JR
7789 }
7790 gen_exception_return(s, tmp2);
9ee6e8bb 7791 } else {
e9bb4aa9
JR
7792 if (logic_cc) {
7793 gen_logic_CC(tmp2);
7794 }
21aeb343 7795 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7796 }
7797 break;
7798 case 0x0e:
f669df27 7799 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7800 if (logic_cc) {
7801 gen_logic_CC(tmp);
7802 }
21aeb343 7803 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7804 break;
7805 default:
7806 case 0x0f:
e9bb4aa9
JR
7807 tcg_gen_not_i32(tmp2, tmp2);
7808 if (logic_cc) {
7809 gen_logic_CC(tmp2);
7810 }
21aeb343 7811 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7812 break;
7813 }
e9bb4aa9 7814 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7815 tcg_temp_free_i32(tmp2);
e9bb4aa9 7816 }
9ee6e8bb
PB
7817 } else {
7818 /* other instructions */
7819 op1 = (insn >> 24) & 0xf;
7820 switch(op1) {
7821 case 0x0:
7822 case 0x1:
7823 /* multiplies, extra load/stores */
7824 sh = (insn >> 5) & 3;
7825 if (sh == 0) {
7826 if (op1 == 0x0) {
7827 rd = (insn >> 16) & 0xf;
7828 rn = (insn >> 12) & 0xf;
7829 rs = (insn >> 8) & 0xf;
7830 rm = (insn) & 0xf;
7831 op1 = (insn >> 20) & 0xf;
7832 switch (op1) {
7833 case 0: case 1: case 2: case 3: case 6:
7834 /* 32 bit mul */
5e3f878a
PB
7835 tmp = load_reg(s, rs);
7836 tmp2 = load_reg(s, rm);
7837 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7838 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7839 if (insn & (1 << 22)) {
7840 /* Subtract (mls) */
7841 ARCH(6T2);
5e3f878a
PB
7842 tmp2 = load_reg(s, rn);
7843 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7844 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7845 } else if (insn & (1 << 21)) {
7846 /* Add */
5e3f878a
PB
7847 tmp2 = load_reg(s, rn);
7848 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7849 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7850 }
7851 if (insn & (1 << 20))
5e3f878a
PB
7852 gen_logic_CC(tmp);
7853 store_reg(s, rd, tmp);
9ee6e8bb 7854 break;
8aac08b1
AJ
7855 case 4:
7856 /* 64 bit mul double accumulate (UMAAL) */
7857 ARCH(6);
7858 tmp = load_reg(s, rs);
7859 tmp2 = load_reg(s, rm);
7860 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7861 gen_addq_lo(s, tmp64, rn);
7862 gen_addq_lo(s, tmp64, rd);
7863 gen_storeq_reg(s, rn, rd, tmp64);
7864 tcg_temp_free_i64(tmp64);
7865 break;
7866 case 8: case 9: case 10: case 11:
7867 case 12: case 13: case 14: case 15:
7868 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7869 tmp = load_reg(s, rs);
7870 tmp2 = load_reg(s, rm);
8aac08b1 7871 if (insn & (1 << 22)) {
c9f10124 7872 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7873 } else {
c9f10124 7874 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7875 }
7876 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7877 TCGv_i32 al = load_reg(s, rn);
7878 TCGv_i32 ah = load_reg(s, rd);
c9f10124 7879 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
7880 tcg_temp_free_i32(al);
7881 tcg_temp_free_i32(ah);
9ee6e8bb 7882 }
8aac08b1 7883 if (insn & (1 << 20)) {
c9f10124 7884 gen_logicq_cc(tmp, tmp2);
8aac08b1 7885 }
c9f10124
RH
7886 store_reg(s, rn, tmp);
7887 store_reg(s, rd, tmp2);
9ee6e8bb 7888 break;
8aac08b1
AJ
7889 default:
7890 goto illegal_op;
9ee6e8bb
PB
7891 }
7892 } else {
7893 rn = (insn >> 16) & 0xf;
7894 rd = (insn >> 12) & 0xf;
7895 if (insn & (1 << 23)) {
7896 /* load/store exclusive */
2359bf80 7897 int op2 = (insn >> 8) & 3;
86753403 7898 op1 = (insn >> 21) & 0x3;
2359bf80
MR
7899
7900 switch (op2) {
7901 case 0: /* lda/stl */
7902 if (op1 == 1) {
7903 goto illegal_op;
7904 }
7905 ARCH(8);
7906 break;
7907 case 1: /* reserved */
7908 goto illegal_op;
7909 case 2: /* ldaex/stlex */
7910 ARCH(8);
7911 break;
7912 case 3: /* ldrex/strex */
7913 if (op1) {
7914 ARCH(6K);
7915 } else {
7916 ARCH(6);
7917 }
7918 break;
7919 }
7920
3174f8e9 7921 addr = tcg_temp_local_new_i32();
98a46317 7922 load_reg_var(s, addr, rn);
2359bf80
MR
7923
7924 /* Since the emulation does not have barriers,
7925 the acquire/release semantics need no special
7926 handling */
7927 if (op2 == 0) {
7928 if (insn & (1 << 20)) {
7929 tmp = tcg_temp_new_i32();
7930 switch (op1) {
7931 case 0: /* lda */
08307563 7932 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
7933 break;
7934 case 2: /* ldab */
08307563 7935 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
7936 break;
7937 case 3: /* ldah */
08307563 7938 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
7939 break;
7940 default:
7941 abort();
7942 }
7943 store_reg(s, rd, tmp);
7944 } else {
7945 rm = insn & 0xf;
7946 tmp = load_reg(s, rm);
7947 switch (op1) {
7948 case 0: /* stl */
08307563 7949 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
7950 break;
7951 case 2: /* stlb */
08307563 7952 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
7953 break;
7954 case 3: /* stlh */
08307563 7955 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
7956 break;
7957 default:
7958 abort();
7959 }
7960 tcg_temp_free_i32(tmp);
7961 }
7962 } else if (insn & (1 << 20)) {
86753403
PB
7963 switch (op1) {
7964 case 0: /* ldrex */
426f5abc 7965 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7966 break;
7967 case 1: /* ldrexd */
426f5abc 7968 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7969 break;
7970 case 2: /* ldrexb */
426f5abc 7971 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7972 break;
7973 case 3: /* ldrexh */
426f5abc 7974 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7975 break;
7976 default:
7977 abort();
7978 }
9ee6e8bb
PB
7979 } else {
7980 rm = insn & 0xf;
86753403
PB
7981 switch (op1) {
7982 case 0: /* strex */
426f5abc 7983 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7984 break;
7985 case 1: /* strexd */
502e64fe 7986 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7987 break;
7988 case 2: /* strexb */
426f5abc 7989 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7990 break;
7991 case 3: /* strexh */
426f5abc 7992 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7993 break;
7994 default:
7995 abort();
7996 }
9ee6e8bb 7997 }
39d5492a 7998 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7999 } else {
8000 /* SWP instruction */
8001 rm = (insn) & 0xf;
8002
8984bd2e
PB
8003 /* ??? This is not really atomic. However we know
8004 we never have multiple CPUs running in parallel,
8005 so it is good enough. */
8006 addr = load_reg(s, rn);
8007 tmp = load_reg(s, rm);
5a839c0d 8008 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8009 if (insn & (1 << 22)) {
08307563
PM
8010 gen_aa32_ld8u(tmp2, addr, IS_USER(s));
8011 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb 8012 } else {
08307563
PM
8013 gen_aa32_ld32u(tmp2, addr, IS_USER(s));
8014 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8015 }
5a839c0d 8016 tcg_temp_free_i32(tmp);
7d1b0095 8017 tcg_temp_free_i32(addr);
8984bd2e 8018 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8019 }
8020 }
8021 } else {
8022 int address_offset;
8023 int load;
8024 /* Misc load/store */
8025 rn = (insn >> 16) & 0xf;
8026 rd = (insn >> 12) & 0xf;
b0109805 8027 addr = load_reg(s, rn);
9ee6e8bb 8028 if (insn & (1 << 24))
b0109805 8029 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
8030 address_offset = 0;
8031 if (insn & (1 << 20)) {
8032 /* load */
5a839c0d 8033 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
8034 switch(sh) {
8035 case 1:
08307563 8036 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8037 break;
8038 case 2:
08307563 8039 gen_aa32_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8040 break;
8041 default:
8042 case 3:
08307563 8043 gen_aa32_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8044 break;
8045 }
8046 load = 1;
8047 } else if (sh & 2) {
be5e7a76 8048 ARCH(5TE);
9ee6e8bb
PB
8049 /* doubleword */
8050 if (sh & 1) {
8051 /* store */
b0109805 8052 tmp = load_reg(s, rd);
08307563 8053 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 8054 tcg_temp_free_i32(tmp);
b0109805
PB
8055 tcg_gen_addi_i32(addr, addr, 4);
8056 tmp = load_reg(s, rd + 1);
08307563 8057 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 8058 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8059 load = 0;
8060 } else {
8061 /* load */
5a839c0d 8062 tmp = tcg_temp_new_i32();
08307563 8063 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8064 store_reg(s, rd, tmp);
8065 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8066 tmp = tcg_temp_new_i32();
08307563 8067 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8068 rd++;
8069 load = 1;
8070 }
8071 address_offset = -4;
8072 } else {
8073 /* store */
b0109805 8074 tmp = load_reg(s, rd);
08307563 8075 gen_aa32_st16(tmp, addr, IS_USER(s));
5a839c0d 8076 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8077 load = 0;
8078 }
8079 /* Perform base writeback before the loaded value to
8080 ensure correct behavior with overlapping index registers.
8081 ldrd with base writeback is is undefined if the
8082 destination and index registers overlap. */
8083 if (!(insn & (1 << 24))) {
b0109805
PB
8084 gen_add_datah_offset(s, insn, address_offset, addr);
8085 store_reg(s, rn, addr);
9ee6e8bb
PB
8086 } else if (insn & (1 << 21)) {
8087 if (address_offset)
b0109805
PB
8088 tcg_gen_addi_i32(addr, addr, address_offset);
8089 store_reg(s, rn, addr);
8090 } else {
7d1b0095 8091 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8092 }
8093 if (load) {
8094 /* Complete the load. */
b0109805 8095 store_reg(s, rd, tmp);
9ee6e8bb
PB
8096 }
8097 }
8098 break;
8099 case 0x4:
8100 case 0x5:
8101 goto do_ldst;
8102 case 0x6:
8103 case 0x7:
8104 if (insn & (1 << 4)) {
8105 ARCH(6);
8106 /* Armv6 Media instructions. */
8107 rm = insn & 0xf;
8108 rn = (insn >> 16) & 0xf;
2c0262af 8109 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8110 rs = (insn >> 8) & 0xf;
8111 switch ((insn >> 23) & 3) {
8112 case 0: /* Parallel add/subtract. */
8113 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8114 tmp = load_reg(s, rn);
8115 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8116 sh = (insn >> 5) & 7;
8117 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8118 goto illegal_op;
6ddbc6e4 8119 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8120 tcg_temp_free_i32(tmp2);
6ddbc6e4 8121 store_reg(s, rd, tmp);
9ee6e8bb
PB
8122 break;
8123 case 1:
8124 if ((insn & 0x00700020) == 0) {
6c95676b 8125 /* Halfword pack. */
3670669c
PB
8126 tmp = load_reg(s, rn);
8127 tmp2 = load_reg(s, rm);
9ee6e8bb 8128 shift = (insn >> 7) & 0x1f;
3670669c
PB
8129 if (insn & (1 << 6)) {
8130 /* pkhtb */
22478e79
AZ
8131 if (shift == 0)
8132 shift = 31;
8133 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8134 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8135 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8136 } else {
8137 /* pkhbt */
22478e79
AZ
8138 if (shift)
8139 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8140 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8141 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8142 }
8143 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8144 tcg_temp_free_i32(tmp2);
3670669c 8145 store_reg(s, rd, tmp);
9ee6e8bb
PB
8146 } else if ((insn & 0x00200020) == 0x00200000) {
8147 /* [us]sat */
6ddbc6e4 8148 tmp = load_reg(s, rm);
9ee6e8bb
PB
8149 shift = (insn >> 7) & 0x1f;
8150 if (insn & (1 << 6)) {
8151 if (shift == 0)
8152 shift = 31;
6ddbc6e4 8153 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8154 } else {
6ddbc6e4 8155 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8156 }
8157 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8158 tmp2 = tcg_const_i32(sh);
8159 if (insn & (1 << 22))
9ef39277 8160 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8161 else
9ef39277 8162 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8163 tcg_temp_free_i32(tmp2);
6ddbc6e4 8164 store_reg(s, rd, tmp);
9ee6e8bb
PB
8165 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8166 /* [us]sat16 */
6ddbc6e4 8167 tmp = load_reg(s, rm);
9ee6e8bb 8168 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8169 tmp2 = tcg_const_i32(sh);
8170 if (insn & (1 << 22))
9ef39277 8171 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8172 else
9ef39277 8173 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8174 tcg_temp_free_i32(tmp2);
6ddbc6e4 8175 store_reg(s, rd, tmp);
9ee6e8bb
PB
8176 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8177 /* Select bytes. */
6ddbc6e4
PB
8178 tmp = load_reg(s, rn);
8179 tmp2 = load_reg(s, rm);
7d1b0095 8180 tmp3 = tcg_temp_new_i32();
0ecb72a5 8181 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8182 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8183 tcg_temp_free_i32(tmp3);
8184 tcg_temp_free_i32(tmp2);
6ddbc6e4 8185 store_reg(s, rd, tmp);
9ee6e8bb 8186 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8187 tmp = load_reg(s, rm);
9ee6e8bb 8188 shift = (insn >> 10) & 3;
1301f322 8189 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8190 rotate, a shift is sufficient. */
8191 if (shift != 0)
f669df27 8192 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8193 op1 = (insn >> 20) & 7;
8194 switch (op1) {
5e3f878a
PB
8195 case 0: gen_sxtb16(tmp); break;
8196 case 2: gen_sxtb(tmp); break;
8197 case 3: gen_sxth(tmp); break;
8198 case 4: gen_uxtb16(tmp); break;
8199 case 6: gen_uxtb(tmp); break;
8200 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8201 default: goto illegal_op;
8202 }
8203 if (rn != 15) {
5e3f878a 8204 tmp2 = load_reg(s, rn);
9ee6e8bb 8205 if ((op1 & 3) == 0) {
5e3f878a 8206 gen_add16(tmp, tmp2);
9ee6e8bb 8207 } else {
5e3f878a 8208 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8209 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8210 }
8211 }
6c95676b 8212 store_reg(s, rd, tmp);
9ee6e8bb
PB
8213 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8214 /* rev */
b0109805 8215 tmp = load_reg(s, rm);
9ee6e8bb
PB
8216 if (insn & (1 << 22)) {
8217 if (insn & (1 << 7)) {
b0109805 8218 gen_revsh(tmp);
9ee6e8bb
PB
8219 } else {
8220 ARCH(6T2);
b0109805 8221 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8222 }
8223 } else {
8224 if (insn & (1 << 7))
b0109805 8225 gen_rev16(tmp);
9ee6e8bb 8226 else
66896cb8 8227 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8228 }
b0109805 8229 store_reg(s, rd, tmp);
9ee6e8bb
PB
8230 } else {
8231 goto illegal_op;
8232 }
8233 break;
8234 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8235 switch ((insn >> 20) & 0x7) {
8236 case 5:
8237 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8238 /* op2 not 00x or 11x : UNDEF */
8239 goto illegal_op;
8240 }
838fa72d
AJ
8241 /* Signed multiply most significant [accumulate].
8242 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8243 tmp = load_reg(s, rm);
8244 tmp2 = load_reg(s, rs);
a7812ae4 8245 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8246
955a7dd5 8247 if (rd != 15) {
838fa72d 8248 tmp = load_reg(s, rd);
9ee6e8bb 8249 if (insn & (1 << 6)) {
838fa72d 8250 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8251 } else {
838fa72d 8252 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8253 }
8254 }
838fa72d
AJ
8255 if (insn & (1 << 5)) {
8256 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8257 }
8258 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8259 tmp = tcg_temp_new_i32();
838fa72d
AJ
8260 tcg_gen_trunc_i64_i32(tmp, tmp64);
8261 tcg_temp_free_i64(tmp64);
955a7dd5 8262 store_reg(s, rn, tmp);
41e9564d
PM
8263 break;
8264 case 0:
8265 case 4:
8266 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8267 if (insn & (1 << 7)) {
8268 goto illegal_op;
8269 }
8270 tmp = load_reg(s, rm);
8271 tmp2 = load_reg(s, rs);
9ee6e8bb 8272 if (insn & (1 << 5))
5e3f878a
PB
8273 gen_swap_half(tmp2);
8274 gen_smul_dual(tmp, tmp2);
5e3f878a 8275 if (insn & (1 << 6)) {
e1d177b9 8276 /* This subtraction cannot overflow. */
5e3f878a
PB
8277 tcg_gen_sub_i32(tmp, tmp, tmp2);
8278 } else {
e1d177b9
PM
8279 /* This addition cannot overflow 32 bits;
8280 * however it may overflow considered as a signed
8281 * operation, in which case we must set the Q flag.
8282 */
9ef39277 8283 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 8284 }
7d1b0095 8285 tcg_temp_free_i32(tmp2);
9ee6e8bb 8286 if (insn & (1 << 22)) {
5e3f878a 8287 /* smlald, smlsld */
a7812ae4
PB
8288 tmp64 = tcg_temp_new_i64();
8289 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8290 tcg_temp_free_i32(tmp);
a7812ae4
PB
8291 gen_addq(s, tmp64, rd, rn);
8292 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8293 tcg_temp_free_i64(tmp64);
9ee6e8bb 8294 } else {
5e3f878a 8295 /* smuad, smusd, smlad, smlsd */
22478e79 8296 if (rd != 15)
9ee6e8bb 8297 {
22478e79 8298 tmp2 = load_reg(s, rd);
9ef39277 8299 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8300 tcg_temp_free_i32(tmp2);
9ee6e8bb 8301 }
22478e79 8302 store_reg(s, rn, tmp);
9ee6e8bb 8303 }
41e9564d 8304 break;
b8b8ea05
PM
8305 case 1:
8306 case 3:
8307 /* SDIV, UDIV */
8308 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
8309 goto illegal_op;
8310 }
8311 if (((insn >> 5) & 7) || (rd != 15)) {
8312 goto illegal_op;
8313 }
8314 tmp = load_reg(s, rm);
8315 tmp2 = load_reg(s, rs);
8316 if (insn & (1 << 21)) {
8317 gen_helper_udiv(tmp, tmp, tmp2);
8318 } else {
8319 gen_helper_sdiv(tmp, tmp, tmp2);
8320 }
8321 tcg_temp_free_i32(tmp2);
8322 store_reg(s, rn, tmp);
8323 break;
41e9564d
PM
8324 default:
8325 goto illegal_op;
9ee6e8bb
PB
8326 }
8327 break;
8328 case 3:
8329 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8330 switch (op1) {
8331 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8332 ARCH(6);
8333 tmp = load_reg(s, rm);
8334 tmp2 = load_reg(s, rs);
8335 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8336 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8337 if (rd != 15) {
8338 tmp2 = load_reg(s, rd);
6ddbc6e4 8339 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8340 tcg_temp_free_i32(tmp2);
9ee6e8bb 8341 }
ded9d295 8342 store_reg(s, rn, tmp);
9ee6e8bb
PB
8343 break;
8344 case 0x20: case 0x24: case 0x28: case 0x2c:
8345 /* Bitfield insert/clear. */
8346 ARCH(6T2);
8347 shift = (insn >> 7) & 0x1f;
8348 i = (insn >> 16) & 0x1f;
8349 i = i + 1 - shift;
8350 if (rm == 15) {
7d1b0095 8351 tmp = tcg_temp_new_i32();
5e3f878a 8352 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8353 } else {
5e3f878a 8354 tmp = load_reg(s, rm);
9ee6e8bb
PB
8355 }
8356 if (i != 32) {
5e3f878a 8357 tmp2 = load_reg(s, rd);
d593c48e 8358 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8359 tcg_temp_free_i32(tmp2);
9ee6e8bb 8360 }
5e3f878a 8361 store_reg(s, rd, tmp);
9ee6e8bb
PB
8362 break;
8363 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8364 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8365 ARCH(6T2);
5e3f878a 8366 tmp = load_reg(s, rm);
9ee6e8bb
PB
8367 shift = (insn >> 7) & 0x1f;
8368 i = ((insn >> 16) & 0x1f) + 1;
8369 if (shift + i > 32)
8370 goto illegal_op;
8371 if (i < 32) {
8372 if (op1 & 0x20) {
5e3f878a 8373 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8374 } else {
5e3f878a 8375 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8376 }
8377 }
5e3f878a 8378 store_reg(s, rd, tmp);
9ee6e8bb
PB
8379 break;
8380 default:
8381 goto illegal_op;
8382 }
8383 break;
8384 }
8385 break;
8386 }
8387 do_ldst:
8388 /* Check for undefined extension instructions
8389 * per the ARM Bible IE:
8390 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8391 */
8392 sh = (0xf << 20) | (0xf << 4);
8393 if (op1 == 0x7 && ((insn & sh) == sh))
8394 {
8395 goto illegal_op;
8396 }
8397 /* load/store byte/word */
8398 rn = (insn >> 16) & 0xf;
8399 rd = (insn >> 12) & 0xf;
b0109805 8400 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
8401 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
8402 if (insn & (1 << 24))
b0109805 8403 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8404 if (insn & (1 << 20)) {
8405 /* load */
5a839c0d 8406 tmp = tcg_temp_new_i32();
9ee6e8bb 8407 if (insn & (1 << 22)) {
08307563 8408 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8409 } else {
08307563 8410 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8411 }
9ee6e8bb
PB
8412 } else {
8413 /* store */
b0109805 8414 tmp = load_reg(s, rd);
5a839c0d 8415 if (insn & (1 << 22)) {
08307563 8416 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8417 } else {
08307563 8418 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8419 }
8420 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8421 }
8422 if (!(insn & (1 << 24))) {
b0109805
PB
8423 gen_add_data_offset(s, insn, tmp2);
8424 store_reg(s, rn, tmp2);
8425 } else if (insn & (1 << 21)) {
8426 store_reg(s, rn, tmp2);
8427 } else {
7d1b0095 8428 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8429 }
8430 if (insn & (1 << 20)) {
8431 /* Complete the load. */
be5e7a76 8432 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
8433 }
8434 break;
8435 case 0x08:
8436 case 0x09:
8437 {
8438 int j, n, user, loaded_base;
39d5492a 8439 TCGv_i32 loaded_var;
9ee6e8bb
PB
8440 /* load/store multiple words */
8441 /* XXX: store correct base if write back */
8442 user = 0;
8443 if (insn & (1 << 22)) {
8444 if (IS_USER(s))
8445 goto illegal_op; /* only usable in supervisor mode */
8446
8447 if ((insn & (1 << 15)) == 0)
8448 user = 1;
8449 }
8450 rn = (insn >> 16) & 0xf;
b0109805 8451 addr = load_reg(s, rn);
9ee6e8bb
PB
8452
8453 /* compute total size */
8454 loaded_base = 0;
39d5492a 8455 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8456 n = 0;
8457 for(i=0;i<16;i++) {
8458 if (insn & (1 << i))
8459 n++;
8460 }
8461 /* XXX: test invalid n == 0 case ? */
8462 if (insn & (1 << 23)) {
8463 if (insn & (1 << 24)) {
8464 /* pre increment */
b0109805 8465 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8466 } else {
8467 /* post increment */
8468 }
8469 } else {
8470 if (insn & (1 << 24)) {
8471 /* pre decrement */
b0109805 8472 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8473 } else {
8474 /* post decrement */
8475 if (n != 1)
b0109805 8476 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8477 }
8478 }
8479 j = 0;
8480 for(i=0;i<16;i++) {
8481 if (insn & (1 << i)) {
8482 if (insn & (1 << 20)) {
8483 /* load */
5a839c0d 8484 tmp = tcg_temp_new_i32();
08307563 8485 gen_aa32_ld32u(tmp, addr, IS_USER(s));
be5e7a76 8486 if (user) {
b75263d6 8487 tmp2 = tcg_const_i32(i);
1ce94f81 8488 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8489 tcg_temp_free_i32(tmp2);
7d1b0095 8490 tcg_temp_free_i32(tmp);
9ee6e8bb 8491 } else if (i == rn) {
b0109805 8492 loaded_var = tmp;
9ee6e8bb
PB
8493 loaded_base = 1;
8494 } else {
be5e7a76 8495 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
8496 }
8497 } else {
8498 /* store */
8499 if (i == 15) {
8500 /* special case: r15 = PC + 8 */
8501 val = (long)s->pc + 4;
7d1b0095 8502 tmp = tcg_temp_new_i32();
b0109805 8503 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8504 } else if (user) {
7d1b0095 8505 tmp = tcg_temp_new_i32();
b75263d6 8506 tmp2 = tcg_const_i32(i);
9ef39277 8507 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8508 tcg_temp_free_i32(tmp2);
9ee6e8bb 8509 } else {
b0109805 8510 tmp = load_reg(s, i);
9ee6e8bb 8511 }
08307563 8512 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 8513 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8514 }
8515 j++;
8516 /* no need to add after the last transfer */
8517 if (j != n)
b0109805 8518 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8519 }
8520 }
8521 if (insn & (1 << 21)) {
8522 /* write back */
8523 if (insn & (1 << 23)) {
8524 if (insn & (1 << 24)) {
8525 /* pre increment */
8526 } else {
8527 /* post increment */
b0109805 8528 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8529 }
8530 } else {
8531 if (insn & (1 << 24)) {
8532 /* pre decrement */
8533 if (n != 1)
b0109805 8534 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8535 } else {
8536 /* post decrement */
b0109805 8537 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8538 }
8539 }
b0109805
PB
8540 store_reg(s, rn, addr);
8541 } else {
7d1b0095 8542 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8543 }
8544 if (loaded_base) {
b0109805 8545 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8546 }
8547 if ((insn & (1 << 22)) && !user) {
8548 /* Restore CPSR from SPSR. */
d9ba4830
PB
8549 tmp = load_cpu_field(spsr);
8550 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 8551 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8552 s->is_jmp = DISAS_UPDATE;
8553 }
8554 }
8555 break;
8556 case 0xa:
8557 case 0xb:
8558 {
8559 int32_t offset;
8560
8561 /* branch (and link) */
8562 val = (int32_t)s->pc;
8563 if (insn & (1 << 24)) {
7d1b0095 8564 tmp = tcg_temp_new_i32();
5e3f878a
PB
8565 tcg_gen_movi_i32(tmp, val);
8566 store_reg(s, 14, tmp);
9ee6e8bb 8567 }
534df156
PM
8568 offset = sextract32(insn << 2, 0, 26);
8569 val += offset + 4;
9ee6e8bb
PB
8570 gen_jmp(s, val);
8571 }
8572 break;
8573 case 0xc:
8574 case 0xd:
8575 case 0xe:
6a57f3eb
WN
8576 if (((insn >> 8) & 0xe) == 10) {
8577 /* VFP. */
8578 if (disas_vfp_insn(env, s, insn)) {
8579 goto illegal_op;
8580 }
8581 } else if (disas_coproc_insn(env, s, insn)) {
8582 /* Coprocessor. */
9ee6e8bb 8583 goto illegal_op;
6a57f3eb 8584 }
9ee6e8bb
PB
8585 break;
8586 case 0xf:
8587 /* swi */
eaed129d 8588 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
8589 s->is_jmp = DISAS_SWI;
8590 break;
8591 default:
8592 illegal_op:
bc4a0de0 8593 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
8594 break;
8595 }
8596 }
8597}
8598
/* Predicate: nonzero when OP encodes a Thumb-2 logical operation.
 * Opcodes 0..7 are the logical group; for these the carry flag is
 * taken from the shifter output rather than from the ALU result. */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
8605
8606/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8607 then set condition code flags based on the result of the operation.
8608 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8609 to the high bit of T1.
8610 Returns zero if the opcode is valid. */
8611
8612static int
39d5492a
PM
8613gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8614 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8615{
8616 int logic_cc;
8617
8618 logic_cc = 0;
8619 switch (op) {
8620 case 0: /* and */
396e467c 8621 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8622 logic_cc = conds;
8623 break;
8624 case 1: /* bic */
f669df27 8625 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8626 logic_cc = conds;
8627 break;
8628 case 2: /* orr */
396e467c 8629 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8630 logic_cc = conds;
8631 break;
8632 case 3: /* orn */
29501f1b 8633 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8634 logic_cc = conds;
8635 break;
8636 case 4: /* eor */
396e467c 8637 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8638 logic_cc = conds;
8639 break;
8640 case 8: /* add */
8641 if (conds)
72485ec4 8642 gen_add_CC(t0, t0, t1);
9ee6e8bb 8643 else
396e467c 8644 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8645 break;
8646 case 10: /* adc */
8647 if (conds)
49b4c31e 8648 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8649 else
396e467c 8650 gen_adc(t0, t1);
9ee6e8bb
PB
8651 break;
8652 case 11: /* sbc */
2de68a49
RH
8653 if (conds) {
8654 gen_sbc_CC(t0, t0, t1);
8655 } else {
396e467c 8656 gen_sub_carry(t0, t0, t1);
2de68a49 8657 }
9ee6e8bb
PB
8658 break;
8659 case 13: /* sub */
8660 if (conds)
72485ec4 8661 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8662 else
396e467c 8663 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8664 break;
8665 case 14: /* rsb */
8666 if (conds)
72485ec4 8667 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8668 else
396e467c 8669 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8670 break;
8671 default: /* 5, 6, 7, 9, 12, 15. */
8672 return 1;
8673 }
8674 if (logic_cc) {
396e467c 8675 gen_logic_CC(t0);
9ee6e8bb 8676 if (shifter_out)
396e467c 8677 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8678 }
8679 return 0;
8680}
8681
8682/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8683 is not legal. */
0ecb72a5 8684static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8685{
b0109805 8686 uint32_t insn, imm, shift, offset;
9ee6e8bb 8687 uint32_t rd, rn, rm, rs;
39d5492a
PM
8688 TCGv_i32 tmp;
8689 TCGv_i32 tmp2;
8690 TCGv_i32 tmp3;
8691 TCGv_i32 addr;
a7812ae4 8692 TCGv_i64 tmp64;
9ee6e8bb
PB
8693 int op;
8694 int shiftop;
8695 int conds;
8696 int logic_cc;
8697
8698 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8699 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8700 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8701 16-bit instructions to get correct prefetch abort behavior. */
8702 insn = insn_hw1;
8703 if ((insn & (1 << 12)) == 0) {
be5e7a76 8704 ARCH(5);
9ee6e8bb
PB
8705 /* Second half of blx. */
8706 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8707 tmp = load_reg(s, 14);
8708 tcg_gen_addi_i32(tmp, tmp, offset);
8709 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8710
7d1b0095 8711 tmp2 = tcg_temp_new_i32();
b0109805 8712 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8713 store_reg(s, 14, tmp2);
8714 gen_bx(s, tmp);
9ee6e8bb
PB
8715 return 0;
8716 }
8717 if (insn & (1 << 11)) {
8718 /* Second half of bl. */
8719 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8720 tmp = load_reg(s, 14);
6a0d8a1d 8721 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8722
7d1b0095 8723 tmp2 = tcg_temp_new_i32();
b0109805 8724 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8725 store_reg(s, 14, tmp2);
8726 gen_bx(s, tmp);
9ee6e8bb
PB
8727 return 0;
8728 }
8729 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8730 /* Instruction spans a page boundary. Implement it as two
8731 16-bit instructions in case the second half causes an
8732 prefetch abort. */
8733 offset = ((int32_t)insn << 21) >> 9;
396e467c 8734 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8735 return 0;
8736 }
8737 /* Fall through to 32-bit decode. */
8738 }
8739
d31dd73e 8740 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8741 s->pc += 2;
8742 insn |= (uint32_t)insn_hw1 << 16;
8743
8744 if ((insn & 0xf800e800) != 0xf000e800) {
8745 ARCH(6T2);
8746 }
8747
8748 rn = (insn >> 16) & 0xf;
8749 rs = (insn >> 12) & 0xf;
8750 rd = (insn >> 8) & 0xf;
8751 rm = insn & 0xf;
8752 switch ((insn >> 25) & 0xf) {
8753 case 0: case 1: case 2: case 3:
8754 /* 16-bit instructions. Should never happen. */
8755 abort();
8756 case 4:
8757 if (insn & (1 << 22)) {
8758 /* Other load/store, table branch. */
8759 if (insn & 0x01200000) {
8760 /* Load/store doubleword. */
8761 if (rn == 15) {
7d1b0095 8762 addr = tcg_temp_new_i32();
b0109805 8763 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8764 } else {
b0109805 8765 addr = load_reg(s, rn);
9ee6e8bb
PB
8766 }
8767 offset = (insn & 0xff) * 4;
8768 if ((insn & (1 << 23)) == 0)
8769 offset = -offset;
8770 if (insn & (1 << 24)) {
b0109805 8771 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8772 offset = 0;
8773 }
8774 if (insn & (1 << 20)) {
8775 /* ldrd */
e2592fad 8776 tmp = tcg_temp_new_i32();
08307563 8777 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8778 store_reg(s, rs, tmp);
8779 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8780 tmp = tcg_temp_new_i32();
08307563 8781 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 8782 store_reg(s, rd, tmp);
9ee6e8bb
PB
8783 } else {
8784 /* strd */
b0109805 8785 tmp = load_reg(s, rs);
08307563 8786 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8787 tcg_temp_free_i32(tmp);
b0109805
PB
8788 tcg_gen_addi_i32(addr, addr, 4);
8789 tmp = load_reg(s, rd);
08307563 8790 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8791 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8792 }
8793 if (insn & (1 << 21)) {
8794 /* Base writeback. */
8795 if (rn == 15)
8796 goto illegal_op;
b0109805
PB
8797 tcg_gen_addi_i32(addr, addr, offset - 4);
8798 store_reg(s, rn, addr);
8799 } else {
7d1b0095 8800 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8801 }
8802 } else if ((insn & (1 << 23)) == 0) {
8803 /* Load/store exclusive word. */
39d5492a 8804 addr = tcg_temp_local_new_i32();
98a46317 8805 load_reg_var(s, addr, rn);
426f5abc 8806 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8807 if (insn & (1 << 20)) {
426f5abc 8808 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8809 } else {
426f5abc 8810 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8811 }
39d5492a 8812 tcg_temp_free_i32(addr);
2359bf80 8813 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
8814 /* Table Branch. */
8815 if (rn == 15) {
7d1b0095 8816 addr = tcg_temp_new_i32();
b0109805 8817 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8818 } else {
b0109805 8819 addr = load_reg(s, rn);
9ee6e8bb 8820 }
b26eefb6 8821 tmp = load_reg(s, rm);
b0109805 8822 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8823 if (insn & (1 << 4)) {
8824 /* tbh */
b0109805 8825 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8826 tcg_temp_free_i32(tmp);
e2592fad 8827 tmp = tcg_temp_new_i32();
08307563 8828 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb 8829 } else { /* tbb */
7d1b0095 8830 tcg_temp_free_i32(tmp);
e2592fad 8831 tmp = tcg_temp_new_i32();
08307563 8832 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb 8833 }
7d1b0095 8834 tcg_temp_free_i32(addr);
b0109805
PB
8835 tcg_gen_shli_i32(tmp, tmp, 1);
8836 tcg_gen_addi_i32(tmp, tmp, s->pc);
8837 store_reg(s, 15, tmp);
9ee6e8bb 8838 } else {
2359bf80 8839 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 8840 op = (insn >> 4) & 0x3;
2359bf80
MR
8841 switch (op2) {
8842 case 0:
426f5abc 8843 goto illegal_op;
2359bf80
MR
8844 case 1:
8845 /* Load/store exclusive byte/halfword/doubleword */
8846 if (op == 2) {
8847 goto illegal_op;
8848 }
8849 ARCH(7);
8850 break;
8851 case 2:
8852 /* Load-acquire/store-release */
8853 if (op == 3) {
8854 goto illegal_op;
8855 }
8856 /* Fall through */
8857 case 3:
8858 /* Load-acquire/store-release exclusive */
8859 ARCH(8);
8860 break;
426f5abc 8861 }
39d5492a 8862 addr = tcg_temp_local_new_i32();
98a46317 8863 load_reg_var(s, addr, rn);
2359bf80
MR
8864 if (!(op2 & 1)) {
8865 if (insn & (1 << 20)) {
8866 tmp = tcg_temp_new_i32();
8867 switch (op) {
8868 case 0: /* ldab */
08307563 8869 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
8870 break;
8871 case 1: /* ldah */
08307563 8872 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
8873 break;
8874 case 2: /* lda */
08307563 8875 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
8876 break;
8877 default:
8878 abort();
8879 }
8880 store_reg(s, rs, tmp);
8881 } else {
8882 tmp = load_reg(s, rs);
8883 switch (op) {
8884 case 0: /* stlb */
08307563 8885 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
8886 break;
8887 case 1: /* stlh */
08307563 8888 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
8889 break;
8890 case 2: /* stl */
08307563 8891 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
8892 break;
8893 default:
8894 abort();
8895 }
8896 tcg_temp_free_i32(tmp);
8897 }
8898 } else if (insn & (1 << 20)) {
426f5abc 8899 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8900 } else {
426f5abc 8901 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8902 }
39d5492a 8903 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8904 }
8905 } else {
8906 /* Load/store multiple, RFE, SRS. */
8907 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
8908 /* RFE, SRS: not available in user mode or on M profile */
8909 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 8910 goto illegal_op;
00115976 8911 }
9ee6e8bb
PB
8912 if (insn & (1 << 20)) {
8913 /* rfe */
b0109805
PB
8914 addr = load_reg(s, rn);
8915 if ((insn & (1 << 24)) == 0)
8916 tcg_gen_addi_i32(addr, addr, -8);
8917 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 8918 tmp = tcg_temp_new_i32();
08307563 8919 gen_aa32_ld32u(tmp, addr, 0);
b0109805 8920 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8921 tmp2 = tcg_temp_new_i32();
08307563 8922 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
8923 if (insn & (1 << 21)) {
8924 /* Base writeback. */
b0109805
PB
8925 if (insn & (1 << 24)) {
8926 tcg_gen_addi_i32(addr, addr, 4);
8927 } else {
8928 tcg_gen_addi_i32(addr, addr, -4);
8929 }
8930 store_reg(s, rn, addr);
8931 } else {
7d1b0095 8932 tcg_temp_free_i32(addr);
9ee6e8bb 8933 }
b0109805 8934 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8935 } else {
8936 /* srs */
81465888
PM
8937 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8938 insn & (1 << 21));
9ee6e8bb
PB
8939 }
8940 } else {
5856d44e 8941 int i, loaded_base = 0;
39d5492a 8942 TCGv_i32 loaded_var;
9ee6e8bb 8943 /* Load/store multiple. */
b0109805 8944 addr = load_reg(s, rn);
9ee6e8bb
PB
8945 offset = 0;
8946 for (i = 0; i < 16; i++) {
8947 if (insn & (1 << i))
8948 offset += 4;
8949 }
8950 if (insn & (1 << 24)) {
b0109805 8951 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8952 }
8953
39d5492a 8954 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8955 for (i = 0; i < 16; i++) {
8956 if ((insn & (1 << i)) == 0)
8957 continue;
8958 if (insn & (1 << 20)) {
8959 /* Load. */
e2592fad 8960 tmp = tcg_temp_new_i32();
08307563 8961 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 8962 if (i == 15) {
b0109805 8963 gen_bx(s, tmp);
5856d44e
YO
8964 } else if (i == rn) {
8965 loaded_var = tmp;
8966 loaded_base = 1;
9ee6e8bb 8967 } else {
b0109805 8968 store_reg(s, i, tmp);
9ee6e8bb
PB
8969 }
8970 } else {
8971 /* Store. */
b0109805 8972 tmp = load_reg(s, i);
08307563 8973 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8974 tcg_temp_free_i32(tmp);
9ee6e8bb 8975 }
b0109805 8976 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8977 }
5856d44e
YO
8978 if (loaded_base) {
8979 store_reg(s, rn, loaded_var);
8980 }
9ee6e8bb
PB
8981 if (insn & (1 << 21)) {
8982 /* Base register writeback. */
8983 if (insn & (1 << 24)) {
b0109805 8984 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8985 }
8986 /* Fault if writeback register is in register list. */
8987 if (insn & (1 << rn))
8988 goto illegal_op;
b0109805
PB
8989 store_reg(s, rn, addr);
8990 } else {
7d1b0095 8991 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8992 }
8993 }
8994 }
8995 break;
2af9ab77
JB
8996 case 5:
8997
9ee6e8bb 8998 op = (insn >> 21) & 0xf;
2af9ab77
JB
8999 if (op == 6) {
9000 /* Halfword pack. */
9001 tmp = load_reg(s, rn);
9002 tmp2 = load_reg(s, rm);
9003 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9004 if (insn & (1 << 5)) {
9005 /* pkhtb */
9006 if (shift == 0)
9007 shift = 31;
9008 tcg_gen_sari_i32(tmp2, tmp2, shift);
9009 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9010 tcg_gen_ext16u_i32(tmp2, tmp2);
9011 } else {
9012 /* pkhbt */
9013 if (shift)
9014 tcg_gen_shli_i32(tmp2, tmp2, shift);
9015 tcg_gen_ext16u_i32(tmp, tmp);
9016 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9017 }
9018 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9019 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9020 store_reg(s, rd, tmp);
9021 } else {
2af9ab77
JB
9022 /* Data processing register constant shift. */
9023 if (rn == 15) {
7d1b0095 9024 tmp = tcg_temp_new_i32();
2af9ab77
JB
9025 tcg_gen_movi_i32(tmp, 0);
9026 } else {
9027 tmp = load_reg(s, rn);
9028 }
9029 tmp2 = load_reg(s, rm);
9030
9031 shiftop = (insn >> 4) & 3;
9032 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9033 conds = (insn & (1 << 20)) != 0;
9034 logic_cc = (conds && thumb2_logic_op(op));
9035 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9036 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9037 goto illegal_op;
7d1b0095 9038 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9039 if (rd != 15) {
9040 store_reg(s, rd, tmp);
9041 } else {
7d1b0095 9042 tcg_temp_free_i32(tmp);
2af9ab77 9043 }
3174f8e9 9044 }
9ee6e8bb
PB
9045 break;
9046 case 13: /* Misc data processing. */
9047 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9048 if (op < 4 && (insn & 0xf000) != 0xf000)
9049 goto illegal_op;
9050 switch (op) {
9051 case 0: /* Register controlled shift. */
8984bd2e
PB
9052 tmp = load_reg(s, rn);
9053 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9054 if ((insn & 0x70) != 0)
9055 goto illegal_op;
9056 op = (insn >> 21) & 3;
8984bd2e
PB
9057 logic_cc = (insn & (1 << 20)) != 0;
9058 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9059 if (logic_cc)
9060 gen_logic_CC(tmp);
21aeb343 9061 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
9062 break;
9063 case 1: /* Sign/zero extend. */
5e3f878a 9064 tmp = load_reg(s, rm);
9ee6e8bb 9065 shift = (insn >> 4) & 3;
1301f322 9066 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9067 rotate, a shift is sufficient. */
9068 if (shift != 0)
f669df27 9069 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9070 op = (insn >> 20) & 7;
9071 switch (op) {
5e3f878a
PB
9072 case 0: gen_sxth(tmp); break;
9073 case 1: gen_uxth(tmp); break;
9074 case 2: gen_sxtb16(tmp); break;
9075 case 3: gen_uxtb16(tmp); break;
9076 case 4: gen_sxtb(tmp); break;
9077 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
9078 default: goto illegal_op;
9079 }
9080 if (rn != 15) {
5e3f878a 9081 tmp2 = load_reg(s, rn);
9ee6e8bb 9082 if ((op >> 1) == 1) {
5e3f878a 9083 gen_add16(tmp, tmp2);
9ee6e8bb 9084 } else {
5e3f878a 9085 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9086 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9087 }
9088 }
5e3f878a 9089 store_reg(s, rd, tmp);
9ee6e8bb
PB
9090 break;
9091 case 2: /* SIMD add/subtract. */
9092 op = (insn >> 20) & 7;
9093 shift = (insn >> 4) & 7;
9094 if ((op & 3) == 3 || (shift & 3) == 3)
9095 goto illegal_op;
6ddbc6e4
PB
9096 tmp = load_reg(s, rn);
9097 tmp2 = load_reg(s, rm);
9098 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9099 tcg_temp_free_i32(tmp2);
6ddbc6e4 9100 store_reg(s, rd, tmp);
9ee6e8bb
PB
9101 break;
9102 case 3: /* Other data processing. */
9103 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9104 if (op < 4) {
9105 /* Saturating add/subtract. */
d9ba4830
PB
9106 tmp = load_reg(s, rn);
9107 tmp2 = load_reg(s, rm);
9ee6e8bb 9108 if (op & 1)
9ef39277 9109 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9110 if (op & 2)
9ef39277 9111 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9112 else
9ef39277 9113 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9114 tcg_temp_free_i32(tmp2);
9ee6e8bb 9115 } else {
d9ba4830 9116 tmp = load_reg(s, rn);
9ee6e8bb
PB
9117 switch (op) {
9118 case 0x0a: /* rbit */
d9ba4830 9119 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9120 break;
9121 case 0x08: /* rev */
66896cb8 9122 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9123 break;
9124 case 0x09: /* rev16 */
d9ba4830 9125 gen_rev16(tmp);
9ee6e8bb
PB
9126 break;
9127 case 0x0b: /* revsh */
d9ba4830 9128 gen_revsh(tmp);
9ee6e8bb
PB
9129 break;
9130 case 0x10: /* sel */
d9ba4830 9131 tmp2 = load_reg(s, rm);
7d1b0095 9132 tmp3 = tcg_temp_new_i32();
0ecb72a5 9133 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9134 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9135 tcg_temp_free_i32(tmp3);
9136 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9137 break;
9138 case 0x18: /* clz */
d9ba4830 9139 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
9140 break;
9141 default:
9142 goto illegal_op;
9143 }
9144 }
d9ba4830 9145 store_reg(s, rd, tmp);
9ee6e8bb
PB
9146 break;
9147 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9148 op = (insn >> 4) & 0xf;
d9ba4830
PB
9149 tmp = load_reg(s, rn);
9150 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9151 switch ((insn >> 20) & 7) {
9152 case 0: /* 32 x 32 -> 32 */
d9ba4830 9153 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9154 tcg_temp_free_i32(tmp2);
9ee6e8bb 9155 if (rs != 15) {
d9ba4830 9156 tmp2 = load_reg(s, rs);
9ee6e8bb 9157 if (op)
d9ba4830 9158 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9159 else
d9ba4830 9160 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9161 tcg_temp_free_i32(tmp2);
9ee6e8bb 9162 }
9ee6e8bb
PB
9163 break;
9164 case 1: /* 16 x 16 -> 32 */
d9ba4830 9165 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9166 tcg_temp_free_i32(tmp2);
9ee6e8bb 9167 if (rs != 15) {
d9ba4830 9168 tmp2 = load_reg(s, rs);
9ef39277 9169 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9170 tcg_temp_free_i32(tmp2);
9ee6e8bb 9171 }
9ee6e8bb
PB
9172 break;
9173 case 2: /* Dual multiply add. */
9174 case 4: /* Dual multiply subtract. */
9175 if (op)
d9ba4830
PB
9176 gen_swap_half(tmp2);
9177 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9178 if (insn & (1 << 22)) {
e1d177b9 9179 /* This subtraction cannot overflow. */
d9ba4830 9180 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9181 } else {
e1d177b9
PM
9182 /* This addition cannot overflow 32 bits;
9183 * however it may overflow considered as a signed
9184 * operation, in which case we must set the Q flag.
9185 */
9ef39277 9186 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9187 }
7d1b0095 9188 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9189 if (rs != 15)
9190 {
d9ba4830 9191 tmp2 = load_reg(s, rs);
9ef39277 9192 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9193 tcg_temp_free_i32(tmp2);
9ee6e8bb 9194 }
9ee6e8bb
PB
9195 break;
9196 case 3: /* 32 * 16 -> 32msb */
9197 if (op)
d9ba4830 9198 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9199 else
d9ba4830 9200 gen_sxth(tmp2);
a7812ae4
PB
9201 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9202 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9203 tmp = tcg_temp_new_i32();
a7812ae4 9204 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 9205 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9206 if (rs != 15)
9207 {
d9ba4830 9208 tmp2 = load_reg(s, rs);
9ef39277 9209 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9210 tcg_temp_free_i32(tmp2);
9ee6e8bb 9211 }
9ee6e8bb 9212 break;
838fa72d
AJ
9213 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9214 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9215 if (rs != 15) {
838fa72d
AJ
9216 tmp = load_reg(s, rs);
9217 if (insn & (1 << 20)) {
9218 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9219 } else {
838fa72d 9220 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9221 }
2c0262af 9222 }
838fa72d
AJ
9223 if (insn & (1 << 4)) {
9224 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9225 }
9226 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9227 tmp = tcg_temp_new_i32();
838fa72d
AJ
9228 tcg_gen_trunc_i64_i32(tmp, tmp64);
9229 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9230 break;
9231 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9232 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9233 tcg_temp_free_i32(tmp2);
9ee6e8bb 9234 if (rs != 15) {
d9ba4830
PB
9235 tmp2 = load_reg(s, rs);
9236 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9237 tcg_temp_free_i32(tmp2);
5fd46862 9238 }
9ee6e8bb 9239 break;
2c0262af 9240 }
d9ba4830 9241 store_reg(s, rd, tmp);
2c0262af 9242 break;
9ee6e8bb
PB
9243 case 6: case 7: /* 64-bit multiply, Divide. */
9244 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9245 tmp = load_reg(s, rn);
9246 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9247 if ((op & 0x50) == 0x10) {
9248 /* sdiv, udiv */
47789990 9249 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9250 goto illegal_op;
47789990 9251 }
9ee6e8bb 9252 if (op & 0x20)
5e3f878a 9253 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9254 else
5e3f878a 9255 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9256 tcg_temp_free_i32(tmp2);
5e3f878a 9257 store_reg(s, rd, tmp);
9ee6e8bb
PB
9258 } else if ((op & 0xe) == 0xc) {
9259 /* Dual multiply accumulate long. */
9260 if (op & 1)
5e3f878a
PB
9261 gen_swap_half(tmp2);
9262 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9263 if (op & 0x10) {
5e3f878a 9264 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9265 } else {
5e3f878a 9266 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9267 }
7d1b0095 9268 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9269 /* BUGFIX */
9270 tmp64 = tcg_temp_new_i64();
9271 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9272 tcg_temp_free_i32(tmp);
a7812ae4
PB
9273 gen_addq(s, tmp64, rs, rd);
9274 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9275 tcg_temp_free_i64(tmp64);
2c0262af 9276 } else {
9ee6e8bb
PB
9277 if (op & 0x20) {
9278 /* Unsigned 64-bit multiply */
a7812ae4 9279 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9280 } else {
9ee6e8bb
PB
9281 if (op & 8) {
9282 /* smlalxy */
5e3f878a 9283 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9284 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9285 tmp64 = tcg_temp_new_i64();
9286 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9287 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9288 } else {
9289 /* Signed 64-bit multiply */
a7812ae4 9290 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9291 }
b5ff1b31 9292 }
9ee6e8bb
PB
9293 if (op & 4) {
9294 /* umaal */
a7812ae4
PB
9295 gen_addq_lo(s, tmp64, rs);
9296 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9297 } else if (op & 0x40) {
9298 /* 64-bit accumulate. */
a7812ae4 9299 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9300 }
a7812ae4 9301 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9302 tcg_temp_free_i64(tmp64);
5fd46862 9303 }
2c0262af 9304 break;
9ee6e8bb
PB
9305 }
9306 break;
9307 case 6: case 7: case 14: case 15:
9308 /* Coprocessor. */
9309 if (((insn >> 24) & 3) == 3) {
9310 /* Translate into the equivalent ARM encoding. */
f06053e3 9311 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
9312 if (disas_neon_data_insn(env, s, insn))
9313 goto illegal_op;
6a57f3eb
WN
9314 } else if (((insn >> 8) & 0xe) == 10) {
9315 if (disas_vfp_insn(env, s, insn)) {
9316 goto illegal_op;
9317 }
9ee6e8bb
PB
9318 } else {
9319 if (insn & (1 << 28))
9320 goto illegal_op;
9321 if (disas_coproc_insn (env, s, insn))
9322 goto illegal_op;
9323 }
9324 break;
9325 case 8: case 9: case 10: case 11:
9326 if (insn & (1 << 15)) {
9327 /* Branches, misc control. */
9328 if (insn & 0x5000) {
9329 /* Unconditional branch. */
9330 /* signextend(hw1[10:0]) -> offset[:12]. */
9331 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9332 /* hw1[10:0] -> offset[11:1]. */
9333 offset |= (insn & 0x7ff) << 1;
9334 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9335 offset[24:22] already have the same value because of the
9336 sign extension above. */
9337 offset ^= ((~insn) & (1 << 13)) << 10;
9338 offset ^= ((~insn) & (1 << 11)) << 11;
9339
9ee6e8bb
PB
9340 if (insn & (1 << 14)) {
9341 /* Branch and link. */
3174f8e9 9342 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9343 }
3b46e624 9344
b0109805 9345 offset += s->pc;
9ee6e8bb
PB
9346 if (insn & (1 << 12)) {
9347 /* b/bl */
b0109805 9348 gen_jmp(s, offset);
9ee6e8bb
PB
9349 } else {
9350 /* blx */
b0109805 9351 offset &= ~(uint32_t)2;
be5e7a76 9352 /* thumb2 bx, no need to check */
b0109805 9353 gen_bx_im(s, offset);
2c0262af 9354 }
9ee6e8bb
PB
9355 } else if (((insn >> 23) & 7) == 7) {
9356 /* Misc control */
9357 if (insn & (1 << 13))
9358 goto illegal_op;
9359
9360 if (insn & (1 << 26)) {
9361 /* Secure monitor call (v6Z) */
e0c270d9
SW
9362 qemu_log_mask(LOG_UNIMP,
9363 "arm: unimplemented secure monitor call\n");
9ee6e8bb 9364 goto illegal_op; /* not implemented. */
2c0262af 9365 } else {
9ee6e8bb
PB
9366 op = (insn >> 20) & 7;
9367 switch (op) {
9368 case 0: /* msr cpsr. */
9369 if (IS_M(env)) {
8984bd2e
PB
9370 tmp = load_reg(s, rn);
9371 addr = tcg_const_i32(insn & 0xff);
9372 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9373 tcg_temp_free_i32(addr);
7d1b0095 9374 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9375 gen_lookup_tb(s);
9376 break;
9377 }
9378 /* fall through */
9379 case 1: /* msr spsr. */
9380 if (IS_M(env))
9381 goto illegal_op;
2fbac54b
FN
9382 tmp = load_reg(s, rn);
9383 if (gen_set_psr(s,
9ee6e8bb 9384 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 9385 op == 1, tmp))
9ee6e8bb
PB
9386 goto illegal_op;
9387 break;
9388 case 2: /* cps, nop-hint. */
9389 if (((insn >> 8) & 7) == 0) {
9390 gen_nop_hint(s, insn & 0xff);
9391 }
9392 /* Implemented as NOP in user mode. */
9393 if (IS_USER(s))
9394 break;
9395 offset = 0;
9396 imm = 0;
9397 if (insn & (1 << 10)) {
9398 if (insn & (1 << 7))
9399 offset |= CPSR_A;
9400 if (insn & (1 << 6))
9401 offset |= CPSR_I;
9402 if (insn & (1 << 5))
9403 offset |= CPSR_F;
9404 if (insn & (1 << 9))
9405 imm = CPSR_A | CPSR_I | CPSR_F;
9406 }
9407 if (insn & (1 << 8)) {
9408 offset |= 0x1f;
9409 imm |= (insn & 0x1f);
9410 }
9411 if (offset) {
2fbac54b 9412 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
9413 }
9414 break;
9415 case 3: /* Special control operations. */
426f5abc 9416 ARCH(7);
9ee6e8bb
PB
9417 op = (insn >> 4) & 0xf;
9418 switch (op) {
9419 case 2: /* clrex */
426f5abc 9420 gen_clrex(s);
9ee6e8bb
PB
9421 break;
9422 case 4: /* dsb */
9423 case 5: /* dmb */
9424 case 6: /* isb */
9425 /* These execute as NOPs. */
9ee6e8bb
PB
9426 break;
9427 default:
9428 goto illegal_op;
9429 }
9430 break;
9431 case 4: /* bxj */
9432 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9433 tmp = load_reg(s, rn);
9434 gen_bx(s, tmp);
9ee6e8bb
PB
9435 break;
9436 case 5: /* Exception return. */
b8b45b68
RV
9437 if (IS_USER(s)) {
9438 goto illegal_op;
9439 }
9440 if (rn != 14 || rd != 15) {
9441 goto illegal_op;
9442 }
9443 tmp = load_reg(s, rn);
9444 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9445 gen_exception_return(s, tmp);
9446 break;
9ee6e8bb 9447 case 6: /* mrs cpsr. */
7d1b0095 9448 tmp = tcg_temp_new_i32();
9ee6e8bb 9449 if (IS_M(env)) {
8984bd2e
PB
9450 addr = tcg_const_i32(insn & 0xff);
9451 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 9452 tcg_temp_free_i32(addr);
9ee6e8bb 9453 } else {
9ef39277 9454 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9455 }
8984bd2e 9456 store_reg(s, rd, tmp);
9ee6e8bb
PB
9457 break;
9458 case 7: /* mrs spsr. */
9459 /* Not accessible in user mode. */
9460 if (IS_USER(s) || IS_M(env))
9461 goto illegal_op;
d9ba4830
PB
9462 tmp = load_cpu_field(spsr);
9463 store_reg(s, rd, tmp);
9ee6e8bb 9464 break;
2c0262af
FB
9465 }
9466 }
9ee6e8bb
PB
9467 } else {
9468 /* Conditional branch. */
9469 op = (insn >> 22) & 0xf;
9470 /* Generate a conditional jump to next instruction. */
9471 s->condlabel = gen_new_label();
39fb730a 9472 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
9473 s->condjmp = 1;
9474
9475 /* offset[11:1] = insn[10:0] */
9476 offset = (insn & 0x7ff) << 1;
9477 /* offset[17:12] = insn[21:16]. */
9478 offset |= (insn & 0x003f0000) >> 4;
9479 /* offset[31:20] = insn[26]. */
9480 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9481 /* offset[18] = insn[13]. */
9482 offset |= (insn & (1 << 13)) << 5;
9483 /* offset[19] = insn[11]. */
9484 offset |= (insn & (1 << 11)) << 8;
9485
9486 /* jump to the offset */
b0109805 9487 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
9488 }
9489 } else {
9490 /* Data processing immediate. */
9491 if (insn & (1 << 25)) {
9492 if (insn & (1 << 24)) {
9493 if (insn & (1 << 20))
9494 goto illegal_op;
9495 /* Bitfield/Saturate. */
9496 op = (insn >> 21) & 7;
9497 imm = insn & 0x1f;
9498 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 9499 if (rn == 15) {
7d1b0095 9500 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
9501 tcg_gen_movi_i32(tmp, 0);
9502 } else {
9503 tmp = load_reg(s, rn);
9504 }
9ee6e8bb
PB
9505 switch (op) {
9506 case 2: /* Signed bitfield extract. */
9507 imm++;
9508 if (shift + imm > 32)
9509 goto illegal_op;
9510 if (imm < 32)
6ddbc6e4 9511 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
9512 break;
9513 case 6: /* Unsigned bitfield extract. */
9514 imm++;
9515 if (shift + imm > 32)
9516 goto illegal_op;
9517 if (imm < 32)
6ddbc6e4 9518 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
9519 break;
9520 case 3: /* Bitfield insert/clear. */
9521 if (imm < shift)
9522 goto illegal_op;
9523 imm = imm + 1 - shift;
9524 if (imm != 32) {
6ddbc6e4 9525 tmp2 = load_reg(s, rd);
d593c48e 9526 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 9527 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9528 }
9529 break;
9530 case 7:
9531 goto illegal_op;
9532 default: /* Saturate. */
9ee6e8bb
PB
9533 if (shift) {
9534 if (op & 1)
6ddbc6e4 9535 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9536 else
6ddbc6e4 9537 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 9538 }
6ddbc6e4 9539 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
9540 if (op & 4) {
9541 /* Unsigned. */
9ee6e8bb 9542 if ((op & 1) && shift == 0)
9ef39277 9543 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9544 else
9ef39277 9545 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 9546 } else {
9ee6e8bb 9547 /* Signed. */
9ee6e8bb 9548 if ((op & 1) && shift == 0)
9ef39277 9549 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9550 else
9ef39277 9551 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 9552 }
b75263d6 9553 tcg_temp_free_i32(tmp2);
9ee6e8bb 9554 break;
2c0262af 9555 }
6ddbc6e4 9556 store_reg(s, rd, tmp);
9ee6e8bb
PB
9557 } else {
9558 imm = ((insn & 0x04000000) >> 15)
9559 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9560 if (insn & (1 << 22)) {
9561 /* 16-bit immediate. */
9562 imm |= (insn >> 4) & 0xf000;
9563 if (insn & (1 << 23)) {
9564 /* movt */
5e3f878a 9565 tmp = load_reg(s, rd);
86831435 9566 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9567 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9568 } else {
9ee6e8bb 9569 /* movw */
7d1b0095 9570 tmp = tcg_temp_new_i32();
5e3f878a 9571 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9572 }
9573 } else {
9ee6e8bb
PB
9574 /* Add/sub 12-bit immediate. */
9575 if (rn == 15) {
b0109805 9576 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9577 if (insn & (1 << 23))
b0109805 9578 offset -= imm;
9ee6e8bb 9579 else
b0109805 9580 offset += imm;
7d1b0095 9581 tmp = tcg_temp_new_i32();
5e3f878a 9582 tcg_gen_movi_i32(tmp, offset);
2c0262af 9583 } else {
5e3f878a 9584 tmp = load_reg(s, rn);
9ee6e8bb 9585 if (insn & (1 << 23))
5e3f878a 9586 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 9587 else
5e3f878a 9588 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 9589 }
9ee6e8bb 9590 }
5e3f878a 9591 store_reg(s, rd, tmp);
191abaa2 9592 }
9ee6e8bb
PB
9593 } else {
9594 int shifter_out = 0;
9595 /* modified 12-bit immediate. */
9596 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9597 imm = (insn & 0xff);
9598 switch (shift) {
9599 case 0: /* XY */
9600 /* Nothing to do. */
9601 break;
9602 case 1: /* 00XY00XY */
9603 imm |= imm << 16;
9604 break;
9605 case 2: /* XY00XY00 */
9606 imm |= imm << 16;
9607 imm <<= 8;
9608 break;
9609 case 3: /* XYXYXYXY */
9610 imm |= imm << 16;
9611 imm |= imm << 8;
9612 break;
9613 default: /* Rotated constant. */
9614 shift = (shift << 1) | (imm >> 7);
9615 imm |= 0x80;
9616 imm = imm << (32 - shift);
9617 shifter_out = 1;
9618 break;
b5ff1b31 9619 }
7d1b0095 9620 tmp2 = tcg_temp_new_i32();
3174f8e9 9621 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9622 rn = (insn >> 16) & 0xf;
3174f8e9 9623 if (rn == 15) {
7d1b0095 9624 tmp = tcg_temp_new_i32();
3174f8e9
FN
9625 tcg_gen_movi_i32(tmp, 0);
9626 } else {
9627 tmp = load_reg(s, rn);
9628 }
9ee6e8bb
PB
9629 op = (insn >> 21) & 0xf;
9630 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9631 shifter_out, tmp, tmp2))
9ee6e8bb 9632 goto illegal_op;
7d1b0095 9633 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9634 rd = (insn >> 8) & 0xf;
9635 if (rd != 15) {
3174f8e9
FN
9636 store_reg(s, rd, tmp);
9637 } else {
7d1b0095 9638 tcg_temp_free_i32(tmp);
2c0262af 9639 }
2c0262af 9640 }
9ee6e8bb
PB
9641 }
9642 break;
9643 case 12: /* Load/store single data item. */
9644 {
9645 int postinc = 0;
9646 int writeback = 0;
b0109805 9647 int user;
9ee6e8bb
PB
9648 if ((insn & 0x01100000) == 0x01000000) {
9649 if (disas_neon_ls_insn(env, s, insn))
c1713132 9650 goto illegal_op;
9ee6e8bb
PB
9651 break;
9652 }
a2fdc890
PM
9653 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9654 if (rs == 15) {
9655 if (!(insn & (1 << 20))) {
9656 goto illegal_op;
9657 }
9658 if (op != 2) {
9659 /* Byte or halfword load space with dest == r15 : memory hints.
9660 * Catch them early so we don't emit pointless addressing code.
9661 * This space is a mix of:
9662 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9663 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9664 * cores)
9665 * unallocated hints, which must be treated as NOPs
9666 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9667 * which is easiest for the decoding logic
9668 * Some space which must UNDEF
9669 */
9670 int op1 = (insn >> 23) & 3;
9671 int op2 = (insn >> 6) & 0x3f;
9672 if (op & 2) {
9673 goto illegal_op;
9674 }
9675 if (rn == 15) {
02afbf64
PM
9676 /* UNPREDICTABLE, unallocated hint or
9677 * PLD/PLDW/PLI (literal)
9678 */
a2fdc890
PM
9679 return 0;
9680 }
9681 if (op1 & 1) {
02afbf64 9682 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9683 }
9684 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9685 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9686 }
9687 /* UNDEF space, or an UNPREDICTABLE */
9688 return 1;
9689 }
9690 }
b0109805 9691 user = IS_USER(s);
9ee6e8bb 9692 if (rn == 15) {
7d1b0095 9693 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9694 /* PC relative. */
9695 /* s->pc has already been incremented by 4. */
9696 imm = s->pc & 0xfffffffc;
9697 if (insn & (1 << 23))
9698 imm += insn & 0xfff;
9699 else
9700 imm -= insn & 0xfff;
b0109805 9701 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9702 } else {
b0109805 9703 addr = load_reg(s, rn);
9ee6e8bb
PB
9704 if (insn & (1 << 23)) {
9705 /* Positive offset. */
9706 imm = insn & 0xfff;
b0109805 9707 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9708 } else {
9ee6e8bb 9709 imm = insn & 0xff;
2a0308c5
PM
9710 switch ((insn >> 8) & 0xf) {
9711 case 0x0: /* Shifted Register. */
9ee6e8bb 9712 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9713 if (shift > 3) {
9714 tcg_temp_free_i32(addr);
18c9b560 9715 goto illegal_op;
2a0308c5 9716 }
b26eefb6 9717 tmp = load_reg(s, rm);
9ee6e8bb 9718 if (shift)
b26eefb6 9719 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9720 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9721 tcg_temp_free_i32(tmp);
9ee6e8bb 9722 break;
2a0308c5 9723 case 0xc: /* Negative offset. */
b0109805 9724 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9725 break;
2a0308c5 9726 case 0xe: /* User privilege. */
b0109805
PB
9727 tcg_gen_addi_i32(addr, addr, imm);
9728 user = 1;
9ee6e8bb 9729 break;
2a0308c5 9730 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9731 imm = -imm;
9732 /* Fall through. */
2a0308c5 9733 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9734 postinc = 1;
9735 writeback = 1;
9736 break;
2a0308c5 9737 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9738 imm = -imm;
9739 /* Fall through. */
2a0308c5 9740 case 0xf: /* Pre-increment. */
b0109805 9741 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9742 writeback = 1;
9743 break;
9744 default:
2a0308c5 9745 tcg_temp_free_i32(addr);
b7bcbe95 9746 goto illegal_op;
9ee6e8bb
PB
9747 }
9748 }
9749 }
9ee6e8bb
PB
9750 if (insn & (1 << 20)) {
9751 /* Load. */
5a839c0d 9752 tmp = tcg_temp_new_i32();
a2fdc890 9753 switch (op) {
5a839c0d 9754 case 0:
08307563 9755 gen_aa32_ld8u(tmp, addr, user);
5a839c0d
PM
9756 break;
9757 case 4:
08307563 9758 gen_aa32_ld8s(tmp, addr, user);
5a839c0d
PM
9759 break;
9760 case 1:
08307563 9761 gen_aa32_ld16u(tmp, addr, user);
5a839c0d
PM
9762 break;
9763 case 5:
08307563 9764 gen_aa32_ld16s(tmp, addr, user);
5a839c0d
PM
9765 break;
9766 case 2:
08307563 9767 gen_aa32_ld32u(tmp, addr, user);
5a839c0d 9768 break;
2a0308c5 9769 default:
5a839c0d 9770 tcg_temp_free_i32(tmp);
2a0308c5
PM
9771 tcg_temp_free_i32(addr);
9772 goto illegal_op;
a2fdc890
PM
9773 }
9774 if (rs == 15) {
9775 gen_bx(s, tmp);
9ee6e8bb 9776 } else {
a2fdc890 9777 store_reg(s, rs, tmp);
9ee6e8bb
PB
9778 }
9779 } else {
9780 /* Store. */
b0109805 9781 tmp = load_reg(s, rs);
9ee6e8bb 9782 switch (op) {
5a839c0d 9783 case 0:
08307563 9784 gen_aa32_st8(tmp, addr, user);
5a839c0d
PM
9785 break;
9786 case 1:
08307563 9787 gen_aa32_st16(tmp, addr, user);
5a839c0d
PM
9788 break;
9789 case 2:
08307563 9790 gen_aa32_st32(tmp, addr, user);
5a839c0d 9791 break;
2a0308c5 9792 default:
5a839c0d 9793 tcg_temp_free_i32(tmp);
2a0308c5
PM
9794 tcg_temp_free_i32(addr);
9795 goto illegal_op;
b7bcbe95 9796 }
5a839c0d 9797 tcg_temp_free_i32(tmp);
2c0262af 9798 }
9ee6e8bb 9799 if (postinc)
b0109805
PB
9800 tcg_gen_addi_i32(addr, addr, imm);
9801 if (writeback) {
9802 store_reg(s, rn, addr);
9803 } else {
7d1b0095 9804 tcg_temp_free_i32(addr);
b0109805 9805 }
9ee6e8bb
PB
9806 }
9807 break;
9808 default:
9809 goto illegal_op;
2c0262af 9810 }
9ee6e8bb
PB
9811 return 0;
9812illegal_op:
9813 return 1;
2c0262af
FB
9814}
9815
0ecb72a5 9816static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9817{
9818 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9819 int32_t offset;
9820 int i;
39d5492a
PM
9821 TCGv_i32 tmp;
9822 TCGv_i32 tmp2;
9823 TCGv_i32 addr;
99c475ab 9824
9ee6e8bb
PB
9825 if (s->condexec_mask) {
9826 cond = s->condexec_cond;
bedd2912
JB
9827 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9828 s->condlabel = gen_new_label();
39fb730a 9829 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
9830 s->condjmp = 1;
9831 }
9ee6e8bb
PB
9832 }
9833
d31dd73e 9834 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9835 s->pc += 2;
b5ff1b31 9836
99c475ab
FB
9837 switch (insn >> 12) {
9838 case 0: case 1:
396e467c 9839
99c475ab
FB
9840 rd = insn & 7;
9841 op = (insn >> 11) & 3;
9842 if (op == 3) {
9843 /* add/subtract */
9844 rn = (insn >> 3) & 7;
396e467c 9845 tmp = load_reg(s, rn);
99c475ab
FB
9846 if (insn & (1 << 10)) {
9847 /* immediate */
7d1b0095 9848 tmp2 = tcg_temp_new_i32();
396e467c 9849 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9850 } else {
9851 /* reg */
9852 rm = (insn >> 6) & 7;
396e467c 9853 tmp2 = load_reg(s, rm);
99c475ab 9854 }
9ee6e8bb
PB
9855 if (insn & (1 << 9)) {
9856 if (s->condexec_mask)
396e467c 9857 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9858 else
72485ec4 9859 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9860 } else {
9861 if (s->condexec_mask)
396e467c 9862 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9863 else
72485ec4 9864 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9865 }
7d1b0095 9866 tcg_temp_free_i32(tmp2);
396e467c 9867 store_reg(s, rd, tmp);
99c475ab
FB
9868 } else {
9869 /* shift immediate */
9870 rm = (insn >> 3) & 7;
9871 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9872 tmp = load_reg(s, rm);
9873 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9874 if (!s->condexec_mask)
9875 gen_logic_CC(tmp);
9876 store_reg(s, rd, tmp);
99c475ab
FB
9877 }
9878 break;
9879 case 2: case 3:
9880 /* arithmetic large immediate */
9881 op = (insn >> 11) & 3;
9882 rd = (insn >> 8) & 0x7;
396e467c 9883 if (op == 0) { /* mov */
7d1b0095 9884 tmp = tcg_temp_new_i32();
396e467c 9885 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9886 if (!s->condexec_mask)
396e467c
FN
9887 gen_logic_CC(tmp);
9888 store_reg(s, rd, tmp);
9889 } else {
9890 tmp = load_reg(s, rd);
7d1b0095 9891 tmp2 = tcg_temp_new_i32();
396e467c
FN
9892 tcg_gen_movi_i32(tmp2, insn & 0xff);
9893 switch (op) {
9894 case 1: /* cmp */
72485ec4 9895 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9896 tcg_temp_free_i32(tmp);
9897 tcg_temp_free_i32(tmp2);
396e467c
FN
9898 break;
9899 case 2: /* add */
9900 if (s->condexec_mask)
9901 tcg_gen_add_i32(tmp, tmp, tmp2);
9902 else
72485ec4 9903 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9904 tcg_temp_free_i32(tmp2);
396e467c
FN
9905 store_reg(s, rd, tmp);
9906 break;
9907 case 3: /* sub */
9908 if (s->condexec_mask)
9909 tcg_gen_sub_i32(tmp, tmp, tmp2);
9910 else
72485ec4 9911 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9912 tcg_temp_free_i32(tmp2);
396e467c
FN
9913 store_reg(s, rd, tmp);
9914 break;
9915 }
99c475ab 9916 }
99c475ab
FB
9917 break;
9918 case 4:
9919 if (insn & (1 << 11)) {
9920 rd = (insn >> 8) & 7;
5899f386
FB
9921 /* load pc-relative. Bit 1 of PC is ignored. */
9922 val = s->pc + 2 + ((insn & 0xff) * 4);
9923 val &= ~(uint32_t)2;
7d1b0095 9924 addr = tcg_temp_new_i32();
b0109805 9925 tcg_gen_movi_i32(addr, val);
c40c8556 9926 tmp = tcg_temp_new_i32();
08307563 9927 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7d1b0095 9928 tcg_temp_free_i32(addr);
b0109805 9929 store_reg(s, rd, tmp);
99c475ab
FB
9930 break;
9931 }
9932 if (insn & (1 << 10)) {
9933 /* data processing extended or blx */
9934 rd = (insn & 7) | ((insn >> 4) & 8);
9935 rm = (insn >> 3) & 0xf;
9936 op = (insn >> 8) & 3;
9937 switch (op) {
9938 case 0: /* add */
396e467c
FN
9939 tmp = load_reg(s, rd);
9940 tmp2 = load_reg(s, rm);
9941 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9942 tcg_temp_free_i32(tmp2);
396e467c 9943 store_reg(s, rd, tmp);
99c475ab
FB
9944 break;
9945 case 1: /* cmp */
396e467c
FN
9946 tmp = load_reg(s, rd);
9947 tmp2 = load_reg(s, rm);
72485ec4 9948 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9949 tcg_temp_free_i32(tmp2);
9950 tcg_temp_free_i32(tmp);
99c475ab
FB
9951 break;
9952 case 2: /* mov/cpy */
396e467c
FN
9953 tmp = load_reg(s, rm);
9954 store_reg(s, rd, tmp);
99c475ab
FB
9955 break;
9956 case 3:/* branch [and link] exchange thumb register */
b0109805 9957 tmp = load_reg(s, rm);
99c475ab 9958 if (insn & (1 << 7)) {
be5e7a76 9959 ARCH(5);
99c475ab 9960 val = (uint32_t)s->pc | 1;
7d1b0095 9961 tmp2 = tcg_temp_new_i32();
b0109805
PB
9962 tcg_gen_movi_i32(tmp2, val);
9963 store_reg(s, 14, tmp2);
99c475ab 9964 }
be5e7a76 9965 /* already thumb, no need to check */
d9ba4830 9966 gen_bx(s, tmp);
99c475ab
FB
9967 break;
9968 }
9969 break;
9970 }
9971
9972 /* data processing register */
9973 rd = insn & 7;
9974 rm = (insn >> 3) & 7;
9975 op = (insn >> 6) & 0xf;
9976 if (op == 2 || op == 3 || op == 4 || op == 7) {
9977 /* the shift/rotate ops want the operands backwards */
9978 val = rm;
9979 rm = rd;
9980 rd = val;
9981 val = 1;
9982 } else {
9983 val = 0;
9984 }
9985
396e467c 9986 if (op == 9) { /* neg */
7d1b0095 9987 tmp = tcg_temp_new_i32();
396e467c
FN
9988 tcg_gen_movi_i32(tmp, 0);
9989 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9990 tmp = load_reg(s, rd);
9991 } else {
39d5492a 9992 TCGV_UNUSED_I32(tmp);
396e467c 9993 }
99c475ab 9994
396e467c 9995 tmp2 = load_reg(s, rm);
5899f386 9996 switch (op) {
99c475ab 9997 case 0x0: /* and */
396e467c 9998 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9999 if (!s->condexec_mask)
396e467c 10000 gen_logic_CC(tmp);
99c475ab
FB
10001 break;
10002 case 0x1: /* eor */
396e467c 10003 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 10004 if (!s->condexec_mask)
396e467c 10005 gen_logic_CC(tmp);
99c475ab
FB
10006 break;
10007 case 0x2: /* lsl */
9ee6e8bb 10008 if (s->condexec_mask) {
365af80e 10009 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 10010 } else {
9ef39277 10011 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10012 gen_logic_CC(tmp2);
9ee6e8bb 10013 }
99c475ab
FB
10014 break;
10015 case 0x3: /* lsr */
9ee6e8bb 10016 if (s->condexec_mask) {
365af80e 10017 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 10018 } else {
9ef39277 10019 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10020 gen_logic_CC(tmp2);
9ee6e8bb 10021 }
99c475ab
FB
10022 break;
10023 case 0x4: /* asr */
9ee6e8bb 10024 if (s->condexec_mask) {
365af80e 10025 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 10026 } else {
9ef39277 10027 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10028 gen_logic_CC(tmp2);
9ee6e8bb 10029 }
99c475ab
FB
10030 break;
10031 case 0x5: /* adc */
49b4c31e 10032 if (s->condexec_mask) {
396e467c 10033 gen_adc(tmp, tmp2);
49b4c31e
RH
10034 } else {
10035 gen_adc_CC(tmp, tmp, tmp2);
10036 }
99c475ab
FB
10037 break;
10038 case 0x6: /* sbc */
2de68a49 10039 if (s->condexec_mask) {
396e467c 10040 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
10041 } else {
10042 gen_sbc_CC(tmp, tmp, tmp2);
10043 }
99c475ab
FB
10044 break;
10045 case 0x7: /* ror */
9ee6e8bb 10046 if (s->condexec_mask) {
f669df27
AJ
10047 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10048 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 10049 } else {
9ef39277 10050 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10051 gen_logic_CC(tmp2);
9ee6e8bb 10052 }
99c475ab
FB
10053 break;
10054 case 0x8: /* tst */
396e467c
FN
10055 tcg_gen_and_i32(tmp, tmp, tmp2);
10056 gen_logic_CC(tmp);
99c475ab 10057 rd = 16;
5899f386 10058 break;
99c475ab 10059 case 0x9: /* neg */
9ee6e8bb 10060 if (s->condexec_mask)
396e467c 10061 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 10062 else
72485ec4 10063 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10064 break;
10065 case 0xa: /* cmp */
72485ec4 10066 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10067 rd = 16;
10068 break;
10069 case 0xb: /* cmn */
72485ec4 10070 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
10071 rd = 16;
10072 break;
10073 case 0xc: /* orr */
396e467c 10074 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 10075 if (!s->condexec_mask)
396e467c 10076 gen_logic_CC(tmp);
99c475ab
FB
10077 break;
10078 case 0xd: /* mul */
7b2919a0 10079 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 10080 if (!s->condexec_mask)
396e467c 10081 gen_logic_CC(tmp);
99c475ab
FB
10082 break;
10083 case 0xe: /* bic */
f669df27 10084 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 10085 if (!s->condexec_mask)
396e467c 10086 gen_logic_CC(tmp);
99c475ab
FB
10087 break;
10088 case 0xf: /* mvn */
396e467c 10089 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 10090 if (!s->condexec_mask)
396e467c 10091 gen_logic_CC(tmp2);
99c475ab 10092 val = 1;
5899f386 10093 rm = rd;
99c475ab
FB
10094 break;
10095 }
10096 if (rd != 16) {
396e467c
FN
10097 if (val) {
10098 store_reg(s, rm, tmp2);
10099 if (op != 0xf)
7d1b0095 10100 tcg_temp_free_i32(tmp);
396e467c
FN
10101 } else {
10102 store_reg(s, rd, tmp);
7d1b0095 10103 tcg_temp_free_i32(tmp2);
396e467c
FN
10104 }
10105 } else {
7d1b0095
PM
10106 tcg_temp_free_i32(tmp);
10107 tcg_temp_free_i32(tmp2);
99c475ab
FB
10108 }
10109 break;
10110
10111 case 5:
10112 /* load/store register offset. */
10113 rd = insn & 7;
10114 rn = (insn >> 3) & 7;
10115 rm = (insn >> 6) & 7;
10116 op = (insn >> 9) & 7;
b0109805 10117 addr = load_reg(s, rn);
b26eefb6 10118 tmp = load_reg(s, rm);
b0109805 10119 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10120 tcg_temp_free_i32(tmp);
99c475ab 10121
c40c8556 10122 if (op < 3) { /* store */
b0109805 10123 tmp = load_reg(s, rd);
c40c8556
PM
10124 } else {
10125 tmp = tcg_temp_new_i32();
10126 }
99c475ab
FB
10127
10128 switch (op) {
10129 case 0: /* str */
08307563 10130 gen_aa32_st32(tmp, addr, IS_USER(s));
99c475ab
FB
10131 break;
10132 case 1: /* strh */
08307563 10133 gen_aa32_st16(tmp, addr, IS_USER(s));
99c475ab
FB
10134 break;
10135 case 2: /* strb */
08307563 10136 gen_aa32_st8(tmp, addr, IS_USER(s));
99c475ab
FB
10137 break;
10138 case 3: /* ldrsb */
08307563 10139 gen_aa32_ld8s(tmp, addr, IS_USER(s));
99c475ab
FB
10140 break;
10141 case 4: /* ldr */
08307563 10142 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
10143 break;
10144 case 5: /* ldrh */
08307563 10145 gen_aa32_ld16u(tmp, addr, IS_USER(s));
99c475ab
FB
10146 break;
10147 case 6: /* ldrb */
08307563 10148 gen_aa32_ld8u(tmp, addr, IS_USER(s));
99c475ab
FB
10149 break;
10150 case 7: /* ldrsh */
08307563 10151 gen_aa32_ld16s(tmp, addr, IS_USER(s));
99c475ab
FB
10152 break;
10153 }
c40c8556 10154 if (op >= 3) { /* load */
b0109805 10155 store_reg(s, rd, tmp);
c40c8556
PM
10156 } else {
10157 tcg_temp_free_i32(tmp);
10158 }
7d1b0095 10159 tcg_temp_free_i32(addr);
99c475ab
FB
10160 break;
10161
10162 case 6:
10163 /* load/store word immediate offset */
10164 rd = insn & 7;
10165 rn = (insn >> 3) & 7;
b0109805 10166 addr = load_reg(s, rn);
99c475ab 10167 val = (insn >> 4) & 0x7c;
b0109805 10168 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10169
10170 if (insn & (1 << 11)) {
10171 /* load */
c40c8556 10172 tmp = tcg_temp_new_i32();
08307563 10173 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10174 store_reg(s, rd, tmp);
99c475ab
FB
10175 } else {
10176 /* store */
b0109805 10177 tmp = load_reg(s, rd);
08307563 10178 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10179 tcg_temp_free_i32(tmp);
99c475ab 10180 }
7d1b0095 10181 tcg_temp_free_i32(addr);
99c475ab
FB
10182 break;
10183
10184 case 7:
10185 /* load/store byte immediate offset */
10186 rd = insn & 7;
10187 rn = (insn >> 3) & 7;
b0109805 10188 addr = load_reg(s, rn);
99c475ab 10189 val = (insn >> 6) & 0x1f;
b0109805 10190 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10191
10192 if (insn & (1 << 11)) {
10193 /* load */
c40c8556 10194 tmp = tcg_temp_new_i32();
08307563 10195 gen_aa32_ld8u(tmp, addr, IS_USER(s));
b0109805 10196 store_reg(s, rd, tmp);
99c475ab
FB
10197 } else {
10198 /* store */
b0109805 10199 tmp = load_reg(s, rd);
08307563 10200 gen_aa32_st8(tmp, addr, IS_USER(s));
c40c8556 10201 tcg_temp_free_i32(tmp);
99c475ab 10202 }
7d1b0095 10203 tcg_temp_free_i32(addr);
99c475ab
FB
10204 break;
10205
10206 case 8:
10207 /* load/store halfword immediate offset */
10208 rd = insn & 7;
10209 rn = (insn >> 3) & 7;
b0109805 10210 addr = load_reg(s, rn);
99c475ab 10211 val = (insn >> 5) & 0x3e;
b0109805 10212 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10213
10214 if (insn & (1 << 11)) {
10215 /* load */
c40c8556 10216 tmp = tcg_temp_new_i32();
08307563 10217 gen_aa32_ld16u(tmp, addr, IS_USER(s));
b0109805 10218 store_reg(s, rd, tmp);
99c475ab
FB
10219 } else {
10220 /* store */
b0109805 10221 tmp = load_reg(s, rd);
08307563 10222 gen_aa32_st16(tmp, addr, IS_USER(s));
c40c8556 10223 tcg_temp_free_i32(tmp);
99c475ab 10224 }
7d1b0095 10225 tcg_temp_free_i32(addr);
99c475ab
FB
10226 break;
10227
10228 case 9:
10229 /* load/store from stack */
10230 rd = (insn >> 8) & 7;
b0109805 10231 addr = load_reg(s, 13);
99c475ab 10232 val = (insn & 0xff) * 4;
b0109805 10233 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10234
10235 if (insn & (1 << 11)) {
10236 /* load */
c40c8556 10237 tmp = tcg_temp_new_i32();
08307563 10238 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10239 store_reg(s, rd, tmp);
99c475ab
FB
10240 } else {
10241 /* store */
b0109805 10242 tmp = load_reg(s, rd);
08307563 10243 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10244 tcg_temp_free_i32(tmp);
99c475ab 10245 }
7d1b0095 10246 tcg_temp_free_i32(addr);
99c475ab
FB
10247 break;
10248
10249 case 10:
10250 /* add to high reg */
10251 rd = (insn >> 8) & 7;
5899f386
FB
10252 if (insn & (1 << 11)) {
10253 /* SP */
5e3f878a 10254 tmp = load_reg(s, 13);
5899f386
FB
10255 } else {
10256 /* PC. bit 1 is ignored. */
7d1b0095 10257 tmp = tcg_temp_new_i32();
5e3f878a 10258 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 10259 }
99c475ab 10260 val = (insn & 0xff) * 4;
5e3f878a
PB
10261 tcg_gen_addi_i32(tmp, tmp, val);
10262 store_reg(s, rd, tmp);
99c475ab
FB
10263 break;
10264
10265 case 11:
10266 /* misc */
10267 op = (insn >> 8) & 0xf;
10268 switch (op) {
10269 case 0:
10270 /* adjust stack pointer */
b26eefb6 10271 tmp = load_reg(s, 13);
99c475ab
FB
10272 val = (insn & 0x7f) * 4;
10273 if (insn & (1 << 7))
6a0d8a1d 10274 val = -(int32_t)val;
b26eefb6
PB
10275 tcg_gen_addi_i32(tmp, tmp, val);
10276 store_reg(s, 13, tmp);
99c475ab
FB
10277 break;
10278
9ee6e8bb
PB
10279 case 2: /* sign/zero extend. */
10280 ARCH(6);
10281 rd = insn & 7;
10282 rm = (insn >> 3) & 7;
b0109805 10283 tmp = load_reg(s, rm);
9ee6e8bb 10284 switch ((insn >> 6) & 3) {
b0109805
PB
10285 case 0: gen_sxth(tmp); break;
10286 case 1: gen_sxtb(tmp); break;
10287 case 2: gen_uxth(tmp); break;
10288 case 3: gen_uxtb(tmp); break;
9ee6e8bb 10289 }
b0109805 10290 store_reg(s, rd, tmp);
9ee6e8bb 10291 break;
99c475ab
FB
10292 case 4: case 5: case 0xc: case 0xd:
10293 /* push/pop */
b0109805 10294 addr = load_reg(s, 13);
5899f386
FB
10295 if (insn & (1 << 8))
10296 offset = 4;
99c475ab 10297 else
5899f386
FB
10298 offset = 0;
10299 for (i = 0; i < 8; i++) {
10300 if (insn & (1 << i))
10301 offset += 4;
10302 }
10303 if ((insn & (1 << 11)) == 0) {
b0109805 10304 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10305 }
99c475ab
FB
10306 for (i = 0; i < 8; i++) {
10307 if (insn & (1 << i)) {
10308 if (insn & (1 << 11)) {
10309 /* pop */
c40c8556 10310 tmp = tcg_temp_new_i32();
08307563 10311 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10312 store_reg(s, i, tmp);
99c475ab
FB
10313 } else {
10314 /* push */
b0109805 10315 tmp = load_reg(s, i);
08307563 10316 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10317 tcg_temp_free_i32(tmp);
99c475ab 10318 }
5899f386 10319 /* advance to the next address. */
b0109805 10320 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10321 }
10322 }
39d5492a 10323 TCGV_UNUSED_I32(tmp);
99c475ab
FB
10324 if (insn & (1 << 8)) {
10325 if (insn & (1 << 11)) {
10326 /* pop pc */
c40c8556 10327 tmp = tcg_temp_new_i32();
08307563 10328 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
10329 /* don't set the pc until the rest of the instruction
10330 has completed */
10331 } else {
10332 /* push lr */
b0109805 10333 tmp = load_reg(s, 14);
08307563 10334 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10335 tcg_temp_free_i32(tmp);
99c475ab 10336 }
b0109805 10337 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 10338 }
5899f386 10339 if ((insn & (1 << 11)) == 0) {
b0109805 10340 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10341 }
99c475ab 10342 /* write back the new stack pointer */
b0109805 10343 store_reg(s, 13, addr);
99c475ab 10344 /* set the new PC value */
be5e7a76
DES
10345 if ((insn & 0x0900) == 0x0900) {
10346 store_reg_from_load(env, s, 15, tmp);
10347 }
99c475ab
FB
10348 break;
10349
9ee6e8bb
PB
10350 case 1: case 3: case 9: case 11: /* czb */
10351 rm = insn & 7;
d9ba4830 10352 tmp = load_reg(s, rm);
9ee6e8bb
PB
10353 s->condlabel = gen_new_label();
10354 s->condjmp = 1;
10355 if (insn & (1 << 11))
cb63669a 10356 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 10357 else
cb63669a 10358 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 10359 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10360 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10361 val = (uint32_t)s->pc + 2;
10362 val += offset;
10363 gen_jmp(s, val);
10364 break;
10365
10366 case 15: /* IT, nop-hint. */
10367 if ((insn & 0xf) == 0) {
10368 gen_nop_hint(s, (insn >> 4) & 0xf);
10369 break;
10370 }
10371 /* If Then. */
10372 s->condexec_cond = (insn >> 4) & 0xe;
10373 s->condexec_mask = insn & 0x1f;
10374 /* No actual code generated for this insn, just setup state. */
10375 break;
10376
06c949e6 10377 case 0xe: /* bkpt */
be5e7a76 10378 ARCH(5);
bc4a0de0 10379 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
10380 break;
10381
9ee6e8bb
PB
10382 case 0xa: /* rev */
10383 ARCH(6);
10384 rn = (insn >> 3) & 0x7;
10385 rd = insn & 0x7;
b0109805 10386 tmp = load_reg(s, rn);
9ee6e8bb 10387 switch ((insn >> 6) & 3) {
66896cb8 10388 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
10389 case 1: gen_rev16(tmp); break;
10390 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
10391 default: goto illegal_op;
10392 }
b0109805 10393 store_reg(s, rd, tmp);
9ee6e8bb
PB
10394 break;
10395
d9e028c1
PM
10396 case 6:
10397 switch ((insn >> 5) & 7) {
10398 case 2:
10399 /* setend */
10400 ARCH(6);
10962fd5
PM
10401 if (((insn >> 3) & 1) != s->bswap_code) {
10402 /* Dynamic endianness switching not implemented. */
e0c270d9 10403 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
10404 goto illegal_op;
10405 }
9ee6e8bb 10406 break;
d9e028c1
PM
10407 case 3:
10408 /* cps */
10409 ARCH(6);
10410 if (IS_USER(s)) {
10411 break;
8984bd2e 10412 }
d9e028c1
PM
10413 if (IS_M(env)) {
10414 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10415 /* FAULTMASK */
10416 if (insn & 1) {
10417 addr = tcg_const_i32(19);
10418 gen_helper_v7m_msr(cpu_env, addr, tmp);
10419 tcg_temp_free_i32(addr);
10420 }
10421 /* PRIMASK */
10422 if (insn & 2) {
10423 addr = tcg_const_i32(16);
10424 gen_helper_v7m_msr(cpu_env, addr, tmp);
10425 tcg_temp_free_i32(addr);
10426 }
10427 tcg_temp_free_i32(tmp);
10428 gen_lookup_tb(s);
10429 } else {
10430 if (insn & (1 << 4)) {
10431 shift = CPSR_A | CPSR_I | CPSR_F;
10432 } else {
10433 shift = 0;
10434 }
10435 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 10436 }
d9e028c1
PM
10437 break;
10438 default:
10439 goto undef;
9ee6e8bb
PB
10440 }
10441 break;
10442
99c475ab
FB
10443 default:
10444 goto undef;
10445 }
10446 break;
10447
10448 case 12:
a7d3970d 10449 {
99c475ab 10450 /* load/store multiple */
39d5492a
PM
10451 TCGv_i32 loaded_var;
10452 TCGV_UNUSED_I32(loaded_var);
99c475ab 10453 rn = (insn >> 8) & 0x7;
b0109805 10454 addr = load_reg(s, rn);
99c475ab
FB
10455 for (i = 0; i < 8; i++) {
10456 if (insn & (1 << i)) {
99c475ab
FB
10457 if (insn & (1 << 11)) {
10458 /* load */
c40c8556 10459 tmp = tcg_temp_new_i32();
08307563 10460 gen_aa32_ld32u(tmp, addr, IS_USER(s));
a7d3970d
PM
10461 if (i == rn) {
10462 loaded_var = tmp;
10463 } else {
10464 store_reg(s, i, tmp);
10465 }
99c475ab
FB
10466 } else {
10467 /* store */
b0109805 10468 tmp = load_reg(s, i);
08307563 10469 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10470 tcg_temp_free_i32(tmp);
99c475ab 10471 }
5899f386 10472 /* advance to the next address */
b0109805 10473 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10474 }
10475 }
b0109805 10476 if ((insn & (1 << rn)) == 0) {
a7d3970d 10477 /* base reg not in list: base register writeback */
b0109805
PB
10478 store_reg(s, rn, addr);
10479 } else {
a7d3970d
PM
10480 /* base reg in list: if load, complete it now */
10481 if (insn & (1 << 11)) {
10482 store_reg(s, rn, loaded_var);
10483 }
7d1b0095 10484 tcg_temp_free_i32(addr);
b0109805 10485 }
99c475ab 10486 break;
a7d3970d 10487 }
99c475ab
FB
10488 case 13:
10489 /* conditional branch or swi */
10490 cond = (insn >> 8) & 0xf;
10491 if (cond == 0xe)
10492 goto undef;
10493
10494 if (cond == 0xf) {
10495 /* swi */
eaed129d 10496 gen_set_pc_im(s, s->pc);
9ee6e8bb 10497 s->is_jmp = DISAS_SWI;
99c475ab
FB
10498 break;
10499 }
10500 /* generate a conditional jump to next instruction */
e50e6a20 10501 s->condlabel = gen_new_label();
39fb730a 10502 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 10503 s->condjmp = 1;
99c475ab
FB
10504
10505 /* jump to the offset */
5899f386 10506 val = (uint32_t)s->pc + 2;
99c475ab 10507 offset = ((int32_t)insn << 24) >> 24;
5899f386 10508 val += offset << 1;
8aaca4c0 10509 gen_jmp(s, val);
99c475ab
FB
10510 break;
10511
10512 case 14:
358bf29e 10513 if (insn & (1 << 11)) {
9ee6e8bb
PB
10514 if (disas_thumb2_insn(env, s, insn))
10515 goto undef32;
358bf29e
PB
10516 break;
10517 }
9ee6e8bb 10518 /* unconditional branch */
99c475ab
FB
10519 val = (uint32_t)s->pc;
10520 offset = ((int32_t)insn << 21) >> 21;
10521 val += (offset << 1) + 2;
8aaca4c0 10522 gen_jmp(s, val);
99c475ab
FB
10523 break;
10524
10525 case 15:
9ee6e8bb 10526 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 10527 goto undef32;
9ee6e8bb 10528 break;
99c475ab
FB
10529 }
10530 return;
9ee6e8bb 10531undef32:
bc4a0de0 10532 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
10533 return;
10534illegal_op:
99c475ab 10535undef:
bc4a0de0 10536 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
10537}
10538
2c0262af
FB
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction.

   This is the top-level A32/T32 translation loop: it decodes guest
   instructions one at a time (via disas_arm_insn/disas_thumb_insn)
   until the TB must end, then emits the TB epilogue.  AArch64 TBs are
   diverted to the A64 decoder's own loop immediately below. */
static inline void gen_intermediate_code_internal(ARMCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_internal_a64(cpu, tb, search_pc);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Initialise the disassembly context from the TB flags: these were
       captured when the TB was looked up and describe the CPU state the
       generated code may assume (Thumb mode, IT bits, privilege, ...). */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    /* Split the packed condexec field back into mask and condition. */
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->current_pl = arm_current_pl(env);

    /* Scratch TCG temporaries shared by the VFP/Neon decoders. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        /* Emit a debug exception if a guest breakpoint sits at this PC. */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        /* When retranslating to recover state, record the PC, IT bits and
           icount for every guest instruction start. */
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine after each Thumb insn. */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        /* Close the skip-label of a conditionally executed instruction. */
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns ++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc, dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        /* Zero-fill the tail of the instruction-start map. */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
10832
0ecb72a5 10833void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10834{
5639c3f2 10835 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
10836}
10837
0ecb72a5 10838void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10839{
5639c3f2 10840 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
10841}
10842
/* Printable names for the 16 possible values of the CPSR mode field
   (CPSR bits [3:0] of the 5-bit mode encoding); unassigned encodings
   print as "???".  Declared fully const so neither the pointers nor
   the strings can be reassigned. */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 10847
878096ee
AF
10848void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
10849 int flags)
2c0262af 10850{
878096ee
AF
10851 ARMCPU *cpu = ARM_CPU(cs);
10852 CPUARMState *env = &cpu->env;
2c0262af 10853 int i;
b5ff1b31 10854 uint32_t psr;
2c0262af
FB
10855
10856 for(i=0;i<16;i++) {
7fe48483 10857 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10858 if ((i % 4) == 3)
7fe48483 10859 cpu_fprintf(f, "\n");
2c0262af 10860 else
7fe48483 10861 cpu_fprintf(f, " ");
2c0262af 10862 }
b5ff1b31 10863 psr = cpsr_read(env);
687fa640
TS
10864 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10865 psr,
b5ff1b31
FB
10866 psr & (1 << 31) ? 'N' : '-',
10867 psr & (1 << 30) ? 'Z' : '-',
10868 psr & (1 << 29) ? 'C' : '-',
10869 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10870 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10871 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10872
f2617cfc
PM
10873 if (flags & CPU_DUMP_FPU) {
10874 int numvfpregs = 0;
10875 if (arm_feature(env, ARM_FEATURE_VFP)) {
10876 numvfpregs += 16;
10877 }
10878 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10879 numvfpregs += 16;
10880 }
10881 for (i = 0; i < numvfpregs; i++) {
10882 uint64_t v = float64_val(env->vfp.regs[i]);
10883 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10884 i * 2, (uint32_t)v,
10885 i * 2 + 1, (uint32_t)(v >> 32),
10886 i, v);
10887 }
10888 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10889 }
2c0262af 10890}
a6b025d3 10891
0ecb72a5 10892void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10893{
3926cc84
AG
10894 if (is_a64(env)) {
10895 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 10896 env->condexec_bits = 0;
3926cc84
AG
10897 } else {
10898 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 10899 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
3926cc84 10900 }
d2856f1a 10901}