/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
ccd38087 28#include "internals.h"
76cad711 29#include "disas/disas.h"
57fec1fe 30#include "tcg-op.h"
1de7afc9 31#include "qemu/log.h"
534df156 32#include "qemu/bitops.h"
1d854765 33#include "arm_ldst.h"
1497c961 34
2ef6175a
RH
35#include "exec/helper-proto.h"
36#include "exec/helper-gen.h"
2c0262af 37
a7e30d84
LV
38#include "trace-tcg.h"
39
40
be5e7a76
DES
41#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
42#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
43/* currently all emulated v5 cores are also v5TE, so don't bother */
44#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
45#define ENABLE_ARCH_5J 0
46#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
47#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
48#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
49#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
81e69fb0 50#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
b5ff1b31 51
86753403 52#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 53
f570c61e 54#include "translate.h"
e12ce78d
PM
55static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
56
b5ff1b31
FB
57#if defined(CONFIG_USER_ONLY)
58#define IS_USER(s) 1
59#else
60#define IS_USER(s) (s->user)
61#endif
62
3407ad0e 63TCGv_ptr cpu_env;
ad69471c 64/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 65static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 66static TCGv_i32 cpu_R[16];
66c374de 67static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
03d05e2d
PM
68static TCGv_i64 cpu_exclusive_addr;
69static TCGv_i64 cpu_exclusive_val;
426f5abc 70#ifdef CONFIG_USER_ONLY
03d05e2d 71static TCGv_i64 cpu_exclusive_test;
426f5abc
PB
72static TCGv_i32 cpu_exclusive_info;
73#endif
ad69471c 74
b26eefb6 75/* FIXME: These should be removed. */
39d5492a 76static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 77static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 78
022c62cb 79#include "exec/gen-icount.h"
2e70f6ef 80
155c3eac
FN
81static const char *regnames[] =
82 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
83 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
84
b26eefb6
PB
85/* initialize TCG globals. */
86void arm_translate_init(void)
87{
155c3eac
FN
88 int i;
89
a7812ae4
PB
90 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
91
155c3eac
FN
92 for (i = 0; i < 16; i++) {
93 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 94 offsetof(CPUARMState, regs[i]),
155c3eac
FN
95 regnames[i]);
96 }
66c374de
AJ
97 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
98 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
99 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
100 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
101
03d05e2d 102 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 103 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
03d05e2d 104 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 105 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 106#ifdef CONFIG_USER_ONLY
03d05e2d 107 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 108 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 109 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 110 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 111#endif
155c3eac 112
14ade10f 113 a64_translate_init();
b26eefb6
PB
114}
115
39d5492a 116static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 117{
39d5492a 118 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
119 tcg_gen_ld_i32(tmp, cpu_env, offset);
120 return tmp;
121}
122
0ecb72a5 123#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 124
39d5492a 125static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
126{
127 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 128 tcg_temp_free_i32(var);
d9ba4830
PB
129}
130
131#define store_cpu_field(var, name) \
0ecb72a5 132 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 133
b26eefb6 134/* Set a variable to the value of a CPU register. */
39d5492a 135static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
136{
137 if (reg == 15) {
138 uint32_t addr;
b90372ad 139 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
140 if (s->thumb)
141 addr = (long)s->pc + 2;
142 else
143 addr = (long)s->pc + 4;
144 tcg_gen_movi_i32(var, addr);
145 } else {
155c3eac 146 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
147 }
148}
149
150/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 151static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 152{
39d5492a 153 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
154 load_reg_var(s, tmp, reg);
155 return tmp;
156}
157
158/* Set a CPU register. The source must be a temporary and will be
159 marked as dead. */
39d5492a 160static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
161{
162 if (reg == 15) {
163 tcg_gen_andi_i32(var, var, ~1);
164 s->is_jmp = DISAS_JUMP;
165 }
155c3eac 166 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 167 tcg_temp_free_i32(var);
b26eefb6
PB
168}
169
b26eefb6 170/* Value extensions. */
86831435
PB
171#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
172#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
173#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
174#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
175
1497c961
PB
176#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
177#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 178
b26eefb6 179
39d5492a 180static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 181{
39d5492a 182 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 183 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
184 tcg_temp_free_i32(tmp_mask);
185}
d9ba4830
PB
186/* Set NZCV flags from the high 4 bits of var. */
187#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
188
d4a2dc67 189static void gen_exception_internal(int excp)
d9ba4830 190{
d4a2dc67
PM
191 TCGv_i32 tcg_excp = tcg_const_i32(excp);
192
193 assert(excp_is_internal(excp));
194 gen_helper_exception_internal(cpu_env, tcg_excp);
195 tcg_temp_free_i32(tcg_excp);
196}
197
198static void gen_exception(int excp, uint32_t syndrome)
199{
200 TCGv_i32 tcg_excp = tcg_const_i32(excp);
201 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
202
203 gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
204 tcg_temp_free_i32(tcg_syn);
205 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
206}
207
39d5492a 208static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 209{
39d5492a
PM
210 TCGv_i32 tmp1 = tcg_temp_new_i32();
211 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
212 tcg_gen_ext16s_i32(tmp1, a);
213 tcg_gen_ext16s_i32(tmp2, b);
3670669c 214 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 215 tcg_temp_free_i32(tmp2);
3670669c
PB
216 tcg_gen_sari_i32(a, a, 16);
217 tcg_gen_sari_i32(b, b, 16);
218 tcg_gen_mul_i32(b, b, a);
219 tcg_gen_mov_i32(a, tmp1);
7d1b0095 220 tcg_temp_free_i32(tmp1);
3670669c
PB
221}
222
223/* Byteswap each halfword. */
39d5492a 224static void gen_rev16(TCGv_i32 var)
3670669c 225{
39d5492a 226 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
227 tcg_gen_shri_i32(tmp, var, 8);
228 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
229 tcg_gen_shli_i32(var, var, 8);
230 tcg_gen_andi_i32(var, var, 0xff00ff00);
231 tcg_gen_or_i32(var, var, tmp);
7d1b0095 232 tcg_temp_free_i32(tmp);
3670669c
PB
233}
234
235/* Byteswap low halfword and sign extend. */
39d5492a 236static void gen_revsh(TCGv_i32 var)
3670669c 237{
1a855029
AJ
238 tcg_gen_ext16u_i32(var, var);
239 tcg_gen_bswap16_i32(var, var);
240 tcg_gen_ext16s_i32(var, var);
3670669c
PB
241}
242
243/* Unsigned bitfield extract. */
39d5492a 244static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
245{
246 if (shift)
247 tcg_gen_shri_i32(var, var, shift);
248 tcg_gen_andi_i32(var, var, mask);
249}
250
251/* Signed bitfield extract. */
39d5492a 252static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
253{
254 uint32_t signbit;
255
256 if (shift)
257 tcg_gen_sari_i32(var, var, shift);
258 if (shift + width < 32) {
259 signbit = 1u << (width - 1);
260 tcg_gen_andi_i32(var, var, (1u << width) - 1);
261 tcg_gen_xori_i32(var, var, signbit);
262 tcg_gen_subi_i32(var, var, signbit);
263 }
264}
265
838fa72d 266/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 267static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 268{
838fa72d
AJ
269 TCGv_i64 tmp64 = tcg_temp_new_i64();
270
271 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 272 tcg_temp_free_i32(b);
838fa72d
AJ
273 tcg_gen_shli_i64(tmp64, tmp64, 32);
274 tcg_gen_add_i64(a, tmp64, a);
275
276 tcg_temp_free_i64(tmp64);
277 return a;
278}
279
280/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 281static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
282{
283 TCGv_i64 tmp64 = tcg_temp_new_i64();
284
285 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 286 tcg_temp_free_i32(b);
838fa72d
AJ
287 tcg_gen_shli_i64(tmp64, tmp64, 32);
288 tcg_gen_sub_i64(a, tmp64, a);
289
290 tcg_temp_free_i64(tmp64);
291 return a;
3670669c
PB
292}
293
5e3f878a 294/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 295static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 296{
39d5492a
PM
297 TCGv_i32 lo = tcg_temp_new_i32();
298 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 299 TCGv_i64 ret;
5e3f878a 300
831d7fe8 301 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 302 tcg_temp_free_i32(a);
7d1b0095 303 tcg_temp_free_i32(b);
831d7fe8
RH
304
305 ret = tcg_temp_new_i64();
306 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
307 tcg_temp_free_i32(lo);
308 tcg_temp_free_i32(hi);
831d7fe8
RH
309
310 return ret;
5e3f878a
PB
311}
312
39d5492a 313static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 314{
39d5492a
PM
315 TCGv_i32 lo = tcg_temp_new_i32();
316 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 317 TCGv_i64 ret;
5e3f878a 318
831d7fe8 319 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 320 tcg_temp_free_i32(a);
7d1b0095 321 tcg_temp_free_i32(b);
831d7fe8
RH
322
323 ret = tcg_temp_new_i64();
324 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
325 tcg_temp_free_i32(lo);
326 tcg_temp_free_i32(hi);
831d7fe8
RH
327
328 return ret;
5e3f878a
PB
329}
330
8f01245e 331/* Swap low and high halfwords. */
39d5492a 332static void gen_swap_half(TCGv_i32 var)
8f01245e 333{
39d5492a 334 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
335 tcg_gen_shri_i32(tmp, var, 16);
336 tcg_gen_shli_i32(var, var, 16);
337 tcg_gen_or_i32(var, var, tmp);
7d1b0095 338 tcg_temp_free_i32(tmp);
8f01245e
PB
339}
340
b26eefb6
PB
341/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
342 tmp = (t0 ^ t1) & 0x8000;
343 t0 &= ~0x8000;
344 t1 &= ~0x8000;
345 t0 = (t0 + t1) ^ tmp;
346 */
347
39d5492a 348static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 349{
39d5492a 350 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
351 tcg_gen_xor_i32(tmp, t0, t1);
352 tcg_gen_andi_i32(tmp, tmp, 0x8000);
353 tcg_gen_andi_i32(t0, t0, ~0x8000);
354 tcg_gen_andi_i32(t1, t1, ~0x8000);
355 tcg_gen_add_i32(t0, t0, t1);
356 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
357 tcg_temp_free_i32(tmp);
358 tcg_temp_free_i32(t1);
b26eefb6
PB
359}
360
361/* Set CF to the top bit of var. */
39d5492a 362static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 363{
66c374de 364 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
365}
366
367/* Set N and Z flags from var. */
39d5492a 368static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 369{
66c374de
AJ
370 tcg_gen_mov_i32(cpu_NF, var);
371 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
372}
373
374/* T0 += T1 + CF. */
39d5492a 375static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 376{
396e467c 377 tcg_gen_add_i32(t0, t0, t1);
66c374de 378 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
379}
380
e9bb4aa9 381/* dest = T0 + T1 + CF. */
39d5492a 382static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 383{
e9bb4aa9 384 tcg_gen_add_i32(dest, t0, t1);
66c374de 385 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
386}
387
3670669c 388/* dest = T0 - T1 + CF - 1. */
39d5492a 389static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 390{
3670669c 391 tcg_gen_sub_i32(dest, t0, t1);
66c374de 392 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 393 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
394}
395
72485ec4 396/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 397static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 398{
39d5492a 399 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
400 tcg_gen_movi_i32(tmp, 0);
401 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 402 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 403 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
404 tcg_gen_xor_i32(tmp, t0, t1);
405 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
406 tcg_temp_free_i32(tmp);
407 tcg_gen_mov_i32(dest, cpu_NF);
408}
409
49b4c31e 410/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 411static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 412{
39d5492a 413 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
414 if (TCG_TARGET_HAS_add2_i32) {
415 tcg_gen_movi_i32(tmp, 0);
416 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 417 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
418 } else {
419 TCGv_i64 q0 = tcg_temp_new_i64();
420 TCGv_i64 q1 = tcg_temp_new_i64();
421 tcg_gen_extu_i32_i64(q0, t0);
422 tcg_gen_extu_i32_i64(q1, t1);
423 tcg_gen_add_i64(q0, q0, q1);
424 tcg_gen_extu_i32_i64(q1, cpu_CF);
425 tcg_gen_add_i64(q0, q0, q1);
426 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
427 tcg_temp_free_i64(q0);
428 tcg_temp_free_i64(q1);
429 }
430 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
431 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
432 tcg_gen_xor_i32(tmp, t0, t1);
433 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
434 tcg_temp_free_i32(tmp);
435 tcg_gen_mov_i32(dest, cpu_NF);
436}
437
72485ec4 438/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 439static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 440{
39d5492a 441 TCGv_i32 tmp;
72485ec4
AJ
442 tcg_gen_sub_i32(cpu_NF, t0, t1);
443 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
444 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
445 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
446 tmp = tcg_temp_new_i32();
447 tcg_gen_xor_i32(tmp, t0, t1);
448 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
449 tcg_temp_free_i32(tmp);
450 tcg_gen_mov_i32(dest, cpu_NF);
451}
452
e77f0832 453/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 454static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 455{
39d5492a 456 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
457 tcg_gen_not_i32(tmp, t1);
458 gen_adc_CC(dest, t0, tmp);
39d5492a 459 tcg_temp_free_i32(tmp);
2de68a49
RH
460}
461
365af80e 462#define GEN_SHIFT(name) \
39d5492a 463static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 464{ \
39d5492a 465 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
466 tmp1 = tcg_temp_new_i32(); \
467 tcg_gen_andi_i32(tmp1, t1, 0xff); \
468 tmp2 = tcg_const_i32(0); \
469 tmp3 = tcg_const_i32(0x1f); \
470 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
471 tcg_temp_free_i32(tmp3); \
472 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
473 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
474 tcg_temp_free_i32(tmp2); \
475 tcg_temp_free_i32(tmp1); \
476}
477GEN_SHIFT(shl)
478GEN_SHIFT(shr)
479#undef GEN_SHIFT
480
39d5492a 481static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 482{
39d5492a 483 TCGv_i32 tmp1, tmp2;
365af80e
AJ
484 tmp1 = tcg_temp_new_i32();
485 tcg_gen_andi_i32(tmp1, t1, 0xff);
486 tmp2 = tcg_const_i32(0x1f);
487 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
488 tcg_temp_free_i32(tmp2);
489 tcg_gen_sar_i32(dest, t0, tmp1);
490 tcg_temp_free_i32(tmp1);
491}
492
39d5492a 493static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 494{
39d5492a
PM
495 TCGv_i32 c0 = tcg_const_i32(0);
496 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
497 tcg_gen_neg_i32(tmp, src);
498 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
499 tcg_temp_free_i32(c0);
500 tcg_temp_free_i32(tmp);
501}
ad69471c 502
39d5492a 503static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 504{
9a119ff6 505 if (shift == 0) {
66c374de 506 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 507 } else {
66c374de
AJ
508 tcg_gen_shri_i32(cpu_CF, var, shift);
509 if (shift != 31) {
510 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
511 }
9a119ff6 512 }
9a119ff6 513}
b26eefb6 514
9a119ff6 515/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
516static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
517 int shift, int flags)
9a119ff6
PB
518{
519 switch (shiftop) {
520 case 0: /* LSL */
521 if (shift != 0) {
522 if (flags)
523 shifter_out_im(var, 32 - shift);
524 tcg_gen_shli_i32(var, var, shift);
525 }
526 break;
527 case 1: /* LSR */
528 if (shift == 0) {
529 if (flags) {
66c374de 530 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
531 }
532 tcg_gen_movi_i32(var, 0);
533 } else {
534 if (flags)
535 shifter_out_im(var, shift - 1);
536 tcg_gen_shri_i32(var, var, shift);
537 }
538 break;
539 case 2: /* ASR */
540 if (shift == 0)
541 shift = 32;
542 if (flags)
543 shifter_out_im(var, shift - 1);
544 if (shift == 32)
545 shift = 31;
546 tcg_gen_sari_i32(var, var, shift);
547 break;
548 case 3: /* ROR/RRX */
549 if (shift != 0) {
550 if (flags)
551 shifter_out_im(var, shift - 1);
f669df27 552 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 553 } else {
39d5492a 554 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 555 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
556 if (flags)
557 shifter_out_im(var, 0);
558 tcg_gen_shri_i32(var, var, 1);
b26eefb6 559 tcg_gen_or_i32(var, var, tmp);
7d1b0095 560 tcg_temp_free_i32(tmp);
b26eefb6
PB
561 }
562 }
563};
564
39d5492a
PM
565static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
566 TCGv_i32 shift, int flags)
8984bd2e
PB
567{
568 if (flags) {
569 switch (shiftop) {
9ef39277
BS
570 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
571 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
572 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
573 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
574 }
575 } else {
576 switch (shiftop) {
365af80e
AJ
577 case 0:
578 gen_shl(var, var, shift);
579 break;
580 case 1:
581 gen_shr(var, var, shift);
582 break;
583 case 2:
584 gen_sar(var, var, shift);
585 break;
f669df27
AJ
586 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
587 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
588 }
589 }
7d1b0095 590 tcg_temp_free_i32(shift);
8984bd2e
PB
591}
592
6ddbc6e4
PB
593#define PAS_OP(pfx) \
594 switch (op2) { \
595 case 0: gen_pas_helper(glue(pfx,add16)); break; \
596 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
597 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
598 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
599 case 4: gen_pas_helper(glue(pfx,add8)); break; \
600 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
601 }
39d5492a 602static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 603{
a7812ae4 604 TCGv_ptr tmp;
6ddbc6e4
PB
605
606 switch (op1) {
607#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
608 case 1:
a7812ae4 609 tmp = tcg_temp_new_ptr();
0ecb72a5 610 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 611 PAS_OP(s)
b75263d6 612 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
613 break;
614 case 5:
a7812ae4 615 tmp = tcg_temp_new_ptr();
0ecb72a5 616 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 617 PAS_OP(u)
b75263d6 618 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
619 break;
620#undef gen_pas_helper
621#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
622 case 2:
623 PAS_OP(q);
624 break;
625 case 3:
626 PAS_OP(sh);
627 break;
628 case 6:
629 PAS_OP(uq);
630 break;
631 case 7:
632 PAS_OP(uh);
633 break;
634#undef gen_pas_helper
635 }
636}
9ee6e8bb
PB
637#undef PAS_OP
638
6ddbc6e4
PB
639/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
640#define PAS_OP(pfx) \
ed89a2f1 641 switch (op1) { \
6ddbc6e4
PB
642 case 0: gen_pas_helper(glue(pfx,add8)); break; \
643 case 1: gen_pas_helper(glue(pfx,add16)); break; \
644 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
645 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
646 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
647 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
648 }
39d5492a 649static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 650{
a7812ae4 651 TCGv_ptr tmp;
6ddbc6e4 652
ed89a2f1 653 switch (op2) {
6ddbc6e4
PB
654#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
655 case 0:
a7812ae4 656 tmp = tcg_temp_new_ptr();
0ecb72a5 657 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 658 PAS_OP(s)
b75263d6 659 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
660 break;
661 case 4:
a7812ae4 662 tmp = tcg_temp_new_ptr();
0ecb72a5 663 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 664 PAS_OP(u)
b75263d6 665 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
666 break;
667#undef gen_pas_helper
668#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
669 case 1:
670 PAS_OP(q);
671 break;
672 case 2:
673 PAS_OP(sh);
674 break;
675 case 5:
676 PAS_OP(uq);
677 break;
678 case 6:
679 PAS_OP(uh);
680 break;
681#undef gen_pas_helper
682 }
683}
9ee6e8bb
PB
684#undef PAS_OP
685
39fb730a
AG
686/*
687 * generate a conditional branch based on ARM condition code cc.
688 * This is common between ARM and Aarch64 targets.
689 */
690void arm_gen_test_cc(int cc, int label)
d9ba4830 691{
39d5492a 692 TCGv_i32 tmp;
d9ba4830
PB
693 int inv;
694
d9ba4830
PB
695 switch (cc) {
696 case 0: /* eq: Z */
66c374de 697 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
698 break;
699 case 1: /* ne: !Z */
66c374de 700 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
701 break;
702 case 2: /* cs: C */
66c374de 703 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
704 break;
705 case 3: /* cc: !C */
66c374de 706 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
707 break;
708 case 4: /* mi: N */
66c374de 709 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
710 break;
711 case 5: /* pl: !N */
66c374de 712 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
713 break;
714 case 6: /* vs: V */
66c374de 715 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
716 break;
717 case 7: /* vc: !V */
66c374de 718 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
719 break;
720 case 8: /* hi: C && !Z */
721 inv = gen_new_label();
66c374de
AJ
722 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
723 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
724 gen_set_label(inv);
725 break;
726 case 9: /* ls: !C || Z */
66c374de
AJ
727 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
728 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
729 break;
730 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
731 tmp = tcg_temp_new_i32();
732 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 733 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 734 tcg_temp_free_i32(tmp);
d9ba4830
PB
735 break;
736 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
737 tmp = tcg_temp_new_i32();
738 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 739 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 740 tcg_temp_free_i32(tmp);
d9ba4830
PB
741 break;
742 case 12: /* gt: !Z && N == V */
743 inv = gen_new_label();
66c374de
AJ
744 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
745 tmp = tcg_temp_new_i32();
746 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 747 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 748 tcg_temp_free_i32(tmp);
d9ba4830
PB
749 gen_set_label(inv);
750 break;
751 case 13: /* le: Z || N != V */
66c374de
AJ
752 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
753 tmp = tcg_temp_new_i32();
754 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 755 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 756 tcg_temp_free_i32(tmp);
d9ba4830
PB
757 break;
758 default:
759 fprintf(stderr, "Bad condition code 0x%x\n", cc);
760 abort();
761 }
d9ba4830 762}
2c0262af 763
b1d8e52e 764static const uint8_t table_logic_cc[16] = {
2c0262af
FB
765 1, /* and */
766 1, /* xor */
767 0, /* sub */
768 0, /* rsb */
769 0, /* add */
770 0, /* adc */
771 0, /* sbc */
772 0, /* rsc */
773 1, /* andl */
774 1, /* xorl */
775 0, /* cmp */
776 0, /* cmn */
777 1, /* orr */
778 1, /* mov */
779 1, /* bic */
780 1, /* mvn */
781};
3b46e624 782
d9ba4830
PB
783/* Set PC and Thumb state from an immediate address. */
784static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 785{
39d5492a 786 TCGv_i32 tmp;
99c475ab 787
b26eefb6 788 s->is_jmp = DISAS_UPDATE;
d9ba4830 789 if (s->thumb != (addr & 1)) {
7d1b0095 790 tmp = tcg_temp_new_i32();
d9ba4830 791 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 792 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 793 tcg_temp_free_i32(tmp);
d9ba4830 794 }
155c3eac 795 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
796}
797
798/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 799static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 800{
d9ba4830 801 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
802 tcg_gen_andi_i32(cpu_R[15], var, ~1);
803 tcg_gen_andi_i32(var, var, 1);
804 store_cpu_field(var, thumb);
d9ba4830
PB
805}
806
21aeb343
JR
807/* Variant of store_reg which uses branch&exchange logic when storing
808 to r15 in ARM architecture v7 and above. The source must be a temporary
809 and will be marked as dead. */
0ecb72a5 810static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 811 int reg, TCGv_i32 var)
21aeb343
JR
812{
813 if (reg == 15 && ENABLE_ARCH_7) {
814 gen_bx(s, var);
815 } else {
816 store_reg(s, reg, var);
817 }
818}
819
be5e7a76
DES
820/* Variant of store_reg which uses branch&exchange logic when storing
821 * to r15 in ARM architecture v5T and above. This is used for storing
822 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
823 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 824static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 825 int reg, TCGv_i32 var)
be5e7a76
DES
826{
827 if (reg == 15 && ENABLE_ARCH_5) {
828 gen_bx(s, var);
829 } else {
830 store_reg(s, reg, var);
831 }
832}
833
08307563
PM
834/* Abstractions of "generate code to do a guest load/store for
835 * AArch32", where a vaddr is always 32 bits (and is zero
836 * extended if we're a 64 bit core) and data is also
837 * 32 bits unless specifically doing a 64 bit access.
838 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 839 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
840 */
841#if TARGET_LONG_BITS == 32
842
09f78135
RH
843#define DO_GEN_LD(SUFF, OPC) \
844static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 845{ \
09f78135 846 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
08307563
PM
847}
848
09f78135
RH
849#define DO_GEN_ST(SUFF, OPC) \
850static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 851{ \
09f78135 852 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
08307563
PM
853}
854
855static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
856{
09f78135 857 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
08307563
PM
858}
859
860static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
861{
09f78135 862 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
08307563
PM
863}
864
865#else
866
09f78135
RH
867#define DO_GEN_LD(SUFF, OPC) \
868static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
869{ \
870 TCGv addr64 = tcg_temp_new(); \
08307563 871 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 872 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
08307563 873 tcg_temp_free(addr64); \
08307563
PM
874}
875
09f78135
RH
876#define DO_GEN_ST(SUFF, OPC) \
877static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
878{ \
879 TCGv addr64 = tcg_temp_new(); \
08307563 880 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 881 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
08307563 882 tcg_temp_free(addr64); \
08307563
PM
883}
884
885static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
886{
887 TCGv addr64 = tcg_temp_new();
888 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 889 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
08307563
PM
890 tcg_temp_free(addr64);
891}
892
893static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
894{
895 TCGv addr64 = tcg_temp_new();
896 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 897 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
08307563
PM
898 tcg_temp_free(addr64);
899}
900
901#endif
902
09f78135
RH
903DO_GEN_LD(8s, MO_SB)
904DO_GEN_LD(8u, MO_UB)
905DO_GEN_LD(16s, MO_TESW)
906DO_GEN_LD(16u, MO_TEUW)
907DO_GEN_LD(32u, MO_TEUL)
908DO_GEN_ST(8, MO_UB)
909DO_GEN_ST(16, MO_TEUW)
910DO_GEN_ST(32, MO_TEUL)
08307563 911
eaed129d 912static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 913{
40f860cd 914 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
915}
916
d4a2dc67
PM
917static inline void
918gen_set_condexec (DisasContext *s)
919{
920 if (s->condexec_mask) {
921 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
922 TCGv_i32 tmp = tcg_temp_new_i32();
923 tcg_gen_movi_i32(tmp, val);
924 store_cpu_field(tmp, condexec_bits);
925 }
926}
927
928static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
929{
930 gen_set_condexec(s);
931 gen_set_pc_im(s, s->pc - offset);
932 gen_exception_internal(excp);
933 s->is_jmp = DISAS_JUMP;
934}
935
936static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
937{
938 gen_set_condexec(s);
939 gen_set_pc_im(s, s->pc - offset);
940 gen_exception(excp, syn);
941 s->is_jmp = DISAS_JUMP;
942}
943
b5ff1b31
FB
944/* Force a TB lookup after an instruction that changes the CPU state. */
945static inline void gen_lookup_tb(DisasContext *s)
946{
a6445c52 947 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
948 s->is_jmp = DISAS_UPDATE;
949}
950
b0109805 951static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 952 TCGv_i32 var)
2c0262af 953{
1e8d4eec 954 int val, rm, shift, shiftop;
39d5492a 955 TCGv_i32 offset;
2c0262af
FB
956
957 if (!(insn & (1 << 25))) {
958 /* immediate */
959 val = insn & 0xfff;
960 if (!(insn & (1 << 23)))
961 val = -val;
537730b9 962 if (val != 0)
b0109805 963 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
964 } else {
965 /* shift/register */
966 rm = (insn) & 0xf;
967 shift = (insn >> 7) & 0x1f;
1e8d4eec 968 shiftop = (insn >> 5) & 3;
b26eefb6 969 offset = load_reg(s, rm);
9a119ff6 970 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 971 if (!(insn & (1 << 23)))
b0109805 972 tcg_gen_sub_i32(var, var, offset);
2c0262af 973 else
b0109805 974 tcg_gen_add_i32(var, var, offset);
7d1b0095 975 tcg_temp_free_i32(offset);
2c0262af
FB
976 }
977}
978
191f9a93 979static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 980 int extra, TCGv_i32 var)
2c0262af
FB
981{
982 int val, rm;
39d5492a 983 TCGv_i32 offset;
3b46e624 984
2c0262af
FB
985 if (insn & (1 << 22)) {
986 /* immediate */
987 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
988 if (!(insn & (1 << 23)))
989 val = -val;
18acad92 990 val += extra;
537730b9 991 if (val != 0)
b0109805 992 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
993 } else {
994 /* register */
191f9a93 995 if (extra)
b0109805 996 tcg_gen_addi_i32(var, var, extra);
2c0262af 997 rm = (insn) & 0xf;
b26eefb6 998 offset = load_reg(s, rm);
2c0262af 999 if (!(insn & (1 << 23)))
b0109805 1000 tcg_gen_sub_i32(var, var, offset);
2c0262af 1001 else
b0109805 1002 tcg_gen_add_i32(var, var, offset);
7d1b0095 1003 tcg_temp_free_i32(offset);
2c0262af
FB
1004 }
1005}
1006
5aaebd13
PM
1007static TCGv_ptr get_fpstatus_ptr(int neon)
1008{
1009 TCGv_ptr statusptr = tcg_temp_new_ptr();
1010 int offset;
1011 if (neon) {
0ecb72a5 1012 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1013 } else {
0ecb72a5 1014 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1015 }
1016 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1017 return statusptr;
1018}
1019
4373f3ce
PB
/* Emit a two-operand VFP arithmetic op: F0 := F0 <op> F1, using the
 * normal (non-Neon) float_status; 'dp' selects double vs single
 * precision. */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1038
605a6aed
PM
1039static inline void gen_vfp_F1_mul(int dp)
1040{
1041 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1042 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1043 if (dp) {
ae1857ec 1044 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1045 } else {
ae1857ec 1046 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1047 }
ae1857ec 1048 tcg_temp_free_ptr(fpst);
605a6aed
PM
1049}
1050
/* Like gen_vfp_neg() but put result in F1 */
static inline void gen_vfp_F1_neg(int dp)
{
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1060
4373f3ce
PB
1061static inline void gen_vfp_abs(int dp)
1062{
1063 if (dp)
1064 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1065 else
1066 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1067}
1068
1069static inline void gen_vfp_neg(int dp)
1070{
1071 if (dp)
1072 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1073 else
1074 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1075}
1076
1077static inline void gen_vfp_sqrt(int dp)
1078{
1079 if (dp)
1080 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1081 else
1082 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1083}
1084
1085static inline void gen_vfp_cmp(int dp)
1086{
1087 if (dp)
1088 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1089 else
1090 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1091}
1092
1093static inline void gen_vfp_cmpe(int dp)
1094{
1095 if (dp)
1096 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1097 else
1098 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1099}
1100
1101static inline void gen_vfp_F1_ld0(int dp)
1102{
1103 if (dp)
5b340b51 1104 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1105 else
5b340b51 1106 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1107}
1108
5500b06c
PM
/* Integer-to-float conversions: the source integer is always in the
 * single-precision register cpu_F0s; the result goes to F0d or F0s
 * depending on 'dp'.  'neon' selects which float_status to use. */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1124
5500b06c
PM
/* Float-to-integer conversions: the integer result always lands in the
 * single-precision register cpu_F0s; the source is F0d or F0s depending
 * on 'dp'.  The 'z' variants use round-to-zero (helper-side). */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1142
/* Fixed-point conversions with an immediate fraction-bit count.
 * 'round' is pasted into the helper name: float-to-fixed forms use
 * _round_to_zero, fixed-to-float forms pass an empty argument. */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1167
/* Load F0 from guest memory at 'addr': 64-bit into F0d when dp, else
 * 32-bit into F0s, using the current translation-time memory index. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
    }
}
1176
/* Store F0 to guest memory at 'addr': 64-bit from F0d when dp, else
 * 32-bit from F0s. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
    }
}
1185
8e96005d
FB
1186static inline long
1187vfp_reg_offset (int dp, int reg)
1188{
1189 if (dp)
1190 return offsetof(CPUARMState, vfp.regs[reg]);
1191 else if (reg & 1) {
1192 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1193 + offsetof(CPU_DoubleU, l.upper);
1194 } else {
1195 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1196 + offsetof(CPU_DoubleU, l.lower);
1197 }
1198}
9ee6e8bb
PB
1199
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  Each D register
   holds two such pieces, so this maps onto the S-register layout. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1209
39d5492a 1210static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1211{
39d5492a 1212 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1213 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1214 return tmp;
1215}
1216
/* Write 'var' into one 32-bit piece of a NEON register; consumes
 * (frees) the temporary. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1222
/* Read a whole NEON D register into the 64-bit temp 'var'. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1227
/* Write the 64-bit temp 'var' into a whole NEON D register. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1232
4373f3ce
PB
/* VFP values are stored as plain integers in CPUARMState, so the
 * f32/f64 load/store ops are simple aliases of the i32/i64 ones. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1237
b7bcbe95
FB
1238static inline void gen_mov_F0_vreg(int dp, int reg)
1239{
1240 if (dp)
4373f3ce 1241 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1242 else
4373f3ce 1243 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1244}
1245
1246static inline void gen_mov_F1_vreg(int dp, int reg)
1247{
1248 if (dp)
4373f3ce 1249 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1250 else
4373f3ce 1251 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1252}
1253
1254static inline void gen_mov_vreg_F0(int dp, int reg)
1255{
1256 if (dp)
4373f3ce 1257 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1258 else
4373f3ce 1259 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1260}
1261
18c9b560
AZ
1262#define ARM_CP_RW_BIT (1 << 20)
1263
/* var := iwMMXt data register wRn. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1268
/* iwMMXt data register wRn := var. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1273
39d5492a 1274static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1275{
39d5492a 1276 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1277 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1278 return var;
e677137d
PB
1279}
1280
/* iwMMXt control register wCx := var; consumes (frees) the temporary. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1286
/* wRn := M0 (the implicit iwMMXt accumulator register). */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1291
/* M0 := wRn (load the implicit iwMMXt accumulator). */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1296
/* M0 := M0 | wRn (64-bit bitwise OR, via the V1 scratch register). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1302
/* M0 := M0 & wRn (64-bit bitwise AND). */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1308
/* M0 := M0 ^ wRn (64-bit bitwise XOR). */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1314
/* Generate "M0 := M0 <op> wRn" wrappers around the iwMMXt helpers.
 * The plain form takes no env; the _ENV forms pass cpu_env so the
 * helper can read/update saturation state; _ENV_SIZE instantiates
 * byte/word/long variants; _ENV1 is the unary "M0 := op(M0)" form. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

/* Multiply/accumulate and sum-of-absolute-differences ops. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1394
e677137d
PB
1395static void gen_op_iwmmxt_set_mup(void)
1396{
39d5492a 1397 TCGv_i32 tmp;
e677137d
PB
1398 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1399 tcg_gen_ori_i32(tmp, tmp, 2);
1400 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1401}
1402
1403static void gen_op_iwmmxt_set_cup(void)
1404{
39d5492a 1405 TCGv_i32 tmp;
e677137d
PB
1406 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1407 tcg_gen_ori_i32(tmp, tmp, 1);
1408 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1409}
1410
/* Compute the N/Z SIMD flags from M0 (via helper) and store them into
 * the wCASF arithmetic status flags register. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1417
/* M0 += (low 32 bits of wRn, zero-extended to 64 bits). */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1424
39d5492a
PM
/* Compute the effective address of an iwMMXt load/store into 'dest' and
 * perform base-register writeback as encoded.  Bit 24 selects pre- vs
 * post-indexing, bit 23 up/down, bit 21 writeback; the offset is an
 * 8-bit immediate scaled by bits [8:7].  Returns nonzero for an
 * unencodable combination (post-indexed without writeback, down). */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed: address is base +/- offset, optional writeback. */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed: address is the unmodified base; the adjusted
         * base is always written back. */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1459
39d5492a 1460static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1461{
1462 int rd = (insn >> 0) & 0xf;
39d5492a 1463 TCGv_i32 tmp;
18c9b560 1464
da6b5335
FN
1465 if (insn & (1 << 8)) {
1466 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1467 return 1;
da6b5335
FN
1468 } else {
1469 tmp = iwmmxt_load_creg(rd);
1470 }
1471 } else {
7d1b0095 1472 tmp = tcg_temp_new_i32();
da6b5335
FN
1473 iwmmxt_load_reg(cpu_V0, rd);
1474 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1475 }
1476 tcg_gen_andi_i32(tmp, tmp, mask);
1477 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1478 tcg_temp_free_i32(tmp);
18c9b560
AZ
1479 return 0;
1480}
1481
a1c7273b 1482/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1483 (ie. an undefined instruction). */
0ecb72a5 1484static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1485{
1486 int rd, wrd;
1487 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1488 TCGv_i32 addr;
1489 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1490
1491 if ((insn & 0x0e000e00) == 0x0c000000) {
1492 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1493 wrd = insn & 0xf;
1494 rdlo = (insn >> 12) & 0xf;
1495 rdhi = (insn >> 16) & 0xf;
1496 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1497 iwmmxt_load_reg(cpu_V0, wrd);
1498 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1499 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1500 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1501 } else { /* TMCRR */
da6b5335
FN
1502 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1503 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1504 gen_op_iwmmxt_set_mup();
1505 }
1506 return 0;
1507 }
1508
1509 wrd = (insn >> 12) & 0xf;
7d1b0095 1510 addr = tcg_temp_new_i32();
da6b5335 1511 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1512 tcg_temp_free_i32(addr);
18c9b560 1513 return 1;
da6b5335 1514 }
18c9b560
AZ
1515 if (insn & ARM_CP_RW_BIT) {
1516 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1517 tmp = tcg_temp_new_i32();
6ce2faf4 1518 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
da6b5335 1519 iwmmxt_store_creg(wrd, tmp);
18c9b560 1520 } else {
e677137d
PB
1521 i = 1;
1522 if (insn & (1 << 8)) {
1523 if (insn & (1 << 22)) { /* WLDRD */
6ce2faf4 1524 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1525 i = 0;
1526 } else { /* WLDRW wRd */
29531141 1527 tmp = tcg_temp_new_i32();
6ce2faf4 1528 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
e677137d
PB
1529 }
1530 } else {
29531141 1531 tmp = tcg_temp_new_i32();
e677137d 1532 if (insn & (1 << 22)) { /* WLDRH */
6ce2faf4 1533 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
e677137d 1534 } else { /* WLDRB */
6ce2faf4 1535 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
e677137d
PB
1536 }
1537 }
1538 if (i) {
1539 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1540 tcg_temp_free_i32(tmp);
e677137d 1541 }
18c9b560
AZ
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 }
1544 } else {
1545 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1546 tmp = iwmmxt_load_creg(wrd);
6ce2faf4 1547 gen_aa32_st32(tmp, addr, get_mem_index(s));
18c9b560
AZ
1548 } else {
1549 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1550 tmp = tcg_temp_new_i32();
e677137d
PB
1551 if (insn & (1 << 8)) {
1552 if (insn & (1 << 22)) { /* WSTRD */
6ce2faf4 1553 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1554 } else { /* WSTRW wRd */
1555 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1556 gen_aa32_st32(tmp, addr, get_mem_index(s));
e677137d
PB
1557 }
1558 } else {
1559 if (insn & (1 << 22)) { /* WSTRH */
1560 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1561 gen_aa32_st16(tmp, addr, get_mem_index(s));
e677137d
PB
1562 } else { /* WSTRB */
1563 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1564 gen_aa32_st8(tmp, addr, get_mem_index(s));
e677137d
PB
1565 }
1566 }
18c9b560 1567 }
29531141 1568 tcg_temp_free_i32(tmp);
18c9b560 1569 }
7d1b0095 1570 tcg_temp_free_i32(addr);
18c9b560
AZ
1571 return 0;
1572 }
1573
1574 if ((insn & 0x0f000000) != 0x0e000000)
1575 return 1;
1576
1577 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1578 case 0x000: /* WOR */
1579 wrd = (insn >> 12) & 0xf;
1580 rd0 = (insn >> 0) & 0xf;
1581 rd1 = (insn >> 16) & 0xf;
1582 gen_op_iwmmxt_movq_M0_wRn(rd0);
1583 gen_op_iwmmxt_orq_M0_wRn(rd1);
1584 gen_op_iwmmxt_setpsr_nz();
1585 gen_op_iwmmxt_movq_wRn_M0(wrd);
1586 gen_op_iwmmxt_set_mup();
1587 gen_op_iwmmxt_set_cup();
1588 break;
1589 case 0x011: /* TMCR */
1590 if (insn & 0xf)
1591 return 1;
1592 rd = (insn >> 12) & 0xf;
1593 wrd = (insn >> 16) & 0xf;
1594 switch (wrd) {
1595 case ARM_IWMMXT_wCID:
1596 case ARM_IWMMXT_wCASF:
1597 break;
1598 case ARM_IWMMXT_wCon:
1599 gen_op_iwmmxt_set_cup();
1600 /* Fall through. */
1601 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1602 tmp = iwmmxt_load_creg(wrd);
1603 tmp2 = load_reg(s, rd);
f669df27 1604 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1605 tcg_temp_free_i32(tmp2);
da6b5335 1606 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1607 break;
1608 case ARM_IWMMXT_wCGR0:
1609 case ARM_IWMMXT_wCGR1:
1610 case ARM_IWMMXT_wCGR2:
1611 case ARM_IWMMXT_wCGR3:
1612 gen_op_iwmmxt_set_cup();
da6b5335
FN
1613 tmp = load_reg(s, rd);
1614 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1615 break;
1616 default:
1617 return 1;
1618 }
1619 break;
1620 case 0x100: /* WXOR */
1621 wrd = (insn >> 12) & 0xf;
1622 rd0 = (insn >> 0) & 0xf;
1623 rd1 = (insn >> 16) & 0xf;
1624 gen_op_iwmmxt_movq_M0_wRn(rd0);
1625 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1626 gen_op_iwmmxt_setpsr_nz();
1627 gen_op_iwmmxt_movq_wRn_M0(wrd);
1628 gen_op_iwmmxt_set_mup();
1629 gen_op_iwmmxt_set_cup();
1630 break;
1631 case 0x111: /* TMRC */
1632 if (insn & 0xf)
1633 return 1;
1634 rd = (insn >> 12) & 0xf;
1635 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1636 tmp = iwmmxt_load_creg(wrd);
1637 store_reg(s, rd, tmp);
18c9b560
AZ
1638 break;
1639 case 0x300: /* WANDN */
1640 wrd = (insn >> 12) & 0xf;
1641 rd0 = (insn >> 0) & 0xf;
1642 rd1 = (insn >> 16) & 0xf;
1643 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1644 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1645 gen_op_iwmmxt_andq_M0_wRn(rd1);
1646 gen_op_iwmmxt_setpsr_nz();
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 gen_op_iwmmxt_set_cup();
1650 break;
1651 case 0x200: /* WAND */
1652 wrd = (insn >> 12) & 0xf;
1653 rd0 = (insn >> 0) & 0xf;
1654 rd1 = (insn >> 16) & 0xf;
1655 gen_op_iwmmxt_movq_M0_wRn(rd0);
1656 gen_op_iwmmxt_andq_M0_wRn(rd1);
1657 gen_op_iwmmxt_setpsr_nz();
1658 gen_op_iwmmxt_movq_wRn_M0(wrd);
1659 gen_op_iwmmxt_set_mup();
1660 gen_op_iwmmxt_set_cup();
1661 break;
1662 case 0x810: case 0xa10: /* WMADD */
1663 wrd = (insn >> 12) & 0xf;
1664 rd0 = (insn >> 0) & 0xf;
1665 rd1 = (insn >> 16) & 0xf;
1666 gen_op_iwmmxt_movq_M0_wRn(rd0);
1667 if (insn & (1 << 21))
1668 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1669 else
1670 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1671 gen_op_iwmmxt_movq_wRn_M0(wrd);
1672 gen_op_iwmmxt_set_mup();
1673 break;
1674 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1675 wrd = (insn >> 12) & 0xf;
1676 rd0 = (insn >> 16) & 0xf;
1677 rd1 = (insn >> 0) & 0xf;
1678 gen_op_iwmmxt_movq_M0_wRn(rd0);
1679 switch ((insn >> 22) & 3) {
1680 case 0:
1681 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1682 break;
1683 case 1:
1684 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1685 break;
1686 case 2:
1687 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1688 break;
1689 case 3:
1690 return 1;
1691 }
1692 gen_op_iwmmxt_movq_wRn_M0(wrd);
1693 gen_op_iwmmxt_set_mup();
1694 gen_op_iwmmxt_set_cup();
1695 break;
1696 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1697 wrd = (insn >> 12) & 0xf;
1698 rd0 = (insn >> 16) & 0xf;
1699 rd1 = (insn >> 0) & 0xf;
1700 gen_op_iwmmxt_movq_M0_wRn(rd0);
1701 switch ((insn >> 22) & 3) {
1702 case 0:
1703 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1704 break;
1705 case 1:
1706 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1707 break;
1708 case 2:
1709 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1710 break;
1711 case 3:
1712 return 1;
1713 }
1714 gen_op_iwmmxt_movq_wRn_M0(wrd);
1715 gen_op_iwmmxt_set_mup();
1716 gen_op_iwmmxt_set_cup();
1717 break;
1718 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1719 wrd = (insn >> 12) & 0xf;
1720 rd0 = (insn >> 16) & 0xf;
1721 rd1 = (insn >> 0) & 0xf;
1722 gen_op_iwmmxt_movq_M0_wRn(rd0);
1723 if (insn & (1 << 22))
1724 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1725 else
1726 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1727 if (!(insn & (1 << 20)))
1728 gen_op_iwmmxt_addl_M0_wRn(wrd);
1729 gen_op_iwmmxt_movq_wRn_M0(wrd);
1730 gen_op_iwmmxt_set_mup();
1731 break;
1732 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1733 wrd = (insn >> 12) & 0xf;
1734 rd0 = (insn >> 16) & 0xf;
1735 rd1 = (insn >> 0) & 0xf;
1736 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1737 if (insn & (1 << 21)) {
1738 if (insn & (1 << 20))
1739 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1740 else
1741 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1742 } else {
1743 if (insn & (1 << 20))
1744 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1745 else
1746 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1747 }
18c9b560
AZ
1748 gen_op_iwmmxt_movq_wRn_M0(wrd);
1749 gen_op_iwmmxt_set_mup();
1750 break;
1751 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1752 wrd = (insn >> 12) & 0xf;
1753 rd0 = (insn >> 16) & 0xf;
1754 rd1 = (insn >> 0) & 0xf;
1755 gen_op_iwmmxt_movq_M0_wRn(rd0);
1756 if (insn & (1 << 21))
1757 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1758 else
1759 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1760 if (!(insn & (1 << 20))) {
e677137d
PB
1761 iwmmxt_load_reg(cpu_V1, wrd);
1762 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1763 }
1764 gen_op_iwmmxt_movq_wRn_M0(wrd);
1765 gen_op_iwmmxt_set_mup();
1766 break;
1767 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1768 wrd = (insn >> 12) & 0xf;
1769 rd0 = (insn >> 16) & 0xf;
1770 rd1 = (insn >> 0) & 0xf;
1771 gen_op_iwmmxt_movq_M0_wRn(rd0);
1772 switch ((insn >> 22) & 3) {
1773 case 0:
1774 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1775 break;
1776 case 1:
1777 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1778 break;
1779 case 2:
1780 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1781 break;
1782 case 3:
1783 return 1;
1784 }
1785 gen_op_iwmmxt_movq_wRn_M0(wrd);
1786 gen_op_iwmmxt_set_mup();
1787 gen_op_iwmmxt_set_cup();
1788 break;
1789 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1790 wrd = (insn >> 12) & 0xf;
1791 rd0 = (insn >> 16) & 0xf;
1792 rd1 = (insn >> 0) & 0xf;
1793 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1794 if (insn & (1 << 22)) {
1795 if (insn & (1 << 20))
1796 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1797 else
1798 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1799 } else {
1800 if (insn & (1 << 20))
1801 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1802 else
1803 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1804 }
18c9b560
AZ
1805 gen_op_iwmmxt_movq_wRn_M0(wrd);
1806 gen_op_iwmmxt_set_mup();
1807 gen_op_iwmmxt_set_cup();
1808 break;
1809 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 16) & 0xf;
1812 rd1 = (insn >> 0) & 0xf;
1813 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1814 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1815 tcg_gen_andi_i32(tmp, tmp, 7);
1816 iwmmxt_load_reg(cpu_V1, rd1);
1817 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1818 tcg_temp_free_i32(tmp);
18c9b560
AZ
1819 gen_op_iwmmxt_movq_wRn_M0(wrd);
1820 gen_op_iwmmxt_set_mup();
1821 break;
1822 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1823 if (((insn >> 6) & 3) == 3)
1824 return 1;
18c9b560
AZ
1825 rd = (insn >> 12) & 0xf;
1826 wrd = (insn >> 16) & 0xf;
da6b5335 1827 tmp = load_reg(s, rd);
18c9b560
AZ
1828 gen_op_iwmmxt_movq_M0_wRn(wrd);
1829 switch ((insn >> 6) & 3) {
1830 case 0:
da6b5335
FN
1831 tmp2 = tcg_const_i32(0xff);
1832 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1833 break;
1834 case 1:
da6b5335
FN
1835 tmp2 = tcg_const_i32(0xffff);
1836 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1837 break;
1838 case 2:
da6b5335
FN
1839 tmp2 = tcg_const_i32(0xffffffff);
1840 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1841 break;
da6b5335 1842 default:
39d5492a
PM
1843 TCGV_UNUSED_I32(tmp2);
1844 TCGV_UNUSED_I32(tmp3);
18c9b560 1845 }
da6b5335 1846 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1847 tcg_temp_free_i32(tmp3);
1848 tcg_temp_free_i32(tmp2);
7d1b0095 1849 tcg_temp_free_i32(tmp);
18c9b560
AZ
1850 gen_op_iwmmxt_movq_wRn_M0(wrd);
1851 gen_op_iwmmxt_set_mup();
1852 break;
1853 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1854 rd = (insn >> 12) & 0xf;
1855 wrd = (insn >> 16) & 0xf;
da6b5335 1856 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1857 return 1;
1858 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1859 tmp = tcg_temp_new_i32();
18c9b560
AZ
1860 switch ((insn >> 22) & 3) {
1861 case 0:
da6b5335
FN
1862 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1863 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1864 if (insn & 8) {
1865 tcg_gen_ext8s_i32(tmp, tmp);
1866 } else {
1867 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1868 }
1869 break;
1870 case 1:
da6b5335
FN
1871 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1872 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1873 if (insn & 8) {
1874 tcg_gen_ext16s_i32(tmp, tmp);
1875 } else {
1876 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1877 }
1878 break;
1879 case 2:
da6b5335
FN
1880 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1881 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1882 break;
18c9b560 1883 }
da6b5335 1884 store_reg(s, rd, tmp);
18c9b560
AZ
1885 break;
1886 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1887 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1888 return 1;
da6b5335 1889 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1890 switch ((insn >> 22) & 3) {
1891 case 0:
da6b5335 1892 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1893 break;
1894 case 1:
da6b5335 1895 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1896 break;
1897 case 2:
da6b5335 1898 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1899 break;
18c9b560 1900 }
da6b5335
FN
1901 tcg_gen_shli_i32(tmp, tmp, 28);
1902 gen_set_nzcv(tmp);
7d1b0095 1903 tcg_temp_free_i32(tmp);
18c9b560
AZ
1904 break;
1905 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1906 if (((insn >> 6) & 3) == 3)
1907 return 1;
18c9b560
AZ
1908 rd = (insn >> 12) & 0xf;
1909 wrd = (insn >> 16) & 0xf;
da6b5335 1910 tmp = load_reg(s, rd);
18c9b560
AZ
1911 switch ((insn >> 6) & 3) {
1912 case 0:
da6b5335 1913 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1914 break;
1915 case 1:
da6b5335 1916 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1917 break;
1918 case 2:
da6b5335 1919 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1920 break;
18c9b560 1921 }
7d1b0095 1922 tcg_temp_free_i32(tmp);
18c9b560
AZ
1923 gen_op_iwmmxt_movq_wRn_M0(wrd);
1924 gen_op_iwmmxt_set_mup();
1925 break;
1926 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1927 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1928 return 1;
da6b5335 1929 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1930 tmp2 = tcg_temp_new_i32();
da6b5335 1931 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1932 switch ((insn >> 22) & 3) {
1933 case 0:
1934 for (i = 0; i < 7; i ++) {
da6b5335
FN
1935 tcg_gen_shli_i32(tmp2, tmp2, 4);
1936 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1937 }
1938 break;
1939 case 1:
1940 for (i = 0; i < 3; i ++) {
da6b5335
FN
1941 tcg_gen_shli_i32(tmp2, tmp2, 8);
1942 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1943 }
1944 break;
1945 case 2:
da6b5335
FN
1946 tcg_gen_shli_i32(tmp2, tmp2, 16);
1947 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1948 break;
18c9b560 1949 }
da6b5335 1950 gen_set_nzcv(tmp);
7d1b0095
PM
1951 tcg_temp_free_i32(tmp2);
1952 tcg_temp_free_i32(tmp);
18c9b560
AZ
1953 break;
1954 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1955 wrd = (insn >> 12) & 0xf;
1956 rd0 = (insn >> 16) & 0xf;
1957 gen_op_iwmmxt_movq_M0_wRn(rd0);
1958 switch ((insn >> 22) & 3) {
1959 case 0:
e677137d 1960 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1961 break;
1962 case 1:
e677137d 1963 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1964 break;
1965 case 2:
e677137d 1966 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1967 break;
1968 case 3:
1969 return 1;
1970 }
1971 gen_op_iwmmxt_movq_wRn_M0(wrd);
1972 gen_op_iwmmxt_set_mup();
1973 break;
1974 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1975 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1976 return 1;
da6b5335 1977 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1978 tmp2 = tcg_temp_new_i32();
da6b5335 1979 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1980 switch ((insn >> 22) & 3) {
1981 case 0:
1982 for (i = 0; i < 7; i ++) {
da6b5335
FN
1983 tcg_gen_shli_i32(tmp2, tmp2, 4);
1984 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1985 }
1986 break;
1987 case 1:
1988 for (i = 0; i < 3; i ++) {
da6b5335
FN
1989 tcg_gen_shli_i32(tmp2, tmp2, 8);
1990 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1991 }
1992 break;
1993 case 2:
da6b5335
FN
1994 tcg_gen_shli_i32(tmp2, tmp2, 16);
1995 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1996 break;
18c9b560 1997 }
da6b5335 1998 gen_set_nzcv(tmp);
7d1b0095
PM
1999 tcg_temp_free_i32(tmp2);
2000 tcg_temp_free_i32(tmp);
18c9b560
AZ
2001 break;
2002 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2003 rd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
da6b5335 2005 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2006 return 1;
2007 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2008 tmp = tcg_temp_new_i32();
18c9b560
AZ
2009 switch ((insn >> 22) & 3) {
2010 case 0:
da6b5335 2011 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2012 break;
2013 case 1:
da6b5335 2014 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2015 break;
2016 case 2:
da6b5335 2017 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2018 break;
18c9b560 2019 }
da6b5335 2020 store_reg(s, rd, tmp);
18c9b560
AZ
2021 break;
2022 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2023 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2024 wrd = (insn >> 12) & 0xf;
2025 rd0 = (insn >> 16) & 0xf;
2026 rd1 = (insn >> 0) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0);
2028 switch ((insn >> 22) & 3) {
2029 case 0:
2030 if (insn & (1 << 21))
2031 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2032 else
2033 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2034 break;
2035 case 1:
2036 if (insn & (1 << 21))
2037 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2038 else
2039 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2040 break;
2041 case 2:
2042 if (insn & (1 << 21))
2043 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2044 else
2045 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2046 break;
2047 case 3:
2048 return 1;
2049 }
2050 gen_op_iwmmxt_movq_wRn_M0(wrd);
2051 gen_op_iwmmxt_set_mup();
2052 gen_op_iwmmxt_set_cup();
2053 break;
2054 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2055 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2056 wrd = (insn >> 12) & 0xf;
2057 rd0 = (insn >> 16) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0);
2059 switch ((insn >> 22) & 3) {
2060 case 0:
2061 if (insn & (1 << 21))
2062 gen_op_iwmmxt_unpacklsb_M0();
2063 else
2064 gen_op_iwmmxt_unpacklub_M0();
2065 break;
2066 case 1:
2067 if (insn & (1 << 21))
2068 gen_op_iwmmxt_unpacklsw_M0();
2069 else
2070 gen_op_iwmmxt_unpackluw_M0();
2071 break;
2072 case 2:
2073 if (insn & (1 << 21))
2074 gen_op_iwmmxt_unpacklsl_M0();
2075 else
2076 gen_op_iwmmxt_unpacklul_M0();
2077 break;
2078 case 3:
2079 return 1;
2080 }
2081 gen_op_iwmmxt_movq_wRn_M0(wrd);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2084 break;
2085 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2086 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
2090 switch ((insn >> 22) & 3) {
2091 case 0:
2092 if (insn & (1 << 21))
2093 gen_op_iwmmxt_unpackhsb_M0();
2094 else
2095 gen_op_iwmmxt_unpackhub_M0();
2096 break;
2097 case 1:
2098 if (insn & (1 << 21))
2099 gen_op_iwmmxt_unpackhsw_M0();
2100 else
2101 gen_op_iwmmxt_unpackhuw_M0();
2102 break;
2103 case 2:
2104 if (insn & (1 << 21))
2105 gen_op_iwmmxt_unpackhsl_M0();
2106 else
2107 gen_op_iwmmxt_unpackhul_M0();
2108 break;
2109 case 3:
2110 return 1;
2111 }
2112 gen_op_iwmmxt_movq_wRn_M0(wrd);
2113 gen_op_iwmmxt_set_mup();
2114 gen_op_iwmmxt_set_cup();
2115 break;
2116 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2117 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2118 if (((insn >> 22) & 3) == 0)
2119 return 1;
18c9b560
AZ
2120 wrd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2123 tmp = tcg_temp_new_i32();
da6b5335 2124 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2125 tcg_temp_free_i32(tmp);
18c9b560 2126 return 1;
da6b5335 2127 }
18c9b560 2128 switch ((insn >> 22) & 3) {
18c9b560 2129 case 1:
477955bd 2130 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2131 break;
2132 case 2:
477955bd 2133 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2134 break;
2135 case 3:
477955bd 2136 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2137 break;
2138 }
7d1b0095 2139 tcg_temp_free_i32(tmp);
18c9b560
AZ
2140 gen_op_iwmmxt_movq_wRn_M0(wrd);
2141 gen_op_iwmmxt_set_mup();
2142 gen_op_iwmmxt_set_cup();
2143 break;
2144 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2145 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2146 if (((insn >> 22) & 3) == 0)
2147 return 1;
18c9b560
AZ
2148 wrd = (insn >> 12) & 0xf;
2149 rd0 = (insn >> 16) & 0xf;
2150 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2151 tmp = tcg_temp_new_i32();
da6b5335 2152 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2153 tcg_temp_free_i32(tmp);
18c9b560 2154 return 1;
da6b5335 2155 }
18c9b560 2156 switch ((insn >> 22) & 3) {
18c9b560 2157 case 1:
477955bd 2158 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2159 break;
2160 case 2:
477955bd 2161 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2162 break;
2163 case 3:
477955bd 2164 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2165 break;
2166 }
7d1b0095 2167 tcg_temp_free_i32(tmp);
18c9b560
AZ
2168 gen_op_iwmmxt_movq_wRn_M0(wrd);
2169 gen_op_iwmmxt_set_mup();
2170 gen_op_iwmmxt_set_cup();
2171 break;
2172 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2173 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2174 if (((insn >> 22) & 3) == 0)
2175 return 1;
18c9b560
AZ
2176 wrd = (insn >> 12) & 0xf;
2177 rd0 = (insn >> 16) & 0xf;
2178 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2179 tmp = tcg_temp_new_i32();
da6b5335 2180 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2181 tcg_temp_free_i32(tmp);
18c9b560 2182 return 1;
da6b5335 2183 }
18c9b560 2184 switch ((insn >> 22) & 3) {
18c9b560 2185 case 1:
477955bd 2186 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2187 break;
2188 case 2:
477955bd 2189 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2190 break;
2191 case 3:
477955bd 2192 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2193 break;
2194 }
7d1b0095 2195 tcg_temp_free_i32(tmp);
18c9b560
AZ
2196 gen_op_iwmmxt_movq_wRn_M0(wrd);
2197 gen_op_iwmmxt_set_mup();
2198 gen_op_iwmmxt_set_cup();
2199 break;
2200 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2201 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2202 if (((insn >> 22) & 3) == 0)
2203 return 1;
18c9b560
AZ
2204 wrd = (insn >> 12) & 0xf;
2205 rd0 = (insn >> 16) & 0xf;
2206 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2207 tmp = tcg_temp_new_i32();
18c9b560 2208 switch ((insn >> 22) & 3) {
18c9b560 2209 case 1:
da6b5335 2210 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2211 tcg_temp_free_i32(tmp);
18c9b560 2212 return 1;
da6b5335 2213 }
477955bd 2214 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2215 break;
2216 case 2:
da6b5335 2217 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2218 tcg_temp_free_i32(tmp);
18c9b560 2219 return 1;
da6b5335 2220 }
477955bd 2221 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2222 break;
2223 case 3:
da6b5335 2224 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2225 tcg_temp_free_i32(tmp);
18c9b560 2226 return 1;
da6b5335 2227 }
477955bd 2228 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2229 break;
2230 }
7d1b0095 2231 tcg_temp_free_i32(tmp);
18c9b560
AZ
2232 gen_op_iwmmxt_movq_wRn_M0(wrd);
2233 gen_op_iwmmxt_set_mup();
2234 gen_op_iwmmxt_set_cup();
2235 break;
2236 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2237 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2238 wrd = (insn >> 12) & 0xf;
2239 rd0 = (insn >> 16) & 0xf;
2240 rd1 = (insn >> 0) & 0xf;
2241 gen_op_iwmmxt_movq_M0_wRn(rd0);
2242 switch ((insn >> 22) & 3) {
2243 case 0:
2244 if (insn & (1 << 21))
2245 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2246 else
2247 gen_op_iwmmxt_minub_M0_wRn(rd1);
2248 break;
2249 case 1:
2250 if (insn & (1 << 21))
2251 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2252 else
2253 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2254 break;
2255 case 2:
2256 if (insn & (1 << 21))
2257 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2258 else
2259 gen_op_iwmmxt_minul_M0_wRn(rd1);
2260 break;
2261 case 3:
2262 return 1;
2263 }
2264 gen_op_iwmmxt_movq_wRn_M0(wrd);
2265 gen_op_iwmmxt_set_mup();
2266 break;
2267 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2268 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2269 wrd = (insn >> 12) & 0xf;
2270 rd0 = (insn >> 16) & 0xf;
2271 rd1 = (insn >> 0) & 0xf;
2272 gen_op_iwmmxt_movq_M0_wRn(rd0);
2273 switch ((insn >> 22) & 3) {
2274 case 0:
2275 if (insn & (1 << 21))
2276 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2277 else
2278 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2279 break;
2280 case 1:
2281 if (insn & (1 << 21))
2282 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2283 else
2284 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2285 break;
2286 case 2:
2287 if (insn & (1 << 21))
2288 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2289 else
2290 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2291 break;
2292 case 3:
2293 return 1;
2294 }
2295 gen_op_iwmmxt_movq_wRn_M0(wrd);
2296 gen_op_iwmmxt_set_mup();
2297 break;
2298 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2299 case 0x402: case 0x502: case 0x602: case 0x702:
2300 wrd = (insn >> 12) & 0xf;
2301 rd0 = (insn >> 16) & 0xf;
2302 rd1 = (insn >> 0) & 0xf;
2303 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2304 tmp = tcg_const_i32((insn >> 20) & 3);
2305 iwmmxt_load_reg(cpu_V1, rd1);
2306 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2307 tcg_temp_free_i32(tmp);
18c9b560
AZ
2308 gen_op_iwmmxt_movq_wRn_M0(wrd);
2309 gen_op_iwmmxt_set_mup();
2310 break;
2311 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2312 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2313 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2314 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2315 wrd = (insn >> 12) & 0xf;
2316 rd0 = (insn >> 16) & 0xf;
2317 rd1 = (insn >> 0) & 0xf;
2318 gen_op_iwmmxt_movq_M0_wRn(rd0);
2319 switch ((insn >> 20) & 0xf) {
2320 case 0x0:
2321 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2322 break;
2323 case 0x1:
2324 gen_op_iwmmxt_subub_M0_wRn(rd1);
2325 break;
2326 case 0x3:
2327 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2328 break;
2329 case 0x4:
2330 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2331 break;
2332 case 0x5:
2333 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2334 break;
2335 case 0x7:
2336 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2337 break;
2338 case 0x8:
2339 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2340 break;
2341 case 0x9:
2342 gen_op_iwmmxt_subul_M0_wRn(rd1);
2343 break;
2344 case 0xb:
2345 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2346 break;
2347 default:
2348 return 1;
2349 }
2350 gen_op_iwmmxt_movq_wRn_M0(wrd);
2351 gen_op_iwmmxt_set_mup();
2352 gen_op_iwmmxt_set_cup();
2353 break;
2354 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2355 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2356 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2357 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2358 wrd = (insn >> 12) & 0xf;
2359 rd0 = (insn >> 16) & 0xf;
2360 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2361 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2362 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2363 tcg_temp_free_i32(tmp);
18c9b560
AZ
2364 gen_op_iwmmxt_movq_wRn_M0(wrd);
2365 gen_op_iwmmxt_set_mup();
2366 gen_op_iwmmxt_set_cup();
2367 break;
2368 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2369 case 0x418: case 0x518: case 0x618: case 0x718:
2370 case 0x818: case 0x918: case 0xa18: case 0xb18:
2371 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2372 wrd = (insn >> 12) & 0xf;
2373 rd0 = (insn >> 16) & 0xf;
2374 rd1 = (insn >> 0) & 0xf;
2375 gen_op_iwmmxt_movq_M0_wRn(rd0);
2376 switch ((insn >> 20) & 0xf) {
2377 case 0x0:
2378 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2379 break;
2380 case 0x1:
2381 gen_op_iwmmxt_addub_M0_wRn(rd1);
2382 break;
2383 case 0x3:
2384 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2385 break;
2386 case 0x4:
2387 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2388 break;
2389 case 0x5:
2390 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2391 break;
2392 case 0x7:
2393 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2394 break;
2395 case 0x8:
2396 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2397 break;
2398 case 0x9:
2399 gen_op_iwmmxt_addul_M0_wRn(rd1);
2400 break;
2401 case 0xb:
2402 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2403 break;
2404 default:
2405 return 1;
2406 }
2407 gen_op_iwmmxt_movq_wRn_M0(wrd);
2408 gen_op_iwmmxt_set_mup();
2409 gen_op_iwmmxt_set_cup();
2410 break;
2411 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2412 case 0x408: case 0x508: case 0x608: case 0x708:
2413 case 0x808: case 0x908: case 0xa08: case 0xb08:
2414 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2415 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2416 return 1;
18c9b560
AZ
2417 wrd = (insn >> 12) & 0xf;
2418 rd0 = (insn >> 16) & 0xf;
2419 rd1 = (insn >> 0) & 0xf;
2420 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2421 switch ((insn >> 22) & 3) {
18c9b560
AZ
2422 case 1:
2423 if (insn & (1 << 21))
2424 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2425 else
2426 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2427 break;
2428 case 2:
2429 if (insn & (1 << 21))
2430 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2431 else
2432 gen_op_iwmmxt_packul_M0_wRn(rd1);
2433 break;
2434 case 3:
2435 if (insn & (1 << 21))
2436 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2437 else
2438 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2439 break;
2440 }
2441 gen_op_iwmmxt_movq_wRn_M0(wrd);
2442 gen_op_iwmmxt_set_mup();
2443 gen_op_iwmmxt_set_cup();
2444 break;
2445 case 0x201: case 0x203: case 0x205: case 0x207:
2446 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2447 case 0x211: case 0x213: case 0x215: case 0x217:
2448 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2449 wrd = (insn >> 5) & 0xf;
2450 rd0 = (insn >> 12) & 0xf;
2451 rd1 = (insn >> 0) & 0xf;
2452 if (rd0 == 0xf || rd1 == 0xf)
2453 return 1;
2454 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2455 tmp = load_reg(s, rd0);
2456 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2457 switch ((insn >> 16) & 0xf) {
2458 case 0x0: /* TMIA */
da6b5335 2459 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2460 break;
2461 case 0x8: /* TMIAPH */
da6b5335 2462 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2463 break;
2464 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2465 if (insn & (1 << 16))
da6b5335 2466 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2467 if (insn & (1 << 17))
da6b5335
FN
2468 tcg_gen_shri_i32(tmp2, tmp2, 16);
2469 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2470 break;
2471 default:
7d1b0095
PM
2472 tcg_temp_free_i32(tmp2);
2473 tcg_temp_free_i32(tmp);
18c9b560
AZ
2474 return 1;
2475 }
7d1b0095
PM
2476 tcg_temp_free_i32(tmp2);
2477 tcg_temp_free_i32(tmp);
18c9b560
AZ
2478 gen_op_iwmmxt_movq_wRn_M0(wrd);
2479 gen_op_iwmmxt_set_mup();
2480 break;
2481 default:
2482 return 1;
2483 }
2484
2485 return 0;
2486}
2487
a1c7273b 2488/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2489 (ie. an undefined instruction). */
0ecb72a5 2490static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2491{
2492 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2493 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2494
2495 if ((insn & 0x0ff00f10) == 0x0e200010) {
2496 /* Multiply with Internal Accumulate Format */
2497 rd0 = (insn >> 12) & 0xf;
2498 rd1 = insn & 0xf;
2499 acc = (insn >> 5) & 7;
2500
2501 if (acc != 0)
2502 return 1;
2503
3a554c0f
FN
2504 tmp = load_reg(s, rd0);
2505 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2506 switch ((insn >> 16) & 0xf) {
2507 case 0x0: /* MIA */
3a554c0f 2508 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2509 break;
2510 case 0x8: /* MIAPH */
3a554c0f 2511 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2512 break;
2513 case 0xc: /* MIABB */
2514 case 0xd: /* MIABT */
2515 case 0xe: /* MIATB */
2516 case 0xf: /* MIATT */
18c9b560 2517 if (insn & (1 << 16))
3a554c0f 2518 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2519 if (insn & (1 << 17))
3a554c0f
FN
2520 tcg_gen_shri_i32(tmp2, tmp2, 16);
2521 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2522 break;
2523 default:
2524 return 1;
2525 }
7d1b0095
PM
2526 tcg_temp_free_i32(tmp2);
2527 tcg_temp_free_i32(tmp);
18c9b560
AZ
2528
2529 gen_op_iwmmxt_movq_wRn_M0(acc);
2530 return 0;
2531 }
2532
2533 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2534 /* Internal Accumulator Access Format */
2535 rdhi = (insn >> 16) & 0xf;
2536 rdlo = (insn >> 12) & 0xf;
2537 acc = insn & 7;
2538
2539 if (acc != 0)
2540 return 1;
2541
2542 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2543 iwmmxt_load_reg(cpu_V0, acc);
2544 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2545 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2546 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2547 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2548 } else { /* MAR */
3a554c0f
FN
2549 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2550 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2551 }
2552 return 0;
2553 }
2554
2555 return 1;
2556}
2557
9ee6e8bb
PB
/* Helpers for extracting VFP register numbers from an instruction.
 * A single-precision register number is 5 bits: the 4-bit "bigbit" field
 * holds the upper bits and the 1-bit "smallbit" field the lowest bit.
 * VFP_REG_SHR copes with the shift count going negative when bigbit < 1.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register number: on VFP3+ the smallbit becomes the
 * high (5th) bit, giving 32 D registers.  Pre-VFP3 cores have only 16,
 * so a set smallbit means UNDEF -- note this macro does "return 1"
 * straight out of the enclosing decoder function in that case, and it
 * relies on a variable named "env" being in scope.
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D (destination), N and M operand registers. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2577
4373f3ce 2578/* Move between integer and VFP cores. */
39d5492a 2579static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2580{
39d5492a 2581 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2582 tcg_gen_mov_i32(tmp, cpu_F0s);
2583 return tmp;
2584}
2585
39d5492a 2586static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2587{
2588 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2589 tcg_temp_free_i32(tmp);
4373f3ce
PB
2590}
2591
39d5492a 2592static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2593{
39d5492a 2594 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2595 if (shift)
2596 tcg_gen_shri_i32(var, var, shift);
86831435 2597 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2598 tcg_gen_shli_i32(tmp, var, 8);
2599 tcg_gen_or_i32(var, var, tmp);
2600 tcg_gen_shli_i32(tmp, var, 16);
2601 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2602 tcg_temp_free_i32(tmp);
ad69471c
PB
2603}
2604
39d5492a 2605static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2606{
39d5492a 2607 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2608 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2609 tcg_gen_shli_i32(tmp, var, 16);
2610 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2611 tcg_temp_free_i32(tmp);
ad69471c
PB
2612}
2613
39d5492a 2614static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2615{
39d5492a 2616 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2617 tcg_gen_andi_i32(var, var, 0xffff0000);
2618 tcg_gen_shri_i32(tmp, var, 16);
2619 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2620 tcg_temp_free_i32(tmp);
ad69471c
PB
2621}
2622
39d5492a 2623static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2624{
2625 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2626 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2627 switch (size) {
2628 case 0:
6ce2faf4 2629 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2630 gen_neon_dup_u8(tmp, 0);
2631 break;
2632 case 1:
6ce2faf4 2633 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2634 gen_neon_dup_low16(tmp);
2635 break;
2636 case 2:
6ce2faf4 2637 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2638 break;
2639 default: /* Avoid compiler warnings. */
2640 abort();
2641 }
2642 return tmp;
2643}
2644
04731fb5
WN
/* Emit code for the VSEL instruction: conditionally select between FP
 * registers rn and rm based on the condition encoded in insn[21:20],
 * writing the result to rd.  dp selects double precision.
 * Always returns 0 (the caller has already validated the encoding).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flag values to 64 bits so they can feed
         * 64-bit movcond: ZF zero-extended (only == 0 matters), NF and
         * VF sign-extended so their sign bits are preserved.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds: select on !Z first, then override with frm
             * when N != V (i.e. the "gt" condition fails).
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* As for the dp case: two movconds. */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2753
40cfacdd
WN
2754static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2755 uint32_t rm, uint32_t dp)
2756{
2757 uint32_t vmin = extract32(insn, 6, 1);
2758 TCGv_ptr fpst = get_fpstatus_ptr(0);
2759
2760 if (dp) {
2761 TCGv_i64 frn, frm, dest;
2762
2763 frn = tcg_temp_new_i64();
2764 frm = tcg_temp_new_i64();
2765 dest = tcg_temp_new_i64();
2766
2767 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2768 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2769 if (vmin) {
f71a2ae5 2770 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2771 } else {
f71a2ae5 2772 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2773 }
2774 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2775 tcg_temp_free_i64(frn);
2776 tcg_temp_free_i64(frm);
2777 tcg_temp_free_i64(dest);
2778 } else {
2779 TCGv_i32 frn, frm, dest;
2780
2781 frn = tcg_temp_new_i32();
2782 frm = tcg_temp_new_i32();
2783 dest = tcg_temp_new_i32();
2784
2785 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2786 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2787 if (vmin) {
f71a2ae5 2788 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2789 } else {
f71a2ae5 2790 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2791 }
2792 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2793 tcg_temp_free_i32(frn);
2794 tcg_temp_free_i32(frm);
2795 tcg_temp_free_i32(dest);
2796 }
2797
2798 tcg_temp_free_ptr(fpst);
2799 return 0;
2800}
2801
7655f39b
WN
2802static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2803 int rounding)
2804{
2805 TCGv_ptr fpst = get_fpstatus_ptr(0);
2806 TCGv_i32 tcg_rmode;
2807
2808 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2809 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2810
2811 if (dp) {
2812 TCGv_i64 tcg_op;
2813 TCGv_i64 tcg_res;
2814 tcg_op = tcg_temp_new_i64();
2815 tcg_res = tcg_temp_new_i64();
2816 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2817 gen_helper_rintd(tcg_res, tcg_op, fpst);
2818 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2819 tcg_temp_free_i64(tcg_op);
2820 tcg_temp_free_i64(tcg_res);
2821 } else {
2822 TCGv_i32 tcg_op;
2823 TCGv_i32 tcg_res;
2824 tcg_op = tcg_temp_new_i32();
2825 tcg_res = tcg_temp_new_i32();
2826 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2827 gen_helper_rints(tcg_res, tcg_op, fpst);
2828 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2829 tcg_temp_free_i32(tcg_op);
2830 tcg_temp_free_i32(tcg_res);
2831 }
2832
2833 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2834 tcg_temp_free_i32(tcg_rmode);
2835
2836 tcg_temp_free_ptr(fpst);
2837 return 0;
2838}
2839
c9975a83
WN
/* Emit code for VCVTA/VCVTN/VCVTP/VCVTM: FP to integer conversion using
 * the rounding mode encoded in the instruction rather than the FPSCR
 * mode.  insn bit 7 selects a signed result; dp selects a double
 * precision source.  Always returns 0.
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* No fixed-point shift for these encodings. */
    tcg_shift = tcg_const_i32(0);

    /* Install the requested rounding mode; set_rmode leaves the old
     * mode in tcg_rmode so the second call below swaps it back.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The 32-bit integer result lives in the low half of tcg_res. */
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the previous rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
2897
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,  /* RM 0b00: round to nearest, ties away from zero */
    FPROUNDING_TIEEVEN,  /* RM 0b01: round to nearest, ties to even */
    FPROUNDING_POSINF,   /* RM 0b10: round towards +infinity */
    FPROUNDING_NEGINF,   /* RM 0b11: round towards -infinity */
};
2908
04731fb5
WN
2909static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2910{
2911 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
2912
2913 if (!arm_feature(env, ARM_FEATURE_V8)) {
2914 return 1;
2915 }
2916
2917 if (dp) {
2918 VFP_DREG_D(rd, insn);
2919 VFP_DREG_N(rn, insn);
2920 VFP_DREG_M(rm, insn);
2921 } else {
2922 rd = VFP_SREG_D(insn);
2923 rn = VFP_SREG_N(insn);
2924 rm = VFP_SREG_M(insn);
2925 }
2926
2927 if ((insn & 0x0f800e50) == 0x0e000a00) {
2928 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
2929 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
2930 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
2931 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
2932 /* VRINTA, VRINTN, VRINTP, VRINTM */
2933 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2934 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
2935 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
2936 /* VCVTA, VCVTN, VCVTP, VCVTM */
2937 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2938 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
2939 }
2940 return 1;
2941}
2942
a1c7273b 2943/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2944 (ie. an undefined instruction). */
0ecb72a5 2945static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2946{
2947 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2948 int dp, veclen;
39d5492a
PM
2949 TCGv_i32 addr;
2950 TCGv_i32 tmp;
2951 TCGv_i32 tmp2;
b7bcbe95 2952
40f137e1
PB
2953 if (!arm_feature(env, ARM_FEATURE_VFP))
2954 return 1;
2955
2c7ffc41
PM
2956 /* FIXME: this access check should not take precedence over UNDEF
2957 * for invalid encodings; we will generate incorrect syndrome information
2958 * for attempts to execute invalid vfp/neon encodings with FP disabled.
2959 */
2960 if (!s->cpacr_fpen) {
2961 gen_exception_insn(s, 4, EXCP_UDEF,
2962 syn_fp_access_trap(1, 0xe, s->thumb));
2963 return 0;
2964 }
2965
5df8bac1 2966 if (!s->vfp_enabled) {
9ee6e8bb 2967 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2968 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2969 return 1;
2970 rn = (insn >> 16) & 0xf;
a50c0f51
PM
2971 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
2972 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 2973 return 1;
a50c0f51 2974 }
40f137e1 2975 }
6a57f3eb
WN
2976
2977 if (extract32(insn, 28, 4) == 0xf) {
2978 /* Encodings with T=1 (Thumb) or unconditional (ARM):
2979 * only used in v8 and above.
2980 */
04731fb5 2981 return disas_vfp_v8_insn(env, s, insn);
6a57f3eb
WN
2982 }
2983
b7bcbe95
FB
2984 dp = ((insn & 0xf00) == 0xb00);
2985 switch ((insn >> 24) & 0xf) {
2986 case 0xe:
2987 if (insn & (1 << 4)) {
2988 /* single register transfer */
b7bcbe95
FB
2989 rd = (insn >> 12) & 0xf;
2990 if (dp) {
9ee6e8bb
PB
2991 int size;
2992 int pass;
2993
2994 VFP_DREG_N(rn, insn);
2995 if (insn & 0xf)
b7bcbe95 2996 return 1;
9ee6e8bb
PB
2997 if (insn & 0x00c00060
2998 && !arm_feature(env, ARM_FEATURE_NEON))
2999 return 1;
3000
3001 pass = (insn >> 21) & 1;
3002 if (insn & (1 << 22)) {
3003 size = 0;
3004 offset = ((insn >> 5) & 3) * 8;
3005 } else if (insn & (1 << 5)) {
3006 size = 1;
3007 offset = (insn & (1 << 6)) ? 16 : 0;
3008 } else {
3009 size = 2;
3010 offset = 0;
3011 }
18c9b560 3012 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3013 /* vfp->arm */
ad69471c 3014 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3015 switch (size) {
3016 case 0:
9ee6e8bb 3017 if (offset)
ad69471c 3018 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3019 if (insn & (1 << 23))
ad69471c 3020 gen_uxtb(tmp);
9ee6e8bb 3021 else
ad69471c 3022 gen_sxtb(tmp);
9ee6e8bb
PB
3023 break;
3024 case 1:
9ee6e8bb
PB
3025 if (insn & (1 << 23)) {
3026 if (offset) {
ad69471c 3027 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3028 } else {
ad69471c 3029 gen_uxth(tmp);
9ee6e8bb
PB
3030 }
3031 } else {
3032 if (offset) {
ad69471c 3033 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3034 } else {
ad69471c 3035 gen_sxth(tmp);
9ee6e8bb
PB
3036 }
3037 }
3038 break;
3039 case 2:
9ee6e8bb
PB
3040 break;
3041 }
ad69471c 3042 store_reg(s, rd, tmp);
b7bcbe95
FB
3043 } else {
3044 /* arm->vfp */
ad69471c 3045 tmp = load_reg(s, rd);
9ee6e8bb
PB
3046 if (insn & (1 << 23)) {
3047 /* VDUP */
3048 if (size == 0) {
ad69471c 3049 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3050 } else if (size == 1) {
ad69471c 3051 gen_neon_dup_low16(tmp);
9ee6e8bb 3052 }
cbbccffc 3053 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3054 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3055 tcg_gen_mov_i32(tmp2, tmp);
3056 neon_store_reg(rn, n, tmp2);
3057 }
3058 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3059 } else {
3060 /* VMOV */
3061 switch (size) {
3062 case 0:
ad69471c 3063 tmp2 = neon_load_reg(rn, pass);
d593c48e 3064 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3065 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3066 break;
3067 case 1:
ad69471c 3068 tmp2 = neon_load_reg(rn, pass);
d593c48e 3069 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3070 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3071 break;
3072 case 2:
9ee6e8bb
PB
3073 break;
3074 }
ad69471c 3075 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3076 }
b7bcbe95 3077 }
9ee6e8bb
PB
3078 } else { /* !dp */
3079 if ((insn & 0x6f) != 0x00)
3080 return 1;
3081 rn = VFP_SREG_N(insn);
18c9b560 3082 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3083 /* vfp->arm */
3084 if (insn & (1 << 21)) {
3085 /* system register */
40f137e1 3086 rn >>= 1;
9ee6e8bb 3087
b7bcbe95 3088 switch (rn) {
40f137e1 3089 case ARM_VFP_FPSID:
4373f3ce 3090 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3091 VFP3 restricts all id registers to privileged
3092 accesses. */
3093 if (IS_USER(s)
3094 && arm_feature(env, ARM_FEATURE_VFP3))
3095 return 1;
4373f3ce 3096 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3097 break;
40f137e1 3098 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3099 if (IS_USER(s))
3100 return 1;
4373f3ce 3101 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3102 break;
40f137e1
PB
3103 case ARM_VFP_FPINST:
3104 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3105 /* Not present in VFP3. */
3106 if (IS_USER(s)
3107 || arm_feature(env, ARM_FEATURE_VFP3))
3108 return 1;
4373f3ce 3109 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3110 break;
40f137e1 3111 case ARM_VFP_FPSCR:
601d70b9 3112 if (rd == 15) {
4373f3ce
PB
3113 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3114 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3115 } else {
7d1b0095 3116 tmp = tcg_temp_new_i32();
4373f3ce
PB
3117 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3118 }
b7bcbe95 3119 break;
a50c0f51
PM
3120 case ARM_VFP_MVFR2:
3121 if (!arm_feature(env, ARM_FEATURE_V8)) {
3122 return 1;
3123 }
3124 /* fall through */
9ee6e8bb
PB
3125 case ARM_VFP_MVFR0:
3126 case ARM_VFP_MVFR1:
3127 if (IS_USER(s)
06ed5d66 3128 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 3129 return 1;
4373f3ce 3130 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3131 break;
b7bcbe95
FB
3132 default:
3133 return 1;
3134 }
3135 } else {
3136 gen_mov_F0_vreg(0, rn);
4373f3ce 3137 tmp = gen_vfp_mrs();
b7bcbe95
FB
3138 }
3139 if (rd == 15) {
b5ff1b31 3140 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3141 gen_set_nzcv(tmp);
7d1b0095 3142 tcg_temp_free_i32(tmp);
4373f3ce
PB
3143 } else {
3144 store_reg(s, rd, tmp);
3145 }
b7bcbe95
FB
3146 } else {
3147 /* arm->vfp */
b7bcbe95 3148 if (insn & (1 << 21)) {
40f137e1 3149 rn >>= 1;
b7bcbe95
FB
3150 /* system register */
3151 switch (rn) {
40f137e1 3152 case ARM_VFP_FPSID:
9ee6e8bb
PB
3153 case ARM_VFP_MVFR0:
3154 case ARM_VFP_MVFR1:
b7bcbe95
FB
3155 /* Writes are ignored. */
3156 break;
40f137e1 3157 case ARM_VFP_FPSCR:
e4c1cfa5 3158 tmp = load_reg(s, rd);
4373f3ce 3159 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3160 tcg_temp_free_i32(tmp);
b5ff1b31 3161 gen_lookup_tb(s);
b7bcbe95 3162 break;
40f137e1 3163 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3164 if (IS_USER(s))
3165 return 1;
71b3c3de
JR
3166 /* TODO: VFP subarchitecture support.
3167 * For now, keep the EN bit only */
e4c1cfa5 3168 tmp = load_reg(s, rd);
71b3c3de 3169 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3170 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3171 gen_lookup_tb(s);
3172 break;
3173 case ARM_VFP_FPINST:
3174 case ARM_VFP_FPINST2:
e4c1cfa5 3175 tmp = load_reg(s, rd);
4373f3ce 3176 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3177 break;
b7bcbe95
FB
3178 default:
3179 return 1;
3180 }
3181 } else {
e4c1cfa5 3182 tmp = load_reg(s, rd);
4373f3ce 3183 gen_vfp_msr(tmp);
b7bcbe95
FB
3184 gen_mov_vreg_F0(0, rn);
3185 }
3186 }
3187 }
3188 } else {
3189 /* data processing */
3190 /* The opcode is in bits 23, 21, 20 and 6. */
3191 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3192 if (dp) {
3193 if (op == 15) {
3194 /* rn is opcode */
3195 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3196 } else {
3197 /* rn is register number */
9ee6e8bb 3198 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3199 }
3200
239c20c7
WN
3201 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3202 ((rn & 0x1e) == 0x6))) {
3203 /* Integer or single/half precision destination. */
9ee6e8bb 3204 rd = VFP_SREG_D(insn);
b7bcbe95 3205 } else {
9ee6e8bb 3206 VFP_DREG_D(rd, insn);
b7bcbe95 3207 }
04595bf6 3208 if (op == 15 &&
239c20c7
WN
3209 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3210 ((rn & 0x1e) == 0x4))) {
3211 /* VCVT from int or half precision is always from S reg
3212 * regardless of dp bit. VCVT with immediate frac_bits
3213 * has same format as SREG_M.
04595bf6
PM
3214 */
3215 rm = VFP_SREG_M(insn);
b7bcbe95 3216 } else {
9ee6e8bb 3217 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3218 }
3219 } else {
9ee6e8bb 3220 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3221 if (op == 15 && rn == 15) {
3222 /* Double precision destination. */
9ee6e8bb
PB
3223 VFP_DREG_D(rd, insn);
3224 } else {
3225 rd = VFP_SREG_D(insn);
3226 }
04595bf6
PM
3227 /* NB that we implicitly rely on the encoding for the frac_bits
3228 * in VCVT of fixed to float being the same as that of an SREG_M
3229 */
9ee6e8bb 3230 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3231 }
3232
69d1fc22 3233 veclen = s->vec_len;
b7bcbe95
FB
3234 if (op == 15 && rn > 3)
3235 veclen = 0;
3236
3237 /* Shut up compiler warnings. */
3238 delta_m = 0;
3239 delta_d = 0;
3240 bank_mask = 0;
3b46e624 3241
b7bcbe95
FB
3242 if (veclen > 0) {
3243 if (dp)
3244 bank_mask = 0xc;
3245 else
3246 bank_mask = 0x18;
3247
3248 /* Figure out what type of vector operation this is. */
3249 if ((rd & bank_mask) == 0) {
3250 /* scalar */
3251 veclen = 0;
3252 } else {
3253 if (dp)
69d1fc22 3254 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3255 else
69d1fc22 3256 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3257
3258 if ((rm & bank_mask) == 0) {
3259 /* mixed scalar/vector */
3260 delta_m = 0;
3261 } else {
3262 /* vector */
3263 delta_m = delta_d;
3264 }
3265 }
3266 }
3267
3268 /* Load the initial operands. */
3269 if (op == 15) {
3270 switch (rn) {
3271 case 16:
3272 case 17:
3273 /* Integer source */
3274 gen_mov_F0_vreg(0, rm);
3275 break;
3276 case 8:
3277 case 9:
3278 /* Compare */
3279 gen_mov_F0_vreg(dp, rd);
3280 gen_mov_F1_vreg(dp, rm);
3281 break;
3282 case 10:
3283 case 11:
3284 /* Compare with zero */
3285 gen_mov_F0_vreg(dp, rd);
3286 gen_vfp_F1_ld0(dp);
3287 break;
9ee6e8bb
PB
3288 case 20:
3289 case 21:
3290 case 22:
3291 case 23:
644ad806
PB
3292 case 28:
3293 case 29:
3294 case 30:
3295 case 31:
9ee6e8bb
PB
3296 /* Source and destination the same. */
3297 gen_mov_F0_vreg(dp, rd);
3298 break;
6e0c0ed1
PM
3299 case 4:
3300 case 5:
3301 case 6:
3302 case 7:
239c20c7
WN
3303 /* VCVTB, VCVTT: only present with the halfprec extension
3304 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3305 * (we choose to UNDEF)
6e0c0ed1 3306 */
239c20c7
WN
3307 if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
3308 !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3309 return 1;
3310 }
239c20c7
WN
3311 if (!extract32(rn, 1, 1)) {
3312 /* Half precision source. */
3313 gen_mov_F0_vreg(0, rm);
3314 break;
3315 }
6e0c0ed1 3316 /* Otherwise fall through */
b7bcbe95
FB
3317 default:
3318 /* One source operand. */
3319 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3320 break;
b7bcbe95
FB
3321 }
3322 } else {
3323 /* Two source operands. */
3324 gen_mov_F0_vreg(dp, rn);
3325 gen_mov_F1_vreg(dp, rm);
3326 }
3327
3328 for (;;) {
3329 /* Perform the calculation. */
3330 switch (op) {
605a6aed
PM
3331 case 0: /* VMLA: fd + (fn * fm) */
3332 /* Note that order of inputs to the add matters for NaNs */
3333 gen_vfp_F1_mul(dp);
3334 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3335 gen_vfp_add(dp);
3336 break;
605a6aed 3337 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3338 gen_vfp_mul(dp);
605a6aed
PM
3339 gen_vfp_F1_neg(dp);
3340 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3341 gen_vfp_add(dp);
3342 break;
605a6aed
PM
3343 case 2: /* VNMLS: -fd + (fn * fm) */
3344 /* Note that it isn't valid to replace (-A + B) with (B - A)
3345 * or similar plausible looking simplifications
3346 * because this will give wrong results for NaNs.
3347 */
3348 gen_vfp_F1_mul(dp);
3349 gen_mov_F0_vreg(dp, rd);
3350 gen_vfp_neg(dp);
3351 gen_vfp_add(dp);
b7bcbe95 3352 break;
605a6aed 3353 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3354 gen_vfp_mul(dp);
605a6aed
PM
3355 gen_vfp_F1_neg(dp);
3356 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3357 gen_vfp_neg(dp);
605a6aed 3358 gen_vfp_add(dp);
b7bcbe95
FB
3359 break;
3360 case 4: /* mul: fn * fm */
3361 gen_vfp_mul(dp);
3362 break;
3363 case 5: /* nmul: -(fn * fm) */
3364 gen_vfp_mul(dp);
3365 gen_vfp_neg(dp);
3366 break;
3367 case 6: /* add: fn + fm */
3368 gen_vfp_add(dp);
3369 break;
3370 case 7: /* sub: fn - fm */
3371 gen_vfp_sub(dp);
3372 break;
3373 case 8: /* div: fn / fm */
3374 gen_vfp_div(dp);
3375 break;
da97f52c
PM
3376 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3377 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3378 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3379 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3380 /* These are fused multiply-add, and must be done as one
3381 * floating point operation with no rounding between the
3382 * multiplication and addition steps.
3383 * NB that doing the negations here as separate steps is
3384 * correct : an input NaN should come out with its sign bit
3385 * flipped if it is a negated-input.
3386 */
3387 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3388 return 1;
3389 }
3390 if (dp) {
3391 TCGv_ptr fpst;
3392 TCGv_i64 frd;
3393 if (op & 1) {
3394 /* VFNMS, VFMS */
3395 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3396 }
3397 frd = tcg_temp_new_i64();
3398 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3399 if (op & 2) {
3400 /* VFNMA, VFNMS */
3401 gen_helper_vfp_negd(frd, frd);
3402 }
3403 fpst = get_fpstatus_ptr(0);
3404 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3405 cpu_F1d, frd, fpst);
3406 tcg_temp_free_ptr(fpst);
3407 tcg_temp_free_i64(frd);
3408 } else {
3409 TCGv_ptr fpst;
3410 TCGv_i32 frd;
3411 if (op & 1) {
3412 /* VFNMS, VFMS */
3413 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3414 }
3415 frd = tcg_temp_new_i32();
3416 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3417 if (op & 2) {
3418 gen_helper_vfp_negs(frd, frd);
3419 }
3420 fpst = get_fpstatus_ptr(0);
3421 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3422 cpu_F1s, frd, fpst);
3423 tcg_temp_free_ptr(fpst);
3424 tcg_temp_free_i32(frd);
3425 }
3426 break;
9ee6e8bb
PB
3427 case 14: /* fconst */
3428 if (!arm_feature(env, ARM_FEATURE_VFP3))
3429 return 1;
3430
3431 n = (insn << 12) & 0x80000000;
3432 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3433 if (dp) {
3434 if (i & 0x40)
3435 i |= 0x3f80;
3436 else
3437 i |= 0x4000;
3438 n |= i << 16;
4373f3ce 3439 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3440 } else {
3441 if (i & 0x40)
3442 i |= 0x780;
3443 else
3444 i |= 0x800;
3445 n |= i << 19;
5b340b51 3446 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3447 }
9ee6e8bb 3448 break;
b7bcbe95
FB
3449 case 15: /* extension space */
3450 switch (rn) {
3451 case 0: /* cpy */
3452 /* no-op */
3453 break;
3454 case 1: /* abs */
3455 gen_vfp_abs(dp);
3456 break;
3457 case 2: /* neg */
3458 gen_vfp_neg(dp);
3459 break;
3460 case 3: /* sqrt */
3461 gen_vfp_sqrt(dp);
3462 break;
239c20c7 3463 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3464 tmp = gen_vfp_mrs();
3465 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3466 if (dp) {
3467 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3468 cpu_env);
3469 } else {
3470 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3471 cpu_env);
3472 }
7d1b0095 3473 tcg_temp_free_i32(tmp);
60011498 3474 break;
239c20c7 3475 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3476 tmp = gen_vfp_mrs();
3477 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3478 if (dp) {
3479 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3480 cpu_env);
3481 } else {
3482 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3483 cpu_env);
3484 }
7d1b0095 3485 tcg_temp_free_i32(tmp);
60011498 3486 break;
239c20c7 3487 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3488 tmp = tcg_temp_new_i32();
239c20c7
WN
3489 if (dp) {
3490 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3491 cpu_env);
3492 } else {
3493 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3494 cpu_env);
3495 }
60011498
PB
3496 gen_mov_F0_vreg(0, rd);
3497 tmp2 = gen_vfp_mrs();
3498 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3499 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3500 tcg_temp_free_i32(tmp2);
60011498
PB
3501 gen_vfp_msr(tmp);
3502 break;
239c20c7 3503 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3504 tmp = tcg_temp_new_i32();
239c20c7
WN
3505 if (dp) {
3506 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3507 cpu_env);
3508 } else {
3509 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3510 cpu_env);
3511 }
60011498
PB
3512 tcg_gen_shli_i32(tmp, tmp, 16);
3513 gen_mov_F0_vreg(0, rd);
3514 tmp2 = gen_vfp_mrs();
3515 tcg_gen_ext16u_i32(tmp2, tmp2);
3516 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3517 tcg_temp_free_i32(tmp2);
60011498
PB
3518 gen_vfp_msr(tmp);
3519 break;
b7bcbe95
FB
3520 case 8: /* cmp */
3521 gen_vfp_cmp(dp);
3522 break;
3523 case 9: /* cmpe */
3524 gen_vfp_cmpe(dp);
3525 break;
3526 case 10: /* cmpz */
3527 gen_vfp_cmp(dp);
3528 break;
3529 case 11: /* cmpez */
3530 gen_vfp_F1_ld0(dp);
3531 gen_vfp_cmpe(dp);
3532 break;
664c6733
WN
3533 case 12: /* vrintr */
3534 {
3535 TCGv_ptr fpst = get_fpstatus_ptr(0);
3536 if (dp) {
3537 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3538 } else {
3539 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3540 }
3541 tcg_temp_free_ptr(fpst);
3542 break;
3543 }
a290c62a
WN
3544 case 13: /* vrintz */
3545 {
3546 TCGv_ptr fpst = get_fpstatus_ptr(0);
3547 TCGv_i32 tcg_rmode;
3548 tcg_rmode = tcg_const_i32(float_round_to_zero);
3549 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3550 if (dp) {
3551 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3552 } else {
3553 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3554 }
3555 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3556 tcg_temp_free_i32(tcg_rmode);
3557 tcg_temp_free_ptr(fpst);
3558 break;
3559 }
4e82bc01
WN
3560 case 14: /* vrintx */
3561 {
3562 TCGv_ptr fpst = get_fpstatus_ptr(0);
3563 if (dp) {
3564 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3565 } else {
3566 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3567 }
3568 tcg_temp_free_ptr(fpst);
3569 break;
3570 }
b7bcbe95
FB
3571 case 15: /* single<->double conversion */
3572 if (dp)
4373f3ce 3573 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3574 else
4373f3ce 3575 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3576 break;
3577 case 16: /* fuito */
5500b06c 3578 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3579 break;
3580 case 17: /* fsito */
5500b06c 3581 gen_vfp_sito(dp, 0);
b7bcbe95 3582 break;
9ee6e8bb
PB
3583 case 20: /* fshto */
3584 if (!arm_feature(env, ARM_FEATURE_VFP3))
3585 return 1;
5500b06c 3586 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3587 break;
3588 case 21: /* fslto */
3589 if (!arm_feature(env, ARM_FEATURE_VFP3))
3590 return 1;
5500b06c 3591 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3592 break;
3593 case 22: /* fuhto */
3594 if (!arm_feature(env, ARM_FEATURE_VFP3))
3595 return 1;
5500b06c 3596 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3597 break;
3598 case 23: /* fulto */
3599 if (!arm_feature(env, ARM_FEATURE_VFP3))
3600 return 1;
5500b06c 3601 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3602 break;
b7bcbe95 3603 case 24: /* ftoui */
5500b06c 3604 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3605 break;
3606 case 25: /* ftouiz */
5500b06c 3607 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3608 break;
3609 case 26: /* ftosi */
5500b06c 3610 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3611 break;
3612 case 27: /* ftosiz */
5500b06c 3613 gen_vfp_tosiz(dp, 0);
b7bcbe95 3614 break;
9ee6e8bb
PB
3615 case 28: /* ftosh */
3616 if (!arm_feature(env, ARM_FEATURE_VFP3))
3617 return 1;
5500b06c 3618 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3619 break;
3620 case 29: /* ftosl */
3621 if (!arm_feature(env, ARM_FEATURE_VFP3))
3622 return 1;
5500b06c 3623 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3624 break;
3625 case 30: /* ftouh */
3626 if (!arm_feature(env, ARM_FEATURE_VFP3))
3627 return 1;
5500b06c 3628 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3629 break;
3630 case 31: /* ftoul */
3631 if (!arm_feature(env, ARM_FEATURE_VFP3))
3632 return 1;
5500b06c 3633 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3634 break;
b7bcbe95 3635 default: /* undefined */
b7bcbe95
FB
3636 return 1;
3637 }
3638 break;
3639 default: /* undefined */
b7bcbe95
FB
3640 return 1;
3641 }
3642
3643 /* Write back the result. */
239c20c7
WN
3644 if (op == 15 && (rn >= 8 && rn <= 11)) {
3645 /* Comparison, do nothing. */
3646 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3647 (rn & 0x1e) == 0x6)) {
3648 /* VCVT double to int: always integer result.
3649 * VCVT double to half precision is always a single
3650 * precision result.
3651 */
b7bcbe95 3652 gen_mov_vreg_F0(0, rd);
239c20c7 3653 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3654 /* conversion */
3655 gen_mov_vreg_F0(!dp, rd);
239c20c7 3656 } else {
b7bcbe95 3657 gen_mov_vreg_F0(dp, rd);
239c20c7 3658 }
b7bcbe95
FB
3659
3660 /* break out of the loop if we have finished */
3661 if (veclen == 0)
3662 break;
3663
3664 if (op == 15 && delta_m == 0) {
3665 /* single source one-many */
3666 while (veclen--) {
3667 rd = ((rd + delta_d) & (bank_mask - 1))
3668 | (rd & bank_mask);
3669 gen_mov_vreg_F0(dp, rd);
3670 }
3671 break;
3672 }
3673 /* Setup the next operands. */
3674 veclen--;
3675 rd = ((rd + delta_d) & (bank_mask - 1))
3676 | (rd & bank_mask);
3677
3678 if (op == 15) {
3679 /* One source operand. */
3680 rm = ((rm + delta_m) & (bank_mask - 1))
3681 | (rm & bank_mask);
3682 gen_mov_F0_vreg(dp, rm);
3683 } else {
3684 /* Two source operands. */
3685 rn = ((rn + delta_d) & (bank_mask - 1))
3686 | (rn & bank_mask);
3687 gen_mov_F0_vreg(dp, rn);
3688 if (delta_m) {
3689 rm = ((rm + delta_m) & (bank_mask - 1))
3690 | (rm & bank_mask);
3691 gen_mov_F1_vreg(dp, rm);
3692 }
3693 }
3694 }
3695 }
3696 break;
3697 case 0xc:
3698 case 0xd:
8387da81 3699 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3700 /* two-register transfer */
3701 rn = (insn >> 16) & 0xf;
3702 rd = (insn >> 12) & 0xf;
3703 if (dp) {
9ee6e8bb
PB
3704 VFP_DREG_M(rm, insn);
3705 } else {
3706 rm = VFP_SREG_M(insn);
3707 }
b7bcbe95 3708
18c9b560 3709 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3710 /* vfp->arm */
3711 if (dp) {
4373f3ce
PB
3712 gen_mov_F0_vreg(0, rm * 2);
3713 tmp = gen_vfp_mrs();
3714 store_reg(s, rd, tmp);
3715 gen_mov_F0_vreg(0, rm * 2 + 1);
3716 tmp = gen_vfp_mrs();
3717 store_reg(s, rn, tmp);
b7bcbe95
FB
3718 } else {
3719 gen_mov_F0_vreg(0, rm);
4373f3ce 3720 tmp = gen_vfp_mrs();
8387da81 3721 store_reg(s, rd, tmp);
b7bcbe95 3722 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3723 tmp = gen_vfp_mrs();
8387da81 3724 store_reg(s, rn, tmp);
b7bcbe95
FB
3725 }
3726 } else {
3727 /* arm->vfp */
3728 if (dp) {
4373f3ce
PB
3729 tmp = load_reg(s, rd);
3730 gen_vfp_msr(tmp);
3731 gen_mov_vreg_F0(0, rm * 2);
3732 tmp = load_reg(s, rn);
3733 gen_vfp_msr(tmp);
3734 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3735 } else {
8387da81 3736 tmp = load_reg(s, rd);
4373f3ce 3737 gen_vfp_msr(tmp);
b7bcbe95 3738 gen_mov_vreg_F0(0, rm);
8387da81 3739 tmp = load_reg(s, rn);
4373f3ce 3740 gen_vfp_msr(tmp);
b7bcbe95
FB
3741 gen_mov_vreg_F0(0, rm + 1);
3742 }
3743 }
3744 } else {
3745 /* Load/store */
3746 rn = (insn >> 16) & 0xf;
3747 if (dp)
9ee6e8bb 3748 VFP_DREG_D(rd, insn);
b7bcbe95 3749 else
9ee6e8bb 3750 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3751 if ((insn & 0x01200000) == 0x01000000) {
3752 /* Single load/store */
3753 offset = (insn & 0xff) << 2;
3754 if ((insn & (1 << 23)) == 0)
3755 offset = -offset;
934814f1
PM
3756 if (s->thumb && rn == 15) {
3757 /* This is actually UNPREDICTABLE */
3758 addr = tcg_temp_new_i32();
3759 tcg_gen_movi_i32(addr, s->pc & ~2);
3760 } else {
3761 addr = load_reg(s, rn);
3762 }
312eea9f 3763 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3764 if (insn & (1 << 20)) {
312eea9f 3765 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3766 gen_mov_vreg_F0(dp, rd);
3767 } else {
3768 gen_mov_F0_vreg(dp, rd);
312eea9f 3769 gen_vfp_st(s, dp, addr);
b7bcbe95 3770 }
7d1b0095 3771 tcg_temp_free_i32(addr);
b7bcbe95
FB
3772 } else {
3773 /* load/store multiple */
934814f1 3774 int w = insn & (1 << 21);
b7bcbe95
FB
3775 if (dp)
3776 n = (insn >> 1) & 0x7f;
3777 else
3778 n = insn & 0xff;
3779
934814f1
PM
3780 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3781 /* P == U , W == 1 => UNDEF */
3782 return 1;
3783 }
3784 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3785 /* UNPREDICTABLE cases for bad immediates: we choose to
3786 * UNDEF to avoid generating huge numbers of TCG ops
3787 */
3788 return 1;
3789 }
3790 if (rn == 15 && w) {
3791 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3792 return 1;
3793 }
3794
3795 if (s->thumb && rn == 15) {
3796 /* This is actually UNPREDICTABLE */
3797 addr = tcg_temp_new_i32();
3798 tcg_gen_movi_i32(addr, s->pc & ~2);
3799 } else {
3800 addr = load_reg(s, rn);
3801 }
b7bcbe95 3802 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3803 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3804
3805 if (dp)
3806 offset = 8;
3807 else
3808 offset = 4;
3809 for (i = 0; i < n; i++) {
18c9b560 3810 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3811 /* load */
312eea9f 3812 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3813 gen_mov_vreg_F0(dp, rd + i);
3814 } else {
3815 /* store */
3816 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3817 gen_vfp_st(s, dp, addr);
b7bcbe95 3818 }
312eea9f 3819 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3820 }
934814f1 3821 if (w) {
b7bcbe95
FB
3822 /* writeback */
3823 if (insn & (1 << 24))
3824 offset = -offset * n;
3825 else if (dp && (insn & 1))
3826 offset = 4;
3827 else
3828 offset = 0;
3829
3830 if (offset != 0)
312eea9f
FN
3831 tcg_gen_addi_i32(addr, addr, offset);
3832 store_reg(s, rn, addr);
3833 } else {
7d1b0095 3834 tcg_temp_free_i32(addr);
b7bcbe95
FB
3835 }
3836 }
3837 }
3838 break;
3839 default:
3840 /* Should never happen. */
3841 return 1;
3842 }
3843 return 0;
3844}
3845
0a2461fa 3846static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3847{
6e256c93
FB
3848 TranslationBlock *tb;
3849
3850 tb = s->tb;
3851 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3852 tcg_gen_goto_tb(n);
eaed129d 3853 gen_set_pc_im(s, dest);
8cfd0495 3854 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3855 } else {
eaed129d 3856 gen_set_pc_im(s, dest);
57fec1fe 3857 tcg_gen_exit_tb(0);
6e256c93 3858 }
c53be334
FB
3859}
3860
/* Generate an unconditional jump to 'dest'.  Under single-step we emit
 * an indirect branch instead of a TB goto so the debug exception still
 * fires at the branch target.
 */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1; /* keep bit 0 set so the target stays in Thumb state */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
3873
39d5492a 3874static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3875{
ee097184 3876 if (x)
d9ba4830 3877 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3878 else
d9ba4830 3879 gen_sxth(t0);
ee097184 3880 if (y)
d9ba4830 3881 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3882 else
d9ba4830
PB
3883 gen_sxth(t1);
3884 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3885}
3886
3887/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3888static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3889 uint32_t mask;
3890
3891 mask = 0;
3892 if (flags & (1 << 0))
3893 mask |= 0xff;
3894 if (flags & (1 << 1))
3895 mask |= 0xff00;
3896 if (flags & (1 << 2))
3897 mask |= 0xff0000;
3898 if (flags & (1 << 3))
3899 mask |= 0xff000000;
9ee6e8bb 3900
2ae23e75 3901 /* Mask out undefined bits. */
9ee6e8bb 3902 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3903 if (!arm_feature(env, ARM_FEATURE_V4T))
3904 mask &= ~CPSR_T;
3905 if (!arm_feature(env, ARM_FEATURE_V5))
3906 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3907 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3908 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3909 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3910 mask &= ~CPSR_IT;
4051e12c
PM
3911 /* Mask out execution state and reserved bits. */
3912 if (!spsr) {
3913 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
3914 }
b5ff1b31
FB
3915 /* Mask out privileged bits. */
3916 if (IS_USER(s))
9ee6e8bb 3917 mask &= CPSR_USER;
b5ff1b31
FB
3918 return mask;
3919}
3920
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: preserve SPSR bits outside 'mask' and
         * merge in the masked new value from t0.
         */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* PSR writes can change privilege/flags that affect translation,
     * so force a TB lookup.
     */
    gen_lookup_tb(s);
    return 0;
}
3942
2fbac54b
FN
3943/* Returns nonzero if access to the PSR is not permitted. */
3944static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3945{
39d5492a 3946 TCGv_i32 tmp;
7d1b0095 3947 tmp = tcg_temp_new_i32();
2fbac54b
FN
3948 tcg_gen_movi_i32(tmp, val);
3949 return gen_set_psr(s, mask, spsr, tmp);
3950}
3951
e9bb4aa9 3952/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3953static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3954{
39d5492a 3955 TCGv_i32 tmp;
e9bb4aa9 3956 store_reg(s, 15, pc);
d9ba4830 3957 tmp = load_cpu_field(spsr);
4051e12c 3958 gen_set_cpsr(tmp, CPSR_ERET_MASK);
7d1b0095 3959 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3960 s->is_jmp = DISAS_UPDATE;
3961}
3962
b0109805 3963/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3964static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3965{
4051e12c 3966 gen_set_cpsr(cpsr, CPSR_ERET_MASK);
7d1b0095 3967 tcg_temp_free_i32(cpsr);
b0109805 3968 store_reg(s, 15, pc);
9ee6e8bb
PB
3969 s->is_jmp = DISAS_UPDATE;
3970}
3b46e624 3971
/* Handle the NOP-hint space.  Only WFI and WFE end the TB with a
 * special exit state; SEV and SEVL currently fall through to the NOP
 * case (see the TODO below).
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
        /* fallthrough: treated as a NOP for now */
    default: /* nop */
        break;
    }
}
99c475ab 3990
ad69471c 3991#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3992
39d5492a 3993static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3994{
3995 switch (size) {
dd8fbd78
FN
3996 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3997 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3998 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3999 default: abort();
9ee6e8bb 4000 }
9ee6e8bb
PB
4001}
4002
39d5492a 4003static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4004{
4005 switch (size) {
dd8fbd78
FN
4006 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4007 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4008 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4009 default: return;
4010 }
4011}
4012
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Dispatch a NEON integer helper that also takes cpu_env, selected by
 * the (size, u) pair of the enclosing scope: the case index is
 * (size << 1) | u, i.e. s8/u8/s16/u16/s32/u32.  Operates on the
 * enclosing scope's tmp/tmp2 temporaries.  The default case makes the
 * enclosing function return 1 (UNDEF), so this may only be expanded
 * inside an int-returning disassembly function.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV but for helpers without the cpu_env
 * argument.  Same (size << 1) | u dispatch and same return-1-on-default
 * constraint on the expansion site.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4064
39d5492a 4065static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4066{
39d5492a 4067 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4068 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4069 return tmp;
9ee6e8bb
PB
4070}
4071
39d5492a 4072static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4073{
dd8fbd78 4074 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4075 tcg_temp_free_i32(var);
9ee6e8bb
PB
4076}
4077
39d5492a 4078static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4079{
39d5492a 4080 TCGv_i32 tmp;
9ee6e8bb 4081 if (size == 1) {
0fad6efc
PM
4082 tmp = neon_load_reg(reg & 7, reg >> 4);
4083 if (reg & 8) {
dd8fbd78 4084 gen_neon_dup_high16(tmp);
0fad6efc
PM
4085 } else {
4086 gen_neon_dup_low16(tmp);
dd8fbd78 4087 }
0fad6efc
PM
4088 } else {
4089 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4090 }
dd8fbd78 4091 return tmp;
9ee6e8bb
PB
4092}
4093
02acedf9 4094static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4095{
39d5492a 4096 TCGv_i32 tmp, tmp2;
600b828c 4097 if (!q && size == 2) {
02acedf9
PM
4098 return 1;
4099 }
4100 tmp = tcg_const_i32(rd);
4101 tmp2 = tcg_const_i32(rm);
4102 if (q) {
4103 switch (size) {
4104 case 0:
02da0b2d 4105 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4106 break;
4107 case 1:
02da0b2d 4108 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4109 break;
4110 case 2:
02da0b2d 4111 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4112 break;
4113 default:
4114 abort();
4115 }
4116 } else {
4117 switch (size) {
4118 case 0:
02da0b2d 4119 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4120 break;
4121 case 1:
02da0b2d 4122 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4123 break;
4124 default:
4125 abort();
4126 }
4127 }
4128 tcg_temp_free_i32(tmp);
4129 tcg_temp_free_i32(tmp2);
4130 return 0;
19457615
FN
4131}
4132
d68a6f3a 4133static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4134{
39d5492a 4135 TCGv_i32 tmp, tmp2;
600b828c 4136 if (!q && size == 2) {
d68a6f3a
PM
4137 return 1;
4138 }
4139 tmp = tcg_const_i32(rd);
4140 tmp2 = tcg_const_i32(rm);
4141 if (q) {
4142 switch (size) {
4143 case 0:
02da0b2d 4144 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4145 break;
4146 case 1:
02da0b2d 4147 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4148 break;
4149 case 2:
02da0b2d 4150 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4151 break;
4152 default:
4153 abort();
4154 }
4155 } else {
4156 switch (size) {
4157 case 0:
02da0b2d 4158 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4159 break;
4160 case 1:
02da0b2d 4161 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4162 break;
4163 default:
4164 abort();
4165 }
4166 }
4167 tcg_temp_free_i32(tmp);
4168 tcg_temp_free_i32(tmp2);
4169 return 0;
19457615
FN
4170}
4171
39d5492a 4172static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4173{
39d5492a 4174 TCGv_i32 rd, tmp;
19457615 4175
7d1b0095
PM
4176 rd = tcg_temp_new_i32();
4177 tmp = tcg_temp_new_i32();
19457615
FN
4178
4179 tcg_gen_shli_i32(rd, t0, 8);
4180 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4181 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4182 tcg_gen_or_i32(rd, rd, tmp);
4183
4184 tcg_gen_shri_i32(t1, t1, 8);
4185 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4186 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4187 tcg_gen_or_i32(t1, t1, tmp);
4188 tcg_gen_mov_i32(t0, rd);
4189
7d1b0095
PM
4190 tcg_temp_free_i32(tmp);
4191 tcg_temp_free_i32(rd);
19457615
FN
4192}
4193
39d5492a 4194static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4195{
39d5492a 4196 TCGv_i32 rd, tmp;
19457615 4197
7d1b0095
PM
4198 rd = tcg_temp_new_i32();
4199 tmp = tcg_temp_new_i32();
19457615
FN
4200
4201 tcg_gen_shli_i32(rd, t0, 16);
4202 tcg_gen_andi_i32(tmp, t1, 0xffff);
4203 tcg_gen_or_i32(rd, rd, tmp);
4204 tcg_gen_shri_i32(t1, t1, 16);
4205 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4206 tcg_gen_or_i32(t1, t1, tmp);
4207 tcg_gen_mov_i32(t0, rd);
4208
7d1b0095
PM
4209 tcg_temp_free_i32(tmp);
4210 tcg_temp_free_i32(rd);
19457615
FN
4211}
4212
4213
9ee6e8bb
PB
/* Decode table for NEON "load/store multiple structures" instructions,
 * indexed by the op field (bits [11:8] of the instruction):
 *   nregs      - number of D registers transferred
 *   interleave - element interleave pattern
 *   spacing    - register spacing (1 = consecutive, 2 = alternate)
 * The table is only ever read, so declare it const.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4231
/* Translate a NEON load/store element instruction.  Return nonzero if the
 * instruction is invalid (the caller then generates UNDEF).
 *
 * Covers the three encoding groups:
 *   - load/store multiple structures            (bit 23 clear)
 *   - load single structure to all lanes        (bit 23 set, size == 3)
 *   - load/store single structure to one lane   (bit 23 set, otherwise)
 * plus optional post-index writeback of the base register when rm != 15.
 *
 * NOTE(review): the env parameter is not used in this body - confirm
 * whether it is kept only for signature uniformity with other disas_* fns.
 */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        /* 64-bit elements are only valid with no interleave/spacing. */
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        /* Address advance between consecutive accesses of one register. */
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved patterns restart from the base address with a
             * per-register offset rather than running sequentially.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* One 64-bit access moves a whole D register. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Two 32-bit passes per D register. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one word. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses assembled into one word. */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        /* Writeback amount for the !{reg} form is always 8 * nregs. */
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            /* Remaining UNDEF combinations of align bit vs nregs/size. */
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            /* shift selects which part of the 32-bit pass holds the
             * element; stride is the register step between structures.
             */
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the existing
                         * register contents at the right bit position.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means
     * post-increment by the transfer size, otherwise by register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4560
8f8e3aa4 4561/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4562static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4563{
4564 tcg_gen_and_i32(t, t, c);
f669df27 4565 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4566 tcg_gen_or_i32(dest, t, f);
4567}
4568
39d5492a 4569static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4570{
4571 switch (size) {
4572 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4573 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4574 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4575 default: abort();
4576 }
4577}
4578
39d5492a 4579static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4580{
4581 switch (size) {
02da0b2d
PM
4582 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4583 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4584 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4585 default: abort();
4586 }
4587}
4588
39d5492a 4589static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4590{
4591 switch (size) {
02da0b2d
PM
4592 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4593 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4594 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4595 default: abort();
4596 }
4597}
4598
39d5492a 4599static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4600{
4601 switch (size) {
02da0b2d
PM
4602 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4603 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4604 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4605 default: abort();
4606 }
4607}
4608
39d5492a 4609static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4610 int q, int u)
4611{
4612 if (q) {
4613 if (u) {
4614 switch (size) {
4615 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4616 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4617 default: abort();
4618 }
4619 } else {
4620 switch (size) {
4621 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4622 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4623 default: abort();
4624 }
4625 }
4626 } else {
4627 if (u) {
4628 switch (size) {
b408a9b0
CL
4629 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4630 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4631 default: abort();
4632 }
4633 } else {
4634 switch (size) {
4635 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4636 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4637 default: abort();
4638 }
4639 }
4640 }
4641}
4642
39d5492a 4643static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4644{
4645 if (u) {
4646 switch (size) {
4647 case 0: gen_helper_neon_widen_u8(dest, src); break;
4648 case 1: gen_helper_neon_widen_u16(dest, src); break;
4649 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4650 default: abort();
4651 }
4652 } else {
4653 switch (size) {
4654 case 0: gen_helper_neon_widen_s8(dest, src); break;
4655 case 1: gen_helper_neon_widen_s16(dest, src); break;
4656 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4657 default: abort();
4658 }
4659 }
7d1b0095 4660 tcg_temp_free_i32(src);
ad69471c
PB
4661}
4662
4663static inline void gen_neon_addl(int size)
4664{
4665 switch (size) {
4666 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4667 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4668 case 2: tcg_gen_add_i64(CPU_V001); break;
4669 default: abort();
4670 }
4671}
4672
4673static inline void gen_neon_subl(int size)
4674{
4675 switch (size) {
4676 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4677 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4678 case 2: tcg_gen_sub_i64(CPU_V001); break;
4679 default: abort();
4680 }
4681}
4682
a7812ae4 4683static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4684{
4685 switch (size) {
4686 case 0: gen_helper_neon_negl_u16(var, var); break;
4687 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4688 case 2:
4689 tcg_gen_neg_i64(var, var);
4690 break;
ad69471c
PB
4691 default: abort();
4692 }
4693}
4694
a7812ae4 4695static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4696{
4697 switch (size) {
02da0b2d
PM
4698 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4699 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4700 default: abort();
4701 }
4702}
4703
39d5492a
PM
4704static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4705 int size, int u)
ad69471c 4706{
a7812ae4 4707 TCGv_i64 tmp;
ad69471c
PB
4708
4709 switch ((size << 1) | u) {
4710 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4711 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4712 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4713 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4714 case 4:
4715 tmp = gen_muls_i64_i32(a, b);
4716 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4717 tcg_temp_free_i64(tmp);
ad69471c
PB
4718 break;
4719 case 5:
4720 tmp = gen_mulu_i64_i32(a, b);
4721 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4722 tcg_temp_free_i64(tmp);
ad69471c
PB
4723 break;
4724 default: abort();
4725 }
c6067f04
CL
4726
4727 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4728 Don't forget to clean them now. */
4729 if (size < 2) {
7d1b0095
PM
4730 tcg_temp_free_i32(a);
4731 tcg_temp_free_i32(b);
c6067f04 4732 }
ad69471c
PB
4733}
4734
39d5492a
PM
4735static void gen_neon_narrow_op(int op, int u, int size,
4736 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4737{
4738 if (op) {
4739 if (u) {
4740 gen_neon_unarrow_sats(size, dest, src);
4741 } else {
4742 gen_neon_narrow(size, dest, src);
4743 }
4744 } else {
4745 if (u) {
4746 gen_neon_narrow_satu(size, dest, src);
4747 } else {
4748 gen_neon_narrow_sats(size, dest, src);
4749 }
4750 }
4751}
4752
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.  The per-op valid element sizes are listed in
 * neon_3r_sizes[] below.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
62698be3
PM
4789
/* Each entry has bit n set if the 3-reg-same insn allows size value n;
 * an op whose entry has no bit set for the decoded size UNDEFs.
 * (0x7 = sizes 0-2, 0xf = 0-3, 0x5/0x6 = size field partly reused.)
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4824
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Valid element sizes per op are in neon_2rm_sizes[];
 * unassigned values (3, 29) are unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4891
4892static int neon_2rm_is_float_op(int op)
4893{
4894 /* Return true if this neon 2reg-misc op is float-to-float */
4895 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 4896 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
4897 op == NEON_2RM_VRINTM ||
4898 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 4899 op >= NEON_2RM_VRECPE_F);
600b828c
PM
4900}
4901
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x7 = sizes 0-2 valid; 0x4 = 32-bit only; 0x2 = 16-bit only;
 * 0x1 = 8-bit only.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4970
9ee6e8bb
PB
4971/* Translate a NEON data processing instruction. Return nonzero if the
4972 instruction is invalid.
ad69471c
PB
4973 We process data in a mixture of 32-bit and 64-bit chunks.
4974 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4975
0ecb72a5 4976static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4977{
4978 int op;
4979 int q;
4980 int rd, rn, rm;
4981 int size;
4982 int shift;
4983 int pass;
4984 int count;
4985 int pairwise;
4986 int u;
ca9a32e4 4987 uint32_t imm, mask;
39d5492a 4988 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4989 TCGv_i64 tmp64;
9ee6e8bb 4990
2c7ffc41
PM
4991 /* FIXME: this access check should not take precedence over UNDEF
4992 * for invalid encodings; we will generate incorrect syndrome information
4993 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4994 */
4995 if (!s->cpacr_fpen) {
4996 gen_exception_insn(s, 4, EXCP_UDEF,
4997 syn_fp_access_trap(1, 0xe, s->thumb));
4998 return 0;
4999 }
5000
5df8bac1 5001 if (!s->vfp_enabled)
9ee6e8bb
PB
5002 return 1;
5003 q = (insn & (1 << 6)) != 0;
5004 u = (insn >> 24) & 1;
5005 VFP_DREG_D(rd, insn);
5006 VFP_DREG_N(rn, insn);
5007 VFP_DREG_M(rm, insn);
5008 size = (insn >> 20) & 3;
5009 if ((insn & (1 << 23)) == 0) {
5010 /* Three register same length. */
5011 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5012 /* Catch invalid op and bad size combinations: UNDEF */
5013 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5014 return 1;
5015 }
25f84f79
PM
5016 /* All insns of this form UNDEF for either this condition or the
5017 * superset of cases "Q==1"; we catch the latter later.
5018 */
5019 if (q && ((rd | rn | rm) & 1)) {
5020 return 1;
5021 }
f1ecb913
AB
5022 /*
5023 * The SHA-1/SHA-256 3-register instructions require special treatment
5024 * here, as their size field is overloaded as an op type selector, and
5025 * they all consume their input in a single pass.
5026 */
5027 if (op == NEON_3R_SHA) {
5028 if (!q) {
5029 return 1;
5030 }
5031 if (!u) { /* SHA-1 */
5032 if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
5033 return 1;
5034 }
5035 tmp = tcg_const_i32(rd);
5036 tmp2 = tcg_const_i32(rn);
5037 tmp3 = tcg_const_i32(rm);
5038 tmp4 = tcg_const_i32(size);
5039 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5040 tcg_temp_free_i32(tmp4);
5041 } else { /* SHA-256 */
5042 if (!arm_feature(env, ARM_FEATURE_V8_SHA256) || size == 3) {
5043 return 1;
5044 }
5045 tmp = tcg_const_i32(rd);
5046 tmp2 = tcg_const_i32(rn);
5047 tmp3 = tcg_const_i32(rm);
5048 switch (size) {
5049 case 0:
5050 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5051 break;
5052 case 1:
5053 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5054 break;
5055 case 2:
5056 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5057 break;
5058 }
5059 }
5060 tcg_temp_free_i32(tmp);
5061 tcg_temp_free_i32(tmp2);
5062 tcg_temp_free_i32(tmp3);
5063 return 0;
5064 }
62698be3
PM
5065 if (size == 3 && op != NEON_3R_LOGIC) {
5066 /* 64-bit element instructions. */
9ee6e8bb 5067 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5068 neon_load_reg64(cpu_V0, rn + pass);
5069 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5070 switch (op) {
62698be3 5071 case NEON_3R_VQADD:
9ee6e8bb 5072 if (u) {
02da0b2d
PM
5073 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5074 cpu_V0, cpu_V1);
2c0262af 5075 } else {
02da0b2d
PM
5076 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5077 cpu_V0, cpu_V1);
2c0262af 5078 }
9ee6e8bb 5079 break;
62698be3 5080 case NEON_3R_VQSUB:
9ee6e8bb 5081 if (u) {
02da0b2d
PM
5082 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5083 cpu_V0, cpu_V1);
ad69471c 5084 } else {
02da0b2d
PM
5085 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5086 cpu_V0, cpu_V1);
ad69471c
PB
5087 }
5088 break;
62698be3 5089 case NEON_3R_VSHL:
ad69471c
PB
5090 if (u) {
5091 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5092 } else {
5093 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5094 }
5095 break;
62698be3 5096 case NEON_3R_VQSHL:
ad69471c 5097 if (u) {
02da0b2d
PM
5098 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5099 cpu_V1, cpu_V0);
ad69471c 5100 } else {
02da0b2d
PM
5101 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5102 cpu_V1, cpu_V0);
ad69471c
PB
5103 }
5104 break;
62698be3 5105 case NEON_3R_VRSHL:
ad69471c
PB
5106 if (u) {
5107 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5108 } else {
ad69471c
PB
5109 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5110 }
5111 break;
62698be3 5112 case NEON_3R_VQRSHL:
ad69471c 5113 if (u) {
02da0b2d
PM
5114 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5115 cpu_V1, cpu_V0);
ad69471c 5116 } else {
02da0b2d
PM
5117 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5118 cpu_V1, cpu_V0);
1e8d4eec 5119 }
9ee6e8bb 5120 break;
62698be3 5121 case NEON_3R_VADD_VSUB:
9ee6e8bb 5122 if (u) {
ad69471c 5123 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5124 } else {
ad69471c 5125 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5126 }
5127 break;
5128 default:
5129 abort();
2c0262af 5130 }
ad69471c 5131 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5132 }
9ee6e8bb 5133 return 0;
2c0262af 5134 }
25f84f79 5135 pairwise = 0;
9ee6e8bb 5136 switch (op) {
62698be3
PM
5137 case NEON_3R_VSHL:
5138 case NEON_3R_VQSHL:
5139 case NEON_3R_VRSHL:
5140 case NEON_3R_VQRSHL:
9ee6e8bb 5141 {
ad69471c
PB
5142 int rtmp;
5143 /* Shift instruction operands are reversed. */
5144 rtmp = rn;
9ee6e8bb 5145 rn = rm;
ad69471c 5146 rm = rtmp;
9ee6e8bb 5147 }
2c0262af 5148 break;
25f84f79
PM
5149 case NEON_3R_VPADD:
5150 if (u) {
5151 return 1;
5152 }
5153 /* Fall through */
62698be3
PM
5154 case NEON_3R_VPMAX:
5155 case NEON_3R_VPMIN:
9ee6e8bb 5156 pairwise = 1;
2c0262af 5157 break;
25f84f79
PM
5158 case NEON_3R_FLOAT_ARITH:
5159 pairwise = (u && size < 2); /* if VPADD (float) */
5160 break;
5161 case NEON_3R_FLOAT_MINMAX:
5162 pairwise = u; /* if VPMIN/VPMAX (float) */
5163 break;
5164 case NEON_3R_FLOAT_CMP:
5165 if (!u && size) {
5166 /* no encoding for U=0 C=1x */
5167 return 1;
5168 }
5169 break;
5170 case NEON_3R_FLOAT_ACMP:
5171 if (!u) {
5172 return 1;
5173 }
5174 break;
505935fc
WN
5175 case NEON_3R_FLOAT_MISC:
5176 /* VMAXNM/VMINNM in ARMv8 */
5177 if (u && !arm_feature(env, ARM_FEATURE_V8)) {
25f84f79
PM
5178 return 1;
5179 }
2c0262af 5180 break;
25f84f79
PM
5181 case NEON_3R_VMUL:
5182 if (u && (size != 0)) {
5183 /* UNDEF on invalid size for polynomial subcase */
5184 return 1;
5185 }
2c0262af 5186 break;
da97f52c
PM
5187 case NEON_3R_VFM:
5188 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
5189 return 1;
5190 }
5191 break;
9ee6e8bb 5192 default:
2c0262af 5193 break;
9ee6e8bb 5194 }
dd8fbd78 5195
25f84f79
PM
5196 if (pairwise && q) {
5197 /* All the pairwise insns UNDEF if Q is set */
5198 return 1;
5199 }
5200
9ee6e8bb
PB
5201 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5202
5203 if (pairwise) {
5204 /* Pairwise. */
a5a14945
JR
5205 if (pass < 1) {
5206 tmp = neon_load_reg(rn, 0);
5207 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5208 } else {
a5a14945
JR
5209 tmp = neon_load_reg(rm, 0);
5210 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5211 }
5212 } else {
5213 /* Elementwise. */
dd8fbd78
FN
5214 tmp = neon_load_reg(rn, pass);
5215 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5216 }
5217 switch (op) {
62698be3 5218 case NEON_3R_VHADD:
9ee6e8bb
PB
5219 GEN_NEON_INTEGER_OP(hadd);
5220 break;
62698be3 5221 case NEON_3R_VQADD:
02da0b2d 5222 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5223 break;
62698be3 5224 case NEON_3R_VRHADD:
9ee6e8bb 5225 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5226 break;
62698be3 5227 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5228 switch ((u << 2) | size) {
5229 case 0: /* VAND */
dd8fbd78 5230 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5231 break;
5232 case 1: /* BIC */
f669df27 5233 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5234 break;
5235 case 2: /* VORR */
dd8fbd78 5236 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5237 break;
5238 case 3: /* VORN */
f669df27 5239 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5240 break;
5241 case 4: /* VEOR */
dd8fbd78 5242 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5243 break;
5244 case 5: /* VBSL */
dd8fbd78
FN
5245 tmp3 = neon_load_reg(rd, pass);
5246 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5247 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5248 break;
5249 case 6: /* VBIT */
dd8fbd78
FN
5250 tmp3 = neon_load_reg(rd, pass);
5251 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5252 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5253 break;
5254 case 7: /* VBIF */
dd8fbd78
FN
5255 tmp3 = neon_load_reg(rd, pass);
5256 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5257 tcg_temp_free_i32(tmp3);
9ee6e8bb 5258 break;
2c0262af
FB
5259 }
5260 break;
62698be3 5261 case NEON_3R_VHSUB:
9ee6e8bb
PB
5262 GEN_NEON_INTEGER_OP(hsub);
5263 break;
62698be3 5264 case NEON_3R_VQSUB:
02da0b2d 5265 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5266 break;
62698be3 5267 case NEON_3R_VCGT:
9ee6e8bb
PB
5268 GEN_NEON_INTEGER_OP(cgt);
5269 break;
62698be3 5270 case NEON_3R_VCGE:
9ee6e8bb
PB
5271 GEN_NEON_INTEGER_OP(cge);
5272 break;
62698be3 5273 case NEON_3R_VSHL:
ad69471c 5274 GEN_NEON_INTEGER_OP(shl);
2c0262af 5275 break;
62698be3 5276 case NEON_3R_VQSHL:
02da0b2d 5277 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5278 break;
62698be3 5279 case NEON_3R_VRSHL:
ad69471c 5280 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5281 break;
62698be3 5282 case NEON_3R_VQRSHL:
02da0b2d 5283 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5284 break;
62698be3 5285 case NEON_3R_VMAX:
9ee6e8bb
PB
5286 GEN_NEON_INTEGER_OP(max);
5287 break;
62698be3 5288 case NEON_3R_VMIN:
9ee6e8bb
PB
5289 GEN_NEON_INTEGER_OP(min);
5290 break;
62698be3 5291 case NEON_3R_VABD:
9ee6e8bb
PB
5292 GEN_NEON_INTEGER_OP(abd);
5293 break;
62698be3 5294 case NEON_3R_VABA:
9ee6e8bb 5295 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5296 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5297 tmp2 = neon_load_reg(rd, pass);
5298 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5299 break;
62698be3 5300 case NEON_3R_VADD_VSUB:
9ee6e8bb 5301 if (!u) { /* VADD */
62698be3 5302 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5303 } else { /* VSUB */
5304 switch (size) {
dd8fbd78
FN
5305 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5306 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5307 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5308 default: abort();
9ee6e8bb
PB
5309 }
5310 }
5311 break;
62698be3 5312 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5313 if (!u) { /* VTST */
5314 switch (size) {
dd8fbd78
FN
5315 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5316 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5317 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5318 default: abort();
9ee6e8bb
PB
5319 }
5320 } else { /* VCEQ */
5321 switch (size) {
dd8fbd78
FN
5322 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5323 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5324 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5325 default: abort();
9ee6e8bb
PB
5326 }
5327 }
5328 break;
62698be3 5329 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5330 switch (size) {
dd8fbd78
FN
5331 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5332 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5333 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5334 default: abort();
9ee6e8bb 5335 }
7d1b0095 5336 tcg_temp_free_i32(tmp2);
dd8fbd78 5337 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5338 if (u) { /* VMLS */
dd8fbd78 5339 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5340 } else { /* VMLA */
dd8fbd78 5341 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5342 }
5343 break;
62698be3 5344 case NEON_3R_VMUL:
9ee6e8bb 5345 if (u) { /* polynomial */
dd8fbd78 5346 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5347 } else { /* Integer */
5348 switch (size) {
dd8fbd78
FN
5349 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5350 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5351 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5352 default: abort();
9ee6e8bb
PB
5353 }
5354 }
5355 break;
62698be3 5356 case NEON_3R_VPMAX:
9ee6e8bb
PB
5357 GEN_NEON_INTEGER_OP(pmax);
5358 break;
62698be3 5359 case NEON_3R_VPMIN:
9ee6e8bb
PB
5360 GEN_NEON_INTEGER_OP(pmin);
5361 break;
62698be3 5362 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5363 if (!u) { /* VQDMULH */
5364 switch (size) {
02da0b2d
PM
5365 case 1:
5366 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5367 break;
5368 case 2:
5369 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5370 break;
62698be3 5371 default: abort();
9ee6e8bb 5372 }
62698be3 5373 } else { /* VQRDMULH */
9ee6e8bb 5374 switch (size) {
02da0b2d
PM
5375 case 1:
5376 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5377 break;
5378 case 2:
5379 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5380 break;
62698be3 5381 default: abort();
9ee6e8bb
PB
5382 }
5383 }
5384 break;
62698be3 5385 case NEON_3R_VPADD:
9ee6e8bb 5386 switch (size) {
dd8fbd78
FN
5387 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5388 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5389 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5390 default: abort();
9ee6e8bb
PB
5391 }
5392 break;
62698be3 5393 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5394 {
5395 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5396 switch ((u << 2) | size) {
5397 case 0: /* VADD */
aa47cfdd
PM
5398 case 4: /* VPADD */
5399 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5400 break;
5401 case 2: /* VSUB */
aa47cfdd 5402 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5403 break;
5404 case 6: /* VABD */
aa47cfdd 5405 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5406 break;
5407 default:
62698be3 5408 abort();
9ee6e8bb 5409 }
aa47cfdd 5410 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5411 break;
aa47cfdd 5412 }
62698be3 5413 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5414 {
5415 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5416 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5417 if (!u) {
7d1b0095 5418 tcg_temp_free_i32(tmp2);
dd8fbd78 5419 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5420 if (size == 0) {
aa47cfdd 5421 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5422 } else {
aa47cfdd 5423 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5424 }
5425 }
aa47cfdd 5426 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5427 break;
aa47cfdd 5428 }
62698be3 5429 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5430 {
5431 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5432 if (!u) {
aa47cfdd 5433 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5434 } else {
aa47cfdd
PM
5435 if (size == 0) {
5436 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5437 } else {
5438 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5439 }
b5ff1b31 5440 }
aa47cfdd 5441 tcg_temp_free_ptr(fpstatus);
2c0262af 5442 break;
aa47cfdd 5443 }
62698be3 5444 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5445 {
5446 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5447 if (size == 0) {
5448 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5449 } else {
5450 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5451 }
5452 tcg_temp_free_ptr(fpstatus);
2c0262af 5453 break;
aa47cfdd 5454 }
62698be3 5455 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5456 {
5457 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5458 if (size == 0) {
f71a2ae5 5459 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5460 } else {
f71a2ae5 5461 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5462 }
5463 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5464 break;
aa47cfdd 5465 }
505935fc
WN
5466 case NEON_3R_FLOAT_MISC:
5467 if (u) {
5468 /* VMAXNM/VMINNM */
5469 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5470 if (size == 0) {
f71a2ae5 5471 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5472 } else {
f71a2ae5 5473 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5474 }
5475 tcg_temp_free_ptr(fpstatus);
5476 } else {
5477 if (size == 0) {
5478 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5479 } else {
5480 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5481 }
5482 }
2c0262af 5483 break;
da97f52c
PM
5484 case NEON_3R_VFM:
5485 {
5486 /* VFMA, VFMS: fused multiply-add */
5487 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5488 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5489 if (size) {
5490 /* VFMS */
5491 gen_helper_vfp_negs(tmp, tmp);
5492 }
5493 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5494 tcg_temp_free_i32(tmp3);
5495 tcg_temp_free_ptr(fpstatus);
5496 break;
5497 }
9ee6e8bb
PB
5498 default:
5499 abort();
2c0262af 5500 }
7d1b0095 5501 tcg_temp_free_i32(tmp2);
dd8fbd78 5502
9ee6e8bb
PB
5503 /* Save the result. For elementwise operations we can put it
5504 straight into the destination register. For pairwise operations
5505 we have to be careful to avoid clobbering the source operands. */
5506 if (pairwise && rd == rm) {
dd8fbd78 5507 neon_store_scratch(pass, tmp);
9ee6e8bb 5508 } else {
dd8fbd78 5509 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5510 }
5511
5512 } /* for pass */
5513 if (pairwise && rd == rm) {
5514 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5515 tmp = neon_load_scratch(pass);
5516 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5517 }
5518 }
ad69471c 5519 /* End of 3 register same size operations. */
9ee6e8bb
PB
5520 } else if (insn & (1 << 4)) {
5521 if ((insn & 0x00380080) != 0) {
5522 /* Two registers and shift. */
5523 op = (insn >> 8) & 0xf;
5524 if (insn & (1 << 7)) {
cc13115b
PM
5525 /* 64-bit shift. */
5526 if (op > 7) {
5527 return 1;
5528 }
9ee6e8bb
PB
5529 size = 3;
5530 } else {
5531 size = 2;
5532 while ((insn & (1 << (size + 19))) == 0)
5533 size--;
5534 }
5535 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5536 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5537 by immediate using the variable shift operations. */
5538 if (op < 8) {
5539 /* Shift by immediate:
5540 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5541 if (q && ((rd | rm) & 1)) {
5542 return 1;
5543 }
5544 if (!u && (op == 4 || op == 6)) {
5545 return 1;
5546 }
9ee6e8bb
PB
5547 /* Right shifts are encoded as N - shift, where N is the
5548 element size in bits. */
5549 if (op <= 4)
5550 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5551 if (size == 3) {
5552 count = q + 1;
5553 } else {
5554 count = q ? 4: 2;
5555 }
5556 switch (size) {
5557 case 0:
5558 imm = (uint8_t) shift;
5559 imm |= imm << 8;
5560 imm |= imm << 16;
5561 break;
5562 case 1:
5563 imm = (uint16_t) shift;
5564 imm |= imm << 16;
5565 break;
5566 case 2:
5567 case 3:
5568 imm = shift;
5569 break;
5570 default:
5571 abort();
5572 }
5573
5574 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5575 if (size == 3) {
5576 neon_load_reg64(cpu_V0, rm + pass);
5577 tcg_gen_movi_i64(cpu_V1, imm);
5578 switch (op) {
5579 case 0: /* VSHR */
5580 case 1: /* VSRA */
5581 if (u)
5582 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5583 else
ad69471c 5584 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5585 break;
ad69471c
PB
5586 case 2: /* VRSHR */
5587 case 3: /* VRSRA */
5588 if (u)
5589 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5590 else
ad69471c 5591 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5592 break;
ad69471c 5593 case 4: /* VSRI */
ad69471c
PB
5594 case 5: /* VSHL, VSLI */
5595 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5596 break;
0322b26e 5597 case 6: /* VQSHLU */
02da0b2d
PM
5598 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5599 cpu_V0, cpu_V1);
ad69471c 5600 break;
0322b26e
PM
5601 case 7: /* VQSHL */
5602 if (u) {
02da0b2d 5603 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5604 cpu_V0, cpu_V1);
5605 } else {
02da0b2d 5606 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5607 cpu_V0, cpu_V1);
5608 }
9ee6e8bb 5609 break;
9ee6e8bb 5610 }
ad69471c
PB
5611 if (op == 1 || op == 3) {
5612 /* Accumulate. */
5371cb81 5613 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5614 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5615 } else if (op == 4 || (op == 5 && u)) {
5616 /* Insert */
923e6509
CL
5617 neon_load_reg64(cpu_V1, rd + pass);
5618 uint64_t mask;
5619 if (shift < -63 || shift > 63) {
5620 mask = 0;
5621 } else {
5622 if (op == 4) {
5623 mask = 0xffffffffffffffffull >> -shift;
5624 } else {
5625 mask = 0xffffffffffffffffull << shift;
5626 }
5627 }
5628 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5629 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5630 }
5631 neon_store_reg64(cpu_V0, rd + pass);
5632 } else { /* size < 3 */
5633 /* Operands in T0 and T1. */
dd8fbd78 5634 tmp = neon_load_reg(rm, pass);
7d1b0095 5635 tmp2 = tcg_temp_new_i32();
dd8fbd78 5636 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5637 switch (op) {
5638 case 0: /* VSHR */
5639 case 1: /* VSRA */
5640 GEN_NEON_INTEGER_OP(shl);
5641 break;
5642 case 2: /* VRSHR */
5643 case 3: /* VRSRA */
5644 GEN_NEON_INTEGER_OP(rshl);
5645 break;
5646 case 4: /* VSRI */
ad69471c
PB
5647 case 5: /* VSHL, VSLI */
5648 switch (size) {
dd8fbd78
FN
5649 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5650 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5651 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5652 default: abort();
ad69471c
PB
5653 }
5654 break;
0322b26e 5655 case 6: /* VQSHLU */
ad69471c 5656 switch (size) {
0322b26e 5657 case 0:
02da0b2d
PM
5658 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5659 tmp, tmp2);
0322b26e
PM
5660 break;
5661 case 1:
02da0b2d
PM
5662 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5663 tmp, tmp2);
0322b26e
PM
5664 break;
5665 case 2:
02da0b2d
PM
5666 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5667 tmp, tmp2);
0322b26e
PM
5668 break;
5669 default:
cc13115b 5670 abort();
ad69471c
PB
5671 }
5672 break;
0322b26e 5673 case 7: /* VQSHL */
02da0b2d 5674 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5675 break;
ad69471c 5676 }
7d1b0095 5677 tcg_temp_free_i32(tmp2);
ad69471c
PB
5678
5679 if (op == 1 || op == 3) {
5680 /* Accumulate. */
dd8fbd78 5681 tmp2 = neon_load_reg(rd, pass);
5371cb81 5682 gen_neon_add(size, tmp, tmp2);
7d1b0095 5683 tcg_temp_free_i32(tmp2);
ad69471c
PB
5684 } else if (op == 4 || (op == 5 && u)) {
5685 /* Insert */
5686 switch (size) {
5687 case 0:
5688 if (op == 4)
ca9a32e4 5689 mask = 0xff >> -shift;
ad69471c 5690 else
ca9a32e4
JR
5691 mask = (uint8_t)(0xff << shift);
5692 mask |= mask << 8;
5693 mask |= mask << 16;
ad69471c
PB
5694 break;
5695 case 1:
5696 if (op == 4)
ca9a32e4 5697 mask = 0xffff >> -shift;
ad69471c 5698 else
ca9a32e4
JR
5699 mask = (uint16_t)(0xffff << shift);
5700 mask |= mask << 16;
ad69471c
PB
5701 break;
5702 case 2:
ca9a32e4
JR
5703 if (shift < -31 || shift > 31) {
5704 mask = 0;
5705 } else {
5706 if (op == 4)
5707 mask = 0xffffffffu >> -shift;
5708 else
5709 mask = 0xffffffffu << shift;
5710 }
ad69471c
PB
5711 break;
5712 default:
5713 abort();
5714 }
dd8fbd78 5715 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5716 tcg_gen_andi_i32(tmp, tmp, mask);
5717 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5718 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5719 tcg_temp_free_i32(tmp2);
ad69471c 5720 }
dd8fbd78 5721 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5722 }
5723 } /* for pass */
5724 } else if (op < 10) {
ad69471c 5725 /* Shift by immediate and narrow:
9ee6e8bb 5726 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5727 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5728 if (rm & 1) {
5729 return 1;
5730 }
9ee6e8bb
PB
5731 shift = shift - (1 << (size + 3));
5732 size++;
92cdfaeb 5733 if (size == 3) {
a7812ae4 5734 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5735 neon_load_reg64(cpu_V0, rm);
5736 neon_load_reg64(cpu_V1, rm + 1);
5737 for (pass = 0; pass < 2; pass++) {
5738 TCGv_i64 in;
5739 if (pass == 0) {
5740 in = cpu_V0;
5741 } else {
5742 in = cpu_V1;
5743 }
ad69471c 5744 if (q) {
0b36f4cd 5745 if (input_unsigned) {
92cdfaeb 5746 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5747 } else {
92cdfaeb 5748 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5749 }
ad69471c 5750 } else {
0b36f4cd 5751 if (input_unsigned) {
92cdfaeb 5752 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5753 } else {
92cdfaeb 5754 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5755 }
ad69471c 5756 }
7d1b0095 5757 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5758 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5759 neon_store_reg(rd, pass, tmp);
5760 } /* for pass */
5761 tcg_temp_free_i64(tmp64);
5762 } else {
5763 if (size == 1) {
5764 imm = (uint16_t)shift;
5765 imm |= imm << 16;
2c0262af 5766 } else {
92cdfaeb
PM
5767 /* size == 2 */
5768 imm = (uint32_t)shift;
5769 }
5770 tmp2 = tcg_const_i32(imm);
5771 tmp4 = neon_load_reg(rm + 1, 0);
5772 tmp5 = neon_load_reg(rm + 1, 1);
5773 for (pass = 0; pass < 2; pass++) {
5774 if (pass == 0) {
5775 tmp = neon_load_reg(rm, 0);
5776 } else {
5777 tmp = tmp4;
5778 }
0b36f4cd
CL
5779 gen_neon_shift_narrow(size, tmp, tmp2, q,
5780 input_unsigned);
92cdfaeb
PM
5781 if (pass == 0) {
5782 tmp3 = neon_load_reg(rm, 1);
5783 } else {
5784 tmp3 = tmp5;
5785 }
0b36f4cd
CL
5786 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5787 input_unsigned);
36aa55dc 5788 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5789 tcg_temp_free_i32(tmp);
5790 tcg_temp_free_i32(tmp3);
5791 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5792 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5793 neon_store_reg(rd, pass, tmp);
5794 } /* for pass */
c6067f04 5795 tcg_temp_free_i32(tmp2);
b75263d6 5796 }
9ee6e8bb 5797 } else if (op == 10) {
cc13115b
PM
5798 /* VSHLL, VMOVL */
5799 if (q || (rd & 1)) {
9ee6e8bb 5800 return 1;
cc13115b 5801 }
ad69471c
PB
5802 tmp = neon_load_reg(rm, 0);
5803 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5804 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5805 if (pass == 1)
5806 tmp = tmp2;
5807
5808 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5809
9ee6e8bb
PB
5810 if (shift != 0) {
5811 /* The shift is less than the width of the source
ad69471c
PB
5812 type, so we can just shift the whole register. */
5813 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5814 /* Widen the result of shift: we need to clear
5815 * the potential overflow bits resulting from
5816 * left bits of the narrow input appearing as
5817 * right bits of left the neighbour narrow
5818 * input. */
ad69471c
PB
5819 if (size < 2 || !u) {
5820 uint64_t imm64;
5821 if (size == 0) {
5822 imm = (0xffu >> (8 - shift));
5823 imm |= imm << 16;
acdf01ef 5824 } else if (size == 1) {
ad69471c 5825 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5826 } else {
5827 /* size == 2 */
5828 imm = 0xffffffff >> (32 - shift);
5829 }
5830 if (size < 2) {
5831 imm64 = imm | (((uint64_t)imm) << 32);
5832 } else {
5833 imm64 = imm;
9ee6e8bb 5834 }
acdf01ef 5835 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5836 }
5837 }
ad69471c 5838 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5839 }
f73534a5 5840 } else if (op >= 14) {
9ee6e8bb 5841 /* VCVT fixed-point. */
cc13115b
PM
5842 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5843 return 1;
5844 }
f73534a5
PM
5845 /* We have already masked out the must-be-1 top bit of imm6,
5846 * hence this 32-shift where the ARM ARM has 64-imm6.
5847 */
5848 shift = 32 - shift;
9ee6e8bb 5849 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5850 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5851 if (!(op & 1)) {
9ee6e8bb 5852 if (u)
5500b06c 5853 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5854 else
5500b06c 5855 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5856 } else {
5857 if (u)
5500b06c 5858 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5859 else
5500b06c 5860 gen_vfp_tosl(0, shift, 1);
2c0262af 5861 }
4373f3ce 5862 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5863 }
5864 } else {
9ee6e8bb
PB
5865 return 1;
5866 }
5867 } else { /* (insn & 0x00380080) == 0 */
5868 int invert;
7d80fee5
PM
5869 if (q && (rd & 1)) {
5870 return 1;
5871 }
9ee6e8bb
PB
5872
5873 op = (insn >> 8) & 0xf;
5874 /* One register and immediate. */
5875 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5876 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5877 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5878 * We choose to not special-case this and will behave as if a
5879 * valid constant encoding of 0 had been given.
5880 */
9ee6e8bb
PB
5881 switch (op) {
5882 case 0: case 1:
5883 /* no-op */
5884 break;
5885 case 2: case 3:
5886 imm <<= 8;
5887 break;
5888 case 4: case 5:
5889 imm <<= 16;
5890 break;
5891 case 6: case 7:
5892 imm <<= 24;
5893 break;
5894 case 8: case 9:
5895 imm |= imm << 16;
5896 break;
5897 case 10: case 11:
5898 imm = (imm << 8) | (imm << 24);
5899 break;
5900 case 12:
8e31209e 5901 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5902 break;
5903 case 13:
5904 imm = (imm << 16) | 0xffff;
5905 break;
5906 case 14:
5907 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5908 if (invert)
5909 imm = ~imm;
5910 break;
5911 case 15:
7d80fee5
PM
5912 if (invert) {
5913 return 1;
5914 }
9ee6e8bb
PB
5915 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5916 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5917 break;
5918 }
5919 if (invert)
5920 imm = ~imm;
5921
9ee6e8bb
PB
5922 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5923 if (op & 1 && op < 12) {
ad69471c 5924 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5925 if (invert) {
5926 /* The immediate value has already been inverted, so
5927 BIC becomes AND. */
ad69471c 5928 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5929 } else {
ad69471c 5930 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5931 }
9ee6e8bb 5932 } else {
ad69471c 5933 /* VMOV, VMVN. */
7d1b0095 5934 tmp = tcg_temp_new_i32();
9ee6e8bb 5935 if (op == 14 && invert) {
a5a14945 5936 int n;
ad69471c
PB
5937 uint32_t val;
5938 val = 0;
9ee6e8bb
PB
5939 for (n = 0; n < 4; n++) {
5940 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5941 val |= 0xff << (n * 8);
9ee6e8bb 5942 }
ad69471c
PB
5943 tcg_gen_movi_i32(tmp, val);
5944 } else {
5945 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5946 }
9ee6e8bb 5947 }
ad69471c 5948 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5949 }
5950 }
e4b3861d 5951 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5952 if (size != 3) {
5953 op = (insn >> 8) & 0xf;
5954 if ((insn & (1 << 6)) == 0) {
5955 /* Three registers of different lengths. */
5956 int src1_wide;
5957 int src2_wide;
5958 int prewiden;
526d0096
PM
5959 /* undefreq: bit 0 : UNDEF if size == 0
5960 * bit 1 : UNDEF if size == 1
5961 * bit 2 : UNDEF if size == 2
5962 * bit 3 : UNDEF if U == 1
5963 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
5964 */
5965 int undefreq;
5966 /* prewiden, src1_wide, src2_wide, undefreq */
5967 static const int neon_3reg_wide[16][4] = {
5968 {1, 0, 0, 0}, /* VADDL */
5969 {1, 1, 0, 0}, /* VADDW */
5970 {1, 0, 0, 0}, /* VSUBL */
5971 {1, 1, 0, 0}, /* VSUBW */
5972 {0, 1, 1, 0}, /* VADDHN */
5973 {0, 0, 0, 0}, /* VABAL */
5974 {0, 1, 1, 0}, /* VSUBHN */
5975 {0, 0, 0, 0}, /* VABDL */
5976 {0, 0, 0, 0}, /* VMLAL */
526d0096 5977 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 5978 {0, 0, 0, 0}, /* VMLSL */
526d0096 5979 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 5980 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 5981 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 5982 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 5983 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5984 };
5985
5986 prewiden = neon_3reg_wide[op][0];
5987 src1_wide = neon_3reg_wide[op][1];
5988 src2_wide = neon_3reg_wide[op][2];
695272dc 5989 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5990
526d0096
PM
5991 if ((undefreq & (1 << size)) ||
5992 ((undefreq & 8) && u)) {
695272dc
PM
5993 return 1;
5994 }
5995 if ((src1_wide && (rn & 1)) ||
5996 (src2_wide && (rm & 1)) ||
5997 (!src2_wide && (rd & 1))) {
ad69471c 5998 return 1;
695272dc 5999 }
ad69471c 6000
4e624eda
PM
6001 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6002 * outside the loop below as it only performs a single pass.
6003 */
6004 if (op == 14 && size == 2) {
6005 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6006
6007 if (!arm_feature(env, ARM_FEATURE_V8_PMULL)) {
6008 return 1;
6009 }
6010 tcg_rn = tcg_temp_new_i64();
6011 tcg_rm = tcg_temp_new_i64();
6012 tcg_rd = tcg_temp_new_i64();
6013 neon_load_reg64(tcg_rn, rn);
6014 neon_load_reg64(tcg_rm, rm);
6015 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6016 neon_store_reg64(tcg_rd, rd);
6017 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6018 neon_store_reg64(tcg_rd, rd + 1);
6019 tcg_temp_free_i64(tcg_rn);
6020 tcg_temp_free_i64(tcg_rm);
6021 tcg_temp_free_i64(tcg_rd);
6022 return 0;
6023 }
6024
9ee6e8bb
PB
6025 /* Avoid overlapping operands. Wide source operands are
6026 always aligned so will never overlap with wide
6027 destinations in problematic ways. */
8f8e3aa4 6028 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6029 tmp = neon_load_reg(rm, 1);
6030 neon_store_scratch(2, tmp);
8f8e3aa4 6031 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6032 tmp = neon_load_reg(rn, 1);
6033 neon_store_scratch(2, tmp);
9ee6e8bb 6034 }
39d5492a 6035 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6036 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6037 if (src1_wide) {
6038 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6039 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6040 } else {
ad69471c 6041 if (pass == 1 && rd == rn) {
dd8fbd78 6042 tmp = neon_load_scratch(2);
9ee6e8bb 6043 } else {
ad69471c
PB
6044 tmp = neon_load_reg(rn, pass);
6045 }
6046 if (prewiden) {
6047 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6048 }
6049 }
ad69471c
PB
6050 if (src2_wide) {
6051 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6052 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6053 } else {
ad69471c 6054 if (pass == 1 && rd == rm) {
dd8fbd78 6055 tmp2 = neon_load_scratch(2);
9ee6e8bb 6056 } else {
ad69471c
PB
6057 tmp2 = neon_load_reg(rm, pass);
6058 }
6059 if (prewiden) {
6060 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6061 }
9ee6e8bb
PB
6062 }
6063 switch (op) {
6064 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6065 gen_neon_addl(size);
9ee6e8bb 6066 break;
79b0e534 6067 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6068 gen_neon_subl(size);
9ee6e8bb
PB
6069 break;
6070 case 5: case 7: /* VABAL, VABDL */
6071 switch ((size << 1) | u) {
ad69471c
PB
6072 case 0:
6073 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6074 break;
6075 case 1:
6076 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6077 break;
6078 case 2:
6079 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6080 break;
6081 case 3:
6082 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6083 break;
6084 case 4:
6085 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6086 break;
6087 case 5:
6088 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6089 break;
9ee6e8bb
PB
6090 default: abort();
6091 }
7d1b0095
PM
6092 tcg_temp_free_i32(tmp2);
6093 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6094 break;
6095 case 8: case 9: case 10: case 11: case 12: case 13:
6096 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6097 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6098 break;
6099 case 14: /* Polynomial VMULL */
e5ca24cb 6100 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6101 tcg_temp_free_i32(tmp2);
6102 tcg_temp_free_i32(tmp);
e5ca24cb 6103 break;
695272dc
PM
6104 default: /* 15 is RESERVED: caught earlier */
6105 abort();
9ee6e8bb 6106 }
ebcd88ce
PM
6107 if (op == 13) {
6108 /* VQDMULL */
6109 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6110 neon_store_reg64(cpu_V0, rd + pass);
6111 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6112 /* Accumulate. */
ebcd88ce 6113 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6114 switch (op) {
4dc064e6
PM
6115 case 10: /* VMLSL */
6116 gen_neon_negl(cpu_V0, size);
6117 /* Fall through */
6118 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6119 gen_neon_addl(size);
9ee6e8bb
PB
6120 break;
6121 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6122 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6123 if (op == 11) {
6124 gen_neon_negl(cpu_V0, size);
6125 }
ad69471c
PB
6126 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6127 break;
9ee6e8bb
PB
6128 default:
6129 abort();
6130 }
ad69471c 6131 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6132 } else if (op == 4 || op == 6) {
6133 /* Narrowing operation. */
7d1b0095 6134 tmp = tcg_temp_new_i32();
79b0e534 6135 if (!u) {
9ee6e8bb 6136 switch (size) {
ad69471c
PB
6137 case 0:
6138 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6139 break;
6140 case 1:
6141 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6142 break;
6143 case 2:
6144 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6145 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6146 break;
9ee6e8bb
PB
6147 default: abort();
6148 }
6149 } else {
6150 switch (size) {
ad69471c
PB
6151 case 0:
6152 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6153 break;
6154 case 1:
6155 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6156 break;
6157 case 2:
6158 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6159 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6160 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6161 break;
9ee6e8bb
PB
6162 default: abort();
6163 }
6164 }
ad69471c
PB
6165 if (pass == 0) {
6166 tmp3 = tmp;
6167 } else {
6168 neon_store_reg(rd, 0, tmp3);
6169 neon_store_reg(rd, 1, tmp);
6170 }
9ee6e8bb
PB
6171 } else {
6172 /* Write back the result. */
ad69471c 6173 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6174 }
6175 }
6176 } else {
3e3326df
PM
6177 /* Two registers and a scalar. NB that for ops of this form
6178 * the ARM ARM labels bit 24 as Q, but it is in our variable
6179 * 'u', not 'q'.
6180 */
6181 if (size == 0) {
6182 return 1;
6183 }
9ee6e8bb 6184 switch (op) {
9ee6e8bb 6185 case 1: /* Float VMLA scalar */
9ee6e8bb 6186 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6187 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6188 if (size == 1) {
6189 return 1;
6190 }
6191 /* fall through */
6192 case 0: /* Integer VMLA scalar */
6193 case 4: /* Integer VMLS scalar */
6194 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6195 case 12: /* VQDMULH scalar */
6196 case 13: /* VQRDMULH scalar */
3e3326df
PM
6197 if (u && ((rd | rn) & 1)) {
6198 return 1;
6199 }
dd8fbd78
FN
6200 tmp = neon_get_scalar(size, rm);
6201 neon_store_scratch(0, tmp);
9ee6e8bb 6202 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6203 tmp = neon_load_scratch(0);
6204 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6205 if (op == 12) {
6206 if (size == 1) {
02da0b2d 6207 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6208 } else {
02da0b2d 6209 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6210 }
6211 } else if (op == 13) {
6212 if (size == 1) {
02da0b2d 6213 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6214 } else {
02da0b2d 6215 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6216 }
6217 } else if (op & 1) {
aa47cfdd
PM
6218 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6219 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6220 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6221 } else {
6222 switch (size) {
dd8fbd78
FN
6223 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6224 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6225 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6226 default: abort();
9ee6e8bb
PB
6227 }
6228 }
7d1b0095 6229 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6230 if (op < 8) {
6231 /* Accumulate. */
dd8fbd78 6232 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6233 switch (op) {
6234 case 0:
dd8fbd78 6235 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6236 break;
6237 case 1:
aa47cfdd
PM
6238 {
6239 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6240 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6241 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6242 break;
aa47cfdd 6243 }
9ee6e8bb 6244 case 4:
dd8fbd78 6245 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6246 break;
6247 case 5:
aa47cfdd
PM
6248 {
6249 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6250 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6251 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6252 break;
aa47cfdd 6253 }
9ee6e8bb
PB
6254 default:
6255 abort();
6256 }
7d1b0095 6257 tcg_temp_free_i32(tmp2);
9ee6e8bb 6258 }
dd8fbd78 6259 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6260 }
6261 break;
9ee6e8bb 6262 case 3: /* VQDMLAL scalar */
9ee6e8bb 6263 case 7: /* VQDMLSL scalar */
9ee6e8bb 6264 case 11: /* VQDMULL scalar */
3e3326df 6265 if (u == 1) {
ad69471c 6266 return 1;
3e3326df
PM
6267 }
6268 /* fall through */
6269 case 2: /* VMLAL sclar */
6270 case 6: /* VMLSL scalar */
6271 case 10: /* VMULL scalar */
6272 if (rd & 1) {
6273 return 1;
6274 }
dd8fbd78 6275 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6276 /* We need a copy of tmp2 because gen_neon_mull
6277 * deletes it during pass 0. */
7d1b0095 6278 tmp4 = tcg_temp_new_i32();
c6067f04 6279 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6280 tmp3 = neon_load_reg(rn, 1);
ad69471c 6281
9ee6e8bb 6282 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6283 if (pass == 0) {
6284 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6285 } else {
dd8fbd78 6286 tmp = tmp3;
c6067f04 6287 tmp2 = tmp4;
9ee6e8bb 6288 }
ad69471c 6289 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6290 if (op != 11) {
6291 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6292 }
9ee6e8bb 6293 switch (op) {
4dc064e6
PM
6294 case 6:
6295 gen_neon_negl(cpu_V0, size);
6296 /* Fall through */
6297 case 2:
ad69471c 6298 gen_neon_addl(size);
9ee6e8bb
PB
6299 break;
6300 case 3: case 7:
ad69471c 6301 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6302 if (op == 7) {
6303 gen_neon_negl(cpu_V0, size);
6304 }
ad69471c 6305 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6306 break;
6307 case 10:
6308 /* no-op */
6309 break;
6310 case 11:
ad69471c 6311 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6312 break;
6313 default:
6314 abort();
6315 }
ad69471c 6316 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6317 }
dd8fbd78 6318
dd8fbd78 6319
9ee6e8bb
PB
6320 break;
6321 default: /* 14 and 15 are RESERVED */
6322 return 1;
6323 }
6324 }
6325 } else { /* size == 3 */
6326 if (!u) {
6327 /* Extract. */
9ee6e8bb 6328 imm = (insn >> 8) & 0xf;
ad69471c
PB
6329
6330 if (imm > 7 && !q)
6331 return 1;
6332
52579ea1
PM
6333 if (q && ((rd | rn | rm) & 1)) {
6334 return 1;
6335 }
6336
ad69471c
PB
6337 if (imm == 0) {
6338 neon_load_reg64(cpu_V0, rn);
6339 if (q) {
6340 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6341 }
ad69471c
PB
6342 } else if (imm == 8) {
6343 neon_load_reg64(cpu_V0, rn + 1);
6344 if (q) {
6345 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6346 }
ad69471c 6347 } else if (q) {
a7812ae4 6348 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6349 if (imm < 8) {
6350 neon_load_reg64(cpu_V0, rn);
a7812ae4 6351 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6352 } else {
6353 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6354 neon_load_reg64(tmp64, rm);
ad69471c
PB
6355 }
6356 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6357 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6358 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6359 if (imm < 8) {
6360 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6361 } else {
ad69471c
PB
6362 neon_load_reg64(cpu_V1, rm + 1);
6363 imm -= 8;
9ee6e8bb 6364 }
ad69471c 6365 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6366 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6367 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6368 tcg_temp_free_i64(tmp64);
ad69471c 6369 } else {
a7812ae4 6370 /* BUGFIX */
ad69471c 6371 neon_load_reg64(cpu_V0, rn);
a7812ae4 6372 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6373 neon_load_reg64(cpu_V1, rm);
a7812ae4 6374 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6375 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6376 }
6377 neon_store_reg64(cpu_V0, rd);
6378 if (q) {
6379 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6380 }
6381 } else if ((insn & (1 << 11)) == 0) {
6382 /* Two register misc. */
6383 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6384 size = (insn >> 18) & 3;
600b828c
PM
6385 /* UNDEF for unknown op values and bad op-size combinations */
6386 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6387 return 1;
6388 }
fc2a9b37
PM
6389 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6390 q && ((rm | rd) & 1)) {
6391 return 1;
6392 }
9ee6e8bb 6393 switch (op) {
600b828c 6394 case NEON_2RM_VREV64:
9ee6e8bb 6395 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6396 tmp = neon_load_reg(rm, pass * 2);
6397 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6398 switch (size) {
dd8fbd78
FN
6399 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6400 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6401 case 2: /* no-op */ break;
6402 default: abort();
6403 }
dd8fbd78 6404 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6405 if (size == 2) {
dd8fbd78 6406 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6407 } else {
9ee6e8bb 6408 switch (size) {
dd8fbd78
FN
6409 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6410 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6411 default: abort();
6412 }
dd8fbd78 6413 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6414 }
6415 }
6416 break;
600b828c
PM
6417 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6418 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6419 for (pass = 0; pass < q + 1; pass++) {
6420 tmp = neon_load_reg(rm, pass * 2);
6421 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6422 tmp = neon_load_reg(rm, pass * 2 + 1);
6423 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6424 switch (size) {
6425 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6426 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6427 case 2: tcg_gen_add_i64(CPU_V001); break;
6428 default: abort();
6429 }
600b828c 6430 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6431 /* Accumulate. */
ad69471c
PB
6432 neon_load_reg64(cpu_V1, rd + pass);
6433 gen_neon_addl(size);
9ee6e8bb 6434 }
ad69471c 6435 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6436 }
6437 break;
600b828c 6438 case NEON_2RM_VTRN:
9ee6e8bb 6439 if (size == 2) {
a5a14945 6440 int n;
9ee6e8bb 6441 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6442 tmp = neon_load_reg(rm, n);
6443 tmp2 = neon_load_reg(rd, n + 1);
6444 neon_store_reg(rm, n, tmp2);
6445 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6446 }
6447 } else {
6448 goto elementwise;
6449 }
6450 break;
600b828c 6451 case NEON_2RM_VUZP:
02acedf9 6452 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6453 return 1;
9ee6e8bb
PB
6454 }
6455 break;
600b828c 6456 case NEON_2RM_VZIP:
d68a6f3a 6457 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6458 return 1;
9ee6e8bb
PB
6459 }
6460 break;
600b828c
PM
6461 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6462 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6463 if (rm & 1) {
6464 return 1;
6465 }
39d5492a 6466 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6467 for (pass = 0; pass < 2; pass++) {
ad69471c 6468 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6469 tmp = tcg_temp_new_i32();
600b828c
PM
6470 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6471 tmp, cpu_V0);
ad69471c
PB
6472 if (pass == 0) {
6473 tmp2 = tmp;
6474 } else {
6475 neon_store_reg(rd, 0, tmp2);
6476 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6477 }
9ee6e8bb
PB
6478 }
6479 break;
600b828c 6480 case NEON_2RM_VSHLL:
fc2a9b37 6481 if (q || (rd & 1)) {
9ee6e8bb 6482 return 1;
600b828c 6483 }
ad69471c
PB
6484 tmp = neon_load_reg(rm, 0);
6485 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6486 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6487 if (pass == 1)
6488 tmp = tmp2;
6489 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6490 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6491 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6492 }
6493 break;
600b828c 6494 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
6495 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6496 q || (rm & 1)) {
6497 return 1;
6498 }
7d1b0095
PM
6499 tmp = tcg_temp_new_i32();
6500 tmp2 = tcg_temp_new_i32();
60011498 6501 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6502 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6503 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6504 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6505 tcg_gen_shli_i32(tmp2, tmp2, 16);
6506 tcg_gen_or_i32(tmp2, tmp2, tmp);
6507 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6508 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6509 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6510 neon_store_reg(rd, 0, tmp2);
7d1b0095 6511 tmp2 = tcg_temp_new_i32();
2d981da7 6512 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6513 tcg_gen_shli_i32(tmp2, tmp2, 16);
6514 tcg_gen_or_i32(tmp2, tmp2, tmp);
6515 neon_store_reg(rd, 1, tmp2);
7d1b0095 6516 tcg_temp_free_i32(tmp);
60011498 6517 break;
600b828c 6518 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6519 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6520 q || (rd & 1)) {
6521 return 1;
6522 }
7d1b0095 6523 tmp3 = tcg_temp_new_i32();
60011498
PB
6524 tmp = neon_load_reg(rm, 0);
6525 tmp2 = neon_load_reg(rm, 1);
6526 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6527 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6528 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6529 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6530 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6531 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6532 tcg_temp_free_i32(tmp);
60011498 6533 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6534 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6535 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6536 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6537 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6538 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6539 tcg_temp_free_i32(tmp2);
6540 tcg_temp_free_i32(tmp3);
60011498 6541 break;
9d935509
AB
6542 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6543 if (!arm_feature(env, ARM_FEATURE_V8_AES)
6544 || ((rm | rd) & 1)) {
6545 return 1;
6546 }
6547 tmp = tcg_const_i32(rd);
6548 tmp2 = tcg_const_i32(rm);
6549
6550 /* Bit 6 is the lowest opcode bit; it distinguishes between
6551 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6552 */
6553 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6554
6555 if (op == NEON_2RM_AESE) {
6556 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6557 } else {
6558 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6559 }
6560 tcg_temp_free_i32(tmp);
6561 tcg_temp_free_i32(tmp2);
6562 tcg_temp_free_i32(tmp3);
6563 break;
f1ecb913
AB
6564 case NEON_2RM_SHA1H:
6565 if (!arm_feature(env, ARM_FEATURE_V8_SHA1)
6566 || ((rm | rd) & 1)) {
6567 return 1;
6568 }
6569 tmp = tcg_const_i32(rd);
6570 tmp2 = tcg_const_i32(rm);
6571
6572 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6573
6574 tcg_temp_free_i32(tmp);
6575 tcg_temp_free_i32(tmp2);
6576 break;
6577 case NEON_2RM_SHA1SU1:
6578 if ((rm | rd) & 1) {
6579 return 1;
6580 }
6581 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6582 if (q) {
6583 if (!arm_feature(env, ARM_FEATURE_V8_SHA256)) {
6584 return 1;
6585 }
6586 } else if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
6587 return 1;
6588 }
6589 tmp = tcg_const_i32(rd);
6590 tmp2 = tcg_const_i32(rm);
6591 if (q) {
6592 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6593 } else {
6594 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6595 }
6596 tcg_temp_free_i32(tmp);
6597 tcg_temp_free_i32(tmp2);
6598 break;
9ee6e8bb
PB
6599 default:
6600 elementwise:
6601 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6602 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6603 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6604 neon_reg_offset(rm, pass));
39d5492a 6605 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6606 } else {
dd8fbd78 6607 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6608 }
6609 switch (op) {
600b828c 6610 case NEON_2RM_VREV32:
9ee6e8bb 6611 switch (size) {
dd8fbd78
FN
6612 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6613 case 1: gen_swap_half(tmp); break;
600b828c 6614 default: abort();
9ee6e8bb
PB
6615 }
6616 break;
600b828c 6617 case NEON_2RM_VREV16:
dd8fbd78 6618 gen_rev16(tmp);
9ee6e8bb 6619 break;
600b828c 6620 case NEON_2RM_VCLS:
9ee6e8bb 6621 switch (size) {
dd8fbd78
FN
6622 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6623 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6624 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6625 default: abort();
9ee6e8bb
PB
6626 }
6627 break;
600b828c 6628 case NEON_2RM_VCLZ:
9ee6e8bb 6629 switch (size) {
dd8fbd78
FN
6630 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6631 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6632 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6633 default: abort();
9ee6e8bb
PB
6634 }
6635 break;
600b828c 6636 case NEON_2RM_VCNT:
dd8fbd78 6637 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6638 break;
600b828c 6639 case NEON_2RM_VMVN:
dd8fbd78 6640 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6641 break;
600b828c 6642 case NEON_2RM_VQABS:
9ee6e8bb 6643 switch (size) {
02da0b2d
PM
6644 case 0:
6645 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6646 break;
6647 case 1:
6648 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6649 break;
6650 case 2:
6651 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6652 break;
600b828c 6653 default: abort();
9ee6e8bb
PB
6654 }
6655 break;
600b828c 6656 case NEON_2RM_VQNEG:
9ee6e8bb 6657 switch (size) {
02da0b2d
PM
6658 case 0:
6659 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6660 break;
6661 case 1:
6662 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6663 break;
6664 case 2:
6665 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6666 break;
600b828c 6667 default: abort();
9ee6e8bb
PB
6668 }
6669 break;
600b828c 6670 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6671 tmp2 = tcg_const_i32(0);
9ee6e8bb 6672 switch(size) {
dd8fbd78
FN
6673 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6674 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6675 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6676 default: abort();
9ee6e8bb 6677 }
39d5492a 6678 tcg_temp_free_i32(tmp2);
600b828c 6679 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6680 tcg_gen_not_i32(tmp, tmp);
600b828c 6681 }
9ee6e8bb 6682 break;
600b828c 6683 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6684 tmp2 = tcg_const_i32(0);
9ee6e8bb 6685 switch(size) {
dd8fbd78
FN
6686 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6687 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6688 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6689 default: abort();
9ee6e8bb 6690 }
39d5492a 6691 tcg_temp_free_i32(tmp2);
600b828c 6692 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6693 tcg_gen_not_i32(tmp, tmp);
600b828c 6694 }
9ee6e8bb 6695 break;
600b828c 6696 case NEON_2RM_VCEQ0:
dd8fbd78 6697 tmp2 = tcg_const_i32(0);
9ee6e8bb 6698 switch(size) {
dd8fbd78
FN
6699 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6700 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6701 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6702 default: abort();
9ee6e8bb 6703 }
39d5492a 6704 tcg_temp_free_i32(tmp2);
9ee6e8bb 6705 break;
600b828c 6706 case NEON_2RM_VABS:
9ee6e8bb 6707 switch(size) {
dd8fbd78
FN
6708 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6709 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6710 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6711 default: abort();
9ee6e8bb
PB
6712 }
6713 break;
600b828c 6714 case NEON_2RM_VNEG:
dd8fbd78
FN
6715 tmp2 = tcg_const_i32(0);
6716 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6717 tcg_temp_free_i32(tmp2);
9ee6e8bb 6718 break;
600b828c 6719 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6720 {
6721 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6722 tmp2 = tcg_const_i32(0);
aa47cfdd 6723 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6724 tcg_temp_free_i32(tmp2);
aa47cfdd 6725 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6726 break;
aa47cfdd 6727 }
600b828c 6728 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6729 {
6730 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6731 tmp2 = tcg_const_i32(0);
aa47cfdd 6732 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6733 tcg_temp_free_i32(tmp2);
aa47cfdd 6734 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6735 break;
aa47cfdd 6736 }
600b828c 6737 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6738 {
6739 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6740 tmp2 = tcg_const_i32(0);
aa47cfdd 6741 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6742 tcg_temp_free_i32(tmp2);
aa47cfdd 6743 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6744 break;
aa47cfdd 6745 }
600b828c 6746 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6747 {
6748 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6749 tmp2 = tcg_const_i32(0);
aa47cfdd 6750 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6751 tcg_temp_free_i32(tmp2);
aa47cfdd 6752 tcg_temp_free_ptr(fpstatus);
0e326109 6753 break;
aa47cfdd 6754 }
600b828c 6755 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6756 {
6757 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6758 tmp2 = tcg_const_i32(0);
aa47cfdd 6759 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6760 tcg_temp_free_i32(tmp2);
aa47cfdd 6761 tcg_temp_free_ptr(fpstatus);
0e326109 6762 break;
aa47cfdd 6763 }
600b828c 6764 case NEON_2RM_VABS_F:
4373f3ce 6765 gen_vfp_abs(0);
9ee6e8bb 6766 break;
600b828c 6767 case NEON_2RM_VNEG_F:
4373f3ce 6768 gen_vfp_neg(0);
9ee6e8bb 6769 break;
600b828c 6770 case NEON_2RM_VSWP:
dd8fbd78
FN
6771 tmp2 = neon_load_reg(rd, pass);
6772 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6773 break;
600b828c 6774 case NEON_2RM_VTRN:
dd8fbd78 6775 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6776 switch (size) {
dd8fbd78
FN
6777 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6778 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6779 default: abort();
9ee6e8bb 6780 }
dd8fbd78 6781 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6782 break;
34f7b0a2
WN
6783 case NEON_2RM_VRINTN:
6784 case NEON_2RM_VRINTA:
6785 case NEON_2RM_VRINTM:
6786 case NEON_2RM_VRINTP:
6787 case NEON_2RM_VRINTZ:
6788 {
6789 TCGv_i32 tcg_rmode;
6790 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6791 int rmode;
6792
6793 if (op == NEON_2RM_VRINTZ) {
6794 rmode = FPROUNDING_ZERO;
6795 } else {
6796 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6797 }
6798
6799 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6800 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6801 cpu_env);
6802 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6803 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6804 cpu_env);
6805 tcg_temp_free_ptr(fpstatus);
6806 tcg_temp_free_i32(tcg_rmode);
6807 break;
6808 }
2ce70625
WN
6809 case NEON_2RM_VRINTX:
6810 {
6811 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6812 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6813 tcg_temp_free_ptr(fpstatus);
6814 break;
6815 }
901ad525
WN
6816 case NEON_2RM_VCVTAU:
6817 case NEON_2RM_VCVTAS:
6818 case NEON_2RM_VCVTNU:
6819 case NEON_2RM_VCVTNS:
6820 case NEON_2RM_VCVTPU:
6821 case NEON_2RM_VCVTPS:
6822 case NEON_2RM_VCVTMU:
6823 case NEON_2RM_VCVTMS:
6824 {
6825 bool is_signed = !extract32(insn, 7, 1);
6826 TCGv_ptr fpst = get_fpstatus_ptr(1);
6827 TCGv_i32 tcg_rmode, tcg_shift;
6828 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6829
6830 tcg_shift = tcg_const_i32(0);
6831 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6832 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6833 cpu_env);
6834
6835 if (is_signed) {
6836 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6837 tcg_shift, fpst);
6838 } else {
6839 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6840 tcg_shift, fpst);
6841 }
6842
6843 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6844 cpu_env);
6845 tcg_temp_free_i32(tcg_rmode);
6846 tcg_temp_free_i32(tcg_shift);
6847 tcg_temp_free_ptr(fpst);
6848 break;
6849 }
600b828c 6850 case NEON_2RM_VRECPE:
b6d4443a
AB
6851 {
6852 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6853 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6854 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6855 break;
b6d4443a 6856 }
600b828c 6857 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6858 {
6859 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6860 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6861 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6862 break;
c2fb418e 6863 }
600b828c 6864 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6865 {
6866 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6867 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6868 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6869 break;
b6d4443a 6870 }
600b828c 6871 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6872 {
6873 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6874 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6875 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6876 break;
c2fb418e 6877 }
600b828c 6878 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6879 gen_vfp_sito(0, 1);
9ee6e8bb 6880 break;
600b828c 6881 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6882 gen_vfp_uito(0, 1);
9ee6e8bb 6883 break;
600b828c 6884 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6885 gen_vfp_tosiz(0, 1);
9ee6e8bb 6886 break;
600b828c 6887 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6888 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6889 break;
6890 default:
600b828c
PM
6891 /* Reserved op values were caught by the
6892 * neon_2rm_sizes[] check earlier.
6893 */
6894 abort();
9ee6e8bb 6895 }
600b828c 6896 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6897 tcg_gen_st_f32(cpu_F0s, cpu_env,
6898 neon_reg_offset(rd, pass));
9ee6e8bb 6899 } else {
dd8fbd78 6900 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6901 }
6902 }
6903 break;
6904 }
6905 } else if ((insn & (1 << 10)) == 0) {
6906 /* VTBL, VTBX. */
56907d77
PM
6907 int n = ((insn >> 8) & 3) + 1;
6908 if ((rn + n) > 32) {
6909 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6910 * helper function running off the end of the register file.
6911 */
6912 return 1;
6913 }
6914 n <<= 3;
9ee6e8bb 6915 if (insn & (1 << 6)) {
8f8e3aa4 6916 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6917 } else {
7d1b0095 6918 tmp = tcg_temp_new_i32();
8f8e3aa4 6919 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6920 }
8f8e3aa4 6921 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6922 tmp4 = tcg_const_i32(rn);
6923 tmp5 = tcg_const_i32(n);
9ef39277 6924 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6925 tcg_temp_free_i32(tmp);
9ee6e8bb 6926 if (insn & (1 << 6)) {
8f8e3aa4 6927 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6928 } else {
7d1b0095 6929 tmp = tcg_temp_new_i32();
8f8e3aa4 6930 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6931 }
8f8e3aa4 6932 tmp3 = neon_load_reg(rm, 1);
9ef39277 6933 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6934 tcg_temp_free_i32(tmp5);
6935 tcg_temp_free_i32(tmp4);
8f8e3aa4 6936 neon_store_reg(rd, 0, tmp2);
3018f259 6937 neon_store_reg(rd, 1, tmp3);
7d1b0095 6938 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6939 } else if ((insn & 0x380) == 0) {
6940 /* VDUP */
133da6aa
JR
6941 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6942 return 1;
6943 }
9ee6e8bb 6944 if (insn & (1 << 19)) {
dd8fbd78 6945 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6946 } else {
dd8fbd78 6947 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6948 }
6949 if (insn & (1 << 16)) {
dd8fbd78 6950 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6951 } else if (insn & (1 << 17)) {
6952 if ((insn >> 18) & 1)
dd8fbd78 6953 gen_neon_dup_high16(tmp);
9ee6e8bb 6954 else
dd8fbd78 6955 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6956 }
6957 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6958 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6959 tcg_gen_mov_i32(tmp2, tmp);
6960 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6961 }
7d1b0095 6962 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6963 } else {
6964 return 1;
6965 }
6966 }
6967 }
6968 return 0;
6969}
6970
0ecb72a5 6971static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6972{
4b6a83fb
PM
6973 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6974 const ARMCPRegInfo *ri;
9ee6e8bb
PB
6975
6976 cpnum = (insn >> 8) & 0xf;
6977 if (arm_feature(env, ARM_FEATURE_XSCALE)
6978 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6979 return 1;
6980
4b6a83fb 6981 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6982 switch (cpnum) {
6983 case 0:
6984 case 1:
6985 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6986 return disas_iwmmxt_insn(env, s, insn);
6987 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6988 return disas_dsp_insn(env, s, insn);
6989 }
6990 return 1;
4b6a83fb
PM
6991 default:
6992 break;
6993 }
6994
6995 /* Otherwise treat as a generic register access */
6996 is64 = (insn & (1 << 25)) == 0;
6997 if (!is64 && ((insn & (1 << 4)) == 0)) {
6998 /* cdp */
6999 return 1;
7000 }
7001
7002 crm = insn & 0xf;
7003 if (is64) {
7004 crn = 0;
7005 opc1 = (insn >> 4) & 0xf;
7006 opc2 = 0;
7007 rt2 = (insn >> 16) & 0xf;
7008 } else {
7009 crn = (insn >> 16) & 0xf;
7010 opc1 = (insn >> 21) & 7;
7011 opc2 = (insn >> 5) & 7;
7012 rt2 = 0;
7013 }
7014 isread = (insn >> 20) & 1;
7015 rt = (insn >> 12) & 0xf;
7016
60322b39 7017 ri = get_arm_cp_reginfo(s->cp_regs,
4b6a83fb
PM
7018 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
7019 if (ri) {
7020 /* Check access permissions */
60322b39 7021 if (!cp_access_ok(s->current_pl, ri, isread)) {
4b6a83fb
PM
7022 return 1;
7023 }
7024
f59df3f2
PM
7025 if (ri->accessfn) {
7026 /* Emit code to perform further access permissions checks at
7027 * runtime; this may result in an exception.
7028 */
7029 TCGv_ptr tmpptr;
8bcbf37c
PM
7030 TCGv_i32 tcg_syn;
7031 uint32_t syndrome;
7032
7033 /* Note that since we are an implementation which takes an
7034 * exception on a trapped conditional instruction only if the
7035 * instruction passes its condition code check, we can take
7036 * advantage of the clause in the ARM ARM that allows us to set
7037 * the COND field in the instruction to 0xE in all cases.
7038 * We could fish the actual condition out of the insn (ARM)
7039 * or the condexec bits (Thumb) but it isn't necessary.
7040 */
7041 switch (cpnum) {
7042 case 14:
7043 if (is64) {
7044 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7045 isread, s->thumb);
7046 } else {
7047 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7048 rt, isread, s->thumb);
7049 }
7050 break;
7051 case 15:
7052 if (is64) {
7053 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7054 isread, s->thumb);
7055 } else {
7056 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7057 rt, isread, s->thumb);
7058 }
7059 break;
7060 default:
7061 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7062 * so this can only happen if this is an ARMv7 or earlier CPU,
7063 * in which case the syndrome information won't actually be
7064 * guest visible.
7065 */
7066 assert(!arm_feature(env, ARM_FEATURE_V8));
7067 syndrome = syn_uncategorized();
7068 break;
7069 }
7070
f59df3f2
PM
7071 gen_set_pc_im(s, s->pc);
7072 tmpptr = tcg_const_ptr(ri);
8bcbf37c
PM
7073 tcg_syn = tcg_const_i32(syndrome);
7074 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
f59df3f2 7075 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7076 tcg_temp_free_i32(tcg_syn);
f59df3f2
PM
7077 }
7078
4b6a83fb
PM
7079 /* Handle special cases first */
7080 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7081 case ARM_CP_NOP:
7082 return 0;
7083 case ARM_CP_WFI:
7084 if (isread) {
7085 return 1;
7086 }
eaed129d 7087 gen_set_pc_im(s, s->pc);
4b6a83fb 7088 s->is_jmp = DISAS_WFI;
2bee5105 7089 return 0;
4b6a83fb
PM
7090 default:
7091 break;
7092 }
7093
2452731c
PM
7094 if (use_icount && (ri->type & ARM_CP_IO)) {
7095 gen_io_start();
7096 }
7097
4b6a83fb
PM
7098 if (isread) {
7099 /* Read */
7100 if (is64) {
7101 TCGv_i64 tmp64;
7102 TCGv_i32 tmp;
7103 if (ri->type & ARM_CP_CONST) {
7104 tmp64 = tcg_const_i64(ri->resetvalue);
7105 } else if (ri->readfn) {
7106 TCGv_ptr tmpptr;
4b6a83fb
PM
7107 tmp64 = tcg_temp_new_i64();
7108 tmpptr = tcg_const_ptr(ri);
7109 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7110 tcg_temp_free_ptr(tmpptr);
7111 } else {
7112 tmp64 = tcg_temp_new_i64();
7113 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7114 }
7115 tmp = tcg_temp_new_i32();
7116 tcg_gen_trunc_i64_i32(tmp, tmp64);
7117 store_reg(s, rt, tmp);
7118 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7119 tmp = tcg_temp_new_i32();
4b6a83fb 7120 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 7121 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7122 store_reg(s, rt2, tmp);
7123 } else {
39d5492a 7124 TCGv_i32 tmp;
4b6a83fb
PM
7125 if (ri->type & ARM_CP_CONST) {
7126 tmp = tcg_const_i32(ri->resetvalue);
7127 } else if (ri->readfn) {
7128 TCGv_ptr tmpptr;
4b6a83fb
PM
7129 tmp = tcg_temp_new_i32();
7130 tmpptr = tcg_const_ptr(ri);
7131 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7132 tcg_temp_free_ptr(tmpptr);
7133 } else {
7134 tmp = load_cpu_offset(ri->fieldoffset);
7135 }
7136 if (rt == 15) {
7137 /* Destination register of r15 for 32 bit loads sets
7138 * the condition codes from the high 4 bits of the value
7139 */
7140 gen_set_nzcv(tmp);
7141 tcg_temp_free_i32(tmp);
7142 } else {
7143 store_reg(s, rt, tmp);
7144 }
7145 }
7146 } else {
7147 /* Write */
7148 if (ri->type & ARM_CP_CONST) {
7149 /* If not forbidden by access permissions, treat as WI */
7150 return 0;
7151 }
7152
7153 if (is64) {
39d5492a 7154 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7155 TCGv_i64 tmp64 = tcg_temp_new_i64();
7156 tmplo = load_reg(s, rt);
7157 tmphi = load_reg(s, rt2);
7158 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7159 tcg_temp_free_i32(tmplo);
7160 tcg_temp_free_i32(tmphi);
7161 if (ri->writefn) {
7162 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7163 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7164 tcg_temp_free_ptr(tmpptr);
7165 } else {
7166 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7167 }
7168 tcg_temp_free_i64(tmp64);
7169 } else {
7170 if (ri->writefn) {
39d5492a 7171 TCGv_i32 tmp;
4b6a83fb 7172 TCGv_ptr tmpptr;
4b6a83fb
PM
7173 tmp = load_reg(s, rt);
7174 tmpptr = tcg_const_ptr(ri);
7175 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7176 tcg_temp_free_ptr(tmpptr);
7177 tcg_temp_free_i32(tmp);
7178 } else {
39d5492a 7179 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7180 store_cpu_offset(tmp, ri->fieldoffset);
7181 }
7182 }
2452731c
PM
7183 }
7184
7185 if (use_icount && (ri->type & ARM_CP_IO)) {
7186 /* I/O operations must end the TB here (whether read or write) */
7187 gen_io_end();
7188 gen_lookup_tb(s);
7189 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7190 /* We default to ending the TB on a coprocessor register write,
7191 * but allow this to be suppressed by the register definition
7192 * (usually only necessary to work around guest bugs).
7193 */
2452731c 7194 gen_lookup_tb(s);
4b6a83fb 7195 }
2452731c 7196
4b6a83fb
PM
7197 return 0;
7198 }
7199
626187d8
PM
7200 /* Unknown register; this might be a guest error or a QEMU
7201 * unimplemented feature.
7202 */
7203 if (is64) {
7204 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7205 "64 bit system register cp:%d opc1: %d crm:%d\n",
7206 isread ? "read" : "write", cpnum, opc1, crm);
7207 } else {
7208 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7209 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
7210 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
7211 }
7212
4a9a539f 7213 return 1;
9ee6e8bb
PB
7214}
7215
5e3f878a
PB
7216
7217/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7218static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7219{
39d5492a 7220 TCGv_i32 tmp;
7d1b0095 7221 tmp = tcg_temp_new_i32();
5e3f878a
PB
7222 tcg_gen_trunc_i64_i32(tmp, val);
7223 store_reg(s, rlow, tmp);
7d1b0095 7224 tmp = tcg_temp_new_i32();
5e3f878a
PB
7225 tcg_gen_shri_i64(val, val, 32);
7226 tcg_gen_trunc_i64_i32(tmp, val);
7227 store_reg(s, rhigh, tmp);
7228}
7229
7230/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7231static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7232{
a7812ae4 7233 TCGv_i64 tmp;
39d5492a 7234 TCGv_i32 tmp2;
5e3f878a 7235
36aa55dc 7236 /* Load value and extend to 64 bits. */
a7812ae4 7237 tmp = tcg_temp_new_i64();
5e3f878a
PB
7238 tmp2 = load_reg(s, rlow);
7239 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7240 tcg_temp_free_i32(tmp2);
5e3f878a 7241 tcg_gen_add_i64(val, val, tmp);
b75263d6 7242 tcg_temp_free_i64(tmp);
5e3f878a
PB
7243}
7244
7245/* load and add a 64-bit value from a register pair. */
a7812ae4 7246static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7247{
a7812ae4 7248 TCGv_i64 tmp;
39d5492a
PM
7249 TCGv_i32 tmpl;
7250 TCGv_i32 tmph;
5e3f878a
PB
7251
7252 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7253 tmpl = load_reg(s, rlow);
7254 tmph = load_reg(s, rhigh);
a7812ae4 7255 tmp = tcg_temp_new_i64();
36aa55dc 7256 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7257 tcg_temp_free_i32(tmpl);
7258 tcg_temp_free_i32(tmph);
5e3f878a 7259 tcg_gen_add_i64(val, val, tmp);
b75263d6 7260 tcg_temp_free_i64(tmp);
5e3f878a
PB
7261}
7262
c9f10124 7263/* Set N and Z flags from hi|lo. */
39d5492a 7264static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7265{
c9f10124
RH
7266 tcg_gen_mov_i32(cpu_NF, hi);
7267 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7268}
7269
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */

/* Generate code for LDREX/LDREXB/LDREXH/LDREXD (and the acquire
 * variants that share this path): load from @addr into @rt (and @rt2
 * for the doubleword case), and record the address and loaded value
 * in cpu_exclusive_addr/cpu_exclusive_val for the later STREX check.
 *
 * @size: log2 of the access size; 3 means a 64-bit access done as
 *        two 32-bit loads into rt (low) and rt2 (high).
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        /* For size 3 this is only the low word; high word loaded below. */
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        /* Load the high word from addr + 4 and remember the full
         * 64-bit value for the exclusive-store comparison.
         */
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Mark the monitored address; gen_clrex/STREX reset it to -1. */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7315
/* CLREX: clear the local exclusive monitor by setting the monitored
 * address to -1, a value no real address extension can produce.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7320
#ifdef CONFIG_USER_ONLY
/* User-mode STREX: we cannot do the compare-and-store atomically here,
 * so stash the operands in cpu_exclusive_test/cpu_exclusive_info and
 * raise EXCP_STREX; the cpu loop completes the operation atomically.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    /* Pack size and register numbers for the helper in the cpu loop. */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
/* System-mode STREX: succeed (Rd = 0) only if the monitored address
 * still matches and the memory still holds the value seen by the
 * matching LDREX; otherwise fail (Rd = 1). Either way the exclusive
 * monitor is cleared. @size as for gen_load_exclusive.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* First check: does the address still match the monitor? */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Second check: re-load memory and compare against the value the
     * LDREX saw, to approximate the architectural exclusive monitor.
     */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Both checks passed: perform the store(s) and report success. */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* Doubleword: second register goes to addr + 4. */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* STREX always clears the exclusive monitor, pass or fail. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
7415
81465888
PM
7416/* gen_srs:
7417 * @env: CPUARMState
7418 * @s: DisasContext
7419 * @mode: mode field from insn (which stack to store to)
7420 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7421 * @writeback: true if writeback bit set
7422 *
7423 * Generate code for the SRS (Store Return State) insn.
7424 */
7425static void gen_srs(DisasContext *s,
7426 uint32_t mode, uint32_t amode, bool writeback)
7427{
7428 int32_t offset;
7429 TCGv_i32 addr = tcg_temp_new_i32();
7430 TCGv_i32 tmp = tcg_const_i32(mode);
7431 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7432 tcg_temp_free_i32(tmp);
7433 switch (amode) {
7434 case 0: /* DA */
7435 offset = -4;
7436 break;
7437 case 1: /* IA */
7438 offset = 0;
7439 break;
7440 case 2: /* DB */
7441 offset = -8;
7442 break;
7443 case 3: /* IB */
7444 offset = 4;
7445 break;
7446 default:
7447 abort();
7448 }
7449 tcg_gen_addi_i32(addr, addr, offset);
7450 tmp = load_reg(s, 14);
c1197795 7451 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 7452 tcg_temp_free_i32(tmp);
81465888
PM
7453 tmp = load_cpu_field(spsr);
7454 tcg_gen_addi_i32(addr, addr, 4);
c1197795 7455 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 7456 tcg_temp_free_i32(tmp);
81465888
PM
7457 if (writeback) {
7458 switch (amode) {
7459 case 0:
7460 offset = -8;
7461 break;
7462 case 1:
7463 offset = 4;
7464 break;
7465 case 2:
7466 offset = -4;
7467 break;
7468 case 3:
7469 offset = 0;
7470 break;
7471 default:
7472 abort();
7473 }
7474 tcg_gen_addi_i32(addr, addr, offset);
7475 tmp = tcg_const_i32(mode);
7476 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7477 tcg_temp_free_i32(tmp);
7478 }
7479 tcg_temp_free_i32(addr);
7480}
7481
0ecb72a5 7482static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
7483{
7484 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7485 TCGv_i32 tmp;
7486 TCGv_i32 tmp2;
7487 TCGv_i32 tmp3;
7488 TCGv_i32 addr;
a7812ae4 7489 TCGv_i64 tmp64;
9ee6e8bb 7490
d31dd73e 7491 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
7492 s->pc += 4;
7493
7494 /* M variants do not implement ARM mode. */
7495 if (IS_M(env))
7496 goto illegal_op;
7497 cond = insn >> 28;
7498 if (cond == 0xf){
be5e7a76
DES
7499 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7500 * choose to UNDEF. In ARMv5 and above the space is used
7501 * for miscellaneous unconditional instructions.
7502 */
7503 ARCH(5);
7504
9ee6e8bb
PB
7505 /* Unconditional instructions. */
7506 if (((insn >> 25) & 7) == 1) {
7507 /* NEON Data processing. */
7508 if (!arm_feature(env, ARM_FEATURE_NEON))
7509 goto illegal_op;
7510
7511 if (disas_neon_data_insn(env, s, insn))
7512 goto illegal_op;
7513 return;
7514 }
7515 if ((insn & 0x0f100000) == 0x04000000) {
7516 /* NEON load/store. */
7517 if (!arm_feature(env, ARM_FEATURE_NEON))
7518 goto illegal_op;
7519
7520 if (disas_neon_ls_insn(env, s, insn))
7521 goto illegal_op;
7522 return;
7523 }
6a57f3eb
WN
7524 if ((insn & 0x0f000e10) == 0x0e000a00) {
7525 /* VFP. */
7526 if (disas_vfp_insn(env, s, insn)) {
7527 goto illegal_op;
7528 }
7529 return;
7530 }
3d185e5d
PM
7531 if (((insn & 0x0f30f000) == 0x0510f000) ||
7532 ((insn & 0x0f30f010) == 0x0710f000)) {
7533 if ((insn & (1 << 22)) == 0) {
7534 /* PLDW; v7MP */
7535 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7536 goto illegal_op;
7537 }
7538 }
7539 /* Otherwise PLD; v5TE+ */
be5e7a76 7540 ARCH(5TE);
3d185e5d
PM
7541 return;
7542 }
7543 if (((insn & 0x0f70f000) == 0x0450f000) ||
7544 ((insn & 0x0f70f010) == 0x0650f000)) {
7545 ARCH(7);
7546 return; /* PLI; V7 */
7547 }
7548 if (((insn & 0x0f700000) == 0x04100000) ||
7549 ((insn & 0x0f700010) == 0x06100000)) {
7550 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7551 goto illegal_op;
7552 }
7553 return; /* v7MP: Unallocated memory hint: must NOP */
7554 }
7555
7556 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7557 ARCH(6);
7558 /* setend */
10962fd5
PM
7559 if (((insn >> 9) & 1) != s->bswap_code) {
7560 /* Dynamic endianness switching not implemented. */
e0c270d9 7561 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7562 goto illegal_op;
7563 }
7564 return;
7565 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7566 switch ((insn >> 4) & 0xf) {
7567 case 1: /* clrex */
7568 ARCH(6K);
426f5abc 7569 gen_clrex(s);
9ee6e8bb
PB
7570 return;
7571 case 4: /* dsb */
7572 case 5: /* dmb */
7573 case 6: /* isb */
7574 ARCH(7);
7575 /* We don't emulate caches so these are a no-op. */
7576 return;
7577 default:
7578 goto illegal_op;
7579 }
7580 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7581 /* srs */
81465888 7582 if (IS_USER(s)) {
9ee6e8bb 7583 goto illegal_op;
9ee6e8bb 7584 }
81465888
PM
7585 ARCH(6);
7586 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7587 return;
ea825eee 7588 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7589 /* rfe */
c67b6b71 7590 int32_t offset;
9ee6e8bb
PB
7591 if (IS_USER(s))
7592 goto illegal_op;
7593 ARCH(6);
7594 rn = (insn >> 16) & 0xf;
b0109805 7595 addr = load_reg(s, rn);
9ee6e8bb
PB
7596 i = (insn >> 23) & 3;
7597 switch (i) {
b0109805 7598 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7599 case 1: offset = 0; break; /* IA */
7600 case 2: offset = -8; break; /* DB */
b0109805 7601 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7602 default: abort();
7603 }
7604 if (offset)
b0109805
PB
7605 tcg_gen_addi_i32(addr, addr, offset);
7606 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7607 tmp = tcg_temp_new_i32();
6ce2faf4 7608 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 7609 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7610 tmp2 = tcg_temp_new_i32();
6ce2faf4 7611 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
7612 if (insn & (1 << 21)) {
7613 /* Base writeback. */
7614 switch (i) {
b0109805 7615 case 0: offset = -8; break;
c67b6b71
FN
7616 case 1: offset = 4; break;
7617 case 2: offset = -4; break;
b0109805 7618 case 3: offset = 0; break;
9ee6e8bb
PB
7619 default: abort();
7620 }
7621 if (offset)
b0109805
PB
7622 tcg_gen_addi_i32(addr, addr, offset);
7623 store_reg(s, rn, addr);
7624 } else {
7d1b0095 7625 tcg_temp_free_i32(addr);
9ee6e8bb 7626 }
b0109805 7627 gen_rfe(s, tmp, tmp2);
c67b6b71 7628 return;
9ee6e8bb
PB
7629 } else if ((insn & 0x0e000000) == 0x0a000000) {
7630 /* branch link and change to thumb (blx <offset>) */
7631 int32_t offset;
7632
7633 val = (uint32_t)s->pc;
7d1b0095 7634 tmp = tcg_temp_new_i32();
d9ba4830
PB
7635 tcg_gen_movi_i32(tmp, val);
7636 store_reg(s, 14, tmp);
9ee6e8bb
PB
7637 /* Sign-extend the 24-bit offset */
7638 offset = (((int32_t)insn) << 8) >> 8;
7639 /* offset * 4 + bit24 * 2 + (thumb bit) */
7640 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7641 /* pipeline offset */
7642 val += 4;
be5e7a76 7643 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7644 gen_bx_im(s, val);
9ee6e8bb
PB
7645 return;
7646 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7647 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7648 /* iWMMXt register transfer. */
7649 if (env->cp15.c15_cpar & (1 << 1))
7650 if (!disas_iwmmxt_insn(env, s, insn))
7651 return;
7652 }
7653 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7654 /* Coprocessor double register transfer. */
be5e7a76 7655 ARCH(5TE);
9ee6e8bb
PB
7656 } else if ((insn & 0x0f000010) == 0x0e000010) {
7657 /* Additional coprocessor register transfer. */
7997d92f 7658 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7659 uint32_t mask;
7660 uint32_t val;
7661 /* cps (privileged) */
7662 if (IS_USER(s))
7663 return;
7664 mask = val = 0;
7665 if (insn & (1 << 19)) {
7666 if (insn & (1 << 8))
7667 mask |= CPSR_A;
7668 if (insn & (1 << 7))
7669 mask |= CPSR_I;
7670 if (insn & (1 << 6))
7671 mask |= CPSR_F;
7672 if (insn & (1 << 18))
7673 val |= mask;
7674 }
7997d92f 7675 if (insn & (1 << 17)) {
9ee6e8bb
PB
7676 mask |= CPSR_M;
7677 val |= (insn & 0x1f);
7678 }
7679 if (mask) {
2fbac54b 7680 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7681 }
7682 return;
7683 }
7684 goto illegal_op;
7685 }
7686 if (cond != 0xe) {
7687 /* if not always execute, we generate a conditional jump to
7688 next instruction */
7689 s->condlabel = gen_new_label();
39fb730a 7690 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7691 s->condjmp = 1;
7692 }
7693 if ((insn & 0x0f900000) == 0x03000000) {
7694 if ((insn & (1 << 21)) == 0) {
7695 ARCH(6T2);
7696 rd = (insn >> 12) & 0xf;
7697 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7698 if ((insn & (1 << 22)) == 0) {
7699 /* MOVW */
7d1b0095 7700 tmp = tcg_temp_new_i32();
5e3f878a 7701 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7702 } else {
7703 /* MOVT */
5e3f878a 7704 tmp = load_reg(s, rd);
86831435 7705 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7706 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7707 }
5e3f878a 7708 store_reg(s, rd, tmp);
9ee6e8bb
PB
7709 } else {
7710 if (((insn >> 12) & 0xf) != 0xf)
7711 goto illegal_op;
7712 if (((insn >> 16) & 0xf) == 0) {
7713 gen_nop_hint(s, insn & 0xff);
7714 } else {
7715 /* CPSR = immediate */
7716 val = insn & 0xff;
7717 shift = ((insn >> 8) & 0xf) * 2;
7718 if (shift)
7719 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7720 i = ((insn & (1 << 22)) != 0);
2fbac54b 7721 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
7722 goto illegal_op;
7723 }
7724 }
7725 } else if ((insn & 0x0f900000) == 0x01000000
7726 && (insn & 0x00000090) != 0x00000090) {
7727 /* miscellaneous instructions */
7728 op1 = (insn >> 21) & 3;
7729 sh = (insn >> 4) & 0xf;
7730 rm = insn & 0xf;
7731 switch (sh) {
7732 case 0x0: /* move program status register */
7733 if (op1 & 1) {
7734 /* PSR = reg */
2fbac54b 7735 tmp = load_reg(s, rm);
9ee6e8bb 7736 i = ((op1 & 2) != 0);
2fbac54b 7737 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7738 goto illegal_op;
7739 } else {
7740 /* reg = PSR */
7741 rd = (insn >> 12) & 0xf;
7742 if (op1 & 2) {
7743 if (IS_USER(s))
7744 goto illegal_op;
d9ba4830 7745 tmp = load_cpu_field(spsr);
9ee6e8bb 7746 } else {
7d1b0095 7747 tmp = tcg_temp_new_i32();
9ef39277 7748 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7749 }
d9ba4830 7750 store_reg(s, rd, tmp);
9ee6e8bb
PB
7751 }
7752 break;
7753 case 0x1:
7754 if (op1 == 1) {
7755 /* branch/exchange thumb (bx). */
be5e7a76 7756 ARCH(4T);
d9ba4830
PB
7757 tmp = load_reg(s, rm);
7758 gen_bx(s, tmp);
9ee6e8bb
PB
7759 } else if (op1 == 3) {
7760 /* clz */
be5e7a76 7761 ARCH(5);
9ee6e8bb 7762 rd = (insn >> 12) & 0xf;
1497c961
PB
7763 tmp = load_reg(s, rm);
7764 gen_helper_clz(tmp, tmp);
7765 store_reg(s, rd, tmp);
9ee6e8bb
PB
7766 } else {
7767 goto illegal_op;
7768 }
7769 break;
7770 case 0x2:
7771 if (op1 == 1) {
7772 ARCH(5J); /* bxj */
7773 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7774 tmp = load_reg(s, rm);
7775 gen_bx(s, tmp);
9ee6e8bb
PB
7776 } else {
7777 goto illegal_op;
7778 }
7779 break;
7780 case 0x3:
7781 if (op1 != 1)
7782 goto illegal_op;
7783
be5e7a76 7784 ARCH(5);
9ee6e8bb 7785 /* branch link/exchange thumb (blx) */
d9ba4830 7786 tmp = load_reg(s, rm);
7d1b0095 7787 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7788 tcg_gen_movi_i32(tmp2, s->pc);
7789 store_reg(s, 14, tmp2);
7790 gen_bx(s, tmp);
9ee6e8bb 7791 break;
eb0ecd5a
WN
7792 case 0x4:
7793 {
7794 /* crc32/crc32c */
7795 uint32_t c = extract32(insn, 8, 4);
7796
7797 /* Check this CPU supports ARMv8 CRC instructions.
7798 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7799 * Bits 8, 10 and 11 should be zero.
7800 */
7801 if (!arm_feature(env, ARM_FEATURE_CRC) || op1 == 0x3 ||
7802 (c & 0xd) != 0) {
7803 goto illegal_op;
7804 }
7805
7806 rn = extract32(insn, 16, 4);
7807 rd = extract32(insn, 12, 4);
7808
7809 tmp = load_reg(s, rn);
7810 tmp2 = load_reg(s, rm);
aa633469
PM
7811 if (op1 == 0) {
7812 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7813 } else if (op1 == 1) {
7814 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7815 }
eb0ecd5a
WN
7816 tmp3 = tcg_const_i32(1 << op1);
7817 if (c & 0x2) {
7818 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7819 } else {
7820 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7821 }
7822 tcg_temp_free_i32(tmp2);
7823 tcg_temp_free_i32(tmp3);
7824 store_reg(s, rd, tmp);
7825 break;
7826 }
9ee6e8bb 7827 case 0x5: /* saturating add/subtract */
be5e7a76 7828 ARCH(5TE);
9ee6e8bb
PB
7829 rd = (insn >> 12) & 0xf;
7830 rn = (insn >> 16) & 0xf;
b40d0353 7831 tmp = load_reg(s, rm);
5e3f878a 7832 tmp2 = load_reg(s, rn);
9ee6e8bb 7833 if (op1 & 2)
9ef39277 7834 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7835 if (op1 & 1)
9ef39277 7836 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7837 else
9ef39277 7838 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7839 tcg_temp_free_i32(tmp2);
5e3f878a 7840 store_reg(s, rd, tmp);
9ee6e8bb 7841 break;
49e14940 7842 case 7:
d4a2dc67
PM
7843 {
7844 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
49e14940
AL
7845 /* SMC instruction (op1 == 3)
7846 and undefined instructions (op1 == 0 || op1 == 2)
7847 will trap */
7848 if (op1 != 1) {
7849 goto illegal_op;
7850 }
7851 /* bkpt */
be5e7a76 7852 ARCH(5);
d4a2dc67 7853 gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false));
9ee6e8bb 7854 break;
d4a2dc67 7855 }
9ee6e8bb
PB
7856 case 0x8: /* signed multiply */
7857 case 0xa:
7858 case 0xc:
7859 case 0xe:
be5e7a76 7860 ARCH(5TE);
9ee6e8bb
PB
7861 rs = (insn >> 8) & 0xf;
7862 rn = (insn >> 12) & 0xf;
7863 rd = (insn >> 16) & 0xf;
7864 if (op1 == 1) {
7865 /* (32 * 16) >> 16 */
5e3f878a
PB
7866 tmp = load_reg(s, rm);
7867 tmp2 = load_reg(s, rs);
9ee6e8bb 7868 if (sh & 4)
5e3f878a 7869 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7870 else
5e3f878a 7871 gen_sxth(tmp2);
a7812ae4
PB
7872 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7873 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7874 tmp = tcg_temp_new_i32();
a7812ae4 7875 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7876 tcg_temp_free_i64(tmp64);
9ee6e8bb 7877 if ((sh & 2) == 0) {
5e3f878a 7878 tmp2 = load_reg(s, rn);
9ef39277 7879 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7880 tcg_temp_free_i32(tmp2);
9ee6e8bb 7881 }
5e3f878a 7882 store_reg(s, rd, tmp);
9ee6e8bb
PB
7883 } else {
7884 /* 16 * 16 */
5e3f878a
PB
7885 tmp = load_reg(s, rm);
7886 tmp2 = load_reg(s, rs);
7887 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7888 tcg_temp_free_i32(tmp2);
9ee6e8bb 7889 if (op1 == 2) {
a7812ae4
PB
7890 tmp64 = tcg_temp_new_i64();
7891 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7892 tcg_temp_free_i32(tmp);
a7812ae4
PB
7893 gen_addq(s, tmp64, rn, rd);
7894 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7895 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7896 } else {
7897 if (op1 == 0) {
5e3f878a 7898 tmp2 = load_reg(s, rn);
9ef39277 7899 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7900 tcg_temp_free_i32(tmp2);
9ee6e8bb 7901 }
5e3f878a 7902 store_reg(s, rd, tmp);
9ee6e8bb
PB
7903 }
7904 }
7905 break;
7906 default:
7907 goto illegal_op;
7908 }
7909 } else if (((insn & 0x0e000000) == 0 &&
7910 (insn & 0x00000090) != 0x90) ||
7911 ((insn & 0x0e000000) == (1 << 25))) {
7912 int set_cc, logic_cc, shiftop;
7913
7914 op1 = (insn >> 21) & 0xf;
7915 set_cc = (insn >> 20) & 1;
7916 logic_cc = table_logic_cc[op1] & set_cc;
7917
7918 /* data processing instruction */
7919 if (insn & (1 << 25)) {
7920 /* immediate operand */
7921 val = insn & 0xff;
7922 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7923 if (shift) {
9ee6e8bb 7924 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7925 }
7d1b0095 7926 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7927 tcg_gen_movi_i32(tmp2, val);
7928 if (logic_cc && shift) {
7929 gen_set_CF_bit31(tmp2);
7930 }
9ee6e8bb
PB
7931 } else {
7932 /* register */
7933 rm = (insn) & 0xf;
e9bb4aa9 7934 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7935 shiftop = (insn >> 5) & 3;
7936 if (!(insn & (1 << 4))) {
7937 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7938 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7939 } else {
7940 rs = (insn >> 8) & 0xf;
8984bd2e 7941 tmp = load_reg(s, rs);
e9bb4aa9 7942 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7943 }
7944 }
7945 if (op1 != 0x0f && op1 != 0x0d) {
7946 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7947 tmp = load_reg(s, rn);
7948 } else {
39d5492a 7949 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7950 }
7951 rd = (insn >> 12) & 0xf;
7952 switch(op1) {
7953 case 0x00:
e9bb4aa9
JR
7954 tcg_gen_and_i32(tmp, tmp, tmp2);
7955 if (logic_cc) {
7956 gen_logic_CC(tmp);
7957 }
21aeb343 7958 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7959 break;
7960 case 0x01:
e9bb4aa9
JR
7961 tcg_gen_xor_i32(tmp, tmp, tmp2);
7962 if (logic_cc) {
7963 gen_logic_CC(tmp);
7964 }
21aeb343 7965 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7966 break;
7967 case 0x02:
7968 if (set_cc && rd == 15) {
7969 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7970 if (IS_USER(s)) {
9ee6e8bb 7971 goto illegal_op;
e9bb4aa9 7972 }
72485ec4 7973 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7974 gen_exception_return(s, tmp);
9ee6e8bb 7975 } else {
e9bb4aa9 7976 if (set_cc) {
72485ec4 7977 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7978 } else {
7979 tcg_gen_sub_i32(tmp, tmp, tmp2);
7980 }
21aeb343 7981 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7982 }
7983 break;
7984 case 0x03:
e9bb4aa9 7985 if (set_cc) {
72485ec4 7986 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7987 } else {
7988 tcg_gen_sub_i32(tmp, tmp2, tmp);
7989 }
21aeb343 7990 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7991 break;
7992 case 0x04:
e9bb4aa9 7993 if (set_cc) {
72485ec4 7994 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7995 } else {
7996 tcg_gen_add_i32(tmp, tmp, tmp2);
7997 }
21aeb343 7998 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7999 break;
8000 case 0x05:
e9bb4aa9 8001 if (set_cc) {
49b4c31e 8002 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8003 } else {
8004 gen_add_carry(tmp, tmp, tmp2);
8005 }
21aeb343 8006 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8007 break;
8008 case 0x06:
e9bb4aa9 8009 if (set_cc) {
2de68a49 8010 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8011 } else {
8012 gen_sub_carry(tmp, tmp, tmp2);
8013 }
21aeb343 8014 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8015 break;
8016 case 0x07:
e9bb4aa9 8017 if (set_cc) {
2de68a49 8018 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8019 } else {
8020 gen_sub_carry(tmp, tmp2, tmp);
8021 }
21aeb343 8022 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8023 break;
8024 case 0x08:
8025 if (set_cc) {
e9bb4aa9
JR
8026 tcg_gen_and_i32(tmp, tmp, tmp2);
8027 gen_logic_CC(tmp);
9ee6e8bb 8028 }
7d1b0095 8029 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8030 break;
8031 case 0x09:
8032 if (set_cc) {
e9bb4aa9
JR
8033 tcg_gen_xor_i32(tmp, tmp, tmp2);
8034 gen_logic_CC(tmp);
9ee6e8bb 8035 }
7d1b0095 8036 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8037 break;
8038 case 0x0a:
8039 if (set_cc) {
72485ec4 8040 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8041 }
7d1b0095 8042 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8043 break;
8044 case 0x0b:
8045 if (set_cc) {
72485ec4 8046 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8047 }
7d1b0095 8048 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8049 break;
8050 case 0x0c:
e9bb4aa9
JR
8051 tcg_gen_or_i32(tmp, tmp, tmp2);
8052 if (logic_cc) {
8053 gen_logic_CC(tmp);
8054 }
21aeb343 8055 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8056 break;
8057 case 0x0d:
8058 if (logic_cc && rd == 15) {
8059 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8060 if (IS_USER(s)) {
9ee6e8bb 8061 goto illegal_op;
e9bb4aa9
JR
8062 }
8063 gen_exception_return(s, tmp2);
9ee6e8bb 8064 } else {
e9bb4aa9
JR
8065 if (logic_cc) {
8066 gen_logic_CC(tmp2);
8067 }
21aeb343 8068 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
8069 }
8070 break;
8071 case 0x0e:
f669df27 8072 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8073 if (logic_cc) {
8074 gen_logic_CC(tmp);
8075 }
21aeb343 8076 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8077 break;
8078 default:
8079 case 0x0f:
e9bb4aa9
JR
8080 tcg_gen_not_i32(tmp2, tmp2);
8081 if (logic_cc) {
8082 gen_logic_CC(tmp2);
8083 }
21aeb343 8084 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
8085 break;
8086 }
e9bb4aa9 8087 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8088 tcg_temp_free_i32(tmp2);
e9bb4aa9 8089 }
9ee6e8bb
PB
8090 } else {
8091 /* other instructions */
8092 op1 = (insn >> 24) & 0xf;
8093 switch(op1) {
8094 case 0x0:
8095 case 0x1:
8096 /* multiplies, extra load/stores */
8097 sh = (insn >> 5) & 3;
8098 if (sh == 0) {
8099 if (op1 == 0x0) {
8100 rd = (insn >> 16) & 0xf;
8101 rn = (insn >> 12) & 0xf;
8102 rs = (insn >> 8) & 0xf;
8103 rm = (insn) & 0xf;
8104 op1 = (insn >> 20) & 0xf;
8105 switch (op1) {
8106 case 0: case 1: case 2: case 3: case 6:
8107 /* 32 bit mul */
5e3f878a
PB
8108 tmp = load_reg(s, rs);
8109 tmp2 = load_reg(s, rm);
8110 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8111 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8112 if (insn & (1 << 22)) {
8113 /* Subtract (mls) */
8114 ARCH(6T2);
5e3f878a
PB
8115 tmp2 = load_reg(s, rn);
8116 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8117 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8118 } else if (insn & (1 << 21)) {
8119 /* Add */
5e3f878a
PB
8120 tmp2 = load_reg(s, rn);
8121 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8122 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8123 }
8124 if (insn & (1 << 20))
5e3f878a
PB
8125 gen_logic_CC(tmp);
8126 store_reg(s, rd, tmp);
9ee6e8bb 8127 break;
8aac08b1
AJ
8128 case 4:
8129 /* 64 bit mul double accumulate (UMAAL) */
8130 ARCH(6);
8131 tmp = load_reg(s, rs);
8132 tmp2 = load_reg(s, rm);
8133 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8134 gen_addq_lo(s, tmp64, rn);
8135 gen_addq_lo(s, tmp64, rd);
8136 gen_storeq_reg(s, rn, rd, tmp64);
8137 tcg_temp_free_i64(tmp64);
8138 break;
8139 case 8: case 9: case 10: case 11:
8140 case 12: case 13: case 14: case 15:
8141 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8142 tmp = load_reg(s, rs);
8143 tmp2 = load_reg(s, rm);
8aac08b1 8144 if (insn & (1 << 22)) {
c9f10124 8145 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8146 } else {
c9f10124 8147 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8148 }
8149 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8150 TCGv_i32 al = load_reg(s, rn);
8151 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8152 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8153 tcg_temp_free_i32(al);
8154 tcg_temp_free_i32(ah);
9ee6e8bb 8155 }
8aac08b1 8156 if (insn & (1 << 20)) {
c9f10124 8157 gen_logicq_cc(tmp, tmp2);
8aac08b1 8158 }
c9f10124
RH
8159 store_reg(s, rn, tmp);
8160 store_reg(s, rd, tmp2);
9ee6e8bb 8161 break;
8aac08b1
AJ
8162 default:
8163 goto illegal_op;
9ee6e8bb
PB
8164 }
8165 } else {
8166 rn = (insn >> 16) & 0xf;
8167 rd = (insn >> 12) & 0xf;
8168 if (insn & (1 << 23)) {
8169 /* load/store exclusive */
2359bf80 8170 int op2 = (insn >> 8) & 3;
86753403 8171 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8172
8173 switch (op2) {
8174 case 0: /* lda/stl */
8175 if (op1 == 1) {
8176 goto illegal_op;
8177 }
8178 ARCH(8);
8179 break;
8180 case 1: /* reserved */
8181 goto illegal_op;
8182 case 2: /* ldaex/stlex */
8183 ARCH(8);
8184 break;
8185 case 3: /* ldrex/strex */
8186 if (op1) {
8187 ARCH(6K);
8188 } else {
8189 ARCH(6);
8190 }
8191 break;
8192 }
8193
3174f8e9 8194 addr = tcg_temp_local_new_i32();
98a46317 8195 load_reg_var(s, addr, rn);
2359bf80
MR
8196
8197 /* Since the emulation does not have barriers,
8198 the acquire/release semantics need no special
8199 handling */
8200 if (op2 == 0) {
8201 if (insn & (1 << 20)) {
8202 tmp = tcg_temp_new_i32();
8203 switch (op1) {
8204 case 0: /* lda */
6ce2faf4 8205 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
8206 break;
8207 case 2: /* ldab */
6ce2faf4 8208 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
8209 break;
8210 case 3: /* ldah */
6ce2faf4 8211 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
8212 break;
8213 default:
8214 abort();
8215 }
8216 store_reg(s, rd, tmp);
8217 } else {
8218 rm = insn & 0xf;
8219 tmp = load_reg(s, rm);
8220 switch (op1) {
8221 case 0: /* stl */
6ce2faf4 8222 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
8223 break;
8224 case 2: /* stlb */
6ce2faf4 8225 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
8226 break;
8227 case 3: /* stlh */
6ce2faf4 8228 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
8229 break;
8230 default:
8231 abort();
8232 }
8233 tcg_temp_free_i32(tmp);
8234 }
8235 } else if (insn & (1 << 20)) {
86753403
PB
8236 switch (op1) {
8237 case 0: /* ldrex */
426f5abc 8238 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8239 break;
8240 case 1: /* ldrexd */
426f5abc 8241 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8242 break;
8243 case 2: /* ldrexb */
426f5abc 8244 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8245 break;
8246 case 3: /* ldrexh */
426f5abc 8247 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8248 break;
8249 default:
8250 abort();
8251 }
9ee6e8bb
PB
8252 } else {
8253 rm = insn & 0xf;
86753403
PB
8254 switch (op1) {
8255 case 0: /* strex */
426f5abc 8256 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8257 break;
8258 case 1: /* strexd */
502e64fe 8259 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8260 break;
8261 case 2: /* strexb */
426f5abc 8262 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8263 break;
8264 case 3: /* strexh */
426f5abc 8265 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8266 break;
8267 default:
8268 abort();
8269 }
9ee6e8bb 8270 }
39d5492a 8271 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8272 } else {
8273 /* SWP instruction */
8274 rm = (insn) & 0xf;
8275
8984bd2e
PB
8276 /* ??? This is not really atomic. However we know
8277 we never have multiple CPUs running in parallel,
8278 so it is good enough. */
8279 addr = load_reg(s, rn);
8280 tmp = load_reg(s, rm);
5a839c0d 8281 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8282 if (insn & (1 << 22)) {
6ce2faf4
EI
8283 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8284 gen_aa32_st8(tmp, addr, get_mem_index(s));
9ee6e8bb 8285 } else {
6ce2faf4
EI
8286 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8287 gen_aa32_st32(tmp, addr, get_mem_index(s));
9ee6e8bb 8288 }
5a839c0d 8289 tcg_temp_free_i32(tmp);
7d1b0095 8290 tcg_temp_free_i32(addr);
8984bd2e 8291 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8292 }
8293 }
8294 } else {
8295 int address_offset;
8296 int load;
8297 /* Misc load/store */
8298 rn = (insn >> 16) & 0xf;
8299 rd = (insn >> 12) & 0xf;
b0109805 8300 addr = load_reg(s, rn);
9ee6e8bb 8301 if (insn & (1 << 24))
b0109805 8302 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
8303 address_offset = 0;
8304 if (insn & (1 << 20)) {
8305 /* load */
5a839c0d 8306 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
8307 switch(sh) {
8308 case 1:
6ce2faf4 8309 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8310 break;
8311 case 2:
6ce2faf4 8312 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8313 break;
8314 default:
8315 case 3:
6ce2faf4 8316 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8317 break;
8318 }
8319 load = 1;
8320 } else if (sh & 2) {
be5e7a76 8321 ARCH(5TE);
9ee6e8bb
PB
8322 /* doubleword */
8323 if (sh & 1) {
8324 /* store */
b0109805 8325 tmp = load_reg(s, rd);
6ce2faf4 8326 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8327 tcg_temp_free_i32(tmp);
b0109805
PB
8328 tcg_gen_addi_i32(addr, addr, 4);
8329 tmp = load_reg(s, rd + 1);
6ce2faf4 8330 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8331 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8332 load = 0;
8333 } else {
8334 /* load */
5a839c0d 8335 tmp = tcg_temp_new_i32();
6ce2faf4 8336 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
8337 store_reg(s, rd, tmp);
8338 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8339 tmp = tcg_temp_new_i32();
6ce2faf4 8340 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8341 rd++;
8342 load = 1;
8343 }
8344 address_offset = -4;
8345 } else {
8346 /* store */
b0109805 8347 tmp = load_reg(s, rd);
6ce2faf4 8348 gen_aa32_st16(tmp, addr, get_mem_index(s));
5a839c0d 8349 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8350 load = 0;
8351 }
8352 /* Perform base writeback before the loaded value to
8353 ensure correct behavior with overlapping index registers.
8354 ldrd with base writeback is is undefined if the
8355 destination and index registers overlap. */
8356 if (!(insn & (1 << 24))) {
b0109805
PB
8357 gen_add_datah_offset(s, insn, address_offset, addr);
8358 store_reg(s, rn, addr);
9ee6e8bb
PB
8359 } else if (insn & (1 << 21)) {
8360 if (address_offset)
b0109805
PB
8361 tcg_gen_addi_i32(addr, addr, address_offset);
8362 store_reg(s, rn, addr);
8363 } else {
7d1b0095 8364 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8365 }
8366 if (load) {
8367 /* Complete the load. */
b0109805 8368 store_reg(s, rd, tmp);
9ee6e8bb
PB
8369 }
8370 }
8371 break;
8372 case 0x4:
8373 case 0x5:
8374 goto do_ldst;
8375 case 0x6:
8376 case 0x7:
8377 if (insn & (1 << 4)) {
8378 ARCH(6);
8379 /* Armv6 Media instructions. */
8380 rm = insn & 0xf;
8381 rn = (insn >> 16) & 0xf;
2c0262af 8382 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8383 rs = (insn >> 8) & 0xf;
8384 switch ((insn >> 23) & 3) {
8385 case 0: /* Parallel add/subtract. */
8386 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8387 tmp = load_reg(s, rn);
8388 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8389 sh = (insn >> 5) & 7;
8390 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8391 goto illegal_op;
6ddbc6e4 8392 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8393 tcg_temp_free_i32(tmp2);
6ddbc6e4 8394 store_reg(s, rd, tmp);
9ee6e8bb
PB
8395 break;
8396 case 1:
8397 if ((insn & 0x00700020) == 0) {
6c95676b 8398 /* Halfword pack. */
3670669c
PB
8399 tmp = load_reg(s, rn);
8400 tmp2 = load_reg(s, rm);
9ee6e8bb 8401 shift = (insn >> 7) & 0x1f;
3670669c
PB
8402 if (insn & (1 << 6)) {
8403 /* pkhtb */
22478e79
AZ
8404 if (shift == 0)
8405 shift = 31;
8406 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8407 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8408 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8409 } else {
8410 /* pkhbt */
22478e79
AZ
8411 if (shift)
8412 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8413 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8414 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8415 }
8416 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8417 tcg_temp_free_i32(tmp2);
3670669c 8418 store_reg(s, rd, tmp);
9ee6e8bb
PB
8419 } else if ((insn & 0x00200020) == 0x00200000) {
8420 /* [us]sat */
6ddbc6e4 8421 tmp = load_reg(s, rm);
9ee6e8bb
PB
8422 shift = (insn >> 7) & 0x1f;
8423 if (insn & (1 << 6)) {
8424 if (shift == 0)
8425 shift = 31;
6ddbc6e4 8426 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8427 } else {
6ddbc6e4 8428 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8429 }
8430 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8431 tmp2 = tcg_const_i32(sh);
8432 if (insn & (1 << 22))
9ef39277 8433 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8434 else
9ef39277 8435 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8436 tcg_temp_free_i32(tmp2);
6ddbc6e4 8437 store_reg(s, rd, tmp);
9ee6e8bb
PB
8438 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8439 /* [us]sat16 */
6ddbc6e4 8440 tmp = load_reg(s, rm);
9ee6e8bb 8441 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8442 tmp2 = tcg_const_i32(sh);
8443 if (insn & (1 << 22))
9ef39277 8444 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8445 else
9ef39277 8446 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8447 tcg_temp_free_i32(tmp2);
6ddbc6e4 8448 store_reg(s, rd, tmp);
9ee6e8bb
PB
8449 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8450 /* Select bytes. */
6ddbc6e4
PB
8451 tmp = load_reg(s, rn);
8452 tmp2 = load_reg(s, rm);
7d1b0095 8453 tmp3 = tcg_temp_new_i32();
0ecb72a5 8454 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8455 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8456 tcg_temp_free_i32(tmp3);
8457 tcg_temp_free_i32(tmp2);
6ddbc6e4 8458 store_reg(s, rd, tmp);
9ee6e8bb 8459 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8460 tmp = load_reg(s, rm);
9ee6e8bb 8461 shift = (insn >> 10) & 3;
1301f322 8462 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8463 rotate, a shift is sufficient. */
8464 if (shift != 0)
f669df27 8465 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8466 op1 = (insn >> 20) & 7;
8467 switch (op1) {
5e3f878a
PB
8468 case 0: gen_sxtb16(tmp); break;
8469 case 2: gen_sxtb(tmp); break;
8470 case 3: gen_sxth(tmp); break;
8471 case 4: gen_uxtb16(tmp); break;
8472 case 6: gen_uxtb(tmp); break;
8473 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8474 default: goto illegal_op;
8475 }
8476 if (rn != 15) {
5e3f878a 8477 tmp2 = load_reg(s, rn);
9ee6e8bb 8478 if ((op1 & 3) == 0) {
5e3f878a 8479 gen_add16(tmp, tmp2);
9ee6e8bb 8480 } else {
5e3f878a 8481 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8482 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8483 }
8484 }
6c95676b 8485 store_reg(s, rd, tmp);
9ee6e8bb
PB
8486 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8487 /* rev */
b0109805 8488 tmp = load_reg(s, rm);
9ee6e8bb
PB
8489 if (insn & (1 << 22)) {
8490 if (insn & (1 << 7)) {
b0109805 8491 gen_revsh(tmp);
9ee6e8bb
PB
8492 } else {
8493 ARCH(6T2);
b0109805 8494 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8495 }
8496 } else {
8497 if (insn & (1 << 7))
b0109805 8498 gen_rev16(tmp);
9ee6e8bb 8499 else
66896cb8 8500 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8501 }
b0109805 8502 store_reg(s, rd, tmp);
9ee6e8bb
PB
8503 } else {
8504 goto illegal_op;
8505 }
8506 break;
8507 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8508 switch ((insn >> 20) & 0x7) {
8509 case 5:
8510 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8511 /* op2 not 00x or 11x : UNDEF */
8512 goto illegal_op;
8513 }
838fa72d
AJ
8514 /* Signed multiply most significant [accumulate].
8515 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8516 tmp = load_reg(s, rm);
8517 tmp2 = load_reg(s, rs);
a7812ae4 8518 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8519
955a7dd5 8520 if (rd != 15) {
838fa72d 8521 tmp = load_reg(s, rd);
9ee6e8bb 8522 if (insn & (1 << 6)) {
838fa72d 8523 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8524 } else {
838fa72d 8525 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8526 }
8527 }
838fa72d
AJ
8528 if (insn & (1 << 5)) {
8529 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8530 }
8531 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8532 tmp = tcg_temp_new_i32();
838fa72d
AJ
8533 tcg_gen_trunc_i64_i32(tmp, tmp64);
8534 tcg_temp_free_i64(tmp64);
955a7dd5 8535 store_reg(s, rn, tmp);
41e9564d
PM
8536 break;
8537 case 0:
8538 case 4:
8539 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8540 if (insn & (1 << 7)) {
8541 goto illegal_op;
8542 }
8543 tmp = load_reg(s, rm);
8544 tmp2 = load_reg(s, rs);
9ee6e8bb 8545 if (insn & (1 << 5))
5e3f878a
PB
8546 gen_swap_half(tmp2);
8547 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8548 if (insn & (1 << 22)) {
5e3f878a 8549 /* smlald, smlsld */
33bbd75a
PC
8550 TCGv_i64 tmp64_2;
8551
a7812ae4 8552 tmp64 = tcg_temp_new_i64();
33bbd75a 8553 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8554 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8555 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8556 tcg_temp_free_i32(tmp);
33bbd75a
PC
8557 tcg_temp_free_i32(tmp2);
8558 if (insn & (1 << 6)) {
8559 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8560 } else {
8561 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8562 }
8563 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
8564 gen_addq(s, tmp64, rd, rn);
8565 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8566 tcg_temp_free_i64(tmp64);
9ee6e8bb 8567 } else {
5e3f878a 8568 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
8569 if (insn & (1 << 6)) {
8570 /* This subtraction cannot overflow. */
8571 tcg_gen_sub_i32(tmp, tmp, tmp2);
8572 } else {
8573 /* This addition cannot overflow 32 bits;
8574 * however it may overflow considered as a
8575 * signed operation, in which case we must set
8576 * the Q flag.
8577 */
8578 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8579 }
8580 tcg_temp_free_i32(tmp2);
22478e79 8581 if (rd != 15)
9ee6e8bb 8582 {
22478e79 8583 tmp2 = load_reg(s, rd);
9ef39277 8584 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8585 tcg_temp_free_i32(tmp2);
9ee6e8bb 8586 }
22478e79 8587 store_reg(s, rn, tmp);
9ee6e8bb 8588 }
41e9564d 8589 break;
b8b8ea05
PM
8590 case 1:
8591 case 3:
8592 /* SDIV, UDIV */
8593 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
8594 goto illegal_op;
8595 }
8596 if (((insn >> 5) & 7) || (rd != 15)) {
8597 goto illegal_op;
8598 }
8599 tmp = load_reg(s, rm);
8600 tmp2 = load_reg(s, rs);
8601 if (insn & (1 << 21)) {
8602 gen_helper_udiv(tmp, tmp, tmp2);
8603 } else {
8604 gen_helper_sdiv(tmp, tmp, tmp2);
8605 }
8606 tcg_temp_free_i32(tmp2);
8607 store_reg(s, rn, tmp);
8608 break;
41e9564d
PM
8609 default:
8610 goto illegal_op;
9ee6e8bb
PB
8611 }
8612 break;
8613 case 3:
8614 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8615 switch (op1) {
8616 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8617 ARCH(6);
8618 tmp = load_reg(s, rm);
8619 tmp2 = load_reg(s, rs);
8620 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8621 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8622 if (rd != 15) {
8623 tmp2 = load_reg(s, rd);
6ddbc6e4 8624 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8625 tcg_temp_free_i32(tmp2);
9ee6e8bb 8626 }
ded9d295 8627 store_reg(s, rn, tmp);
9ee6e8bb
PB
8628 break;
8629 case 0x20: case 0x24: case 0x28: case 0x2c:
8630 /* Bitfield insert/clear. */
8631 ARCH(6T2);
8632 shift = (insn >> 7) & 0x1f;
8633 i = (insn >> 16) & 0x1f;
8634 i = i + 1 - shift;
8635 if (rm == 15) {
7d1b0095 8636 tmp = tcg_temp_new_i32();
5e3f878a 8637 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8638 } else {
5e3f878a 8639 tmp = load_reg(s, rm);
9ee6e8bb
PB
8640 }
8641 if (i != 32) {
5e3f878a 8642 tmp2 = load_reg(s, rd);
d593c48e 8643 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8644 tcg_temp_free_i32(tmp2);
9ee6e8bb 8645 }
5e3f878a 8646 store_reg(s, rd, tmp);
9ee6e8bb
PB
8647 break;
8648 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8649 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8650 ARCH(6T2);
5e3f878a 8651 tmp = load_reg(s, rm);
9ee6e8bb
PB
8652 shift = (insn >> 7) & 0x1f;
8653 i = ((insn >> 16) & 0x1f) + 1;
8654 if (shift + i > 32)
8655 goto illegal_op;
8656 if (i < 32) {
8657 if (op1 & 0x20) {
5e3f878a 8658 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8659 } else {
5e3f878a 8660 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8661 }
8662 }
5e3f878a 8663 store_reg(s, rd, tmp);
9ee6e8bb
PB
8664 break;
8665 default:
8666 goto illegal_op;
8667 }
8668 break;
8669 }
8670 break;
8671 }
8672 do_ldst:
8673 /* Check for undefined extension instructions
8674 * per the ARM Bible IE:
8675 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8676 */
8677 sh = (0xf << 20) | (0xf << 4);
8678 if (op1 == 0x7 && ((insn & sh) == sh))
8679 {
8680 goto illegal_op;
8681 }
8682 /* load/store byte/word */
8683 rn = (insn >> 16) & 0xf;
8684 rd = (insn >> 12) & 0xf;
b0109805 8685 tmp2 = load_reg(s, rn);
a99caa48
PM
8686 if ((insn & 0x01200000) == 0x00200000) {
8687 /* ldrt/strt */
8688 i = MMU_USER_IDX;
8689 } else {
8690 i = get_mem_index(s);
8691 }
9ee6e8bb 8692 if (insn & (1 << 24))
b0109805 8693 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8694 if (insn & (1 << 20)) {
8695 /* load */
5a839c0d 8696 tmp = tcg_temp_new_i32();
9ee6e8bb 8697 if (insn & (1 << 22)) {
08307563 8698 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8699 } else {
08307563 8700 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8701 }
9ee6e8bb
PB
8702 } else {
8703 /* store */
b0109805 8704 tmp = load_reg(s, rd);
5a839c0d 8705 if (insn & (1 << 22)) {
08307563 8706 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8707 } else {
08307563 8708 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8709 }
8710 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8711 }
8712 if (!(insn & (1 << 24))) {
b0109805
PB
8713 gen_add_data_offset(s, insn, tmp2);
8714 store_reg(s, rn, tmp2);
8715 } else if (insn & (1 << 21)) {
8716 store_reg(s, rn, tmp2);
8717 } else {
7d1b0095 8718 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8719 }
8720 if (insn & (1 << 20)) {
8721 /* Complete the load. */
be5e7a76 8722 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
8723 }
8724 break;
8725 case 0x08:
8726 case 0x09:
8727 {
8728 int j, n, user, loaded_base;
39d5492a 8729 TCGv_i32 loaded_var;
9ee6e8bb
PB
8730 /* load/store multiple words */
8731 /* XXX: store correct base if write back */
8732 user = 0;
8733 if (insn & (1 << 22)) {
8734 if (IS_USER(s))
8735 goto illegal_op; /* only usable in supervisor mode */
8736
8737 if ((insn & (1 << 15)) == 0)
8738 user = 1;
8739 }
8740 rn = (insn >> 16) & 0xf;
b0109805 8741 addr = load_reg(s, rn);
9ee6e8bb
PB
8742
8743 /* compute total size */
8744 loaded_base = 0;
39d5492a 8745 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8746 n = 0;
8747 for(i=0;i<16;i++) {
8748 if (insn & (1 << i))
8749 n++;
8750 }
8751 /* XXX: test invalid n == 0 case ? */
8752 if (insn & (1 << 23)) {
8753 if (insn & (1 << 24)) {
8754 /* pre increment */
b0109805 8755 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8756 } else {
8757 /* post increment */
8758 }
8759 } else {
8760 if (insn & (1 << 24)) {
8761 /* pre decrement */
b0109805 8762 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8763 } else {
8764 /* post decrement */
8765 if (n != 1)
b0109805 8766 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8767 }
8768 }
8769 j = 0;
8770 for(i=0;i<16;i++) {
8771 if (insn & (1 << i)) {
8772 if (insn & (1 << 20)) {
8773 /* load */
5a839c0d 8774 tmp = tcg_temp_new_i32();
6ce2faf4 8775 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
be5e7a76 8776 if (user) {
b75263d6 8777 tmp2 = tcg_const_i32(i);
1ce94f81 8778 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8779 tcg_temp_free_i32(tmp2);
7d1b0095 8780 tcg_temp_free_i32(tmp);
9ee6e8bb 8781 } else if (i == rn) {
b0109805 8782 loaded_var = tmp;
9ee6e8bb
PB
8783 loaded_base = 1;
8784 } else {
be5e7a76 8785 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
8786 }
8787 } else {
8788 /* store */
8789 if (i == 15) {
8790 /* special case: r15 = PC + 8 */
8791 val = (long)s->pc + 4;
7d1b0095 8792 tmp = tcg_temp_new_i32();
b0109805 8793 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8794 } else if (user) {
7d1b0095 8795 tmp = tcg_temp_new_i32();
b75263d6 8796 tmp2 = tcg_const_i32(i);
9ef39277 8797 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8798 tcg_temp_free_i32(tmp2);
9ee6e8bb 8799 } else {
b0109805 8800 tmp = load_reg(s, i);
9ee6e8bb 8801 }
6ce2faf4 8802 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8803 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8804 }
8805 j++;
8806 /* no need to add after the last transfer */
8807 if (j != n)
b0109805 8808 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8809 }
8810 }
8811 if (insn & (1 << 21)) {
8812 /* write back */
8813 if (insn & (1 << 23)) {
8814 if (insn & (1 << 24)) {
8815 /* pre increment */
8816 } else {
8817 /* post increment */
b0109805 8818 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8819 }
8820 } else {
8821 if (insn & (1 << 24)) {
8822 /* pre decrement */
8823 if (n != 1)
b0109805 8824 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8825 } else {
8826 /* post decrement */
b0109805 8827 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8828 }
8829 }
b0109805
PB
8830 store_reg(s, rn, addr);
8831 } else {
7d1b0095 8832 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8833 }
8834 if (loaded_base) {
b0109805 8835 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8836 }
8837 if ((insn & (1 << 22)) && !user) {
8838 /* Restore CPSR from SPSR. */
d9ba4830 8839 tmp = load_cpu_field(spsr);
4051e12c 8840 gen_set_cpsr(tmp, CPSR_ERET_MASK);
7d1b0095 8841 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8842 s->is_jmp = DISAS_UPDATE;
8843 }
8844 }
8845 break;
8846 case 0xa:
8847 case 0xb:
8848 {
8849 int32_t offset;
8850
8851 /* branch (and link) */
8852 val = (int32_t)s->pc;
8853 if (insn & (1 << 24)) {
7d1b0095 8854 tmp = tcg_temp_new_i32();
5e3f878a
PB
8855 tcg_gen_movi_i32(tmp, val);
8856 store_reg(s, 14, tmp);
9ee6e8bb 8857 }
534df156
PM
8858 offset = sextract32(insn << 2, 0, 26);
8859 val += offset + 4;
9ee6e8bb
PB
8860 gen_jmp(s, val);
8861 }
8862 break;
8863 case 0xc:
8864 case 0xd:
8865 case 0xe:
6a57f3eb
WN
8866 if (((insn >> 8) & 0xe) == 10) {
8867 /* VFP. */
8868 if (disas_vfp_insn(env, s, insn)) {
8869 goto illegal_op;
8870 }
8871 } else if (disas_coproc_insn(env, s, insn)) {
8872 /* Coprocessor. */
9ee6e8bb 8873 goto illegal_op;
6a57f3eb 8874 }
9ee6e8bb
PB
8875 break;
8876 case 0xf:
8877 /* swi */
eaed129d 8878 gen_set_pc_im(s, s->pc);
d4a2dc67 8879 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
8880 s->is_jmp = DISAS_SWI;
8881 break;
8882 default:
8883 illegal_op:
d4a2dc67 8884 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
8885 break;
8886 }
8887 }
8888}
8889
/* Predicate: nonzero iff OP is one of the Thumb-2 logical data-processing
 * opcodes (the low half of the opcode space, 0..7).  Used by the caller to
 * decide whether flag setting follows the logical-operation rules. */
static int
thumb2_logic_op(int op)
{
    return op <= 7;
}
8896
8897/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8898 then set condition code flags based on the result of the operation.
8899 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8900 to the high bit of T1.
8901 Returns zero if the opcode is valid. */
8902
8903static int
39d5492a
PM
8904gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8905 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8906{
8907 int logic_cc;
8908
8909 logic_cc = 0;
8910 switch (op) {
8911 case 0: /* and */
396e467c 8912 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8913 logic_cc = conds;
8914 break;
8915 case 1: /* bic */
f669df27 8916 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8917 logic_cc = conds;
8918 break;
8919 case 2: /* orr */
396e467c 8920 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8921 logic_cc = conds;
8922 break;
8923 case 3: /* orn */
29501f1b 8924 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8925 logic_cc = conds;
8926 break;
8927 case 4: /* eor */
396e467c 8928 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8929 logic_cc = conds;
8930 break;
8931 case 8: /* add */
8932 if (conds)
72485ec4 8933 gen_add_CC(t0, t0, t1);
9ee6e8bb 8934 else
396e467c 8935 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8936 break;
8937 case 10: /* adc */
8938 if (conds)
49b4c31e 8939 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8940 else
396e467c 8941 gen_adc(t0, t1);
9ee6e8bb
PB
8942 break;
8943 case 11: /* sbc */
2de68a49
RH
8944 if (conds) {
8945 gen_sbc_CC(t0, t0, t1);
8946 } else {
396e467c 8947 gen_sub_carry(t0, t0, t1);
2de68a49 8948 }
9ee6e8bb
PB
8949 break;
8950 case 13: /* sub */
8951 if (conds)
72485ec4 8952 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8953 else
396e467c 8954 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8955 break;
8956 case 14: /* rsb */
8957 if (conds)
72485ec4 8958 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8959 else
396e467c 8960 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8961 break;
8962 default: /* 5, 6, 7, 9, 12, 15. */
8963 return 1;
8964 }
8965 if (logic_cc) {
396e467c 8966 gen_logic_CC(t0);
9ee6e8bb 8967 if (shifter_out)
396e467c 8968 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8969 }
8970 return 0;
8971}
8972
8973/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8974 is not legal. */
0ecb72a5 8975static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8976{
b0109805 8977 uint32_t insn, imm, shift, offset;
9ee6e8bb 8978 uint32_t rd, rn, rm, rs;
39d5492a
PM
8979 TCGv_i32 tmp;
8980 TCGv_i32 tmp2;
8981 TCGv_i32 tmp3;
8982 TCGv_i32 addr;
a7812ae4 8983 TCGv_i64 tmp64;
9ee6e8bb
PB
8984 int op;
8985 int shiftop;
8986 int conds;
8987 int logic_cc;
8988
8989 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8990 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8991 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8992 16-bit instructions to get correct prefetch abort behavior. */
8993 insn = insn_hw1;
8994 if ((insn & (1 << 12)) == 0) {
be5e7a76 8995 ARCH(5);
9ee6e8bb
PB
8996 /* Second half of blx. */
8997 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8998 tmp = load_reg(s, 14);
8999 tcg_gen_addi_i32(tmp, tmp, offset);
9000 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9001
7d1b0095 9002 tmp2 = tcg_temp_new_i32();
b0109805 9003 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9004 store_reg(s, 14, tmp2);
9005 gen_bx(s, tmp);
9ee6e8bb
PB
9006 return 0;
9007 }
9008 if (insn & (1 << 11)) {
9009 /* Second half of bl. */
9010 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9011 tmp = load_reg(s, 14);
6a0d8a1d 9012 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9013
7d1b0095 9014 tmp2 = tcg_temp_new_i32();
b0109805 9015 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9016 store_reg(s, 14, tmp2);
9017 gen_bx(s, tmp);
9ee6e8bb
PB
9018 return 0;
9019 }
9020 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9021 /* Instruction spans a page boundary. Implement it as two
9022 16-bit instructions in case the second half causes an
9023 prefetch abort. */
9024 offset = ((int32_t)insn << 21) >> 9;
396e467c 9025 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9026 return 0;
9027 }
9028 /* Fall through to 32-bit decode. */
9029 }
9030
d31dd73e 9031 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
9032 s->pc += 2;
9033 insn |= (uint32_t)insn_hw1 << 16;
9034
9035 if ((insn & 0xf800e800) != 0xf000e800) {
9036 ARCH(6T2);
9037 }
9038
9039 rn = (insn >> 16) & 0xf;
9040 rs = (insn >> 12) & 0xf;
9041 rd = (insn >> 8) & 0xf;
9042 rm = insn & 0xf;
9043 switch ((insn >> 25) & 0xf) {
9044 case 0: case 1: case 2: case 3:
9045 /* 16-bit instructions. Should never happen. */
9046 abort();
9047 case 4:
9048 if (insn & (1 << 22)) {
9049 /* Other load/store, table branch. */
9050 if (insn & 0x01200000) {
9051 /* Load/store doubleword. */
9052 if (rn == 15) {
7d1b0095 9053 addr = tcg_temp_new_i32();
b0109805 9054 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9055 } else {
b0109805 9056 addr = load_reg(s, rn);
9ee6e8bb
PB
9057 }
9058 offset = (insn & 0xff) * 4;
9059 if ((insn & (1 << 23)) == 0)
9060 offset = -offset;
9061 if (insn & (1 << 24)) {
b0109805 9062 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9063 offset = 0;
9064 }
9065 if (insn & (1 << 20)) {
9066 /* ldrd */
e2592fad 9067 tmp = tcg_temp_new_i32();
6ce2faf4 9068 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
9069 store_reg(s, rs, tmp);
9070 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9071 tmp = tcg_temp_new_i32();
6ce2faf4 9072 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9073 store_reg(s, rd, tmp);
9ee6e8bb
PB
9074 } else {
9075 /* strd */
b0109805 9076 tmp = load_reg(s, rs);
6ce2faf4 9077 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9078 tcg_temp_free_i32(tmp);
b0109805
PB
9079 tcg_gen_addi_i32(addr, addr, 4);
9080 tmp = load_reg(s, rd);
6ce2faf4 9081 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9082 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9083 }
9084 if (insn & (1 << 21)) {
9085 /* Base writeback. */
9086 if (rn == 15)
9087 goto illegal_op;
b0109805
PB
9088 tcg_gen_addi_i32(addr, addr, offset - 4);
9089 store_reg(s, rn, addr);
9090 } else {
7d1b0095 9091 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9092 }
9093 } else if ((insn & (1 << 23)) == 0) {
9094 /* Load/store exclusive word. */
39d5492a 9095 addr = tcg_temp_local_new_i32();
98a46317 9096 load_reg_var(s, addr, rn);
426f5abc 9097 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9098 if (insn & (1 << 20)) {
426f5abc 9099 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9100 } else {
426f5abc 9101 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9102 }
39d5492a 9103 tcg_temp_free_i32(addr);
2359bf80 9104 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9105 /* Table Branch. */
9106 if (rn == 15) {
7d1b0095 9107 addr = tcg_temp_new_i32();
b0109805 9108 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9109 } else {
b0109805 9110 addr = load_reg(s, rn);
9ee6e8bb 9111 }
b26eefb6 9112 tmp = load_reg(s, rm);
b0109805 9113 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9114 if (insn & (1 << 4)) {
9115 /* tbh */
b0109805 9116 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9117 tcg_temp_free_i32(tmp);
e2592fad 9118 tmp = tcg_temp_new_i32();
6ce2faf4 9119 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb 9120 } else { /* tbb */
7d1b0095 9121 tcg_temp_free_i32(tmp);
e2592fad 9122 tmp = tcg_temp_new_i32();
6ce2faf4 9123 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9ee6e8bb 9124 }
7d1b0095 9125 tcg_temp_free_i32(addr);
b0109805
PB
9126 tcg_gen_shli_i32(tmp, tmp, 1);
9127 tcg_gen_addi_i32(tmp, tmp, s->pc);
9128 store_reg(s, 15, tmp);
9ee6e8bb 9129 } else {
2359bf80 9130 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9131 op = (insn >> 4) & 0x3;
2359bf80
MR
9132 switch (op2) {
9133 case 0:
426f5abc 9134 goto illegal_op;
2359bf80
MR
9135 case 1:
9136 /* Load/store exclusive byte/halfword/doubleword */
9137 if (op == 2) {
9138 goto illegal_op;
9139 }
9140 ARCH(7);
9141 break;
9142 case 2:
9143 /* Load-acquire/store-release */
9144 if (op == 3) {
9145 goto illegal_op;
9146 }
9147 /* Fall through */
9148 case 3:
9149 /* Load-acquire/store-release exclusive */
9150 ARCH(8);
9151 break;
426f5abc 9152 }
39d5492a 9153 addr = tcg_temp_local_new_i32();
98a46317 9154 load_reg_var(s, addr, rn);
2359bf80
MR
9155 if (!(op2 & 1)) {
9156 if (insn & (1 << 20)) {
9157 tmp = tcg_temp_new_i32();
9158 switch (op) {
9159 case 0: /* ldab */
6ce2faf4 9160 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
9161 break;
9162 case 1: /* ldah */
6ce2faf4 9163 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
9164 break;
9165 case 2: /* lda */
6ce2faf4 9166 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
9167 break;
9168 default:
9169 abort();
9170 }
9171 store_reg(s, rs, tmp);
9172 } else {
9173 tmp = load_reg(s, rs);
9174 switch (op) {
9175 case 0: /* stlb */
6ce2faf4 9176 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
9177 break;
9178 case 1: /* stlh */
6ce2faf4 9179 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
9180 break;
9181 case 2: /* stl */
6ce2faf4 9182 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
9183 break;
9184 default:
9185 abort();
9186 }
9187 tcg_temp_free_i32(tmp);
9188 }
9189 } else if (insn & (1 << 20)) {
426f5abc 9190 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9191 } else {
426f5abc 9192 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9193 }
39d5492a 9194 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9195 }
9196 } else {
9197 /* Load/store multiple, RFE, SRS. */
9198 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
9199 /* RFE, SRS: not available in user mode or on M profile */
9200 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 9201 goto illegal_op;
00115976 9202 }
9ee6e8bb
PB
9203 if (insn & (1 << 20)) {
9204 /* rfe */
b0109805
PB
9205 addr = load_reg(s, rn);
9206 if ((insn & (1 << 24)) == 0)
9207 tcg_gen_addi_i32(addr, addr, -8);
9208 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9209 tmp = tcg_temp_new_i32();
6ce2faf4 9210 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9211 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9212 tmp2 = tcg_temp_new_i32();
6ce2faf4 9213 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9214 if (insn & (1 << 21)) {
9215 /* Base writeback. */
b0109805
PB
9216 if (insn & (1 << 24)) {
9217 tcg_gen_addi_i32(addr, addr, 4);
9218 } else {
9219 tcg_gen_addi_i32(addr, addr, -4);
9220 }
9221 store_reg(s, rn, addr);
9222 } else {
7d1b0095 9223 tcg_temp_free_i32(addr);
9ee6e8bb 9224 }
b0109805 9225 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9226 } else {
9227 /* srs */
81465888
PM
9228 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9229 insn & (1 << 21));
9ee6e8bb
PB
9230 }
9231 } else {
5856d44e 9232 int i, loaded_base = 0;
39d5492a 9233 TCGv_i32 loaded_var;
9ee6e8bb 9234 /* Load/store multiple. */
b0109805 9235 addr = load_reg(s, rn);
9ee6e8bb
PB
9236 offset = 0;
9237 for (i = 0; i < 16; i++) {
9238 if (insn & (1 << i))
9239 offset += 4;
9240 }
9241 if (insn & (1 << 24)) {
b0109805 9242 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9243 }
9244
39d5492a 9245 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9246 for (i = 0; i < 16; i++) {
9247 if ((insn & (1 << i)) == 0)
9248 continue;
9249 if (insn & (1 << 20)) {
9250 /* Load. */
e2592fad 9251 tmp = tcg_temp_new_i32();
6ce2faf4 9252 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 9253 if (i == 15) {
b0109805 9254 gen_bx(s, tmp);
5856d44e
YO
9255 } else if (i == rn) {
9256 loaded_var = tmp;
9257 loaded_base = 1;
9ee6e8bb 9258 } else {
b0109805 9259 store_reg(s, i, tmp);
9ee6e8bb
PB
9260 }
9261 } else {
9262 /* Store. */
b0109805 9263 tmp = load_reg(s, i);
6ce2faf4 9264 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9265 tcg_temp_free_i32(tmp);
9ee6e8bb 9266 }
b0109805 9267 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9268 }
5856d44e
YO
9269 if (loaded_base) {
9270 store_reg(s, rn, loaded_var);
9271 }
9ee6e8bb
PB
9272 if (insn & (1 << 21)) {
9273 /* Base register writeback. */
9274 if (insn & (1 << 24)) {
b0109805 9275 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9276 }
9277 /* Fault if writeback register is in register list. */
9278 if (insn & (1 << rn))
9279 goto illegal_op;
b0109805
PB
9280 store_reg(s, rn, addr);
9281 } else {
7d1b0095 9282 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9283 }
9284 }
9285 }
9286 break;
2af9ab77
JB
9287 case 5:
9288
9ee6e8bb 9289 op = (insn >> 21) & 0xf;
2af9ab77
JB
9290 if (op == 6) {
9291 /* Halfword pack. */
9292 tmp = load_reg(s, rn);
9293 tmp2 = load_reg(s, rm);
9294 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9295 if (insn & (1 << 5)) {
9296 /* pkhtb */
9297 if (shift == 0)
9298 shift = 31;
9299 tcg_gen_sari_i32(tmp2, tmp2, shift);
9300 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9301 tcg_gen_ext16u_i32(tmp2, tmp2);
9302 } else {
9303 /* pkhbt */
9304 if (shift)
9305 tcg_gen_shli_i32(tmp2, tmp2, shift);
9306 tcg_gen_ext16u_i32(tmp, tmp);
9307 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9308 }
9309 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9310 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9311 store_reg(s, rd, tmp);
9312 } else {
2af9ab77
JB
9313 /* Data processing register constant shift. */
9314 if (rn == 15) {
7d1b0095 9315 tmp = tcg_temp_new_i32();
2af9ab77
JB
9316 tcg_gen_movi_i32(tmp, 0);
9317 } else {
9318 tmp = load_reg(s, rn);
9319 }
9320 tmp2 = load_reg(s, rm);
9321
9322 shiftop = (insn >> 4) & 3;
9323 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9324 conds = (insn & (1 << 20)) != 0;
9325 logic_cc = (conds && thumb2_logic_op(op));
9326 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9327 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9328 goto illegal_op;
7d1b0095 9329 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9330 if (rd != 15) {
9331 store_reg(s, rd, tmp);
9332 } else {
7d1b0095 9333 tcg_temp_free_i32(tmp);
2af9ab77 9334 }
3174f8e9 9335 }
9ee6e8bb
PB
9336 break;
9337 case 13: /* Misc data processing. */
9338 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9339 if (op < 4 && (insn & 0xf000) != 0xf000)
9340 goto illegal_op;
9341 switch (op) {
9342 case 0: /* Register controlled shift. */
8984bd2e
PB
9343 tmp = load_reg(s, rn);
9344 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9345 if ((insn & 0x70) != 0)
9346 goto illegal_op;
9347 op = (insn >> 21) & 3;
8984bd2e
PB
9348 logic_cc = (insn & (1 << 20)) != 0;
9349 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9350 if (logic_cc)
9351 gen_logic_CC(tmp);
21aeb343 9352 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
9353 break;
9354 case 1: /* Sign/zero extend. */
5e3f878a 9355 tmp = load_reg(s, rm);
9ee6e8bb 9356 shift = (insn >> 4) & 3;
1301f322 9357 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9358 rotate, a shift is sufficient. */
9359 if (shift != 0)
f669df27 9360 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9361 op = (insn >> 20) & 7;
9362 switch (op) {
5e3f878a
PB
9363 case 0: gen_sxth(tmp); break;
9364 case 1: gen_uxth(tmp); break;
9365 case 2: gen_sxtb16(tmp); break;
9366 case 3: gen_uxtb16(tmp); break;
9367 case 4: gen_sxtb(tmp); break;
9368 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
9369 default: goto illegal_op;
9370 }
9371 if (rn != 15) {
5e3f878a 9372 tmp2 = load_reg(s, rn);
9ee6e8bb 9373 if ((op >> 1) == 1) {
5e3f878a 9374 gen_add16(tmp, tmp2);
9ee6e8bb 9375 } else {
5e3f878a 9376 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9377 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9378 }
9379 }
5e3f878a 9380 store_reg(s, rd, tmp);
9ee6e8bb
PB
9381 break;
9382 case 2: /* SIMD add/subtract. */
9383 op = (insn >> 20) & 7;
9384 shift = (insn >> 4) & 7;
9385 if ((op & 3) == 3 || (shift & 3) == 3)
9386 goto illegal_op;
6ddbc6e4
PB
9387 tmp = load_reg(s, rn);
9388 tmp2 = load_reg(s, rm);
9389 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9390 tcg_temp_free_i32(tmp2);
6ddbc6e4 9391 store_reg(s, rd, tmp);
9ee6e8bb
PB
9392 break;
9393 case 3: /* Other data processing. */
9394 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9395 if (op < 4) {
9396 /* Saturating add/subtract. */
d9ba4830
PB
9397 tmp = load_reg(s, rn);
9398 tmp2 = load_reg(s, rm);
9ee6e8bb 9399 if (op & 1)
9ef39277 9400 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9401 if (op & 2)
9ef39277 9402 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9403 else
9ef39277 9404 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9405 tcg_temp_free_i32(tmp2);
9ee6e8bb 9406 } else {
d9ba4830 9407 tmp = load_reg(s, rn);
9ee6e8bb
PB
9408 switch (op) {
9409 case 0x0a: /* rbit */
d9ba4830 9410 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9411 break;
9412 case 0x08: /* rev */
66896cb8 9413 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9414 break;
9415 case 0x09: /* rev16 */
d9ba4830 9416 gen_rev16(tmp);
9ee6e8bb
PB
9417 break;
9418 case 0x0b: /* revsh */
d9ba4830 9419 gen_revsh(tmp);
9ee6e8bb
PB
9420 break;
9421 case 0x10: /* sel */
d9ba4830 9422 tmp2 = load_reg(s, rm);
7d1b0095 9423 tmp3 = tcg_temp_new_i32();
0ecb72a5 9424 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9425 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9426 tcg_temp_free_i32(tmp3);
9427 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9428 break;
9429 case 0x18: /* clz */
d9ba4830 9430 gen_helper_clz(tmp, tmp);
9ee6e8bb 9431 break;
eb0ecd5a
WN
9432 case 0x20:
9433 case 0x21:
9434 case 0x22:
9435 case 0x28:
9436 case 0x29:
9437 case 0x2a:
9438 {
9439 /* crc32/crc32c */
9440 uint32_t sz = op & 0x3;
9441 uint32_t c = op & 0x8;
9442
9443 if (!arm_feature(env, ARM_FEATURE_CRC)) {
9444 goto illegal_op;
9445 }
9446
9447 tmp2 = load_reg(s, rm);
aa633469
PM
9448 if (sz == 0) {
9449 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9450 } else if (sz == 1) {
9451 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9452 }
eb0ecd5a
WN
9453 tmp3 = tcg_const_i32(1 << sz);
9454 if (c) {
9455 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9456 } else {
9457 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9458 }
9459 tcg_temp_free_i32(tmp2);
9460 tcg_temp_free_i32(tmp3);
9461 break;
9462 }
9ee6e8bb
PB
9463 default:
9464 goto illegal_op;
9465 }
9466 }
d9ba4830 9467 store_reg(s, rd, tmp);
9ee6e8bb
PB
9468 break;
9469 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9470 op = (insn >> 4) & 0xf;
d9ba4830
PB
9471 tmp = load_reg(s, rn);
9472 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9473 switch ((insn >> 20) & 7) {
9474 case 0: /* 32 x 32 -> 32 */
d9ba4830 9475 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9476 tcg_temp_free_i32(tmp2);
9ee6e8bb 9477 if (rs != 15) {
d9ba4830 9478 tmp2 = load_reg(s, rs);
9ee6e8bb 9479 if (op)
d9ba4830 9480 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9481 else
d9ba4830 9482 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9483 tcg_temp_free_i32(tmp2);
9ee6e8bb 9484 }
9ee6e8bb
PB
9485 break;
9486 case 1: /* 16 x 16 -> 32 */
d9ba4830 9487 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9488 tcg_temp_free_i32(tmp2);
9ee6e8bb 9489 if (rs != 15) {
d9ba4830 9490 tmp2 = load_reg(s, rs);
9ef39277 9491 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9492 tcg_temp_free_i32(tmp2);
9ee6e8bb 9493 }
9ee6e8bb
PB
9494 break;
9495 case 2: /* Dual multiply add. */
9496 case 4: /* Dual multiply subtract. */
9497 if (op)
d9ba4830
PB
9498 gen_swap_half(tmp2);
9499 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9500 if (insn & (1 << 22)) {
e1d177b9 9501 /* This subtraction cannot overflow. */
d9ba4830 9502 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9503 } else {
e1d177b9
PM
9504 /* This addition cannot overflow 32 bits;
9505 * however it may overflow considered as a signed
9506 * operation, in which case we must set the Q flag.
9507 */
9ef39277 9508 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9509 }
7d1b0095 9510 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9511 if (rs != 15)
9512 {
d9ba4830 9513 tmp2 = load_reg(s, rs);
9ef39277 9514 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9515 tcg_temp_free_i32(tmp2);
9ee6e8bb 9516 }
9ee6e8bb
PB
9517 break;
9518 case 3: /* 32 * 16 -> 32msb */
9519 if (op)
d9ba4830 9520 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9521 else
d9ba4830 9522 gen_sxth(tmp2);
a7812ae4
PB
9523 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9524 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9525 tmp = tcg_temp_new_i32();
a7812ae4 9526 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 9527 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9528 if (rs != 15)
9529 {
d9ba4830 9530 tmp2 = load_reg(s, rs);
9ef39277 9531 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9532 tcg_temp_free_i32(tmp2);
9ee6e8bb 9533 }
9ee6e8bb 9534 break;
838fa72d
AJ
9535 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9536 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9537 if (rs != 15) {
838fa72d
AJ
9538 tmp = load_reg(s, rs);
9539 if (insn & (1 << 20)) {
9540 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9541 } else {
838fa72d 9542 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9543 }
2c0262af 9544 }
838fa72d
AJ
9545 if (insn & (1 << 4)) {
9546 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9547 }
9548 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9549 tmp = tcg_temp_new_i32();
838fa72d
AJ
9550 tcg_gen_trunc_i64_i32(tmp, tmp64);
9551 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9552 break;
9553 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9554 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9555 tcg_temp_free_i32(tmp2);
9ee6e8bb 9556 if (rs != 15) {
d9ba4830
PB
9557 tmp2 = load_reg(s, rs);
9558 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9559 tcg_temp_free_i32(tmp2);
5fd46862 9560 }
9ee6e8bb 9561 break;
2c0262af 9562 }
d9ba4830 9563 store_reg(s, rd, tmp);
2c0262af 9564 break;
9ee6e8bb
PB
9565 case 6: case 7: /* 64-bit multiply, Divide. */
9566 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9567 tmp = load_reg(s, rn);
9568 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9569 if ((op & 0x50) == 0x10) {
9570 /* sdiv, udiv */
47789990 9571 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9572 goto illegal_op;
47789990 9573 }
9ee6e8bb 9574 if (op & 0x20)
5e3f878a 9575 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9576 else
5e3f878a 9577 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9578 tcg_temp_free_i32(tmp2);
5e3f878a 9579 store_reg(s, rd, tmp);
9ee6e8bb
PB
9580 } else if ((op & 0xe) == 0xc) {
9581 /* Dual multiply accumulate long. */
9582 if (op & 1)
5e3f878a
PB
9583 gen_swap_half(tmp2);
9584 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9585 if (op & 0x10) {
5e3f878a 9586 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9587 } else {
5e3f878a 9588 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9589 }
7d1b0095 9590 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9591 /* BUGFIX */
9592 tmp64 = tcg_temp_new_i64();
9593 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9594 tcg_temp_free_i32(tmp);
a7812ae4
PB
9595 gen_addq(s, tmp64, rs, rd);
9596 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9597 tcg_temp_free_i64(tmp64);
2c0262af 9598 } else {
9ee6e8bb
PB
9599 if (op & 0x20) {
9600 /* Unsigned 64-bit multiply */
a7812ae4 9601 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9602 } else {
9ee6e8bb
PB
9603 if (op & 8) {
9604 /* smlalxy */
5e3f878a 9605 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9606 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9607 tmp64 = tcg_temp_new_i64();
9608 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9609 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9610 } else {
9611 /* Signed 64-bit multiply */
a7812ae4 9612 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9613 }
b5ff1b31 9614 }
9ee6e8bb
PB
9615 if (op & 4) {
9616 /* umaal */
a7812ae4
PB
9617 gen_addq_lo(s, tmp64, rs);
9618 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9619 } else if (op & 0x40) {
9620 /* 64-bit accumulate. */
a7812ae4 9621 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9622 }
a7812ae4 9623 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9624 tcg_temp_free_i64(tmp64);
5fd46862 9625 }
2c0262af 9626 break;
9ee6e8bb
PB
9627 }
9628 break;
9629 case 6: case 7: case 14: case 15:
9630 /* Coprocessor. */
9631 if (((insn >> 24) & 3) == 3) {
9632 /* Translate into the equivalent ARM encoding. */
f06053e3 9633 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
9634 if (disas_neon_data_insn(env, s, insn))
9635 goto illegal_op;
6a57f3eb
WN
9636 } else if (((insn >> 8) & 0xe) == 10) {
9637 if (disas_vfp_insn(env, s, insn)) {
9638 goto illegal_op;
9639 }
9ee6e8bb
PB
9640 } else {
9641 if (insn & (1 << 28))
9642 goto illegal_op;
9643 if (disas_coproc_insn (env, s, insn))
9644 goto illegal_op;
9645 }
9646 break;
9647 case 8: case 9: case 10: case 11:
9648 if (insn & (1 << 15)) {
9649 /* Branches, misc control. */
9650 if (insn & 0x5000) {
9651 /* Unconditional branch. */
9652 /* signextend(hw1[10:0]) -> offset[:12]. */
9653 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9654 /* hw1[10:0] -> offset[11:1]. */
9655 offset |= (insn & 0x7ff) << 1;
9656 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9657 offset[24:22] already have the same value because of the
9658 sign extension above. */
9659 offset ^= ((~insn) & (1 << 13)) << 10;
9660 offset ^= ((~insn) & (1 << 11)) << 11;
9661
9ee6e8bb
PB
9662 if (insn & (1 << 14)) {
9663 /* Branch and link. */
3174f8e9 9664 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9665 }
3b46e624 9666
b0109805 9667 offset += s->pc;
9ee6e8bb
PB
9668 if (insn & (1 << 12)) {
9669 /* b/bl */
b0109805 9670 gen_jmp(s, offset);
9ee6e8bb
PB
9671 } else {
9672 /* blx */
b0109805 9673 offset &= ~(uint32_t)2;
be5e7a76 9674 /* thumb2 bx, no need to check */
b0109805 9675 gen_bx_im(s, offset);
2c0262af 9676 }
9ee6e8bb
PB
9677 } else if (((insn >> 23) & 7) == 7) {
9678 /* Misc control */
9679 if (insn & (1 << 13))
9680 goto illegal_op;
9681
9682 if (insn & (1 << 26)) {
9683 /* Secure monitor call (v6Z) */
e0c270d9
SW
9684 qemu_log_mask(LOG_UNIMP,
9685 "arm: unimplemented secure monitor call\n");
9ee6e8bb 9686 goto illegal_op; /* not implemented. */
2c0262af 9687 } else {
9ee6e8bb
PB
9688 op = (insn >> 20) & 7;
9689 switch (op) {
9690 case 0: /* msr cpsr. */
9691 if (IS_M(env)) {
8984bd2e
PB
9692 tmp = load_reg(s, rn);
9693 addr = tcg_const_i32(insn & 0xff);
9694 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9695 tcg_temp_free_i32(addr);
7d1b0095 9696 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9697 gen_lookup_tb(s);
9698 break;
9699 }
9700 /* fall through */
9701 case 1: /* msr spsr. */
9702 if (IS_M(env))
9703 goto illegal_op;
2fbac54b
FN
9704 tmp = load_reg(s, rn);
9705 if (gen_set_psr(s,
9ee6e8bb 9706 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 9707 op == 1, tmp))
9ee6e8bb
PB
9708 goto illegal_op;
9709 break;
9710 case 2: /* cps, nop-hint. */
9711 if (((insn >> 8) & 7) == 0) {
9712 gen_nop_hint(s, insn & 0xff);
9713 }
9714 /* Implemented as NOP in user mode. */
9715 if (IS_USER(s))
9716 break;
9717 offset = 0;
9718 imm = 0;
9719 if (insn & (1 << 10)) {
9720 if (insn & (1 << 7))
9721 offset |= CPSR_A;
9722 if (insn & (1 << 6))
9723 offset |= CPSR_I;
9724 if (insn & (1 << 5))
9725 offset |= CPSR_F;
9726 if (insn & (1 << 9))
9727 imm = CPSR_A | CPSR_I | CPSR_F;
9728 }
9729 if (insn & (1 << 8)) {
9730 offset |= 0x1f;
9731 imm |= (insn & 0x1f);
9732 }
9733 if (offset) {
2fbac54b 9734 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
9735 }
9736 break;
9737 case 3: /* Special control operations. */
426f5abc 9738 ARCH(7);
9ee6e8bb
PB
9739 op = (insn >> 4) & 0xf;
9740 switch (op) {
9741 case 2: /* clrex */
426f5abc 9742 gen_clrex(s);
9ee6e8bb
PB
9743 break;
9744 case 4: /* dsb */
9745 case 5: /* dmb */
9746 case 6: /* isb */
9747 /* These execute as NOPs. */
9ee6e8bb
PB
9748 break;
9749 default:
9750 goto illegal_op;
9751 }
9752 break;
9753 case 4: /* bxj */
9754 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9755 tmp = load_reg(s, rn);
9756 gen_bx(s, tmp);
9ee6e8bb
PB
9757 break;
9758 case 5: /* Exception return. */
b8b45b68
RV
9759 if (IS_USER(s)) {
9760 goto illegal_op;
9761 }
9762 if (rn != 14 || rd != 15) {
9763 goto illegal_op;
9764 }
9765 tmp = load_reg(s, rn);
9766 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9767 gen_exception_return(s, tmp);
9768 break;
9ee6e8bb 9769 case 6: /* mrs cpsr. */
7d1b0095 9770 tmp = tcg_temp_new_i32();
9ee6e8bb 9771 if (IS_M(env)) {
8984bd2e
PB
9772 addr = tcg_const_i32(insn & 0xff);
9773 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 9774 tcg_temp_free_i32(addr);
9ee6e8bb 9775 } else {
9ef39277 9776 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9777 }
8984bd2e 9778 store_reg(s, rd, tmp);
9ee6e8bb
PB
9779 break;
9780 case 7: /* mrs spsr. */
9781 /* Not accessible in user mode. */
9782 if (IS_USER(s) || IS_M(env))
9783 goto illegal_op;
d9ba4830
PB
9784 tmp = load_cpu_field(spsr);
9785 store_reg(s, rd, tmp);
9ee6e8bb 9786 break;
2c0262af
FB
9787 }
9788 }
9ee6e8bb
PB
9789 } else {
9790 /* Conditional branch. */
9791 op = (insn >> 22) & 0xf;
9792 /* Generate a conditional jump to next instruction. */
9793 s->condlabel = gen_new_label();
39fb730a 9794 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
9795 s->condjmp = 1;
9796
9797 /* offset[11:1] = insn[10:0] */
9798 offset = (insn & 0x7ff) << 1;
9799 /* offset[17:12] = insn[21:16]. */
9800 offset |= (insn & 0x003f0000) >> 4;
9801 /* offset[31:20] = insn[26]. */
9802 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9803 /* offset[18] = insn[13]. */
9804 offset |= (insn & (1 << 13)) << 5;
9805 /* offset[19] = insn[11]. */
9806 offset |= (insn & (1 << 11)) << 8;
9807
9808 /* jump to the offset */
b0109805 9809 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
9810 }
9811 } else {
9812 /* Data processing immediate. */
9813 if (insn & (1 << 25)) {
9814 if (insn & (1 << 24)) {
9815 if (insn & (1 << 20))
9816 goto illegal_op;
9817 /* Bitfield/Saturate. */
9818 op = (insn >> 21) & 7;
9819 imm = insn & 0x1f;
9820 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 9821 if (rn == 15) {
7d1b0095 9822 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
9823 tcg_gen_movi_i32(tmp, 0);
9824 } else {
9825 tmp = load_reg(s, rn);
9826 }
9ee6e8bb
PB
9827 switch (op) {
9828 case 2: /* Signed bitfield extract. */
9829 imm++;
9830 if (shift + imm > 32)
9831 goto illegal_op;
9832 if (imm < 32)
6ddbc6e4 9833 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
9834 break;
9835 case 6: /* Unsigned bitfield extract. */
9836 imm++;
9837 if (shift + imm > 32)
9838 goto illegal_op;
9839 if (imm < 32)
6ddbc6e4 9840 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
9841 break;
9842 case 3: /* Bitfield insert/clear. */
9843 if (imm < shift)
9844 goto illegal_op;
9845 imm = imm + 1 - shift;
9846 if (imm != 32) {
6ddbc6e4 9847 tmp2 = load_reg(s, rd);
d593c48e 9848 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 9849 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9850 }
9851 break;
9852 case 7:
9853 goto illegal_op;
9854 default: /* Saturate. */
9ee6e8bb
PB
9855 if (shift) {
9856 if (op & 1)
6ddbc6e4 9857 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9858 else
6ddbc6e4 9859 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 9860 }
6ddbc6e4 9861 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
9862 if (op & 4) {
9863 /* Unsigned. */
9ee6e8bb 9864 if ((op & 1) && shift == 0)
9ef39277 9865 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9866 else
9ef39277 9867 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 9868 } else {
9ee6e8bb 9869 /* Signed. */
9ee6e8bb 9870 if ((op & 1) && shift == 0)
9ef39277 9871 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9872 else
9ef39277 9873 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 9874 }
b75263d6 9875 tcg_temp_free_i32(tmp2);
9ee6e8bb 9876 break;
2c0262af 9877 }
6ddbc6e4 9878 store_reg(s, rd, tmp);
9ee6e8bb
PB
9879 } else {
9880 imm = ((insn & 0x04000000) >> 15)
9881 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9882 if (insn & (1 << 22)) {
9883 /* 16-bit immediate. */
9884 imm |= (insn >> 4) & 0xf000;
9885 if (insn & (1 << 23)) {
9886 /* movt */
5e3f878a 9887 tmp = load_reg(s, rd);
86831435 9888 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9889 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9890 } else {
9ee6e8bb 9891 /* movw */
7d1b0095 9892 tmp = tcg_temp_new_i32();
5e3f878a 9893 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9894 }
9895 } else {
9ee6e8bb
PB
9896 /* Add/sub 12-bit immediate. */
9897 if (rn == 15) {
b0109805 9898 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9899 if (insn & (1 << 23))
b0109805 9900 offset -= imm;
9ee6e8bb 9901 else
b0109805 9902 offset += imm;
7d1b0095 9903 tmp = tcg_temp_new_i32();
5e3f878a 9904 tcg_gen_movi_i32(tmp, offset);
2c0262af 9905 } else {
5e3f878a 9906 tmp = load_reg(s, rn);
9ee6e8bb 9907 if (insn & (1 << 23))
5e3f878a 9908 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 9909 else
5e3f878a 9910 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 9911 }
9ee6e8bb 9912 }
5e3f878a 9913 store_reg(s, rd, tmp);
191abaa2 9914 }
9ee6e8bb
PB
9915 } else {
9916 int shifter_out = 0;
9917 /* modified 12-bit immediate. */
9918 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9919 imm = (insn & 0xff);
9920 switch (shift) {
9921 case 0: /* XY */
9922 /* Nothing to do. */
9923 break;
9924 case 1: /* 00XY00XY */
9925 imm |= imm << 16;
9926 break;
9927 case 2: /* XY00XY00 */
9928 imm |= imm << 16;
9929 imm <<= 8;
9930 break;
9931 case 3: /* XYXYXYXY */
9932 imm |= imm << 16;
9933 imm |= imm << 8;
9934 break;
9935 default: /* Rotated constant. */
9936 shift = (shift << 1) | (imm >> 7);
9937 imm |= 0x80;
9938 imm = imm << (32 - shift);
9939 shifter_out = 1;
9940 break;
b5ff1b31 9941 }
7d1b0095 9942 tmp2 = tcg_temp_new_i32();
3174f8e9 9943 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9944 rn = (insn >> 16) & 0xf;
3174f8e9 9945 if (rn == 15) {
7d1b0095 9946 tmp = tcg_temp_new_i32();
3174f8e9
FN
9947 tcg_gen_movi_i32(tmp, 0);
9948 } else {
9949 tmp = load_reg(s, rn);
9950 }
9ee6e8bb
PB
9951 op = (insn >> 21) & 0xf;
9952 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9953 shifter_out, tmp, tmp2))
9ee6e8bb 9954 goto illegal_op;
7d1b0095 9955 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9956 rd = (insn >> 8) & 0xf;
9957 if (rd != 15) {
3174f8e9
FN
9958 store_reg(s, rd, tmp);
9959 } else {
7d1b0095 9960 tcg_temp_free_i32(tmp);
2c0262af 9961 }
2c0262af 9962 }
9ee6e8bb
PB
9963 }
9964 break;
9965 case 12: /* Load/store single data item. */
9966 {
9967 int postinc = 0;
9968 int writeback = 0;
a99caa48 9969 int memidx;
9ee6e8bb
PB
9970 if ((insn & 0x01100000) == 0x01000000) {
9971 if (disas_neon_ls_insn(env, s, insn))
c1713132 9972 goto illegal_op;
9ee6e8bb
PB
9973 break;
9974 }
a2fdc890
PM
9975 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9976 if (rs == 15) {
9977 if (!(insn & (1 << 20))) {
9978 goto illegal_op;
9979 }
9980 if (op != 2) {
9981 /* Byte or halfword load space with dest == r15 : memory hints.
9982 * Catch them early so we don't emit pointless addressing code.
9983 * This space is a mix of:
9984 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9985 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9986 * cores)
9987 * unallocated hints, which must be treated as NOPs
9988 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9989 * which is easiest for the decoding logic
9990 * Some space which must UNDEF
9991 */
9992 int op1 = (insn >> 23) & 3;
9993 int op2 = (insn >> 6) & 0x3f;
9994 if (op & 2) {
9995 goto illegal_op;
9996 }
9997 if (rn == 15) {
02afbf64
PM
9998 /* UNPREDICTABLE, unallocated hint or
9999 * PLD/PLDW/PLI (literal)
10000 */
a2fdc890
PM
10001 return 0;
10002 }
10003 if (op1 & 1) {
02afbf64 10004 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10005 }
10006 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10007 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10008 }
10009 /* UNDEF space, or an UNPREDICTABLE */
10010 return 1;
10011 }
10012 }
a99caa48 10013 memidx = get_mem_index(s);
9ee6e8bb 10014 if (rn == 15) {
7d1b0095 10015 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10016 /* PC relative. */
10017 /* s->pc has already been incremented by 4. */
10018 imm = s->pc & 0xfffffffc;
10019 if (insn & (1 << 23))
10020 imm += insn & 0xfff;
10021 else
10022 imm -= insn & 0xfff;
b0109805 10023 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10024 } else {
b0109805 10025 addr = load_reg(s, rn);
9ee6e8bb
PB
10026 if (insn & (1 << 23)) {
10027 /* Positive offset. */
10028 imm = insn & 0xfff;
b0109805 10029 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10030 } else {
9ee6e8bb 10031 imm = insn & 0xff;
2a0308c5
PM
10032 switch ((insn >> 8) & 0xf) {
10033 case 0x0: /* Shifted Register. */
9ee6e8bb 10034 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10035 if (shift > 3) {
10036 tcg_temp_free_i32(addr);
18c9b560 10037 goto illegal_op;
2a0308c5 10038 }
b26eefb6 10039 tmp = load_reg(s, rm);
9ee6e8bb 10040 if (shift)
b26eefb6 10041 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10042 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10043 tcg_temp_free_i32(tmp);
9ee6e8bb 10044 break;
2a0308c5 10045 case 0xc: /* Negative offset. */
b0109805 10046 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10047 break;
2a0308c5 10048 case 0xe: /* User privilege. */
b0109805 10049 tcg_gen_addi_i32(addr, addr, imm);
a99caa48 10050 memidx = MMU_USER_IDX;
9ee6e8bb 10051 break;
2a0308c5 10052 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10053 imm = -imm;
10054 /* Fall through. */
2a0308c5 10055 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10056 postinc = 1;
10057 writeback = 1;
10058 break;
2a0308c5 10059 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10060 imm = -imm;
10061 /* Fall through. */
2a0308c5 10062 case 0xf: /* Pre-increment. */
b0109805 10063 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10064 writeback = 1;
10065 break;
10066 default:
2a0308c5 10067 tcg_temp_free_i32(addr);
b7bcbe95 10068 goto illegal_op;
9ee6e8bb
PB
10069 }
10070 }
10071 }
9ee6e8bb
PB
10072 if (insn & (1 << 20)) {
10073 /* Load. */
5a839c0d 10074 tmp = tcg_temp_new_i32();
a2fdc890 10075 switch (op) {
5a839c0d 10076 case 0:
a99caa48 10077 gen_aa32_ld8u(tmp, addr, memidx);
5a839c0d
PM
10078 break;
10079 case 4:
a99caa48 10080 gen_aa32_ld8s(tmp, addr, memidx);
5a839c0d
PM
10081 break;
10082 case 1:
a99caa48 10083 gen_aa32_ld16u(tmp, addr, memidx);
5a839c0d
PM
10084 break;
10085 case 5:
a99caa48 10086 gen_aa32_ld16s(tmp, addr, memidx);
5a839c0d
PM
10087 break;
10088 case 2:
a99caa48 10089 gen_aa32_ld32u(tmp, addr, memidx);
5a839c0d 10090 break;
2a0308c5 10091 default:
5a839c0d 10092 tcg_temp_free_i32(tmp);
2a0308c5
PM
10093 tcg_temp_free_i32(addr);
10094 goto illegal_op;
a2fdc890
PM
10095 }
10096 if (rs == 15) {
10097 gen_bx(s, tmp);
9ee6e8bb 10098 } else {
a2fdc890 10099 store_reg(s, rs, tmp);
9ee6e8bb
PB
10100 }
10101 } else {
10102 /* Store. */
b0109805 10103 tmp = load_reg(s, rs);
9ee6e8bb 10104 switch (op) {
5a839c0d 10105 case 0:
a99caa48 10106 gen_aa32_st8(tmp, addr, memidx);
5a839c0d
PM
10107 break;
10108 case 1:
a99caa48 10109 gen_aa32_st16(tmp, addr, memidx);
5a839c0d
PM
10110 break;
10111 case 2:
a99caa48 10112 gen_aa32_st32(tmp, addr, memidx);
5a839c0d 10113 break;
2a0308c5 10114 default:
5a839c0d 10115 tcg_temp_free_i32(tmp);
2a0308c5
PM
10116 tcg_temp_free_i32(addr);
10117 goto illegal_op;
b7bcbe95 10118 }
5a839c0d 10119 tcg_temp_free_i32(tmp);
2c0262af 10120 }
9ee6e8bb 10121 if (postinc)
b0109805
PB
10122 tcg_gen_addi_i32(addr, addr, imm);
10123 if (writeback) {
10124 store_reg(s, rn, addr);
10125 } else {
7d1b0095 10126 tcg_temp_free_i32(addr);
b0109805 10127 }
9ee6e8bb
PB
10128 }
10129 break;
10130 default:
10131 goto illegal_op;
2c0262af 10132 }
9ee6e8bb
PB
10133 return 0;
10134illegal_op:
10135 return 1;
2c0262af
FB
10136}
10137
0ecb72a5 10138static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10139{
10140 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10141 int32_t offset;
10142 int i;
39d5492a
PM
10143 TCGv_i32 tmp;
10144 TCGv_i32 tmp2;
10145 TCGv_i32 addr;
99c475ab 10146
9ee6e8bb
PB
10147 if (s->condexec_mask) {
10148 cond = s->condexec_cond;
bedd2912
JB
10149 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10150 s->condlabel = gen_new_label();
39fb730a 10151 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10152 s->condjmp = 1;
10153 }
9ee6e8bb
PB
10154 }
10155
d31dd73e 10156 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 10157 s->pc += 2;
b5ff1b31 10158
99c475ab
FB
10159 switch (insn >> 12) {
10160 case 0: case 1:
396e467c 10161
99c475ab
FB
10162 rd = insn & 7;
10163 op = (insn >> 11) & 3;
10164 if (op == 3) {
10165 /* add/subtract */
10166 rn = (insn >> 3) & 7;
396e467c 10167 tmp = load_reg(s, rn);
99c475ab
FB
10168 if (insn & (1 << 10)) {
10169 /* immediate */
7d1b0095 10170 tmp2 = tcg_temp_new_i32();
396e467c 10171 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10172 } else {
10173 /* reg */
10174 rm = (insn >> 6) & 7;
396e467c 10175 tmp2 = load_reg(s, rm);
99c475ab 10176 }
9ee6e8bb
PB
10177 if (insn & (1 << 9)) {
10178 if (s->condexec_mask)
396e467c 10179 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10180 else
72485ec4 10181 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10182 } else {
10183 if (s->condexec_mask)
396e467c 10184 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10185 else
72485ec4 10186 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10187 }
7d1b0095 10188 tcg_temp_free_i32(tmp2);
396e467c 10189 store_reg(s, rd, tmp);
99c475ab
FB
10190 } else {
10191 /* shift immediate */
10192 rm = (insn >> 3) & 7;
10193 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10194 tmp = load_reg(s, rm);
10195 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10196 if (!s->condexec_mask)
10197 gen_logic_CC(tmp);
10198 store_reg(s, rd, tmp);
99c475ab
FB
10199 }
10200 break;
10201 case 2: case 3:
10202 /* arithmetic large immediate */
10203 op = (insn >> 11) & 3;
10204 rd = (insn >> 8) & 0x7;
396e467c 10205 if (op == 0) { /* mov */
7d1b0095 10206 tmp = tcg_temp_new_i32();
396e467c 10207 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10208 if (!s->condexec_mask)
396e467c
FN
10209 gen_logic_CC(tmp);
10210 store_reg(s, rd, tmp);
10211 } else {
10212 tmp = load_reg(s, rd);
7d1b0095 10213 tmp2 = tcg_temp_new_i32();
396e467c
FN
10214 tcg_gen_movi_i32(tmp2, insn & 0xff);
10215 switch (op) {
10216 case 1: /* cmp */
72485ec4 10217 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10218 tcg_temp_free_i32(tmp);
10219 tcg_temp_free_i32(tmp2);
396e467c
FN
10220 break;
10221 case 2: /* add */
10222 if (s->condexec_mask)
10223 tcg_gen_add_i32(tmp, tmp, tmp2);
10224 else
72485ec4 10225 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10226 tcg_temp_free_i32(tmp2);
396e467c
FN
10227 store_reg(s, rd, tmp);
10228 break;
10229 case 3: /* sub */
10230 if (s->condexec_mask)
10231 tcg_gen_sub_i32(tmp, tmp, tmp2);
10232 else
72485ec4 10233 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10234 tcg_temp_free_i32(tmp2);
396e467c
FN
10235 store_reg(s, rd, tmp);
10236 break;
10237 }
99c475ab 10238 }
99c475ab
FB
10239 break;
10240 case 4:
10241 if (insn & (1 << 11)) {
10242 rd = (insn >> 8) & 7;
5899f386
FB
10243 /* load pc-relative. Bit 1 of PC is ignored. */
10244 val = s->pc + 2 + ((insn & 0xff) * 4);
10245 val &= ~(uint32_t)2;
7d1b0095 10246 addr = tcg_temp_new_i32();
b0109805 10247 tcg_gen_movi_i32(addr, val);
c40c8556 10248 tmp = tcg_temp_new_i32();
6ce2faf4 10249 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7d1b0095 10250 tcg_temp_free_i32(addr);
b0109805 10251 store_reg(s, rd, tmp);
99c475ab
FB
10252 break;
10253 }
10254 if (insn & (1 << 10)) {
10255 /* data processing extended or blx */
10256 rd = (insn & 7) | ((insn >> 4) & 8);
10257 rm = (insn >> 3) & 0xf;
10258 op = (insn >> 8) & 3;
10259 switch (op) {
10260 case 0: /* add */
396e467c
FN
10261 tmp = load_reg(s, rd);
10262 tmp2 = load_reg(s, rm);
10263 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10264 tcg_temp_free_i32(tmp2);
396e467c 10265 store_reg(s, rd, tmp);
99c475ab
FB
10266 break;
10267 case 1: /* cmp */
396e467c
FN
10268 tmp = load_reg(s, rd);
10269 tmp2 = load_reg(s, rm);
72485ec4 10270 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10271 tcg_temp_free_i32(tmp2);
10272 tcg_temp_free_i32(tmp);
99c475ab
FB
10273 break;
10274 case 2: /* mov/cpy */
396e467c
FN
10275 tmp = load_reg(s, rm);
10276 store_reg(s, rd, tmp);
99c475ab
FB
10277 break;
10278 case 3:/* branch [and link] exchange thumb register */
b0109805 10279 tmp = load_reg(s, rm);
99c475ab 10280 if (insn & (1 << 7)) {
be5e7a76 10281 ARCH(5);
99c475ab 10282 val = (uint32_t)s->pc | 1;
7d1b0095 10283 tmp2 = tcg_temp_new_i32();
b0109805
PB
10284 tcg_gen_movi_i32(tmp2, val);
10285 store_reg(s, 14, tmp2);
99c475ab 10286 }
be5e7a76 10287 /* already thumb, no need to check */
d9ba4830 10288 gen_bx(s, tmp);
99c475ab
FB
10289 break;
10290 }
10291 break;
10292 }
10293
10294 /* data processing register */
10295 rd = insn & 7;
10296 rm = (insn >> 3) & 7;
10297 op = (insn >> 6) & 0xf;
10298 if (op == 2 || op == 3 || op == 4 || op == 7) {
10299 /* the shift/rotate ops want the operands backwards */
10300 val = rm;
10301 rm = rd;
10302 rd = val;
10303 val = 1;
10304 } else {
10305 val = 0;
10306 }
10307
396e467c 10308 if (op == 9) { /* neg */
7d1b0095 10309 tmp = tcg_temp_new_i32();
396e467c
FN
10310 tcg_gen_movi_i32(tmp, 0);
10311 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10312 tmp = load_reg(s, rd);
10313 } else {
39d5492a 10314 TCGV_UNUSED_I32(tmp);
396e467c 10315 }
99c475ab 10316
396e467c 10317 tmp2 = load_reg(s, rm);
5899f386 10318 switch (op) {
99c475ab 10319 case 0x0: /* and */
396e467c 10320 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 10321 if (!s->condexec_mask)
396e467c 10322 gen_logic_CC(tmp);
99c475ab
FB
10323 break;
10324 case 0x1: /* eor */
396e467c 10325 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 10326 if (!s->condexec_mask)
396e467c 10327 gen_logic_CC(tmp);
99c475ab
FB
10328 break;
10329 case 0x2: /* lsl */
9ee6e8bb 10330 if (s->condexec_mask) {
365af80e 10331 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 10332 } else {
9ef39277 10333 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10334 gen_logic_CC(tmp2);
9ee6e8bb 10335 }
99c475ab
FB
10336 break;
10337 case 0x3: /* lsr */
9ee6e8bb 10338 if (s->condexec_mask) {
365af80e 10339 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 10340 } else {
9ef39277 10341 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10342 gen_logic_CC(tmp2);
9ee6e8bb 10343 }
99c475ab
FB
10344 break;
10345 case 0x4: /* asr */
9ee6e8bb 10346 if (s->condexec_mask) {
365af80e 10347 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 10348 } else {
9ef39277 10349 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10350 gen_logic_CC(tmp2);
9ee6e8bb 10351 }
99c475ab
FB
10352 break;
10353 case 0x5: /* adc */
49b4c31e 10354 if (s->condexec_mask) {
396e467c 10355 gen_adc(tmp, tmp2);
49b4c31e
RH
10356 } else {
10357 gen_adc_CC(tmp, tmp, tmp2);
10358 }
99c475ab
FB
10359 break;
10360 case 0x6: /* sbc */
2de68a49 10361 if (s->condexec_mask) {
396e467c 10362 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
10363 } else {
10364 gen_sbc_CC(tmp, tmp, tmp2);
10365 }
99c475ab
FB
10366 break;
10367 case 0x7: /* ror */
9ee6e8bb 10368 if (s->condexec_mask) {
f669df27
AJ
10369 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10370 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 10371 } else {
9ef39277 10372 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10373 gen_logic_CC(tmp2);
9ee6e8bb 10374 }
99c475ab
FB
10375 break;
10376 case 0x8: /* tst */
396e467c
FN
10377 tcg_gen_and_i32(tmp, tmp, tmp2);
10378 gen_logic_CC(tmp);
99c475ab 10379 rd = 16;
5899f386 10380 break;
99c475ab 10381 case 0x9: /* neg */
9ee6e8bb 10382 if (s->condexec_mask)
396e467c 10383 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 10384 else
72485ec4 10385 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10386 break;
10387 case 0xa: /* cmp */
72485ec4 10388 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10389 rd = 16;
10390 break;
10391 case 0xb: /* cmn */
72485ec4 10392 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
10393 rd = 16;
10394 break;
10395 case 0xc: /* orr */
396e467c 10396 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 10397 if (!s->condexec_mask)
396e467c 10398 gen_logic_CC(tmp);
99c475ab
FB
10399 break;
10400 case 0xd: /* mul */
7b2919a0 10401 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 10402 if (!s->condexec_mask)
396e467c 10403 gen_logic_CC(tmp);
99c475ab
FB
10404 break;
10405 case 0xe: /* bic */
f669df27 10406 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 10407 if (!s->condexec_mask)
396e467c 10408 gen_logic_CC(tmp);
99c475ab
FB
10409 break;
10410 case 0xf: /* mvn */
396e467c 10411 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 10412 if (!s->condexec_mask)
396e467c 10413 gen_logic_CC(tmp2);
99c475ab 10414 val = 1;
5899f386 10415 rm = rd;
99c475ab
FB
10416 break;
10417 }
10418 if (rd != 16) {
396e467c
FN
10419 if (val) {
10420 store_reg(s, rm, tmp2);
10421 if (op != 0xf)
7d1b0095 10422 tcg_temp_free_i32(tmp);
396e467c
FN
10423 } else {
10424 store_reg(s, rd, tmp);
7d1b0095 10425 tcg_temp_free_i32(tmp2);
396e467c
FN
10426 }
10427 } else {
7d1b0095
PM
10428 tcg_temp_free_i32(tmp);
10429 tcg_temp_free_i32(tmp2);
99c475ab
FB
10430 }
10431 break;
10432
10433 case 5:
10434 /* load/store register offset. */
10435 rd = insn & 7;
10436 rn = (insn >> 3) & 7;
10437 rm = (insn >> 6) & 7;
10438 op = (insn >> 9) & 7;
b0109805 10439 addr = load_reg(s, rn);
b26eefb6 10440 tmp = load_reg(s, rm);
b0109805 10441 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10442 tcg_temp_free_i32(tmp);
99c475ab 10443
c40c8556 10444 if (op < 3) { /* store */
b0109805 10445 tmp = load_reg(s, rd);
c40c8556
PM
10446 } else {
10447 tmp = tcg_temp_new_i32();
10448 }
99c475ab
FB
10449
10450 switch (op) {
10451 case 0: /* str */
6ce2faf4 10452 gen_aa32_st32(tmp, addr, get_mem_index(s));
99c475ab
FB
10453 break;
10454 case 1: /* strh */
6ce2faf4 10455 gen_aa32_st16(tmp, addr, get_mem_index(s));
99c475ab
FB
10456 break;
10457 case 2: /* strb */
6ce2faf4 10458 gen_aa32_st8(tmp, addr, get_mem_index(s));
99c475ab
FB
10459 break;
10460 case 3: /* ldrsb */
6ce2faf4 10461 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
99c475ab
FB
10462 break;
10463 case 4: /* ldr */
6ce2faf4 10464 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10465 break;
10466 case 5: /* ldrh */
6ce2faf4 10467 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
99c475ab
FB
10468 break;
10469 case 6: /* ldrb */
6ce2faf4 10470 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
99c475ab
FB
10471 break;
10472 case 7: /* ldrsh */
6ce2faf4 10473 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
99c475ab
FB
10474 break;
10475 }
c40c8556 10476 if (op >= 3) { /* load */
b0109805 10477 store_reg(s, rd, tmp);
c40c8556
PM
10478 } else {
10479 tcg_temp_free_i32(tmp);
10480 }
7d1b0095 10481 tcg_temp_free_i32(addr);
99c475ab
FB
10482 break;
10483
10484 case 6:
10485 /* load/store word immediate offset */
10486 rd = insn & 7;
10487 rn = (insn >> 3) & 7;
b0109805 10488 addr = load_reg(s, rn);
99c475ab 10489 val = (insn >> 4) & 0x7c;
b0109805 10490 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10491
10492 if (insn & (1 << 11)) {
10493 /* load */
c40c8556 10494 tmp = tcg_temp_new_i32();
6ce2faf4 10495 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10496 store_reg(s, rd, tmp);
99c475ab
FB
10497 } else {
10498 /* store */
b0109805 10499 tmp = load_reg(s, rd);
6ce2faf4 10500 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10501 tcg_temp_free_i32(tmp);
99c475ab 10502 }
7d1b0095 10503 tcg_temp_free_i32(addr);
99c475ab
FB
10504 break;
10505
10506 case 7:
10507 /* load/store byte immediate offset */
10508 rd = insn & 7;
10509 rn = (insn >> 3) & 7;
b0109805 10510 addr = load_reg(s, rn);
99c475ab 10511 val = (insn >> 6) & 0x1f;
b0109805 10512 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10513
10514 if (insn & (1 << 11)) {
10515 /* load */
c40c8556 10516 tmp = tcg_temp_new_i32();
6ce2faf4 10517 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
b0109805 10518 store_reg(s, rd, tmp);
99c475ab
FB
10519 } else {
10520 /* store */
b0109805 10521 tmp = load_reg(s, rd);
6ce2faf4 10522 gen_aa32_st8(tmp, addr, get_mem_index(s));
c40c8556 10523 tcg_temp_free_i32(tmp);
99c475ab 10524 }
7d1b0095 10525 tcg_temp_free_i32(addr);
99c475ab
FB
10526 break;
10527
10528 case 8:
10529 /* load/store halfword immediate offset */
10530 rd = insn & 7;
10531 rn = (insn >> 3) & 7;
b0109805 10532 addr = load_reg(s, rn);
99c475ab 10533 val = (insn >> 5) & 0x3e;
b0109805 10534 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10535
10536 if (insn & (1 << 11)) {
10537 /* load */
c40c8556 10538 tmp = tcg_temp_new_i32();
6ce2faf4 10539 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
b0109805 10540 store_reg(s, rd, tmp);
99c475ab
FB
10541 } else {
10542 /* store */
b0109805 10543 tmp = load_reg(s, rd);
6ce2faf4 10544 gen_aa32_st16(tmp, addr, get_mem_index(s));
c40c8556 10545 tcg_temp_free_i32(tmp);
99c475ab 10546 }
7d1b0095 10547 tcg_temp_free_i32(addr);
99c475ab
FB
10548 break;
10549
10550 case 9:
10551 /* load/store from stack */
10552 rd = (insn >> 8) & 7;
b0109805 10553 addr = load_reg(s, 13);
99c475ab 10554 val = (insn & 0xff) * 4;
b0109805 10555 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10556
10557 if (insn & (1 << 11)) {
10558 /* load */
c40c8556 10559 tmp = tcg_temp_new_i32();
6ce2faf4 10560 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10561 store_reg(s, rd, tmp);
99c475ab
FB
10562 } else {
10563 /* store */
b0109805 10564 tmp = load_reg(s, rd);
6ce2faf4 10565 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10566 tcg_temp_free_i32(tmp);
99c475ab 10567 }
7d1b0095 10568 tcg_temp_free_i32(addr);
99c475ab
FB
10569 break;
10570
10571 case 10:
10572 /* add to high reg */
10573 rd = (insn >> 8) & 7;
5899f386
FB
10574 if (insn & (1 << 11)) {
10575 /* SP */
5e3f878a 10576 tmp = load_reg(s, 13);
5899f386
FB
10577 } else {
10578 /* PC. bit 1 is ignored. */
7d1b0095 10579 tmp = tcg_temp_new_i32();
5e3f878a 10580 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 10581 }
99c475ab 10582 val = (insn & 0xff) * 4;
5e3f878a
PB
10583 tcg_gen_addi_i32(tmp, tmp, val);
10584 store_reg(s, rd, tmp);
99c475ab
FB
10585 break;
10586
10587 case 11:
10588 /* misc */
10589 op = (insn >> 8) & 0xf;
10590 switch (op) {
10591 case 0:
10592 /* adjust stack pointer */
b26eefb6 10593 tmp = load_reg(s, 13);
99c475ab
FB
10594 val = (insn & 0x7f) * 4;
10595 if (insn & (1 << 7))
6a0d8a1d 10596 val = -(int32_t)val;
b26eefb6
PB
10597 tcg_gen_addi_i32(tmp, tmp, val);
10598 store_reg(s, 13, tmp);
99c475ab
FB
10599 break;
10600
9ee6e8bb
PB
10601 case 2: /* sign/zero extend. */
10602 ARCH(6);
10603 rd = insn & 7;
10604 rm = (insn >> 3) & 7;
b0109805 10605 tmp = load_reg(s, rm);
9ee6e8bb 10606 switch ((insn >> 6) & 3) {
b0109805
PB
10607 case 0: gen_sxth(tmp); break;
10608 case 1: gen_sxtb(tmp); break;
10609 case 2: gen_uxth(tmp); break;
10610 case 3: gen_uxtb(tmp); break;
9ee6e8bb 10611 }
b0109805 10612 store_reg(s, rd, tmp);
9ee6e8bb 10613 break;
99c475ab
FB
10614 case 4: case 5: case 0xc: case 0xd:
10615 /* push/pop */
b0109805 10616 addr = load_reg(s, 13);
5899f386
FB
10617 if (insn & (1 << 8))
10618 offset = 4;
99c475ab 10619 else
5899f386
FB
10620 offset = 0;
10621 for (i = 0; i < 8; i++) {
10622 if (insn & (1 << i))
10623 offset += 4;
10624 }
10625 if ((insn & (1 << 11)) == 0) {
b0109805 10626 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10627 }
99c475ab
FB
10628 for (i = 0; i < 8; i++) {
10629 if (insn & (1 << i)) {
10630 if (insn & (1 << 11)) {
10631 /* pop */
c40c8556 10632 tmp = tcg_temp_new_i32();
6ce2faf4 10633 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10634 store_reg(s, i, tmp);
99c475ab
FB
10635 } else {
10636 /* push */
b0109805 10637 tmp = load_reg(s, i);
6ce2faf4 10638 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10639 tcg_temp_free_i32(tmp);
99c475ab 10640 }
5899f386 10641 /* advance to the next address. */
b0109805 10642 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10643 }
10644 }
39d5492a 10645 TCGV_UNUSED_I32(tmp);
99c475ab
FB
10646 if (insn & (1 << 8)) {
10647 if (insn & (1 << 11)) {
10648 /* pop pc */
c40c8556 10649 tmp = tcg_temp_new_i32();
6ce2faf4 10650 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10651 /* don't set the pc until the rest of the instruction
10652 has completed */
10653 } else {
10654 /* push lr */
b0109805 10655 tmp = load_reg(s, 14);
6ce2faf4 10656 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10657 tcg_temp_free_i32(tmp);
99c475ab 10658 }
b0109805 10659 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 10660 }
5899f386 10661 if ((insn & (1 << 11)) == 0) {
b0109805 10662 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10663 }
99c475ab 10664 /* write back the new stack pointer */
b0109805 10665 store_reg(s, 13, addr);
99c475ab 10666 /* set the new PC value */
be5e7a76
DES
10667 if ((insn & 0x0900) == 0x0900) {
10668 store_reg_from_load(env, s, 15, tmp);
10669 }
99c475ab
FB
10670 break;
10671
9ee6e8bb
PB
10672 case 1: case 3: case 9: case 11: /* czb */
10673 rm = insn & 7;
d9ba4830 10674 tmp = load_reg(s, rm);
9ee6e8bb
PB
10675 s->condlabel = gen_new_label();
10676 s->condjmp = 1;
10677 if (insn & (1 << 11))
cb63669a 10678 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 10679 else
cb63669a 10680 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 10681 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10682 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10683 val = (uint32_t)s->pc + 2;
10684 val += offset;
10685 gen_jmp(s, val);
10686 break;
10687
10688 case 15: /* IT, nop-hint. */
10689 if ((insn & 0xf) == 0) {
10690 gen_nop_hint(s, (insn >> 4) & 0xf);
10691 break;
10692 }
10693 /* If Then. */
10694 s->condexec_cond = (insn >> 4) & 0xe;
10695 s->condexec_mask = insn & 0x1f;
10696 /* No actual code generated for this insn, just setup state. */
10697 break;
10698
06c949e6 10699 case 0xe: /* bkpt */
d4a2dc67
PM
10700 {
10701 int imm8 = extract32(insn, 0, 8);
be5e7a76 10702 ARCH(5);
d4a2dc67 10703 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
06c949e6 10704 break;
d4a2dc67 10705 }
06c949e6 10706
9ee6e8bb
PB
10707 case 0xa: /* rev */
10708 ARCH(6);
10709 rn = (insn >> 3) & 0x7;
10710 rd = insn & 0x7;
b0109805 10711 tmp = load_reg(s, rn);
9ee6e8bb 10712 switch ((insn >> 6) & 3) {
66896cb8 10713 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
10714 case 1: gen_rev16(tmp); break;
10715 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
10716 default: goto illegal_op;
10717 }
b0109805 10718 store_reg(s, rd, tmp);
9ee6e8bb
PB
10719 break;
10720
d9e028c1
PM
10721 case 6:
10722 switch ((insn >> 5) & 7) {
10723 case 2:
10724 /* setend */
10725 ARCH(6);
10962fd5
PM
10726 if (((insn >> 3) & 1) != s->bswap_code) {
10727 /* Dynamic endianness switching not implemented. */
e0c270d9 10728 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
10729 goto illegal_op;
10730 }
9ee6e8bb 10731 break;
d9e028c1
PM
10732 case 3:
10733 /* cps */
10734 ARCH(6);
10735 if (IS_USER(s)) {
10736 break;
8984bd2e 10737 }
d9e028c1
PM
10738 if (IS_M(env)) {
10739 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10740 /* FAULTMASK */
10741 if (insn & 1) {
10742 addr = tcg_const_i32(19);
10743 gen_helper_v7m_msr(cpu_env, addr, tmp);
10744 tcg_temp_free_i32(addr);
10745 }
10746 /* PRIMASK */
10747 if (insn & 2) {
10748 addr = tcg_const_i32(16);
10749 gen_helper_v7m_msr(cpu_env, addr, tmp);
10750 tcg_temp_free_i32(addr);
10751 }
10752 tcg_temp_free_i32(tmp);
10753 gen_lookup_tb(s);
10754 } else {
10755 if (insn & (1 << 4)) {
10756 shift = CPSR_A | CPSR_I | CPSR_F;
10757 } else {
10758 shift = 0;
10759 }
10760 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 10761 }
d9e028c1
PM
10762 break;
10763 default:
10764 goto undef;
9ee6e8bb
PB
10765 }
10766 break;
10767
99c475ab
FB
10768 default:
10769 goto undef;
10770 }
10771 break;
10772
10773 case 12:
a7d3970d 10774 {
99c475ab 10775 /* load/store multiple */
39d5492a
PM
10776 TCGv_i32 loaded_var;
10777 TCGV_UNUSED_I32(loaded_var);
99c475ab 10778 rn = (insn >> 8) & 0x7;
b0109805 10779 addr = load_reg(s, rn);
99c475ab
FB
10780 for (i = 0; i < 8; i++) {
10781 if (insn & (1 << i)) {
99c475ab
FB
10782 if (insn & (1 << 11)) {
10783 /* load */
c40c8556 10784 tmp = tcg_temp_new_i32();
6ce2faf4 10785 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
a7d3970d
PM
10786 if (i == rn) {
10787 loaded_var = tmp;
10788 } else {
10789 store_reg(s, i, tmp);
10790 }
99c475ab
FB
10791 } else {
10792 /* store */
b0109805 10793 tmp = load_reg(s, i);
6ce2faf4 10794 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10795 tcg_temp_free_i32(tmp);
99c475ab 10796 }
5899f386 10797 /* advance to the next address */
b0109805 10798 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10799 }
10800 }
b0109805 10801 if ((insn & (1 << rn)) == 0) {
a7d3970d 10802 /* base reg not in list: base register writeback */
b0109805
PB
10803 store_reg(s, rn, addr);
10804 } else {
a7d3970d
PM
10805 /* base reg in list: if load, complete it now */
10806 if (insn & (1 << 11)) {
10807 store_reg(s, rn, loaded_var);
10808 }
7d1b0095 10809 tcg_temp_free_i32(addr);
b0109805 10810 }
99c475ab 10811 break;
a7d3970d 10812 }
99c475ab
FB
10813 case 13:
10814 /* conditional branch or swi */
10815 cond = (insn >> 8) & 0xf;
10816 if (cond == 0xe)
10817 goto undef;
10818
10819 if (cond == 0xf) {
10820 /* swi */
eaed129d 10821 gen_set_pc_im(s, s->pc);
d4a2dc67 10822 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 10823 s->is_jmp = DISAS_SWI;
99c475ab
FB
10824 break;
10825 }
10826 /* generate a conditional jump to next instruction */
e50e6a20 10827 s->condlabel = gen_new_label();
39fb730a 10828 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 10829 s->condjmp = 1;
99c475ab
FB
10830
10831 /* jump to the offset */
5899f386 10832 val = (uint32_t)s->pc + 2;
99c475ab 10833 offset = ((int32_t)insn << 24) >> 24;
5899f386 10834 val += offset << 1;
8aaca4c0 10835 gen_jmp(s, val);
99c475ab
FB
10836 break;
10837
10838 case 14:
358bf29e 10839 if (insn & (1 << 11)) {
9ee6e8bb
PB
10840 if (disas_thumb2_insn(env, s, insn))
10841 goto undef32;
358bf29e
PB
10842 break;
10843 }
9ee6e8bb 10844 /* unconditional branch */
99c475ab
FB
10845 val = (uint32_t)s->pc;
10846 offset = ((int32_t)insn << 21) >> 21;
10847 val += (offset << 1) + 2;
8aaca4c0 10848 gen_jmp(s, val);
99c475ab
FB
10849 break;
10850
10851 case 15:
9ee6e8bb 10852 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 10853 goto undef32;
9ee6e8bb 10854 break;
99c475ab
FB
10855 }
10856 return;
9ee6e8bb 10857undef32:
d4a2dc67 10858 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
10859 return;
10860illegal_op:
99c475ab 10861undef:
d4a2dc67 10862 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
99c475ab
FB
10863}
10864
2c0262af
FB
10865/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10866 basic block 'tb'. If search_pc is TRUE, also generate PC
10867 information for each intermediate instruction. */
5639c3f2 10868static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 10869 TranslationBlock *tb,
5639c3f2 10870 bool search_pc)
2c0262af 10871{
ed2803da 10872 CPUState *cs = CPU(cpu);
5639c3f2 10873 CPUARMState *env = &cpu->env;
2c0262af 10874 DisasContext dc1, *dc = &dc1;
a1d1bb31 10875 CPUBreakpoint *bp;
2c0262af
FB
10876 uint16_t *gen_opc_end;
10877 int j, lj;
0fa85d43 10878 target_ulong pc_start;
0a2461fa 10879 target_ulong next_page_start;
2e70f6ef
PB
10880 int num_insns;
10881 int max_insns;
3b46e624 10882
2c0262af 10883 /* generate intermediate code */
40f860cd
PM
10884
10885 /* The A64 decoder has its own top level loop, because it doesn't need
10886 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
10887 */
10888 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
10889 gen_intermediate_code_internal_a64(cpu, tb, search_pc);
10890 return;
10891 }
10892
0fa85d43 10893 pc_start = tb->pc;
3b46e624 10894
2c0262af
FB
10895 dc->tb = tb;
10896
92414b31 10897 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
10898
10899 dc->is_jmp = DISAS_NEXT;
10900 dc->pc = pc_start;
ed2803da 10901 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 10902 dc->condjmp = 0;
3926cc84 10903
40f860cd
PM
10904 dc->aarch64 = 0;
10905 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
10906 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
10907 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10908 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
3926cc84 10909#if !defined(CONFIG_USER_ONLY)
40f860cd 10910 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
3926cc84 10911#endif
2c7ffc41 10912 dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
40f860cd
PM
10913 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
10914 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10915 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
60322b39
PM
10916 dc->cp_regs = cpu->cp_regs;
10917 dc->current_pl = arm_current_pl(env);
a984e42c 10918 dc->features = env->features;
40f860cd 10919
a7812ae4
PB
10920 cpu_F0s = tcg_temp_new_i32();
10921 cpu_F1s = tcg_temp_new_i32();
10922 cpu_F0d = tcg_temp_new_i64();
10923 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
10924 cpu_V0 = cpu_F0d;
10925 cpu_V1 = cpu_F1d;
e677137d 10926 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 10927 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 10928 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 10929 lj = -1;
2e70f6ef
PB
10930 num_insns = 0;
10931 max_insns = tb->cflags & CF_COUNT_MASK;
10932 if (max_insns == 0)
10933 max_insns = CF_COUNT_MASK;
10934
806f352d 10935 gen_tb_start();
e12ce78d 10936
3849902c
PM
10937 tcg_clear_temp_count();
10938
e12ce78d
PM
10939 /* A note on handling of the condexec (IT) bits:
10940 *
10941 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 10942 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 10943 * (1) if the condexec bits are not already zero then we write
0ecb72a5 10944 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
10945 * to do it at the end of the block. (For example if we don't do this
10946 * it's hard to identify whether we can safely skip writing condexec
10947 * at the end of the TB, which we definitely want to do for the case
10948 * where a TB doesn't do anything with the IT state at all.)
10949 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 10950 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
10951 * This is done both for leaving the TB at the end, and for leaving
10952 * it because of an exception we know will happen, which is done in
10953 * gen_exception_insn(). The latter is necessary because we need to
10954 * leave the TB with the PC/IT state just prior to execution of the
10955 * instruction which caused the exception.
10956 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 10957 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
10958 * This is handled in the same way as restoration of the
10959 * PC in these situations: we will be called again with search_pc=1
10960 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
10961 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10962 * this to restore the condexec bits.
e12ce78d
PM
10963 *
10964 * Note that there are no instructions which can read the condexec
10965 * bits, and none which can write non-static values to them, so
0ecb72a5 10966 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
10967 * middle of a TB.
10968 */
10969
9ee6e8bb
PB
10970 /* Reset the conditional execution bits immediately. This avoids
10971 complications trying to do it at the end of the block. */
98eac7ca 10972 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 10973 {
39d5492a 10974 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 10975 tcg_gen_movi_i32(tmp, 0);
d9ba4830 10976 store_cpu_field(tmp, condexec_bits);
8f01245e 10977 }
2c0262af 10978 do {
fbb4a2e3
PB
10979#ifdef CONFIG_USER_ONLY
10980 /* Intercept jump to the magic kernel page. */
40f860cd 10981 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
10982 /* We always get here via a jump, so know we are not in a
10983 conditional execution block. */
d4a2dc67 10984 gen_exception_internal(EXCP_KERNEL_TRAP);
fbb4a2e3
PB
10985 dc->is_jmp = DISAS_UPDATE;
10986 break;
10987 }
10988#else
9ee6e8bb
PB
10989 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10990 /* We always get here via a jump, so know we are not in a
10991 conditional execution block. */
d4a2dc67 10992 gen_exception_internal(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
10993 dc->is_jmp = DISAS_UPDATE;
10994 break;
9ee6e8bb
PB
10995 }
10996#endif
10997
f0c3c505
AF
10998 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
10999 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 11000 if (bp->pc == dc->pc) {
d4a2dc67 11001 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
11002 /* Advance PC so that clearing the breakpoint will
11003 invalidate this TB. */
11004 dc->pc += 2;
11005 goto done_generating;
1fddef4b
FB
11006 }
11007 }
11008 }
2c0262af 11009 if (search_pc) {
92414b31 11010 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
11011 if (lj < j) {
11012 lj++;
11013 while (lj < j)
ab1103de 11014 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 11015 }
25983cad 11016 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 11017 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 11018 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 11019 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 11020 }
e50e6a20 11021
2e70f6ef
PB
11022 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
11023 gen_io_start();
11024
fdefe51c 11025 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
11026 tcg_gen_debug_insn_start(dc->pc);
11027 }
11028
40f860cd 11029 if (dc->thumb) {
9ee6e8bb
PB
11030 disas_thumb_insn(env, dc);
11031 if (dc->condexec_mask) {
11032 dc->condexec_cond = (dc->condexec_cond & 0xe)
11033 | ((dc->condexec_mask >> 4) & 1);
11034 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11035 if (dc->condexec_mask == 0) {
11036 dc->condexec_cond = 0;
11037 }
11038 }
11039 } else {
11040 disas_arm_insn(env, dc);
11041 }
e50e6a20
FB
11042
11043 if (dc->condjmp && !dc->is_jmp) {
11044 gen_set_label(dc->condlabel);
11045 dc->condjmp = 0;
11046 }
3849902c
PM
11047
11048 if (tcg_check_temp_count()) {
0a2461fa
AG
11049 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11050 dc->pc);
3849902c
PM
11051 }
11052
aaf2d97d 11053 /* Translation stops when a conditional branch is encountered.
e50e6a20 11054 * Otherwise the subsequent code could get translated several times.
b5ff1b31 11055 * Also stop translation when a page boundary is reached. This
bf20dc07 11056 * ensures prefetch aborts occur at the right place. */
2e70f6ef 11057 num_insns ++;
efd7f486 11058 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 11059 !cs->singlestep_enabled &&
1b530a6d 11060 !singlestep &&
2e70f6ef
PB
11061 dc->pc < next_page_start &&
11062 num_insns < max_insns);
11063
11064 if (tb->cflags & CF_LAST_IO) {
11065 if (dc->condjmp) {
11066 /* FIXME: This can theoretically happen with self-modifying
11067 code. */
a47dddd7 11068 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
11069 }
11070 gen_io_end();
11071 }
9ee6e8bb 11072
b5ff1b31 11073 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
11074 instruction was a conditional branch or trap, and the PC has
11075 already been written. */
ed2803da 11076 if (unlikely(cs->singlestep_enabled)) {
8aaca4c0 11077 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 11078 if (dc->condjmp) {
9ee6e8bb
PB
11079 gen_set_condexec(dc);
11080 if (dc->is_jmp == DISAS_SWI) {
d4a2dc67 11081 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb 11082 } else {
d4a2dc67 11083 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11084 }
e50e6a20
FB
11085 gen_set_label(dc->condlabel);
11086 }
11087 if (dc->condjmp || !dc->is_jmp) {
eaed129d 11088 gen_set_pc_im(dc, dc->pc);
e50e6a20 11089 dc->condjmp = 0;
8aaca4c0 11090 }
9ee6e8bb
PB
11091 gen_set_condexec(dc);
11092 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d4a2dc67 11093 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb
PB
11094 } else {
11095 /* FIXME: Single stepping a WFI insn will not halt
11096 the CPU. */
d4a2dc67 11097 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11098 }
8aaca4c0 11099 } else {
9ee6e8bb
PB
11100 /* While branches must always occur at the end of an IT block,
11101 there are a few other things that can cause us to terminate
65626741 11102 the TB in the middle of an IT block:
9ee6e8bb
PB
11103 - Exception generating instructions (bkpt, swi, undefined).
11104 - Page boundaries.
11105 - Hardware watchpoints.
11106 Hardware breakpoints have already been handled and skip this code.
11107 */
11108 gen_set_condexec(dc);
8aaca4c0 11109 switch(dc->is_jmp) {
8aaca4c0 11110 case DISAS_NEXT:
6e256c93 11111 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
11112 break;
11113 default:
11114 case DISAS_JUMP:
11115 case DISAS_UPDATE:
11116 /* indicate that the hash table must be used to find the next TB */
57fec1fe 11117 tcg_gen_exit_tb(0);
8aaca4c0
FB
11118 break;
11119 case DISAS_TB_JUMP:
11120 /* nothing more to generate */
11121 break;
9ee6e8bb 11122 case DISAS_WFI:
1ce94f81 11123 gen_helper_wfi(cpu_env);
9ee6e8bb 11124 break;
72c1d3af
PM
11125 case DISAS_WFE:
11126 gen_helper_wfe(cpu_env);
11127 break;
9ee6e8bb 11128 case DISAS_SWI:
d4a2dc67 11129 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb 11130 break;
8aaca4c0 11131 }
e50e6a20
FB
11132 if (dc->condjmp) {
11133 gen_set_label(dc->condlabel);
9ee6e8bb 11134 gen_set_condexec(dc);
6e256c93 11135 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
11136 dc->condjmp = 0;
11137 }
2c0262af 11138 }
2e70f6ef 11139
9ee6e8bb 11140done_generating:
806f352d 11141 gen_tb_end(tb, num_insns);
efd7f486 11142 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
11143
11144#ifdef DEBUG_DISAS
8fec2b8c 11145 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
11146 qemu_log("----------------\n");
11147 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 11148 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 11149 dc->thumb | (dc->bswap_code << 1));
93fcfe39 11150 qemu_log("\n");
2c0262af
FB
11151 }
11152#endif
b5ff1b31 11153 if (search_pc) {
92414b31 11154 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
11155 lj++;
11156 while (lj <= j)
ab1103de 11157 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 11158 } else {
2c0262af 11159 tb->size = dc->pc - pc_start;
2e70f6ef 11160 tb->icount = num_insns;
b5ff1b31 11161 }
2c0262af
FB
11162}
11163
0ecb72a5 11164void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 11165{
5639c3f2 11166 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
11167}
11168
0ecb72a5 11169void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 11170{
5639c3f2 11171 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
11172}
11173
/* Printable names for the 16 possible values of the CPSR mode field
 * (the low 4 bits of PSR, see the cpu_mode_names[psr & 0xf] use in
 * arm_cpu_dump_state below); "???" marks encodings this table does
 * not name.
 */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 11178
878096ee
AF
11179void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11180 int flags)
2c0262af 11181{
878096ee
AF
11182 ARMCPU *cpu = ARM_CPU(cs);
11183 CPUARMState *env = &cpu->env;
2c0262af 11184 int i;
b5ff1b31 11185 uint32_t psr;
2c0262af 11186
17731115
PM
11187 if (is_a64(env)) {
11188 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11189 return;
11190 }
11191
2c0262af 11192 for(i=0;i<16;i++) {
7fe48483 11193 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 11194 if ((i % 4) == 3)
7fe48483 11195 cpu_fprintf(f, "\n");
2c0262af 11196 else
7fe48483 11197 cpu_fprintf(f, " ");
2c0262af 11198 }
b5ff1b31 11199 psr = cpsr_read(env);
687fa640
TS
11200 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
11201 psr,
b5ff1b31
FB
11202 psr & (1 << 31) ? 'N' : '-',
11203 psr & (1 << 30) ? 'Z' : '-',
11204 psr & (1 << 29) ? 'C' : '-',
11205 psr & (1 << 28) ? 'V' : '-',
5fafdf24 11206 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 11207 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 11208
f2617cfc
PM
11209 if (flags & CPU_DUMP_FPU) {
11210 int numvfpregs = 0;
11211 if (arm_feature(env, ARM_FEATURE_VFP)) {
11212 numvfpregs += 16;
11213 }
11214 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11215 numvfpregs += 16;
11216 }
11217 for (i = 0; i < numvfpregs; i++) {
11218 uint64_t v = float64_val(env->vfp.regs[i]);
11219 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11220 i * 2, (uint32_t)v,
11221 i * 2 + 1, (uint32_t)(v >> 32),
11222 i, v);
11223 }
11224 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 11225 }
2c0262af 11226}
a6b025d3 11227
0ecb72a5 11228void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 11229{
3926cc84
AG
11230 if (is_a64(env)) {
11231 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11232 env->condexec_bits = 0;
3926cc84
AG
11233 } else {
11234 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11235 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
3926cc84 11236 }
d2856f1a 11237}