]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
target-arm: Rearrange aa32 load and store functions
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
19a6e31c 31#include "exec/semihost.h"
1497c961 32
2ef6175a
RH
33#include "exec/helper-proto.h"
34#include "exec/helper-gen.h"
2c0262af 35
a7e30d84 36#include "trace-tcg.h"
508127e2 37#include "exec/log.h"
a7e30d84
LV
38
39
2b51668f
PM
/* Architecture-feature tests; each expands against a DisasContext named
 * 's' that must be in scope at the point of use.
 */
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

/* Bail out to the decoder's 'illegal_op' label if the required
 * architecture feature is absent; usable only inside the decoders.
 */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 52
f570c61e 53#include "translate.h"
e12ce78d 54
b5ff1b31
FB
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

TCGv_env cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* TCG globals mapping the 16 AArch32 core registers (r15 == pc). */
static TCGv_i32 cpu_R[16];
/* Flag globals: CF/NF/VF/ZF hold the condition flags in the split
 * representation used by CPUARMState.
 */
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
TCGv_i64 cpu_exclusive_test;
TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME: These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

/* Names for the cpu_R[] globals, for TCG debug output. */
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
82
b26eefb6
PB
/* initialize TCG globals.  Called once at startup; creates the TCG
 * global variables backed by fields of CPUARMState, then chains to the
 * AArch64 equivalent.
 */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    /* One global per core register, backed by CPUARMState.regs[i]. */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}
114
579d21cc
PM
static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:        /* stage-2 index never valid here */
    default:
        g_assert_not_reached();
    }
}
136
/* Load a 32-bit CPUARMState field at 'offset' into a fresh temporary;
 * caller owns (and must free) the returned temp.
 */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

/* Store 'var' to a 32-bit CPUARMState field; 'var' is freed (dead). */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 154
/* Set a variable to the value of a CPU register.  Reading r15 yields
 * the architectural PC value (instruction address + 4 for Thumb, + 8
 * for ARM), computed from s->pc which already points past this insn.
 */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.
 * Caller owns (and must free) the returned temp.
 */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
178
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  Writing r15 ends the TB (DISAS_JUMP). */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
195
/* Value extensions.  In-place zero/sign extension of a 32-bit temp. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual 16-bit extensions (both halfwords), via helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


/* Write the bits of 'var' selected by 'mask' into the CPSR. */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
214
/* Raise a QEMU-internal exception (must satisfy excp_is_internal()). */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

/* Raise an architectural exception 'excp' with the given syndrome,
 * targeting exception level 'target_el'.
 */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
237
50225ad0
PM
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}
265
/* Dual 16x16->32 signed multiply: on return a = sext16(a) * sext16(b)
 * (low halfwords) and b = (a >> 16) * (b >> 16) (high halfwords).
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
280
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
300
/* Unsigned bitfield extract: var = (var >> shift) & mask, in place. */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract: sign-extend the 'width'-bit field starting
 * at bit 'shift' into var, in place.
 */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* xor/sub with the sign bit performs the sign extension. */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
323
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
351
/* 32x32->64 multiply.  Marks inputs as dead.  Caller owns the
 * returned 64-bit temp.
 */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Signed 32x32->64 multiply; otherwise as gen_mulu_i64_i32. */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
388
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
418
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  (Flags are not updated.) */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
453
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 produces the carry-out directly in cpu_CF. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands had the
     * same sign and the result's sign differs.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops: t0 + CF, then + t1, accumulating carry. */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the sum in 64 bits and split result/carry. */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction sets C when there is NO borrow, i.e. t0 >= t1. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
519
/* Variable shifts with ARM register-shift semantics: the shift amount
 * is t1[7:0]; amounts >= 32 yield 0 for shl/shr.  The movcond selects 0
 * when (t1 & 0xff) > 31, otherwise t0 shifted by (t1 & 0x1f).
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

/* Arithmetic shift right by register; amounts > 31 clamp to 31 so the
 * sign bit fills the result, matching ARM ASR semantics.
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* dest = |src| via movcond (no branches). */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 560
39d5492a 561static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 562{
9a119ff6 563 if (shift == 0) {
66c374de 564 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 565 } else {
66c374de
AJ
566 tcg_gen_shri_i32(cpu_CF, var, shift);
567 if (shift != 31) {
568 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
569 }
9a119ff6 570 }
9a119ff6 571}
b26eefb6 572
9a119ff6 573/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
574static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
575 int shift, int flags)
9a119ff6
PB
576{
577 switch (shiftop) {
578 case 0: /* LSL */
579 if (shift != 0) {
580 if (flags)
581 shifter_out_im(var, 32 - shift);
582 tcg_gen_shli_i32(var, var, shift);
583 }
584 break;
585 case 1: /* LSR */
586 if (shift == 0) {
587 if (flags) {
66c374de 588 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
589 }
590 tcg_gen_movi_i32(var, 0);
591 } else {
592 if (flags)
593 shifter_out_im(var, shift - 1);
594 tcg_gen_shri_i32(var, var, shift);
595 }
596 break;
597 case 2: /* ASR */
598 if (shift == 0)
599 shift = 32;
600 if (flags)
601 shifter_out_im(var, shift - 1);
602 if (shift == 32)
603 shift = 31;
604 tcg_gen_sari_i32(var, var, shift);
605 break;
606 case 3: /* ROR/RRX */
607 if (shift != 0) {
608 if (flags)
609 shifter_out_im(var, shift - 1);
f669df27 610 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 611 } else {
39d5492a 612 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 613 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
614 if (flags)
615 shifter_out_im(var, 0);
616 tcg_gen_shri_i32(var, var, 1);
b26eefb6 617 tcg_gen_or_i32(var, var, tmp);
7d1b0095 618 tcg_temp_free_i32(tmp);
b26eefb6
PB
619 }
620 }
621};
622
39d5492a
PM
/* Shift 'var' in place by a register-supplied amount; 'shift' is
 * consumed (freed).  With 'flags' set, use the flag-updating helpers
 * so CF is set to the shifter carry-out.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
650
6ddbc6e4
PB
/* Dispatch one parallel add/sub helper by the op2 field; 'pfx' selects
 * the signedness/saturation family (s/u/q/sh/uq/uh).
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
/* Emit an ARM-encoding parallel add/subtract: op1 selects the family,
 * op2 the operation.  The s/u families also take a pointer to the GE
 * flags in CPUARMState; a = result.
 */
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
696
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
/* Note: here op1 selects the operation and op2 selects the family —
 * the reverse of the ARM-encoding version above.
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
/* Thumb-2 encoding of the parallel add/subtract operations; see
 * gen_arm_parallel_addsub for the ARM-encoding equivalent.
 */
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
743
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 * Fills in *cmp with a TCG condition and value such that
 * (cond, value-vs-0) is true exactly when cc holds; value_global
 * records whether the value is a flag global (must not be freed)
 * or a fresh temp (freed by arm_free_cc).
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;          /* ZF == 0 means Z is set */
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;          /* CF != 0 means C is set */
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;          /* N is the sign bit of NF */
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;          /* V is the sign bit of VF */
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the negation of the preceding even one. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

/* Release the temp allocated by arm_test_cc, if any. */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

/* Branch to 'label' when the prepared condition holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

/* Convenience wrapper: test cc and branch to 'label' if it holds. */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 853
/* Indexed by data-processing opcode: 1 if the op is "logical" (NZ set
 * from the result, C from the shifter), 0 if arithmetic (full NZCV).
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 872
d9ba4830
PB
/* Set PC and Thumb state from an immediate address.  Bit 0 of 'addr'
 * selects the new Thumb state; the Thumb flag store is skipped when
 * the state does not change.  Ends the TB.
 */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.
 * Ends the TB.
 */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
896
21aeb343
JR
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a
   temporary and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above.  This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
921
e334bd31
PB
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

/* Widen a 32-bit guest address to target width, applying the BE32
 * (SCTLR.B) sub-word address XOR for accesses narrower than a word.
 * Caller owns (and must free) the returned temp.
 */
static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
947
7f5616f5
RH
948static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
949 int index, TCGMemOp opc)
08307563 950{
7f5616f5
RH
951 TCGv addr = gen_aa32_addr(s, a32, opc);
952 tcg_gen_qemu_ld_i32(val, addr, index, opc);
953 tcg_temp_free(addr);
08307563
PM
954}
955
7f5616f5
RH
956static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
957 int index, TCGMemOp opc)
958{
959 TCGv addr = gen_aa32_addr(s, a32, opc);
960 tcg_gen_qemu_st_i32(val, addr, index, opc);
961 tcg_temp_free(addr);
962}
08307563 963
/* Emit a typed load wrapper gen_aa32_ld<SUFF>() that folds in the
 * current data endianness (s->be_data).
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}
970
/* Emit a typed store wrapper gen_aa32_st<SUFF>() that folds in the
 * current data endianness (s->be_data).
 */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}
977
/* Fix up a 64-bit load result for system-mode BE32: the two 32-bit
 * halves come back swapped, so rotate by 32 to restore them.
 */
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}
985
/* 64-bit guest load through a 32-bit AArch32 address, including the
 * BE32 word-swap fixup.
 */
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}
994
/* Convenience wrapper: 64-bit load using the current endianness. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}
1000
/* 64-bit guest store through a 32-bit AArch32 address.  For system-mode
 * BE32 the two words must be swapped before the store; a scratch temp is
 * used so the caller's value is not clobbered.
 */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}
1017
/* Convenience wrapper: 64-bit store using the current endianness. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
08307563 1023
/* Instantiate the sized load/store wrappers (signed/unsigned 8/16-bit,
 * 32-bit).
 */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
/* 'a' variants include an alignment check */
DO_GEN_LD(16ua, MO_UW | MO_ALIGN)
DO_GEN_LD(32ua, MO_UL | MO_ALIGN)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
08307563 1035
/* Set the guest PC (r15) to a known immediate value. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
1040
/* Generate code for an HVC (hypervisor call) insn with immediate imm16. */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}
1058
/* Generate code for an SMC (secure monitor call) insn. */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
1073
/* Commit the in-translation IT-block state to the CPU condexec_bits
 * field, so an exception raised mid-block sees the correct value.
 * No-op when not inside an IT block (condexec_mask == 0).
 */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
1084
/* Raise a QEMU-internal exception for the current insn; 'offset' is
 * subtracted from s->pc so the stored PC points at the faulting insn.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}
1092
/* Raise an architectural exception for the current insn, with syndrome
 * 'syn', delivered to exception level 'target_el'.  'offset' rewinds
 * s->pc to the faulting insn.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}
1101
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Store the PC with bit 0 masked off. */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}
1108
/* Generate code for an HLT insn with immediate 'imm'. */
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
1135
b0109805 1136static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1137 TCGv_i32 var)
2c0262af 1138{
1e8d4eec 1139 int val, rm, shift, shiftop;
39d5492a 1140 TCGv_i32 offset;
2c0262af
FB
1141
1142 if (!(insn & (1 << 25))) {
1143 /* immediate */
1144 val = insn & 0xfff;
1145 if (!(insn & (1 << 23)))
1146 val = -val;
537730b9 1147 if (val != 0)
b0109805 1148 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1149 } else {
1150 /* shift/register */
1151 rm = (insn) & 0xf;
1152 shift = (insn >> 7) & 0x1f;
1e8d4eec 1153 shiftop = (insn >> 5) & 3;
b26eefb6 1154 offset = load_reg(s, rm);
9a119ff6 1155 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1156 if (!(insn & (1 << 23)))
b0109805 1157 tcg_gen_sub_i32(var, var, offset);
2c0262af 1158 else
b0109805 1159 tcg_gen_add_i32(var, var, offset);
7d1b0095 1160 tcg_temp_free_i32(offset);
2c0262af
FB
1161 }
1162}
1163
191f9a93 1164static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1165 int extra, TCGv_i32 var)
2c0262af
FB
1166{
1167 int val, rm;
39d5492a 1168 TCGv_i32 offset;
3b46e624 1169
2c0262af
FB
1170 if (insn & (1 << 22)) {
1171 /* immediate */
1172 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1173 if (!(insn & (1 << 23)))
1174 val = -val;
18acad92 1175 val += extra;
537730b9 1176 if (val != 0)
b0109805 1177 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1178 } else {
1179 /* register */
191f9a93 1180 if (extra)
b0109805 1181 tcg_gen_addi_i32(var, var, extra);
2c0262af 1182 rm = (insn) & 0xf;
b26eefb6 1183 offset = load_reg(s, rm);
2c0262af 1184 if (!(insn & (1 << 23)))
b0109805 1185 tcg_gen_sub_i32(var, var, offset);
2c0262af 1186 else
b0109805 1187 tcg_gen_add_i32(var, var, offset);
7d1b0095 1188 tcg_temp_free_i32(offset);
2c0262af
FB
1189 }
1190}
1191
5aaebd13
PM
1192static TCGv_ptr get_fpstatus_ptr(int neon)
1193{
1194 TCGv_ptr statusptr = tcg_temp_new_ptr();
1195 int offset;
1196 if (neon) {
0ecb72a5 1197 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1198 } else {
0ecb72a5 1199 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1200 }
1201 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1202 return statusptr;
1203}
1204
/* Emit gen_vfp_<name>(dp): two-operand VFP arithmetic F0 = F0 <op> F1,
 * in double (dp != 0) or single precision, using the normal FP status.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1223
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
1235
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1245
4373f3ce
PB
1246static inline void gen_vfp_abs(int dp)
1247{
1248 if (dp)
1249 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1250 else
1251 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1252}
1253
1254static inline void gen_vfp_neg(int dp)
1255{
1256 if (dp)
1257 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1258 else
1259 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1260}
1261
1262static inline void gen_vfp_sqrt(int dp)
1263{
1264 if (dp)
1265 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1266 else
1267 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1268}
1269
1270static inline void gen_vfp_cmp(int dp)
1271{
1272 if (dp)
1273 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1274 else
1275 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1276}
1277
1278static inline void gen_vfp_cmpe(int dp)
1279{
1280 if (dp)
1281 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1282 else
1283 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1284}
1285
1286static inline void gen_vfp_F1_ld0(int dp)
1287{
1288 if (dp)
5b340b51 1289 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1290 else
5b340b51 1291 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1292}
1293
/* Emit gen_vfp_<name>(dp, neon): integer-to-float conversion of F0,
 * using the Neon standard FP status when 'neon' is set.  The integer
 * source is always the 32-bit F0s.
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1309
/* Emit gen_vfp_<name>(dp, neon): float-to-integer conversion of F0,
 * using the Neon standard FP status when 'neon' is set.  The integer
 * result always lands in the 32-bit F0s.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1327
/* Emit gen_vfp_<name>(dp, shift, neon): fixed-point conversion of F0
 * with the given fractional shift.  'round' is pasted onto the helper
 * name to select rounding (_round_to_zero for the float->fixed cases,
 * empty for fixed->float).
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1352
/* Load F0 (64- or 32-bit per 'dp') from guest memory at 'addr'. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}
1361
/* Store F0 (64- or 32-bit per 'dp') to guest memory at 'addr'. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}
1370
8e96005d
FB
1371static inline long
1372vfp_reg_offset (int dp, int reg)
1373{
1374 if (dp)
1375 return offsetof(CPUARMState, vfp.regs[reg]);
1376 else if (reg & 1) {
1377 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1378 + offsetof(CPU_DoubleU, l.upper);
1379 } else {
1380 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1381 + offsetof(CPU_DoubleU, l.lower);
1382 }
1383}
9ee6e8bb
PB
1384
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each D register holds two 32-bit units; pick unit n of reg. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1394
/* Load one 32-bit lane ('pass') of NEON register 'reg' into a fresh
 * temp.  The caller must free the returned temp.
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1401
/* Store 'var' into one 32-bit lane ('pass') of NEON register 'reg'.
 * Consumes (frees) var.
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1407
/* Load a whole 64-bit NEON (D) register into var. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1412
/* Store var into a whole 64-bit NEON (D) register. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1417
/* VFP values live in plain integer TCG temps; alias the f32/f64
 * load/store names to the corresponding integer ops.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1422
b7bcbe95
FB
1423static inline void gen_mov_F0_vreg(int dp, int reg)
1424{
1425 if (dp)
4373f3ce 1426 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1427 else
4373f3ce 1428 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1429}
1430
1431static inline void gen_mov_F1_vreg(int dp, int reg)
1432{
1433 if (dp)
4373f3ce 1434 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1435 else
4373f3ce 1436 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1437}
1438
1439static inline void gen_mov_vreg_F0(int dp, int reg)
1440{
1441 if (dp)
4373f3ce 1442 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1443 else
4373f3ce 1444 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1445}
1446
/* Coprocessor insn bit 20: load (read) vs store (write) direction. */
#define ARM_CP_RW_BIT (1 << 20)

/* Load/store a 64-bit iwMMXt data register wRn to/from a TCG temp. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Load a 32-bit iwMMXt control register into a fresh temp.
 * The caller must free the returned temp.
 */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store 'var' into an iwMMXt control register; consumes (frees) var. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1471
/* Move between the M0 working value and iwMMXt data register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* 64-bit logical ops: M0 op= wRn, via the V1 scratch temp. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1499
/* Emit gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (accesses CPU state). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary env op on M0 alone: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1524
/* Instantiate the iwMMXt op generators used by disas_iwmmxt_insn(). */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d
PB
/* Set bit 1 (data register updated) of the wCon control register. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set bit 0 (control register updated) of the wCon control register. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Recompute the wCASF flags from the value in M0 (helper sets N/Z). */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1609
/* Compute the effective address of an iwMMXt load/store insn into
 * 'dest', handling pre-/post-indexed modes and base-register writeback.
 * Returns 1 for an invalid addressing-mode encoding (caller UNDEFs).
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when bit 8 of the insn is set. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        /* Bit 21: write the updated address back to the base register. */
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1644
/* Fetch the shift amount for an iwMMXt shift insn into 'dest', masked
 * with 'mask'.  The source is a wCGRn control register when bit 8 of
 * the insn is set, otherwise the low 32 bits of data register wRd.
 * Returns 1 for an invalid encoding (non-wCGR control register).
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1666
a1c7273b 1667/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1668 (ie. an undefined instruction). */
7dcc1f89 1669static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1670{
1671 int rd, wrd;
1672 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1673 TCGv_i32 addr;
1674 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1675
1676 if ((insn & 0x0e000e00) == 0x0c000000) {
1677 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1678 wrd = insn & 0xf;
1679 rdlo = (insn >> 12) & 0xf;
1680 rdhi = (insn >> 16) & 0xf;
1681 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1682 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1683 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1684 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1685 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1686 } else { /* TMCRR */
da6b5335
FN
1687 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1688 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1689 gen_op_iwmmxt_set_mup();
1690 }
1691 return 0;
1692 }
1693
1694 wrd = (insn >> 12) & 0xf;
7d1b0095 1695 addr = tcg_temp_new_i32();
da6b5335 1696 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1697 tcg_temp_free_i32(addr);
18c9b560 1698 return 1;
da6b5335 1699 }
18c9b560
AZ
1700 if (insn & ARM_CP_RW_BIT) {
1701 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1702 tmp = tcg_temp_new_i32();
12dcc321 1703 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1704 iwmmxt_store_creg(wrd, tmp);
18c9b560 1705 } else {
e677137d
PB
1706 i = 1;
1707 if (insn & (1 << 8)) {
1708 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1709 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1710 i = 0;
1711 } else { /* WLDRW wRd */
29531141 1712 tmp = tcg_temp_new_i32();
12dcc321 1713 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1714 }
1715 } else {
29531141 1716 tmp = tcg_temp_new_i32();
e677137d 1717 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1718 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1719 } else { /* WLDRB */
12dcc321 1720 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1721 }
1722 }
1723 if (i) {
1724 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1725 tcg_temp_free_i32(tmp);
e677137d 1726 }
18c9b560
AZ
1727 gen_op_iwmmxt_movq_wRn_M0(wrd);
1728 }
1729 } else {
1730 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1731 tmp = iwmmxt_load_creg(wrd);
12dcc321 1732 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1733 } else {
1734 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1735 tmp = tcg_temp_new_i32();
e677137d
PB
1736 if (insn & (1 << 8)) {
1737 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1738 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1739 } else { /* WSTRW wRd */
ecc7b3aa 1740 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1741 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1742 }
1743 } else {
1744 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1745 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1746 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1747 } else { /* WSTRB */
ecc7b3aa 1748 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1749 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1750 }
1751 }
18c9b560 1752 }
29531141 1753 tcg_temp_free_i32(tmp);
18c9b560 1754 }
7d1b0095 1755 tcg_temp_free_i32(addr);
18c9b560
AZ
1756 return 0;
1757 }
1758
1759 if ((insn & 0x0f000000) != 0x0e000000)
1760 return 1;
1761
1762 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1763 case 0x000: /* WOR */
1764 wrd = (insn >> 12) & 0xf;
1765 rd0 = (insn >> 0) & 0xf;
1766 rd1 = (insn >> 16) & 0xf;
1767 gen_op_iwmmxt_movq_M0_wRn(rd0);
1768 gen_op_iwmmxt_orq_M0_wRn(rd1);
1769 gen_op_iwmmxt_setpsr_nz();
1770 gen_op_iwmmxt_movq_wRn_M0(wrd);
1771 gen_op_iwmmxt_set_mup();
1772 gen_op_iwmmxt_set_cup();
1773 break;
1774 case 0x011: /* TMCR */
1775 if (insn & 0xf)
1776 return 1;
1777 rd = (insn >> 12) & 0xf;
1778 wrd = (insn >> 16) & 0xf;
1779 switch (wrd) {
1780 case ARM_IWMMXT_wCID:
1781 case ARM_IWMMXT_wCASF:
1782 break;
1783 case ARM_IWMMXT_wCon:
1784 gen_op_iwmmxt_set_cup();
1785 /* Fall through. */
1786 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1787 tmp = iwmmxt_load_creg(wrd);
1788 tmp2 = load_reg(s, rd);
f669df27 1789 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1790 tcg_temp_free_i32(tmp2);
da6b5335 1791 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1792 break;
1793 case ARM_IWMMXT_wCGR0:
1794 case ARM_IWMMXT_wCGR1:
1795 case ARM_IWMMXT_wCGR2:
1796 case ARM_IWMMXT_wCGR3:
1797 gen_op_iwmmxt_set_cup();
da6b5335
FN
1798 tmp = load_reg(s, rd);
1799 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1800 break;
1801 default:
1802 return 1;
1803 }
1804 break;
1805 case 0x100: /* WXOR */
1806 wrd = (insn >> 12) & 0xf;
1807 rd0 = (insn >> 0) & 0xf;
1808 rd1 = (insn >> 16) & 0xf;
1809 gen_op_iwmmxt_movq_M0_wRn(rd0);
1810 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1811 gen_op_iwmmxt_setpsr_nz();
1812 gen_op_iwmmxt_movq_wRn_M0(wrd);
1813 gen_op_iwmmxt_set_mup();
1814 gen_op_iwmmxt_set_cup();
1815 break;
1816 case 0x111: /* TMRC */
1817 if (insn & 0xf)
1818 return 1;
1819 rd = (insn >> 12) & 0xf;
1820 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1821 tmp = iwmmxt_load_creg(wrd);
1822 store_reg(s, rd, tmp);
18c9b560
AZ
1823 break;
1824 case 0x300: /* WANDN */
1825 wrd = (insn >> 12) & 0xf;
1826 rd0 = (insn >> 0) & 0xf;
1827 rd1 = (insn >> 16) & 0xf;
1828 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1829 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1830 gen_op_iwmmxt_andq_M0_wRn(rd1);
1831 gen_op_iwmmxt_setpsr_nz();
1832 gen_op_iwmmxt_movq_wRn_M0(wrd);
1833 gen_op_iwmmxt_set_mup();
1834 gen_op_iwmmxt_set_cup();
1835 break;
1836 case 0x200: /* WAND */
1837 wrd = (insn >> 12) & 0xf;
1838 rd0 = (insn >> 0) & 0xf;
1839 rd1 = (insn >> 16) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0);
1841 gen_op_iwmmxt_andq_M0_wRn(rd1);
1842 gen_op_iwmmxt_setpsr_nz();
1843 gen_op_iwmmxt_movq_wRn_M0(wrd);
1844 gen_op_iwmmxt_set_mup();
1845 gen_op_iwmmxt_set_cup();
1846 break;
1847 case 0x810: case 0xa10: /* WMADD */
1848 wrd = (insn >> 12) & 0xf;
1849 rd0 = (insn >> 0) & 0xf;
1850 rd1 = (insn >> 16) & 0xf;
1851 gen_op_iwmmxt_movq_M0_wRn(rd0);
1852 if (insn & (1 << 21))
1853 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1854 else
1855 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1856 gen_op_iwmmxt_movq_wRn_M0(wrd);
1857 gen_op_iwmmxt_set_mup();
1858 break;
1859 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1860 wrd = (insn >> 12) & 0xf;
1861 rd0 = (insn >> 16) & 0xf;
1862 rd1 = (insn >> 0) & 0xf;
1863 gen_op_iwmmxt_movq_M0_wRn(rd0);
1864 switch ((insn >> 22) & 3) {
1865 case 0:
1866 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1867 break;
1868 case 1:
1869 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1870 break;
1871 case 2:
1872 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1873 break;
1874 case 3:
1875 return 1;
1876 }
1877 gen_op_iwmmxt_movq_wRn_M0(wrd);
1878 gen_op_iwmmxt_set_mup();
1879 gen_op_iwmmxt_set_cup();
1880 break;
1881 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1882 wrd = (insn >> 12) & 0xf;
1883 rd0 = (insn >> 16) & 0xf;
1884 rd1 = (insn >> 0) & 0xf;
1885 gen_op_iwmmxt_movq_M0_wRn(rd0);
1886 switch ((insn >> 22) & 3) {
1887 case 0:
1888 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1889 break;
1890 case 1:
1891 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1892 break;
1893 case 2:
1894 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1895 break;
1896 case 3:
1897 return 1;
1898 }
1899 gen_op_iwmmxt_movq_wRn_M0(wrd);
1900 gen_op_iwmmxt_set_mup();
1901 gen_op_iwmmxt_set_cup();
1902 break;
1903 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1904 wrd = (insn >> 12) & 0xf;
1905 rd0 = (insn >> 16) & 0xf;
1906 rd1 = (insn >> 0) & 0xf;
1907 gen_op_iwmmxt_movq_M0_wRn(rd0);
1908 if (insn & (1 << 22))
1909 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1910 else
1911 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1912 if (!(insn & (1 << 20)))
1913 gen_op_iwmmxt_addl_M0_wRn(wrd);
1914 gen_op_iwmmxt_movq_wRn_M0(wrd);
1915 gen_op_iwmmxt_set_mup();
1916 break;
1917 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1918 wrd = (insn >> 12) & 0xf;
1919 rd0 = (insn >> 16) & 0xf;
1920 rd1 = (insn >> 0) & 0xf;
1921 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1922 if (insn & (1 << 21)) {
1923 if (insn & (1 << 20))
1924 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1925 else
1926 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1927 } else {
1928 if (insn & (1 << 20))
1929 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1930 else
1931 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1932 }
18c9b560
AZ
1933 gen_op_iwmmxt_movq_wRn_M0(wrd);
1934 gen_op_iwmmxt_set_mup();
1935 break;
1936 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1937 wrd = (insn >> 12) & 0xf;
1938 rd0 = (insn >> 16) & 0xf;
1939 rd1 = (insn >> 0) & 0xf;
1940 gen_op_iwmmxt_movq_M0_wRn(rd0);
1941 if (insn & (1 << 21))
1942 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1943 else
1944 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1945 if (!(insn & (1 << 20))) {
e677137d
PB
1946 iwmmxt_load_reg(cpu_V1, wrd);
1947 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1948 }
1949 gen_op_iwmmxt_movq_wRn_M0(wrd);
1950 gen_op_iwmmxt_set_mup();
1951 break;
1952 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1953 wrd = (insn >> 12) & 0xf;
1954 rd0 = (insn >> 16) & 0xf;
1955 rd1 = (insn >> 0) & 0xf;
1956 gen_op_iwmmxt_movq_M0_wRn(rd0);
1957 switch ((insn >> 22) & 3) {
1958 case 0:
1959 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1960 break;
1961 case 1:
1962 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1963 break;
1964 case 2:
1965 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1966 break;
1967 case 3:
1968 return 1;
1969 }
1970 gen_op_iwmmxt_movq_wRn_M0(wrd);
1971 gen_op_iwmmxt_set_mup();
1972 gen_op_iwmmxt_set_cup();
1973 break;
1974 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1975 wrd = (insn >> 12) & 0xf;
1976 rd0 = (insn >> 16) & 0xf;
1977 rd1 = (insn >> 0) & 0xf;
1978 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1979 if (insn & (1 << 22)) {
1980 if (insn & (1 << 20))
1981 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1982 else
1983 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1984 } else {
1985 if (insn & (1 << 20))
1986 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1987 else
1988 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1989 }
18c9b560
AZ
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 gen_op_iwmmxt_set_cup();
1993 break;
1994 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1995 wrd = (insn >> 12) & 0xf;
1996 rd0 = (insn >> 16) & 0xf;
1997 rd1 = (insn >> 0) & 0xf;
1998 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1999 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2000 tcg_gen_andi_i32(tmp, tmp, 7);
2001 iwmmxt_load_reg(cpu_V1, rd1);
2002 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2003 tcg_temp_free_i32(tmp);
18c9b560
AZ
2004 gen_op_iwmmxt_movq_wRn_M0(wrd);
2005 gen_op_iwmmxt_set_mup();
2006 break;
2007 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2008 if (((insn >> 6) & 3) == 3)
2009 return 1;
18c9b560
AZ
2010 rd = (insn >> 12) & 0xf;
2011 wrd = (insn >> 16) & 0xf;
da6b5335 2012 tmp = load_reg(s, rd);
18c9b560
AZ
2013 gen_op_iwmmxt_movq_M0_wRn(wrd);
2014 switch ((insn >> 6) & 3) {
2015 case 0:
da6b5335
FN
2016 tmp2 = tcg_const_i32(0xff);
2017 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2018 break;
2019 case 1:
da6b5335
FN
2020 tmp2 = tcg_const_i32(0xffff);
2021 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2022 break;
2023 case 2:
da6b5335
FN
2024 tmp2 = tcg_const_i32(0xffffffff);
2025 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2026 break;
da6b5335 2027 default:
39d5492a
PM
2028 TCGV_UNUSED_I32(tmp2);
2029 TCGV_UNUSED_I32(tmp3);
18c9b560 2030 }
da6b5335 2031 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2032 tcg_temp_free_i32(tmp3);
2033 tcg_temp_free_i32(tmp2);
7d1b0095 2034 tcg_temp_free_i32(tmp);
18c9b560
AZ
2035 gen_op_iwmmxt_movq_wRn_M0(wrd);
2036 gen_op_iwmmxt_set_mup();
2037 break;
2038 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2039 rd = (insn >> 12) & 0xf;
2040 wrd = (insn >> 16) & 0xf;
da6b5335 2041 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2042 return 1;
2043 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2044 tmp = tcg_temp_new_i32();
18c9b560
AZ
2045 switch ((insn >> 22) & 3) {
2046 case 0:
da6b5335 2047 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2048 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2049 if (insn & 8) {
2050 tcg_gen_ext8s_i32(tmp, tmp);
2051 } else {
2052 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2053 }
2054 break;
2055 case 1:
da6b5335 2056 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2057 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2058 if (insn & 8) {
2059 tcg_gen_ext16s_i32(tmp, tmp);
2060 } else {
2061 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2062 }
2063 break;
2064 case 2:
da6b5335 2065 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2066 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2067 break;
18c9b560 2068 }
da6b5335 2069 store_reg(s, rd, tmp);
18c9b560
AZ
2070 break;
2071 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2072 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2073 return 1;
da6b5335 2074 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2075 switch ((insn >> 22) & 3) {
2076 case 0:
da6b5335 2077 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2078 break;
2079 case 1:
da6b5335 2080 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2081 break;
2082 case 2:
da6b5335 2083 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2084 break;
18c9b560 2085 }
da6b5335
FN
2086 tcg_gen_shli_i32(tmp, tmp, 28);
2087 gen_set_nzcv(tmp);
7d1b0095 2088 tcg_temp_free_i32(tmp);
18c9b560
AZ
2089 break;
2090 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2091 if (((insn >> 6) & 3) == 3)
2092 return 1;
18c9b560
AZ
2093 rd = (insn >> 12) & 0xf;
2094 wrd = (insn >> 16) & 0xf;
da6b5335 2095 tmp = load_reg(s, rd);
18c9b560
AZ
2096 switch ((insn >> 6) & 3) {
2097 case 0:
da6b5335 2098 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2099 break;
2100 case 1:
da6b5335 2101 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2102 break;
2103 case 2:
da6b5335 2104 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2105 break;
18c9b560 2106 }
7d1b0095 2107 tcg_temp_free_i32(tmp);
18c9b560
AZ
2108 gen_op_iwmmxt_movq_wRn_M0(wrd);
2109 gen_op_iwmmxt_set_mup();
2110 break;
2111 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2112 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2113 return 1;
da6b5335 2114 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2115 tmp2 = tcg_temp_new_i32();
da6b5335 2116 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2117 switch ((insn >> 22) & 3) {
2118 case 0:
2119 for (i = 0; i < 7; i ++) {
da6b5335
FN
2120 tcg_gen_shli_i32(tmp2, tmp2, 4);
2121 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2122 }
2123 break;
2124 case 1:
2125 for (i = 0; i < 3; i ++) {
da6b5335
FN
2126 tcg_gen_shli_i32(tmp2, tmp2, 8);
2127 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2128 }
2129 break;
2130 case 2:
da6b5335
FN
2131 tcg_gen_shli_i32(tmp2, tmp2, 16);
2132 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2133 break;
18c9b560 2134 }
da6b5335 2135 gen_set_nzcv(tmp);
7d1b0095
PM
2136 tcg_temp_free_i32(tmp2);
2137 tcg_temp_free_i32(tmp);
18c9b560
AZ
2138 break;
2139 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2140 wrd = (insn >> 12) & 0xf;
2141 rd0 = (insn >> 16) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0);
2143 switch ((insn >> 22) & 3) {
2144 case 0:
e677137d 2145 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2146 break;
2147 case 1:
e677137d 2148 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2149 break;
2150 case 2:
e677137d 2151 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2152 break;
2153 case 3:
2154 return 1;
2155 }
2156 gen_op_iwmmxt_movq_wRn_M0(wrd);
2157 gen_op_iwmmxt_set_mup();
2158 break;
2159 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2160 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2161 return 1;
da6b5335 2162 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2163 tmp2 = tcg_temp_new_i32();
da6b5335 2164 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2165 switch ((insn >> 22) & 3) {
2166 case 0:
2167 for (i = 0; i < 7; i ++) {
da6b5335
FN
2168 tcg_gen_shli_i32(tmp2, tmp2, 4);
2169 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2170 }
2171 break;
2172 case 1:
2173 for (i = 0; i < 3; i ++) {
da6b5335
FN
2174 tcg_gen_shli_i32(tmp2, tmp2, 8);
2175 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2176 }
2177 break;
2178 case 2:
da6b5335
FN
2179 tcg_gen_shli_i32(tmp2, tmp2, 16);
2180 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2181 break;
18c9b560 2182 }
da6b5335 2183 gen_set_nzcv(tmp);
7d1b0095
PM
2184 tcg_temp_free_i32(tmp2);
2185 tcg_temp_free_i32(tmp);
18c9b560
AZ
2186 break;
2187 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2188 rd = (insn >> 12) & 0xf;
2189 rd0 = (insn >> 16) & 0xf;
da6b5335 2190 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2191 return 1;
2192 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2193 tmp = tcg_temp_new_i32();
18c9b560
AZ
2194 switch ((insn >> 22) & 3) {
2195 case 0:
da6b5335 2196 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2197 break;
2198 case 1:
da6b5335 2199 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2200 break;
2201 case 2:
da6b5335 2202 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2203 break;
18c9b560 2204 }
da6b5335 2205 store_reg(s, rd, tmp);
18c9b560
AZ
2206 break;
2207 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2208 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2209 wrd = (insn >> 12) & 0xf;
2210 rd0 = (insn >> 16) & 0xf;
2211 rd1 = (insn >> 0) & 0xf;
2212 gen_op_iwmmxt_movq_M0_wRn(rd0);
2213 switch ((insn >> 22) & 3) {
2214 case 0:
2215 if (insn & (1 << 21))
2216 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2217 else
2218 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2219 break;
2220 case 1:
2221 if (insn & (1 << 21))
2222 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2223 else
2224 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2225 break;
2226 case 2:
2227 if (insn & (1 << 21))
2228 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2229 else
2230 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2231 break;
2232 case 3:
2233 return 1;
2234 }
2235 gen_op_iwmmxt_movq_wRn_M0(wrd);
2236 gen_op_iwmmxt_set_mup();
2237 gen_op_iwmmxt_set_cup();
2238 break;
2239 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2240 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0);
2244 switch ((insn >> 22) & 3) {
2245 case 0:
2246 if (insn & (1 << 21))
2247 gen_op_iwmmxt_unpacklsb_M0();
2248 else
2249 gen_op_iwmmxt_unpacklub_M0();
2250 break;
2251 case 1:
2252 if (insn & (1 << 21))
2253 gen_op_iwmmxt_unpacklsw_M0();
2254 else
2255 gen_op_iwmmxt_unpackluw_M0();
2256 break;
2257 case 2:
2258 if (insn & (1 << 21))
2259 gen_op_iwmmxt_unpacklsl_M0();
2260 else
2261 gen_op_iwmmxt_unpacklul_M0();
2262 break;
2263 case 3:
2264 return 1;
2265 }
2266 gen_op_iwmmxt_movq_wRn_M0(wrd);
2267 gen_op_iwmmxt_set_mup();
2268 gen_op_iwmmxt_set_cup();
2269 break;
2270 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2271 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 gen_op_iwmmxt_movq_M0_wRn(rd0);
2275 switch ((insn >> 22) & 3) {
2276 case 0:
2277 if (insn & (1 << 21))
2278 gen_op_iwmmxt_unpackhsb_M0();
2279 else
2280 gen_op_iwmmxt_unpackhub_M0();
2281 break;
2282 case 1:
2283 if (insn & (1 << 21))
2284 gen_op_iwmmxt_unpackhsw_M0();
2285 else
2286 gen_op_iwmmxt_unpackhuw_M0();
2287 break;
2288 case 2:
2289 if (insn & (1 << 21))
2290 gen_op_iwmmxt_unpackhsl_M0();
2291 else
2292 gen_op_iwmmxt_unpackhul_M0();
2293 break;
2294 case 3:
2295 return 1;
2296 }
2297 gen_op_iwmmxt_movq_wRn_M0(wrd);
2298 gen_op_iwmmxt_set_mup();
2299 gen_op_iwmmxt_set_cup();
2300 break;
2301 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2302 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2303 if (((insn >> 22) & 3) == 0)
2304 return 1;
18c9b560
AZ
2305 wrd = (insn >> 12) & 0xf;
2306 rd0 = (insn >> 16) & 0xf;
2307 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2308 tmp = tcg_temp_new_i32();
da6b5335 2309 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2310 tcg_temp_free_i32(tmp);
18c9b560 2311 return 1;
da6b5335 2312 }
18c9b560 2313 switch ((insn >> 22) & 3) {
18c9b560 2314 case 1:
477955bd 2315 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2316 break;
2317 case 2:
477955bd 2318 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2319 break;
2320 case 3:
477955bd 2321 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2322 break;
2323 }
7d1b0095 2324 tcg_temp_free_i32(tmp);
18c9b560
AZ
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2328 break;
2329 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2330 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2331 if (((insn >> 22) & 3) == 0)
2332 return 1;
18c9b560
AZ
2333 wrd = (insn >> 12) & 0xf;
2334 rd0 = (insn >> 16) & 0xf;
2335 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2336 tmp = tcg_temp_new_i32();
da6b5335 2337 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2338 tcg_temp_free_i32(tmp);
18c9b560 2339 return 1;
da6b5335 2340 }
18c9b560 2341 switch ((insn >> 22) & 3) {
18c9b560 2342 case 1:
477955bd 2343 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2344 break;
2345 case 2:
477955bd 2346 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2347 break;
2348 case 3:
477955bd 2349 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2350 break;
2351 }
7d1b0095 2352 tcg_temp_free_i32(tmp);
18c9b560
AZ
2353 gen_op_iwmmxt_movq_wRn_M0(wrd);
2354 gen_op_iwmmxt_set_mup();
2355 gen_op_iwmmxt_set_cup();
2356 break;
2357 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2358 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2359 if (((insn >> 22) & 3) == 0)
2360 return 1;
18c9b560
AZ
2361 wrd = (insn >> 12) & 0xf;
2362 rd0 = (insn >> 16) & 0xf;
2363 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2364 tmp = tcg_temp_new_i32();
da6b5335 2365 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2366 tcg_temp_free_i32(tmp);
18c9b560 2367 return 1;
da6b5335 2368 }
18c9b560 2369 switch ((insn >> 22) & 3) {
18c9b560 2370 case 1:
477955bd 2371 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2372 break;
2373 case 2:
477955bd 2374 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2375 break;
2376 case 3:
477955bd 2377 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2378 break;
2379 }
7d1b0095 2380 tcg_temp_free_i32(tmp);
18c9b560
AZ
2381 gen_op_iwmmxt_movq_wRn_M0(wrd);
2382 gen_op_iwmmxt_set_mup();
2383 gen_op_iwmmxt_set_cup();
2384 break;
2385 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2386 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2387 if (((insn >> 22) & 3) == 0)
2388 return 1;
18c9b560
AZ
2389 wrd = (insn >> 12) & 0xf;
2390 rd0 = (insn >> 16) & 0xf;
2391 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2392 tmp = tcg_temp_new_i32();
18c9b560 2393 switch ((insn >> 22) & 3) {
18c9b560 2394 case 1:
da6b5335 2395 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2396 tcg_temp_free_i32(tmp);
18c9b560 2397 return 1;
da6b5335 2398 }
477955bd 2399 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2400 break;
2401 case 2:
da6b5335 2402 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2403 tcg_temp_free_i32(tmp);
18c9b560 2404 return 1;
da6b5335 2405 }
477955bd 2406 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2407 break;
2408 case 3:
da6b5335 2409 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2410 tcg_temp_free_i32(tmp);
18c9b560 2411 return 1;
da6b5335 2412 }
477955bd 2413 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2414 break;
2415 }
7d1b0095 2416 tcg_temp_free_i32(tmp);
18c9b560
AZ
2417 gen_op_iwmmxt_movq_wRn_M0(wrd);
2418 gen_op_iwmmxt_set_mup();
2419 gen_op_iwmmxt_set_cup();
2420 break;
2421 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2422 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2423 wrd = (insn >> 12) & 0xf;
2424 rd0 = (insn >> 16) & 0xf;
2425 rd1 = (insn >> 0) & 0xf;
2426 gen_op_iwmmxt_movq_M0_wRn(rd0);
2427 switch ((insn >> 22) & 3) {
2428 case 0:
2429 if (insn & (1 << 21))
2430 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2431 else
2432 gen_op_iwmmxt_minub_M0_wRn(rd1);
2433 break;
2434 case 1:
2435 if (insn & (1 << 21))
2436 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2437 else
2438 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2439 break;
2440 case 2:
2441 if (insn & (1 << 21))
2442 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2443 else
2444 gen_op_iwmmxt_minul_M0_wRn(rd1);
2445 break;
2446 case 3:
2447 return 1;
2448 }
2449 gen_op_iwmmxt_movq_wRn_M0(wrd);
2450 gen_op_iwmmxt_set_mup();
2451 break;
2452 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2453 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2454 wrd = (insn >> 12) & 0xf;
2455 rd0 = (insn >> 16) & 0xf;
2456 rd1 = (insn >> 0) & 0xf;
2457 gen_op_iwmmxt_movq_M0_wRn(rd0);
2458 switch ((insn >> 22) & 3) {
2459 case 0:
2460 if (insn & (1 << 21))
2461 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2462 else
2463 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2464 break;
2465 case 1:
2466 if (insn & (1 << 21))
2467 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2468 else
2469 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2470 break;
2471 case 2:
2472 if (insn & (1 << 21))
2473 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2474 else
2475 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2476 break;
2477 case 3:
2478 return 1;
2479 }
2480 gen_op_iwmmxt_movq_wRn_M0(wrd);
2481 gen_op_iwmmxt_set_mup();
2482 break;
2483 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2484 case 0x402: case 0x502: case 0x602: case 0x702:
2485 wrd = (insn >> 12) & 0xf;
2486 rd0 = (insn >> 16) & 0xf;
2487 rd1 = (insn >> 0) & 0xf;
2488 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2489 tmp = tcg_const_i32((insn >> 20) & 3);
2490 iwmmxt_load_reg(cpu_V1, rd1);
2491 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2492 tcg_temp_free_i32(tmp);
18c9b560
AZ
2493 gen_op_iwmmxt_movq_wRn_M0(wrd);
2494 gen_op_iwmmxt_set_mup();
2495 break;
2496 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2497 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2498 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2499 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2500 wrd = (insn >> 12) & 0xf;
2501 rd0 = (insn >> 16) & 0xf;
2502 rd1 = (insn >> 0) & 0xf;
2503 gen_op_iwmmxt_movq_M0_wRn(rd0);
2504 switch ((insn >> 20) & 0xf) {
2505 case 0x0:
2506 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2507 break;
2508 case 0x1:
2509 gen_op_iwmmxt_subub_M0_wRn(rd1);
2510 break;
2511 case 0x3:
2512 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2513 break;
2514 case 0x4:
2515 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2516 break;
2517 case 0x5:
2518 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2519 break;
2520 case 0x7:
2521 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2522 break;
2523 case 0x8:
2524 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2525 break;
2526 case 0x9:
2527 gen_op_iwmmxt_subul_M0_wRn(rd1);
2528 break;
2529 case 0xb:
2530 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2531 break;
2532 default:
2533 return 1;
2534 }
2535 gen_op_iwmmxt_movq_wRn_M0(wrd);
2536 gen_op_iwmmxt_set_mup();
2537 gen_op_iwmmxt_set_cup();
2538 break;
2539 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2540 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2541 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2542 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2543 wrd = (insn >> 12) & 0xf;
2544 rd0 = (insn >> 16) & 0xf;
2545 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2546 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2547 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2548 tcg_temp_free_i32(tmp);
18c9b560
AZ
2549 gen_op_iwmmxt_movq_wRn_M0(wrd);
2550 gen_op_iwmmxt_set_mup();
2551 gen_op_iwmmxt_set_cup();
2552 break;
2553 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2554 case 0x418: case 0x518: case 0x618: case 0x718:
2555 case 0x818: case 0x918: case 0xa18: case 0xb18:
2556 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2557 wrd = (insn >> 12) & 0xf;
2558 rd0 = (insn >> 16) & 0xf;
2559 rd1 = (insn >> 0) & 0xf;
2560 gen_op_iwmmxt_movq_M0_wRn(rd0);
2561 switch ((insn >> 20) & 0xf) {
2562 case 0x0:
2563 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2564 break;
2565 case 0x1:
2566 gen_op_iwmmxt_addub_M0_wRn(rd1);
2567 break;
2568 case 0x3:
2569 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2570 break;
2571 case 0x4:
2572 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2573 break;
2574 case 0x5:
2575 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2576 break;
2577 case 0x7:
2578 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2579 break;
2580 case 0x8:
2581 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2582 break;
2583 case 0x9:
2584 gen_op_iwmmxt_addul_M0_wRn(rd1);
2585 break;
2586 case 0xb:
2587 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2588 break;
2589 default:
2590 return 1;
2591 }
2592 gen_op_iwmmxt_movq_wRn_M0(wrd);
2593 gen_op_iwmmxt_set_mup();
2594 gen_op_iwmmxt_set_cup();
2595 break;
2596 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2597 case 0x408: case 0x508: case 0x608: case 0x708:
2598 case 0x808: case 0x908: case 0xa08: case 0xb08:
2599 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2600 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2601 return 1;
18c9b560
AZ
2602 wrd = (insn >> 12) & 0xf;
2603 rd0 = (insn >> 16) & 0xf;
2604 rd1 = (insn >> 0) & 0xf;
2605 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2606 switch ((insn >> 22) & 3) {
18c9b560
AZ
2607 case 1:
2608 if (insn & (1 << 21))
2609 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2610 else
2611 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2612 break;
2613 case 2:
2614 if (insn & (1 << 21))
2615 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2616 else
2617 gen_op_iwmmxt_packul_M0_wRn(rd1);
2618 break;
2619 case 3:
2620 if (insn & (1 << 21))
2621 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2622 else
2623 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2624 break;
2625 }
2626 gen_op_iwmmxt_movq_wRn_M0(wrd);
2627 gen_op_iwmmxt_set_mup();
2628 gen_op_iwmmxt_set_cup();
2629 break;
2630 case 0x201: case 0x203: case 0x205: case 0x207:
2631 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2632 case 0x211: case 0x213: case 0x215: case 0x217:
2633 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2634 wrd = (insn >> 5) & 0xf;
2635 rd0 = (insn >> 12) & 0xf;
2636 rd1 = (insn >> 0) & 0xf;
2637 if (rd0 == 0xf || rd1 == 0xf)
2638 return 1;
2639 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2640 tmp = load_reg(s, rd0);
2641 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2642 switch ((insn >> 16) & 0xf) {
2643 case 0x0: /* TMIA */
da6b5335 2644 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2645 break;
2646 case 0x8: /* TMIAPH */
da6b5335 2647 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2648 break;
2649 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2650 if (insn & (1 << 16))
da6b5335 2651 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2652 if (insn & (1 << 17))
da6b5335
FN
2653 tcg_gen_shri_i32(tmp2, tmp2, 16);
2654 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2655 break;
2656 default:
7d1b0095
PM
2657 tcg_temp_free_i32(tmp2);
2658 tcg_temp_free_i32(tmp);
18c9b560
AZ
2659 return 1;
2660 }
7d1b0095
PM
2661 tcg_temp_free_i32(tmp2);
2662 tcg_temp_free_i32(tmp);
18c9b560
AZ
2663 gen_op_iwmmxt_movq_wRn_M0(wrd);
2664 gen_op_iwmmxt_set_mup();
2665 break;
2666 default:
2667 return 1;
2668 }
2669
2670 return 0;
2671}
2672
a1c7273b 2673/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2674 (ie. an undefined instruction). */
7dcc1f89 2675static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2676{
2677 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2678 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2679
2680 if ((insn & 0x0ff00f10) == 0x0e200010) {
2681 /* Multiply with Internal Accumulate Format */
2682 rd0 = (insn >> 12) & 0xf;
2683 rd1 = insn & 0xf;
2684 acc = (insn >> 5) & 7;
2685
2686 if (acc != 0)
2687 return 1;
2688
3a554c0f
FN
2689 tmp = load_reg(s, rd0);
2690 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2691 switch ((insn >> 16) & 0xf) {
2692 case 0x0: /* MIA */
3a554c0f 2693 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2694 break;
2695 case 0x8: /* MIAPH */
3a554c0f 2696 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2697 break;
2698 case 0xc: /* MIABB */
2699 case 0xd: /* MIABT */
2700 case 0xe: /* MIATB */
2701 case 0xf: /* MIATT */
18c9b560 2702 if (insn & (1 << 16))
3a554c0f 2703 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2704 if (insn & (1 << 17))
3a554c0f
FN
2705 tcg_gen_shri_i32(tmp2, tmp2, 16);
2706 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2707 break;
2708 default:
2709 return 1;
2710 }
7d1b0095
PM
2711 tcg_temp_free_i32(tmp2);
2712 tcg_temp_free_i32(tmp);
18c9b560
AZ
2713
2714 gen_op_iwmmxt_movq_wRn_M0(acc);
2715 return 0;
2716 }
2717
2718 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2719 /* Internal Accumulator Access Format */
2720 rdhi = (insn >> 16) & 0xf;
2721 rdlo = (insn >> 12) & 0xf;
2722 acc = insn & 7;
2723
2724 if (acc != 0)
2725 return 1;
2726
2727 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2728 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2729 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2730 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2731 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2732 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2733 } else { /* MAR */
3a554c0f
FN
2734 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2735 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2736 }
2737 return 0;
2738 }
2739
2740 return 1;
2741}
2742
9ee6e8bb
PB
/* Helpers for extracting VFP register numbers from an instruction.
 *
 * A single precision register number is five bits: four of them live in
 * one field of the instruction and the remaining (low) bit elsewhere.
 * Double precision registers use the four bit field as the low bits and
 * the separate bit as bit 4; that extra bit is only valid with VFP3 and
 * later, and on earlier cores a set "smallbit" is UNDEFINED (the
 * VFP_DREG macro returns 1 from the enclosing decoder in that case).
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
            | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Destination (D), first source (N) and second source (M) registers. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2762
4373f3ce 2763/* Move between integer and VFP cores. */
39d5492a 2764static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2765{
39d5492a 2766 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2767 tcg_gen_mov_i32(tmp, cpu_F0s);
2768 return tmp;
2769}
2770
39d5492a 2771static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2772{
2773 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2774 tcg_temp_free_i32(tmp);
4373f3ce
PB
2775}
2776
39d5492a 2777static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2778{
39d5492a 2779 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2780 if (shift)
2781 tcg_gen_shri_i32(var, var, shift);
86831435 2782 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2783 tcg_gen_shli_i32(tmp, var, 8);
2784 tcg_gen_or_i32(var, var, tmp);
2785 tcg_gen_shli_i32(tmp, var, 16);
2786 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2787 tcg_temp_free_i32(tmp);
ad69471c
PB
2788}
2789
39d5492a 2790static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2791{
39d5492a 2792 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2793 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2794 tcg_gen_shli_i32(tmp, var, 16);
2795 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2796 tcg_temp_free_i32(tmp);
ad69471c
PB
2797}
2798
39d5492a 2799static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2800{
39d5492a 2801 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2802 tcg_gen_andi_i32(var, var, 0xffff0000);
2803 tcg_gen_shri_i32(tmp, var, 16);
2804 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2805 tcg_temp_free_i32(tmp);
ad69471c
PB
2806}
2807
39d5492a 2808static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2809{
2810 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2811 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2812 switch (size) {
2813 case 0:
12dcc321 2814 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2815 gen_neon_dup_u8(tmp, 0);
2816 break;
2817 case 1:
12dcc321 2818 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2819 gen_neon_dup_low16(tmp);
2820 break;
2821 case 2:
12dcc321 2822 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2823 break;
2824 default: /* Avoid compiler warnings. */
2825 abort();
2826 }
2827 return tmp;
2828}
2829
04731fb5
WN
2830static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2831 uint32_t dp)
2832{
2833 uint32_t cc = extract32(insn, 20, 2);
2834
2835 if (dp) {
2836 TCGv_i64 frn, frm, dest;
2837 TCGv_i64 tmp, zero, zf, nf, vf;
2838
2839 zero = tcg_const_i64(0);
2840
2841 frn = tcg_temp_new_i64();
2842 frm = tcg_temp_new_i64();
2843 dest = tcg_temp_new_i64();
2844
2845 zf = tcg_temp_new_i64();
2846 nf = tcg_temp_new_i64();
2847 vf = tcg_temp_new_i64();
2848
2849 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2850 tcg_gen_ext_i32_i64(nf, cpu_NF);
2851 tcg_gen_ext_i32_i64(vf, cpu_VF);
2852
2853 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2854 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2855 switch (cc) {
2856 case 0: /* eq: Z */
2857 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2858 frn, frm);
2859 break;
2860 case 1: /* vs: V */
2861 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2862 frn, frm);
2863 break;
2864 case 2: /* ge: N == V -> N ^ V == 0 */
2865 tmp = tcg_temp_new_i64();
2866 tcg_gen_xor_i64(tmp, vf, nf);
2867 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2868 frn, frm);
2869 tcg_temp_free_i64(tmp);
2870 break;
2871 case 3: /* gt: !Z && N == V */
2872 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2873 frn, frm);
2874 tmp = tcg_temp_new_i64();
2875 tcg_gen_xor_i64(tmp, vf, nf);
2876 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2877 dest, frm);
2878 tcg_temp_free_i64(tmp);
2879 break;
2880 }
2881 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2882 tcg_temp_free_i64(frn);
2883 tcg_temp_free_i64(frm);
2884 tcg_temp_free_i64(dest);
2885
2886 tcg_temp_free_i64(zf);
2887 tcg_temp_free_i64(nf);
2888 tcg_temp_free_i64(vf);
2889
2890 tcg_temp_free_i64(zero);
2891 } else {
2892 TCGv_i32 frn, frm, dest;
2893 TCGv_i32 tmp, zero;
2894
2895 zero = tcg_const_i32(0);
2896
2897 frn = tcg_temp_new_i32();
2898 frm = tcg_temp_new_i32();
2899 dest = tcg_temp_new_i32();
2900 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2901 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2902 switch (cc) {
2903 case 0: /* eq: Z */
2904 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2905 frn, frm);
2906 break;
2907 case 1: /* vs: V */
2908 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2909 frn, frm);
2910 break;
2911 case 2: /* ge: N == V -> N ^ V == 0 */
2912 tmp = tcg_temp_new_i32();
2913 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2914 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2915 frn, frm);
2916 tcg_temp_free_i32(tmp);
2917 break;
2918 case 3: /* gt: !Z && N == V */
2919 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2920 frn, frm);
2921 tmp = tcg_temp_new_i32();
2922 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2923 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2924 dest, frm);
2925 tcg_temp_free_i32(tmp);
2926 break;
2927 }
2928 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2929 tcg_temp_free_i32(frn);
2930 tcg_temp_free_i32(frm);
2931 tcg_temp_free_i32(dest);
2932
2933 tcg_temp_free_i32(zero);
2934 }
2935
2936 return 0;
2937}
2938
40cfacdd
WN
2939static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2940 uint32_t rm, uint32_t dp)
2941{
2942 uint32_t vmin = extract32(insn, 6, 1);
2943 TCGv_ptr fpst = get_fpstatus_ptr(0);
2944
2945 if (dp) {
2946 TCGv_i64 frn, frm, dest;
2947
2948 frn = tcg_temp_new_i64();
2949 frm = tcg_temp_new_i64();
2950 dest = tcg_temp_new_i64();
2951
2952 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2953 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2954 if (vmin) {
f71a2ae5 2955 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2956 } else {
f71a2ae5 2957 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2958 }
2959 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2960 tcg_temp_free_i64(frn);
2961 tcg_temp_free_i64(frm);
2962 tcg_temp_free_i64(dest);
2963 } else {
2964 TCGv_i32 frn, frm, dest;
2965
2966 frn = tcg_temp_new_i32();
2967 frm = tcg_temp_new_i32();
2968 dest = tcg_temp_new_i32();
2969
2970 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2971 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2972 if (vmin) {
f71a2ae5 2973 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2974 } else {
f71a2ae5 2975 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2976 }
2977 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2978 tcg_temp_free_i32(frn);
2979 tcg_temp_free_i32(frm);
2980 tcg_temp_free_i32(dest);
2981 }
2982
2983 tcg_temp_free_ptr(fpst);
2984 return 0;
2985}
2986
7655f39b
WN
/* Handle the VRINT{A,N,P,M} instructions: round the single (dp == 0) or
 * double (dp == 1) precision value in VFP register rm to an integral value
 * in the same format, using the explicit @rounding mode (an arm_fprounding
 * value), and store the result in register rd.  The FPSCR rounding mode is
 * temporarily overridden and restored afterwards.  Returns 0 (the insn is
 * always valid once we get here).
 */
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    /* Install the requested rounding mode; the helper leaves the previous
     * mode in tcg_rmode so the second call below restores it.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    /* Restore the previous rounding mode saved in tcg_rmode above. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
3024
/* Handle the VCVT{A,N,P,M} instructions: convert the single (dp == 0) or
 * double (dp == 1) precision value in VFP register rm to a 32-bit signed
 * (insn bit 7 set) or unsigned integer, using the explicit @rounding mode
 * rather than the FPSCR one, and store the result in single-precision
 * register rd.  Returns 0 (always a valid encoding once we get here).
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Zero fractional bits: this is a plain float-to-int conversion. */
    tcg_shift = tcg_const_i32(0);

    /* Install the requested rounding mode; the helper leaves the previous
     * mode in tcg_rmode so the second call below restores it.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The helpers return a 64-bit value; only the low 32 bits matter. */
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the previous rounding mode saved in tcg_rmode above. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
3082
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY, /* RM field 0b00: to nearest, ties away from zero */
    FPROUNDING_TIEEVEN, /* RM field 0b01: to nearest, ties to even */
    FPROUNDING_POSINF,  /* RM field 0b10: towards +infinity */
    FPROUNDING_NEGINF,  /* RM field 0b11: towards -infinity */
};
3093
/* Decode the v8-only VFP encodings (those with cond == 0xf in ARM mode,
 * reachable only on ARMv8 and above): VSEL, VMINNM/VMAXNM, the
 * directed-rounding VRINT* group, and the directed-rounding VCVT* group.
 * Returns nonzero to UNDEF, otherwise the result of the specific handler.
 */
static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    /* Bit 8 selects double precision (CP11) vs single precision (CP10). */
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    /* Extract the three register numbers in the appropriate banking. */
    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    /* Dispatch on the fixed bits of each encoding group. */
    if ((insn & 0x0f800e50) == 0x0e000a00) {
        /* VSEL* (conditional select) */
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        /* VMAXNM / VMINNM */
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3127
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).
   On success, 0 is returned and the TCG ops implementing the instruction
   (including any exception generation for FP-disabled traps) have been
   emitted.  The decode covers single register transfers, VFP data
   processing (including the short-vector looping of pre-v8 VFP),
   two-register transfers, and load/store (single and multiple). */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    /* Coprocessor 11 (bits [11:8] == 0xb) is the double precision bank. */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                /* Transfers to/from one element of a D register
                 * (VMOV scalar / VDUP): needs Neon for the sub-word sizes.
                 */
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                /* VMRS/VMSR and single-precision VMOV core<->S reg. */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* VMRS APSR_nzcv: only the flag bits. */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            /* Pre-v8 VFP short vectors: vec_len/vec_stride from FPSCR
             * make one data-processing insn operate on a series of regs.
             */
            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Loop over the short-vector elements; a scalar op just
             * breaks out after the first iteration (veclen == 0).
             */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    /* Expand the 8-bit VFP immediate into a full float. */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge into the low half of rd, keeping the top. */
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge into the high half of rd, keeping the low. */
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        /* Restore the previous rounding mode. */
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        /* Two-register transfers (VMOV core pair <-> VFP) and
         * load/store (VLDR/VSTR/VLDM/VSTM).
         */
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
4047
90aa39a1 4048static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4049{
90aa39a1
SF
4050#ifndef CONFIG_USER_ONLY
4051 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4052 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4053#else
4054 return true;
4055#endif
4056}
6e256c93 4057
90aa39a1
SF
4058static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4059{
4060 if (use_goto_tb(s, dest)) {
57fec1fe 4061 tcg_gen_goto_tb(n);
eaed129d 4062 gen_set_pc_im(s, dest);
90aa39a1 4063 tcg_gen_exit_tb((uintptr_t)s->tb + n);
6e256c93 4064 } else {
eaed129d 4065 gen_set_pc_im(s, dest);
57fec1fe 4066 tcg_gen_exit_tb(0);
6e256c93 4067 }
c53be334
FB
4068}
4069
8aaca4c0
FB
4070static inline void gen_jmp (DisasContext *s, uint32_t dest)
4071{
50225ad0 4072 if (unlikely(s->singlestep_enabled || s->ss_active)) {
8aaca4c0 4073 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4074 if (s->thumb)
d9ba4830
PB
4075 dest |= 1;
4076 gen_bx_im(s, dest);
8aaca4c0 4077 } else {
6e256c93 4078 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4079 s->is_jmp = DISAS_TB_JUMP;
4080 }
4081}
4082
39d5492a 4083static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4084{
ee097184 4085 if (x)
d9ba4830 4086 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4087 else
d9ba4830 4088 gen_sxth(t0);
ee097184 4089 if (y)
d9ba4830 4090 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4091 else
d9ba4830
PB
4092 gen_sxth(t1);
4093 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4094}
4095
/* Return the mask of PSR bits set by a MSR instruction.
 * 'flags' bits 0..3 select the four byte lanes of the PSR (the MSR
 * <fields> specifier: c, x, s, f).  Bits not implemented by the
 * emulated architecture version, and privileged bits in user mode,
 * are removed from the mask.
 */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;       /* control field */
    if (flags & (1 << 1))
        mask |= 0xff00;     /* extension field */
    if (flags & (1 << 2))
        mask |= 0xff0000;   /* status field */
    if (flags & (1 << 3))
        mask |= 0xff000000; /* flags field */

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
4134
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead.
 * For spsr != 0 the masked bits of t0 are merged into the current SPSR
 * field; otherwise the CPSR is written via gen_set_cpsr().  Ends the TB
 * (gen_lookup_tb) since a PSR write may change translation-relevant state.
 */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
4156
/* Returns nonzero if access to the PSR is not permitted.
 * Immediate-operand variant of gen_set_psr(): moves 'val' into a fresh
 * temp and delegates (the temp is freed by gen_set_psr).
 */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
4165
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4310
/* Emit code for MSR (banked): write general register 'rn' to the banked
 * register selected by (r, sysm).  Invalid/forbidden encodings UNDEF via
 * msr_banked_access_decode().
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->is_jmp = DISAS_UPDATE;
}
4332
/* Emit code for MRS (banked): read the banked register selected by
 * (r, sysm) into general register 'rn'.  Invalid/forbidden encodings
 * UNDEF via msr_banked_access_decode().
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->is_jmp = DISAS_UPDATE;
}
4354
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 * Marks 'pc' as dead.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
4364
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    s->is_jmp = DISAS_JUMP;
}
3b46e624 4377
/* Generate an old-style exception return (PC plus SPSR restore).
 * Marks pc as dead.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4383
/* Emit code for the NOP-compatible hint space (YIELD/WFI/WFE/SEV/SEVL).
 * YIELD/WFI/WFE end the TB with a distinctive is_jmp value so the main
 * loop can act on them; the remaining hints are treated as NOPs.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_YIELD;
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.
         * NOTE(review): WFE is already dispatched above via DISAS_WFE;
         * presumably this TODO refers to making it a real wait — confirm.
         */
    default: /* nop */
        break;
    }
}
99c475ab 4406
/* Shorthand argument list: dest = cpu_V0, operands cpu_V0 and cpu_V1.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4408
39d5492a 4409static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4410{
4411 switch (size) {
dd8fbd78
FN
4412 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4413 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4414 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4415 default: abort();
9ee6e8bb 4416 }
9ee6e8bb
PB
4417}
4418
39d5492a 4419static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4420{
4421 switch (size) {
dd8fbd78
FN
4422 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4423 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4424 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4425 default: return;
4426 }
4427}
4428
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
/* (A pair of 32-bit elements fills exactly one register, so pairwise
 * max/min degenerate to the ordinary two-operand helpers.)
 */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4434
/* Expand a call to the 'name' Neon helper that takes cpu_env, selecting
 * the s8/u8/s16/u16/s32/u32 variant from the local 'size' and 'u'
 * variables; operands are the local temps 'tmp'/'tmp2', result in 'tmp'.
 * Invalid (size, u) combinations expand to 'return 1' in the caller.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4457
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env.
 * Selects the s8/u8/s16/u16/s32/u32 variant from the local 'size' and
 * 'u'; operands are 'tmp'/'tmp2', result in 'tmp'; invalid combinations
 * expand to 'return 1' in the caller.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4480
/* Load 32-bit scratch slot 'scratch' (vfp.scratch[] in CPUARMState)
 * into a newly allocated temp; caller owns and must free the temp.
 */
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
4487
/* Store 'var' into 32-bit scratch slot 'scratch' and free the temp
 * (marks var as dead).
 */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4493
/* Load a Neon scalar operand into a new temp.  'reg' is a combined
 * register+element encoding; for size == 1 (16-bit scalars) the selected
 * halfword is duplicated into both halves of the result, otherwise the
 * whole 32-bit element is loaded.  Caller must free the returned temp.
 */
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
4509
/* Emit a VUZP (unzip/de-interleave) of registers rd and rm for element
 * size 'size'; q selects the quadword form.  Returns 1 for the invalid
 * doubleword 32-bit-element case (caller UNDEFs), 0 otherwise.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4548
/* Emit a VZIP (zip/interleave) of registers rd and rm for element size
 * 'size'; q selects the quadword form.  Returns 1 for the invalid
 * doubleword 32-bit-element case (caller UNDEFs), 0 otherwise.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4587
/* In-place byte transpose of a 32-bit lane pair (VTRN.8 step):
 * t0' = (t0 << 8 masked to odd bytes) | (even bytes of t1)
 * t1' = (t1 >> 8 masked to even bytes) | (odd bytes of t0)
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4609
/* In-place halfword transpose of a 32-bit lane pair (VTRN.16 step):
 * t0' = (t0 << 16) | (low half of t1)
 * t1' = (t1 >> 16) | (high half of t0)
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4628
4629
/* Per-op description of the Neon "load/store multiple structures" forms,
 * indexed by op = insn bits [11:8] (0..10, see disas_neon_ls_insn):
 * number of registers transferred, element interleave factor, and
 * register spacing between structure members.
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4647
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid (the caller will then UNDEF).
   Covers three encodings: "load/store multiple structures" (bit 23 clear),
   "load single element to all lanes", and "load/store single element",
   plus the optional post-indexed writeback of the base register rn.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved forms restart from the base with a per-register
             * byte offset; sequential forms just keep advancing addr.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access.  */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Narrower elements: assemble each 32-bit "pass" of the
                 * D register from 1, 2 or 4 memory accesses.
                 */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the existing
                         * register contents at the selected lane.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Post-indexed writeback: rm == 15 means no writeback, rm == 13 means
     * advance rn by the transfer size, otherwise add register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4976
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    /* dest = (t & c) | (f & ~c): each bit of c picks t or f.  */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4984
/* Plain (truncating) narrow of a 64-bit vector source to 32 bits of
 * packed elements; 'size' is the destination element size (0/1 via
 * helpers, 2 = simple low-half extract).
 */
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}
4994
/* Signed saturating narrow (helpers take cpu_env so they can set QC).  */
static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
5004
/* Unsigned saturating narrow (helpers take cpu_env so they can set QC).  */
static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
5014
/* Signed-to-unsigned saturating narrow (VQMOVUN-style; helpers take
 * cpu_env so they can set QC).
 */
static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
5024
/* Element-wise shift of 'var' by 'shift' for 16- or 32-bit elements,
 * used on the narrow-shift paths: q selects the rounding (rshl) vs
 * plain (shl) helper family, u selects unsigned vs signed.  size 0
 * is not valid here and aborts.
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
5058
/* Widen a 32-bit source of packed elements into a 64-bit destination,
 * zero-extending (u) or sign-extending each element; size 2 is a plain
 * 32->64 extension.  Frees src (marks it dead).
 */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
5078
5079static inline void gen_neon_addl(int size)
5080{
5081 switch (size) {
5082 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5083 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5084 case 2: tcg_gen_add_i64(CPU_V001); break;
5085 default: abort();
5086 }
5087}
5088
5089static inline void gen_neon_subl(int size)
5090{
5091 switch (size) {
5092 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5093 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5094 case 2: tcg_gen_sub_i64(CPU_V001); break;
5095 default: abort();
5096 }
5097}
5098
/* Negate the packed elements of a 64-bit value in place; size 2 is a
 * plain 64-bit negate.
 */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
5110
a7812ae4 5111static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5112{
5113 switch (size) {
02da0b2d
PM
5114 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5115 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5116 default: abort();
5117 }
5118}
5119
39d5492a
PM
5120static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5121 int size, int u)
ad69471c 5122{
a7812ae4 5123 TCGv_i64 tmp;
ad69471c
PB
5124
5125 switch ((size << 1) | u) {
5126 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5127 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5128 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5129 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5130 case 4:
5131 tmp = gen_muls_i64_i32(a, b);
5132 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5133 tcg_temp_free_i64(tmp);
ad69471c
PB
5134 break;
5135 case 5:
5136 tmp = gen_mulu_i64_i32(a, b);
5137 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5138 tcg_temp_free_i64(tmp);
ad69471c
PB
5139 break;
5140 default: abort();
5141 }
c6067f04
CL
5142
5143 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5144 Don't forget to clean them now. */
5145 if (size < 2) {
7d1b0095
PM
5146 tcg_temp_free_i32(a);
5147 tcg_temp_free_i32(b);
c6067f04 5148 }
ad69471c
PB
5149}
5150
39d5492a
PM
5151static void gen_neon_narrow_op(int op, int u, int size,
5152 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5153{
5154 if (op) {
5155 if (u) {
5156 gen_neon_unarrow_sats(size, dest, src);
5157 } else {
5158 gen_neon_narrow(size, dest, src);
5159 }
5160 } else {
5161 if (u) {
5162 gen_neon_narrow_satu(size, dest, src);
5163 } else {
5164 gen_neon_narrow_sats(size, dest, src);
5165 }
5166 }
5167}
5168
62698be3
PM
5169/* Symbolic constants for op fields for Neon 3-register same-length.
5170 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5171 * table A7-9.
5172 */
5173#define NEON_3R_VHADD 0
5174#define NEON_3R_VQADD 1
5175#define NEON_3R_VRHADD 2
5176#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5177#define NEON_3R_VHSUB 4
5178#define NEON_3R_VQSUB 5
5179#define NEON_3R_VCGT 6
5180#define NEON_3R_VCGE 7
5181#define NEON_3R_VSHL 8
5182#define NEON_3R_VQSHL 9
5183#define NEON_3R_VRSHL 10
5184#define NEON_3R_VQRSHL 11
5185#define NEON_3R_VMAX 12
5186#define NEON_3R_VMIN 13
5187#define NEON_3R_VABD 14
5188#define NEON_3R_VABA 15
5189#define NEON_3R_VADD_VSUB 16
5190#define NEON_3R_VTST_VCEQ 17
5191#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5192#define NEON_3R_VMUL 19
5193#define NEON_3R_VPMAX 20
5194#define NEON_3R_VPMIN 21
5195#define NEON_3R_VQDMULH_VQRDMULH 22
5196#define NEON_3R_VPADD 23
f1ecb913 5197#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
da97f52c 5198#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
5199#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5200#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5201#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5202#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5203#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
505935fc 5204#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
62698be3
PM
5205
/* Each entry has bit n set if the 3-reg-same insn allows size value n
 * (the decoder UNDEFs when neon_3r_sizes[op] & (1 << size) is clear);
 * 0x7 permits sizes 0-2, 0xf all four, 0x5 only the even encodings.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5240
600b828c
PM
5241/* Symbolic constants for op fields for Neon 2-register miscellaneous.
5242 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5243 * table A7-13.
5244 */
5245#define NEON_2RM_VREV64 0
5246#define NEON_2RM_VREV32 1
5247#define NEON_2RM_VREV16 2
5248#define NEON_2RM_VPADDL 4
5249#define NEON_2RM_VPADDL_U 5
9d935509
AB
5250#define NEON_2RM_AESE 6 /* Includes AESD */
5251#define NEON_2RM_AESMC 7 /* Includes AESIMC */
600b828c
PM
5252#define NEON_2RM_VCLS 8
5253#define NEON_2RM_VCLZ 9
5254#define NEON_2RM_VCNT 10
5255#define NEON_2RM_VMVN 11
5256#define NEON_2RM_VPADAL 12
5257#define NEON_2RM_VPADAL_U 13
5258#define NEON_2RM_VQABS 14
5259#define NEON_2RM_VQNEG 15
5260#define NEON_2RM_VCGT0 16
5261#define NEON_2RM_VCGE0 17
5262#define NEON_2RM_VCEQ0 18
5263#define NEON_2RM_VCLE0 19
5264#define NEON_2RM_VCLT0 20
f1ecb913 5265#define NEON_2RM_SHA1H 21
600b828c
PM
5266#define NEON_2RM_VABS 22
5267#define NEON_2RM_VNEG 23
5268#define NEON_2RM_VCGT0_F 24
5269#define NEON_2RM_VCGE0_F 25
5270#define NEON_2RM_VCEQ0_F 26
5271#define NEON_2RM_VCLE0_F 27
5272#define NEON_2RM_VCLT0_F 28
5273#define NEON_2RM_VABS_F 30
5274#define NEON_2RM_VNEG_F 31
5275#define NEON_2RM_VSWP 32
5276#define NEON_2RM_VTRN 33
5277#define NEON_2RM_VUZP 34
5278#define NEON_2RM_VZIP 35
5279#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5280#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5281#define NEON_2RM_VSHLL 38
f1ecb913 5282#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
34f7b0a2 5283#define NEON_2RM_VRINTN 40
2ce70625 5284#define NEON_2RM_VRINTX 41
34f7b0a2
WN
5285#define NEON_2RM_VRINTA 42
5286#define NEON_2RM_VRINTZ 43
600b828c 5287#define NEON_2RM_VCVT_F16_F32 44
34f7b0a2 5288#define NEON_2RM_VRINTM 45
600b828c 5289#define NEON_2RM_VCVT_F32_F16 46
34f7b0a2 5290#define NEON_2RM_VRINTP 47
901ad525
WN
5291#define NEON_2RM_VCVTAU 48
5292#define NEON_2RM_VCVTAS 49
5293#define NEON_2RM_VCVTNU 50
5294#define NEON_2RM_VCVTNS 51
5295#define NEON_2RM_VCVTPU 52
5296#define NEON_2RM_VCVTPS 53
5297#define NEON_2RM_VCVTMU 54
5298#define NEON_2RM_VCVTMS 55
600b828c
PM
5299#define NEON_2RM_VRECPE 56
5300#define NEON_2RM_VRSQRTE 57
5301#define NEON_2RM_VRECPE_F 58
5302#define NEON_2RM_VRSQRTE_F 59
5303#define NEON_2RM_VCVT_FS 60
5304#define NEON_2RM_VCVT_FU 61
5305#define NEON_2RM_VCVT_SF 62
5306#define NEON_2RM_VCVT_UF 63
5307
5308static int neon_2rm_is_float_op(int op)
5309{
5310 /* Return true if this neon 2reg-misc op is float-to-float */
5311 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5312 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5313 op == NEON_2RM_VRINTM ||
5314 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5315 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5316}
5317
fe8fcf3d
PM
5318static bool neon_2rm_is_v8_op(int op)
5319{
5320 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5321 switch (op) {
5322 case NEON_2RM_VRINTN:
5323 case NEON_2RM_VRINTA:
5324 case NEON_2RM_VRINTM:
5325 case NEON_2RM_VRINTP:
5326 case NEON_2RM_VRINTZ:
5327 case NEON_2RM_VRINTX:
5328 case NEON_2RM_VCVTAU:
5329 case NEON_2RM_VCVTAS:
5330 case NEON_2RM_VCVTNU:
5331 case NEON_2RM_VCVTNS:
5332 case NEON_2RM_VCVTPU:
5333 case NEON_2RM_VCVTPS:
5334 case NEON_2RM_VCVTMU:
5335 case NEON_2RM_VCVTMS:
5336 return true;
5337 default:
5338 return false;
5339 }
5340}
5341
600b828c
PM
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x7 = sizes 0-2, 0x4 = size 2 only, 0x2 = size 1 only,
 * 0x1 = size 0 only.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    /* Floating-point and v8 rounding/convert ops: size 2 (32-bit) only. */
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5410
9ee6e8bb
PB
5411/* Translate a NEON data processing instruction. Return nonzero if the
5412 instruction is invalid.
ad69471c
PB
5413 We process data in a mixture of 32-bit and 64-bit chunks.
5414 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5415
7dcc1f89 5416static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5417{
5418 int op;
5419 int q;
5420 int rd, rn, rm;
5421 int size;
5422 int shift;
5423 int pass;
5424 int count;
5425 int pairwise;
5426 int u;
ca9a32e4 5427 uint32_t imm, mask;
39d5492a 5428 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5429 TCGv_i64 tmp64;
9ee6e8bb 5430
2c7ffc41
PM
5431 /* FIXME: this access check should not take precedence over UNDEF
5432 * for invalid encodings; we will generate incorrect syndrome information
5433 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5434 */
9dbbc748 5435 if (s->fp_excp_el) {
2c7ffc41 5436 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5437 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5438 return 0;
5439 }
5440
5df8bac1 5441 if (!s->vfp_enabled)
9ee6e8bb
PB
5442 return 1;
5443 q = (insn & (1 << 6)) != 0;
5444 u = (insn >> 24) & 1;
5445 VFP_DREG_D(rd, insn);
5446 VFP_DREG_N(rn, insn);
5447 VFP_DREG_M(rm, insn);
5448 size = (insn >> 20) & 3;
5449 if ((insn & (1 << 23)) == 0) {
5450 /* Three register same length. */
5451 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5452 /* Catch invalid op and bad size combinations: UNDEF */
5453 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5454 return 1;
5455 }
25f84f79
PM
5456 /* All insns of this form UNDEF for either this condition or the
5457 * superset of cases "Q==1"; we catch the latter later.
5458 */
5459 if (q && ((rd | rn | rm) & 1)) {
5460 return 1;
5461 }
f1ecb913
AB
5462 /*
5463 * The SHA-1/SHA-256 3-register instructions require special treatment
5464 * here, as their size field is overloaded as an op type selector, and
5465 * they all consume their input in a single pass.
5466 */
5467 if (op == NEON_3R_SHA) {
5468 if (!q) {
5469 return 1;
5470 }
5471 if (!u) { /* SHA-1 */
d614a513 5472 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5473 return 1;
5474 }
5475 tmp = tcg_const_i32(rd);
5476 tmp2 = tcg_const_i32(rn);
5477 tmp3 = tcg_const_i32(rm);
5478 tmp4 = tcg_const_i32(size);
5479 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5480 tcg_temp_free_i32(tmp4);
5481 } else { /* SHA-256 */
d614a513 5482 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5483 return 1;
5484 }
5485 tmp = tcg_const_i32(rd);
5486 tmp2 = tcg_const_i32(rn);
5487 tmp3 = tcg_const_i32(rm);
5488 switch (size) {
5489 case 0:
5490 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5491 break;
5492 case 1:
5493 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5494 break;
5495 case 2:
5496 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5497 break;
5498 }
5499 }
5500 tcg_temp_free_i32(tmp);
5501 tcg_temp_free_i32(tmp2);
5502 tcg_temp_free_i32(tmp3);
5503 return 0;
5504 }
62698be3
PM
5505 if (size == 3 && op != NEON_3R_LOGIC) {
5506 /* 64-bit element instructions. */
9ee6e8bb 5507 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5508 neon_load_reg64(cpu_V0, rn + pass);
5509 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5510 switch (op) {
62698be3 5511 case NEON_3R_VQADD:
9ee6e8bb 5512 if (u) {
02da0b2d
PM
5513 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5514 cpu_V0, cpu_V1);
2c0262af 5515 } else {
02da0b2d
PM
5516 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5517 cpu_V0, cpu_V1);
2c0262af 5518 }
9ee6e8bb 5519 break;
62698be3 5520 case NEON_3R_VQSUB:
9ee6e8bb 5521 if (u) {
02da0b2d
PM
5522 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5523 cpu_V0, cpu_V1);
ad69471c 5524 } else {
02da0b2d
PM
5525 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5526 cpu_V0, cpu_V1);
ad69471c
PB
5527 }
5528 break;
62698be3 5529 case NEON_3R_VSHL:
ad69471c
PB
5530 if (u) {
5531 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5532 } else {
5533 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5534 }
5535 break;
62698be3 5536 case NEON_3R_VQSHL:
ad69471c 5537 if (u) {
02da0b2d
PM
5538 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5539 cpu_V1, cpu_V0);
ad69471c 5540 } else {
02da0b2d
PM
5541 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5542 cpu_V1, cpu_V0);
ad69471c
PB
5543 }
5544 break;
62698be3 5545 case NEON_3R_VRSHL:
ad69471c
PB
5546 if (u) {
5547 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5548 } else {
ad69471c
PB
5549 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5550 }
5551 break;
62698be3 5552 case NEON_3R_VQRSHL:
ad69471c 5553 if (u) {
02da0b2d
PM
5554 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5555 cpu_V1, cpu_V0);
ad69471c 5556 } else {
02da0b2d
PM
5557 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5558 cpu_V1, cpu_V0);
1e8d4eec 5559 }
9ee6e8bb 5560 break;
62698be3 5561 case NEON_3R_VADD_VSUB:
9ee6e8bb 5562 if (u) {
ad69471c 5563 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5564 } else {
ad69471c 5565 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5566 }
5567 break;
5568 default:
5569 abort();
2c0262af 5570 }
ad69471c 5571 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5572 }
9ee6e8bb 5573 return 0;
2c0262af 5574 }
25f84f79 5575 pairwise = 0;
9ee6e8bb 5576 switch (op) {
62698be3
PM
5577 case NEON_3R_VSHL:
5578 case NEON_3R_VQSHL:
5579 case NEON_3R_VRSHL:
5580 case NEON_3R_VQRSHL:
9ee6e8bb 5581 {
ad69471c
PB
5582 int rtmp;
5583 /* Shift instruction operands are reversed. */
5584 rtmp = rn;
9ee6e8bb 5585 rn = rm;
ad69471c 5586 rm = rtmp;
9ee6e8bb 5587 }
2c0262af 5588 break;
25f84f79
PM
5589 case NEON_3R_VPADD:
5590 if (u) {
5591 return 1;
5592 }
5593 /* Fall through */
62698be3
PM
5594 case NEON_3R_VPMAX:
5595 case NEON_3R_VPMIN:
9ee6e8bb 5596 pairwise = 1;
2c0262af 5597 break;
25f84f79
PM
5598 case NEON_3R_FLOAT_ARITH:
5599 pairwise = (u && size < 2); /* if VPADD (float) */
5600 break;
5601 case NEON_3R_FLOAT_MINMAX:
5602 pairwise = u; /* if VPMIN/VPMAX (float) */
5603 break;
5604 case NEON_3R_FLOAT_CMP:
5605 if (!u && size) {
5606 /* no encoding for U=0 C=1x */
5607 return 1;
5608 }
5609 break;
5610 case NEON_3R_FLOAT_ACMP:
5611 if (!u) {
5612 return 1;
5613 }
5614 break;
505935fc
WN
5615 case NEON_3R_FLOAT_MISC:
5616 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5617 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5618 return 1;
5619 }
2c0262af 5620 break;
25f84f79
PM
5621 case NEON_3R_VMUL:
5622 if (u && (size != 0)) {
5623 /* UNDEF on invalid size for polynomial subcase */
5624 return 1;
5625 }
2c0262af 5626 break;
da97f52c 5627 case NEON_3R_VFM:
d614a513 5628 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5629 return 1;
5630 }
5631 break;
9ee6e8bb 5632 default:
2c0262af 5633 break;
9ee6e8bb 5634 }
dd8fbd78 5635
25f84f79
PM
5636 if (pairwise && q) {
5637 /* All the pairwise insns UNDEF if Q is set */
5638 return 1;
5639 }
5640
9ee6e8bb
PB
5641 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5642
5643 if (pairwise) {
5644 /* Pairwise. */
a5a14945
JR
5645 if (pass < 1) {
5646 tmp = neon_load_reg(rn, 0);
5647 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5648 } else {
a5a14945
JR
5649 tmp = neon_load_reg(rm, 0);
5650 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5651 }
5652 } else {
5653 /* Elementwise. */
dd8fbd78
FN
5654 tmp = neon_load_reg(rn, pass);
5655 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5656 }
5657 switch (op) {
62698be3 5658 case NEON_3R_VHADD:
9ee6e8bb
PB
5659 GEN_NEON_INTEGER_OP(hadd);
5660 break;
62698be3 5661 case NEON_3R_VQADD:
02da0b2d 5662 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5663 break;
62698be3 5664 case NEON_3R_VRHADD:
9ee6e8bb 5665 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5666 break;
62698be3 5667 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5668 switch ((u << 2) | size) {
5669 case 0: /* VAND */
dd8fbd78 5670 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5671 break;
5672 case 1: /* BIC */
f669df27 5673 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5674 break;
5675 case 2: /* VORR */
dd8fbd78 5676 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5677 break;
5678 case 3: /* VORN */
f669df27 5679 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5680 break;
5681 case 4: /* VEOR */
dd8fbd78 5682 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5683 break;
5684 case 5: /* VBSL */
dd8fbd78
FN
5685 tmp3 = neon_load_reg(rd, pass);
5686 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5687 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5688 break;
5689 case 6: /* VBIT */
dd8fbd78
FN
5690 tmp3 = neon_load_reg(rd, pass);
5691 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5692 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5693 break;
5694 case 7: /* VBIF */
dd8fbd78
FN
5695 tmp3 = neon_load_reg(rd, pass);
5696 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5697 tcg_temp_free_i32(tmp3);
9ee6e8bb 5698 break;
2c0262af
FB
5699 }
5700 break;
62698be3 5701 case NEON_3R_VHSUB:
9ee6e8bb
PB
5702 GEN_NEON_INTEGER_OP(hsub);
5703 break;
62698be3 5704 case NEON_3R_VQSUB:
02da0b2d 5705 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5706 break;
62698be3 5707 case NEON_3R_VCGT:
9ee6e8bb
PB
5708 GEN_NEON_INTEGER_OP(cgt);
5709 break;
62698be3 5710 case NEON_3R_VCGE:
9ee6e8bb
PB
5711 GEN_NEON_INTEGER_OP(cge);
5712 break;
62698be3 5713 case NEON_3R_VSHL:
ad69471c 5714 GEN_NEON_INTEGER_OP(shl);
2c0262af 5715 break;
62698be3 5716 case NEON_3R_VQSHL:
02da0b2d 5717 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5718 break;
62698be3 5719 case NEON_3R_VRSHL:
ad69471c 5720 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5721 break;
62698be3 5722 case NEON_3R_VQRSHL:
02da0b2d 5723 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5724 break;
62698be3 5725 case NEON_3R_VMAX:
9ee6e8bb
PB
5726 GEN_NEON_INTEGER_OP(max);
5727 break;
62698be3 5728 case NEON_3R_VMIN:
9ee6e8bb
PB
5729 GEN_NEON_INTEGER_OP(min);
5730 break;
62698be3 5731 case NEON_3R_VABD:
9ee6e8bb
PB
5732 GEN_NEON_INTEGER_OP(abd);
5733 break;
62698be3 5734 case NEON_3R_VABA:
9ee6e8bb 5735 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5736 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5737 tmp2 = neon_load_reg(rd, pass);
5738 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5739 break;
62698be3 5740 case NEON_3R_VADD_VSUB:
9ee6e8bb 5741 if (!u) { /* VADD */
62698be3 5742 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5743 } else { /* VSUB */
5744 switch (size) {
dd8fbd78
FN
5745 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5746 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5747 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5748 default: abort();
9ee6e8bb
PB
5749 }
5750 }
5751 break;
62698be3 5752 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5753 if (!u) { /* VTST */
5754 switch (size) {
dd8fbd78
FN
5755 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5756 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5757 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5758 default: abort();
9ee6e8bb
PB
5759 }
5760 } else { /* VCEQ */
5761 switch (size) {
dd8fbd78
FN
5762 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5763 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5764 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5765 default: abort();
9ee6e8bb
PB
5766 }
5767 }
5768 break;
62698be3 5769 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5770 switch (size) {
dd8fbd78
FN
5771 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5772 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5773 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5774 default: abort();
9ee6e8bb 5775 }
7d1b0095 5776 tcg_temp_free_i32(tmp2);
dd8fbd78 5777 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5778 if (u) { /* VMLS */
dd8fbd78 5779 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5780 } else { /* VMLA */
dd8fbd78 5781 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5782 }
5783 break;
62698be3 5784 case NEON_3R_VMUL:
9ee6e8bb 5785 if (u) { /* polynomial */
dd8fbd78 5786 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5787 } else { /* Integer */
5788 switch (size) {
dd8fbd78
FN
5789 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5790 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5791 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5792 default: abort();
9ee6e8bb
PB
5793 }
5794 }
5795 break;
62698be3 5796 case NEON_3R_VPMAX:
9ee6e8bb
PB
5797 GEN_NEON_INTEGER_OP(pmax);
5798 break;
62698be3 5799 case NEON_3R_VPMIN:
9ee6e8bb
PB
5800 GEN_NEON_INTEGER_OP(pmin);
5801 break;
62698be3 5802 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5803 if (!u) { /* VQDMULH */
5804 switch (size) {
02da0b2d
PM
5805 case 1:
5806 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5807 break;
5808 case 2:
5809 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5810 break;
62698be3 5811 default: abort();
9ee6e8bb 5812 }
62698be3 5813 } else { /* VQRDMULH */
9ee6e8bb 5814 switch (size) {
02da0b2d
PM
5815 case 1:
5816 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5817 break;
5818 case 2:
5819 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5820 break;
62698be3 5821 default: abort();
9ee6e8bb
PB
5822 }
5823 }
5824 break;
62698be3 5825 case NEON_3R_VPADD:
9ee6e8bb 5826 switch (size) {
dd8fbd78
FN
5827 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5828 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5829 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5830 default: abort();
9ee6e8bb
PB
5831 }
5832 break;
62698be3 5833 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5834 {
5835 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5836 switch ((u << 2) | size) {
5837 case 0: /* VADD */
aa47cfdd
PM
5838 case 4: /* VPADD */
5839 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5840 break;
5841 case 2: /* VSUB */
aa47cfdd 5842 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5843 break;
5844 case 6: /* VABD */
aa47cfdd 5845 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5846 break;
5847 default:
62698be3 5848 abort();
9ee6e8bb 5849 }
aa47cfdd 5850 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5851 break;
aa47cfdd 5852 }
62698be3 5853 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5854 {
5855 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5856 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5857 if (!u) {
7d1b0095 5858 tcg_temp_free_i32(tmp2);
dd8fbd78 5859 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5860 if (size == 0) {
aa47cfdd 5861 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5862 } else {
aa47cfdd 5863 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5864 }
5865 }
aa47cfdd 5866 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5867 break;
aa47cfdd 5868 }
62698be3 5869 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5870 {
5871 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5872 if (!u) {
aa47cfdd 5873 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5874 } else {
aa47cfdd
PM
5875 if (size == 0) {
5876 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5877 } else {
5878 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5879 }
b5ff1b31 5880 }
aa47cfdd 5881 tcg_temp_free_ptr(fpstatus);
2c0262af 5882 break;
aa47cfdd 5883 }
62698be3 5884 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5885 {
5886 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5887 if (size == 0) {
5888 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5889 } else {
5890 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5891 }
5892 tcg_temp_free_ptr(fpstatus);
2c0262af 5893 break;
aa47cfdd 5894 }
62698be3 5895 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5896 {
5897 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5898 if (size == 0) {
f71a2ae5 5899 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5900 } else {
f71a2ae5 5901 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5902 }
5903 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5904 break;
aa47cfdd 5905 }
505935fc
WN
5906 case NEON_3R_FLOAT_MISC:
5907 if (u) {
5908 /* VMAXNM/VMINNM */
5909 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5910 if (size == 0) {
f71a2ae5 5911 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5912 } else {
f71a2ae5 5913 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5914 }
5915 tcg_temp_free_ptr(fpstatus);
5916 } else {
5917 if (size == 0) {
5918 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5919 } else {
5920 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5921 }
5922 }
2c0262af 5923 break;
da97f52c
PM
5924 case NEON_3R_VFM:
5925 {
5926 /* VFMA, VFMS: fused multiply-add */
5927 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5928 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5929 if (size) {
5930 /* VFMS */
5931 gen_helper_vfp_negs(tmp, tmp);
5932 }
5933 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5934 tcg_temp_free_i32(tmp3);
5935 tcg_temp_free_ptr(fpstatus);
5936 break;
5937 }
9ee6e8bb
PB
5938 default:
5939 abort();
2c0262af 5940 }
7d1b0095 5941 tcg_temp_free_i32(tmp2);
dd8fbd78 5942
9ee6e8bb
PB
5943 /* Save the result. For elementwise operations we can put it
5944 straight into the destination register. For pairwise operations
5945 we have to be careful to avoid clobbering the source operands. */
5946 if (pairwise && rd == rm) {
dd8fbd78 5947 neon_store_scratch(pass, tmp);
9ee6e8bb 5948 } else {
dd8fbd78 5949 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5950 }
5951
5952 } /* for pass */
5953 if (pairwise && rd == rm) {
5954 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5955 tmp = neon_load_scratch(pass);
5956 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5957 }
5958 }
ad69471c 5959 /* End of 3 register same size operations. */
9ee6e8bb
PB
5960 } else if (insn & (1 << 4)) {
5961 if ((insn & 0x00380080) != 0) {
5962 /* Two registers and shift. */
5963 op = (insn >> 8) & 0xf;
5964 if (insn & (1 << 7)) {
cc13115b
PM
5965 /* 64-bit shift. */
5966 if (op > 7) {
5967 return 1;
5968 }
9ee6e8bb
PB
5969 size = 3;
5970 } else {
5971 size = 2;
5972 while ((insn & (1 << (size + 19))) == 0)
5973 size--;
5974 }
5975 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5976 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5977 by immediate using the variable shift operations. */
5978 if (op < 8) {
5979 /* Shift by immediate:
5980 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5981 if (q && ((rd | rm) & 1)) {
5982 return 1;
5983 }
5984 if (!u && (op == 4 || op == 6)) {
5985 return 1;
5986 }
9ee6e8bb
PB
5987 /* Right shifts are encoded as N - shift, where N is the
5988 element size in bits. */
5989 if (op <= 4)
5990 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5991 if (size == 3) {
5992 count = q + 1;
5993 } else {
5994 count = q ? 4: 2;
5995 }
5996 switch (size) {
5997 case 0:
5998 imm = (uint8_t) shift;
5999 imm |= imm << 8;
6000 imm |= imm << 16;
6001 break;
6002 case 1:
6003 imm = (uint16_t) shift;
6004 imm |= imm << 16;
6005 break;
6006 case 2:
6007 case 3:
6008 imm = shift;
6009 break;
6010 default:
6011 abort();
6012 }
6013
6014 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6015 if (size == 3) {
6016 neon_load_reg64(cpu_V0, rm + pass);
6017 tcg_gen_movi_i64(cpu_V1, imm);
6018 switch (op) {
6019 case 0: /* VSHR */
6020 case 1: /* VSRA */
6021 if (u)
6022 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6023 else
ad69471c 6024 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6025 break;
ad69471c
PB
6026 case 2: /* VRSHR */
6027 case 3: /* VRSRA */
6028 if (u)
6029 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6030 else
ad69471c 6031 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6032 break;
ad69471c 6033 case 4: /* VSRI */
ad69471c
PB
6034 case 5: /* VSHL, VSLI */
6035 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6036 break;
0322b26e 6037 case 6: /* VQSHLU */
02da0b2d
PM
6038 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6039 cpu_V0, cpu_V1);
ad69471c 6040 break;
0322b26e
PM
6041 case 7: /* VQSHL */
6042 if (u) {
02da0b2d 6043 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6044 cpu_V0, cpu_V1);
6045 } else {
02da0b2d 6046 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6047 cpu_V0, cpu_V1);
6048 }
9ee6e8bb 6049 break;
9ee6e8bb 6050 }
ad69471c
PB
6051 if (op == 1 || op == 3) {
6052 /* Accumulate. */
5371cb81 6053 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6054 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6055 } else if (op == 4 || (op == 5 && u)) {
6056 /* Insert */
923e6509
CL
6057 neon_load_reg64(cpu_V1, rd + pass);
6058 uint64_t mask;
6059 if (shift < -63 || shift > 63) {
6060 mask = 0;
6061 } else {
6062 if (op == 4) {
6063 mask = 0xffffffffffffffffull >> -shift;
6064 } else {
6065 mask = 0xffffffffffffffffull << shift;
6066 }
6067 }
6068 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6069 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6070 }
6071 neon_store_reg64(cpu_V0, rd + pass);
6072 } else { /* size < 3 */
6073 /* Operands in T0 and T1. */
dd8fbd78 6074 tmp = neon_load_reg(rm, pass);
7d1b0095 6075 tmp2 = tcg_temp_new_i32();
dd8fbd78 6076 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6077 switch (op) {
6078 case 0: /* VSHR */
6079 case 1: /* VSRA */
6080 GEN_NEON_INTEGER_OP(shl);
6081 break;
6082 case 2: /* VRSHR */
6083 case 3: /* VRSRA */
6084 GEN_NEON_INTEGER_OP(rshl);
6085 break;
6086 case 4: /* VSRI */
ad69471c
PB
6087 case 5: /* VSHL, VSLI */
6088 switch (size) {
dd8fbd78
FN
6089 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6090 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6091 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6092 default: abort();
ad69471c
PB
6093 }
6094 break;
0322b26e 6095 case 6: /* VQSHLU */
ad69471c 6096 switch (size) {
0322b26e 6097 case 0:
02da0b2d
PM
6098 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6099 tmp, tmp2);
0322b26e
PM
6100 break;
6101 case 1:
02da0b2d
PM
6102 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6103 tmp, tmp2);
0322b26e
PM
6104 break;
6105 case 2:
02da0b2d
PM
6106 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6107 tmp, tmp2);
0322b26e
PM
6108 break;
6109 default:
cc13115b 6110 abort();
ad69471c
PB
6111 }
6112 break;
0322b26e 6113 case 7: /* VQSHL */
02da0b2d 6114 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6115 break;
ad69471c 6116 }
7d1b0095 6117 tcg_temp_free_i32(tmp2);
ad69471c
PB
6118
6119 if (op == 1 || op == 3) {
6120 /* Accumulate. */
dd8fbd78 6121 tmp2 = neon_load_reg(rd, pass);
5371cb81 6122 gen_neon_add(size, tmp, tmp2);
7d1b0095 6123 tcg_temp_free_i32(tmp2);
ad69471c
PB
6124 } else if (op == 4 || (op == 5 && u)) {
6125 /* Insert */
6126 switch (size) {
6127 case 0:
6128 if (op == 4)
ca9a32e4 6129 mask = 0xff >> -shift;
ad69471c 6130 else
ca9a32e4
JR
6131 mask = (uint8_t)(0xff << shift);
6132 mask |= mask << 8;
6133 mask |= mask << 16;
ad69471c
PB
6134 break;
6135 case 1:
6136 if (op == 4)
ca9a32e4 6137 mask = 0xffff >> -shift;
ad69471c 6138 else
ca9a32e4
JR
6139 mask = (uint16_t)(0xffff << shift);
6140 mask |= mask << 16;
ad69471c
PB
6141 break;
6142 case 2:
ca9a32e4
JR
6143 if (shift < -31 || shift > 31) {
6144 mask = 0;
6145 } else {
6146 if (op == 4)
6147 mask = 0xffffffffu >> -shift;
6148 else
6149 mask = 0xffffffffu << shift;
6150 }
ad69471c
PB
6151 break;
6152 default:
6153 abort();
6154 }
dd8fbd78 6155 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6156 tcg_gen_andi_i32(tmp, tmp, mask);
6157 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6158 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6159 tcg_temp_free_i32(tmp2);
ad69471c 6160 }
dd8fbd78 6161 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6162 }
6163 } /* for pass */
6164 } else if (op < 10) {
ad69471c 6165 /* Shift by immediate and narrow:
9ee6e8bb 6166 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6167 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6168 if (rm & 1) {
6169 return 1;
6170 }
9ee6e8bb
PB
6171 shift = shift - (1 << (size + 3));
6172 size++;
92cdfaeb 6173 if (size == 3) {
a7812ae4 6174 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6175 neon_load_reg64(cpu_V0, rm);
6176 neon_load_reg64(cpu_V1, rm + 1);
6177 for (pass = 0; pass < 2; pass++) {
6178 TCGv_i64 in;
6179 if (pass == 0) {
6180 in = cpu_V0;
6181 } else {
6182 in = cpu_V1;
6183 }
ad69471c 6184 if (q) {
0b36f4cd 6185 if (input_unsigned) {
92cdfaeb 6186 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6187 } else {
92cdfaeb 6188 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6189 }
ad69471c 6190 } else {
0b36f4cd 6191 if (input_unsigned) {
92cdfaeb 6192 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6193 } else {
92cdfaeb 6194 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6195 }
ad69471c 6196 }
7d1b0095 6197 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6198 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6199 neon_store_reg(rd, pass, tmp);
6200 } /* for pass */
6201 tcg_temp_free_i64(tmp64);
6202 } else {
6203 if (size == 1) {
6204 imm = (uint16_t)shift;
6205 imm |= imm << 16;
2c0262af 6206 } else {
92cdfaeb
PM
6207 /* size == 2 */
6208 imm = (uint32_t)shift;
6209 }
6210 tmp2 = tcg_const_i32(imm);
6211 tmp4 = neon_load_reg(rm + 1, 0);
6212 tmp5 = neon_load_reg(rm + 1, 1);
6213 for (pass = 0; pass < 2; pass++) {
6214 if (pass == 0) {
6215 tmp = neon_load_reg(rm, 0);
6216 } else {
6217 tmp = tmp4;
6218 }
0b36f4cd
CL
6219 gen_neon_shift_narrow(size, tmp, tmp2, q,
6220 input_unsigned);
92cdfaeb
PM
6221 if (pass == 0) {
6222 tmp3 = neon_load_reg(rm, 1);
6223 } else {
6224 tmp3 = tmp5;
6225 }
0b36f4cd
CL
6226 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6227 input_unsigned);
36aa55dc 6228 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6229 tcg_temp_free_i32(tmp);
6230 tcg_temp_free_i32(tmp3);
6231 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6232 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6233 neon_store_reg(rd, pass, tmp);
6234 } /* for pass */
c6067f04 6235 tcg_temp_free_i32(tmp2);
b75263d6 6236 }
9ee6e8bb 6237 } else if (op == 10) {
cc13115b
PM
6238 /* VSHLL, VMOVL */
6239 if (q || (rd & 1)) {
9ee6e8bb 6240 return 1;
cc13115b 6241 }
ad69471c
PB
6242 tmp = neon_load_reg(rm, 0);
6243 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6244 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6245 if (pass == 1)
6246 tmp = tmp2;
6247
6248 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6249
9ee6e8bb
PB
6250 if (shift != 0) {
6251 /* The shift is less than the width of the source
ad69471c
PB
6252 type, so we can just shift the whole register. */
6253 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6254 /* Widen the result of shift: we need to clear
6255 * the potential overflow bits resulting from
6256 * left bits of the narrow input appearing as
6257 * right bits of left the neighbour narrow
6258 * input. */
ad69471c
PB
6259 if (size < 2 || !u) {
6260 uint64_t imm64;
6261 if (size == 0) {
6262 imm = (0xffu >> (8 - shift));
6263 imm |= imm << 16;
acdf01ef 6264 } else if (size == 1) {
ad69471c 6265 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6266 } else {
6267 /* size == 2 */
6268 imm = 0xffffffff >> (32 - shift);
6269 }
6270 if (size < 2) {
6271 imm64 = imm | (((uint64_t)imm) << 32);
6272 } else {
6273 imm64 = imm;
9ee6e8bb 6274 }
acdf01ef 6275 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6276 }
6277 }
ad69471c 6278 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6279 }
f73534a5 6280 } else if (op >= 14) {
9ee6e8bb 6281 /* VCVT fixed-point. */
cc13115b
PM
6282 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6283 return 1;
6284 }
f73534a5
PM
6285 /* We have already masked out the must-be-1 top bit of imm6,
6286 * hence this 32-shift where the ARM ARM has 64-imm6.
6287 */
6288 shift = 32 - shift;
9ee6e8bb 6289 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6290 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6291 if (!(op & 1)) {
9ee6e8bb 6292 if (u)
5500b06c 6293 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6294 else
5500b06c 6295 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6296 } else {
6297 if (u)
5500b06c 6298 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6299 else
5500b06c 6300 gen_vfp_tosl(0, shift, 1);
2c0262af 6301 }
4373f3ce 6302 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6303 }
6304 } else {
9ee6e8bb
PB
6305 return 1;
6306 }
6307 } else { /* (insn & 0x00380080) == 0 */
6308 int invert;
7d80fee5
PM
6309 if (q && (rd & 1)) {
6310 return 1;
6311 }
9ee6e8bb
PB
6312
6313 op = (insn >> 8) & 0xf;
6314 /* One register and immediate. */
6315 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6316 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6317 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6318 * We choose to not special-case this and will behave as if a
6319 * valid constant encoding of 0 had been given.
6320 */
9ee6e8bb
PB
6321 switch (op) {
6322 case 0: case 1:
6323 /* no-op */
6324 break;
6325 case 2: case 3:
6326 imm <<= 8;
6327 break;
6328 case 4: case 5:
6329 imm <<= 16;
6330 break;
6331 case 6: case 7:
6332 imm <<= 24;
6333 break;
6334 case 8: case 9:
6335 imm |= imm << 16;
6336 break;
6337 case 10: case 11:
6338 imm = (imm << 8) | (imm << 24);
6339 break;
6340 case 12:
8e31209e 6341 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6342 break;
6343 case 13:
6344 imm = (imm << 16) | 0xffff;
6345 break;
6346 case 14:
6347 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6348 if (invert)
6349 imm = ~imm;
6350 break;
6351 case 15:
7d80fee5
PM
6352 if (invert) {
6353 return 1;
6354 }
9ee6e8bb
PB
6355 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6356 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6357 break;
6358 }
6359 if (invert)
6360 imm = ~imm;
6361
9ee6e8bb
PB
6362 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6363 if (op & 1 && op < 12) {
ad69471c 6364 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6365 if (invert) {
6366 /* The immediate value has already been inverted, so
6367 BIC becomes AND. */
ad69471c 6368 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6369 } else {
ad69471c 6370 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6371 }
9ee6e8bb 6372 } else {
ad69471c 6373 /* VMOV, VMVN. */
7d1b0095 6374 tmp = tcg_temp_new_i32();
9ee6e8bb 6375 if (op == 14 && invert) {
a5a14945 6376 int n;
ad69471c
PB
6377 uint32_t val;
6378 val = 0;
9ee6e8bb
PB
6379 for (n = 0; n < 4; n++) {
6380 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6381 val |= 0xff << (n * 8);
9ee6e8bb 6382 }
ad69471c
PB
6383 tcg_gen_movi_i32(tmp, val);
6384 } else {
6385 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6386 }
9ee6e8bb 6387 }
ad69471c 6388 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6389 }
6390 }
e4b3861d 6391 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6392 if (size != 3) {
6393 op = (insn >> 8) & 0xf;
6394 if ((insn & (1 << 6)) == 0) {
6395 /* Three registers of different lengths. */
6396 int src1_wide;
6397 int src2_wide;
6398 int prewiden;
526d0096
PM
6399 /* undefreq: bit 0 : UNDEF if size == 0
6400 * bit 1 : UNDEF if size == 1
6401 * bit 2 : UNDEF if size == 2
6402 * bit 3 : UNDEF if U == 1
6403 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6404 */
6405 int undefreq;
6406 /* prewiden, src1_wide, src2_wide, undefreq */
6407 static const int neon_3reg_wide[16][4] = {
6408 {1, 0, 0, 0}, /* VADDL */
6409 {1, 1, 0, 0}, /* VADDW */
6410 {1, 0, 0, 0}, /* VSUBL */
6411 {1, 1, 0, 0}, /* VSUBW */
6412 {0, 1, 1, 0}, /* VADDHN */
6413 {0, 0, 0, 0}, /* VABAL */
6414 {0, 1, 1, 0}, /* VSUBHN */
6415 {0, 0, 0, 0}, /* VABDL */
6416 {0, 0, 0, 0}, /* VMLAL */
526d0096 6417 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6418 {0, 0, 0, 0}, /* VMLSL */
526d0096 6419 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6420 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6421 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6422 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6423 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6424 };
6425
6426 prewiden = neon_3reg_wide[op][0];
6427 src1_wide = neon_3reg_wide[op][1];
6428 src2_wide = neon_3reg_wide[op][2];
695272dc 6429 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6430
526d0096
PM
6431 if ((undefreq & (1 << size)) ||
6432 ((undefreq & 8) && u)) {
695272dc
PM
6433 return 1;
6434 }
6435 if ((src1_wide && (rn & 1)) ||
6436 (src2_wide && (rm & 1)) ||
6437 (!src2_wide && (rd & 1))) {
ad69471c 6438 return 1;
695272dc 6439 }
ad69471c 6440
4e624eda
PM
6441 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6442 * outside the loop below as it only performs a single pass.
6443 */
6444 if (op == 14 && size == 2) {
6445 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6446
d614a513 6447 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6448 return 1;
6449 }
6450 tcg_rn = tcg_temp_new_i64();
6451 tcg_rm = tcg_temp_new_i64();
6452 tcg_rd = tcg_temp_new_i64();
6453 neon_load_reg64(tcg_rn, rn);
6454 neon_load_reg64(tcg_rm, rm);
6455 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6456 neon_store_reg64(tcg_rd, rd);
6457 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6458 neon_store_reg64(tcg_rd, rd + 1);
6459 tcg_temp_free_i64(tcg_rn);
6460 tcg_temp_free_i64(tcg_rm);
6461 tcg_temp_free_i64(tcg_rd);
6462 return 0;
6463 }
6464
9ee6e8bb
PB
6465 /* Avoid overlapping operands. Wide source operands are
6466 always aligned so will never overlap with wide
6467 destinations in problematic ways. */
8f8e3aa4 6468 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6469 tmp = neon_load_reg(rm, 1);
6470 neon_store_scratch(2, tmp);
8f8e3aa4 6471 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6472 tmp = neon_load_reg(rn, 1);
6473 neon_store_scratch(2, tmp);
9ee6e8bb 6474 }
39d5492a 6475 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6476 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6477 if (src1_wide) {
6478 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6479 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6480 } else {
ad69471c 6481 if (pass == 1 && rd == rn) {
dd8fbd78 6482 tmp = neon_load_scratch(2);
9ee6e8bb 6483 } else {
ad69471c
PB
6484 tmp = neon_load_reg(rn, pass);
6485 }
6486 if (prewiden) {
6487 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6488 }
6489 }
ad69471c
PB
6490 if (src2_wide) {
6491 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6492 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6493 } else {
ad69471c 6494 if (pass == 1 && rd == rm) {
dd8fbd78 6495 tmp2 = neon_load_scratch(2);
9ee6e8bb 6496 } else {
ad69471c
PB
6497 tmp2 = neon_load_reg(rm, pass);
6498 }
6499 if (prewiden) {
6500 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6501 }
9ee6e8bb
PB
6502 }
6503 switch (op) {
6504 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6505 gen_neon_addl(size);
9ee6e8bb 6506 break;
79b0e534 6507 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6508 gen_neon_subl(size);
9ee6e8bb
PB
6509 break;
6510 case 5: case 7: /* VABAL, VABDL */
6511 switch ((size << 1) | u) {
ad69471c
PB
6512 case 0:
6513 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6514 break;
6515 case 1:
6516 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6517 break;
6518 case 2:
6519 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6520 break;
6521 case 3:
6522 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6523 break;
6524 case 4:
6525 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6526 break;
6527 case 5:
6528 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6529 break;
9ee6e8bb
PB
6530 default: abort();
6531 }
7d1b0095
PM
6532 tcg_temp_free_i32(tmp2);
6533 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6534 break;
6535 case 8: case 9: case 10: case 11: case 12: case 13:
6536 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6537 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6538 break;
6539 case 14: /* Polynomial VMULL */
e5ca24cb 6540 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6541 tcg_temp_free_i32(tmp2);
6542 tcg_temp_free_i32(tmp);
e5ca24cb 6543 break;
695272dc
PM
6544 default: /* 15 is RESERVED: caught earlier */
6545 abort();
9ee6e8bb 6546 }
ebcd88ce
PM
6547 if (op == 13) {
6548 /* VQDMULL */
6549 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6550 neon_store_reg64(cpu_V0, rd + pass);
6551 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6552 /* Accumulate. */
ebcd88ce 6553 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6554 switch (op) {
4dc064e6
PM
6555 case 10: /* VMLSL */
6556 gen_neon_negl(cpu_V0, size);
6557 /* Fall through */
6558 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6559 gen_neon_addl(size);
9ee6e8bb
PB
6560 break;
6561 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6562 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6563 if (op == 11) {
6564 gen_neon_negl(cpu_V0, size);
6565 }
ad69471c
PB
6566 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6567 break;
9ee6e8bb
PB
6568 default:
6569 abort();
6570 }
ad69471c 6571 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6572 } else if (op == 4 || op == 6) {
6573 /* Narrowing operation. */
7d1b0095 6574 tmp = tcg_temp_new_i32();
79b0e534 6575 if (!u) {
9ee6e8bb 6576 switch (size) {
ad69471c
PB
6577 case 0:
6578 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6579 break;
6580 case 1:
6581 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6582 break;
6583 case 2:
6584 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6585 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6586 break;
9ee6e8bb
PB
6587 default: abort();
6588 }
6589 } else {
6590 switch (size) {
ad69471c
PB
6591 case 0:
6592 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6593 break;
6594 case 1:
6595 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6596 break;
6597 case 2:
6598 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6599 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6600 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6601 break;
9ee6e8bb
PB
6602 default: abort();
6603 }
6604 }
ad69471c
PB
6605 if (pass == 0) {
6606 tmp3 = tmp;
6607 } else {
6608 neon_store_reg(rd, 0, tmp3);
6609 neon_store_reg(rd, 1, tmp);
6610 }
9ee6e8bb
PB
6611 } else {
6612 /* Write back the result. */
ad69471c 6613 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6614 }
6615 }
6616 } else {
3e3326df
PM
6617 /* Two registers and a scalar. NB that for ops of this form
6618 * the ARM ARM labels bit 24 as Q, but it is in our variable
6619 * 'u', not 'q'.
6620 */
6621 if (size == 0) {
6622 return 1;
6623 }
9ee6e8bb 6624 switch (op) {
9ee6e8bb 6625 case 1: /* Float VMLA scalar */
9ee6e8bb 6626 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6627 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6628 if (size == 1) {
6629 return 1;
6630 }
6631 /* fall through */
6632 case 0: /* Integer VMLA scalar */
6633 case 4: /* Integer VMLS scalar */
6634 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6635 case 12: /* VQDMULH scalar */
6636 case 13: /* VQRDMULH scalar */
3e3326df
PM
6637 if (u && ((rd | rn) & 1)) {
6638 return 1;
6639 }
dd8fbd78
FN
6640 tmp = neon_get_scalar(size, rm);
6641 neon_store_scratch(0, tmp);
9ee6e8bb 6642 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6643 tmp = neon_load_scratch(0);
6644 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6645 if (op == 12) {
6646 if (size == 1) {
02da0b2d 6647 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6648 } else {
02da0b2d 6649 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6650 }
6651 } else if (op == 13) {
6652 if (size == 1) {
02da0b2d 6653 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6654 } else {
02da0b2d 6655 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6656 }
6657 } else if (op & 1) {
aa47cfdd
PM
6658 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6659 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6660 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6661 } else {
6662 switch (size) {
dd8fbd78
FN
6663 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6664 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6665 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6666 default: abort();
9ee6e8bb
PB
6667 }
6668 }
7d1b0095 6669 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6670 if (op < 8) {
6671 /* Accumulate. */
dd8fbd78 6672 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6673 switch (op) {
6674 case 0:
dd8fbd78 6675 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6676 break;
6677 case 1:
aa47cfdd
PM
6678 {
6679 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6680 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6681 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6682 break;
aa47cfdd 6683 }
9ee6e8bb 6684 case 4:
dd8fbd78 6685 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6686 break;
6687 case 5:
aa47cfdd
PM
6688 {
6689 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6690 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6691 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6692 break;
aa47cfdd 6693 }
9ee6e8bb
PB
6694 default:
6695 abort();
6696 }
7d1b0095 6697 tcg_temp_free_i32(tmp2);
9ee6e8bb 6698 }
dd8fbd78 6699 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6700 }
6701 break;
9ee6e8bb 6702 case 3: /* VQDMLAL scalar */
9ee6e8bb 6703 case 7: /* VQDMLSL scalar */
9ee6e8bb 6704 case 11: /* VQDMULL scalar */
3e3326df 6705 if (u == 1) {
ad69471c 6706 return 1;
3e3326df
PM
6707 }
6708 /* fall through */
6709 case 2: /* VMLAL sclar */
6710 case 6: /* VMLSL scalar */
6711 case 10: /* VMULL scalar */
6712 if (rd & 1) {
6713 return 1;
6714 }
dd8fbd78 6715 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6716 /* We need a copy of tmp2 because gen_neon_mull
6717 * deletes it during pass 0. */
7d1b0095 6718 tmp4 = tcg_temp_new_i32();
c6067f04 6719 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6720 tmp3 = neon_load_reg(rn, 1);
ad69471c 6721
9ee6e8bb 6722 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6723 if (pass == 0) {
6724 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6725 } else {
dd8fbd78 6726 tmp = tmp3;
c6067f04 6727 tmp2 = tmp4;
9ee6e8bb 6728 }
ad69471c 6729 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6730 if (op != 11) {
6731 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6732 }
9ee6e8bb 6733 switch (op) {
4dc064e6
PM
6734 case 6:
6735 gen_neon_negl(cpu_V0, size);
6736 /* Fall through */
6737 case 2:
ad69471c 6738 gen_neon_addl(size);
9ee6e8bb
PB
6739 break;
6740 case 3: case 7:
ad69471c 6741 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6742 if (op == 7) {
6743 gen_neon_negl(cpu_V0, size);
6744 }
ad69471c 6745 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6746 break;
6747 case 10:
6748 /* no-op */
6749 break;
6750 case 11:
ad69471c 6751 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6752 break;
6753 default:
6754 abort();
6755 }
ad69471c 6756 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6757 }
dd8fbd78 6758
dd8fbd78 6759
9ee6e8bb
PB
6760 break;
6761 default: /* 14 and 15 are RESERVED */
6762 return 1;
6763 }
6764 }
6765 } else { /* size == 3 */
6766 if (!u) {
6767 /* Extract. */
9ee6e8bb 6768 imm = (insn >> 8) & 0xf;
ad69471c
PB
6769
6770 if (imm > 7 && !q)
6771 return 1;
6772
52579ea1
PM
6773 if (q && ((rd | rn | rm) & 1)) {
6774 return 1;
6775 }
6776
ad69471c
PB
6777 if (imm == 0) {
6778 neon_load_reg64(cpu_V0, rn);
6779 if (q) {
6780 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6781 }
ad69471c
PB
6782 } else if (imm == 8) {
6783 neon_load_reg64(cpu_V0, rn + 1);
6784 if (q) {
6785 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6786 }
ad69471c 6787 } else if (q) {
a7812ae4 6788 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6789 if (imm < 8) {
6790 neon_load_reg64(cpu_V0, rn);
a7812ae4 6791 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6792 } else {
6793 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6794 neon_load_reg64(tmp64, rm);
ad69471c
PB
6795 }
6796 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6797 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6798 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6799 if (imm < 8) {
6800 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6801 } else {
ad69471c
PB
6802 neon_load_reg64(cpu_V1, rm + 1);
6803 imm -= 8;
9ee6e8bb 6804 }
ad69471c 6805 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6806 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6807 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6808 tcg_temp_free_i64(tmp64);
ad69471c 6809 } else {
a7812ae4 6810 /* BUGFIX */
ad69471c 6811 neon_load_reg64(cpu_V0, rn);
a7812ae4 6812 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6813 neon_load_reg64(cpu_V1, rm);
a7812ae4 6814 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6815 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6816 }
6817 neon_store_reg64(cpu_V0, rd);
6818 if (q) {
6819 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6820 }
6821 } else if ((insn & (1 << 11)) == 0) {
6822 /* Two register misc. */
6823 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6824 size = (insn >> 18) & 3;
600b828c
PM
6825 /* UNDEF for unknown op values and bad op-size combinations */
6826 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6827 return 1;
6828 }
fe8fcf3d
PM
6829 if (neon_2rm_is_v8_op(op) &&
6830 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6831 return 1;
6832 }
fc2a9b37
PM
6833 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6834 q && ((rm | rd) & 1)) {
6835 return 1;
6836 }
9ee6e8bb 6837 switch (op) {
600b828c 6838 case NEON_2RM_VREV64:
9ee6e8bb 6839 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6840 tmp = neon_load_reg(rm, pass * 2);
6841 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6842 switch (size) {
dd8fbd78
FN
6843 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6844 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6845 case 2: /* no-op */ break;
6846 default: abort();
6847 }
dd8fbd78 6848 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6849 if (size == 2) {
dd8fbd78 6850 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6851 } else {
9ee6e8bb 6852 switch (size) {
dd8fbd78
FN
6853 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6854 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6855 default: abort();
6856 }
dd8fbd78 6857 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6858 }
6859 }
6860 break;
600b828c
PM
6861 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6862 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6863 for (pass = 0; pass < q + 1; pass++) {
6864 tmp = neon_load_reg(rm, pass * 2);
6865 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6866 tmp = neon_load_reg(rm, pass * 2 + 1);
6867 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6868 switch (size) {
6869 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6870 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6871 case 2: tcg_gen_add_i64(CPU_V001); break;
6872 default: abort();
6873 }
600b828c 6874 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6875 /* Accumulate. */
ad69471c
PB
6876 neon_load_reg64(cpu_V1, rd + pass);
6877 gen_neon_addl(size);
9ee6e8bb 6878 }
ad69471c 6879 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6880 }
6881 break;
600b828c 6882 case NEON_2RM_VTRN:
9ee6e8bb 6883 if (size == 2) {
a5a14945 6884 int n;
9ee6e8bb 6885 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6886 tmp = neon_load_reg(rm, n);
6887 tmp2 = neon_load_reg(rd, n + 1);
6888 neon_store_reg(rm, n, tmp2);
6889 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6890 }
6891 } else {
6892 goto elementwise;
6893 }
6894 break;
600b828c 6895 case NEON_2RM_VUZP:
02acedf9 6896 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6897 return 1;
9ee6e8bb
PB
6898 }
6899 break;
600b828c 6900 case NEON_2RM_VZIP:
d68a6f3a 6901 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6902 return 1;
9ee6e8bb
PB
6903 }
6904 break;
600b828c
PM
6905 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6906 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6907 if (rm & 1) {
6908 return 1;
6909 }
39d5492a 6910 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6911 for (pass = 0; pass < 2; pass++) {
ad69471c 6912 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6913 tmp = tcg_temp_new_i32();
600b828c
PM
6914 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6915 tmp, cpu_V0);
ad69471c
PB
6916 if (pass == 0) {
6917 tmp2 = tmp;
6918 } else {
6919 neon_store_reg(rd, 0, tmp2);
6920 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6921 }
9ee6e8bb
PB
6922 }
6923 break;
600b828c 6924 case NEON_2RM_VSHLL:
fc2a9b37 6925 if (q || (rd & 1)) {
9ee6e8bb 6926 return 1;
600b828c 6927 }
ad69471c
PB
6928 tmp = neon_load_reg(rm, 0);
6929 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6930 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6931 if (pass == 1)
6932 tmp = tmp2;
6933 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6934 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6935 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6936 }
6937 break;
600b828c 6938 case NEON_2RM_VCVT_F16_F32:
d614a513 6939 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6940 q || (rm & 1)) {
6941 return 1;
6942 }
7d1b0095
PM
6943 tmp = tcg_temp_new_i32();
6944 tmp2 = tcg_temp_new_i32();
60011498 6945 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6946 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6947 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6948 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6949 tcg_gen_shli_i32(tmp2, tmp2, 16);
6950 tcg_gen_or_i32(tmp2, tmp2, tmp);
6951 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6952 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6953 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6954 neon_store_reg(rd, 0, tmp2);
7d1b0095 6955 tmp2 = tcg_temp_new_i32();
2d981da7 6956 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6957 tcg_gen_shli_i32(tmp2, tmp2, 16);
6958 tcg_gen_or_i32(tmp2, tmp2, tmp);
6959 neon_store_reg(rd, 1, tmp2);
7d1b0095 6960 tcg_temp_free_i32(tmp);
60011498 6961 break;
600b828c 6962 case NEON_2RM_VCVT_F32_F16:
d614a513 6963 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6964 q || (rd & 1)) {
6965 return 1;
6966 }
7d1b0095 6967 tmp3 = tcg_temp_new_i32();
60011498
PB
6968 tmp = neon_load_reg(rm, 0);
6969 tmp2 = neon_load_reg(rm, 1);
6970 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6971 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6972 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6973 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6974 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6975 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6976 tcg_temp_free_i32(tmp);
60011498 6977 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6978 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6979 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6980 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6981 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6982 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6983 tcg_temp_free_i32(tmp2);
6984 tcg_temp_free_i32(tmp3);
60011498 6985 break;
9d935509 6986 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 6987 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
6988 || ((rm | rd) & 1)) {
6989 return 1;
6990 }
6991 tmp = tcg_const_i32(rd);
6992 tmp2 = tcg_const_i32(rm);
6993
6994 /* Bit 6 is the lowest opcode bit; it distinguishes between
6995 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6996 */
6997 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6998
6999 if (op == NEON_2RM_AESE) {
7000 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7001 } else {
7002 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7003 }
7004 tcg_temp_free_i32(tmp);
7005 tcg_temp_free_i32(tmp2);
7006 tcg_temp_free_i32(tmp3);
7007 break;
f1ecb913 7008 case NEON_2RM_SHA1H:
d614a513 7009 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7010 || ((rm | rd) & 1)) {
7011 return 1;
7012 }
7013 tmp = tcg_const_i32(rd);
7014 tmp2 = tcg_const_i32(rm);
7015
7016 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7017
7018 tcg_temp_free_i32(tmp);
7019 tcg_temp_free_i32(tmp2);
7020 break;
7021 case NEON_2RM_SHA1SU1:
7022 if ((rm | rd) & 1) {
7023 return 1;
7024 }
7025 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7026 if (q) {
d614a513 7027 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7028 return 1;
7029 }
d614a513 7030 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7031 return 1;
7032 }
7033 tmp = tcg_const_i32(rd);
7034 tmp2 = tcg_const_i32(rm);
7035 if (q) {
7036 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7037 } else {
7038 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7039 }
7040 tcg_temp_free_i32(tmp);
7041 tcg_temp_free_i32(tmp2);
7042 break;
9ee6e8bb
PB
7043 default:
7044 elementwise:
7045 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7046 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7047 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7048 neon_reg_offset(rm, pass));
39d5492a 7049 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7050 } else {
dd8fbd78 7051 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7052 }
7053 switch (op) {
600b828c 7054 case NEON_2RM_VREV32:
9ee6e8bb 7055 switch (size) {
dd8fbd78
FN
7056 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7057 case 1: gen_swap_half(tmp); break;
600b828c 7058 default: abort();
9ee6e8bb
PB
7059 }
7060 break;
600b828c 7061 case NEON_2RM_VREV16:
dd8fbd78 7062 gen_rev16(tmp);
9ee6e8bb 7063 break;
600b828c 7064 case NEON_2RM_VCLS:
9ee6e8bb 7065 switch (size) {
dd8fbd78
FN
7066 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7067 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7068 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7069 default: abort();
9ee6e8bb
PB
7070 }
7071 break;
600b828c 7072 case NEON_2RM_VCLZ:
9ee6e8bb 7073 switch (size) {
dd8fbd78
FN
7074 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7075 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7076 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 7077 default: abort();
9ee6e8bb
PB
7078 }
7079 break;
600b828c 7080 case NEON_2RM_VCNT:
dd8fbd78 7081 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7082 break;
600b828c 7083 case NEON_2RM_VMVN:
dd8fbd78 7084 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7085 break;
600b828c 7086 case NEON_2RM_VQABS:
9ee6e8bb 7087 switch (size) {
02da0b2d
PM
7088 case 0:
7089 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7090 break;
7091 case 1:
7092 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7093 break;
7094 case 2:
7095 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7096 break;
600b828c 7097 default: abort();
9ee6e8bb
PB
7098 }
7099 break;
600b828c 7100 case NEON_2RM_VQNEG:
9ee6e8bb 7101 switch (size) {
02da0b2d
PM
7102 case 0:
7103 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7104 break;
7105 case 1:
7106 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7107 break;
7108 case 2:
7109 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7110 break;
600b828c 7111 default: abort();
9ee6e8bb
PB
7112 }
7113 break;
600b828c 7114 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7115 tmp2 = tcg_const_i32(0);
9ee6e8bb 7116 switch(size) {
dd8fbd78
FN
7117 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7118 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7119 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7120 default: abort();
9ee6e8bb 7121 }
39d5492a 7122 tcg_temp_free_i32(tmp2);
600b828c 7123 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7124 tcg_gen_not_i32(tmp, tmp);
600b828c 7125 }
9ee6e8bb 7126 break;
600b828c 7127 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7128 tmp2 = tcg_const_i32(0);
9ee6e8bb 7129 switch(size) {
dd8fbd78
FN
7130 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7131 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7132 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7133 default: abort();
9ee6e8bb 7134 }
39d5492a 7135 tcg_temp_free_i32(tmp2);
600b828c 7136 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7137 tcg_gen_not_i32(tmp, tmp);
600b828c 7138 }
9ee6e8bb 7139 break;
600b828c 7140 case NEON_2RM_VCEQ0:
dd8fbd78 7141 tmp2 = tcg_const_i32(0);
9ee6e8bb 7142 switch(size) {
dd8fbd78
FN
7143 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7144 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7145 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7146 default: abort();
9ee6e8bb 7147 }
39d5492a 7148 tcg_temp_free_i32(tmp2);
9ee6e8bb 7149 break;
600b828c 7150 case NEON_2RM_VABS:
9ee6e8bb 7151 switch(size) {
dd8fbd78
FN
7152 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7153 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7154 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7155 default: abort();
9ee6e8bb
PB
7156 }
7157 break;
600b828c 7158 case NEON_2RM_VNEG:
dd8fbd78
FN
7159 tmp2 = tcg_const_i32(0);
7160 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7161 tcg_temp_free_i32(tmp2);
9ee6e8bb 7162 break;
600b828c 7163 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7164 {
7165 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7166 tmp2 = tcg_const_i32(0);
aa47cfdd 7167 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7168 tcg_temp_free_i32(tmp2);
aa47cfdd 7169 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7170 break;
aa47cfdd 7171 }
600b828c 7172 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7173 {
7174 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7175 tmp2 = tcg_const_i32(0);
aa47cfdd 7176 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7177 tcg_temp_free_i32(tmp2);
aa47cfdd 7178 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7179 break;
aa47cfdd 7180 }
600b828c 7181 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7182 {
7183 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7184 tmp2 = tcg_const_i32(0);
aa47cfdd 7185 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7186 tcg_temp_free_i32(tmp2);
aa47cfdd 7187 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7188 break;
aa47cfdd 7189 }
600b828c 7190 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7191 {
7192 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7193 tmp2 = tcg_const_i32(0);
aa47cfdd 7194 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7195 tcg_temp_free_i32(tmp2);
aa47cfdd 7196 tcg_temp_free_ptr(fpstatus);
0e326109 7197 break;
aa47cfdd 7198 }
600b828c 7199 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7200 {
7201 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7202 tmp2 = tcg_const_i32(0);
aa47cfdd 7203 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7204 tcg_temp_free_i32(tmp2);
aa47cfdd 7205 tcg_temp_free_ptr(fpstatus);
0e326109 7206 break;
aa47cfdd 7207 }
600b828c 7208 case NEON_2RM_VABS_F:
4373f3ce 7209 gen_vfp_abs(0);
9ee6e8bb 7210 break;
600b828c 7211 case NEON_2RM_VNEG_F:
4373f3ce 7212 gen_vfp_neg(0);
9ee6e8bb 7213 break;
600b828c 7214 case NEON_2RM_VSWP:
dd8fbd78
FN
7215 tmp2 = neon_load_reg(rd, pass);
7216 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7217 break;
600b828c 7218 case NEON_2RM_VTRN:
dd8fbd78 7219 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7220 switch (size) {
dd8fbd78
FN
7221 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7222 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7223 default: abort();
9ee6e8bb 7224 }
dd8fbd78 7225 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7226 break;
34f7b0a2
WN
7227 case NEON_2RM_VRINTN:
7228 case NEON_2RM_VRINTA:
7229 case NEON_2RM_VRINTM:
7230 case NEON_2RM_VRINTP:
7231 case NEON_2RM_VRINTZ:
7232 {
7233 TCGv_i32 tcg_rmode;
7234 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7235 int rmode;
7236
7237 if (op == NEON_2RM_VRINTZ) {
7238 rmode = FPROUNDING_ZERO;
7239 } else {
7240 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7241 }
7242
7243 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7244 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7245 cpu_env);
7246 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7247 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7248 cpu_env);
7249 tcg_temp_free_ptr(fpstatus);
7250 tcg_temp_free_i32(tcg_rmode);
7251 break;
7252 }
2ce70625
WN
7253 case NEON_2RM_VRINTX:
7254 {
7255 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7256 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7257 tcg_temp_free_ptr(fpstatus);
7258 break;
7259 }
901ad525
WN
7260 case NEON_2RM_VCVTAU:
7261 case NEON_2RM_VCVTAS:
7262 case NEON_2RM_VCVTNU:
7263 case NEON_2RM_VCVTNS:
7264 case NEON_2RM_VCVTPU:
7265 case NEON_2RM_VCVTPS:
7266 case NEON_2RM_VCVTMU:
7267 case NEON_2RM_VCVTMS:
7268 {
7269 bool is_signed = !extract32(insn, 7, 1);
7270 TCGv_ptr fpst = get_fpstatus_ptr(1);
7271 TCGv_i32 tcg_rmode, tcg_shift;
7272 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7273
7274 tcg_shift = tcg_const_i32(0);
7275 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7276 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7277 cpu_env);
7278
7279 if (is_signed) {
7280 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7281 tcg_shift, fpst);
7282 } else {
7283 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7284 tcg_shift, fpst);
7285 }
7286
7287 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7288 cpu_env);
7289 tcg_temp_free_i32(tcg_rmode);
7290 tcg_temp_free_i32(tcg_shift);
7291 tcg_temp_free_ptr(fpst);
7292 break;
7293 }
600b828c 7294 case NEON_2RM_VRECPE:
b6d4443a
AB
7295 {
7296 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7297 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7298 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7299 break;
b6d4443a 7300 }
600b828c 7301 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7302 {
7303 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7304 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7305 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7306 break;
c2fb418e 7307 }
600b828c 7308 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7309 {
7310 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7311 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7312 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7313 break;
b6d4443a 7314 }
600b828c 7315 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7316 {
7317 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7318 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7319 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7320 break;
c2fb418e 7321 }
600b828c 7322 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7323 gen_vfp_sito(0, 1);
9ee6e8bb 7324 break;
600b828c 7325 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7326 gen_vfp_uito(0, 1);
9ee6e8bb 7327 break;
600b828c 7328 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7329 gen_vfp_tosiz(0, 1);
9ee6e8bb 7330 break;
600b828c 7331 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7332 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7333 break;
7334 default:
600b828c
PM
7335 /* Reserved op values were caught by the
7336 * neon_2rm_sizes[] check earlier.
7337 */
7338 abort();
9ee6e8bb 7339 }
600b828c 7340 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7341 tcg_gen_st_f32(cpu_F0s, cpu_env,
7342 neon_reg_offset(rd, pass));
9ee6e8bb 7343 } else {
dd8fbd78 7344 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7345 }
7346 }
7347 break;
7348 }
7349 } else if ((insn & (1 << 10)) == 0) {
7350 /* VTBL, VTBX. */
56907d77
PM
7351 int n = ((insn >> 8) & 3) + 1;
7352 if ((rn + n) > 32) {
7353 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7354 * helper function running off the end of the register file.
7355 */
7356 return 1;
7357 }
7358 n <<= 3;
9ee6e8bb 7359 if (insn & (1 << 6)) {
8f8e3aa4 7360 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7361 } else {
7d1b0095 7362 tmp = tcg_temp_new_i32();
8f8e3aa4 7363 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7364 }
8f8e3aa4 7365 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7366 tmp4 = tcg_const_i32(rn);
7367 tmp5 = tcg_const_i32(n);
9ef39277 7368 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7369 tcg_temp_free_i32(tmp);
9ee6e8bb 7370 if (insn & (1 << 6)) {
8f8e3aa4 7371 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7372 } else {
7d1b0095 7373 tmp = tcg_temp_new_i32();
8f8e3aa4 7374 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7375 }
8f8e3aa4 7376 tmp3 = neon_load_reg(rm, 1);
9ef39277 7377 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7378 tcg_temp_free_i32(tmp5);
7379 tcg_temp_free_i32(tmp4);
8f8e3aa4 7380 neon_store_reg(rd, 0, tmp2);
3018f259 7381 neon_store_reg(rd, 1, tmp3);
7d1b0095 7382 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7383 } else if ((insn & 0x380) == 0) {
7384 /* VDUP */
133da6aa
JR
7385 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7386 return 1;
7387 }
9ee6e8bb 7388 if (insn & (1 << 19)) {
dd8fbd78 7389 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7390 } else {
dd8fbd78 7391 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7392 }
7393 if (insn & (1 << 16)) {
dd8fbd78 7394 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7395 } else if (insn & (1 << 17)) {
7396 if ((insn >> 18) & 1)
dd8fbd78 7397 gen_neon_dup_high16(tmp);
9ee6e8bb 7398 else
dd8fbd78 7399 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7400 }
7401 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7402 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7403 tcg_gen_mov_i32(tmp2, tmp);
7404 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7405 }
7d1b0095 7406 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7407 } else {
7408 return 1;
7409 }
7410 }
7411 }
7412 return 0;
7413}
7414
7dcc1f89 7415static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7416{
4b6a83fb
PM
7417 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7418 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7419
7420 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7421
7422 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7423 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7424 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7425 return 1;
7426 }
d614a513 7427 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7428 return disas_iwmmxt_insn(s, insn);
d614a513 7429 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7430 return disas_dsp_insn(s, insn);
c0f4af17
PM
7431 }
7432 return 1;
4b6a83fb
PM
7433 }
7434
7435 /* Otherwise treat as a generic register access */
7436 is64 = (insn & (1 << 25)) == 0;
7437 if (!is64 && ((insn & (1 << 4)) == 0)) {
7438 /* cdp */
7439 return 1;
7440 }
7441
7442 crm = insn & 0xf;
7443 if (is64) {
7444 crn = 0;
7445 opc1 = (insn >> 4) & 0xf;
7446 opc2 = 0;
7447 rt2 = (insn >> 16) & 0xf;
7448 } else {
7449 crn = (insn >> 16) & 0xf;
7450 opc1 = (insn >> 21) & 7;
7451 opc2 = (insn >> 5) & 7;
7452 rt2 = 0;
7453 }
7454 isread = (insn >> 20) & 1;
7455 rt = (insn >> 12) & 0xf;
7456
60322b39 7457 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7458 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7459 if (ri) {
7460 /* Check access permissions */
dcbff19b 7461 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7462 return 1;
7463 }
7464
c0f4af17 7465 if (ri->accessfn ||
d614a513 7466 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7467 /* Emit code to perform further access permissions checks at
7468 * runtime; this may result in an exception.
c0f4af17
PM
7469 * Note that on XScale all cp0..c13 registers do an access check
7470 * call in order to handle c15_cpar.
f59df3f2
PM
7471 */
7472 TCGv_ptr tmpptr;
3f208fd7 7473 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7474 uint32_t syndrome;
7475
7476 /* Note that since we are an implementation which takes an
7477 * exception on a trapped conditional instruction only if the
7478 * instruction passes its condition code check, we can take
7479 * advantage of the clause in the ARM ARM that allows us to set
7480 * the COND field in the instruction to 0xE in all cases.
7481 * We could fish the actual condition out of the insn (ARM)
7482 * or the condexec bits (Thumb) but it isn't necessary.
7483 */
7484 switch (cpnum) {
7485 case 14:
7486 if (is64) {
7487 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7488 isread, false);
8bcbf37c
PM
7489 } else {
7490 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7491 rt, isread, false);
8bcbf37c
PM
7492 }
7493 break;
7494 case 15:
7495 if (is64) {
7496 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7497 isread, false);
8bcbf37c
PM
7498 } else {
7499 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7500 rt, isread, false);
8bcbf37c
PM
7501 }
7502 break;
7503 default:
7504 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7505 * so this can only happen if this is an ARMv7 or earlier CPU,
7506 * in which case the syndrome information won't actually be
7507 * guest visible.
7508 */
d614a513 7509 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
7510 syndrome = syn_uncategorized();
7511 break;
7512 }
7513
43bfa4a1 7514 gen_set_condexec(s);
3977ee5d 7515 gen_set_pc_im(s, s->pc - 4);
f59df3f2 7516 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7517 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
7518 tcg_isread = tcg_const_i32(isread);
7519 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7520 tcg_isread);
f59df3f2 7521 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7522 tcg_temp_free_i32(tcg_syn);
3f208fd7 7523 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
7524 }
7525
4b6a83fb
PM
7526 /* Handle special cases first */
7527 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7528 case ARM_CP_NOP:
7529 return 0;
7530 case ARM_CP_WFI:
7531 if (isread) {
7532 return 1;
7533 }
eaed129d 7534 gen_set_pc_im(s, s->pc);
4b6a83fb 7535 s->is_jmp = DISAS_WFI;
2bee5105 7536 return 0;
4b6a83fb
PM
7537 default:
7538 break;
7539 }
7540
bd79255d 7541 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7542 gen_io_start();
7543 }
7544
4b6a83fb
PM
7545 if (isread) {
7546 /* Read */
7547 if (is64) {
7548 TCGv_i64 tmp64;
7549 TCGv_i32 tmp;
7550 if (ri->type & ARM_CP_CONST) {
7551 tmp64 = tcg_const_i64(ri->resetvalue);
7552 } else if (ri->readfn) {
7553 TCGv_ptr tmpptr;
4b6a83fb
PM
7554 tmp64 = tcg_temp_new_i64();
7555 tmpptr = tcg_const_ptr(ri);
7556 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7557 tcg_temp_free_ptr(tmpptr);
7558 } else {
7559 tmp64 = tcg_temp_new_i64();
7560 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7561 }
7562 tmp = tcg_temp_new_i32();
ecc7b3aa 7563 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
7564 store_reg(s, rt, tmp);
7565 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7566 tmp = tcg_temp_new_i32();
ecc7b3aa 7567 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 7568 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7569 store_reg(s, rt2, tmp);
7570 } else {
39d5492a 7571 TCGv_i32 tmp;
4b6a83fb
PM
7572 if (ri->type & ARM_CP_CONST) {
7573 tmp = tcg_const_i32(ri->resetvalue);
7574 } else if (ri->readfn) {
7575 TCGv_ptr tmpptr;
4b6a83fb
PM
7576 tmp = tcg_temp_new_i32();
7577 tmpptr = tcg_const_ptr(ri);
7578 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7579 tcg_temp_free_ptr(tmpptr);
7580 } else {
7581 tmp = load_cpu_offset(ri->fieldoffset);
7582 }
7583 if (rt == 15) {
7584 /* Destination register of r15 for 32 bit loads sets
7585 * the condition codes from the high 4 bits of the value
7586 */
7587 gen_set_nzcv(tmp);
7588 tcg_temp_free_i32(tmp);
7589 } else {
7590 store_reg(s, rt, tmp);
7591 }
7592 }
7593 } else {
7594 /* Write */
7595 if (ri->type & ARM_CP_CONST) {
7596 /* If not forbidden by access permissions, treat as WI */
7597 return 0;
7598 }
7599
7600 if (is64) {
39d5492a 7601 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7602 TCGv_i64 tmp64 = tcg_temp_new_i64();
7603 tmplo = load_reg(s, rt);
7604 tmphi = load_reg(s, rt2);
7605 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7606 tcg_temp_free_i32(tmplo);
7607 tcg_temp_free_i32(tmphi);
7608 if (ri->writefn) {
7609 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7610 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7611 tcg_temp_free_ptr(tmpptr);
7612 } else {
7613 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7614 }
7615 tcg_temp_free_i64(tmp64);
7616 } else {
7617 if (ri->writefn) {
39d5492a 7618 TCGv_i32 tmp;
4b6a83fb 7619 TCGv_ptr tmpptr;
4b6a83fb
PM
7620 tmp = load_reg(s, rt);
7621 tmpptr = tcg_const_ptr(ri);
7622 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7623 tcg_temp_free_ptr(tmpptr);
7624 tcg_temp_free_i32(tmp);
7625 } else {
39d5492a 7626 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7627 store_cpu_offset(tmp, ri->fieldoffset);
7628 }
7629 }
2452731c
PM
7630 }
7631
bd79255d 7632 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7633 /* I/O operations must end the TB here (whether read or write) */
7634 gen_io_end();
7635 gen_lookup_tb(s);
7636 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7637 /* We default to ending the TB on a coprocessor register write,
7638 * but allow this to be suppressed by the register definition
7639 * (usually only necessary to work around guest bugs).
7640 */
2452731c 7641 gen_lookup_tb(s);
4b6a83fb 7642 }
2452731c 7643
4b6a83fb
PM
7644 return 0;
7645 }
7646
626187d8
PM
7647 /* Unknown register; this might be a guest error or a QEMU
7648 * unimplemented feature.
7649 */
7650 if (is64) {
7651 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7652 "64 bit system register cp:%d opc1: %d crm:%d "
7653 "(%s)\n",
7654 isread ? "read" : "write", cpnum, opc1, crm,
7655 s->ns ? "non-secure" : "secure");
626187d8
PM
7656 } else {
7657 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7658 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7659 "(%s)\n",
7660 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7661 s->ns ? "non-secure" : "secure");
626187d8
PM
7662 }
7663
4a9a539f 7664 return 1;
9ee6e8bb
PB
7665}
7666
5e3f878a
PB
7667
7668/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7669static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7670{
39d5492a 7671 TCGv_i32 tmp;
7d1b0095 7672 tmp = tcg_temp_new_i32();
ecc7b3aa 7673 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7674 store_reg(s, rlow, tmp);
7d1b0095 7675 tmp = tcg_temp_new_i32();
5e3f878a 7676 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7677 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7678 store_reg(s, rhigh, tmp);
7679}
7680
7681/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7682static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7683{
a7812ae4 7684 TCGv_i64 tmp;
39d5492a 7685 TCGv_i32 tmp2;
5e3f878a 7686
36aa55dc 7687 /* Load value and extend to 64 bits. */
a7812ae4 7688 tmp = tcg_temp_new_i64();
5e3f878a
PB
7689 tmp2 = load_reg(s, rlow);
7690 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7691 tcg_temp_free_i32(tmp2);
5e3f878a 7692 tcg_gen_add_i64(val, val, tmp);
b75263d6 7693 tcg_temp_free_i64(tmp);
5e3f878a
PB
7694}
7695
7696/* load and add a 64-bit value from a register pair. */
a7812ae4 7697static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7698{
a7812ae4 7699 TCGv_i64 tmp;
39d5492a
PM
7700 TCGv_i32 tmpl;
7701 TCGv_i32 tmph;
5e3f878a
PB
7702
7703 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7704 tmpl = load_reg(s, rlow);
7705 tmph = load_reg(s, rhigh);
a7812ae4 7706 tmp = tcg_temp_new_i64();
36aa55dc 7707 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7708 tcg_temp_free_i32(tmpl);
7709 tcg_temp_free_i32(tmph);
5e3f878a 7710 tcg_gen_add_i64(val, val, tmp);
b75263d6 7711 tcg_temp_free_i64(tmp);
5e3f878a
PB
7712}
7713
c9f10124 7714/* Set N and Z flags from hi|lo. */
39d5492a 7715static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7716{
c9f10124
RH
7717 tcg_gen_mov_i32(cpu_NF, hi);
7718 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7719}
7720
426f5abc
PB
7721/* Load/Store exclusive instructions are implemented by remembering
7722 the value/address loaded, and seeing if these are the same
b90372ad 7723 when the store is performed. This should be sufficient to implement
426f5abc
PB
7724 the architecturally mandated semantics, and avoids having to monitor
7725 regular stores.
7726
7727 In system emulation mode only one CPU will be running at once, so
7728 this sequence is effectively atomic. In user emulation mode we
7729 throw an exception and handle the atomic operation elsewhere. */
7730static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 7731 TCGv_i32 addr, int size)
426f5abc 7732{
94ee24e7 7733 TCGv_i32 tmp = tcg_temp_new_i32();
426f5abc 7734
50225ad0
PM
7735 s->is_ldex = true;
7736
426f5abc
PB
7737 switch (size) {
7738 case 0:
12dcc321 7739 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7740 break;
7741 case 1:
12dcc321 7742 gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7743 break;
7744 case 2:
7745 case 3:
12dcc321 7746 gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7747 break;
7748 default:
7749 abort();
7750 }
03d05e2d 7751
426f5abc 7752 if (size == 3) {
39d5492a 7753 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d
PM
7754 TCGv_i32 tmp3 = tcg_temp_new_i32();
7755
2c9adbda 7756 tcg_gen_addi_i32(tmp2, addr, 4);
12dcc321 7757 gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7d1b0095 7758 tcg_temp_free_i32(tmp2);
03d05e2d
PM
7759 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7760 store_reg(s, rt2, tmp3);
7761 } else {
7762 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 7763 }
03d05e2d
PM
7764
7765 store_reg(s, rt, tmp);
7766 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
7767}
7768
7769static void gen_clrex(DisasContext *s)
7770{
03d05e2d 7771 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7772}
7773
7774#ifdef CONFIG_USER_ONLY
7775static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7776 TCGv_i32 addr, int size)
426f5abc 7777{
03d05e2d 7778 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
426f5abc
PB
7779 tcg_gen_movi_i32(cpu_exclusive_info,
7780 size | (rd << 4) | (rt << 8) | (rt2 << 12));
d4a2dc67 7781 gen_exception_internal_insn(s, 4, EXCP_STREX);
426f5abc
PB
7782}
7783#else
7784static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7785 TCGv_i32 addr, int size)
426f5abc 7786{
39d5492a 7787 TCGv_i32 tmp;
03d05e2d 7788 TCGv_i64 val64, extaddr;
42a268c2
RH
7789 TCGLabel *done_label;
7790 TCGLabel *fail_label;
426f5abc
PB
7791
7792 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7793 [addr] = {Rt};
7794 {Rd} = 0;
7795 } else {
7796 {Rd} = 1;
7797 } */
7798 fail_label = gen_new_label();
7799 done_label = gen_new_label();
03d05e2d
PM
7800 extaddr = tcg_temp_new_i64();
7801 tcg_gen_extu_i32_i64(extaddr, addr);
7802 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7803 tcg_temp_free_i64(extaddr);
7804
94ee24e7 7805 tmp = tcg_temp_new_i32();
426f5abc
PB
7806 switch (size) {
7807 case 0:
12dcc321 7808 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7809 break;
7810 case 1:
12dcc321 7811 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7812 break;
7813 case 2:
7814 case 3:
12dcc321 7815 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7816 break;
7817 default:
7818 abort();
7819 }
03d05e2d
PM
7820
7821 val64 = tcg_temp_new_i64();
426f5abc 7822 if (size == 3) {
39d5492a 7823 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d 7824 TCGv_i32 tmp3 = tcg_temp_new_i32();
426f5abc 7825 tcg_gen_addi_i32(tmp2, addr, 4);
12dcc321 7826 gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7d1b0095 7827 tcg_temp_free_i32(tmp2);
03d05e2d
PM
7828 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7829 tcg_temp_free_i32(tmp3);
7830 } else {
7831 tcg_gen_extu_i32_i64(val64, tmp);
426f5abc 7832 }
03d05e2d
PM
7833 tcg_temp_free_i32(tmp);
7834
7835 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7836 tcg_temp_free_i64(val64);
7837
426f5abc
PB
7838 tmp = load_reg(s, rt);
7839 switch (size) {
7840 case 0:
12dcc321 7841 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7842 break;
7843 case 1:
12dcc321 7844 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7845 break;
7846 case 2:
7847 case 3:
12dcc321 7848 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7849 break;
7850 default:
7851 abort();
7852 }
94ee24e7 7853 tcg_temp_free_i32(tmp);
426f5abc
PB
7854 if (size == 3) {
7855 tcg_gen_addi_i32(addr, addr, 4);
7856 tmp = load_reg(s, rt2);
12dcc321 7857 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
94ee24e7 7858 tcg_temp_free_i32(tmp);
426f5abc
PB
7859 }
7860 tcg_gen_movi_i32(cpu_R[rd], 0);
7861 tcg_gen_br(done_label);
7862 gen_set_label(fail_label);
7863 tcg_gen_movi_i32(cpu_R[rd], 1);
7864 gen_set_label(done_label);
03d05e2d 7865 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7866}
7867#endif
7868
81465888
PM
7869/* gen_srs:
7870 * @env: CPUARMState
7871 * @s: DisasContext
7872 * @mode: mode field from insn (which stack to store to)
7873 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7874 * @writeback: true if writeback bit set
7875 *
7876 * Generate code for the SRS (Store Return State) insn.
7877 */
7878static void gen_srs(DisasContext *s,
7879 uint32_t mode, uint32_t amode, bool writeback)
7880{
7881 int32_t offset;
cbc0326b
PM
7882 TCGv_i32 addr, tmp;
7883 bool undef = false;
7884
7885 /* SRS is:
7886 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 7887 * and specified mode is monitor mode
cbc0326b
PM
7888 * - UNDEFINED in Hyp mode
7889 * - UNPREDICTABLE in User or System mode
7890 * - UNPREDICTABLE if the specified mode is:
7891 * -- not implemented
7892 * -- not a valid mode number
7893 * -- a mode that's at a higher exception level
7894 * -- Monitor, if we are Non-secure
f01377f5 7895 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 7896 */
ba63cf47 7897 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
7898 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
7899 return;
7900 }
7901
7902 if (s->current_el == 0 || s->current_el == 2) {
7903 undef = true;
7904 }
7905
7906 switch (mode) {
7907 case ARM_CPU_MODE_USR:
7908 case ARM_CPU_MODE_FIQ:
7909 case ARM_CPU_MODE_IRQ:
7910 case ARM_CPU_MODE_SVC:
7911 case ARM_CPU_MODE_ABT:
7912 case ARM_CPU_MODE_UND:
7913 case ARM_CPU_MODE_SYS:
7914 break;
7915 case ARM_CPU_MODE_HYP:
7916 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7917 undef = true;
7918 }
7919 break;
7920 case ARM_CPU_MODE_MON:
7921 /* No need to check specifically for "are we non-secure" because
7922 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7923 * so if this isn't EL3 then we must be non-secure.
7924 */
7925 if (s->current_el != 3) {
7926 undef = true;
7927 }
7928 break;
7929 default:
7930 undef = true;
7931 }
7932
7933 if (undef) {
7934 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
7935 default_exception_el(s));
7936 return;
7937 }
7938
7939 addr = tcg_temp_new_i32();
7940 tmp = tcg_const_i32(mode);
f01377f5
PM
7941 /* get_r13_banked() will raise an exception if called from System mode */
7942 gen_set_condexec(s);
7943 gen_set_pc_im(s, s->pc - 4);
81465888
PM
7944 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7945 tcg_temp_free_i32(tmp);
7946 switch (amode) {
7947 case 0: /* DA */
7948 offset = -4;
7949 break;
7950 case 1: /* IA */
7951 offset = 0;
7952 break;
7953 case 2: /* DB */
7954 offset = -8;
7955 break;
7956 case 3: /* IB */
7957 offset = 4;
7958 break;
7959 default:
7960 abort();
7961 }
7962 tcg_gen_addi_i32(addr, addr, offset);
7963 tmp = load_reg(s, 14);
12dcc321 7964 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 7965 tcg_temp_free_i32(tmp);
81465888
PM
7966 tmp = load_cpu_field(spsr);
7967 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 7968 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 7969 tcg_temp_free_i32(tmp);
81465888
PM
7970 if (writeback) {
7971 switch (amode) {
7972 case 0:
7973 offset = -8;
7974 break;
7975 case 1:
7976 offset = 4;
7977 break;
7978 case 2:
7979 offset = -4;
7980 break;
7981 case 3:
7982 offset = 0;
7983 break;
7984 default:
7985 abort();
7986 }
7987 tcg_gen_addi_i32(addr, addr, offset);
7988 tmp = tcg_const_i32(mode);
7989 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7990 tcg_temp_free_i32(tmp);
7991 }
7992 tcg_temp_free_i32(addr);
f01377f5 7993 s->is_jmp = DISAS_UPDATE;
81465888
PM
7994}
7995
f4df2210 7996static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7997{
f4df2210 7998 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7999 TCGv_i32 tmp;
8000 TCGv_i32 tmp2;
8001 TCGv_i32 tmp3;
8002 TCGv_i32 addr;
a7812ae4 8003 TCGv_i64 tmp64;
9ee6e8bb 8004
9ee6e8bb 8005 /* M variants do not implement ARM mode. */
b53d8923 8006 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 8007 goto illegal_op;
b53d8923 8008 }
9ee6e8bb
PB
8009 cond = insn >> 28;
8010 if (cond == 0xf){
be5e7a76
DES
8011 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8012 * choose to UNDEF. In ARMv5 and above the space is used
8013 * for miscellaneous unconditional instructions.
8014 */
8015 ARCH(5);
8016
9ee6e8bb
PB
8017 /* Unconditional instructions. */
8018 if (((insn >> 25) & 7) == 1) {
8019 /* NEON Data processing. */
d614a513 8020 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8021 goto illegal_op;
d614a513 8022 }
9ee6e8bb 8023
7dcc1f89 8024 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8025 goto illegal_op;
7dcc1f89 8026 }
9ee6e8bb
PB
8027 return;
8028 }
8029 if ((insn & 0x0f100000) == 0x04000000) {
8030 /* NEON load/store. */
d614a513 8031 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8032 goto illegal_op;
d614a513 8033 }
9ee6e8bb 8034
7dcc1f89 8035 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8036 goto illegal_op;
7dcc1f89 8037 }
9ee6e8bb
PB
8038 return;
8039 }
6a57f3eb
WN
8040 if ((insn & 0x0f000e10) == 0x0e000a00) {
8041 /* VFP. */
7dcc1f89 8042 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8043 goto illegal_op;
8044 }
8045 return;
8046 }
3d185e5d
PM
8047 if (((insn & 0x0f30f000) == 0x0510f000) ||
8048 ((insn & 0x0f30f010) == 0x0710f000)) {
8049 if ((insn & (1 << 22)) == 0) {
8050 /* PLDW; v7MP */
d614a513 8051 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8052 goto illegal_op;
8053 }
8054 }
8055 /* Otherwise PLD; v5TE+ */
be5e7a76 8056 ARCH(5TE);
3d185e5d
PM
8057 return;
8058 }
8059 if (((insn & 0x0f70f000) == 0x0450f000) ||
8060 ((insn & 0x0f70f010) == 0x0650f000)) {
8061 ARCH(7);
8062 return; /* PLI; V7 */
8063 }
8064 if (((insn & 0x0f700000) == 0x04100000) ||
8065 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8066 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8067 goto illegal_op;
8068 }
8069 return; /* v7MP: Unallocated memory hint: must NOP */
8070 }
8071
8072 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8073 ARCH(6);
8074 /* setend */
9886ecdf
PB
8075 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8076 gen_helper_setend(cpu_env);
8077 s->is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8078 }
8079 return;
8080 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8081 switch ((insn >> 4) & 0xf) {
8082 case 1: /* clrex */
8083 ARCH(6K);
426f5abc 8084 gen_clrex(s);
9ee6e8bb
PB
8085 return;
8086 case 4: /* dsb */
8087 case 5: /* dmb */
9ee6e8bb 8088 ARCH(7);
61e4c432 8089 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8090 return;
6df99dec
SS
8091 case 6: /* isb */
8092 /* We need to break the TB after this insn to execute
8093 * self-modifying code correctly and also to take
8094 * any pending interrupts immediately.
8095 */
8096 gen_lookup_tb(s);
8097 return;
9ee6e8bb
PB
8098 default:
8099 goto illegal_op;
8100 }
8101 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8102 /* srs */
81465888
PM
8103 ARCH(6);
8104 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8105 return;
ea825eee 8106 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8107 /* rfe */
c67b6b71 8108 int32_t offset;
9ee6e8bb
PB
8109 if (IS_USER(s))
8110 goto illegal_op;
8111 ARCH(6);
8112 rn = (insn >> 16) & 0xf;
b0109805 8113 addr = load_reg(s, rn);
9ee6e8bb
PB
8114 i = (insn >> 23) & 3;
8115 switch (i) {
b0109805 8116 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8117 case 1: offset = 0; break; /* IA */
8118 case 2: offset = -8; break; /* DB */
b0109805 8119 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8120 default: abort();
8121 }
8122 if (offset)
b0109805
PB
8123 tcg_gen_addi_i32(addr, addr, offset);
8124 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8125 tmp = tcg_temp_new_i32();
12dcc321 8126 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8127 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8128 tmp2 = tcg_temp_new_i32();
12dcc321 8129 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8130 if (insn & (1 << 21)) {
8131 /* Base writeback. */
8132 switch (i) {
b0109805 8133 case 0: offset = -8; break;
c67b6b71
FN
8134 case 1: offset = 4; break;
8135 case 2: offset = -4; break;
b0109805 8136 case 3: offset = 0; break;
9ee6e8bb
PB
8137 default: abort();
8138 }
8139 if (offset)
b0109805
PB
8140 tcg_gen_addi_i32(addr, addr, offset);
8141 store_reg(s, rn, addr);
8142 } else {
7d1b0095 8143 tcg_temp_free_i32(addr);
9ee6e8bb 8144 }
b0109805 8145 gen_rfe(s, tmp, tmp2);
c67b6b71 8146 return;
9ee6e8bb
PB
8147 } else if ((insn & 0x0e000000) == 0x0a000000) {
8148 /* branch link and change to thumb (blx <offset>) */
8149 int32_t offset;
8150
8151 val = (uint32_t)s->pc;
7d1b0095 8152 tmp = tcg_temp_new_i32();
d9ba4830
PB
8153 tcg_gen_movi_i32(tmp, val);
8154 store_reg(s, 14, tmp);
9ee6e8bb
PB
8155 /* Sign-extend the 24-bit offset */
8156 offset = (((int32_t)insn) << 8) >> 8;
8157 /* offset * 4 + bit24 * 2 + (thumb bit) */
8158 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8159 /* pipeline offset */
8160 val += 4;
be5e7a76 8161 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8162 gen_bx_im(s, val);
9ee6e8bb
PB
8163 return;
8164 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8165 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8166 /* iWMMXt register transfer. */
c0f4af17 8167 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8168 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8169 return;
c0f4af17
PM
8170 }
8171 }
9ee6e8bb
PB
8172 }
8173 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8174 /* Coprocessor double register transfer. */
be5e7a76 8175 ARCH(5TE);
9ee6e8bb
PB
8176 } else if ((insn & 0x0f000010) == 0x0e000010) {
8177 /* Additional coprocessor register transfer. */
7997d92f 8178 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8179 uint32_t mask;
8180 uint32_t val;
8181 /* cps (privileged) */
8182 if (IS_USER(s))
8183 return;
8184 mask = val = 0;
8185 if (insn & (1 << 19)) {
8186 if (insn & (1 << 8))
8187 mask |= CPSR_A;
8188 if (insn & (1 << 7))
8189 mask |= CPSR_I;
8190 if (insn & (1 << 6))
8191 mask |= CPSR_F;
8192 if (insn & (1 << 18))
8193 val |= mask;
8194 }
7997d92f 8195 if (insn & (1 << 17)) {
9ee6e8bb
PB
8196 mask |= CPSR_M;
8197 val |= (insn & 0x1f);
8198 }
8199 if (mask) {
2fbac54b 8200 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8201 }
8202 return;
8203 }
8204 goto illegal_op;
8205 }
8206 if (cond != 0xe) {
8207 /* if not always execute, we generate a conditional jump to
8208 next instruction */
8209 s->condlabel = gen_new_label();
39fb730a 8210 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8211 s->condjmp = 1;
8212 }
8213 if ((insn & 0x0f900000) == 0x03000000) {
8214 if ((insn & (1 << 21)) == 0) {
8215 ARCH(6T2);
8216 rd = (insn >> 12) & 0xf;
8217 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8218 if ((insn & (1 << 22)) == 0) {
8219 /* MOVW */
7d1b0095 8220 tmp = tcg_temp_new_i32();
5e3f878a 8221 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8222 } else {
8223 /* MOVT */
5e3f878a 8224 tmp = load_reg(s, rd);
86831435 8225 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8226 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8227 }
5e3f878a 8228 store_reg(s, rd, tmp);
9ee6e8bb
PB
8229 } else {
8230 if (((insn >> 12) & 0xf) != 0xf)
8231 goto illegal_op;
8232 if (((insn >> 16) & 0xf) == 0) {
8233 gen_nop_hint(s, insn & 0xff);
8234 } else {
8235 /* CPSR = immediate */
8236 val = insn & 0xff;
8237 shift = ((insn >> 8) & 0xf) * 2;
8238 if (shift)
8239 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8240 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8241 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8242 i, val)) {
9ee6e8bb 8243 goto illegal_op;
7dcc1f89 8244 }
9ee6e8bb
PB
8245 }
8246 }
8247 } else if ((insn & 0x0f900000) == 0x01000000
8248 && (insn & 0x00000090) != 0x00000090) {
8249 /* miscellaneous instructions */
8250 op1 = (insn >> 21) & 3;
8251 sh = (insn >> 4) & 0xf;
8252 rm = insn & 0xf;
8253 switch (sh) {
8bfd0550
PM
8254 case 0x0: /* MSR, MRS */
8255 if (insn & (1 << 9)) {
8256 /* MSR (banked) and MRS (banked) */
8257 int sysm = extract32(insn, 16, 4) |
8258 (extract32(insn, 8, 1) << 4);
8259 int r = extract32(insn, 22, 1);
8260
8261 if (op1 & 1) {
8262 /* MSR (banked) */
8263 gen_msr_banked(s, r, sysm, rm);
8264 } else {
8265 /* MRS (banked) */
8266 int rd = extract32(insn, 12, 4);
8267
8268 gen_mrs_banked(s, r, sysm, rd);
8269 }
8270 break;
8271 }
8272
8273 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8274 if (op1 & 1) {
8275 /* PSR = reg */
2fbac54b 8276 tmp = load_reg(s, rm);
9ee6e8bb 8277 i = ((op1 & 2) != 0);
7dcc1f89 8278 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8279 goto illegal_op;
8280 } else {
8281 /* reg = PSR */
8282 rd = (insn >> 12) & 0xf;
8283 if (op1 & 2) {
8284 if (IS_USER(s))
8285 goto illegal_op;
d9ba4830 8286 tmp = load_cpu_field(spsr);
9ee6e8bb 8287 } else {
7d1b0095 8288 tmp = tcg_temp_new_i32();
9ef39277 8289 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8290 }
d9ba4830 8291 store_reg(s, rd, tmp);
9ee6e8bb
PB
8292 }
8293 break;
8294 case 0x1:
8295 if (op1 == 1) {
8296 /* branch/exchange thumb (bx). */
be5e7a76 8297 ARCH(4T);
d9ba4830
PB
8298 tmp = load_reg(s, rm);
8299 gen_bx(s, tmp);
9ee6e8bb
PB
8300 } else if (op1 == 3) {
8301 /* clz */
be5e7a76 8302 ARCH(5);
9ee6e8bb 8303 rd = (insn >> 12) & 0xf;
1497c961
PB
8304 tmp = load_reg(s, rm);
8305 gen_helper_clz(tmp, tmp);
8306 store_reg(s, rd, tmp);
9ee6e8bb
PB
8307 } else {
8308 goto illegal_op;
8309 }
8310 break;
8311 case 0x2:
8312 if (op1 == 1) {
8313 ARCH(5J); /* bxj */
8314 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8315 tmp = load_reg(s, rm);
8316 gen_bx(s, tmp);
9ee6e8bb
PB
8317 } else {
8318 goto illegal_op;
8319 }
8320 break;
8321 case 0x3:
8322 if (op1 != 1)
8323 goto illegal_op;
8324
be5e7a76 8325 ARCH(5);
9ee6e8bb 8326 /* branch link/exchange thumb (blx) */
d9ba4830 8327 tmp = load_reg(s, rm);
7d1b0095 8328 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8329 tcg_gen_movi_i32(tmp2, s->pc);
8330 store_reg(s, 14, tmp2);
8331 gen_bx(s, tmp);
9ee6e8bb 8332 break;
eb0ecd5a
WN
8333 case 0x4:
8334 {
8335 /* crc32/crc32c */
8336 uint32_t c = extract32(insn, 8, 4);
8337
8338 /* Check this CPU supports ARMv8 CRC instructions.
8339 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8340 * Bits 8, 10 and 11 should be zero.
8341 */
d614a513 8342 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8343 (c & 0xd) != 0) {
8344 goto illegal_op;
8345 }
8346
8347 rn = extract32(insn, 16, 4);
8348 rd = extract32(insn, 12, 4);
8349
8350 tmp = load_reg(s, rn);
8351 tmp2 = load_reg(s, rm);
aa633469
PM
8352 if (op1 == 0) {
8353 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8354 } else if (op1 == 1) {
8355 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8356 }
eb0ecd5a
WN
8357 tmp3 = tcg_const_i32(1 << op1);
8358 if (c & 0x2) {
8359 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8360 } else {
8361 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8362 }
8363 tcg_temp_free_i32(tmp2);
8364 tcg_temp_free_i32(tmp3);
8365 store_reg(s, rd, tmp);
8366 break;
8367 }
9ee6e8bb 8368 case 0x5: /* saturating add/subtract */
be5e7a76 8369 ARCH(5TE);
9ee6e8bb
PB
8370 rd = (insn >> 12) & 0xf;
8371 rn = (insn >> 16) & 0xf;
b40d0353 8372 tmp = load_reg(s, rm);
5e3f878a 8373 tmp2 = load_reg(s, rn);
9ee6e8bb 8374 if (op1 & 2)
9ef39277 8375 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8376 if (op1 & 1)
9ef39277 8377 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8378 else
9ef39277 8379 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8380 tcg_temp_free_i32(tmp2);
5e3f878a 8381 store_reg(s, rd, tmp);
9ee6e8bb 8382 break;
49e14940 8383 case 7:
d4a2dc67
PM
8384 {
8385 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8386 switch (op1) {
19a6e31c
PM
8387 case 0:
8388 /* HLT */
8389 gen_hlt(s, imm16);
8390 break;
37e6456e
PM
8391 case 1:
8392 /* bkpt */
8393 ARCH(5);
8394 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8395 syn_aa32_bkpt(imm16, false),
8396 default_exception_el(s));
37e6456e
PM
8397 break;
8398 case 2:
8399 /* Hypervisor call (v7) */
8400 ARCH(7);
8401 if (IS_USER(s)) {
8402 goto illegal_op;
8403 }
8404 gen_hvc(s, imm16);
8405 break;
8406 case 3:
8407 /* Secure monitor call (v6+) */
8408 ARCH(6K);
8409 if (IS_USER(s)) {
8410 goto illegal_op;
8411 }
8412 gen_smc(s);
8413 break;
8414 default:
19a6e31c 8415 g_assert_not_reached();
49e14940 8416 }
9ee6e8bb 8417 break;
d4a2dc67 8418 }
9ee6e8bb
PB
8419 case 0x8: /* signed multiply */
8420 case 0xa:
8421 case 0xc:
8422 case 0xe:
be5e7a76 8423 ARCH(5TE);
9ee6e8bb
PB
8424 rs = (insn >> 8) & 0xf;
8425 rn = (insn >> 12) & 0xf;
8426 rd = (insn >> 16) & 0xf;
8427 if (op1 == 1) {
8428 /* (32 * 16) >> 16 */
5e3f878a
PB
8429 tmp = load_reg(s, rm);
8430 tmp2 = load_reg(s, rs);
9ee6e8bb 8431 if (sh & 4)
5e3f878a 8432 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8433 else
5e3f878a 8434 gen_sxth(tmp2);
a7812ae4
PB
8435 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8436 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8437 tmp = tcg_temp_new_i32();
ecc7b3aa 8438 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8439 tcg_temp_free_i64(tmp64);
9ee6e8bb 8440 if ((sh & 2) == 0) {
5e3f878a 8441 tmp2 = load_reg(s, rn);
9ef39277 8442 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8443 tcg_temp_free_i32(tmp2);
9ee6e8bb 8444 }
5e3f878a 8445 store_reg(s, rd, tmp);
9ee6e8bb
PB
8446 } else {
8447 /* 16 * 16 */
5e3f878a
PB
8448 tmp = load_reg(s, rm);
8449 tmp2 = load_reg(s, rs);
8450 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8451 tcg_temp_free_i32(tmp2);
9ee6e8bb 8452 if (op1 == 2) {
a7812ae4
PB
8453 tmp64 = tcg_temp_new_i64();
8454 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8455 tcg_temp_free_i32(tmp);
a7812ae4
PB
8456 gen_addq(s, tmp64, rn, rd);
8457 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8458 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8459 } else {
8460 if (op1 == 0) {
5e3f878a 8461 tmp2 = load_reg(s, rn);
9ef39277 8462 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8463 tcg_temp_free_i32(tmp2);
9ee6e8bb 8464 }
5e3f878a 8465 store_reg(s, rd, tmp);
9ee6e8bb
PB
8466 }
8467 }
8468 break;
8469 default:
8470 goto illegal_op;
8471 }
8472 } else if (((insn & 0x0e000000) == 0 &&
8473 (insn & 0x00000090) != 0x90) ||
8474 ((insn & 0x0e000000) == (1 << 25))) {
8475 int set_cc, logic_cc, shiftop;
8476
8477 op1 = (insn >> 21) & 0xf;
8478 set_cc = (insn >> 20) & 1;
8479 logic_cc = table_logic_cc[op1] & set_cc;
8480
8481 /* data processing instruction */
8482 if (insn & (1 << 25)) {
8483 /* immediate operand */
8484 val = insn & 0xff;
8485 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8486 if (shift) {
9ee6e8bb 8487 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8488 }
7d1b0095 8489 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8490 tcg_gen_movi_i32(tmp2, val);
8491 if (logic_cc && shift) {
8492 gen_set_CF_bit31(tmp2);
8493 }
9ee6e8bb
PB
8494 } else {
8495 /* register */
8496 rm = (insn) & 0xf;
e9bb4aa9 8497 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8498 shiftop = (insn >> 5) & 3;
8499 if (!(insn & (1 << 4))) {
8500 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8501 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8502 } else {
8503 rs = (insn >> 8) & 0xf;
8984bd2e 8504 tmp = load_reg(s, rs);
e9bb4aa9 8505 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8506 }
8507 }
8508 if (op1 != 0x0f && op1 != 0x0d) {
8509 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8510 tmp = load_reg(s, rn);
8511 } else {
39d5492a 8512 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8513 }
8514 rd = (insn >> 12) & 0xf;
8515 switch(op1) {
8516 case 0x00:
e9bb4aa9
JR
8517 tcg_gen_and_i32(tmp, tmp, tmp2);
8518 if (logic_cc) {
8519 gen_logic_CC(tmp);
8520 }
7dcc1f89 8521 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8522 break;
8523 case 0x01:
e9bb4aa9
JR
8524 tcg_gen_xor_i32(tmp, tmp, tmp2);
8525 if (logic_cc) {
8526 gen_logic_CC(tmp);
8527 }
7dcc1f89 8528 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8529 break;
8530 case 0x02:
8531 if (set_cc && rd == 15) {
8532 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8533 if (IS_USER(s)) {
9ee6e8bb 8534 goto illegal_op;
e9bb4aa9 8535 }
72485ec4 8536 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8537 gen_exception_return(s, tmp);
9ee6e8bb 8538 } else {
e9bb4aa9 8539 if (set_cc) {
72485ec4 8540 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8541 } else {
8542 tcg_gen_sub_i32(tmp, tmp, tmp2);
8543 }
7dcc1f89 8544 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8545 }
8546 break;
8547 case 0x03:
e9bb4aa9 8548 if (set_cc) {
72485ec4 8549 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8550 } else {
8551 tcg_gen_sub_i32(tmp, tmp2, tmp);
8552 }
7dcc1f89 8553 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8554 break;
8555 case 0x04:
e9bb4aa9 8556 if (set_cc) {
72485ec4 8557 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8558 } else {
8559 tcg_gen_add_i32(tmp, tmp, tmp2);
8560 }
7dcc1f89 8561 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8562 break;
8563 case 0x05:
e9bb4aa9 8564 if (set_cc) {
49b4c31e 8565 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8566 } else {
8567 gen_add_carry(tmp, tmp, tmp2);
8568 }
7dcc1f89 8569 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8570 break;
8571 case 0x06:
e9bb4aa9 8572 if (set_cc) {
2de68a49 8573 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8574 } else {
8575 gen_sub_carry(tmp, tmp, tmp2);
8576 }
7dcc1f89 8577 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8578 break;
8579 case 0x07:
e9bb4aa9 8580 if (set_cc) {
2de68a49 8581 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8582 } else {
8583 gen_sub_carry(tmp, tmp2, tmp);
8584 }
7dcc1f89 8585 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8586 break;
8587 case 0x08:
8588 if (set_cc) {
e9bb4aa9
JR
8589 tcg_gen_and_i32(tmp, tmp, tmp2);
8590 gen_logic_CC(tmp);
9ee6e8bb 8591 }
7d1b0095 8592 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8593 break;
8594 case 0x09:
8595 if (set_cc) {
e9bb4aa9
JR
8596 tcg_gen_xor_i32(tmp, tmp, tmp2);
8597 gen_logic_CC(tmp);
9ee6e8bb 8598 }
7d1b0095 8599 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8600 break;
8601 case 0x0a:
8602 if (set_cc) {
72485ec4 8603 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8604 }
7d1b0095 8605 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8606 break;
8607 case 0x0b:
8608 if (set_cc) {
72485ec4 8609 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8610 }
7d1b0095 8611 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8612 break;
8613 case 0x0c:
e9bb4aa9
JR
8614 tcg_gen_or_i32(tmp, tmp, tmp2);
8615 if (logic_cc) {
8616 gen_logic_CC(tmp);
8617 }
7dcc1f89 8618 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8619 break;
8620 case 0x0d:
8621 if (logic_cc && rd == 15) {
8622 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8623 if (IS_USER(s)) {
9ee6e8bb 8624 goto illegal_op;
e9bb4aa9
JR
8625 }
8626 gen_exception_return(s, tmp2);
9ee6e8bb 8627 } else {
e9bb4aa9
JR
8628 if (logic_cc) {
8629 gen_logic_CC(tmp2);
8630 }
7dcc1f89 8631 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8632 }
8633 break;
8634 case 0x0e:
f669df27 8635 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8636 if (logic_cc) {
8637 gen_logic_CC(tmp);
8638 }
7dcc1f89 8639 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8640 break;
8641 default:
8642 case 0x0f:
e9bb4aa9
JR
8643 tcg_gen_not_i32(tmp2, tmp2);
8644 if (logic_cc) {
8645 gen_logic_CC(tmp2);
8646 }
7dcc1f89 8647 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8648 break;
8649 }
e9bb4aa9 8650 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8651 tcg_temp_free_i32(tmp2);
e9bb4aa9 8652 }
9ee6e8bb
PB
8653 } else {
8654 /* other instructions */
8655 op1 = (insn >> 24) & 0xf;
8656 switch(op1) {
8657 case 0x0:
8658 case 0x1:
8659 /* multiplies, extra load/stores */
8660 sh = (insn >> 5) & 3;
8661 if (sh == 0) {
8662 if (op1 == 0x0) {
8663 rd = (insn >> 16) & 0xf;
8664 rn = (insn >> 12) & 0xf;
8665 rs = (insn >> 8) & 0xf;
8666 rm = (insn) & 0xf;
8667 op1 = (insn >> 20) & 0xf;
8668 switch (op1) {
8669 case 0: case 1: case 2: case 3: case 6:
8670 /* 32 bit mul */
5e3f878a
PB
8671 tmp = load_reg(s, rs);
8672 tmp2 = load_reg(s, rm);
8673 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8674 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8675 if (insn & (1 << 22)) {
8676 /* Subtract (mls) */
8677 ARCH(6T2);
5e3f878a
PB
8678 tmp2 = load_reg(s, rn);
8679 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8680 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8681 } else if (insn & (1 << 21)) {
8682 /* Add */
5e3f878a
PB
8683 tmp2 = load_reg(s, rn);
8684 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8685 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8686 }
8687 if (insn & (1 << 20))
5e3f878a
PB
8688 gen_logic_CC(tmp);
8689 store_reg(s, rd, tmp);
9ee6e8bb 8690 break;
8aac08b1
AJ
8691 case 4:
8692 /* 64 bit mul double accumulate (UMAAL) */
8693 ARCH(6);
8694 tmp = load_reg(s, rs);
8695 tmp2 = load_reg(s, rm);
8696 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8697 gen_addq_lo(s, tmp64, rn);
8698 gen_addq_lo(s, tmp64, rd);
8699 gen_storeq_reg(s, rn, rd, tmp64);
8700 tcg_temp_free_i64(tmp64);
8701 break;
8702 case 8: case 9: case 10: case 11:
8703 case 12: case 13: case 14: case 15:
8704 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8705 tmp = load_reg(s, rs);
8706 tmp2 = load_reg(s, rm);
8aac08b1 8707 if (insn & (1 << 22)) {
c9f10124 8708 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8709 } else {
c9f10124 8710 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8711 }
8712 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8713 TCGv_i32 al = load_reg(s, rn);
8714 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8715 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8716 tcg_temp_free_i32(al);
8717 tcg_temp_free_i32(ah);
9ee6e8bb 8718 }
8aac08b1 8719 if (insn & (1 << 20)) {
c9f10124 8720 gen_logicq_cc(tmp, tmp2);
8aac08b1 8721 }
c9f10124
RH
8722 store_reg(s, rn, tmp);
8723 store_reg(s, rd, tmp2);
9ee6e8bb 8724 break;
8aac08b1
AJ
8725 default:
8726 goto illegal_op;
9ee6e8bb
PB
8727 }
8728 } else {
8729 rn = (insn >> 16) & 0xf;
8730 rd = (insn >> 12) & 0xf;
8731 if (insn & (1 << 23)) {
8732 /* load/store exclusive */
2359bf80 8733 int op2 = (insn >> 8) & 3;
86753403 8734 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8735
8736 switch (op2) {
8737 case 0: /* lda/stl */
8738 if (op1 == 1) {
8739 goto illegal_op;
8740 }
8741 ARCH(8);
8742 break;
8743 case 1: /* reserved */
8744 goto illegal_op;
8745 case 2: /* ldaex/stlex */
8746 ARCH(8);
8747 break;
8748 case 3: /* ldrex/strex */
8749 if (op1) {
8750 ARCH(6K);
8751 } else {
8752 ARCH(6);
8753 }
8754 break;
8755 }
8756
3174f8e9 8757 addr = tcg_temp_local_new_i32();
98a46317 8758 load_reg_var(s, addr, rn);
2359bf80
MR
8759
8760 /* Since the emulation does not have barriers,
8761 the acquire/release semantics need no special
8762 handling */
8763 if (op2 == 0) {
8764 if (insn & (1 << 20)) {
8765 tmp = tcg_temp_new_i32();
8766 switch (op1) {
8767 case 0: /* lda */
12dcc321
PB
8768 gen_aa32_ld32u(s, tmp, addr,
8769 get_mem_index(s));
2359bf80
MR
8770 break;
8771 case 2: /* ldab */
12dcc321
PB
8772 gen_aa32_ld8u(s, tmp, addr,
8773 get_mem_index(s));
2359bf80
MR
8774 break;
8775 case 3: /* ldah */
12dcc321
PB
8776 gen_aa32_ld16u(s, tmp, addr,
8777 get_mem_index(s));
2359bf80
MR
8778 break;
8779 default:
8780 abort();
8781 }
8782 store_reg(s, rd, tmp);
8783 } else {
8784 rm = insn & 0xf;
8785 tmp = load_reg(s, rm);
8786 switch (op1) {
8787 case 0: /* stl */
12dcc321
PB
8788 gen_aa32_st32(s, tmp, addr,
8789 get_mem_index(s));
2359bf80
MR
8790 break;
8791 case 2: /* stlb */
12dcc321
PB
8792 gen_aa32_st8(s, tmp, addr,
8793 get_mem_index(s));
2359bf80
MR
8794 break;
8795 case 3: /* stlh */
12dcc321
PB
8796 gen_aa32_st16(s, tmp, addr,
8797 get_mem_index(s));
2359bf80
MR
8798 break;
8799 default:
8800 abort();
8801 }
8802 tcg_temp_free_i32(tmp);
8803 }
8804 } else if (insn & (1 << 20)) {
86753403
PB
8805 switch (op1) {
8806 case 0: /* ldrex */
426f5abc 8807 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8808 break;
8809 case 1: /* ldrexd */
426f5abc 8810 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8811 break;
8812 case 2: /* ldrexb */
426f5abc 8813 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8814 break;
8815 case 3: /* ldrexh */
426f5abc 8816 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8817 break;
8818 default:
8819 abort();
8820 }
9ee6e8bb
PB
8821 } else {
8822 rm = insn & 0xf;
86753403
PB
8823 switch (op1) {
8824 case 0: /* strex */
426f5abc 8825 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8826 break;
8827 case 1: /* strexd */
502e64fe 8828 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8829 break;
8830 case 2: /* strexb */
426f5abc 8831 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8832 break;
8833 case 3: /* strexh */
426f5abc 8834 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8835 break;
8836 default:
8837 abort();
8838 }
9ee6e8bb 8839 }
39d5492a 8840 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8841 } else {
8842 /* SWP instruction */
8843 rm = (insn) & 0xf;
8844
8984bd2e
PB
8845 /* ??? This is not really atomic. However we know
8846 we never have multiple CPUs running in parallel,
8847 so it is good enough. */
8848 addr = load_reg(s, rn);
8849 tmp = load_reg(s, rm);
5a839c0d 8850 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8851 if (insn & (1 << 22)) {
12dcc321
PB
8852 gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
8853 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8854 } else {
12dcc321
PB
8855 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8856 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8857 }
5a839c0d 8858 tcg_temp_free_i32(tmp);
7d1b0095 8859 tcg_temp_free_i32(addr);
8984bd2e 8860 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8861 }
8862 }
8863 } else {
8864 int address_offset;
3960c336
PM
8865 bool load = insn & (1 << 20);
8866 bool doubleword = false;
9ee6e8bb
PB
8867 /* Misc load/store */
8868 rn = (insn >> 16) & 0xf;
8869 rd = (insn >> 12) & 0xf;
3960c336
PM
8870
8871 if (!load && (sh & 2)) {
8872 /* doubleword */
8873 ARCH(5TE);
8874 if (rd & 1) {
8875 /* UNPREDICTABLE; we choose to UNDEF */
8876 goto illegal_op;
8877 }
8878 load = (sh & 1) == 0;
8879 doubleword = true;
8880 }
8881
b0109805 8882 addr = load_reg(s, rn);
9ee6e8bb 8883 if (insn & (1 << 24))
b0109805 8884 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb 8885 address_offset = 0;
3960c336
PM
8886
8887 if (doubleword) {
8888 if (!load) {
9ee6e8bb 8889 /* store */
b0109805 8890 tmp = load_reg(s, rd);
12dcc321 8891 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8892 tcg_temp_free_i32(tmp);
b0109805
PB
8893 tcg_gen_addi_i32(addr, addr, 4);
8894 tmp = load_reg(s, rd + 1);
12dcc321 8895 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8896 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8897 } else {
8898 /* load */
5a839c0d 8899 tmp = tcg_temp_new_i32();
12dcc321 8900 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8901 store_reg(s, rd, tmp);
8902 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8903 tmp = tcg_temp_new_i32();
12dcc321 8904 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8905 rd++;
9ee6e8bb
PB
8906 }
8907 address_offset = -4;
3960c336
PM
8908 } else if (load) {
8909 /* load */
8910 tmp = tcg_temp_new_i32();
8911 switch (sh) {
8912 case 1:
12dcc321 8913 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
3960c336
PM
8914 break;
8915 case 2:
12dcc321 8916 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
3960c336
PM
8917 break;
8918 default:
8919 case 3:
12dcc321 8920 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
3960c336
PM
8921 break;
8922 }
9ee6e8bb
PB
8923 } else {
8924 /* store */
b0109805 8925 tmp = load_reg(s, rd);
12dcc321 8926 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5a839c0d 8927 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8928 }
8929 /* Perform base writeback before the loaded value to
8930 ensure correct behavior with overlapping index registers.
b6af0975 8931 ldrd with base writeback is undefined if the
9ee6e8bb
PB
8932 destination and index registers overlap. */
8933 if (!(insn & (1 << 24))) {
b0109805
PB
8934 gen_add_datah_offset(s, insn, address_offset, addr);
8935 store_reg(s, rn, addr);
9ee6e8bb
PB
8936 } else if (insn & (1 << 21)) {
8937 if (address_offset)
b0109805
PB
8938 tcg_gen_addi_i32(addr, addr, address_offset);
8939 store_reg(s, rn, addr);
8940 } else {
7d1b0095 8941 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8942 }
8943 if (load) {
8944 /* Complete the load. */
b0109805 8945 store_reg(s, rd, tmp);
9ee6e8bb
PB
8946 }
8947 }
8948 break;
8949 case 0x4:
8950 case 0x5:
8951 goto do_ldst;
8952 case 0x6:
8953 case 0x7:
8954 if (insn & (1 << 4)) {
8955 ARCH(6);
8956 /* Armv6 Media instructions. */
8957 rm = insn & 0xf;
8958 rn = (insn >> 16) & 0xf;
2c0262af 8959 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8960 rs = (insn >> 8) & 0xf;
8961 switch ((insn >> 23) & 3) {
8962 case 0: /* Parallel add/subtract. */
8963 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8964 tmp = load_reg(s, rn);
8965 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8966 sh = (insn >> 5) & 7;
8967 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8968 goto illegal_op;
6ddbc6e4 8969 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8970 tcg_temp_free_i32(tmp2);
6ddbc6e4 8971 store_reg(s, rd, tmp);
9ee6e8bb
PB
8972 break;
8973 case 1:
8974 if ((insn & 0x00700020) == 0) {
6c95676b 8975 /* Halfword pack. */
3670669c
PB
8976 tmp = load_reg(s, rn);
8977 tmp2 = load_reg(s, rm);
9ee6e8bb 8978 shift = (insn >> 7) & 0x1f;
3670669c
PB
8979 if (insn & (1 << 6)) {
8980 /* pkhtb */
22478e79
AZ
8981 if (shift == 0)
8982 shift = 31;
8983 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8984 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8985 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8986 } else {
8987 /* pkhbt */
22478e79
AZ
8988 if (shift)
8989 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8990 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8991 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8992 }
8993 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8994 tcg_temp_free_i32(tmp2);
3670669c 8995 store_reg(s, rd, tmp);
9ee6e8bb
PB
8996 } else if ((insn & 0x00200020) == 0x00200000) {
8997 /* [us]sat */
6ddbc6e4 8998 tmp = load_reg(s, rm);
9ee6e8bb
PB
8999 shift = (insn >> 7) & 0x1f;
9000 if (insn & (1 << 6)) {
9001 if (shift == 0)
9002 shift = 31;
6ddbc6e4 9003 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9004 } else {
6ddbc6e4 9005 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9006 }
9007 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9008 tmp2 = tcg_const_i32(sh);
9009 if (insn & (1 << 22))
9ef39277 9010 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9011 else
9ef39277 9012 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9013 tcg_temp_free_i32(tmp2);
6ddbc6e4 9014 store_reg(s, rd, tmp);
9ee6e8bb
PB
9015 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9016 /* [us]sat16 */
6ddbc6e4 9017 tmp = load_reg(s, rm);
9ee6e8bb 9018 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9019 tmp2 = tcg_const_i32(sh);
9020 if (insn & (1 << 22))
9ef39277 9021 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9022 else
9ef39277 9023 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9024 tcg_temp_free_i32(tmp2);
6ddbc6e4 9025 store_reg(s, rd, tmp);
9ee6e8bb
PB
9026 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9027 /* Select bytes. */
6ddbc6e4
PB
9028 tmp = load_reg(s, rn);
9029 tmp2 = load_reg(s, rm);
7d1b0095 9030 tmp3 = tcg_temp_new_i32();
0ecb72a5 9031 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9032 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9033 tcg_temp_free_i32(tmp3);
9034 tcg_temp_free_i32(tmp2);
6ddbc6e4 9035 store_reg(s, rd, tmp);
9ee6e8bb 9036 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9037 tmp = load_reg(s, rm);
9ee6e8bb 9038 shift = (insn >> 10) & 3;
1301f322 9039 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9040 rotate, a shift is sufficient. */
9041 if (shift != 0)
f669df27 9042 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9043 op1 = (insn >> 20) & 7;
9044 switch (op1) {
5e3f878a
PB
9045 case 0: gen_sxtb16(tmp); break;
9046 case 2: gen_sxtb(tmp); break;
9047 case 3: gen_sxth(tmp); break;
9048 case 4: gen_uxtb16(tmp); break;
9049 case 6: gen_uxtb(tmp); break;
9050 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9051 default: goto illegal_op;
9052 }
9053 if (rn != 15) {
5e3f878a 9054 tmp2 = load_reg(s, rn);
9ee6e8bb 9055 if ((op1 & 3) == 0) {
5e3f878a 9056 gen_add16(tmp, tmp2);
9ee6e8bb 9057 } else {
5e3f878a 9058 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9059 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9060 }
9061 }
6c95676b 9062 store_reg(s, rd, tmp);
9ee6e8bb
PB
9063 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9064 /* rev */
b0109805 9065 tmp = load_reg(s, rm);
9ee6e8bb
PB
9066 if (insn & (1 << 22)) {
9067 if (insn & (1 << 7)) {
b0109805 9068 gen_revsh(tmp);
9ee6e8bb
PB
9069 } else {
9070 ARCH(6T2);
b0109805 9071 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9072 }
9073 } else {
9074 if (insn & (1 << 7))
b0109805 9075 gen_rev16(tmp);
9ee6e8bb 9076 else
66896cb8 9077 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9078 }
b0109805 9079 store_reg(s, rd, tmp);
9ee6e8bb
PB
9080 } else {
9081 goto illegal_op;
9082 }
9083 break;
9084 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9085 switch ((insn >> 20) & 0x7) {
9086 case 5:
9087 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9088 /* op2 not 00x or 11x : UNDEF */
9089 goto illegal_op;
9090 }
838fa72d
AJ
9091 /* Signed multiply most significant [accumulate].
9092 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9093 tmp = load_reg(s, rm);
9094 tmp2 = load_reg(s, rs);
a7812ae4 9095 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9096
955a7dd5 9097 if (rd != 15) {
838fa72d 9098 tmp = load_reg(s, rd);
9ee6e8bb 9099 if (insn & (1 << 6)) {
838fa72d 9100 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9101 } else {
838fa72d 9102 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9103 }
9104 }
838fa72d
AJ
9105 if (insn & (1 << 5)) {
9106 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9107 }
9108 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9109 tmp = tcg_temp_new_i32();
ecc7b3aa 9110 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9111 tcg_temp_free_i64(tmp64);
955a7dd5 9112 store_reg(s, rn, tmp);
41e9564d
PM
9113 break;
9114 case 0:
9115 case 4:
9116 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9117 if (insn & (1 << 7)) {
9118 goto illegal_op;
9119 }
9120 tmp = load_reg(s, rm);
9121 tmp2 = load_reg(s, rs);
9ee6e8bb 9122 if (insn & (1 << 5))
5e3f878a
PB
9123 gen_swap_half(tmp2);
9124 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9125 if (insn & (1 << 22)) {
5e3f878a 9126 /* smlald, smlsld */
33bbd75a
PC
9127 TCGv_i64 tmp64_2;
9128
a7812ae4 9129 tmp64 = tcg_temp_new_i64();
33bbd75a 9130 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9131 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9132 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9133 tcg_temp_free_i32(tmp);
33bbd75a
PC
9134 tcg_temp_free_i32(tmp2);
9135 if (insn & (1 << 6)) {
9136 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9137 } else {
9138 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9139 }
9140 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9141 gen_addq(s, tmp64, rd, rn);
9142 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9143 tcg_temp_free_i64(tmp64);
9ee6e8bb 9144 } else {
5e3f878a 9145 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9146 if (insn & (1 << 6)) {
9147 /* This subtraction cannot overflow. */
9148 tcg_gen_sub_i32(tmp, tmp, tmp2);
9149 } else {
9150 /* This addition cannot overflow 32 bits;
9151 * however it may overflow considered as a
9152 * signed operation, in which case we must set
9153 * the Q flag.
9154 */
9155 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9156 }
9157 tcg_temp_free_i32(tmp2);
22478e79 9158 if (rd != 15)
9ee6e8bb 9159 {
22478e79 9160 tmp2 = load_reg(s, rd);
9ef39277 9161 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9162 tcg_temp_free_i32(tmp2);
9ee6e8bb 9163 }
22478e79 9164 store_reg(s, rn, tmp);
9ee6e8bb 9165 }
41e9564d 9166 break;
b8b8ea05
PM
9167 case 1:
9168 case 3:
9169 /* SDIV, UDIV */
d614a513 9170 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9171 goto illegal_op;
9172 }
9173 if (((insn >> 5) & 7) || (rd != 15)) {
9174 goto illegal_op;
9175 }
9176 tmp = load_reg(s, rm);
9177 tmp2 = load_reg(s, rs);
9178 if (insn & (1 << 21)) {
9179 gen_helper_udiv(tmp, tmp, tmp2);
9180 } else {
9181 gen_helper_sdiv(tmp, tmp, tmp2);
9182 }
9183 tcg_temp_free_i32(tmp2);
9184 store_reg(s, rn, tmp);
9185 break;
41e9564d
PM
9186 default:
9187 goto illegal_op;
9ee6e8bb
PB
9188 }
9189 break;
9190 case 3:
9191 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9192 switch (op1) {
9193 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9194 ARCH(6);
9195 tmp = load_reg(s, rm);
9196 tmp2 = load_reg(s, rs);
9197 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9198 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9199 if (rd != 15) {
9200 tmp2 = load_reg(s, rd);
6ddbc6e4 9201 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9202 tcg_temp_free_i32(tmp2);
9ee6e8bb 9203 }
ded9d295 9204 store_reg(s, rn, tmp);
9ee6e8bb
PB
9205 break;
9206 case 0x20: case 0x24: case 0x28: case 0x2c:
9207 /* Bitfield insert/clear. */
9208 ARCH(6T2);
9209 shift = (insn >> 7) & 0x1f;
9210 i = (insn >> 16) & 0x1f;
45140a57
KB
9211 if (i < shift) {
9212 /* UNPREDICTABLE; we choose to UNDEF */
9213 goto illegal_op;
9214 }
9ee6e8bb
PB
9215 i = i + 1 - shift;
9216 if (rm == 15) {
7d1b0095 9217 tmp = tcg_temp_new_i32();
5e3f878a 9218 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9219 } else {
5e3f878a 9220 tmp = load_reg(s, rm);
9ee6e8bb
PB
9221 }
9222 if (i != 32) {
5e3f878a 9223 tmp2 = load_reg(s, rd);
d593c48e 9224 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9225 tcg_temp_free_i32(tmp2);
9ee6e8bb 9226 }
5e3f878a 9227 store_reg(s, rd, tmp);
9ee6e8bb
PB
9228 break;
9229 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9230 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9231 ARCH(6T2);
5e3f878a 9232 tmp = load_reg(s, rm);
9ee6e8bb
PB
9233 shift = (insn >> 7) & 0x1f;
9234 i = ((insn >> 16) & 0x1f) + 1;
9235 if (shift + i > 32)
9236 goto illegal_op;
9237 if (i < 32) {
9238 if (op1 & 0x20) {
5e3f878a 9239 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 9240 } else {
5e3f878a 9241 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
9242 }
9243 }
5e3f878a 9244 store_reg(s, rd, tmp);
9ee6e8bb
PB
9245 break;
9246 default:
9247 goto illegal_op;
9248 }
9249 break;
9250 }
9251 break;
9252 }
9253 do_ldst:
9254 /* Check for undefined extension instructions
9255 * per the ARM Bible IE:
9256 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9257 */
9258 sh = (0xf << 20) | (0xf << 4);
9259 if (op1 == 0x7 && ((insn & sh) == sh))
9260 {
9261 goto illegal_op;
9262 }
9263 /* load/store byte/word */
9264 rn = (insn >> 16) & 0xf;
9265 rd = (insn >> 12) & 0xf;
b0109805 9266 tmp2 = load_reg(s, rn);
a99caa48
PM
9267 if ((insn & 0x01200000) == 0x00200000) {
9268 /* ldrt/strt */
579d21cc 9269 i = get_a32_user_mem_index(s);
a99caa48
PM
9270 } else {
9271 i = get_mem_index(s);
9272 }
9ee6e8bb 9273 if (insn & (1 << 24))
b0109805 9274 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9275 if (insn & (1 << 20)) {
9276 /* load */
5a839c0d 9277 tmp = tcg_temp_new_i32();
9ee6e8bb 9278 if (insn & (1 << 22)) {
12dcc321 9279 gen_aa32_ld8u(s, tmp, tmp2, i);
9ee6e8bb 9280 } else {
12dcc321 9281 gen_aa32_ld32u(s, tmp, tmp2, i);
9ee6e8bb 9282 }
9ee6e8bb
PB
9283 } else {
9284 /* store */
b0109805 9285 tmp = load_reg(s, rd);
5a839c0d 9286 if (insn & (1 << 22)) {
12dcc321 9287 gen_aa32_st8(s, tmp, tmp2, i);
5a839c0d 9288 } else {
12dcc321 9289 gen_aa32_st32(s, tmp, tmp2, i);
5a839c0d
PM
9290 }
9291 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9292 }
9293 if (!(insn & (1 << 24))) {
b0109805
PB
9294 gen_add_data_offset(s, insn, tmp2);
9295 store_reg(s, rn, tmp2);
9296 } else if (insn & (1 << 21)) {
9297 store_reg(s, rn, tmp2);
9298 } else {
7d1b0095 9299 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9300 }
9301 if (insn & (1 << 20)) {
9302 /* Complete the load. */
7dcc1f89 9303 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9304 }
9305 break;
9306 case 0x08:
9307 case 0x09:
9308 {
da3e53dd
PM
9309 int j, n, loaded_base;
9310 bool exc_return = false;
9311 bool is_load = extract32(insn, 20, 1);
9312 bool user = false;
39d5492a 9313 TCGv_i32 loaded_var;
9ee6e8bb
PB
9314 /* load/store multiple words */
9315 /* XXX: store correct base if write back */
9ee6e8bb 9316 if (insn & (1 << 22)) {
da3e53dd 9317 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9318 if (IS_USER(s))
9319 goto illegal_op; /* only usable in supervisor mode */
9320
da3e53dd
PM
9321 if (is_load && extract32(insn, 15, 1)) {
9322 exc_return = true;
9323 } else {
9324 user = true;
9325 }
9ee6e8bb
PB
9326 }
9327 rn = (insn >> 16) & 0xf;
b0109805 9328 addr = load_reg(s, rn);
9ee6e8bb
PB
9329
9330 /* compute total size */
9331 loaded_base = 0;
39d5492a 9332 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9333 n = 0;
9334 for(i=0;i<16;i++) {
9335 if (insn & (1 << i))
9336 n++;
9337 }
9338 /* XXX: test invalid n == 0 case ? */
9339 if (insn & (1 << 23)) {
9340 if (insn & (1 << 24)) {
9341 /* pre increment */
b0109805 9342 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9343 } else {
9344 /* post increment */
9345 }
9346 } else {
9347 if (insn & (1 << 24)) {
9348 /* pre decrement */
b0109805 9349 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9350 } else {
9351 /* post decrement */
9352 if (n != 1)
b0109805 9353 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9354 }
9355 }
9356 j = 0;
9357 for(i=0;i<16;i++) {
9358 if (insn & (1 << i)) {
da3e53dd 9359 if (is_load) {
9ee6e8bb 9360 /* load */
5a839c0d 9361 tmp = tcg_temp_new_i32();
12dcc321 9362 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9363 if (user) {
b75263d6 9364 tmp2 = tcg_const_i32(i);
1ce94f81 9365 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9366 tcg_temp_free_i32(tmp2);
7d1b0095 9367 tcg_temp_free_i32(tmp);
9ee6e8bb 9368 } else if (i == rn) {
b0109805 9369 loaded_var = tmp;
9ee6e8bb 9370 loaded_base = 1;
fb0e8e79
PM
9371 } else if (rn == 15 && exc_return) {
9372 store_pc_exc_ret(s, tmp);
9ee6e8bb 9373 } else {
7dcc1f89 9374 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9375 }
9376 } else {
9377 /* store */
9378 if (i == 15) {
9379 /* special case: r15 = PC + 8 */
9380 val = (long)s->pc + 4;
7d1b0095 9381 tmp = tcg_temp_new_i32();
b0109805 9382 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9383 } else if (user) {
7d1b0095 9384 tmp = tcg_temp_new_i32();
b75263d6 9385 tmp2 = tcg_const_i32(i);
9ef39277 9386 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9387 tcg_temp_free_i32(tmp2);
9ee6e8bb 9388 } else {
b0109805 9389 tmp = load_reg(s, i);
9ee6e8bb 9390 }
12dcc321 9391 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9392 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9393 }
9394 j++;
9395 /* no need to add after the last transfer */
9396 if (j != n)
b0109805 9397 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9398 }
9399 }
9400 if (insn & (1 << 21)) {
9401 /* write back */
9402 if (insn & (1 << 23)) {
9403 if (insn & (1 << 24)) {
9404 /* pre increment */
9405 } else {
9406 /* post increment */
b0109805 9407 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9408 }
9409 } else {
9410 if (insn & (1 << 24)) {
9411 /* pre decrement */
9412 if (n != 1)
b0109805 9413 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9414 } else {
9415 /* post decrement */
b0109805 9416 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9417 }
9418 }
b0109805
PB
9419 store_reg(s, rn, addr);
9420 } else {
7d1b0095 9421 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9422 }
9423 if (loaded_base) {
b0109805 9424 store_reg(s, rn, loaded_var);
9ee6e8bb 9425 }
da3e53dd 9426 if (exc_return) {
9ee6e8bb 9427 /* Restore CPSR from SPSR. */
d9ba4830 9428 tmp = load_cpu_field(spsr);
235ea1f5 9429 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9430 tcg_temp_free_i32(tmp);
577bf808 9431 s->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9432 }
9433 }
9434 break;
9435 case 0xa:
9436 case 0xb:
9437 {
9438 int32_t offset;
9439
9440 /* branch (and link) */
9441 val = (int32_t)s->pc;
9442 if (insn & (1 << 24)) {
7d1b0095 9443 tmp = tcg_temp_new_i32();
5e3f878a
PB
9444 tcg_gen_movi_i32(tmp, val);
9445 store_reg(s, 14, tmp);
9ee6e8bb 9446 }
534df156
PM
9447 offset = sextract32(insn << 2, 0, 26);
9448 val += offset + 4;
9ee6e8bb
PB
9449 gen_jmp(s, val);
9450 }
9451 break;
9452 case 0xc:
9453 case 0xd:
9454 case 0xe:
6a57f3eb
WN
9455 if (((insn >> 8) & 0xe) == 10) {
9456 /* VFP. */
7dcc1f89 9457 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9458 goto illegal_op;
9459 }
7dcc1f89 9460 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9461 /* Coprocessor. */
9ee6e8bb 9462 goto illegal_op;
6a57f3eb 9463 }
9ee6e8bb
PB
9464 break;
9465 case 0xf:
9466 /* swi */
eaed129d 9467 gen_set_pc_im(s, s->pc);
d4a2dc67 9468 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9469 s->is_jmp = DISAS_SWI;
9470 break;
9471 default:
9472 illegal_op:
73710361
GB
9473 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9474 default_exception_el(s));
9ee6e8bb
PB
9475 break;
9476 }
9477 }
9478}
9479
/* Predicate: nonzero iff the Thumb-2 data-processing opcode OP is one of
 * the logical operations (opcodes 0..7), which set flags from the result
 * rather than via the arithmetic condition-code helpers.
 */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
9486
9487/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9488 then set condition code flags based on the result of the operation.
9489 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9490 to the high bit of T1.
9491 Returns zero if the opcode is valid. */
9492
9493static int
39d5492a
PM
9494gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9495 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9496{
9497 int logic_cc;
9498
9499 logic_cc = 0;
9500 switch (op) {
9501 case 0: /* and */
396e467c 9502 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9503 logic_cc = conds;
9504 break;
9505 case 1: /* bic */
f669df27 9506 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9507 logic_cc = conds;
9508 break;
9509 case 2: /* orr */
396e467c 9510 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9511 logic_cc = conds;
9512 break;
9513 case 3: /* orn */
29501f1b 9514 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9515 logic_cc = conds;
9516 break;
9517 case 4: /* eor */
396e467c 9518 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9519 logic_cc = conds;
9520 break;
9521 case 8: /* add */
9522 if (conds)
72485ec4 9523 gen_add_CC(t0, t0, t1);
9ee6e8bb 9524 else
396e467c 9525 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9526 break;
9527 case 10: /* adc */
9528 if (conds)
49b4c31e 9529 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9530 else
396e467c 9531 gen_adc(t0, t1);
9ee6e8bb
PB
9532 break;
9533 case 11: /* sbc */
2de68a49
RH
9534 if (conds) {
9535 gen_sbc_CC(t0, t0, t1);
9536 } else {
396e467c 9537 gen_sub_carry(t0, t0, t1);
2de68a49 9538 }
9ee6e8bb
PB
9539 break;
9540 case 13: /* sub */
9541 if (conds)
72485ec4 9542 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9543 else
396e467c 9544 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9545 break;
9546 case 14: /* rsb */
9547 if (conds)
72485ec4 9548 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9549 else
396e467c 9550 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9551 break;
9552 default: /* 5, 6, 7, 9, 12, 15. */
9553 return 1;
9554 }
9555 if (logic_cc) {
396e467c 9556 gen_logic_CC(t0);
9ee6e8bb 9557 if (shifter_out)
396e467c 9558 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9559 }
9560 return 0;
9561}
9562
9563/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9564 is not legal. */
0ecb72a5 9565static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9566{
b0109805 9567 uint32_t insn, imm, shift, offset;
9ee6e8bb 9568 uint32_t rd, rn, rm, rs;
39d5492a
PM
9569 TCGv_i32 tmp;
9570 TCGv_i32 tmp2;
9571 TCGv_i32 tmp3;
9572 TCGv_i32 addr;
a7812ae4 9573 TCGv_i64 tmp64;
9ee6e8bb
PB
9574 int op;
9575 int shiftop;
9576 int conds;
9577 int logic_cc;
9578
d614a513
PM
9579 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9580 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9581 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9582 16-bit instructions to get correct prefetch abort behavior. */
9583 insn = insn_hw1;
9584 if ((insn & (1 << 12)) == 0) {
be5e7a76 9585 ARCH(5);
9ee6e8bb
PB
9586 /* Second half of blx. */
9587 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9588 tmp = load_reg(s, 14);
9589 tcg_gen_addi_i32(tmp, tmp, offset);
9590 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9591
7d1b0095 9592 tmp2 = tcg_temp_new_i32();
b0109805 9593 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9594 store_reg(s, 14, tmp2);
9595 gen_bx(s, tmp);
9ee6e8bb
PB
9596 return 0;
9597 }
9598 if (insn & (1 << 11)) {
9599 /* Second half of bl. */
9600 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9601 tmp = load_reg(s, 14);
6a0d8a1d 9602 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9603
7d1b0095 9604 tmp2 = tcg_temp_new_i32();
b0109805 9605 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9606 store_reg(s, 14, tmp2);
9607 gen_bx(s, tmp);
9ee6e8bb
PB
9608 return 0;
9609 }
9610 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9611 /* Instruction spans a page boundary. Implement it as two
9612 16-bit instructions in case the second half causes an
9613 prefetch abort. */
9614 offset = ((int32_t)insn << 21) >> 9;
396e467c 9615 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9616 return 0;
9617 }
9618 /* Fall through to 32-bit decode. */
9619 }
9620
f9fd40eb 9621 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9ee6e8bb
PB
9622 s->pc += 2;
9623 insn |= (uint32_t)insn_hw1 << 16;
9624
9625 if ((insn & 0xf800e800) != 0xf000e800) {
9626 ARCH(6T2);
9627 }
9628
9629 rn = (insn >> 16) & 0xf;
9630 rs = (insn >> 12) & 0xf;
9631 rd = (insn >> 8) & 0xf;
9632 rm = insn & 0xf;
9633 switch ((insn >> 25) & 0xf) {
9634 case 0: case 1: case 2: case 3:
9635 /* 16-bit instructions. Should never happen. */
9636 abort();
9637 case 4:
9638 if (insn & (1 << 22)) {
9639 /* Other load/store, table branch. */
9640 if (insn & 0x01200000) {
9641 /* Load/store doubleword. */
9642 if (rn == 15) {
7d1b0095 9643 addr = tcg_temp_new_i32();
b0109805 9644 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9645 } else {
b0109805 9646 addr = load_reg(s, rn);
9ee6e8bb
PB
9647 }
9648 offset = (insn & 0xff) * 4;
9649 if ((insn & (1 << 23)) == 0)
9650 offset = -offset;
9651 if (insn & (1 << 24)) {
b0109805 9652 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9653 offset = 0;
9654 }
9655 if (insn & (1 << 20)) {
9656 /* ldrd */
e2592fad 9657 tmp = tcg_temp_new_i32();
12dcc321 9658 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9659 store_reg(s, rs, tmp);
9660 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9661 tmp = tcg_temp_new_i32();
12dcc321 9662 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9663 store_reg(s, rd, tmp);
9ee6e8bb
PB
9664 } else {
9665 /* strd */
b0109805 9666 tmp = load_reg(s, rs);
12dcc321 9667 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9668 tcg_temp_free_i32(tmp);
b0109805
PB
9669 tcg_gen_addi_i32(addr, addr, 4);
9670 tmp = load_reg(s, rd);
12dcc321 9671 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9672 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9673 }
9674 if (insn & (1 << 21)) {
9675 /* Base writeback. */
9676 if (rn == 15)
9677 goto illegal_op;
b0109805
PB
9678 tcg_gen_addi_i32(addr, addr, offset - 4);
9679 store_reg(s, rn, addr);
9680 } else {
7d1b0095 9681 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9682 }
9683 } else if ((insn & (1 << 23)) == 0) {
9684 /* Load/store exclusive word. */
39d5492a 9685 addr = tcg_temp_local_new_i32();
98a46317 9686 load_reg_var(s, addr, rn);
426f5abc 9687 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9688 if (insn & (1 << 20)) {
426f5abc 9689 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9690 } else {
426f5abc 9691 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9692 }
39d5492a 9693 tcg_temp_free_i32(addr);
2359bf80 9694 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9695 /* Table Branch. */
9696 if (rn == 15) {
7d1b0095 9697 addr = tcg_temp_new_i32();
b0109805 9698 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9699 } else {
b0109805 9700 addr = load_reg(s, rn);
9ee6e8bb 9701 }
b26eefb6 9702 tmp = load_reg(s, rm);
b0109805 9703 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9704 if (insn & (1 << 4)) {
9705 /* tbh */
b0109805 9706 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9707 tcg_temp_free_i32(tmp);
e2592fad 9708 tmp = tcg_temp_new_i32();
12dcc321 9709 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9710 } else { /* tbb */
7d1b0095 9711 tcg_temp_free_i32(tmp);
e2592fad 9712 tmp = tcg_temp_new_i32();
12dcc321 9713 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9714 }
7d1b0095 9715 tcg_temp_free_i32(addr);
b0109805
PB
9716 tcg_gen_shli_i32(tmp, tmp, 1);
9717 tcg_gen_addi_i32(tmp, tmp, s->pc);
9718 store_reg(s, 15, tmp);
9ee6e8bb 9719 } else {
2359bf80 9720 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9721 op = (insn >> 4) & 0x3;
2359bf80
MR
9722 switch (op2) {
9723 case 0:
426f5abc 9724 goto illegal_op;
2359bf80
MR
9725 case 1:
9726 /* Load/store exclusive byte/halfword/doubleword */
9727 if (op == 2) {
9728 goto illegal_op;
9729 }
9730 ARCH(7);
9731 break;
9732 case 2:
9733 /* Load-acquire/store-release */
9734 if (op == 3) {
9735 goto illegal_op;
9736 }
9737 /* Fall through */
9738 case 3:
9739 /* Load-acquire/store-release exclusive */
9740 ARCH(8);
9741 break;
426f5abc 9742 }
39d5492a 9743 addr = tcg_temp_local_new_i32();
98a46317 9744 load_reg_var(s, addr, rn);
2359bf80
MR
9745 if (!(op2 & 1)) {
9746 if (insn & (1 << 20)) {
9747 tmp = tcg_temp_new_i32();
9748 switch (op) {
9749 case 0: /* ldab */
12dcc321 9750 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9751 break;
9752 case 1: /* ldah */
12dcc321 9753 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9754 break;
9755 case 2: /* lda */
12dcc321 9756 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9757 break;
9758 default:
9759 abort();
9760 }
9761 store_reg(s, rs, tmp);
9762 } else {
9763 tmp = load_reg(s, rs);
9764 switch (op) {
9765 case 0: /* stlb */
12dcc321 9766 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9767 break;
9768 case 1: /* stlh */
12dcc321 9769 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9770 break;
9771 case 2: /* stl */
12dcc321 9772 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9773 break;
9774 default:
9775 abort();
9776 }
9777 tcg_temp_free_i32(tmp);
9778 }
9779 } else if (insn & (1 << 20)) {
426f5abc 9780 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9781 } else {
426f5abc 9782 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9783 }
39d5492a 9784 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9785 }
9786 } else {
9787 /* Load/store multiple, RFE, SRS. */
9788 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9789 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9790 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9791 goto illegal_op;
00115976 9792 }
9ee6e8bb
PB
9793 if (insn & (1 << 20)) {
9794 /* rfe */
b0109805
PB
9795 addr = load_reg(s, rn);
9796 if ((insn & (1 << 24)) == 0)
9797 tcg_gen_addi_i32(addr, addr, -8);
9798 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9799 tmp = tcg_temp_new_i32();
12dcc321 9800 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9801 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9802 tmp2 = tcg_temp_new_i32();
12dcc321 9803 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9804 if (insn & (1 << 21)) {
9805 /* Base writeback. */
b0109805
PB
9806 if (insn & (1 << 24)) {
9807 tcg_gen_addi_i32(addr, addr, 4);
9808 } else {
9809 tcg_gen_addi_i32(addr, addr, -4);
9810 }
9811 store_reg(s, rn, addr);
9812 } else {
7d1b0095 9813 tcg_temp_free_i32(addr);
9ee6e8bb 9814 }
b0109805 9815 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9816 } else {
9817 /* srs */
81465888
PM
9818 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9819 insn & (1 << 21));
9ee6e8bb
PB
9820 }
9821 } else {
5856d44e 9822 int i, loaded_base = 0;
39d5492a 9823 TCGv_i32 loaded_var;
9ee6e8bb 9824 /* Load/store multiple. */
b0109805 9825 addr = load_reg(s, rn);
9ee6e8bb
PB
9826 offset = 0;
9827 for (i = 0; i < 16; i++) {
9828 if (insn & (1 << i))
9829 offset += 4;
9830 }
9831 if (insn & (1 << 24)) {
b0109805 9832 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9833 }
9834
39d5492a 9835 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9836 for (i = 0; i < 16; i++) {
9837 if ((insn & (1 << i)) == 0)
9838 continue;
9839 if (insn & (1 << 20)) {
9840 /* Load. */
e2592fad 9841 tmp = tcg_temp_new_i32();
12dcc321 9842 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9843 if (i == 15) {
b0109805 9844 gen_bx(s, tmp);
5856d44e
YO
9845 } else if (i == rn) {
9846 loaded_var = tmp;
9847 loaded_base = 1;
9ee6e8bb 9848 } else {
b0109805 9849 store_reg(s, i, tmp);
9ee6e8bb
PB
9850 }
9851 } else {
9852 /* Store. */
b0109805 9853 tmp = load_reg(s, i);
12dcc321 9854 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9855 tcg_temp_free_i32(tmp);
9ee6e8bb 9856 }
b0109805 9857 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9858 }
5856d44e
YO
9859 if (loaded_base) {
9860 store_reg(s, rn, loaded_var);
9861 }
9ee6e8bb
PB
9862 if (insn & (1 << 21)) {
9863 /* Base register writeback. */
9864 if (insn & (1 << 24)) {
b0109805 9865 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9866 }
9867 /* Fault if writeback register is in register list. */
9868 if (insn & (1 << rn))
9869 goto illegal_op;
b0109805
PB
9870 store_reg(s, rn, addr);
9871 } else {
7d1b0095 9872 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9873 }
9874 }
9875 }
9876 break;
2af9ab77
JB
9877 case 5:
9878
9ee6e8bb 9879 op = (insn >> 21) & 0xf;
2af9ab77 9880 if (op == 6) {
62b44f05
AR
9881 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9882 goto illegal_op;
9883 }
2af9ab77
JB
9884 /* Halfword pack. */
9885 tmp = load_reg(s, rn);
9886 tmp2 = load_reg(s, rm);
9887 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9888 if (insn & (1 << 5)) {
9889 /* pkhtb */
9890 if (shift == 0)
9891 shift = 31;
9892 tcg_gen_sari_i32(tmp2, tmp2, shift);
9893 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9894 tcg_gen_ext16u_i32(tmp2, tmp2);
9895 } else {
9896 /* pkhbt */
9897 if (shift)
9898 tcg_gen_shli_i32(tmp2, tmp2, shift);
9899 tcg_gen_ext16u_i32(tmp, tmp);
9900 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9901 }
9902 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9903 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9904 store_reg(s, rd, tmp);
9905 } else {
2af9ab77
JB
9906 /* Data processing register constant shift. */
9907 if (rn == 15) {
7d1b0095 9908 tmp = tcg_temp_new_i32();
2af9ab77
JB
9909 tcg_gen_movi_i32(tmp, 0);
9910 } else {
9911 tmp = load_reg(s, rn);
9912 }
9913 tmp2 = load_reg(s, rm);
9914
9915 shiftop = (insn >> 4) & 3;
9916 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9917 conds = (insn & (1 << 20)) != 0;
9918 logic_cc = (conds && thumb2_logic_op(op));
9919 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9920 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9921 goto illegal_op;
7d1b0095 9922 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9923 if (rd != 15) {
9924 store_reg(s, rd, tmp);
9925 } else {
7d1b0095 9926 tcg_temp_free_i32(tmp);
2af9ab77 9927 }
3174f8e9 9928 }
9ee6e8bb
PB
9929 break;
9930 case 13: /* Misc data processing. */
9931 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9932 if (op < 4 && (insn & 0xf000) != 0xf000)
9933 goto illegal_op;
9934 switch (op) {
9935 case 0: /* Register controlled shift. */
8984bd2e
PB
9936 tmp = load_reg(s, rn);
9937 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9938 if ((insn & 0x70) != 0)
9939 goto illegal_op;
9940 op = (insn >> 21) & 3;
8984bd2e
PB
9941 logic_cc = (insn & (1 << 20)) != 0;
9942 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9943 if (logic_cc)
9944 gen_logic_CC(tmp);
7dcc1f89 9945 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9946 break;
9947 case 1: /* Sign/zero extend. */
62b44f05
AR
9948 op = (insn >> 20) & 7;
9949 switch (op) {
9950 case 0: /* SXTAH, SXTH */
9951 case 1: /* UXTAH, UXTH */
9952 case 4: /* SXTAB, SXTB */
9953 case 5: /* UXTAB, UXTB */
9954 break;
9955 case 2: /* SXTAB16, SXTB16 */
9956 case 3: /* UXTAB16, UXTB16 */
9957 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9958 goto illegal_op;
9959 }
9960 break;
9961 default:
9962 goto illegal_op;
9963 }
9964 if (rn != 15) {
9965 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9966 goto illegal_op;
9967 }
9968 }
5e3f878a 9969 tmp = load_reg(s, rm);
9ee6e8bb 9970 shift = (insn >> 4) & 3;
1301f322 9971 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9972 rotate, a shift is sufficient. */
9973 if (shift != 0)
f669df27 9974 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9975 op = (insn >> 20) & 7;
9976 switch (op) {
5e3f878a
PB
9977 case 0: gen_sxth(tmp); break;
9978 case 1: gen_uxth(tmp); break;
9979 case 2: gen_sxtb16(tmp); break;
9980 case 3: gen_uxtb16(tmp); break;
9981 case 4: gen_sxtb(tmp); break;
9982 case 5: gen_uxtb(tmp); break;
62b44f05
AR
9983 default:
9984 g_assert_not_reached();
9ee6e8bb
PB
9985 }
9986 if (rn != 15) {
5e3f878a 9987 tmp2 = load_reg(s, rn);
9ee6e8bb 9988 if ((op >> 1) == 1) {
5e3f878a 9989 gen_add16(tmp, tmp2);
9ee6e8bb 9990 } else {
5e3f878a 9991 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9992 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9993 }
9994 }
5e3f878a 9995 store_reg(s, rd, tmp);
9ee6e8bb
PB
9996 break;
9997 case 2: /* SIMD add/subtract. */
62b44f05
AR
9998 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9999 goto illegal_op;
10000 }
9ee6e8bb
PB
10001 op = (insn >> 20) & 7;
10002 shift = (insn >> 4) & 7;
10003 if ((op & 3) == 3 || (shift & 3) == 3)
10004 goto illegal_op;
6ddbc6e4
PB
10005 tmp = load_reg(s, rn);
10006 tmp2 = load_reg(s, rm);
10007 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10008 tcg_temp_free_i32(tmp2);
6ddbc6e4 10009 store_reg(s, rd, tmp);
9ee6e8bb
PB
10010 break;
10011 case 3: /* Other data processing. */
10012 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10013 if (op < 4) {
10014 /* Saturating add/subtract. */
62b44f05
AR
10015 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10016 goto illegal_op;
10017 }
d9ba4830
PB
10018 tmp = load_reg(s, rn);
10019 tmp2 = load_reg(s, rm);
9ee6e8bb 10020 if (op & 1)
9ef39277 10021 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10022 if (op & 2)
9ef39277 10023 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10024 else
9ef39277 10025 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10026 tcg_temp_free_i32(tmp2);
9ee6e8bb 10027 } else {
62b44f05
AR
10028 switch (op) {
10029 case 0x0a: /* rbit */
10030 case 0x08: /* rev */
10031 case 0x09: /* rev16 */
10032 case 0x0b: /* revsh */
10033 case 0x18: /* clz */
10034 break;
10035 case 0x10: /* sel */
10036 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10037 goto illegal_op;
10038 }
10039 break;
10040 case 0x20: /* crc32/crc32c */
10041 case 0x21:
10042 case 0x22:
10043 case 0x28:
10044 case 0x29:
10045 case 0x2a:
10046 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10047 goto illegal_op;
10048 }
10049 break;
10050 default:
10051 goto illegal_op;
10052 }
d9ba4830 10053 tmp = load_reg(s, rn);
9ee6e8bb
PB
10054 switch (op) {
10055 case 0x0a: /* rbit */
d9ba4830 10056 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10057 break;
10058 case 0x08: /* rev */
66896cb8 10059 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10060 break;
10061 case 0x09: /* rev16 */
d9ba4830 10062 gen_rev16(tmp);
9ee6e8bb
PB
10063 break;
10064 case 0x0b: /* revsh */
d9ba4830 10065 gen_revsh(tmp);
9ee6e8bb
PB
10066 break;
10067 case 0x10: /* sel */
d9ba4830 10068 tmp2 = load_reg(s, rm);
7d1b0095 10069 tmp3 = tcg_temp_new_i32();
0ecb72a5 10070 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10071 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10072 tcg_temp_free_i32(tmp3);
10073 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10074 break;
10075 case 0x18: /* clz */
d9ba4830 10076 gen_helper_clz(tmp, tmp);
9ee6e8bb 10077 break;
eb0ecd5a
WN
10078 case 0x20:
10079 case 0x21:
10080 case 0x22:
10081 case 0x28:
10082 case 0x29:
10083 case 0x2a:
10084 {
10085 /* crc32/crc32c */
10086 uint32_t sz = op & 0x3;
10087 uint32_t c = op & 0x8;
10088
eb0ecd5a 10089 tmp2 = load_reg(s, rm);
aa633469
PM
10090 if (sz == 0) {
10091 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10092 } else if (sz == 1) {
10093 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10094 }
eb0ecd5a
WN
10095 tmp3 = tcg_const_i32(1 << sz);
10096 if (c) {
10097 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10098 } else {
10099 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10100 }
10101 tcg_temp_free_i32(tmp2);
10102 tcg_temp_free_i32(tmp3);
10103 break;
10104 }
9ee6e8bb 10105 default:
62b44f05 10106 g_assert_not_reached();
9ee6e8bb
PB
10107 }
10108 }
d9ba4830 10109 store_reg(s, rd, tmp);
9ee6e8bb
PB
10110 break;
10111 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10112 switch ((insn >> 20) & 7) {
10113 case 0: /* 32 x 32 -> 32 */
10114 case 7: /* Unsigned sum of absolute differences. */
10115 break;
10116 case 1: /* 16 x 16 -> 32 */
10117 case 2: /* Dual multiply add. */
10118 case 3: /* 32 * 16 -> 32msb */
10119 case 4: /* Dual multiply subtract. */
10120 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10121 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10122 goto illegal_op;
10123 }
10124 break;
10125 }
9ee6e8bb 10126 op = (insn >> 4) & 0xf;
d9ba4830
PB
10127 tmp = load_reg(s, rn);
10128 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10129 switch ((insn >> 20) & 7) {
10130 case 0: /* 32 x 32 -> 32 */
d9ba4830 10131 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10132 tcg_temp_free_i32(tmp2);
9ee6e8bb 10133 if (rs != 15) {
d9ba4830 10134 tmp2 = load_reg(s, rs);
9ee6e8bb 10135 if (op)
d9ba4830 10136 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10137 else
d9ba4830 10138 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10139 tcg_temp_free_i32(tmp2);
9ee6e8bb 10140 }
9ee6e8bb
PB
10141 break;
10142 case 1: /* 16 x 16 -> 32 */
d9ba4830 10143 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10144 tcg_temp_free_i32(tmp2);
9ee6e8bb 10145 if (rs != 15) {
d9ba4830 10146 tmp2 = load_reg(s, rs);
9ef39277 10147 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10148 tcg_temp_free_i32(tmp2);
9ee6e8bb 10149 }
9ee6e8bb
PB
10150 break;
10151 case 2: /* Dual multiply add. */
10152 case 4: /* Dual multiply subtract. */
10153 if (op)
d9ba4830
PB
10154 gen_swap_half(tmp2);
10155 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10156 if (insn & (1 << 22)) {
e1d177b9 10157 /* This subtraction cannot overflow. */
d9ba4830 10158 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10159 } else {
e1d177b9
PM
10160 /* This addition cannot overflow 32 bits;
10161 * however it may overflow considered as a signed
10162 * operation, in which case we must set the Q flag.
10163 */
9ef39277 10164 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10165 }
7d1b0095 10166 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10167 if (rs != 15)
10168 {
d9ba4830 10169 tmp2 = load_reg(s, rs);
9ef39277 10170 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10171 tcg_temp_free_i32(tmp2);
9ee6e8bb 10172 }
9ee6e8bb
PB
10173 break;
10174 case 3: /* 32 * 16 -> 32msb */
10175 if (op)
d9ba4830 10176 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10177 else
d9ba4830 10178 gen_sxth(tmp2);
a7812ae4
PB
10179 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10180 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10181 tmp = tcg_temp_new_i32();
ecc7b3aa 10182 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10183 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10184 if (rs != 15)
10185 {
d9ba4830 10186 tmp2 = load_reg(s, rs);
9ef39277 10187 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10188 tcg_temp_free_i32(tmp2);
9ee6e8bb 10189 }
9ee6e8bb 10190 break;
838fa72d
AJ
10191 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10192 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10193 if (rs != 15) {
838fa72d
AJ
10194 tmp = load_reg(s, rs);
10195 if (insn & (1 << 20)) {
10196 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10197 } else {
838fa72d 10198 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10199 }
2c0262af 10200 }
838fa72d
AJ
10201 if (insn & (1 << 4)) {
10202 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10203 }
10204 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10205 tmp = tcg_temp_new_i32();
ecc7b3aa 10206 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10207 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10208 break;
10209 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10210 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10211 tcg_temp_free_i32(tmp2);
9ee6e8bb 10212 if (rs != 15) {
d9ba4830
PB
10213 tmp2 = load_reg(s, rs);
10214 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10215 tcg_temp_free_i32(tmp2);
5fd46862 10216 }
9ee6e8bb 10217 break;
2c0262af 10218 }
d9ba4830 10219 store_reg(s, rd, tmp);
2c0262af 10220 break;
9ee6e8bb
PB
10221 case 6: case 7: /* 64-bit multiply, Divide. */
10222 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10223 tmp = load_reg(s, rn);
10224 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10225 if ((op & 0x50) == 0x10) {
10226 /* sdiv, udiv */
d614a513 10227 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10228 goto illegal_op;
47789990 10229 }
9ee6e8bb 10230 if (op & 0x20)
5e3f878a 10231 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10232 else
5e3f878a 10233 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10234 tcg_temp_free_i32(tmp2);
5e3f878a 10235 store_reg(s, rd, tmp);
9ee6e8bb
PB
10236 } else if ((op & 0xe) == 0xc) {
10237 /* Dual multiply accumulate long. */
62b44f05
AR
10238 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10239 tcg_temp_free_i32(tmp);
10240 tcg_temp_free_i32(tmp2);
10241 goto illegal_op;
10242 }
9ee6e8bb 10243 if (op & 1)
5e3f878a
PB
10244 gen_swap_half(tmp2);
10245 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10246 if (op & 0x10) {
5e3f878a 10247 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10248 } else {
5e3f878a 10249 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10250 }
7d1b0095 10251 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10252 /* BUGFIX */
10253 tmp64 = tcg_temp_new_i64();
10254 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10255 tcg_temp_free_i32(tmp);
a7812ae4
PB
10256 gen_addq(s, tmp64, rs, rd);
10257 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10258 tcg_temp_free_i64(tmp64);
2c0262af 10259 } else {
9ee6e8bb
PB
10260 if (op & 0x20) {
10261 /* Unsigned 64-bit multiply */
a7812ae4 10262 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10263 } else {
9ee6e8bb
PB
10264 if (op & 8) {
10265 /* smlalxy */
62b44f05
AR
10266 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10267 tcg_temp_free_i32(tmp2);
10268 tcg_temp_free_i32(tmp);
10269 goto illegal_op;
10270 }
5e3f878a 10271 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10272 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10273 tmp64 = tcg_temp_new_i64();
10274 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10275 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10276 } else {
10277 /* Signed 64-bit multiply */
a7812ae4 10278 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10279 }
b5ff1b31 10280 }
9ee6e8bb
PB
10281 if (op & 4) {
10282 /* umaal */
62b44f05
AR
10283 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10284 tcg_temp_free_i64(tmp64);
10285 goto illegal_op;
10286 }
a7812ae4
PB
10287 gen_addq_lo(s, tmp64, rs);
10288 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10289 } else if (op & 0x40) {
10290 /* 64-bit accumulate. */
a7812ae4 10291 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10292 }
a7812ae4 10293 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10294 tcg_temp_free_i64(tmp64);
5fd46862 10295 }
2c0262af 10296 break;
9ee6e8bb
PB
10297 }
10298 break;
10299 case 6: case 7: case 14: case 15:
10300 /* Coprocessor. */
10301 if (((insn >> 24) & 3) == 3) {
10302 /* Translate into the equivalent ARM encoding. */
f06053e3 10303 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10304 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10305 goto illegal_op;
7dcc1f89 10306 }
6a57f3eb 10307 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10308 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10309 goto illegal_op;
10310 }
9ee6e8bb
PB
10311 } else {
10312 if (insn & (1 << 28))
10313 goto illegal_op;
7dcc1f89 10314 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10315 goto illegal_op;
7dcc1f89 10316 }
9ee6e8bb
PB
10317 }
10318 break;
10319 case 8: case 9: case 10: case 11:
10320 if (insn & (1 << 15)) {
10321 /* Branches, misc control. */
10322 if (insn & 0x5000) {
10323 /* Unconditional branch. */
10324 /* signextend(hw1[10:0]) -> offset[:12]. */
10325 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10326 /* hw1[10:0] -> offset[11:1]. */
10327 offset |= (insn & 0x7ff) << 1;
10328 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10329 offset[24:22] already have the same value because of the
10330 sign extension above. */
10331 offset ^= ((~insn) & (1 << 13)) << 10;
10332 offset ^= ((~insn) & (1 << 11)) << 11;
10333
9ee6e8bb
PB
10334 if (insn & (1 << 14)) {
10335 /* Branch and link. */
3174f8e9 10336 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10337 }
3b46e624 10338
b0109805 10339 offset += s->pc;
9ee6e8bb
PB
10340 if (insn & (1 << 12)) {
10341 /* b/bl */
b0109805 10342 gen_jmp(s, offset);
9ee6e8bb
PB
10343 } else {
10344 /* blx */
b0109805 10345 offset &= ~(uint32_t)2;
be5e7a76 10346 /* thumb2 bx, no need to check */
b0109805 10347 gen_bx_im(s, offset);
2c0262af 10348 }
9ee6e8bb
PB
10349 } else if (((insn >> 23) & 7) == 7) {
10350 /* Misc control */
10351 if (insn & (1 << 13))
10352 goto illegal_op;
10353
10354 if (insn & (1 << 26)) {
37e6456e
PM
10355 if (!(insn & (1 << 20))) {
10356 /* Hypervisor call (v7) */
10357 int imm16 = extract32(insn, 16, 4) << 12
10358 | extract32(insn, 0, 12);
10359 ARCH(7);
10360 if (IS_USER(s)) {
10361 goto illegal_op;
10362 }
10363 gen_hvc(s, imm16);
10364 } else {
10365 /* Secure monitor call (v6+) */
10366 ARCH(6K);
10367 if (IS_USER(s)) {
10368 goto illegal_op;
10369 }
10370 gen_smc(s);
10371 }
2c0262af 10372 } else {
9ee6e8bb
PB
10373 op = (insn >> 20) & 7;
10374 switch (op) {
10375 case 0: /* msr cpsr. */
b53d8923 10376 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10377 tmp = load_reg(s, rn);
10378 addr = tcg_const_i32(insn & 0xff);
10379 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10380 tcg_temp_free_i32(addr);
7d1b0095 10381 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10382 gen_lookup_tb(s);
10383 break;
10384 }
10385 /* fall through */
10386 case 1: /* msr spsr. */
b53d8923 10387 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10388 goto illegal_op;
b53d8923 10389 }
8bfd0550
PM
10390
10391 if (extract32(insn, 5, 1)) {
10392 /* MSR (banked) */
10393 int sysm = extract32(insn, 8, 4) |
10394 (extract32(insn, 4, 1) << 4);
10395 int r = op & 1;
10396
10397 gen_msr_banked(s, r, sysm, rm);
10398 break;
10399 }
10400
10401 /* MSR (for PSRs) */
2fbac54b
FN
10402 tmp = load_reg(s, rn);
10403 if (gen_set_psr(s,
7dcc1f89 10404 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10405 op == 1, tmp))
9ee6e8bb
PB
10406 goto illegal_op;
10407 break;
10408 case 2: /* cps, nop-hint. */
10409 if (((insn >> 8) & 7) == 0) {
10410 gen_nop_hint(s, insn & 0xff);
10411 }
10412 /* Implemented as NOP in user mode. */
10413 if (IS_USER(s))
10414 break;
10415 offset = 0;
10416 imm = 0;
10417 if (insn & (1 << 10)) {
10418 if (insn & (1 << 7))
10419 offset |= CPSR_A;
10420 if (insn & (1 << 6))
10421 offset |= CPSR_I;
10422 if (insn & (1 << 5))
10423 offset |= CPSR_F;
10424 if (insn & (1 << 9))
10425 imm = CPSR_A | CPSR_I | CPSR_F;
10426 }
10427 if (insn & (1 << 8)) {
10428 offset |= 0x1f;
10429 imm |= (insn & 0x1f);
10430 }
10431 if (offset) {
2fbac54b 10432 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10433 }
10434 break;
10435 case 3: /* Special control operations. */
426f5abc 10436 ARCH(7);
9ee6e8bb
PB
10437 op = (insn >> 4) & 0xf;
10438 switch (op) {
10439 case 2: /* clrex */
426f5abc 10440 gen_clrex(s);
9ee6e8bb
PB
10441 break;
10442 case 4: /* dsb */
10443 case 5: /* dmb */
61e4c432 10444 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10445 break;
6df99dec
SS
10446 case 6: /* isb */
10447 /* We need to break the TB after this insn
10448 * to execute self-modifying code correctly
10449 * and also to take any pending interrupts
10450 * immediately.
10451 */
10452 gen_lookup_tb(s);
10453 break;
9ee6e8bb
PB
10454 default:
10455 goto illegal_op;
10456 }
10457 break;
10458 case 4: /* bxj */
10459 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
10460 tmp = load_reg(s, rn);
10461 gen_bx(s, tmp);
9ee6e8bb
PB
10462 break;
10463 case 5: /* Exception return. */
b8b45b68
RV
10464 if (IS_USER(s)) {
10465 goto illegal_op;
10466 }
10467 if (rn != 14 || rd != 15) {
10468 goto illegal_op;
10469 }
10470 tmp = load_reg(s, rn);
10471 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10472 gen_exception_return(s, tmp);
10473 break;
8bfd0550
PM
10474 case 6: /* MRS */
10475 if (extract32(insn, 5, 1)) {
10476 /* MRS (banked) */
10477 int sysm = extract32(insn, 16, 4) |
10478 (extract32(insn, 4, 1) << 4);
10479
10480 gen_mrs_banked(s, 0, sysm, rd);
10481 break;
10482 }
10483
10484 /* mrs cpsr */
7d1b0095 10485 tmp = tcg_temp_new_i32();
b53d8923 10486 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10487 addr = tcg_const_i32(insn & 0xff);
10488 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10489 tcg_temp_free_i32(addr);
9ee6e8bb 10490 } else {
9ef39277 10491 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10492 }
8984bd2e 10493 store_reg(s, rd, tmp);
9ee6e8bb 10494 break;
8bfd0550
PM
10495 case 7: /* MRS */
10496 if (extract32(insn, 5, 1)) {
10497 /* MRS (banked) */
10498 int sysm = extract32(insn, 16, 4) |
10499 (extract32(insn, 4, 1) << 4);
10500
10501 gen_mrs_banked(s, 1, sysm, rd);
10502 break;
10503 }
10504
10505 /* mrs spsr. */
9ee6e8bb 10506 /* Not accessible in user mode. */
b53d8923 10507 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10508 goto illegal_op;
b53d8923 10509 }
d9ba4830
PB
10510 tmp = load_cpu_field(spsr);
10511 store_reg(s, rd, tmp);
9ee6e8bb 10512 break;
2c0262af
FB
10513 }
10514 }
9ee6e8bb
PB
10515 } else {
10516 /* Conditional branch. */
10517 op = (insn >> 22) & 0xf;
10518 /* Generate a conditional jump to next instruction. */
10519 s->condlabel = gen_new_label();
39fb730a 10520 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10521 s->condjmp = 1;
10522
10523 /* offset[11:1] = insn[10:0] */
10524 offset = (insn & 0x7ff) << 1;
10525 /* offset[17:12] = insn[21:16]. */
10526 offset |= (insn & 0x003f0000) >> 4;
10527 /* offset[31:20] = insn[26]. */
10528 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10529 /* offset[18] = insn[13]. */
10530 offset |= (insn & (1 << 13)) << 5;
10531 /* offset[19] = insn[11]. */
10532 offset |= (insn & (1 << 11)) << 8;
10533
10534 /* jump to the offset */
b0109805 10535 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10536 }
10537 } else {
10538 /* Data processing immediate. */
10539 if (insn & (1 << 25)) {
10540 if (insn & (1 << 24)) {
10541 if (insn & (1 << 20))
10542 goto illegal_op;
10543 /* Bitfield/Saturate. */
10544 op = (insn >> 21) & 7;
10545 imm = insn & 0x1f;
10546 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10547 if (rn == 15) {
7d1b0095 10548 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10549 tcg_gen_movi_i32(tmp, 0);
10550 } else {
10551 tmp = load_reg(s, rn);
10552 }
9ee6e8bb
PB
10553 switch (op) {
10554 case 2: /* Signed bitfield extract. */
10555 imm++;
10556 if (shift + imm > 32)
10557 goto illegal_op;
10558 if (imm < 32)
6ddbc6e4 10559 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
10560 break;
10561 case 6: /* Unsigned bitfield extract. */
10562 imm++;
10563 if (shift + imm > 32)
10564 goto illegal_op;
10565 if (imm < 32)
6ddbc6e4 10566 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
10567 break;
10568 case 3: /* Bitfield insert/clear. */
10569 if (imm < shift)
10570 goto illegal_op;
10571 imm = imm + 1 - shift;
10572 if (imm != 32) {
6ddbc6e4 10573 tmp2 = load_reg(s, rd);
d593c48e 10574 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10575 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10576 }
10577 break;
10578 case 7:
10579 goto illegal_op;
10580 default: /* Saturate. */
9ee6e8bb
PB
10581 if (shift) {
10582 if (op & 1)
6ddbc6e4 10583 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10584 else
6ddbc6e4 10585 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10586 }
6ddbc6e4 10587 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10588 if (op & 4) {
10589 /* Unsigned. */
62b44f05
AR
10590 if ((op & 1) && shift == 0) {
10591 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10592 tcg_temp_free_i32(tmp);
10593 tcg_temp_free_i32(tmp2);
10594 goto illegal_op;
10595 }
9ef39277 10596 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10597 } else {
9ef39277 10598 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10599 }
2c0262af 10600 } else {
9ee6e8bb 10601 /* Signed. */
62b44f05
AR
10602 if ((op & 1) && shift == 0) {
10603 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10604 tcg_temp_free_i32(tmp);
10605 tcg_temp_free_i32(tmp2);
10606 goto illegal_op;
10607 }
9ef39277 10608 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10609 } else {
9ef39277 10610 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10611 }
2c0262af 10612 }
b75263d6 10613 tcg_temp_free_i32(tmp2);
9ee6e8bb 10614 break;
2c0262af 10615 }
6ddbc6e4 10616 store_reg(s, rd, tmp);
9ee6e8bb
PB
10617 } else {
10618 imm = ((insn & 0x04000000) >> 15)
10619 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10620 if (insn & (1 << 22)) {
10621 /* 16-bit immediate. */
10622 imm |= (insn >> 4) & 0xf000;
10623 if (insn & (1 << 23)) {
10624 /* movt */
5e3f878a 10625 tmp = load_reg(s, rd);
86831435 10626 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10627 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10628 } else {
9ee6e8bb 10629 /* movw */
7d1b0095 10630 tmp = tcg_temp_new_i32();
5e3f878a 10631 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10632 }
10633 } else {
9ee6e8bb
PB
10634 /* Add/sub 12-bit immediate. */
10635 if (rn == 15) {
b0109805 10636 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10637 if (insn & (1 << 23))
b0109805 10638 offset -= imm;
9ee6e8bb 10639 else
b0109805 10640 offset += imm;
7d1b0095 10641 tmp = tcg_temp_new_i32();
5e3f878a 10642 tcg_gen_movi_i32(tmp, offset);
2c0262af 10643 } else {
5e3f878a 10644 tmp = load_reg(s, rn);
9ee6e8bb 10645 if (insn & (1 << 23))
5e3f878a 10646 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10647 else
5e3f878a 10648 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10649 }
9ee6e8bb 10650 }
5e3f878a 10651 store_reg(s, rd, tmp);
191abaa2 10652 }
9ee6e8bb
PB
10653 } else {
10654 int shifter_out = 0;
10655 /* modified 12-bit immediate. */
10656 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10657 imm = (insn & 0xff);
10658 switch (shift) {
10659 case 0: /* XY */
10660 /* Nothing to do. */
10661 break;
10662 case 1: /* 00XY00XY */
10663 imm |= imm << 16;
10664 break;
10665 case 2: /* XY00XY00 */
10666 imm |= imm << 16;
10667 imm <<= 8;
10668 break;
10669 case 3: /* XYXYXYXY */
10670 imm |= imm << 16;
10671 imm |= imm << 8;
10672 break;
10673 default: /* Rotated constant. */
10674 shift = (shift << 1) | (imm >> 7);
10675 imm |= 0x80;
10676 imm = imm << (32 - shift);
10677 shifter_out = 1;
10678 break;
b5ff1b31 10679 }
7d1b0095 10680 tmp2 = tcg_temp_new_i32();
3174f8e9 10681 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10682 rn = (insn >> 16) & 0xf;
3174f8e9 10683 if (rn == 15) {
7d1b0095 10684 tmp = tcg_temp_new_i32();
3174f8e9
FN
10685 tcg_gen_movi_i32(tmp, 0);
10686 } else {
10687 tmp = load_reg(s, rn);
10688 }
9ee6e8bb
PB
10689 op = (insn >> 21) & 0xf;
10690 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10691 shifter_out, tmp, tmp2))
9ee6e8bb 10692 goto illegal_op;
7d1b0095 10693 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10694 rd = (insn >> 8) & 0xf;
10695 if (rd != 15) {
3174f8e9
FN
10696 store_reg(s, rd, tmp);
10697 } else {
7d1b0095 10698 tcg_temp_free_i32(tmp);
2c0262af 10699 }
2c0262af 10700 }
9ee6e8bb
PB
10701 }
10702 break;
10703 case 12: /* Load/store single data item. */
10704 {
10705 int postinc = 0;
10706 int writeback = 0;
a99caa48 10707 int memidx;
9ee6e8bb 10708 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10709 if (disas_neon_ls_insn(s, insn)) {
c1713132 10710 goto illegal_op;
7dcc1f89 10711 }
9ee6e8bb
PB
10712 break;
10713 }
a2fdc890
PM
10714 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10715 if (rs == 15) {
10716 if (!(insn & (1 << 20))) {
10717 goto illegal_op;
10718 }
10719 if (op != 2) {
10720 /* Byte or halfword load space with dest == r15 : memory hints.
10721 * Catch them early so we don't emit pointless addressing code.
10722 * This space is a mix of:
10723 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10724 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10725 * cores)
10726 * unallocated hints, which must be treated as NOPs
10727 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10728 * which is easiest for the decoding logic
10729 * Some space which must UNDEF
10730 */
10731 int op1 = (insn >> 23) & 3;
10732 int op2 = (insn >> 6) & 0x3f;
10733 if (op & 2) {
10734 goto illegal_op;
10735 }
10736 if (rn == 15) {
02afbf64
PM
10737 /* UNPREDICTABLE, unallocated hint or
10738 * PLD/PLDW/PLI (literal)
10739 */
a2fdc890
PM
10740 return 0;
10741 }
10742 if (op1 & 1) {
02afbf64 10743 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10744 }
10745 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10746 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10747 }
10748 /* UNDEF space, or an UNPREDICTABLE */
10749 return 1;
10750 }
10751 }
a99caa48 10752 memidx = get_mem_index(s);
9ee6e8bb 10753 if (rn == 15) {
7d1b0095 10754 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10755 /* PC relative. */
10756 /* s->pc has already been incremented by 4. */
10757 imm = s->pc & 0xfffffffc;
10758 if (insn & (1 << 23))
10759 imm += insn & 0xfff;
10760 else
10761 imm -= insn & 0xfff;
b0109805 10762 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10763 } else {
b0109805 10764 addr = load_reg(s, rn);
9ee6e8bb
PB
10765 if (insn & (1 << 23)) {
10766 /* Positive offset. */
10767 imm = insn & 0xfff;
b0109805 10768 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10769 } else {
9ee6e8bb 10770 imm = insn & 0xff;
2a0308c5
PM
10771 switch ((insn >> 8) & 0xf) {
10772 case 0x0: /* Shifted Register. */
9ee6e8bb 10773 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10774 if (shift > 3) {
10775 tcg_temp_free_i32(addr);
18c9b560 10776 goto illegal_op;
2a0308c5 10777 }
b26eefb6 10778 tmp = load_reg(s, rm);
9ee6e8bb 10779 if (shift)
b26eefb6 10780 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10781 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10782 tcg_temp_free_i32(tmp);
9ee6e8bb 10783 break;
2a0308c5 10784 case 0xc: /* Negative offset. */
b0109805 10785 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10786 break;
2a0308c5 10787 case 0xe: /* User privilege. */
b0109805 10788 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10789 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10790 break;
2a0308c5 10791 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10792 imm = -imm;
10793 /* Fall through. */
2a0308c5 10794 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10795 postinc = 1;
10796 writeback = 1;
10797 break;
2a0308c5 10798 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10799 imm = -imm;
10800 /* Fall through. */
2a0308c5 10801 case 0xf: /* Pre-increment. */
b0109805 10802 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10803 writeback = 1;
10804 break;
10805 default:
2a0308c5 10806 tcg_temp_free_i32(addr);
b7bcbe95 10807 goto illegal_op;
9ee6e8bb
PB
10808 }
10809 }
10810 }
9ee6e8bb
PB
10811 if (insn & (1 << 20)) {
10812 /* Load. */
5a839c0d 10813 tmp = tcg_temp_new_i32();
a2fdc890 10814 switch (op) {
5a839c0d 10815 case 0:
12dcc321 10816 gen_aa32_ld8u(s, tmp, addr, memidx);
5a839c0d
PM
10817 break;
10818 case 4:
12dcc321 10819 gen_aa32_ld8s(s, tmp, addr, memidx);
5a839c0d
PM
10820 break;
10821 case 1:
12dcc321 10822 gen_aa32_ld16u(s, tmp, addr, memidx);
5a839c0d
PM
10823 break;
10824 case 5:
12dcc321 10825 gen_aa32_ld16s(s, tmp, addr, memidx);
5a839c0d
PM
10826 break;
10827 case 2:
12dcc321 10828 gen_aa32_ld32u(s, tmp, addr, memidx);
5a839c0d 10829 break;
2a0308c5 10830 default:
5a839c0d 10831 tcg_temp_free_i32(tmp);
2a0308c5
PM
10832 tcg_temp_free_i32(addr);
10833 goto illegal_op;
a2fdc890
PM
10834 }
10835 if (rs == 15) {
10836 gen_bx(s, tmp);
9ee6e8bb 10837 } else {
a2fdc890 10838 store_reg(s, rs, tmp);
9ee6e8bb
PB
10839 }
10840 } else {
10841 /* Store. */
b0109805 10842 tmp = load_reg(s, rs);
9ee6e8bb 10843 switch (op) {
5a839c0d 10844 case 0:
12dcc321 10845 gen_aa32_st8(s, tmp, addr, memidx);
5a839c0d
PM
10846 break;
10847 case 1:
12dcc321 10848 gen_aa32_st16(s, tmp, addr, memidx);
5a839c0d
PM
10849 break;
10850 case 2:
12dcc321 10851 gen_aa32_st32(s, tmp, addr, memidx);
5a839c0d 10852 break;
2a0308c5 10853 default:
5a839c0d 10854 tcg_temp_free_i32(tmp);
2a0308c5
PM
10855 tcg_temp_free_i32(addr);
10856 goto illegal_op;
b7bcbe95 10857 }
5a839c0d 10858 tcg_temp_free_i32(tmp);
2c0262af 10859 }
9ee6e8bb 10860 if (postinc)
b0109805
PB
10861 tcg_gen_addi_i32(addr, addr, imm);
10862 if (writeback) {
10863 store_reg(s, rn, addr);
10864 } else {
7d1b0095 10865 tcg_temp_free_i32(addr);
b0109805 10866 }
9ee6e8bb
PB
10867 }
10868 break;
10869 default:
10870 goto illegal_op;
2c0262af 10871 }
9ee6e8bb
PB
10872 return 0;
10873illegal_op:
10874 return 1;
2c0262af
FB
10875}
10876
0ecb72a5 10877static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10878{
10879 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10880 int32_t offset;
10881 int i;
39d5492a
PM
10882 TCGv_i32 tmp;
10883 TCGv_i32 tmp2;
10884 TCGv_i32 addr;
99c475ab 10885
9ee6e8bb
PB
10886 if (s->condexec_mask) {
10887 cond = s->condexec_cond;
bedd2912
JB
10888 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10889 s->condlabel = gen_new_label();
39fb730a 10890 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10891 s->condjmp = 1;
10892 }
9ee6e8bb
PB
10893 }
10894
f9fd40eb 10895 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
99c475ab 10896 s->pc += 2;
b5ff1b31 10897
99c475ab
FB
10898 switch (insn >> 12) {
10899 case 0: case 1:
396e467c 10900
99c475ab
FB
10901 rd = insn & 7;
10902 op = (insn >> 11) & 3;
10903 if (op == 3) {
10904 /* add/subtract */
10905 rn = (insn >> 3) & 7;
396e467c 10906 tmp = load_reg(s, rn);
99c475ab
FB
10907 if (insn & (1 << 10)) {
10908 /* immediate */
7d1b0095 10909 tmp2 = tcg_temp_new_i32();
396e467c 10910 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10911 } else {
10912 /* reg */
10913 rm = (insn >> 6) & 7;
396e467c 10914 tmp2 = load_reg(s, rm);
99c475ab 10915 }
9ee6e8bb
PB
10916 if (insn & (1 << 9)) {
10917 if (s->condexec_mask)
396e467c 10918 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10919 else
72485ec4 10920 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10921 } else {
10922 if (s->condexec_mask)
396e467c 10923 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10924 else
72485ec4 10925 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10926 }
7d1b0095 10927 tcg_temp_free_i32(tmp2);
396e467c 10928 store_reg(s, rd, tmp);
99c475ab
FB
10929 } else {
10930 /* shift immediate */
10931 rm = (insn >> 3) & 7;
10932 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10933 tmp = load_reg(s, rm);
10934 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10935 if (!s->condexec_mask)
10936 gen_logic_CC(tmp);
10937 store_reg(s, rd, tmp);
99c475ab
FB
10938 }
10939 break;
10940 case 2: case 3:
10941 /* arithmetic large immediate */
10942 op = (insn >> 11) & 3;
10943 rd = (insn >> 8) & 0x7;
396e467c 10944 if (op == 0) { /* mov */
7d1b0095 10945 tmp = tcg_temp_new_i32();
396e467c 10946 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10947 if (!s->condexec_mask)
396e467c
FN
10948 gen_logic_CC(tmp);
10949 store_reg(s, rd, tmp);
10950 } else {
10951 tmp = load_reg(s, rd);
7d1b0095 10952 tmp2 = tcg_temp_new_i32();
396e467c
FN
10953 tcg_gen_movi_i32(tmp2, insn & 0xff);
10954 switch (op) {
10955 case 1: /* cmp */
72485ec4 10956 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10957 tcg_temp_free_i32(tmp);
10958 tcg_temp_free_i32(tmp2);
396e467c
FN
10959 break;
10960 case 2: /* add */
10961 if (s->condexec_mask)
10962 tcg_gen_add_i32(tmp, tmp, tmp2);
10963 else
72485ec4 10964 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10965 tcg_temp_free_i32(tmp2);
396e467c
FN
10966 store_reg(s, rd, tmp);
10967 break;
10968 case 3: /* sub */
10969 if (s->condexec_mask)
10970 tcg_gen_sub_i32(tmp, tmp, tmp2);
10971 else
72485ec4 10972 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10973 tcg_temp_free_i32(tmp2);
396e467c
FN
10974 store_reg(s, rd, tmp);
10975 break;
10976 }
99c475ab 10977 }
99c475ab
FB
10978 break;
10979 case 4:
10980 if (insn & (1 << 11)) {
10981 rd = (insn >> 8) & 7;
5899f386
FB
10982 /* load pc-relative. Bit 1 of PC is ignored. */
10983 val = s->pc + 2 + ((insn & 0xff) * 4);
10984 val &= ~(uint32_t)2;
7d1b0095 10985 addr = tcg_temp_new_i32();
b0109805 10986 tcg_gen_movi_i32(addr, val);
c40c8556 10987 tmp = tcg_temp_new_i32();
12dcc321 10988 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7d1b0095 10989 tcg_temp_free_i32(addr);
b0109805 10990 store_reg(s, rd, tmp);
99c475ab
FB
10991 break;
10992 }
10993 if (insn & (1 << 10)) {
10994 /* data processing extended or blx */
10995 rd = (insn & 7) | ((insn >> 4) & 8);
10996 rm = (insn >> 3) & 0xf;
10997 op = (insn >> 8) & 3;
10998 switch (op) {
10999 case 0: /* add */
396e467c
FN
11000 tmp = load_reg(s, rd);
11001 tmp2 = load_reg(s, rm);
11002 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11003 tcg_temp_free_i32(tmp2);
396e467c 11004 store_reg(s, rd, tmp);
99c475ab
FB
11005 break;
11006 case 1: /* cmp */
396e467c
FN
11007 tmp = load_reg(s, rd);
11008 tmp2 = load_reg(s, rm);
72485ec4 11009 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11010 tcg_temp_free_i32(tmp2);
11011 tcg_temp_free_i32(tmp);
99c475ab
FB
11012 break;
11013 case 2: /* mov/cpy */
396e467c
FN
11014 tmp = load_reg(s, rm);
11015 store_reg(s, rd, tmp);
99c475ab
FB
11016 break;
11017 case 3:/* branch [and link] exchange thumb register */
b0109805 11018 tmp = load_reg(s, rm);
99c475ab 11019 if (insn & (1 << 7)) {
be5e7a76 11020 ARCH(5);
99c475ab 11021 val = (uint32_t)s->pc | 1;
7d1b0095 11022 tmp2 = tcg_temp_new_i32();
b0109805
PB
11023 tcg_gen_movi_i32(tmp2, val);
11024 store_reg(s, 14, tmp2);
99c475ab 11025 }
be5e7a76 11026 /* already thumb, no need to check */
d9ba4830 11027 gen_bx(s, tmp);
99c475ab
FB
11028 break;
11029 }
11030 break;
11031 }
11032
11033 /* data processing register */
11034 rd = insn & 7;
11035 rm = (insn >> 3) & 7;
11036 op = (insn >> 6) & 0xf;
11037 if (op == 2 || op == 3 || op == 4 || op == 7) {
11038 /* the shift/rotate ops want the operands backwards */
11039 val = rm;
11040 rm = rd;
11041 rd = val;
11042 val = 1;
11043 } else {
11044 val = 0;
11045 }
11046
396e467c 11047 if (op == 9) { /* neg */
7d1b0095 11048 tmp = tcg_temp_new_i32();
396e467c
FN
11049 tcg_gen_movi_i32(tmp, 0);
11050 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11051 tmp = load_reg(s, rd);
11052 } else {
39d5492a 11053 TCGV_UNUSED_I32(tmp);
396e467c 11054 }
99c475ab 11055
396e467c 11056 tmp2 = load_reg(s, rm);
5899f386 11057 switch (op) {
99c475ab 11058 case 0x0: /* and */
396e467c 11059 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11060 if (!s->condexec_mask)
396e467c 11061 gen_logic_CC(tmp);
99c475ab
FB
11062 break;
11063 case 0x1: /* eor */
396e467c 11064 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11065 if (!s->condexec_mask)
396e467c 11066 gen_logic_CC(tmp);
99c475ab
FB
11067 break;
11068 case 0x2: /* lsl */
9ee6e8bb 11069 if (s->condexec_mask) {
365af80e 11070 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11071 } else {
9ef39277 11072 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11073 gen_logic_CC(tmp2);
9ee6e8bb 11074 }
99c475ab
FB
11075 break;
11076 case 0x3: /* lsr */
9ee6e8bb 11077 if (s->condexec_mask) {
365af80e 11078 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11079 } else {
9ef39277 11080 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11081 gen_logic_CC(tmp2);
9ee6e8bb 11082 }
99c475ab
FB
11083 break;
11084 case 0x4: /* asr */
9ee6e8bb 11085 if (s->condexec_mask) {
365af80e 11086 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11087 } else {
9ef39277 11088 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11089 gen_logic_CC(tmp2);
9ee6e8bb 11090 }
99c475ab
FB
11091 break;
11092 case 0x5: /* adc */
49b4c31e 11093 if (s->condexec_mask) {
396e467c 11094 gen_adc(tmp, tmp2);
49b4c31e
RH
11095 } else {
11096 gen_adc_CC(tmp, tmp, tmp2);
11097 }
99c475ab
FB
11098 break;
11099 case 0x6: /* sbc */
2de68a49 11100 if (s->condexec_mask) {
396e467c 11101 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11102 } else {
11103 gen_sbc_CC(tmp, tmp, tmp2);
11104 }
99c475ab
FB
11105 break;
11106 case 0x7: /* ror */
9ee6e8bb 11107 if (s->condexec_mask) {
f669df27
AJ
11108 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11109 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11110 } else {
9ef39277 11111 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11112 gen_logic_CC(tmp2);
9ee6e8bb 11113 }
99c475ab
FB
11114 break;
11115 case 0x8: /* tst */
396e467c
FN
11116 tcg_gen_and_i32(tmp, tmp, tmp2);
11117 gen_logic_CC(tmp);
99c475ab 11118 rd = 16;
5899f386 11119 break;
99c475ab 11120 case 0x9: /* neg */
9ee6e8bb 11121 if (s->condexec_mask)
396e467c 11122 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11123 else
72485ec4 11124 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11125 break;
11126 case 0xa: /* cmp */
72485ec4 11127 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11128 rd = 16;
11129 break;
11130 case 0xb: /* cmn */
72485ec4 11131 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11132 rd = 16;
11133 break;
11134 case 0xc: /* orr */
396e467c 11135 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11136 if (!s->condexec_mask)
396e467c 11137 gen_logic_CC(tmp);
99c475ab
FB
11138 break;
11139 case 0xd: /* mul */
7b2919a0 11140 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11141 if (!s->condexec_mask)
396e467c 11142 gen_logic_CC(tmp);
99c475ab
FB
11143 break;
11144 case 0xe: /* bic */
f669df27 11145 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11146 if (!s->condexec_mask)
396e467c 11147 gen_logic_CC(tmp);
99c475ab
FB
11148 break;
11149 case 0xf: /* mvn */
396e467c 11150 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11151 if (!s->condexec_mask)
396e467c 11152 gen_logic_CC(tmp2);
99c475ab 11153 val = 1;
5899f386 11154 rm = rd;
99c475ab
FB
11155 break;
11156 }
11157 if (rd != 16) {
396e467c
FN
11158 if (val) {
11159 store_reg(s, rm, tmp2);
11160 if (op != 0xf)
7d1b0095 11161 tcg_temp_free_i32(tmp);
396e467c
FN
11162 } else {
11163 store_reg(s, rd, tmp);
7d1b0095 11164 tcg_temp_free_i32(tmp2);
396e467c
FN
11165 }
11166 } else {
7d1b0095
PM
11167 tcg_temp_free_i32(tmp);
11168 tcg_temp_free_i32(tmp2);
99c475ab
FB
11169 }
11170 break;
11171
11172 case 5:
11173 /* load/store register offset. */
11174 rd = insn & 7;
11175 rn = (insn >> 3) & 7;
11176 rm = (insn >> 6) & 7;
11177 op = (insn >> 9) & 7;
b0109805 11178 addr = load_reg(s, rn);
b26eefb6 11179 tmp = load_reg(s, rm);
b0109805 11180 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11181 tcg_temp_free_i32(tmp);
99c475ab 11182
c40c8556 11183 if (op < 3) { /* store */
b0109805 11184 tmp = load_reg(s, rd);
c40c8556
PM
11185 } else {
11186 tmp = tcg_temp_new_i32();
11187 }
99c475ab
FB
11188
11189 switch (op) {
11190 case 0: /* str */
12dcc321 11191 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11192 break;
11193 case 1: /* strh */
12dcc321 11194 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11195 break;
11196 case 2: /* strb */
12dcc321 11197 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11198 break;
11199 case 3: /* ldrsb */
12dcc321 11200 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11201 break;
11202 case 4: /* ldr */
12dcc321 11203 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11204 break;
11205 case 5: /* ldrh */
12dcc321 11206 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11207 break;
11208 case 6: /* ldrb */
12dcc321 11209 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11210 break;
11211 case 7: /* ldrsh */
12dcc321 11212 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11213 break;
11214 }
c40c8556 11215 if (op >= 3) { /* load */
b0109805 11216 store_reg(s, rd, tmp);
c40c8556
PM
11217 } else {
11218 tcg_temp_free_i32(tmp);
11219 }
7d1b0095 11220 tcg_temp_free_i32(addr);
99c475ab
FB
11221 break;
11222
11223 case 6:
11224 /* load/store word immediate offset */
11225 rd = insn & 7;
11226 rn = (insn >> 3) & 7;
b0109805 11227 addr = load_reg(s, rn);
99c475ab 11228 val = (insn >> 4) & 0x7c;
b0109805 11229 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11230
11231 if (insn & (1 << 11)) {
11232 /* load */
c40c8556 11233 tmp = tcg_temp_new_i32();
12dcc321 11234 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11235 store_reg(s, rd, tmp);
99c475ab
FB
11236 } else {
11237 /* store */
b0109805 11238 tmp = load_reg(s, rd);
12dcc321 11239 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11240 tcg_temp_free_i32(tmp);
99c475ab 11241 }
7d1b0095 11242 tcg_temp_free_i32(addr);
99c475ab
FB
11243 break;
11244
11245 case 7:
11246 /* load/store byte immediate offset */
11247 rd = insn & 7;
11248 rn = (insn >> 3) & 7;
b0109805 11249 addr = load_reg(s, rn);
99c475ab 11250 val = (insn >> 6) & 0x1f;
b0109805 11251 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11252
11253 if (insn & (1 << 11)) {
11254 /* load */
c40c8556 11255 tmp = tcg_temp_new_i32();
12dcc321 11256 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
b0109805 11257 store_reg(s, rd, tmp);
99c475ab
FB
11258 } else {
11259 /* store */
b0109805 11260 tmp = load_reg(s, rd);
12dcc321 11261 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
c40c8556 11262 tcg_temp_free_i32(tmp);
99c475ab 11263 }
7d1b0095 11264 tcg_temp_free_i32(addr);
99c475ab
FB
11265 break;
11266
11267 case 8:
11268 /* load/store halfword immediate offset */
11269 rd = insn & 7;
11270 rn = (insn >> 3) & 7;
b0109805 11271 addr = load_reg(s, rn);
99c475ab 11272 val = (insn >> 5) & 0x3e;
b0109805 11273 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11274
11275 if (insn & (1 << 11)) {
11276 /* load */
c40c8556 11277 tmp = tcg_temp_new_i32();
12dcc321 11278 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
b0109805 11279 store_reg(s, rd, tmp);
99c475ab
FB
11280 } else {
11281 /* store */
b0109805 11282 tmp = load_reg(s, rd);
12dcc321 11283 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
c40c8556 11284 tcg_temp_free_i32(tmp);
99c475ab 11285 }
7d1b0095 11286 tcg_temp_free_i32(addr);
99c475ab
FB
11287 break;
11288
11289 case 9:
11290 /* load/store from stack */
11291 rd = (insn >> 8) & 7;
b0109805 11292 addr = load_reg(s, 13);
99c475ab 11293 val = (insn & 0xff) * 4;
b0109805 11294 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11295
11296 if (insn & (1 << 11)) {
11297 /* load */
c40c8556 11298 tmp = tcg_temp_new_i32();
12dcc321 11299 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11300 store_reg(s, rd, tmp);
99c475ab
FB
11301 } else {
11302 /* store */
b0109805 11303 tmp = load_reg(s, rd);
12dcc321 11304 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11305 tcg_temp_free_i32(tmp);
99c475ab 11306 }
7d1b0095 11307 tcg_temp_free_i32(addr);
99c475ab
FB
11308 break;
11309
11310 case 10:
11311 /* add to high reg */
11312 rd = (insn >> 8) & 7;
5899f386
FB
11313 if (insn & (1 << 11)) {
11314 /* SP */
5e3f878a 11315 tmp = load_reg(s, 13);
5899f386
FB
11316 } else {
11317 /* PC. bit 1 is ignored. */
7d1b0095 11318 tmp = tcg_temp_new_i32();
5e3f878a 11319 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11320 }
99c475ab 11321 val = (insn & 0xff) * 4;
5e3f878a
PB
11322 tcg_gen_addi_i32(tmp, tmp, val);
11323 store_reg(s, rd, tmp);
99c475ab
FB
11324 break;
11325
11326 case 11:
11327 /* misc */
11328 op = (insn >> 8) & 0xf;
11329 switch (op) {
11330 case 0:
11331 /* adjust stack pointer */
b26eefb6 11332 tmp = load_reg(s, 13);
99c475ab
FB
11333 val = (insn & 0x7f) * 4;
11334 if (insn & (1 << 7))
6a0d8a1d 11335 val = -(int32_t)val;
b26eefb6
PB
11336 tcg_gen_addi_i32(tmp, tmp, val);
11337 store_reg(s, 13, tmp);
99c475ab
FB
11338 break;
11339
9ee6e8bb
PB
11340 case 2: /* sign/zero extend. */
11341 ARCH(6);
11342 rd = insn & 7;
11343 rm = (insn >> 3) & 7;
b0109805 11344 tmp = load_reg(s, rm);
9ee6e8bb 11345 switch ((insn >> 6) & 3) {
b0109805
PB
11346 case 0: gen_sxth(tmp); break;
11347 case 1: gen_sxtb(tmp); break;
11348 case 2: gen_uxth(tmp); break;
11349 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11350 }
b0109805 11351 store_reg(s, rd, tmp);
9ee6e8bb 11352 break;
99c475ab
FB
11353 case 4: case 5: case 0xc: case 0xd:
11354 /* push/pop */
b0109805 11355 addr = load_reg(s, 13);
5899f386
FB
11356 if (insn & (1 << 8))
11357 offset = 4;
99c475ab 11358 else
5899f386
FB
11359 offset = 0;
11360 for (i = 0; i < 8; i++) {
11361 if (insn & (1 << i))
11362 offset += 4;
11363 }
11364 if ((insn & (1 << 11)) == 0) {
b0109805 11365 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11366 }
99c475ab
FB
11367 for (i = 0; i < 8; i++) {
11368 if (insn & (1 << i)) {
11369 if (insn & (1 << 11)) {
11370 /* pop */
c40c8556 11371 tmp = tcg_temp_new_i32();
12dcc321 11372 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11373 store_reg(s, i, tmp);
99c475ab
FB
11374 } else {
11375 /* push */
b0109805 11376 tmp = load_reg(s, i);
12dcc321 11377 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11378 tcg_temp_free_i32(tmp);
99c475ab 11379 }
5899f386 11380 /* advance to the next address. */
b0109805 11381 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11382 }
11383 }
39d5492a 11384 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11385 if (insn & (1 << 8)) {
11386 if (insn & (1 << 11)) {
11387 /* pop pc */
c40c8556 11388 tmp = tcg_temp_new_i32();
12dcc321 11389 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11390 /* don't set the pc until the rest of the instruction
11391 has completed */
11392 } else {
11393 /* push lr */
b0109805 11394 tmp = load_reg(s, 14);
12dcc321 11395 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11396 tcg_temp_free_i32(tmp);
99c475ab 11397 }
b0109805 11398 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11399 }
5899f386 11400 if ((insn & (1 << 11)) == 0) {
b0109805 11401 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11402 }
99c475ab 11403 /* write back the new stack pointer */
b0109805 11404 store_reg(s, 13, addr);
99c475ab 11405 /* set the new PC value */
be5e7a76 11406 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11407 store_reg_from_load(s, 15, tmp);
be5e7a76 11408 }
99c475ab
FB
11409 break;
11410
9ee6e8bb
PB
11411 case 1: case 3: case 9: case 11: /* czb */
11412 rm = insn & 7;
d9ba4830 11413 tmp = load_reg(s, rm);
9ee6e8bb
PB
11414 s->condlabel = gen_new_label();
11415 s->condjmp = 1;
11416 if (insn & (1 << 11))
cb63669a 11417 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11418 else
cb63669a 11419 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11420 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11421 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11422 val = (uint32_t)s->pc + 2;
11423 val += offset;
11424 gen_jmp(s, val);
11425 break;
11426
11427 case 15: /* IT, nop-hint. */
11428 if ((insn & 0xf) == 0) {
11429 gen_nop_hint(s, (insn >> 4) & 0xf);
11430 break;
11431 }
11432 /* If Then. */
11433 s->condexec_cond = (insn >> 4) & 0xe;
11434 s->condexec_mask = insn & 0x1f;
11435 /* No actual code generated for this insn, just setup state. */
11436 break;
11437
06c949e6 11438 case 0xe: /* bkpt */
d4a2dc67
PM
11439 {
11440 int imm8 = extract32(insn, 0, 8);
be5e7a76 11441 ARCH(5);
73710361
GB
11442 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11443 default_exception_el(s));
06c949e6 11444 break;
d4a2dc67 11445 }
06c949e6 11446
19a6e31c
PM
11447 case 0xa: /* rev, and hlt */
11448 {
11449 int op1 = extract32(insn, 6, 2);
11450
11451 if (op1 == 2) {
11452 /* HLT */
11453 int imm6 = extract32(insn, 0, 6);
11454
11455 gen_hlt(s, imm6);
11456 break;
11457 }
11458
11459 /* Otherwise this is rev */
9ee6e8bb
PB
11460 ARCH(6);
11461 rn = (insn >> 3) & 0x7;
11462 rd = insn & 0x7;
b0109805 11463 tmp = load_reg(s, rn);
19a6e31c 11464 switch (op1) {
66896cb8 11465 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11466 case 1: gen_rev16(tmp); break;
11467 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11468 default:
11469 g_assert_not_reached();
9ee6e8bb 11470 }
b0109805 11471 store_reg(s, rd, tmp);
9ee6e8bb 11472 break;
19a6e31c 11473 }
9ee6e8bb 11474
d9e028c1
PM
11475 case 6:
11476 switch ((insn >> 5) & 7) {
11477 case 2:
11478 /* setend */
11479 ARCH(6);
9886ecdf
PB
11480 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11481 gen_helper_setend(cpu_env);
11482 s->is_jmp = DISAS_UPDATE;
d9e028c1 11483 }
9ee6e8bb 11484 break;
d9e028c1
PM
11485 case 3:
11486 /* cps */
11487 ARCH(6);
11488 if (IS_USER(s)) {
11489 break;
8984bd2e 11490 }
b53d8923 11491 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11492 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11493 /* FAULTMASK */
11494 if (insn & 1) {
11495 addr = tcg_const_i32(19);
11496 gen_helper_v7m_msr(cpu_env, addr, tmp);
11497 tcg_temp_free_i32(addr);
11498 }
11499 /* PRIMASK */
11500 if (insn & 2) {
11501 addr = tcg_const_i32(16);
11502 gen_helper_v7m_msr(cpu_env, addr, tmp);
11503 tcg_temp_free_i32(addr);
11504 }
11505 tcg_temp_free_i32(tmp);
11506 gen_lookup_tb(s);
11507 } else {
11508 if (insn & (1 << 4)) {
11509 shift = CPSR_A | CPSR_I | CPSR_F;
11510 } else {
11511 shift = 0;
11512 }
11513 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11514 }
d9e028c1
PM
11515 break;
11516 default:
11517 goto undef;
9ee6e8bb
PB
11518 }
11519 break;
11520
99c475ab
FB
11521 default:
11522 goto undef;
11523 }
11524 break;
11525
11526 case 12:
a7d3970d 11527 {
99c475ab 11528 /* load/store multiple */
39d5492a
PM
11529 TCGv_i32 loaded_var;
11530 TCGV_UNUSED_I32(loaded_var);
99c475ab 11531 rn = (insn >> 8) & 0x7;
b0109805 11532 addr = load_reg(s, rn);
99c475ab
FB
11533 for (i = 0; i < 8; i++) {
11534 if (insn & (1 << i)) {
99c475ab
FB
11535 if (insn & (1 << 11)) {
11536 /* load */
c40c8556 11537 tmp = tcg_temp_new_i32();
12dcc321 11538 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11539 if (i == rn) {
11540 loaded_var = tmp;
11541 } else {
11542 store_reg(s, i, tmp);
11543 }
99c475ab
FB
11544 } else {
11545 /* store */
b0109805 11546 tmp = load_reg(s, i);
12dcc321 11547 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11548 tcg_temp_free_i32(tmp);
99c475ab 11549 }
5899f386 11550 /* advance to the next address */
b0109805 11551 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11552 }
11553 }
b0109805 11554 if ((insn & (1 << rn)) == 0) {
a7d3970d 11555 /* base reg not in list: base register writeback */
b0109805
PB
11556 store_reg(s, rn, addr);
11557 } else {
a7d3970d
PM
11558 /* base reg in list: if load, complete it now */
11559 if (insn & (1 << 11)) {
11560 store_reg(s, rn, loaded_var);
11561 }
7d1b0095 11562 tcg_temp_free_i32(addr);
b0109805 11563 }
99c475ab 11564 break;
a7d3970d 11565 }
99c475ab
FB
11566 case 13:
11567 /* conditional branch or swi */
11568 cond = (insn >> 8) & 0xf;
11569 if (cond == 0xe)
11570 goto undef;
11571
11572 if (cond == 0xf) {
11573 /* swi */
eaed129d 11574 gen_set_pc_im(s, s->pc);
d4a2dc67 11575 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 11576 s->is_jmp = DISAS_SWI;
99c475ab
FB
11577 break;
11578 }
11579 /* generate a conditional jump to next instruction */
e50e6a20 11580 s->condlabel = gen_new_label();
39fb730a 11581 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11582 s->condjmp = 1;
99c475ab
FB
11583
11584 /* jump to the offset */
5899f386 11585 val = (uint32_t)s->pc + 2;
99c475ab 11586 offset = ((int32_t)insn << 24) >> 24;
5899f386 11587 val += offset << 1;
8aaca4c0 11588 gen_jmp(s, val);
99c475ab
FB
11589 break;
11590
11591 case 14:
358bf29e 11592 if (insn & (1 << 11)) {
9ee6e8bb
PB
11593 if (disas_thumb2_insn(env, s, insn))
11594 goto undef32;
358bf29e
PB
11595 break;
11596 }
9ee6e8bb 11597 /* unconditional branch */
99c475ab
FB
11598 val = (uint32_t)s->pc;
11599 offset = ((int32_t)insn << 21) >> 21;
11600 val += (offset << 1) + 2;
8aaca4c0 11601 gen_jmp(s, val);
99c475ab
FB
11602 break;
11603
11604 case 15:
9ee6e8bb 11605 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11606 goto undef32;
9ee6e8bb 11607 break;
99c475ab
FB
11608 }
11609 return;
9ee6e8bb 11610undef32:
73710361
GB
11611 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11612 default_exception_el(s));
9ee6e8bb
PB
11613 return;
11614illegal_op:
99c475ab 11615undef:
73710361
GB
11616 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11617 default_exception_el(s));
99c475ab
FB
11618}
11619
541ebcd4
PM
11620static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11621{
11622 /* Return true if the insn at dc->pc might cross a page boundary.
11623 * (False positives are OK, false negatives are not.)
11624 */
11625 uint16_t insn;
11626
11627 if ((s->pc & 3) == 0) {
11628 /* At a 4-aligned address we can't be crossing a page */
11629 return false;
11630 }
11631
11632 /* This must be a Thumb insn */
f9fd40eb 11633 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4
PM
11634
11635 if ((insn >> 11) >= 0x1d) {
11636 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11637 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11638 * end up actually treating this as two 16-bit insns (see the
11639 * code at the start of disas_thumb2_insn()) but we don't bother
11640 * to check for that as it is unlikely, and false positives here
11641 * are harmless.
11642 */
11643 return true;
11644 }
11645 /* Definitely a 16-bit insn, can't be crossing a page. */
11646 return false;
11647}
11648
/* generate intermediate code for basic block 'tb'.
 *
 * Translates guest instructions starting at tb->pc into TCG ops until the
 * TB must end: a branch/exception, a page boundary, the TCG op buffer
 * filling up, the instruction-count budget being reached, or single-step
 * being active.  On return tb->size and tb->icount describe what was
 * translated.  AArch64 state is handed off to the A64 front end.
 */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    bool end_of_page;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    /* Populate the DisasContext from the TB flags; this is the decoder's
     * only view of CPU state, so everything it needs must be copied here.
     */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    /* Scratch TCG temps shared by the whole decoder (file-scope globals). */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block.  So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now.  This avoids complications trying
     * to do it at the end of the block.  (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn().  The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
        /* Record PC and IT state for restore_state_to_opc(). */
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                           0);
        num_insns++;

#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#endif

        /* Breakpoint handling: CPU (architectural) breakpoints just end
         * the TB after a helper check; debugger breakpoints raise
         * EXCP_DEBUG and stop generation immediately.
         */
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           to for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing. */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        dc->pc += 2;
                        goto done_generating;
                    }
                    break;
                }
            }
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine after each Thumb insn. */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath. */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                /* FIXME: Single stepping a WFI insn will not halt
                   the CPU.  */
                gen_exception_internal(EXCP_DEBUG);
            }
        }
        if (dc->condjmp) {
            /* "Condition failed" instruction codepath. */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_set_pc_im(dc, dc->pc);
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        case DISAS_JUMP:
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {
            /* "Condition failed" path: fall through to the next insn. */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
        qemu_log_in_addr_range(pc_start)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->sctlr_b << 1));
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
12034
b5ff1b31 12035static const char *cpu_mode_names[16] = {
28c9457d
EI
12036 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12037 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 12038};
9ee6e8bb 12039
878096ee
AF
12040void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12041 int flags)
2c0262af 12042{
878096ee
AF
12043 ARMCPU *cpu = ARM_CPU(cs);
12044 CPUARMState *env = &cpu->env;
2c0262af 12045 int i;
b5ff1b31 12046 uint32_t psr;
06e5cf7a 12047 const char *ns_status;
2c0262af 12048
17731115
PM
12049 if (is_a64(env)) {
12050 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12051 return;
12052 }
12053
2c0262af 12054 for(i=0;i<16;i++) {
7fe48483 12055 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 12056 if ((i % 4) == 3)
7fe48483 12057 cpu_fprintf(f, "\n");
2c0262af 12058 else
7fe48483 12059 cpu_fprintf(f, " ");
2c0262af 12060 }
b5ff1b31 12061 psr = cpsr_read(env);
06e5cf7a
PM
12062
12063 if (arm_feature(env, ARM_FEATURE_EL3) &&
12064 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12065 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12066 } else {
12067 ns_status = "";
12068 }
12069
12070 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
687fa640 12071 psr,
b5ff1b31
FB
12072 psr & (1 << 31) ? 'N' : '-',
12073 psr & (1 << 30) ? 'Z' : '-',
12074 psr & (1 << 29) ? 'C' : '-',
12075 psr & (1 << 28) ? 'V' : '-',
5fafdf24 12076 psr & CPSR_T ? 'T' : 'A',
06e5cf7a 12077 ns_status,
b5ff1b31 12078 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 12079
f2617cfc
PM
12080 if (flags & CPU_DUMP_FPU) {
12081 int numvfpregs = 0;
12082 if (arm_feature(env, ARM_FEATURE_VFP)) {
12083 numvfpregs += 16;
12084 }
12085 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12086 numvfpregs += 16;
12087 }
12088 for (i = 0; i < numvfpregs; i++) {
12089 uint64_t v = float64_val(env->vfp.regs[i]);
12090 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12091 i * 2, (uint32_t)v,
12092 i * 2 + 1, (uint32_t)(v >> 32),
12093 i, v);
12094 }
12095 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 12096 }
2c0262af 12097}
a6b025d3 12098
bad729e2
RH
12099void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12100 target_ulong *data)
d2856f1a 12101{
3926cc84 12102 if (is_a64(env)) {
bad729e2 12103 env->pc = data[0];
40f860cd 12104 env->condexec_bits = 0;
aaa1f954 12105 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12106 } else {
bad729e2
RH
12107 env->regs[15] = data[0];
12108 env->condexec_bits = data[1];
aaa1f954 12109 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12110 }
d2856f1a 12111}